Dataset schema (column, type, observed range):

repo_name          string (lengths 5 to 114)
repo_url           string (lengths 24 to 133)
snapshot_id        string (length 40)
revision_id        string (length 40)
directory_id       string (length 40)
branch_name        string (209 distinct values)
visit_date         timestamp[ns]
revision_date      timestamp[ns]
committer_date     timestamp[ns]
github_id          int64 (9.83k to 683M)
star_events_count  int64 (0 to 22.6k)
fork_events_count  int64 (0 to 4.15k)
gha_license_id     string (17 distinct values)
gha_created_at     timestamp[ns]
gha_updated_at     timestamp[ns]
gha_pushed_at      timestamp[ns]
gha_language       string (115 distinct values)
files              list (lengths 1 to 13.2k)
num_files          int64 (1 to 13.2k)
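A dataset with this schema can be loaded and inspected with the Hugging Face `datasets` library. Below is a minimal sketch; the identifier `user/dataset-name` is a placeholder, since the actual dataset name is not shown on this page.

```python
# A minimal sketch, assuming a Hub-hosted dataset with the schema above.
# "user/dataset-name" is a placeholder identifier, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("user/dataset-name", split="train", streaming=True)

# Pull one record without materializing the whole split.
row = next(iter(ds))
print(row["repo_name"], row["branch_name"], row["num_files"])

# Each entry in "files" carries per-file metadata plus the full source text.
for f in row["files"]:
    print(f["path"], f["language"], f["length_bytes"])
```

Streaming mode avoids downloading the full split, which matters for a corpus whose `files` lists can run to 13.2k entries per row.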
Sample rows:

Row 1
repo_name: kevin11h/pyquil
repo_url: https://github.com/kevin11h/pyquil
snapshot_id: 3c744bea6f43bf07412def0980492e07fc19e37b
revision_id: 5449f9a824ac76e0044cba0f9bcf4a65ffbee901
directory_id: 0e07d36c8adf9ad2ac114e6b1005f0db267be3be
branch_name: refs/heads/master
visit_date: 2020-05-19T03:45:17.226504
revision_date: 2019-05-01T17:00:03
committer_date: 2019-05-01T17:00:03
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.7155963182449341, "alphanum_fraction": 0.7431192398071289, "avg_line_length": 26.25, "blob_id": "82d671577be7692e68aa6130c8d3c345b994540d", "content_id": "9b7892fcf23ec4ec98c9e3d4ceaced1ecb3e26d1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 109, "license_type": "permissive", "max_line_length": 53, "num_lines": 4, "path": "/pyquil/__init__.py", "repo_name": "kevin11h/pyquil", "src_encoding": "UTF-8", "text": "__version__ = \"2.7.1\"\n\nfrom pyquil.quil import Program\nfrom pyquil.api import list_quantum_computers, get_qc\n" } ]
num_files: 1
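Since each element of `files` embeds the complete file text alongside its repo-relative `path` and `src_encoding`, a single record can be written back out as a directory tree. A minimal sketch follows, assuming `row` is one record shaped like Row 1 above; the remaining sample rows continue after it.

```python
# A minimal sketch: write one record's files back to disk.
# Assumes `row` is a dataset record shaped like the sample row above.
import os

def write_files(row, out_dir="extracted"):
    # One folder per repo; "/" in the repo name is not a path separator here.
    root = os.path.join(out_dir, row["repo_name"].replace("/", "__"))
    for f in row["files"]:
        # "path" values are repo-relative, e.g. "/pyquil/__init__.py".
        dest = os.path.join(root, f["path"].lstrip("/"))
        os.makedirs(os.path.dirname(dest), exist_ok=True)
        # src_encoding values seen in the rows, such as "UTF-8" and
        # "WINDOWS-1251", are valid Python codec names once lowercased.
        with open(dest, "w", encoding=f["src_encoding"].lower()) as fh:
            fh.write(f["text"])
```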
Row 2
repo_name: dimitri98713/Mytest
repo_url: https://github.com/dimitri98713/Mytest
snapshot_id: 0009fdd1100ed50c1e862b102a01aef6924a5674
revision_id: 00f228ef7de909f9e02bdcdce19d2feaf8aa2d4d
directory_id: 06bbdb7dea3ef28c5bd03614e5fd721c9e6eecaf
branch_name: refs/heads/master
visit_date: 2020-06-03T15:53:10.254687
revision_date: 2019-06-17T23:59:18
committer_date: 2019-06-17T23:59:18
github_id: 191,637,981
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5161290168762207, "alphanum_fraction": 0.5376344323158264, "avg_line_length": 10.25, "blob_id": "6f7b17e85278b8686773449dd74bec593284cc21", "content_id": "38060bf1fc384cea88940ab2656d90b555a5332c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 93, "license_type": "no_license", "max_line_length": 15, "num_lines": 8, "path": "/point.py", "repo_name": "dimitri98713/Mytest", "src_encoding": "UTF-8", "text": "def point(x,y):\n\treturn [x,y]\n\t\n\tdef getx(p):\n\t\treturn p[0]\n\t\n\tdef gety(p):\n\t\treturn p[1]\n\n\n\n" }, { "alpha_fraction": 0.5744680762290955, "alphanum_fraction": 0.6170212626457214, "avg_line_length": 18.85714340209961, "blob_id": "2bbc5fcbdd9eda9faceb1668dc4d61f4be81be88", "content_id": "dc1508a58d37d4a4f9776b4f21064a255eded847", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 141, "license_type": "no_license", "max_line_length": 27, "num_lines": 7, "path": "/test_point_creation.py", "repo_name": "dimitri98713/Mytest", "src_encoding": "UTF-8", "text": "from point import *\ndef test_point_creation():\n\tp = point(22,7)\n\t\n\tdef test_access_x_and_y():\n\t\tassert 22 == getx(p)\n\t\tassert 7 == gety(p)\n\n\n" } ]
num_files: 2
Row 3
repo_name: kartikeya532001/GeeksForGeeks
repo_url: https://github.com/kartikeya532001/GeeksForGeeks
snapshot_id: b9d6e37a4932b210055c034ee7e349620eb910c2
revision_id: 371f9ecea5c1c23eee576929142b47c487e20612
directory_id: b651972878e196737245f94d60dd5b99d65c19d9
branch_name: refs/heads/main
visit_date: 2023-02-17T21:40:12.629298
revision_date: 2021-01-20T19:06:45
committer_date: 2021-01-20T19:06:45
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.35743802785873413, "alphanum_fraction": 0.37396693229675293, "avg_line_length": 15.689655303955078, "blob_id": "3fa2358b7811df0dab261c9e7271596e5ed9f71f", "content_id": "1fa94db57d90fd9d3fee5828a2bb58905decccb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 484, "license_type": "no_license", "max_line_length": 39, "num_lines": 29, "path": "/Ascii_Sum.cpp", "repo_name": "kartikeya532001/GeeksForGeeks", "src_encoding": "UTF-8", "text": "#include<iostream>\nusing namespace std;\nint main()\n{\n int tcase;\n cin>>tcase;\n for(int i =0;i<tcase;i++){\n string s;\n int sum=0,pro=0;\n cin>>s;\n for(int j=0;j<s.length();j++){\n sum=sum+int(s[j]);\n }\n for(int i=0;i<sizeof(arr);i++){\n\n if(arr[i]==sum){\n cout<<\"1\\n\";\n break;\n }\n else{\n cout<<\"0\\n\";\n break;\n }\n }\n }\n\n\nreturn 0;\n}\n" }, { "alpha_fraction": 0.3513985872268677, "alphanum_fraction": 0.38111889362335205, "avg_line_length": 13.666666984558105, "blob_id": "191555a5b5e39edb540e6010cc7bc0111c81c23d", "content_id": "5e6935825e65844c044f465c30c865fe3040c9a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 572, "license_type": "no_license", "max_line_length": 44, "num_lines": 39, "path": "/Merge_Strings.cpp", "repo_name": "kartikeya532001/GeeksForGeeks", "src_encoding": "UTF-8", "text": "#include<iostream>\nusing namespace std;\nint main()\n{\n int tcase ;\n cin>>tcase;\n for(int i=0;i<tcase;i++){\n string str1,str2;\n cin>>str1>>str2;\n int len=str1.length()+str2.length();\n char arr[len];\n for(int k=0;k<len;k++){\n\n\n if(k%2==0){\n\n arr[k]=str1[k];\n }\n\n }\n for(int k=0;k<len;k++){\n\n\n if(k%2!=0){\n\n arr[k]=str2[k];\n }\n\n }\n for(int l=0;l<len;l++){\n cout<<arr[l];\n }\n cout<<\"\\n\";\n\n }\n\n\nreturn 0;\n}\n" }, { "alpha_fraction": 0.5395683646202087, "alphanum_fraction": 0.5539568066596985, "avg_line_length": 9.692307472229004, "blob_id": "13eea3bf1842d938dd4333038b2b9139a7ca9367", "content_id": "96330fc958afbc9758c4b1d0c9baee3a04492b9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 139, "license_type": "no_license", "max_line_length": 28, "num_lines": 13, "path": "/Rm_Digit.cpp", "repo_name": "kartikeya532001/GeeksForGeeks", "src_encoding": "UTF-8", "text": "#include<iostream>\nusing namespace std;\nint main()\n{\n int tcase;\n cin>>tcase;\n for(int i=0;i<tcase;i++)\n {\n\n }\n\nreturn 0;\n}\n" }, { "alpha_fraction": 0.5429141521453857, "alphanum_fraction": 0.5489022135734558, "avg_line_length": 16.275861740112305, "blob_id": "d4c8c32b4ee1a3177ff6d5be5198ce7426f1cf64", "content_id": "a54ae118cb968205c0c2423e335a1d73d9cb9de4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 501, "license_type": "no_license", "max_line_length": 47, "num_lines": 29, "path": "/Nth_term.cpp", "repo_name": "kartikeya532001/GeeksForGeeks", "src_encoding": "UTF-8", "text": "// { Driver Code Starts\n// Initial Template for C++\n#include <bits/stdc++.h>\nusing namespace std;\n\n // } Driver Code Ends\n\n\n// User function Template for C++\nclass Solution {\n public:\n long long int nthOfSeries(long long int n){\n return 8*n*n+1;\n }\n};\n\n// { Driver Code Starts.\nint main() {\n int t;\n cin >> t;\n while (t--) {\n long long int n;\n cin >> n;\n Solution ob;\n cout << ob.nthOfSeries(n) << endl;\n }\n return 0;\n}\n // } Driver Code Ends\n" }, { "alpha_fraction": 0.30886074900627136, 
"alphanum_fraction": 0.32658228278160095, "avg_line_length": 21.571428298950195, "blob_id": "6a91860be9dc4281ff957ddcdced622ae0cc1b08", "content_id": "ccb50755db8b733b33a8e35ab9bcd001c4e95bf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 790, "license_type": "no_license", "max_line_length": 54, "num_lines": 35, "path": "/Smallest2nd.cpp", "repo_name": "kartikeya532001/GeeksForGeeks", "src_encoding": "UTF-8", "text": "#include <iostream>\nusing namespace std;\n\nint main()\n{\n int len,temp,tcase;\n cin>>tcase;\n for(int i =0;i<tcase;i++){\n cin>>len;\n int arr[len];\n for(int i=0;i<len;i++){for(int i=0;i<len;i++){\n for(int j=i+1;j<len;j++){\n if(arr[i]>arr[j]){\n temp=arr[i];\n arr[i]=arr[j];\n arr[j]=temp;\n\n }\n }\n }\n cin>>arr[i];\n }\n\n for(int k=0;k<len;k++){\n if(len<2||arr[0]==arr[k]){\n cout<<\"-1\\n\";\n }else if(arr[0]==arr[k]){\n cout<<arr[0]<<\" \"<<arr[k+1]<<\"\\n\";\n }else{\n cout<<arr[0]<<\" \"<<arr[1]<<\"\\n\";\n }\n }\n }\n return 0;\n}\n" }, { "alpha_fraction": 0.49744898080825806, "alphanum_fraction": 0.5076530575752258, "avg_line_length": 16.863636016845703, "blob_id": "e71fdbd95271ee04499ffad3a34530a4192022be", "content_id": "42bf1643079e65cb96a6ec4da73c927bb6846f25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 392, "license_type": "no_license", "max_line_length": 35, "num_lines": 22, "path": "/Fav_Num.py", "repo_name": "kartikeya532001/GeeksForGeeks", "src_encoding": "UTF-8", "text": "#User function Template for python3\nclass Solution:\n def isValid (self,N):\n if N%5==0:\n return \"YES\"\n return \"NO\" \n\n\n\n#{ \n# Driver Code Starts\n#Initial Template for Python 3\nif __name__ == '__main__': \n t = int (input ())\n for _ in range (t):\n \n N=int(input())\n \n\n ob = Solution()\n print(ob.isValid(N))\n# } Driver Code Ends" }, { "alpha_fraction": 0.4472222328186035, "alphanum_fraction": 0.4611110985279083, "avg_line_length": 17, "blob_id": "6883d02e1dceb38e4e4f1eb6ab1acdc724e5967c", "content_id": "c9b3516850ab237cdd267eb5214b6645cd7ca390", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 360, "license_type": "no_license", "max_line_length": 41, "num_lines": 20, "path": "/Uppercase.cpp", "repo_name": "kartikeya532001/GeeksForGeeks", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<cctype>\nusing namespace std;\nint main()\n{\n string word;\n int tcase;\n cin>>tcase;\n for(int i=0;i<tcase;i++){\n int count=0;\n cin>>word;\n for(int j=0;j<word.length();j++){\n if(isupper(word[j])){\n count++;\n }\n }\n cout<<count+1<<\"\\n\";\n }\nreturn 0;\n}\n" }, { "alpha_fraction": 0.41435185074806213, "alphanum_fraction": 0.42592594027519226, "avg_line_length": 18.636363983154297, "blob_id": "4bf19415a88a5a671edbf193dd1ced75112b6649", "content_id": "3fa9f7a1df2d7e39c92572c3bb8311b4558b2b11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 432, "license_type": "no_license", "max_line_length": 47, "num_lines": 22, "path": "/Distinct_Char.cpp", "repo_name": "kartikeya532001/GeeksForGeeks", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<cctype>\nusing namespace std;\nint main()\n{\n string word;\n int tcase;\n cin>>tcase;\n for(int i=0;i<tcase;i++){\n int count=0;\n cin>>word;\n for(int j=0;j<word.length();j++){\n for(int k=j+1;k<word.length();k++){\n if(word[j]==word[k]){\n count++;\n }\n }\n cout<<count<<\"\\n\";\n }\n 
}\nreturn 0;\n}\n" }, { "alpha_fraction": 0.39262819290161133, "alphanum_fraction": 0.3990384638309479, "avg_line_length": 11.979166984558105, "blob_id": "845362967def123aac50afc19aef05dbd121bbc9", "content_id": "de1953e8d60de4381097e9f396737d46bf8faa14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 624, "license_type": "no_license", "max_line_length": 35, "num_lines": 48, "path": "/Choclate.cpp", "repo_name": "kartikeya532001/GeeksForGeeks", "src_encoding": "UTF-8", "text": "\n// { Driver Code Starts\n\n#include<bits/stdc++.h>\nusing namespace std;\n\n\nint chocolates(int arr[], int n);\n\n\nint main()\n{\n\n int t;cin>> t;\n while(t--)\n {\n int n;\n cin >> n;\n int arr[n];\n\n for(int i=0;i<n;i++)\n cin>>arr[i];\n\n\n cout << chocolates(arr, n);\n cout << endl;\n\n }\n\n}\n\n\n\nint chocolates(int arr[], int n)\n{\n int temp;\n for(int i=0;i<n;i++){\n for(int j=i+1;j<n;j++){\n if(arr[i]>arr[j]){\n temp=arr[i];\n arr[i]=arr[j];\n arr[j]=temp;\n\n }\n }\n }\n return arr[0];\n\n}\n" }, { "alpha_fraction": 0.5056179761886597, "alphanum_fraction": 0.5101123452186584, "avg_line_length": 14.344827651977539, "blob_id": "46664190cfb73e873bb3b34d8637901ca5be807c", "content_id": "c8984cb1ba41dc3b313adb5ea401aeb2c4ac6ff5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 445, "license_type": "no_license", "max_line_length": 46, "num_lines": 29, "path": "/Max_Money.cpp", "repo_name": "kartikeya532001/GeeksForGeeks", "src_encoding": "UTF-8", "text": "// { Driver Code Starts\n#include <bits/stdc++.h>\nusing namespace std;\n\n // } Driver Code Ends\n\n\nclass Solution {\n public:\n int maximizeMoney(int N , int K) {\n int rem=N/2;\n return rem*K;\n }\n};\n\n// { Driver Code Starts.\nint main() {\n int t;\n cin >> t;\n while (t--) {\n int N,K;\n\n cin>>N>>K;\n\n Solution ob;\n cout << ob.maximizeMoney(N,K) << endl;\n }\n return 0;\n} // } Driver Code Ends\n" }, { "alpha_fraction": 0.4560810923576355, "alphanum_fraction": 0.47297295928001404, "avg_line_length": 14.578947067260742, "blob_id": "0be56c082436ded95994f8b326e10f73a40a6b72", "content_id": "7f4049e0d1c0ac397ae3152c1517375ffbcbb5f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 296, "license_type": "no_license", "max_line_length": 31, "num_lines": 19, "path": "/Reverse_Array.cpp", "repo_name": "kartikeya532001/GeeksForGeeks", "src_encoding": "UTF-8", "text": "#include <iostream>\nusing namespace std;\n\nint main() {\n\tint tcase;\n\tcin>>tcase;\n\tfor(int i=0;i<tcase;i++){\n\t int len;\n\t cin>>len;\n\t int arr[len];\n\t for(int j=0;j<len;j++){\n\t cin>>arr[j];\n\t }\n\t for(int k=len-1;k>=0;k--){\n\t cout<<arr[k]<<\" \";\n\t }\n\t}\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.4452347159385681, "alphanum_fraction": 0.452347069978714, "avg_line_length": 16.14634132385254, "blob_id": "07ed58181576571c9bda0dab8b689c54f19153ac", "content_id": "84fb881d7dade8a7917f908e8bb766e148766e5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 703, "license_type": "no_license", "max_line_length": 42, "num_lines": 41, "path": "/Deficient_Num.cpp", "repo_name": "kartikeya532001/GeeksForGeeks", "src_encoding": "UTF-8", "text": "// { Driver Code Starts\n// Initial Template for C++\n#include <bits/stdc++.h>\nusing namespace std;\n\n // } Driver Code Ends\n\n\n// User function Template for C++\nclass Solution {\n public:\n string isDeficient(long 
long int x) {\n int sum=0;\n for(int i=1;i<=x;i++){\n if(x%i==0){\n\n sum=sum+i;\n }\n }\n if(sum>2*x){\n return \"NO\";\n }\n else{\n return \"YES\";\n }\n }\n};\n\n// { Driver Code Starts.\nint main() {\n int t;\n cin >> t;\n while (t--) {\n long long int x;\n cin >> x;\n Solution ob;\n cout << ob.isDeficient(x) << endl;\n }\n return 0;\n}\n // } Driver Code Ends\n" }, { "alpha_fraction": 0.3390558063983917, "alphanum_fraction": 0.3476394712924957, "avg_line_length": 19.2608699798584, "blob_id": "95071b25983fc2636274966eefbca965b780346a", "content_id": "87ca7908edd3ce9e50241b33e27127027b9d2e57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 466, "license_type": "no_license", "max_line_length": 44, "num_lines": 23, "path": "/Sort_Strings.cpp", "repo_name": "kartikeya532001/GeeksForGeeks", "src_encoding": "UTF-8", "text": "#include<iostream>\nusing namespace std;\nint main(){\n int tcase,len;\n cin>>tcase;\n for(int i=0;i<tcase;i++){\n char ch;\n string s;\n cin>>s;\n for(int j=0;j<s.length();j++){\n for(int k=j+1;k<s.length();k++){\n if(int(s[j])>int(s[k])){\n ch=s[j];\n s[j]=s[k];\n s[k]=ch;\n }\n }\n }\n cout<<s<<\"\\n\";\n\n }\nreturn 0;\n}\n" }, { "alpha_fraction": 0.4417862892150879, "alphanum_fraction": 0.4481658637523651, "avg_line_length": 15.076923370361328, "blob_id": "a73ac587e8cf3f3ee6dbe52c26fb76d18807eca8", "content_id": "2294ab81229fcec4f465f273bbb87f2443bc8105", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 627, "license_type": "no_license", "max_line_length": 39, "num_lines": 39, "path": "/Sum_Divisor.cpp", "repo_name": "kartikeya532001/GeeksForGeeks", "src_encoding": "UTF-8", "text": "// { Driver Code Starts\n#include<bits/stdc++.h>\nusing namespace std;\n\n // } Driver Code Ends\n\n\n//User function Template for C++\nclass Solution\n{\npublic:\n int sumOfDivisors(int N)\n {\n int sum=0;\n for(int i=1;i<=N;i++){\n if(N%i==0){\n cout<<i<<\"\\n\";\n sum=sum+i+N;\n }\n }\n return sum;\n }\n};\n\n// { Driver Code Starts.\nint main()\n{\n int t;\n cin >> t;\n while (t--)\n {\n int N;\n cin>>N;\n Solution ob;\n int ans = ob.sumOfDivisors(N);\n cout<<ans<<endl;\n }\n return 0;\n} // } Driver Code Ends\n" }, { "alpha_fraction": 0.30038022994995117, "alphanum_fraction": 0.32129278779029846, "avg_line_length": 16.53333282470703, "blob_id": "b5ffce9c57b509177f295dc3a10a2b2c077bbef6", "content_id": "9278b5111cf62f152a8c829be509dcde318ce5ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 526, "license_type": "no_license", "max_line_length": 32, "num_lines": 30, "path": "/StutuProb.cpp", "repo_name": "kartikeya532001/GeeksForGeeks", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nusing namespace std;\nint main(){\n int tcase;\n cin>>tcase;\n for(int i=0;i<tcase;i++){\n long long n;\n cin>>n;\n int num;\n for(int j=0;j<1000;j++){\n int sum=j*(j+1)/2;\n if(n==sum){\n num=j;\n break;\n }else{\n num=-1;\n }\n\n }\n if(num!=-1){\n cout<<num<<\"\\n\";\n }\n else{\n cout<<num<<\"\\n\";\n }\n }\n\nreturn 0;\n}\n" }, { "alpha_fraction": 0.39411765336990356, "alphanum_fraction": 0.42235293984413147, "avg_line_length": 12.492063522338867, "blob_id": "cdeaf2ebb45d375156b8d43814884e639ffc4396", "content_id": "c7f63e4c733c216ae80a567628d02e2fe5fd74d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 850, "license_type": "no_license", "max_line_length": 48, "num_lines": 
63, "path": "/Merge_array.cpp", "repo_name": "kartikeya532001/GeeksForGeeks", "src_encoding": "UTF-8", "text": "// { Driver Code Starts\n#include <bits/stdc++.h>\nusing namespace std;\n\n\n // } Driver Code Ends\n\n\nvoid merge(int arr1[], int arr2[], int n, int m)\n{\n int k=m+n;\n int arr3[k];\n for(int i=0;i<n;i++){\n arr3[i]=arr1[i];\n }\n for(int j=0;j<m;j++){\n arr3[j+m]=arr2[j];\n }\n for(int i=0;i<k;i++){\n cout<<arr3[i]<<\" \";\n }\n\n\n}\n\n\n// { Driver Code Starts.\n\nint main()\n{\n\n\tint T;\n\tcin >> T;\n\n\twhile(T--){\n\t int n, m;\n\t cin >> n >> m;\n\n\t int arr1[n], arr2[m];\n\n\t for(int i = 0;i<n;i++){\n\t cin >> arr1[i];\n\t }\n\n\t for(int i = 0;i<m;i++){\n\t cin >> arr2[i];\n\t }\n\n\t merge(arr1, arr2, n, m);\n\n for (int i = 0; i < n; i++)\n printf(\"%d \", arr1[i]);\n\n\n\t for (int i = 0; i < m; i++)\n\t\t printf(\"%d \", arr2[i]);\n\n\t cout<<endl;\n\t}\n\n\treturn 0;\n}\n // } Driver Code Ends\n" }, { "alpha_fraction": 0.3426573574542999, "alphanum_fraction": 0.3729603588581085, "avg_line_length": 18.5, "blob_id": "cc6a6503e7cecd5dbfbf1efa77f68f6042814036", "content_id": "5b26f2316b15428e965f262b74ad56d091e73ae0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 429, "license_type": "no_license", "max_line_length": 36, "num_lines": 22, "path": "/Duplicate.cpp", "repo_name": "kartikeya532001/GeeksForGeeks", "src_encoding": "UTF-8", "text": "#include<iostream>\nusing namespace std;\nint main()\n{\n int tcase;\n cin>>tcase;\n for(int i=0;i<tcase;i++){\n int arr[10],temp;\n for(int j=0;j<10;j++){\n cin>>arr[j];\n }\n for(int l=0;l<10;l++){\n for(int k=l+1;k<10;k++){\n if(arr[l]==arr[k]){\n temp=arr[l];\n }\n }\n }\n cout<<temp<<\"\\n\";\n }\nreturn 0;\n}\n" }, { "alpha_fraction": 0.37781110405921936, "alphanum_fraction": 0.4002998471260071, "avg_line_length": 13.800000190734863, "blob_id": "d3b1ca77f495969cfa334874424d52d69654da4c", "content_id": "0b84548d8726a6a500a57080a10e01004cc68c71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 667, "license_type": "no_license", "max_line_length": 42, "num_lines": 45, "path": "/Reverse_Coding.cpp", "repo_name": "kartikeya532001/GeeksForGeeks", "src_encoding": "UTF-8", "text": "\n// { Driver Code Starts\n\n#include <bits/stdc++.h>\nusing namespace std;\n\n // } Driver Code Ends\n\n\n\nclass Solution {\n public:\n int revCoding(int n, int m) {\n int t=n/2;\n float m1;\n if(n%2==0){\n m1=n*t+t;\n }\n else{\n m1=n*(t+0.5)+(t+0.5);\n }\n\n cout<<m1<<\"\\n\";\n if(m1==m){\n return 1;\n }\n else{\n return 0;\n }\n }\n};\n\n// { Driver Code Starts.\nint main() {\n int t;\n cin >> t;\n while (t--) {\n int n,m;\n\n cin>>n>>m;\n\n Solution ob;\n cout << ob.revCoding(n,m) << endl;\n }\n return 0;\n} // } Driver Code Ends\n" }, { "alpha_fraction": 0.33931776881217957, "alphanum_fraction": 0.3518851101398468, "avg_line_length": 19.629629135131836, "blob_id": "66c0b0e0fa48c7933029566f3fa70de8bf6c4a62", "content_id": "88bf3e788abd5cba4311341fcf4ef3aecf0b5efd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 557, "license_type": "no_license", "max_line_length": 44, "num_lines": 27, "path": "/Max_Min.cpp", "repo_name": "kartikeya532001/GeeksForGeeks", "src_encoding": "UTF-8", "text": "#include <iostream>\nusing namespace std;\n\nint main()\n{\n int len,temp,tcase;\n cin>>tcase;\n for(int i =0;i<tcase;i++){\n cin>>len;\n int arr[len];\n for(int i=0;i<len;i++){\n cin>>arr[i];\n 
}\n for(int i=0;i<len;i++){\n for(int j=i+1;j<len;j++){\n if(arr[i]>arr[j]){\n temp=arr[i];\n arr[i]=arr[j];\n arr[j]=temp;\n\n }\n }\n }\n cout<<arr[0]<<\" \"<<arr[len-1]<<\"\\n\";\n }\nreturn 0;\n}\n" }, { "alpha_fraction": 0.5322580933570862, "alphanum_fraction": 0.5645161271095276, "avg_line_length": 12.777777671813965, "blob_id": "a09f4460a5b04c2b1acb3df6e543086bbda11815", "content_id": "f6f4fe98ed96b98547a780e68374637a1bf77a6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 124, "license_type": "no_license", "max_line_length": 20, "num_lines": 9, "path": "/test.cpp", "repo_name": "kartikeya532001/GeeksForGeeks", "src_encoding": "UTF-8", "text": "#include<iostream>\nusing namespace std;\nint main(){\n int n;\n cin>>n;\n double m1=n%2;\n cout<<m1;\nreturn 0;\n}\n" }, { "alpha_fraction": 0.3307839334011078, "alphanum_fraction": 0.3479923605918884, "avg_line_length": 17.64285659790039, "blob_id": "851ea10c68eef1f7f7fefb820d402be5cde19d02", "content_id": "2a8ef82b32aad2549c15a96d99a26820748cba18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 523, "license_type": "no_license", "max_line_length": 80, "num_lines": 28, "path": "/Good_Bad_str.cpp", "repo_name": "kartikeya532001/GeeksForGeeks", "src_encoding": "UTF-8", "text": "#include <iostream>\nusing namespace std;\n\nint main()\n{\n int tcase;\n cin>>tcase;\n for(int i =0;i<tcase;i++){\n string str;\n int vl=0,con=0;\n cin>>str;\n for(int i=0;i<str.length();i++){\n if(str[i]=='a'||str[i]=='e'||str[i]=='i'||str[i]=='o'||str[i]=='u'){\n vl++;\n }\n else{\n con++;\n }\n }\n if(con==3||vl>5){\n cout<<\"0\\n\";\n }else{\n cout<<\"1\\n\";\n }\n\n }\nreturn 0;\n}\n\n" } ]
num_files: 21
Row 4
repo_name: Khairajani/Virtual-Keyboard-Using-Opencv-
repo_url: https://github.com/Khairajani/Virtual-Keyboard-Using-Opencv-
snapshot_id: c91b07462ad8c731c26f34ee33925b9f627e80c5
revision_id: 52008d5d38c3cf8c6a387b159362c544258539b2
directory_id: 79119eef9cb61b58391c8d1ccc3cd2c0cf0e280c
branch_name: refs/heads/master
visit_date: 2022-12-21T01:21:08.585028
revision_date: 2020-07-27T10:48:15
committer_date: 2020-07-27T10:48:15
github_id: 204,728,559
star_events_count: 0
fork_events_count: 1
gha_license_id: null
gha_created_at: 2019-08-27T15:04:52
gha_updated_at: 2020-07-27T10:48:18
gha_pushed_at: 2020-07-27T10:48:16
gha_language: Python
files:
[ { "alpha_fraction": 0.8194444179534912, "alphanum_fraction": 0.8194444179534912, "avg_line_length": 106.5, "blob_id": "09b47d036f0c443bcc0a944d76ea489f74d7904d", "content_id": "b07e6ee42bbc0bc58678ef119c8e28af3ae8255c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 216, "license_type": "no_license", "max_line_length": 181, "num_lines": 2, "path": "/README.md", "repo_name": "Khairajani/Virtual-Keyboard-Using-Opencv-", "src_encoding": "UTF-8", "text": "# Virtual-Keyboard-Using-Opencv-\n- using webcam to capture frames and creating a virtual keyboard using opencv and displaying it on computer screen to control keyboard operation through detected object (blue-color) \n" }, { "alpha_fraction": 0.4466778337955475, "alphanum_fraction": 0.5312674641609192, "avg_line_length": 27.138212203979492, "blob_id": "1750abb8502b9d3d1e1931609fb25d52bfcc8da4", "content_id": "8b0fb1efe8cdcd690025f869085af10d79f854b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3582, "license_type": "no_license", "max_line_length": 144, "num_lines": 123, "path": "/Virtual_Keyboard2.0.py", "repo_name": "Khairajani/Virtual-Keyboard-Using-Opencv-", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport cv2\r\nimport wx\r\nfrom playsound import playsound\r\n\r\nxs={}\r\nprev=\"He\"\r\n\r\n# Blue\r\nlower_b = np.array([100,120,100])\r\nupper_b = np.array([150,255,255])\r\n\r\n# Keyboard Settings\r\nkeyboard = np.zeros((450,750,3),np.uint8)\r\n\r\nkeys_set3={0:\"Q\",1:\"W\",2:\"E\",3:\"R\",4:\"T\",\r\n 5:\"Y\",6:\"U\",7:\"I\",8:\"O\",9:\"P\",\r\n 10:\"A\",11:\"S\",12:\"D\",13:\"F\",14:\"G\",\r\n 15:\"H\",16:\"J\",17:\"K\",18:\"L\",19:\"' '\",\r\n 20:\"Z\",21:\"X\",22:\"C\",23:\"V\",24:\" \",\r\n 25:\"B\",26:\"N\",27:\"M\",28:\",\",29:\"cl\"\r\n }\r\n\r\napp =wx.App(False)\r\n(sx,sy) =wx.DisplaySize()\r\n\r\nkernelOpen = np.ones((4,4))\r\nkernelClose = np.ones((18,18))\r\n\r\ndef letter(letter_index,text):\r\n\r\n # Keys\r\n x=(letter_index%10)*75\r\n y=int(letter_index/10)*75\r\n\r\n xs[x,y]=text\r\n\r\n height,width=75,75\r\n th= 3 #thickness\r\n cv2.rectangle(img, (x+th,y+th), (x+width-th,y+height-th),(100,255,255),th)\r\n\r\n # Text-settings\r\n font_scale=4\r\n font_th =3\r\n font_letter = cv2.FONT_HERSHEY_PLAIN\r\n text_size =cv2.getTextSize(text,font_letter,font_scale,font_th)[0]\r\n width_text, height_text = text_size[0],text_size[1]\r\n\r\n text_x= int((width-width_text)/2) +x\r\n text_y = int((height+height_text)/2) +y\r\n\r\n cv2.putText(img,text,(text_x,text_y),font_letter,font_scale,(100,255,255),font_th)\r\n\r\ncam = cv2.VideoCapture(0)\r\nframe_count=0\r\npos=0\r\nwhile True:\r\n _,img = cam.read()\r\n img= cv2.resize(img,(800,600))\r\n img = cv2.flip( img, 1)\r\n\r\n # Letters\r\n for i in range(30):\r\n letter(i,keys_set3[i])\r\n\r\n imgHsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\r\n mask = cv2.inRange(imgHsv,lower_b,upper_b)\r\n\r\n # Morphology\r\n maskOpen =cv2.morphologyEx(mask, cv2.MORPH_OPEN,kernelOpen)\r\n maskClose =cv2.morphologyEx(maskOpen, cv2.MORPH_CLOSE,kernelClose)\r\n\r\n conts,h = cv2.findContours(maskClose.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\r\n\r\n if(len(conts)==1):\r\n cv2.drawContours(img,conts,-1,(255,0,0),3)\r\n x1,y1,w1,h1 = cv2.boundingRect(conts[0])\r\n\r\n cv2.rectangle(img,(x1,y1),(x1+w1,y1+h1),(255,0,0),2)\r\n height,width=75,75\r\n th= 3 #thickness\r\n\r\n if int(x1/width) <=9 and int(y1/height) <=2:\r\n 
curr=keys_set3[int(x1/width)+int(y1/height)*10]\r\n cv2.rectangle(img, (int(x1/width)*75+th,int(y1/height)*75+th), (int(x1/width)*75+width-th,int(y1/height)*75+height-th),(0,0,255),th)\r\n\r\n if frame_count ==12:\r\n cv2.rectangle(img, (int(x1/width)*75+th,int(y1/height)*75+th), (int(x1/width)*75+width-th,int(y1/height)*75+height-th),(-1),th)\r\n playsound('sound.wav')\r\n frame_count=0\r\n\r\n if curr=='cl':\r\n keyboard = np.zeros((450,750,3),np.uint8)\r\n #cv2.putText(keyboard, curr, (pos,100), cv2.FONT_HERSHEY_PLAIN, 5, (255, 255, 255),3, 4)\r\n pos=0\r\n\r\n elif curr=='I':\r\n cv2.putText(keyboard,curr, (pos,100), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255),3, 2)\r\n pos+=20\r\n\r\n else:\r\n cv2.putText(keyboard,curr, (pos,100), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255),3, 2)\r\n pos+=30\r\n if(curr!=prev):\r\n\r\n frame_count=0\r\n prev=curr\r\n else:\r\n frame_count+=1\r\n else:\r\n prev=\"He\"\r\n\r\n\r\n cv2.imshow('virtual', img)\r\n cv2.imshow('board',keyboard)\r\n\r\n\r\n key=cv2.waitKey(1)\r\n if key==27:\r\n break\r\n\r\ncam.release()\r\ncv2.destroyAllWindows()" } ]
num_files: 2
Row 5
repo_name: elxopet/sepaxml
repo_url: https://github.com/elxopet/sepaxml
snapshot_id: 831307f989c6e5b4dc7d8c7fd714e6ea91d90bf4
revision_id: 6666c790fdb509009a5f5c30554adc0fe8307719
directory_id: ff1cf1278801effd3732be536662116032f97712
branch_name: refs/heads/master
visit_date: 2021-01-10T12:36:00.999671
revision_date: 2015-10-06T18:25:19
committer_date: 2015-10-06T18:25:19
github_id: 43,765,856
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6279621124267578, "alphanum_fraction": 0.6575829386711121, "avg_line_length": 34.31937026977539, "blob_id": "a2addc7d5319513a35f178d721147987c8bc2452", "content_id": "9a0a879ce425179bb2a537cf7db9fa06cdb73ed0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6753, "license_type": "no_license", "max_line_length": 79, "num_lines": 191, "path": "/sepaxml.py", "repo_name": "elxopet/sepaxml", "src_encoding": "UTF-8", "text": "########################################################################\n# Author: Antonio Vila Juan\n# Update: 06/10/2015\n#\n# Parseador de Remesas SEPA en texto plano a XML\n# Lee le fichero generado con cualquier programa de gestión en texto \n# plano.\n# Genera la estructura para las remesas en formato XML\n# Crea la carpeta tmp dentro del directorio de trabajo\n# Mueve el fichero origen a la carpeta tmp una vez creado el xml.\n#\n########################################################################\n\n\nimport sys,os, os.path, shutil\nfrom lxml import etree\n\n\ndef cargararraycabecera01(t): \n\tcabecera[0] = t[123:158] \t# Identificacion del mensaje\n\tcabecera[1] = t[115:123] \t# Fecha y hora de creacion\n\tcabecera[4] = t[45:115]\t\t# Nombre\n\tcabecera[6] = t[10:26]\t\t# Identificacion\n\t\ndef parseartotal99(t):\n\ta = t[19:27] # Numero de operaciones\n\tcabecera[2] = int(a)\n\ta = t[2:17]\t+ \".\" + t[17:19]\t# Control de suma\n\tcabecera[3] = float(a)\n\t\ndef parsearcabecera01():\n\tGrpHdr = etree.SubElement(CstmrPmtRvsl, \"GrpHdr\")\n\tMsgId = etree.SubElement(GrpHdr, \"MsgId\")\n\tCreDtTm = etree.SubElement(GrpHdr, \"CreDtTm\")\n\tNbOfTxs = etree.SubElement(GrpHdr, \"NbOfTxs\")\n\tCtrlSum = etree.SubElement(GrpHdr, \"CtrlSum\")\n\tInitgPty = etree.SubElement(GrpHdr, \"InitgPty\")\n\tNm = etree.SubElement(InitgPty, \"Nm\")\n\tId = etree.SubElement(InitgPty, \"Id\")\n\tOrgId = etree.SubElement(Id, \"OrgId\")\n\tOthr = etree.SubElement(OrgId, \"Othr\")\n\tId = etree.SubElement(Othr, \"Id\")\n\t\n\tMsgId.text = cabecera[0].strip()\n\tCreDtTm.text = cabecera[1].strip()\n\tNbOfTxs.text = cabecera[2]\n\tCtrlSum.text = cabecera[3].strip()\n\tNm.text = cabecera[4].strip()\n\tId.text = cabecera[6].strip()\n\t\n\t\ndef parseardetalle03(t):\n\tPmtInf = etree.SubElement(CstmrPmtRvsl, \"PmtInf\")\n\tPmtInfId = etree.SubElement(PmtInf, \"PmtInfId\")\n\tPmtMtd = etree.SubElement(PmtInf, \"PmtMtd\")\n\tBtchBookg = etree.SubElement(PmtInf, \"BtchBookg\")\n\tPmtInfId.text = t[10:45].strip()\n\tPmtMtd.text = \"DD\"\n\tBtchBookg.text = \"true\"\n\tPmtTpInf = etree.SubElement(PmtInf, \"PmtTpInf\")\n\tSvcLvl = etree.SubElement(PmtTpInf, \"SvcLvl\")\n\tCd = etree.SubElement(SvcLvl,\"Cd\")\n\tCd.text =\"SEPA\"\n\tLclInstrm = etree.SubElement(PmtTpInf, \"LclInstrm\")\n\tCd = etree.SubElement(LclInstrm, \"Cd\")\n\tCd.text =\"COR1\"\n\tSeqTp = etree.SubElement(PmtTpInf, \"SeqTp\")\n\tSeqTp.text = t[80:84].strip()\n\tReqdColltnDt = etree.SubElement(PmtInf, \"ReqdColltnDt\")\n\tReqdColltnDt.text = t[99:103] + \"-\" + t[103:105] + \"-\" + t[105:107]\n\tCdTr = etree.SubElement(PmtInf, \"CdTr\")\n\tNm = etree.SubElement(CdTr, \"Nm\")\n\tNm.text = t[118:188].strip()\n\tPstlAdr = etree.SubElement(CdTr, \"PstlAdr\")\n\tCtry = etree.SubElement(PstlAdr, \"Ctry\")\n\tCtry.text = t[328:330]\n\tAdrLine = etree.SubElement(PstlAdr, \"AdrLine\")\n\tAdrLine.text = t[188:238].strip()\n\tCdtrAcct = etree.SubElement(PmtInf, \"CdtrAcct\")\n\tId = etree.SubElement(CdtrAcct, \"Id\")\n\tIBAN = etree.SubElement(Id, 
\"IBAN\")\n\tIBAN.text = t[403:437].strip()\n\tCcy = etree.SubElement(CdtrAcct, \"Ccy\")\n\tCcy.text = \"EUR\"\n\tCdtrAgt = etree.SubElement(PmtInf, \"CdtrAgt\")\n\tFinInstnId = etree.SubElement(CdtrAgt, \"FinInstnId\")\n\tBIC = etree.SubElement(FinInstnId, \"BIC\")\n\tBIC.text = \"NOTPROVIDED\"\n\tChrgBr = etree.SubElement(PmtInf, \"ChrgBr\")\n\tChrgBr.text = \"SLEV\"\n\tCdtrSchmeId = etree.SubElement(PmtInf, \"CdtrSchmeId\")\n\tId = etree.SubElement(CdtrSchmeId, \"Id\")\n\tPrvtId = etree.SubElement(Id, \"PrvtId\")\n\tOthr = etree.SubElement(PrvtId, \"Othr\")\n\tId = etree.SubElement(Othr, \"Id\")\n\tId.text = cabecera[6].strip()\n\tSchmeNm = etree.SubElement(Othr, \"SchmeNm\")\n\tPrtry = etree.SubElement(SchmeNm, \"Prtry\")\n\tPrtry.text = \"SEPA\"\n\tDrctDbtTxInf = etree.SubElement(PmtInf, \"DrctDbtTxInf\")\n\tPmtId = etree.SubElement(DrctDbtTxInf, \"PmtId\")\n\tInstrId = etree.SubElement(PmtId, \"InstrId\")\n\tInstrId.text = t[26:40] + \"-\" + t[40:45]\n\tEndToEndId = etree.SubElement(PmtId, \"EndToEndId\")\n\tEndToEndId.text = t[10:45].strip()\n\tInstdAmt = etree.SubElement(DrctDbtTxInf, \"InstdAmt\") # Anyadir Ccy = \"EUR\"\n\tInstdAmt.text = t[88:99].strip()\n\tDrctDbtTx = etree.SubElement(DrctDbtTxInf, \"DrctDbtTx\")\n\tMndtRltdInf = etree.SubElement(DrctDbtTx, \"MndtRltdInf\")\n\tMndtId = etree.SubElement(MndtRltdInf,\"MndtId\")\n\tMndtId.text = t[45:54].strip()\n\tDtOfSgntr = etree.SubElement(DrctDbtTx, \"DtOfSgntr\")\n\tDtOfSgntr.text = t[99:103]+\"-\"+t[103:105]+\"-\"+t[105:107]\n\tAmdmntInd = etree.SubElement(DrctDbtTx, \"AmdmntInd\")\n\tAmdmntInd.text = \"false\"\n\tDbtrAgt = etree.SubElement(DrctDbtTxInf, \"DbtrAgt\")\n\tFinInstnId = etree.SubElement(DbtrAgt, \"FinInstnId\")\n\tOthr = etree.SubElement(FinInstnId, \"Othr\")\n\tId = etree.SubElement(Othr, \"Id\")\n\tId.text = \"NOTPROVIDED\"\n\tDbtr = etree.SubElement(DrctDbtTxInf, \"Dbtr\")\n\tNm = etree.SubElement(Dbtr, \"Nm\")\n\tNm.text = t[118:188].strip()\n\tPstlAdr = etree.SubElement(Dbtr, \"PstlAdr\")\n\tCtry = etree.SubElement(PstlAdr, \"Ctry\")\n\tCtry.text = t[328:330].strip()\n\tAdrLine = etree.SubElement(PstlAdr, \"AdrLine\")\n\tAdrLine.text = t[188:238].strip()\n\tDbtrAcct = etree.SubElement(DrctDbtTxInf, \"DbtrAcct\")\n\tId = etree.SubElement(DbtrAcct, \"Id\")\n\tIBAN = etree.SubElement(Id, \"IBAN\")\n\tIBAN.text = t[403:437].strip()\n\tRmtInf = etree.SubElement(DrctDbtTxInf, \"RmtInf\")\n\tUstrd = etree.SubElement(RmtInf, \"Ustrd\")\n\tUstrd.text = t[441:581].strip()\n\t\n# Se carga el nombre del fichero txt\nfichero = sys.argv[1]\n\t\n# Se inicializa el array\ncabecera = [\"\"] * 17\n\n# Se inicializa la variable root que es la que almacenara todo el xml\nroot = etree.Element(\"Document\")\nCstmrPmtRvsl = etree.SubElement(root, \"CstmrPmtRvsl\")\n\n\n\n# Se abre el fichero para leer la primera vez\nf = open(fichero)\nlines = f.readlines()\n\n#------------------------------------------------------------------------------\n# Se leen las lineas una a una la primera vez para cargar la cabecera\n#------------------------------------------------------------------------------\nfor l in lines:\n\tif l[:2] == \"01\":\n\t\tcargararraycabecera01(l)\n\telif l[:2] == '99':\n\t\tparseartotal99(l)\n# Se leen las lineas una a una la primera vez para cargar la cabecera\t\t\n\t\t\n# Se mueven los datos del array a xml\nparsearcabecera01()\t\n# Se abre el fichero para leer la segunda vez\nf = open(\"sepa.txt\")\nlines = f.readlines()\n#------------------------------------------------------------------------------\n# Se leen las 
lineas una a una la segunda vez para grabar el detalle\n#------------------------------------------------------------------------------\nfor l in lines:\n\tif l[:2] == '03':\n\t\tparseardetalle03(l)\n# Se leen las lineas una a una la segunda vez para grabar el detalle\t\t\n\t\t\n# Se graba el fichero de salida xml\n\nif (os.path.isdir(\"tmp\")):\n\tprint \"exite tmp\"\nelse:\n\tprint \"no existe el directorio tmp\"\n\tos.mkdir(\"tmp\")\n\ntree = etree.ElementTree(root)\n\n# Se crea el nombre del fichero de salida.\nsalida = fichero[0:len(fichero)-4] + \".xml\"\ntree.write(salida, pretty_print=True)\n\n# Se mueve el fichero txt a tmp\nshutil.move(fichero, \"tmp\"+\"/\"+fichero)\n\n\n\t\n\t\n" } ]
num_files: 1
Row 6
repo_name: viruswood/opencv-learn
repo_url: https://github.com/viruswood/opencv-learn
snapshot_id: f8ce7ccab9019b665eca5a52bae01edc242760cc
revision_id: 4b01f53396d47aef9d77009f22e1008207a73e4f
directory_id: 7cf94cc1b07123bd1128ae2b19f1a6ef03d0928c
branch_name: refs/heads/master
visit_date: 2022-11-17T08:06:47.118993
revision_date: 2020-07-14T12:38:57
committer_date: 2020-07-14T12:38:57
github_id: 279,494,194
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5617977380752563, "alphanum_fraction": 0.5898876190185547, "avg_line_length": 15.181818008422852, "blob_id": "32d397dcd9efd7b59a85813c7be135b8b4f8d7d5", "content_id": "f2100204cc058ba0c149a52ed0f3b9d6dcc5a706", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 178, "license_type": "permissive", "max_line_length": 31, "num_lines": 11, "path": "/1图像基本操作/颜色通道提取.py", "repo_name": "viruswood/opencv-learn", "src_encoding": "UTF-8", "text": "import cv2 as cv\n\nimg = cv.imread('data/cat.jpg')\nb, g, r = cv.split(img)\nimg[:, :, 0] = 0\nimg[:, :, 2] = 0\n\n# print(b)\ncv.imshow('cat', img)\ncv.waitKey()\ncv.destroyAllWindows()\n" }, { "alpha_fraction": 0.6328125, "alphanum_fraction": 0.703125, "avg_line_length": 20.5, "blob_id": "6481e1cc550c80e2077c0c92ef0bea0ffd9b1fc3", "content_id": "fba70bd069757ed9f624474d9e603ac60da56454", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 128, "license_type": "permissive", "max_line_length": 31, "num_lines": 6, "path": "/1图像基本操作/截取部分图像.py", "repo_name": "viruswood/opencv-learn", "src_encoding": "UTF-8", "text": "import cv2 as cv\nimg = cv.imread('data/cat.jpg')\ncat = img[0:200,0:200]\ncv.imshow('cat',cat)\ncv.waitKey()\ncv.destroyAllWindows()" }, { "alpha_fraction": 0.7894737124443054, "alphanum_fraction": 0.7894737124443054, "avg_line_length": 18, "blob_id": "5174334f39406d4544bc8e2dcface057576e02b0", "content_id": "a4a80da91c7e2833e8f3eec813514c0f807d74f5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 38, "license_type": "permissive", "max_line_length": 22, "num_lines": 2, "path": "/README.md", "repo_name": "viruswood/opencv-learn", "src_encoding": "UTF-8", "text": "# opencv-learn\nlearn opencv by python\n" }, { "alpha_fraction": 0.6398305296897888, "alphanum_fraction": 0.6864407062530518, "avg_line_length": 18.75, "blob_id": "9e77973aeb0cd6ea8943dbf37aa64219ff1fe0b2", "content_id": "f672b1a71db4894b756189249193a1edc0a00cb2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 236, "license_type": "permissive", "max_line_length": 52, "num_lines": 12, "path": "/1图像基本操作/边界填充.py", "repo_name": "viruswood/opencv-learn", "src_encoding": "UTF-8", "text": "import cv2 as cv\n\ntop, left, bottom, right = (50, 50, 50, 50)\n\nimg = cv.imread('data/cat.jpg', cv.IMREAD_GRAYSCALE)\nprint(img.shape)\n# print(img)\ncv.imshow('img', img)\ncv.imwrite('mycat.png', img)\ncv.waitKey(0)\ncv.destroyAllWindows()\n5-" }, { "alpha_fraction": 0.7150537371635437, "alphanum_fraction": 0.725806474685669, "avg_line_length": 19.66666603088379, "blob_id": "01dc4dfe6345643f5fe602e75a06bc9321295554", "content_id": "140ce7dcb96c40cab438c408ca94df5da17455a2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 186, "license_type": "permissive", "max_line_length": 52, "num_lines": 9, "path": "/1图像基本操作/第一节.py", "repo_name": "viruswood/opencv-learn", "src_encoding": "UTF-8", "text": "import cv2 as cv\n\nimg = cv.imread('data/cat.jpg', cv.IMREAD_GRAYSCALE)\nprint(img.shape)\nprint(img)\ncv.imshow('img', img)\ncv.imwrite('mycat.png',img)\ncv.waitKey(0)\ncv.destroyAllWindows()\n" }, { "alpha_fraction": 0.5653266310691833, "alphanum_fraction": 0.5854271650314331, "avg_line_length": 21.16666603088379, "blob_id": "7f3a49891cdeae7753e5b483e48238292b88e2bb", 
"content_id": "6569fcb1ab03a846a6ecd3c257506f76a0ed3fb7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 398, "license_type": "permissive", "max_line_length": 51, "num_lines": 18, "path": "/1图像基本操作/读取视频.py", "repo_name": "viruswood/opencv-learn", "src_encoding": "UTF-8", "text": "import cv2 as cv\n\nvc = cv.VideoCapture('data/test.mp4')\nif vc.isOpened():\n open, frame = vc.read()\nelse:\n open = False\nwhile open:\n ret, frame = vc.read()\n if frame is None:\n break\n if ret == True:\n gray = cv.cvtColor(frame,cv.COLOR_BGR2GRAY)\n cv.imshow('resut', gray)\n if cv.waitKey(10) & 0xFF == 27:\n break\nvc.release()\ncv.destroyAllWindows()" } ]
num_files: 6
Row 7
repo_name: Elmorew3721/CTI110
repo_url: https://github.com/Elmorew3721/CTI110
snapshot_id: 5c608f1f266e91418e117e3d53d85c8b03f17eda
revision_id: 633b9bf7ebe5bee913a51608fce14daf5a8b3346
directory_id: c5139294e94cefa4899db8988e57efd2e78776fb
branch_name: refs/heads/main
visit_date: 2023-08-17T00:05:57.593642
revision_date: 2021-09-26T20:53:35
committer_date: 2021-09-26T20:53:35
github_id: 409,786,641
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5211267471313477, "alphanum_fraction": 0.5492957830429077, "avg_line_length": 13.399999618530273, "blob_id": "38be9854a7c30f8c7974c4647028e65fb55eda09", "content_id": "0163ce80cfe756261b7bda8e44dc121f95baddbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 71, "license_type": "no_license", "max_line_length": 31, "num_lines": 5, "path": "/P1Lab3_Interleved_ElmoreWalker.py", "repo_name": "Elmorew3721/CTI110", "src_encoding": "UTF-8", "text": "print('Enter x: ')\nx = int(5)\n\nprint(x)\nprint('x doubled is:', (2 * x))" }, { "alpha_fraction": 0.5625, "alphanum_fraction": 0.78125, "avg_line_length": 11.800000190734863, "blob_id": "3c6f830aae088223d7e7fb84cb3ef7a182c5ee8a", "content_id": "ca82eac80e56a6daeb61f8b30f7be700eed0b0ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 64, "license_type": "no_license", "max_line_length": 19, "num_lines": 5, "path": "/README.md", "repo_name": "Elmorew3721/CTI110", "src_encoding": "UTF-8", "text": "# CTI110\nCTI 110 Repository\nCreated for P1Lab 2\nElmore\n09/23/21\n" } ]
num_files: 2
Row 8
repo_name: SashaSinko13/PythonBot
repo_url: https://github.com/SashaSinko13/PythonBot
snapshot_id: 46e57f63f461d2aa2ec1bbdc3ce1cb7e8a87884a
revision_id: 59fff07e4533ce99baf6fb572fae082dd496d84f
directory_id: 648262d1b618f884c4b3884a05e7bd635d0456bf
branch_name: refs/heads/master
visit_date: 2022-10-11T11:34:04.846237
revision_date: 2020-06-13T15:43:06
committer_date: 2020-06-13T15:43:06
github_id: 254,905,958
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6182815432548523, "alphanum_fraction": 0.6294758915901184, "avg_line_length": 43.41750717163086, "blob_id": "16630fc1590980e43c0049adc7a933c7ec95dc30", "content_id": "d5e16d42ab37cbc04bfde27931680e39b10c31d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14086, "license_type": "no_license", "max_line_length": 142, "num_lines": 297, "path": "/SahsaBot3/src/bot.py", "repo_name": "SashaSinko13/PythonBot", "src_encoding": "UTF-8", "text": "import asyncio\r\nimport logging\r\nimport time\r\nimport aiohttp\r\nimport requests\r\nimport random\r\nfrom aiogram.types import InlineQueryResultArticle, InlineQuery, InputTextMessageContent, inline_keyboard\r\nfrom bs4 import BeautifulSoup\r\nfrom aiogram import Bot, Dispatcher, executor, types, exceptions\r\nfrom selenium.webdriver import Chrome\r\nfrom selenium.webdriver.chrome.options import Options\r\n\r\nimport keyboard as kb\r\nimport config as cfg\r\n\r\nlogging.basicConfig(level=logging.DEBUG)\r\nlog = logging.getLogger('broadcast')\r\n\r\n# Initialize bot and dispatcher\r\nloop = asyncio.get_event_loop()\r\nbot = Bot(token=cfg.TOKEN)\r\ndp = Dispatcher(bot)\r\nurl = \"https://likefilmdb.ru/\"\r\n\r\n\r\nasync def get_html(url):\r\n timeout = aiohttp.ClientTimeout(total=30)\r\n ua = 'user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'\r\n async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False), headers={'User-Agent': ua},\r\n timeout=timeout) as session:\r\n async with session.request('get', url) as responce:\r\n return await responce.content.read()\r\n\r\n\r\[email protected]_handler(commands=['start'])\r\nasync def welcome(message):\r\n await bot.send_message(message.chat.id,\r\n \"Хеллоу, {0.first_name}!\\n Я - <b> {1} </b>, бот для любителей кино\".format(\r\n message.from_user,\r\n 'Раб (помогите мне, меня заставляют работать бесплатно , именно поэтому я раб)'),\r\n parse_mode='html', reply_markup=kb.main_menu_ru)\r\n\r\n\r\[email protected]_handler(lambda message: message.text == 'Фильмы')\r\nasync def films(message):\r\n await bot.send_message(message.chat.id, 'Ура, вы нашли раздел с <b>Фильмами</b>', parse_mode='html',\r\n reply_markup=kb.film_menu)\r\n\r\n\r\[email protected]_handler(lambda message: message.text == 'Сериалы')\r\nasync def films(message):\r\n await bot.send_message(message.chat.id, 'Ура, вы нашли раздел с <b>Сериалами</b>', parse_mode='html',\r\n reply_markup=kb.series_menu)\r\n\r\n\r\[email protected]_handler(lambda message: message.text == 'Случайный фильм')\r\nasync def films(message):\r\n await bot.send_message(message.chat.id, 'Ура, вы нашли раздел со <b>Случайными фильмами</b>', parse_mode='html',\r\n reply_markup=kb.random_menu)\r\n\r\n\r\[email protected]_handler(lambda message: message.text == 'Случайный сериал')\r\nasync def films(message):\r\n await bot.send_message(message.chat.id, 'Ура, вы нашли раздел со <b>Случайными сериалами</b>', parse_mode='html',\r\n reply_markup=kb.random_menu_series)\r\n\r\n\r\[email protected]_handler(lambda inline_query: inline_query.query.startswith('random_film')) # случайные фильмы\r\nasync def bests_films_year(inline_query: InlineQuery):\r\n result = await get_inline_films_href('/service/movies/rand/')\r\n await bot.answer_inline_query(inline_query.id, results=result, cache_time=10)\r\n\r\n\r\[email protected]_handler(lambda inline_query: inline_query.query.startswith('random_series')) # 
случайные сериалы\r\nasync def bests_films_year(inline_query: InlineQuery):\r\n result = await get_inline_series_href('/service/tv-series/rand/')\r\n await bot.answer_inline_query(inline_query.id, results=result, cache_time=10)\r\n\r\n\r\[email protected]_handler(lambda inline_query: inline_query.query.startswith('best_films')) # лучшие фильмы\r\nasync def bests_films_year(inline_query: InlineQuery):\r\n result = await get_inline_films_href('/service/movies/best/year/2020/')\r\n await bot.answer_inline_query(inline_query.id, results=result, cache_time=600)\r\n\r\n\r\[email protected]_handler(lambda inline_query: inline_query.query.startswith('best_series')) # лучшие сериалы\r\nasync def bests_series_year(inline_query: InlineQuery):\r\n result = await get_inline_series_href('/service/tv-series/best/year/2020/')\r\n await bot.answer_inline_query(inline_query.id, results=result, cache_time=600)\r\n\r\n\r\[email protected]_handler(lambda inline_query: inline_query.query.startswith('new_films')) # новые фильмы\r\nasync def new_films(inline_query: InlineQuery):\r\n result = await get_inline_films_href('/service/movies/new/')\r\n await bot.answer_inline_query(inline_query.id, results=result, cache_time=600)\r\n\r\n\r\[email protected]_handler(lambda inline_query: inline_query.query.startswith('new_series')) # новые сериалы\r\nasync def new_films(inline_query: InlineQuery):\r\n result = await get_inline_series_href('/service/tv-series/new/')\r\n await bot.answer_inline_query(inline_query.id, results=result, cache_time=600)\r\n\r\n\r\[email protected]_handler(lambda inline_query: inline_query.query.startswith('by-category_')) # категории фильмов\r\nasync def category_films(inline_query: InlineQuery):\r\n href = inline_query.query.split('_')[1]\r\n result = await get_inline_films_href(href)\r\n await bot.answer_inline_query(inline_query.id, results=result, cache_time=600)\r\n\r\n\r\[email protected]_handler(lambda inline_query: inline_query.query.startswith('ser-category_')) # категории сериалов\r\nasync def category_series(inline_query: InlineQuery):\r\n href = inline_query.query.split('_')[1]\r\n result = await get_inline_series_href(href)\r\n await bot.answer_inline_query(inline_query.id, results=result, cache_time=600)\r\n\r\n\r\[email protected]_handler(lambda inline_query: inline_query.query.startswith('similar_')) # похожие фильмы\r\nasync def add_order_to_db_film(inline_query: InlineQuery):\r\n href = inline_query.query.split('_')[1]\r\n result = await get_inline_films_href(href + 'similar/')\r\n await bot.answer_inline_query(inline_query.id, results=result, cache_time=600)\r\n\r\n\r\[email protected]_handler(lambda inline_query: inline_query.query.startswith('pohozhie-serialy_')) # похожие сериалы\r\nasync def add_order_to_db_serial(inline_query: InlineQuery):\r\n href = inline_query.query.split('_')[1]\r\n result = await get_inline_series_href(href + 'pohozhie-serialy/')\r\n await bot.answer_inline_query(inline_query.id, results=result, cache_time=600)\r\n\r\n\r\[email protected]_query_handler(lambda call: call.data.startswith('trailer_')) #\r\nasync def get_trailer(call):\r\n href = call.data.split('_')[1]\r\n soup = BeautifulSoup(requests.get(url + href).text, 'lxml')\r\n tmp = soup.find('div', {'class': 'uiSectionV2Media'})\r\n play_button = tmp.find('a', {'href': '#'})\r\n trailer_link = str(play_button).split(',')[2].split('\\'')[1][2:]\r\n await bot.send_message(call.from_user.id, trailer_link)\r\n\r\n\r\[email protected]_handler(lambda message: message.text == 'Я киноман?')\r\nasync 
def random_value(message):\r\n number = random.randint(1, 2)\r\n num = random.randint(1, 100)\r\n if number == 1:\r\n await bot.send_message(message.chat.id,\r\n f'Я, как знаток в фильмах, а это знают <b>ВСЕ</b>, '\r\n f'с уверенностью заявляю, что ты киноман на {num} %',\r\n parse_mode='html')\r\n if number == 2:\r\n await bot.send_message(message.chat.id,\r\n 'Я, как знаток в фильмах, а это знают <b>ВСЕ</b>, '\r\n f'с уверенностью заявляю, что киноман из тебя никакой!',\r\n parse_mode='html')\r\n\r\n\r\nasync def get_inline_films_href(href):\r\n soup = BeautifulSoup(requests.get(url + href).text, 'lxml')\r\n best_films = soup.find_all('div', 'uiSectionV8Content')\r\n result = []\r\n counter = 0\r\n for film in best_films:\r\n if counter == 12:\r\n break\r\n film = film.find('a', 'uiH2')\r\n film_href = film.get('href')\r\n try:\r\n item_desctiption, film_image = get_film_content(film_href)\r\n except Exception:\r\n pass\r\n message = InputTextMessageContent(item_desctiption, parse_mode='html')\r\n film_insert = film.next\r\n keyboard = kb.generate_film_keyboard(film_href)\r\n result.append(\r\n InlineQueryResultArticle(\r\n id=str(counter),\r\n title=film_insert,\r\n thumb_url=film_image,\r\n thumb_height=500, thumb_width=500,\r\n input_message_content=message,\r\n reply_markup=keyboard\r\n )\r\n )\r\n counter += 1\r\n return result\r\n\r\n\r\nasync def get_inline_series_href(href):\r\n soup = BeautifulSoup(requests.get(url + href).text, 'lxml')\r\n best_films = soup.find_all('div', 'uiSectionV8Content')\r\n result = []\r\n counter = 0\r\n for film in best_films:\r\n if counter == 12:\r\n break\r\n film = film.find('a', 'uiH2')\r\n film_href = film.get('href')\r\n try:\r\n item_desctiption, film_image = get_film_content(film_href)\r\n except Exception:\r\n pass\r\n message = InputTextMessageContent(item_desctiption, parse_mode='html')\r\n film_insert = film.next\r\n keyboard = kb.generate_series_keyboard(film_href)\r\n result.append(\r\n InlineQueryResultArticle(\r\n id=str(counter),\r\n title=film_insert,\r\n thumb_url=film_image,\r\n thumb_height=500, thumb_width=500,\r\n input_message_content=message,\r\n reply_markup=keyboard\r\n )\r\n )\r\n counter += 1\r\n return result\r\n\r\n\r\[email protected]_query_handler(lambda call: call.data.startswith('categories_'))\r\nasync def get_films_categories(call):\r\n tel_id = call.from_user.id\r\n current_pos = int(call.data.split('_')[1])\r\n href = 'service/movies/what-to-see/'\r\n soup = BeautifulSoup(requests.get(url + href).text, 'lxml')\r\n film_categories = soup.find_all('div', {'class': 'simpleMovie'})\r\n k = inline_keyboard.InlineKeyboardMarkup()\r\n counter = current_pos\r\n while counter <= len(film_categories):\r\n if counter == current_pos + 8 or counter == len(film_categories):\r\n break\r\n category_href = film_categories[counter].find('a').get('href')\r\n category_name = film_categories[counter].find('img').get('alt')\r\n k.add(inline_keyboard.InlineKeyboardButton(str(counter + 1) + ') ' + category_name,\r\n switch_inline_query_current_chat='by-category_{0}'.format(\r\n category_href)))\r\n counter += 1\r\n if current_pos >= 8:\r\n call_data_previous = 'categories_{0}'.format(current_pos - 8)\r\n k.add(inline_keyboard.InlineKeyboardButton('Previous⬅️', callback_data=call_data_previous))\r\n if len(film_categories) > current_pos + 8:\r\n call_data_more = 'categories_{0}'.format(current_pos + 8)\r\n k.add(inline_keyboard.InlineKeyboardButton('Next \\U000027a1', callback_data=call_data_more))\r\n await 
bot.edit_message_text('Выберите категорию фильмов', tel_id, call.message.message_id, reply_markup=k)\r\n\r\n\r\[email protected]_query_handler(lambda call: call.data.startswith('categories-series'))\r\nasync def get_serial_categories(call):\r\n tel_id = call.from_user.id\r\n current_pos = int(call.data.split('_')[1])\r\n href = 'service/tv-series/what-to-see/'\r\n soup = BeautifulSoup(requests.get(url + href).text, 'lxml')\r\n film_categories = soup.find_all('div', {'class': 'simpleMovie'})\r\n k = inline_keyboard.InlineKeyboardMarkup()\r\n counter = current_pos\r\n while counter <= len(film_categories):\r\n if counter == current_pos + 8 or counter == len(film_categories):\r\n break\r\n category_href = film_categories[counter].find('a').get('href')\r\n category_name = film_categories[counter].find('img').get('alt')\r\n k.add(inline_keyboard.InlineKeyboardButton(str(counter + 1) + ') ' + category_name,\r\n switch_inline_query_current_chat='ser-category_{0}'.format(\r\n category_href)))\r\n counter += 1\r\n if current_pos >= 8:\r\n call_data_previous = 'categories-series_{0}'.format(current_pos - 8)\r\n k.add(inline_keyboard.InlineKeyboardButton('Previous⬅', callback_data=call_data_previous))\r\n if len(film_categories) > current_pos + 8:\r\n call_data_more = 'categories-series_{0}'.format(current_pos + 8)\r\n k.add(inline_keyboard.InlineKeyboardButton('Next \\U000027a1', callback_data=call_data_more))\r\n await bot.edit_message_text('Выберите категорию сериалов', tel_id, call.message.message_id, reply_markup=k)\r\n\r\n\r\ndef get_film_content(href):\r\n link = url + href\r\n soup = BeautifulSoup(requests.get(link).text, 'lxml')\r\n film_title = soup.find('div', {'itemtype': 'http://schema.org/Movie'}).next.next\r\n print(film_title)\r\n text = f'<b>{film_title}</b>\\n\\n'\r\n film_content = soup.find_all('div', 'uiSectionV2Content')[0]\r\n text += 'Cюжет\\n'\r\n text += film_content.find('div', {'itemprop': 'description'}).text\r\n text += '\\n\\n'\r\n film_image = url + soup.find_all('div', 'uiSectionV2Wrapper')[0].find('div', 'uiSectionV2Preview').find('img').get(\r\n 'src')\r\n film_table = soup.find('table', {'class': 'uiStandartVarList'}).find_all('tr')\r\n table_rows = ''\r\n for row in film_table:\r\n table_rows += row.find_all('td')[0].text + ' '\r\n table_rows += row.find_all('td')[1].text + '\\n'\r\n text += table_rows\r\n text += f'''<a href=\"{film_image}\">Photo</a>'''\r\n return text, film_image\r\n\r\n\r\nif __name__ == '__main__':\r\n executor.start_polling(dp)\r\n" }, { "alpha_fraction": 0.801913857460022, "alphanum_fraction": 0.8047847151756287, "avg_line_length": 56.83333206176758, "blob_id": "648124e5a4148bfe582b911f6e46296115f10c29", "content_id": "2105803133aed327305d503417c4e21c9028aa61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1809, "license_type": "no_license", "max_line_length": 393, "num_lines": 18, "path": "/README.md", "repo_name": "SashaSinko13/PythonBot", "src_encoding": "WINDOWS-1251", "text": "# Студентка ИТКН-18-9\n# Синько Александра\n# Проект по Python\n\n## Тема: \nСоздание Telegram бота\n\n## Цель проекта: \nСоздание удобного Telegram бота для подбора подходящих фильмов.\n\n## Подробности: \nПри помощи языка программирования Python я буду разрабатывать бота, который будет собирать данные с определенного сайта, обрабатывать их и предоставлять пользователю в структурированном виде. 
\n\n## Интерфейс: \nДля самого бота, я планирую разработать интерфейс состоящий из кнопок, при нажатии пользователя на которые, бот будет выполнять определенные команды. Планирую сделать разбиение на различные категории, для более удобного подбора. Возможно, будет добавлен дополнительный функционал, в качестве развлекательного интерактива с пользователем (пример: выпадение случайного звания для любителя кино).\n\n## Технологии:\nPython, Библиотеки: Telebot – для работы с самим ботом, Random – для использования функции rand(), Requests -выполнения HTTP-запросов, BeautifulSoup – для работы с веб-данными, так же возможны и другие дополнения в процессе разработки. \n\n \n" } ]
2
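The two category handlers in the PythonBot record above page scraped results eight at a time and encode the page offset in the button callback data. Below is a minimal, hedged sketch of that pagination pattern with no Telegram or network dependency; `page_buttons` and `PAGE_SIZE` are illustrative names, not identifiers from the repo.

```python
# Hedged sketch of the 8-per-page pagination used by get_serial_categories
# above; pure standard library, so it runs without aiogram or a live site.
PAGE_SIZE = 8  # the handlers above hard-code this as the literal 8

def page_buttons(names, current_pos, prefix="categories-series"):
    """Return one page of numbered labels plus prev/next callback tokens."""
    page = [f"{i + 1}) {name}"
            for i, name in enumerate(names[current_pos:current_pos + PAGE_SIZE],
                                     start=current_pos)]
    prev_token = f"{prefix}_{current_pos - PAGE_SIZE}" if current_pos >= PAGE_SIZE else None
    next_token = f"{prefix}_{current_pos + PAGE_SIZE}" if len(names) > current_pos + PAGE_SIZE else None
    return page, prev_token, next_token

if __name__ == "__main__":
    cats = [f"category {n}" for n in range(20)]
    print(page_buttons(cats, 8))
    # second page: labels '9) category 8' .. '16) category 15',
    # plus 'categories-series_0' and 'categories-series_16' tokens
```

The same slice-plus-offset idea drives both the film and the series handlers; only the callback prefix differs.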
facosta8/MNO-Black-Scholes
https://github.com/facosta8/MNO-Black-Scholes
dac0e82378e55d2ef3e7965306ab59bd43021c8d
eca423a423be505679176d8ea1526b6735c04d7b
fe4f59e486bf2a55f1bd4a7fc14a508b3133e01e
refs/heads/master
2020-04-30T13:18:19.011343
2019-05-30T16:36:10
2019-05-30T16:36:10
176,853,090
0
1
null
2019-03-21T02:19:40
2019-03-21T02:23:24
2019-03-24T01:48:31
null
[ { "alpha_fraction": 0.44009649753570557, "alphanum_fraction": 0.5783972144126892, "avg_line_length": 54.68656539916992, "blob_id": "a74dead85cf5d3630f00502685c19aa382af1f4d", "content_id": "5ab03dfa34b21e66de86e9b9d3a1f0359e6bbf72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7467, "license_type": "no_license", "max_line_length": 996, "num_lines": 134, "path": "/scripts/equation/BS_solution.cpp", "repo_name": "facosta8/MNO-Black-Scholes", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <fstream>\n#include <string>\n#include <vector>\n#include <cstdlib>\n#include <math.h>\n\nusing namespace std;\n\n\n/*Definition of the function*/\ndouble fn(double b0,double b1,double b2,double b3,double b4,double b5,double b6,double b7,double b8,double b9,double b10,double x)\n{\n\treturn b0 + b1*x + b2 *x*x + b3 *x*x*x + b4*x*x*x*x + b5*x*x*x*x*x + b6*x*x*x*x*x*x + b7*x*x*x*x*x*x*x + b8*x*x*x*x*x*x*x*x + b9*x*x*x*x*x*x*x*x*x + b10*x*x*x*x*x*x*x*x*x*x;\n}\n\n/* The following lines are for the derivate approximation */\ndouble d1(double b0,double b1,double b2,double b3,double b4,double b5,double b6,double b7,double b8,double b9,double b10,double x0,double h)\n{\n return (fn(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x0 + h) - fn(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x0 -h))/(2*h);\n}\n\ndouble d2(double b0,double b1,double b2,double b3,double b4,double b5,double b6,double b7,double b8,double b9,double b10,double x0,double h)\n{\n return ( fn(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x0 + h) - 2*fn(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x0) + fn(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x0 -h) ) / (h*h);\n}\n\ndouble d3(double b0,double b1,double b2,double b3,double b4,double b5,double b6,double b7,double b8,double b9,double b10,double x0,double h){\n\treturn ( fn(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x0 + 2*h) - fn(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x0 -2*h) - 2*( fn(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x0 + h) - fn(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x0 - h) ) )/ (2*h*h*h);\n}\n\n/*The 4th approximation is not a good one, evaluate if we need a better one for the project o(h4 )*/\ndouble d4(double b0,double b1,double b2,double b3,double b4,double b5,double b6,double b7,double b8,double b9,double b10,double x0,double h){\n\treturn ( fn(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x0 + 2*h) - 4*fn(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x0 + h) + 6*fn(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x0) - 4*fn(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x0 - h) + fn(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x0 - 2*h) )/ (h*h*h*h);\n}\n\n/*Calculate the integrate of the function by trapezoidal rule*/\n\ndouble in(double b0,double b1,double b2,double b3,double b4,double b5,double b6,double b7,double b8,double b9,double b10,double t){\n int n = 100,i;\n double h = t/n,sum=0,x0=0;\n for(i = 1;i <= n-1;i++){\n x0= x0 + h;\n sum = sum + fn(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x0);\n }\n return (t/(2*n))*(fn(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,0) + fn(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,t) + 2*sum );\n\n}\n\ndouble u0(double b0,double b1,double b2,double b3,double b4,double b5,double b6,double b7,double b8,double b9,double b10,double x){\n return fn( b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x);\n}\n\ndouble A0(double b0,double b1,double b2,double b3,double b4,double b5,double b6,double b7,double b8,double b9,double b10,double x){\n return (d2( b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01))*d2( b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01);\n}\n\ndouble u1(double b0,double b1,double b2,double b3,double b4,double b5,double b6,double 
b7,double b8,double b9,double b10,double x,double t,double sd, double ro, double r){\n return -1*(-0.5*sd*sd*x*x*d2(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01) + r*x*d1(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01)-r)*t + ro*sd*sd*x*x*x*A0(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x)*t;\n}\n\n double A1(double b0,double b1,double b2,double b3,double b4,double b5,double b6,double b7,double b8,double b9,double b10,double x,double t,double sd, double ro, double r){\n return 2*d2(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01)*(-1*(-0.5*sd*sd*(2*d2(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01)+x*x*d4(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01)+r*x*d3(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01)))*t + (ro*sd*sd*t*6*x*(2*(d3(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01)*d3(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01)+d4(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01)*d2(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01) ))));\n }\n\n double u2(double b0,double b1,double b2,double b3,double b4,double b5,double b6,double b7,double b8,double b9,double b10,double x,double t,double sd, double ro, double r){\n return -1*(-0.5*sd*sd*x*x*((-1*(-0.5*sd*sd*(2*d2(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01)+x*x*d4(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01)+r*x*d3(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01)))*t + (ro*sd*sd*t*6*x*(2*(d3(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01)*d3(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01)+d4(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01)*d2(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01) ))))) + r*x*(-1*(-0.5*sd*sd*(2*x*d2(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01)+x*x*d3(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01)) + r*(d1(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01)+x*d2(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01)))*t + ro*sd*sd*t*(3*x*x*A0(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x) + x*x*x+(2*d2(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01)*d3(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9,b10, x,0.01))))-r)*t + ro*sd*sd*t*x*x*x*A1(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x,t,sd,ro,r);\n }\nint main(int argc, char** argv) {\n\tdouble b0 = 0,b1=0,b2=0,b3=0,b4=0;\n\tdouble b5 = 0,b6=0.,b7=0.,b8=0.,b9=0.,b10=0.;\n\tdouble x = 0,t=0, sd = 0,ro = -0.9,r = 0.18;\n\tdouble result,result2, result3,result4,result5,resultado = 0.0;\n\n\t/*3##############################################################\n\t## Inicia proceso de carga de datos y se asignan valores ####\n #################################################################*/\n\n vector<double> valores;\n double valor;\n string line;\n ifstream myfile (\"data.txt\");\n\n if (myfile.is_open())\n {\n while ( getline (myfile,line) )\n {\n if (line[0] != '#')\n //cout << line << '\\n';\n valor = atof(line.c_str());\n valores.emplace_back(valor);\n }\n myfile.close();\n }\n else cout << \"¡NOOOOO! 
¡El archivo no existe!\";\n\n //valores.erase(valores.begin()); // eliminamos el primer elemento del vector, que queda con valor 0, ya que solo le fuimos agregando cosas\n //valores.erase(valores.begin());\n sd = valores[0];\n x = valores[1]/valores[1]; /* Valor al último día*/\n t = valores[2]/12; /* tiempo que dura*/\n b0 = valores[3];\n b1 = valores[4];\n b2 = valores[5];\n b3 = valores[6];\n b4 = valores[7];\n b5 = valores[8];\n b6 = valores[9];\n b7 = valores[10];\n b8 = valores[11];\n b9 = valores[12];\n b10 = valores[13];\n\n \t/*###########################################\n\t############ B-S equation ##############\n #############################################*/\n\n\tresult = u0(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x);\n\tresult2 = A0(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x);\n\tresult3 = u1(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x,t,sd,ro,r);\n\tresult4 = A1(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x,t,sd,ro,r);\n\tresult5 = u2(b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,x,t,sd,ro,r);\n\tresultado = result + result3 + result5; /*Suma de terminos para aproximación de resultados*/\n\t/*cout << \"Resultado u0: \" << result << \".\\n\";\n\tcout << \"Resultado A0: \" << result2 << \".\\n\";\n\tcout << \"Resultado u1: \" << result3 << \".\\n\";\n\tcout << \"Resultado A1: \" << result4 << \".\\n\";\n\tcout << \"Resultado u2: \" << result5 << \".\\n\";\n\tcout << \"Resultado u(\" << x << \",\" << t << \"): \" << resultado << \"\\n\";*/\n cout << resultado;\n\n\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.3761185109615326, "alphanum_fraction": 0.39650028944015503, "avg_line_length": 33.32764434814453, "blob_id": "76d2f1dc2d657c8858706116204fb0944db0b75f", "content_id": "0f76a929441b4158be7c44c6a9856f2f1762fe7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10060, "license_type": "no_license", "max_line_length": 79, "num_lines": 293, "path": "/app.py", "repo_name": "facosta8/MNO-Black-Scholes", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_daq as daq\nfrom dash.dependencies import Input, Output, State\nfrom plotly import tools\nimport plotly.graph_objs as go\nimport pandas as pd\nimport dask as dask\nfrom dask.distributed import Client, progress\nimport dask.dataframe as dd\n\n# client = Client()\n# client.cluster\n\n# Importo funciones de regresion.py\n\n\nfrom regresion import download_info, analisis_p\n\napp = dash.Dash(__name__)\n\n# lista que especifica lo que se descarga\nlista_desc = [\"LBMA/GOLD\", \"CHRIS/CME_O1\", \"LBMA/SILVER\", \"CHRIS/CME_DA1\",\n \"CHRIS/CME_LN1\", \"CHRIS/CME_C1\", \"CHRIS/CME_RR1\",\n \"CHRIS/CME_LB1\", \"CHRIS/CME_RB1\", \"CHRIS/CME_NG1\",\n \"CHRIS/CME_PL1\", \"CHRIS/CME_S1\"]\n\ndownload_info(lista_desc)\n\nnombres_comunes = dict({'LBMA/GOLD': 'Gold',\n 'LBMA/SILVER': 'Silver',\n 'CHRIS/CME_PL1': 'Platinum',\n 'CHRIS/CME_O1': 'Oats',\n 'CHRIS/CME_DA1': 'Dairy',\n 'CHRIS/CME_C1': 'Corn',\n 'CHRIS/CME_RR1': 'Rice',\n 'CHRIS/CME_LB1': 'Lumber',\n 'CHRIS/CME_RB1': 'Gasoline',\n 'CHRIS/CME_NG1': 'Natural gas',\n 'CHRIS/CME_S1': 'Soybean'\n })\n\ncolors = {\n 'background': '#3d3d3d',\n 'text': '#7FDBFF'\n}\n\napp.layout = html.Div(style={'backgroundColor': colors['background']},\n children=[\n html.Br(),\n html.Br(),\n html.H1(\"Commodities Finance Forecast\"),\n html.Br(),\n\n\n html.Div(style={'backgroundColor': colors['background']},\n children=[\n\n daq.NumericInput(id='LBMA/GOLD',\n className='numerico',\n 
value=0,\n size=100,\n min=0,\n max=10000,\n label='Gold',\n style={'color': '#EFEFEF'}),\n html.Br(),\n\n daq.NumericInput(id='LBMA/SILVER',\n className='numerico',\n value=0,\n size=100,\n min=0,\n max=10000,\n label='Silver'),\n html.Br(),\n\n daq.NumericInput(id='CHRIS/CME_O1',\n className='numerico',\n value=0,\n size=100,\n min=0,\n max=10000,\n label='Oats'),\n html.Br(),\n\n daq.NumericInput(id='CHRIS/CME_DA1',\n className='numerico',\n value=0,\n size=100,\n min=0,\n max=10000,\n label='Dairy'),\n html.Br(),\n\n daq.NumericInput(id='CHRIS/CME_C1',\n className='numerico',\n value=0,\n size=100,\n min=0,\n max=10000,\n label='Corn'),\n html.Br(),\n\n daq.NumericInput(id='CHRIS/CME_RR1',\n className='numerico',\n value=0,\n size=100,\n min=0,\n max=10000,\n label='Rice'),\n html.Br(),\n\n\n daq.NumericInput(id='CHRIS/CME_RB1',\n className='numerico',\n value=0,\n size=100,\n min=0,\n max=10000,\n label='Gasoline'),\n html.Br(),\n\n daq.NumericInput(id='CHRIS/CME_NG1',\n className='numerico',\n value=0,\n size=100,\n min=0,\n max=10000,\n label='Natural Gas'),\n html.Br(),\n\n daq.NumericInput(id='CHRIS/CME_S1',\n className='numerico',\n value=0,\n size=100,\n min=0,\n max=10000,\n label='Soybean'),\n html.Br(),\n\n\n ], className='listafuturos'),\n\n html.Div(style={'backgroundColor': colors['background']},\n children=[\n\n html.Label('Investment period',\n className='commodity'),\n daq.Slider(id='meses',\n min=2,\n max=24,\n marks={'2': '2', '6': '6', '12': '12',\n '18': '18', '24': '24'},\n value=18,\n size=300,\n handleLabel='Months'\n ),\n html.Br(),\n \n \n html.Button(html.Span('Estimate returns'),\n id='botonCalculo',\n className='boton2'),\n html.Br(),\n html.Br(),\n ], className='areacalculo'),\n\n html.Div(style={'backgroundColor': colors['background']},\n children=[\n\n html.P('''Choose the commodities you are interested in,\n select the investment period and click on the button''',\n id='textoresultado',\n className='resultado')\n\n ], className='areacalculo'),\n\n dcc.Graph(\n id='grafica_valores',\n figure={\n 'data': [\n ],\n 'layout': {\n 'plot_bgcolor': colors['background'],\n 'paper_bgcolor': colors['background'],\n 'font': {\n 'color': colors['text']\n }\n }\n }\n )\n])\n\n\[email protected]([Output('grafica_valores', 'figure'),\n Output('textoresultado', 'children')\n ],\n [Input('botonCalculo', 'n_clicks')],\n state=[State('LBMA/GOLD', 'value'),\n State('LBMA/SILVER', 'value'),\n State('CHRIS/CME_O1', 'value'),\n State('CHRIS/CME_DA1', 'value'),\n State('CHRIS/CME_C1', 'value'),\n State('CHRIS/CME_RR1', 'value'),\n State('CHRIS/CME_RB1', 'value'),\n State('CHRIS/CME_NG1', 'value'),\n State('CHRIS/CME_S1', 'value'),\n State('meses', 'value')\n ]\n )\n\n\ndef update_graph(n_clicks, in1, in2, in3, in4, in5, in6,\n in7, in8, in9, meses):\n\n df = pd.read_csv('datos.csv') # ya no va a la carpeta de modelo_simple/\n df.Date = pd.to_datetime(df.Date)\n\n todos = dict({'LBMA/GOLD': in1,\n 'LBMA/SILVER': in2,\n 'CHRIS/CME_O1': in3,\n 'CHRIS/CME_DA1': in4,\n 'CHRIS/CME_C1': in5,\n 'CHRIS/CME_RR1': in6,\n 'CHRIS/CME_RB1': in7,\n 'CHRIS/CME_NG1': in8,\n 'CHRIS/CME_S1': in9\n })\n validos = dict((k, v) for k, v in todos.items() if v > 0)\n\n lista_validos = list(validos.keys())\n\n lista_run = [meses, '0.0001']\n # el valor de 0.0001 es la r2 mínima que decidimos\n # esta baja para reducir el no de betas, la mantengo por si\n # posteriormente se agrega un scroller con el que se pueda modificar\n\n lista_run = lista_run+lista_validos\n\n # Corro mi código para calcular 
rendimientos\n rendim = analisis_p(lista_run)\n cant = list(validos.values())\n rendim['cant'] = cant\n\n te = sum(rendim['last_price']*rendim['cant'])\n ga = sum(rendim['predicted_price']*rendim['cant'])\n re = str(round(100 * (ga - te) / te, 2)) + '%'\n\n # Esto no lo entiendo pero lo dejo\n cols_seleccionar = lista_validos.copy()\n cols_seleccionar.append('Date')\n df = df.filter(items=cols_seleccionar)\n df = df.dropna()\n\n # texto es la variable que se muestra como output final\n texto = 'The percentual value increase over {} months is {}'.format(meses,\n re)\n\n # estatus = app.get_asset_url('work.gif')\n\n lineas = (len(lista_validos) + 1) // 2\n linea = 1\n columna = 1\n fig = tools.make_subplots(rows=lineas, cols=2,\n subplot_titles=[nombres_comunes[c]\n for c in lista_validos])\n\n for commodity in lista_validos:\n fig.append_trace(go.Scatter(y=df[commodity],\n x=df['Date'],\n ),\n linea, columna\n )\n if columna == 1:\n columna = 2\n elif columna == 2:\n columna = 1\n linea += 1\n\n fig['layout'].update(yaxis=dict(title='Opening value'),\n plot_bgcolor=colors['background'],\n paper_bgcolor=colors['background'],\n font=dict(color=colors['text']),\n showlegend=False)\n\n return fig, texto\n\n\nif __name__ == '__main__':\n app.run_server()\n" }, { "alpha_fraction": 0.6172224879264832, "alphanum_fraction": 0.6271777153015137, "avg_line_length": 26.50684928894043, "blob_id": "b79788fc3bbfc2abebc3ac9a9ed720c2dda62cbb", "content_id": "a657c1ebcbe5a0e3b9e473571230c942cd82519d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2027, "license_type": "no_license", "max_line_length": 71, "num_lines": 73, "path": "/scripts/carga_datos.py", "repo_name": "facosta8/MNO-Black-Scholes", "src_encoding": "UTF-8", "text": "\n#%% carga\nimport numpy as np\nimport pandas as pd\nfrom yahoofinancials import YahooFinancials as YF\nfrom ctypes import cdll\nfrom pathlib import Path\nimport os\n\n\ndef descargar_datos(stocks, \n inicio = '2008-01-01', \n fin = '2019-01-01',\n intervalo = 'daily'):\n # descarga datos de acciones específicas de Yahoo finances\n # inicio y fin determinan el intervalo\n # intervalo puede ser ‘daily’, ‘weekly’, or ‘monthly’\n intervalo = 'daily' #can be either . 
\n acciones = YF(stocks)\n hr = (acciones.get_historical_price_data(inicio, fin, intervalo))\n h = pd.DataFrame(hr)\n h = h.T\n return h\n\ndef extraer_datos_cia(comp,pri): \n # internal helper that pulls the share data for each company\n # used by the extraer_datos function\n e = pd.DataFrame(pri)\n e = e.loc[:,('formatted_date','open', 'close', 'high', 'low')]\n e['cia'] = comp\n return e\n\ndef extraer_datos(df):\n # takes the Yahoo finances data and builds a suitable dataframe\n for i in range(df.shape[0]):\n e = extraer_datos_cia(df.index[i],df.prices[i])\n datos = pd.concat([datos, e]) if i != 0 else e\n return datos\n\n\n \n#%% data loading\ntech_stocks = ['AAPL', 'MSFT', 'INTC', 'GOOG', 'GOOGL', 'FB', 'INTC']\nbank_stocks = ['WFC', 'BAC', 'C']\n\n# yahoo_financials_tech = YF(tech_stocks)\nhistoria = descargar_datos(tech_stocks)\n\ndatos = extraer_datos(historia)\n\n#%% BS methods\n\nmatriz = np.array(datos.head().values[:,1:5])\n\n#%% test with c++\n\n# classes for using the C++ function\n# the commands to compile (if anything changes) are\n# g++ -c -fPIC blackscholes.cpp -o bs.o\n# g++ -shared -Wl,-soname,bs.so -o bs.so bs.o\n\npath = Path(__file__).parent.absolute()\nos.chdir(path)\n\nlib = cdll.LoadLibrary('./bs.so')\nclass BlackScholes(object):\n def __init__(self):\n self.obj = lib.BS_new()\n\n def bar(self):\n lib.BS_bar(self.obj)\n \nbs = BlackScholes()\nbs.bar() # and you will see \"Hello\" on the screen\n" }, { "alpha_fraction": 0.7583120465278625, "alphanum_fraction": 0.7647058963775635, "avg_line_length": 70.09091186523438, "blob_id": "115d774cb9764e96f12e1909ccce3b816ef39156", "content_id": "3d2b77de6a14c37ec226ea01fde8cd74d1cb0a84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 782, "license_type": "no_license", "max_line_length": 359, "num_lines": 11, "path": "/README.md", "repo_name": "facosta8/MNO-Black-Scholes", "src_encoding": "UTF-8", "text": "# MNO-Black-Scholes\n\nThis is a python-developed web app that allows you to get real-time information about a number of commodities, choose a number of these to simulate a portfolio and estimate the returns of your investments in a certain number of months. This was developed as the final project for the course on Numeric Methods and Optimization in Spring 2019, ITAM University.\n\nThis app uses the [Quandl](https://www.quandl.com/) API to retrieve the financial information. In order to use it, you need to add your key in the `quandl.txt` file located in the root folder. Check this file for further instructions.\n\n#### Authors\n\n* Francisco Paz. [GitHub](https://github.com/MrFranciscoPaz)\n* Francisco Álvarez. [GitHub](https://github.com/fralvro/)\n* Francisco Acosta. 
[GitHub](https://github.com/facosta8)\n" }, { "alpha_fraction": 0.4950379431247711, "alphanum_fraction": 0.5084646940231323, "avg_line_length": 23.00934600830078, "blob_id": "11f5cc1729b25c4ed5a2b8dc839858d8f9449391", "content_id": "9d123380dc7ff59b2f2d89d5983236419ae0866f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5157, "license_type": "no_license", "max_line_length": 108, "num_lines": 214, "path": "/regresion.py", "repo_name": "facosta8/MNO-Black-Scholes", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport datetime\nimport numpy as np\nfrom sklearn.metrics import r2_score\nimport sys\nimport os\n\n\n# Paralelización \n\n# DASK\nimport dask as dask\n#from dask.distributed import Client, progress\nimport dask.dataframe as dd\n\n#client = Client()\n#client.cluster\n\nfrom dask import delayed\n\n#os.chdir('git/MNO-Black-Scholes/modelo_simple')\n\n\n\"\"\"\n\n ----------------------- ENFOQUE QUANDL -----------------------\n\n\"\"\"\n\nimport quandl\n# Abro llave de quandl\nquank = open('quandl.txt').readline().split(':')[1].strip()\nquandl.ApiConfig.api_key = quank\n\n\n# Lista de características del portafolio: [meses a predecir, r2_minima, commodities...]\n\nlista_run = ['1','0.0001',\"LBMA/GOLD\",\"CHRIS/CME_O1\", \"CHRIS/CME_LB1\"]\n\n# Lista del total de commodities con que se trabaja \n\nlista_desc = [\"LBMA/GOLD\",\"CHRIS/CME_O1\",\"LBMA/SILVER\",\"CHRIS/CME_DA1\",\"CHRIS/CME_LN1\",\n \"CHRIS/CME_C1\", \"CHRIS/CME_RR1\", \"CHRIS/CME_LB1\",\"CHRIS/CME_RB1\", \"CHRIS/CME_NG1\",\n \"CHRIS/CME_PL1\",\"CHRIS/CME_S1\"]\n\n\n# Función de descarga de datos en paralelo \n\ndef download_info(lista_desc):\n\n @delayed\n def desc_datos(years_back, future_code):\n \n now = datetime.datetime.now()\n a_t = str(now)[0:10]\n b_t = str(int(str(now)[0:4])-years_back)+str(now)[4:10]\n \n sys.stdout = open(os.devnull, \"w\")\n #yahoo = yf.download(future_code,b_t,a_t)\n yahoo = quandl.get(future_code, collapse=\"daily\",start_date=b_t, end_date=a_t)\n \n sys.stdout = sys.__stdout__\n \n return yahoo.iloc[:,0]\n \n \n \n \n \n to_merge=[]\n for i in range(len(lista_desc)):\n \n \n globals()['data%s'%i] = desc_datos(years_back=3, future_code=lista_desc[i]) #uso 3 años de historia \n \n to_merge.append(globals()['data%s'%i])\n \n \n @delayed\n def create_variables(to_merge):\n \n variables = pd.concat(to_merge, axis=1)\n return variables\n \n \n intento = create_variables(to_merge)\n datos = intento.compute()\n \n datos.columns = lista_desc\n \n datos.to_csv(\"datos.csv\")\n \n return None \n\n\n\ndef analisis_p(lista_run):\n \n # Saco la info de datos.csv de acuerdo al código\n \n info = pd.read_csv(\"datos.csv\")\n \n # Análisis de portafolio \n \n \n def fin_poly_reg(lista_run,info,investment_length, future_code, r2_min):\n \n # Tiempo que dura la inversión\n \n il = investment_length\n \n \n X = info.loc[:,future_code]\n X = X.dropna()\n # Desviación estándar\n \n sd =np.std(X)\n \n # Valor al ultimo día del stock \n \n vu = X.tolist()[-1]\n \n # Regresión polinomial \n \n deg = [1,2,3,4,5,6,7,8,9] # número de bethas posibles \n \n for j in range(len(deg)):\n \n z = np.polyfit(np.arange(len(X)),X.values,deg[j])\n \n ypred = np.polyval(z,np.arange(len(X)))\n \n r2 = r2_score(X.values, ypred)\n \n if r2 >= r2_min:\n \n break\n \n \n \"\"\"\n GUARDO RESULTADOS DE REGRESIÓN EN TXT\n \n El formato dentro del txt es:\n \n sd\n valor al último día del stock\n tiempo que dura la inversión\n betas\n \n \"\"\"\n \n \n \n nl='\\n'\n \n 
#Betas \n vec='' \n for i in range(len(z)):\n vec = vec+str(z[i])+nl\n \n # \n \n with open('data.txt', 'w') as the_file:\n the_file.write(str(str(sd)+nl+str(vu)+nl+str(il)+nl+vec))\n the_file.close()\n \n \"\"\"\n CALCULO RENDIMIENTO CON BLACK-SCHOLES Y LEO RESULTADOS\n \"\"\"\n \n os.system('./programa.o > out.txt')\n \n f = open('out.txt')\n tocayo = f.readlines()\n f.close()\n \n \"\"\"\n Calculo varianza para ver si las predicciones del tocayo están muy lejos \n \"\"\"\n var = np.var(X)\n \n \n sal = [future_code,vu,float(tocayo[0])]\n \n rend = 100*(float(sal[2])-float(sal[1]))/float(sal[1])\n \n return pd.DataFrame({'commodity':[sal[0]],'last_price':[sal[1]],\n 'predicted_price':[sal[2]],'difference(%)':rend,'ts_variance':var,\n 'bethas':[z], 'rango':str(X.min())+'-'+str(X.max())})\n \n \n \n \n \n #uno = fin_poly_reg(info, investment_length=10, future_code='LBMA/GOLD', r2_min=0.5)\n \n # JUNTO TODO \n \n comodities=[]\n for i in range(len(lista_run)-2):\n i=i+2\n \n globals()['com%s'%i] = fin_poly_reg(lista_run,info,investment_length = float(lista_run[0]),\n future_code=lista_run[i], r2_min=float(lista_run[1]))\n \n comodities.append(globals()['com%s'%i])\n \n variables = pd.concat(comodities, axis=0)\n \n return variables \n\n\n# Verifica que corra aquí \nprueba = analisis_p(lista_run)\n\n" } ]
5
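BS_solution.cpp in the record above builds its series terms from O(h^2) central-difference stencils (d1, d2) and integrates with a composite trapezoidal rule (in). As a quick sanity check of those two formulas, here is a Python stand-in (not part of the repo), exercised on a cubic whose derivatives and integral are known in closed form; the test function and tolerances are choices made here, not repo values.

```python
# Python stand-in for the O(h^2) stencils in BS_solution.cpp; f, x0 and the
# tolerances are test values chosen here, not taken from the repo.
def d1(f, x0, h):  # central first derivative, mirrors the C++ d1()
    return (f(x0 + h) - f(x0 - h)) / (2 * h)

def d2(f, x0, h):  # central second derivative, mirrors the C++ d2()
    return (f(x0 + h) - 2 * f(x0) + f(x0 - h)) / (h * h)

def trapezoid(f, t, n=100):  # composite trapezoidal rule, mirrors in()
    h = t / n
    interior = sum(f(i * h) for i in range(1, n))
    return (t / (2 * n)) * (f(0.0) + f(t) + 2 * interior)

f = lambda x: x ** 3  # f'(2) = 12, f''(2) = 12, integral over [0, 2] = 4
assert abs(d1(f, 2.0, 0.01) - 12.0) < 1e-3
assert abs(d2(f, 2.0, 0.01) - 12.0) < 1e-6
assert abs(trapezoid(f, 2.0) - 4.0) < 1e-2
```

Running this raises no assertion, consistent with the expected O(h^2) truncation error of both stencils at the step h = 0.01 the C++ code uses.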
abmodi/ToDoApp
https://github.com/abmodi/ToDoApp
2d9d335da3b08b6c65215911cb6dce2c31734d5e
32fa1f3361c08933698f20e98985670c6a91f3ac
c889c20b74cca6d28c0b438425564ca92ef42dca
refs/heads/master
2021-01-10T19:00:45.665455
2013-01-15T05:08:35
2013-01-15T05:08:35
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.672897219657898, "alphanum_fraction": 0.672897219657898, "avg_line_length": 27.210525512695312, "blob_id": "230394e1a2cda72f1f7cf115866c0e06c735638e", "content_id": "3c2f5ef6486235fc3356f572d35c8f8256e2e77a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 535, "license_type": "no_license", "max_line_length": 71, "num_lines": 19, "path": "/ToDoRestServer/static/js/views/content-page-view.js", "repo_name": "abmodi/ToDoApp", "src_encoding": "UTF-8", "text": "app = app||{};\n\napp.ContentPageView = Backbone.View.extend({\ntemplate: _.template($('#content-page-template').html()),\ninitialize:function(){\n\t\n},\nrender:function(){\n\t$('.ui-page-active').removeClass('ui-page-active').addClass('hidden');\n\tthis.setElement(\"#content-page\");\n\tthis.$el.empty().html(this.template());\n\tthis.$el.addClass('ui-page-active').removeClass('hidden');\n\tif (app.Todos == null) \n\t\tapp.Todos = new app.TodoList();\n\tif (app.appView == null)\n\t\tapp.appView = new app.AppView({collection:app.Todos});\n\treturn this;\n}\n});" }, { "alpha_fraction": 0.6401273608207703, "alphanum_fraction": 0.6401273608207703, "avg_line_length": 17.47058868408203, "blob_id": "884d88add374e381a560f243c644d6ef06863c53", "content_id": "bd4c2b1f1d30512045237846b8af7c5ce3052e47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 314, "license_type": "no_license", "max_line_length": 43, "num_lines": 17, "path": "/ToDoRestServer/static/js/collections/todos.js", "repo_name": "abmodi/ToDoApp", "src_encoding": "UTF-8", "text": "var app = app||{};\n\napp.TodoList = Backbone.Collection.extend({\n\tmodel:app.Todo,\n\turl:'/api/todos',\n\tcompleted:function(){\n\t\treturn this.filter(function(todo){\n\t\t\treturn todo.get(\"completed\");\n\t\t\t});\n\t},\n\tremaining:function(){\n\t\treturn this.filter(function(todo){\n\t\t\treturn !todo.get(\"completed\");\n\t\t});\n\t}\n\t\t\n});\n" }, { "alpha_fraction": 0.6533742547035217, "alphanum_fraction": 0.6533742547035217, "avg_line_length": 18.727272033691406, "blob_id": "f0da880f4b46fe3e5b9110d51d84ac5d536d7c2f", "content_id": "0015293b0bf564477238a3d171e773dd216c0180", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 652, "license_type": "no_license", "max_line_length": 50, "num_lines": 33, "path": "/ToDoRestServer/static/js/routers/router.js", "repo_name": "abmodi/ToDoApp", "src_encoding": "UTF-8", "text": "var app = app||{};\n\nRouter = Backbone.Router.extend({\n\troutes:{\n\t\t\"\":'index',\n\t\t\"content\":'content',\n\t\t\"content/*filter\":'setFilter'\n\t},\n\tinitialize: function(){\n\t\tconsole.log(\"Initializing Router\");\n\t\tapp.landingPageView = new app.LandingPageView();\n\t\tapp.contentPageView = new app.ContentPageView();\n\t},\n\tstart: function(){\n\t\tBackbone.history.start();\n\t\t\n\t},\n\tindex:function(){\n\t\tapp.landingPageView.render();\n\t},\n\tcontent:function(){\n\t\tapp.contentPageView.render();\n\t},\n\tsetFilter:function(filter){\n\t\tconsole.log(\"Set Filter\" + filter);\n\t\t\n\t\tapp.ToDoFilter = filter.trim() || '';\n\n\t\tapp.Todos.trigger('filter');\n\t}\n});\n\napp.ToDoApp = new Router();\n\n" }, { "alpha_fraction": 0.6374722719192505, "alphanum_fraction": 0.6389504671096802, "avg_line_length": 26.049999237060547, "blob_id": "d4a9f8073e05fd84a57874a190fd0443a74c1d64", "content_id": "6aaa79d81fe1396b8d68483f9a12a1ef367d312d", "detected_licenses": 
[], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2706, "license_type": "no_license", "max_line_length": 126, "num_lines": 100, "path": "/ToDoRestServer/static/js/views/app.js", "repo_name": "abmodi/ToDoApp", "src_encoding": "UTF-8", "text": "var app = app||{};\n\napp.AppView = Backbone.View.extend({\n\n\tstatsTemplate: _.template( $('#stats-template').html() ),\n\tinitialize:function(){\n\t\tthis.collection.on('add',this.addOne, this);\n\t\tthis.collection.on('reset',this.addAll, this);\n\t\tthis.collection.on('remove',this.remove, this);\n\t\tthis.collection.on('filter',this.setFilter,this);\n\t\tthis.collection.on('all',this.render,this);\n\t\tthis.input = this.$(\"#new-todo\");\n\t\tthis.collection.fetch();\n\n\t},\n\tevents:{\n\t\t\"keypress #new-todo\":\"createOnEnter\",\n\t\t\"click #toggle-all\":\"toggleAll\",\n\t\t\"click #clear-completed\":\"deleteAllCompleted\",\n\t},\n\tdeleteAllCompleted:function(){\n\t\tthis.collection.each(function(todo){\n\t\t\tif(todo.get(\"completed\"))\n\t\t\t\ttodo.remove();\n\t\t\t});\n\t},\n\tsetFilter:function(){\n\t\tconsole.log('filter called'+ app.ToDoFilter);\n\t\tthis.filterAll();\n\n\t},\n\tfilterAll:function(){\n\t\tthis.collection.each(this.filterOne,this);\n\t},\n\tfilterOne:function(todo){\n\t\ttodo.trigger('visible');\n\t},\n\tcreateOnEnter:function(e){\n\t\tconsole.log(\"Create On Enter\");\n\t\tif(e.which == 13 && $(\"#new-todo\").val().trim())\n\t\t{\n\t\t\t//this.collection.create({\"title\":$(\"#new-todo\").val(),\"completed\":false,\"starred\":false,\"id\":(this.collection.length+1)});\n\t\t\tvar newModel = new app.Todo({\"title\":$(\"#new-todo\").val(),\"completed\":false,\"starred\":false});\n\t\t\tnewModel.save({},{\n\t\t\t\tsuccess:function(model,response){\n\t\t\t\t\tconsole.log(\"Success\");\n\t\t\t\t\tconsole.log(response);\n\t\t\t\t\tnewModel.set({\"id\":response});\n\t\t\t\t\t}});\n\t\t\tthis.collection.add(newModel);\n\t\t\t$(\"#new-todo\").val(\"\");\n\t\t}\n\t},\n\ttoggleAll:function(e){\n\t\tconsole.log(\"Toggled All\");\n\t\tvar completed = $(\"#toggle-all\")[0].checked;\n\t\tconsole.log(completed);\n\t\tthis.collection.each(function(todo){\n\t\t\ttodo.set({\"completed\":completed});\n\t\t\ttodo.save();\n\t\t});\n\t},\n\trender:function(){\n\t\tconsole.log(\"Rendering all todos\");\n\t\tthis.setElement(\"#todoapp\");\n\t\t//this.collection.forEach(this.addOne,this);\n\t\tvar completed = this.collection.completed().length;\n\t\tvar remaining = this.collection.remaining().length;\n\t\tif(this.collection.length)\n\t\t{\n\t\t\t$(\"#main\").show();\n\t\t\t$(\"#footer\").show();\n\t\t\t$(\"#footer\").html(this.statsTemplate({\"completed\":completed,\"remaining\":remaining}));\n\t\t\tthis.$('#filters li a').removeClass('selected')\n\t\t\t.filter('[href=\"#/' + ( app.ToDoFilter || '' ) + '\"]')\n\t\t\t.addClass('selected');\n\t\t}\n\t\telse\n\t\t{\n\t\t\t$(\"#footer\").hide();\n\t\t\t$(\"#main\").hide();\n\t\t}\n\t\t\t\n\t},\n\taddAll:function(){\n\t\t$(\"#todo-list\").html(\"\");\n\t\tthis.collection.each(this.addOne,this);\t\n\t},\n\taddOne:function(todoItem){\n\t\tconsole.log(\"Adding Item\");\n\t\tvar todoView = new app.TodoView({model:todoItem});\n\t\tconsole.log(todoView);\n\t\ttodoView.render();\n\t\t$('#todo-list').append(todoView.el);\n\t},\n\tremove:function(todoItem){\n\t\t\n\t}\n\n});\n\n" }, { "alpha_fraction": 0.6344085931777954, "alphanum_fraction": 0.647311806678772, "avg_line_length": 23.473684310913086, "blob_id": "432b08568a3838b11884df598f57d21a3a607e18", "content_id": 
"6c4684b98e158a878846f849464613cbb3ae5fe6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 465, "license_type": "no_license", "max_line_length": 52, "num_lines": 19, "path": "/ToDoRestServer/src/ToDoApp/urls.py", "repo_name": "abmodi/ToDoApp", "src_encoding": "UTF-8", "text": "'''\nCreated on 07-Jan-2013\n\n@author: abmodi\n'''\n\nfrom django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\nfrom views import ToDo,Session\n\nurlpatterns = patterns('',\n url(r'^todos/?$', ToDo.as_view()),\n url(r'^todos/(?P<todo_id>\\d+)', ToDo.as_view()),\n url(r'^session/?$',Session.as_view()),\n url(r'^session/(?P<uri>\\w+)', Session.as_view())\n )\n" }, { "alpha_fraction": 0.5926773548126221, "alphanum_fraction": 0.5926773548126221, "avg_line_length": 14.571428298950195, "blob_id": "0cdea21324fccc156e14c540765f9429360e8fcc", "content_id": "53b5c88ba08bb2b3db4010c5f12aef68bf433c40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 437, "license_type": "no_license", "max_line_length": 45, "num_lines": 28, "path": "/ToDoRestServer/static/js/models/todo.js", "repo_name": "abmodi/ToDoApp", "src_encoding": "UTF-8", "text": "var app = app||{};\n\napp.Todo = Backbone.Model.extend({\n\tdefaults:{\n\ttitle:'',\n\tcompleted:false,\n\tstarred:false\t\n\t},\n\turlRoot:'/api/todos',\n\ttoggleStatus:function(){\n\t\tif(this.get(\"completed\"))\n\t\t{\n\t\t\tthis.set({\"completed\":false});\n\t\t}\n\t\telse\n\t\t{\n\t\t\tthis.set({'completed':true});\n\t\t}\n\t\tthis.save();\n\t},\n\ttoggleStar:function(){\n\t\tthis.set({'starred':!this.get('starred')});\n\t\tthis.save();\n\t},\n\tremove:function(){\n\t\tthis.destroy();\n\t}\n});\n\n" }, { "alpha_fraction": 0.6049218773841858, "alphanum_fraction": 0.6109078526496887, "avg_line_length": 34.7023811340332, "blob_id": "ac3e494eff28a30f4d6067e687cba36aa0c2d374", "content_id": "bcdd3902b81349417772a79d0824dc215c201599", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3007, "license_type": "no_license", "max_line_length": 116, "num_lines": 84, "path": "/ToDoRestServer/src/ToDoApp/views.py", "repo_name": "abmodi/ToDoApp", "src_encoding": "UTF-8", "text": "from django.core import serializers\nfrom django.http import HttpResponse\n\nfrom models import ToDoItem\n\nfrom simple_rest import Resource\n\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.models import User\n\nimport json\n\ndef serialize(jsonData):\n oldjs = json.loads(jsonData);\n newjs = []\n for c in oldjs:\n temp = c['fields']\n temp['id'] = c['pk']\n newjs.append(temp)\n \n newJson = json.dumps(newjs)\n return newJson\n\nclass ToDo(Resource):\n \n def get(self, request,todo_id=None, **kwargs):\n if request.user.is_authenticated():\n json_serializer = serializers.get_serializer('json')()\n if todo_id:\n todos = json_serializer.serialize(ToDoItem.objects.filter(pk=todo_id))\n else:\n todos = json_serializer.serialize(ToDoItem.objects.filter(username=request.user.username))\n return HttpResponse(serialize(todos), content_type='application/json; charset=utf-8', status=200)\n\n \n def post(self, request,*args, **kwargs):\n hack_json_value = request.POST.keys()[0]\n hack_query_dict = json.loads(hack_json_value)\n desc = hack_query_dict['title']\n stat = hack_query_dict['completed']\n starr = 
hack_query_dict['starred']\n todoitem = ToDoItem.objects.create(title = desc,completed=stat,starred=starr,username=request.user.username)\n return HttpResponse(todoitem.id,status=201)\n \n def delete(self,request,todo_id):\n todo = ToDoItem.objects.get(pk=todo_id)\n todo.delete()\n return HttpResponse(status=200)\n \n def put(self, request, *args, **kwargs):\n hack_json_value = request.POST.keys()[0]\n hack_query_dict = json.loads(hack_json_value)\n desc = hack_query_dict['title']\n stat = hack_query_dict['completed']\n starr = hack_query_dict['starred']\n todo_id = hack_query_dict['id']\n todo = ToDoItem.objects.get(pk=todo_id)\n todo.title = desc\n todo.completed = stat\n todo.starred = starr\n todo.save()# Create your views here.\n return HttpResponse(status=200)\n \n \nclass Session(Resource):\n \n def post(self,request,uri=None):\n un = request.POST['username']\n pw = request.POST['password']\n if uri and uri == \"signup\":\n email = request.POST['email']\n user = User.objects.create_user(username=un, email=email, password=pw)\n user.save();\n \n user = authenticate(username = un, password = pw)\n if user is not None:\n if user.is_active:\n login(request,user)\n else:\n return HttpResponse('{\"success\":false,\"error\":\"User has been banned\"}');\n # do something -- can't figure it out right now\n else:\n return HttpResponse('{\"success\":false,\"error\":\"Invalid Username or password\"}');\n return HttpResponse('{\"success\":true}',status=200)\n " }, { "alpha_fraction": 0.6832298040390015, "alphanum_fraction": 0.7204968929290771, "avg_line_length": 15.100000381469727, "blob_id": "f695723d68f773ce0ab66f78acf2c859c0e6999f", "content_id": "0f70d27d28a9dd0ab1a4e436dd790e39e06c7c42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 161, "license_type": "no_license", "max_line_length": 47, "num_lines": 10, "path": "/ToDoRestServer/src/ToDoRestServer/views.py", "repo_name": "abmodi/ToDoApp", "src_encoding": "UTF-8", "text": "'''\nCreated on 06-Jan-2013\n\n@author: abmodi\n'''\n\nfrom django.shortcuts import render_to_response\n\ndef home(request):\n return render_to_response(\"index.html\")\n" } ]
8
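The ToDoApp record above wires a Backbone collection (url '/api/todos') to the simple_rest views, which recover the request body from request.POST as a single JSON-encoded key. A hedged smoke test of that round trip using requests follows; the host/port and the demo credentials are assumptions, and it presumes such a user already exists and that CSRF checks do not block the posts.

```python
# Hypothetical client-side smoke test for the /api/todos and /api/session
# routes defined in ToDoApp/urls.py; assumes the dev server is reachable at
# localhost:8000 and that user "demo"/"secret" exists. Not part of the repo.
import json
import requests

BASE = "http://localhost:8000/api"
session = requests.Session()

# Session.post() reads ordinary form fields, so send a normal form body.
session.post(f"{BASE}/session", data={"username": "demo", "password": "secret"})

# ToDo.post() takes request.POST.keys()[0] and json-decodes it, so the whole
# JSON document is sent as the raw form-encoded body (one big "key"); the
# response body is the new todo's id.
payload = json.dumps({"title": "write docs", "completed": False, "starred": False})
todo_id = session.post(f"{BASE}/todos", data=payload,
                       headers={"Content-Type": "application/x-www-form-urlencoded"}).text

print(session.get(f"{BASE}/todos/{todo_id}").json())  # read it back
session.delete(f"{BASE}/todos/{todo_id}")             # and clean up
```

Sending the JSON text as the raw form body is what reproduces the `request.POST.keys()[0]` trick the views rely on.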
cogitozz/FTP_program
https://github.com/cogitozz/FTP_program
305595ff54ce9c6cb0df8d7e9743d63352081a3c
eee0896dbaf4f102d0cfabae0efeb7fd98e9bf2d
3d4c36ed06407f17f143f503c7647ec6519e6e9b
refs/heads/master
2020-04-28T08:39:53.944539
2019-03-12T04:57:09
2019-03-12T04:57:09
175,136,781
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6404109597206116, "alphanum_fraction": 0.6404109597206116, "avg_line_length": 22.41666603088379, "blob_id": "67f2e7a09a1741cbae4f6dc87df11748e724f6a3", "content_id": "38dce1d7cec3a251fd8eefba3cb0afc24704fa0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 308, "license_type": "no_license", "max_line_length": 66, "num_lines": 12, "path": "/Ftp详解/server端/bin/ftp_server_start.py", "repo_name": "cogitozz/FTP_program", "src_encoding": "UTF-8", "text": "import os,sys\n\n\nPATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n# PATH = os.path.dirname(os.path.dirname(__file__))\nprint(PATH)\nsys.path.append(PATH) # so that from core import main can be used directly\n\n\nfrom core import main\nif __name__ == '__main__':\n main.ArgvHandler()\n\n\n\n\n\n\n\n\n\n\n\n" }, 
{ "alpha_fraction": 0.5890411138534546, "alphanum_fraction": 0.6281800270080566, "avg_line_length": 32.66666793823242, "blob_id": "73af3dde1677eb9f8cb911a1c56d897fb3bf64c7", "content_id": "9fed3c2e4c9049109a10dfede80ee96336d2ff4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 561, "license_type": "no_license", "max_line_length": 88, "num_lines": 15, "path": "/Ftp详解/optparse_module.py", "repo_name": "cogitozz/FTP_program", "src_encoding": "UTF-8", "text": "import optparse # parses command-line arguments\n\ndef optparse_study():\n op = optparse.OptionParser()\n op.add_option(\"-s\", \"--server\", dest=\"server\")\n op.add_option(\"-P\", \"--port\", dest=\"port\") # define the custom command-line format\n options, args = op.parse_args()\n print(options)\n print(args)\n print(options.server)\n print(options.port)\n\noptparse_study()\n# run from the command line: G:\\\\python_study\\\\FTP详解>python optparse_module.py -s 127.0.0.1 -P 8080\n # G:\\\\python_study\\\\FTP详解>python optparse_module.py -s 127.0.0.1 -P 8080 yy uu\n\n\n\n\n\n\n" }, 
{ "alpha_fraction": 0.44686129689216614, "alphanum_fraction": 0.4594542980194092, "avg_line_length": 35.88028335571289, "blob_id": "43ff7e5e15a0443362730f6e34b85fd7", "content_id": "09d123380dc839858906116204fb0944db0b75f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6149, "license_type": "no_license", "max_line_length": 123, "num_lines": 142, "path": "/Ftp详解/server端/core/server_handler.py", "repo_name": "cogitozz/FTP_program", "src_encoding": "UTF-8", "text": "import socketserver, json, os\nimport configparser # used to read the config file\nfrom conf import settings\n\nSTATUS_CODE = {\n 200:\"OK\",\n 401:\"authentication error\",\n 400:\"bad request\",\n \"800\": \"file incomplete\",\n \"801\": \"file exists\",\n \"802\": \"file does not exist\",\n}\n\nclass ServerHandler(socketserver.BaseRequestHandler):\n def handle(self):\n while 1:\n data = self.request.recv(1024).strip()\n data = json.loads(data.decode('utf-8')) # 1. get the incoming command\n# data looks like this:\n# data = {\"action\":\"auth\",\n# \"username\":\"yuan\",\n# \"password\":123\n# }\n\n if data.get(\"action\"): # 2. find the method matching the command and run it\n if hasattr(self,data.get(\"action\")):\n func = getattr(self,data.get(\"action\"))\n func(**data)\n else:\n print(\"Invalid cmd\")\n else:\n print(\"Invalid cmd\")\n\n # ***************************below are the handlers for commands received from the client; one command maps to one method******************************#\n def send_response(self, state_code): # many operations need to return data to the client,\n # so this helper sends data to the client and is called wherever needed\n response = {'status_code': state_code}\n self.request.sendall(json.dumps(response).encode('utf-8'))\n\n#********************login verification******************************#\n def auth(self, **data):\n print(data)\n username = data[\"username\"]\n password = data['password']\n\n user = self.authenticate(username, password)\n if user:\n self.send_response(200)\n else:\n self.send_response(401)\n\n def authenticate(self, user, pwd):\n cfg = configparser.ConfigParser() # account data lives in a config file so it is easy to use, extend and modify; this plays the same role as a database, and the data could just as well be pulled from one\n cfg.read(settings.ACCOUNT_PATH)\n\n if user in cfg.sections():\n if cfg[user][\"Password\"] == pwd:\n self.user = user # keeps user available to the other methods\n self.mainPath = os.path.join(settings.BASE_DIR, \"home\", self.user) # important\n print(STATUS_CODE[200])\n return user\n#********************login verification******************************#\n\n# ********************file upload******************************#\n def put(self, **data):\n print(\"data\",data) # shows the command-line input converted into a dict\n file_name = data.get(\"file_name\")\n file_size = data.get(\"file_size\")\n target_path = data.get(\"target_path\")\n\n abs_path = os.path.join(self.mainPath, target_path, file_name)\n print(abs_path) # the server-side path where the uploaded file will be stored\n\n has_received = 0\n if os.path.exists(abs_path): # check whether the file already exists\n file_has_size = os.stat(abs_path).st_size\n if file_has_size < file_size:\n # resume the interrupted upload\n self.request.sendall(\"800\".encode('utf-8')) # tell the client the file is incomplete\n choice = self.request.recv(1024).decode('utf-8')\n if choice == \"Y\": # the client chose to continue the upload\n self.request.sendall(str(file_has_size).encode('utf-8')) # tell the client how much was already received\n has_received += file_has_size\n f = open(abs_path, \"ab\")\n else: # if the client chose N, rewrite the incomplete file from the start, overwriting the old partial content\n f = open(abs_path, \"wb\")\n\n else:\n self.request.sendall(\"801\".encode('utf-8'))\n return\n\n else:\n self.request.sendall('802'.encode('utf-8')) # if the file does not exist, tell the client so\n f = open(abs_path, \"wb\") # and create that file at the matching server-side location\n\n while has_received < file_size:\n try:\n data = self.request.recv(1024) # receive the file sent by the client\n except Exception as e:\n break\n f.write(data) # and write it into the newly created server-side file\n has_received += len(data)\n\n f.close()\n# ********************file upload******************************#\n\n# ********************command handlers******************************#\n\n # ********************ls command***********************#\n def ls(self, **data):\n file_list = os.listdir(self.mainPath)\n file_str = \"\\n\".join(file_list)\n if not len(file_list):\n file_str = '<empty dir>'\n self.request.sendall(file_str.encode('utf-8'))\n # ********************ls command***********************#\n\n # ********************cd command***********************#\n def cd(self, **data):\n dirname = data.get(\"dirname\")\n if dirname == \"..\":\n self.mainPath = os.path.dirname(self.mainPath)\n else:\n self.mainPath = os.path.join(self.mainPath, dirname)\n self.request.sendall(self.mainPath.encode('utf-8'))\n # ********************cd command***********************#\n\n # ********************mkdir command***********************#\n def mkdir(self, **data):\n dirname = data.get(\"dirname\")\n path = os.path.join(self.mainPath,dirname)\n if not os.path.exists(path):\n if \"/\" in dirname:\n os.makedirs(path)\n else:\n os.mkdir(path)\n self.request.sendall(\"success\".encode('utf-8'))\n else:\n self.request.sendall(\"dirname exist\".encode('utf-8'))\n # ********************mkdir command***********************#\n\n# ********************command handlers******************************#\n\n\n\n\n" }, 
{ "alpha_fraction": 0.45772168040275574, "alphanum_fraction": 0.4715208411216736, "avg_line_length": 37.6988639831543, "blob_id": "a3cd815a40f8d43d6c7cde5f7f8329e618fb3269", "content_id": "c2be6ce3070e5f4c0fe5e25041e0aef560e9a448", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7910, "license_type": "no_license", "max_line_length": 120, "num_lines": 176, "path": "/Ftp详解/client端/ftp_client.py", "repo_name": "cogitozz/FTP_program", "src_encoding": "UTF-8", "text": "from socket import *\nimport optparse ,json,os, sys\n\nSTATUS_CODE = {\n 200:\"OK\",\n 401:\"authentication error\",\n 400:\"bad request\",\n \"800\": \"file incomplete\",\n \"801\": \"file exists\",\n \"802\": \"file does not exist\",\n}\n\nclass ClientHandler():\n def __init__(self):\n self.op = optparse.OptionParser()\n\n self.op.add_option('-s', '--server', dest='server')\n self.op.add_option('-P', '--port', dest='port')\n self.op.add_option('-u', '--username', dest='username')\n self.op.add_option('-p', '--password', dest='password') # define the custom command-line format\n\n self.options, self.args = self.op.parse_args()\n self.verify_args(self.options, self.args) # parse and validate the IP address and port given on the command line\n self.make_connection() # connect to the server using the parsed IP address and port\n self.mainPath = os.path.dirname(os.path.abspath(__file__)) # absolute path of ftp_client, i.e. FTP_client\n\n def verify_args(self, options, args): # argument validation helper\n server = options.server\n port = options.port\n if int(port) > 0 and int(port) < 65535:\n return True\n else:\n exit(\"the port must be between 0 and 65535\")\n\n def make_connection(self): # connect to the server\n self.client_socket = socket()\n self.client_socket.connect((self.options.server, int(self.options.port)))\n\n#***************************above: initialization, i.e. parsing the IP address/port and connecting to the server*****************************#\n#***************************below: the methods used when talking to the server,******************************#\n# for each feature, write a matching method: the client decides what to send and how to handle the server's reply******************************#\n\n def interactive(self): # interact with the server and pick the matching method; authentication runs first,\n # then each command is dispatched to its method\n print(\"starting interactive session.....\")\n if self.authenticate():\n while 1:\n cmd_info = input(\"[%s]\"%self.current_dir).strip()\n cmd_list = cmd_info.split() # split the typed input\n if hasattr(self, cmd_list[0]): # dispatch on the typed action\n func = getattr(self, cmd_list[0])\n func(*cmd_list)\n\n def response(self): # receive helper\n data = self.client_socket.recv(1024).decode('utf-8')\n data = json.loads(data)\n return data\n\n# *****************************login verification***************************************#\n def authenticate(self): # check whether the username and password were supplied\n if self.options.username is None or self.options.password is None:\n username = input(\"username: \")\n password = input(\"password: \")\n return self.get_auth_result(username, password)\n return self.get_auth_result(self.options.username, self.options.password)\n\n def get_auth_result(self, user, pwd): # if they are set, send the action name plus the credentials to the server as a dict for verification\n data = {\n \"action\": \"auth\",\n \"username\": user,\n \"password\": pwd\n }\n self.client_socket.send(json.dumps(data).encode('utf-8'))\n response = self.response()\n # print(response)\n print(\"response:\", response[\"status_code\"])\n if response[\"status_code\"] == 200:\n self.user = user\n self.current_dir = user\n print(STATUS_CODE[200])\n return True\n else:\n print(STATUS_CODE[response[\"status_code\"]])\n# *****************************login verification***************************************#\n\n# *****************************file upload***************************************#\n def put(self, *cmd_list):\n # put 001.png images # how put is used on the command line\n action, local_path, target_path = cmd_list\n local_path = os.path.join(self.mainPath, local_path) # join the base path with the local file path; note this assumes the file sits in the same directory as ftp_client\n file_name = os.path.basename(local_path) # basename of the local path; dirname() would give the directory\n file_size = os.stat(local_path).st_size # file size\n\n data = { # this dict is what lets the server recognize the client's command and run the matching method\n \"action\": \"put\",\n \"file_name\": file_name,\n \"file_size\": file_size,\n \"target_path\": target_path\n }\n\n self.client_socket.send(json.dumps(data).encode('utf-8'))\n is_exist = self.client_socket.recv(1024).decode('utf-8')\n\n has_sent = 0\n if is_exist == \"800\": # the server reports the file is there but incomplete\n # incomplete file\n choice = input('the file exists but is incomplete, continue? [Y/N]').strip()\n if choice.upper() == \"Y\": # choosing N means resending from scratch\n self.client_socket.sendall(\"Y\".encode('utf-8')) # tell the server we will resume\n continue_position = self.client_socket.recv(1024).decode('utf-8')\n has_sent += int(continue_position)\n\n elif is_exist == \"801\":\n # the file already exists in full\n print(\"file already exists\")\n return\n # else:\n # pass\n\n f = open(local_path, \"rb\")\n f.seek(has_sent) # on resume, continue sending from has_sent\n while has_sent < file_size: # if the server said the file does not exist, it is sent from the beginning\n data = f.read(1024)\n self.client_socket.sendall(data)\n has_sent += len(data)\n self.show_progress(has_sent, file_size)\n f.close()\n print(\"upload complete\")\n\n # *********************progress bar*****************************#\n def show_progress(self, has, total):\n self.last = 0\n rate = float(has) / float(total)\n rate_num = int(rate * 100)\n if self.last != rate_num:\n sys.stdout.write(\"%s%% %s\\r\" % (rate_num, \"#\" * rate_num))\n self.last = rate_num\n # *********************progress bar*********************#\n# *****************************file upload***************************************#\n\n\n# *****************************command methods***************************************#\n\n # ********************ls command***********************#\n def ls(self, *cmd_list):\n data = {\n \"action\":\"ls\"\n }\n self.client_socket.sendall(json.dumps(data).encode('utf-8'))\n data = self.client_socket.recv(1024).decode('utf-8')\n print(data)\n # ********************ls command***********************#\n\n # ********************cd command***********************#\n def cd(self,*cmd_list):\n data = {\n \"action\":\"cd\",\n \"dirname\":cmd_list[1]\n }\n self.client_socket.sendall(json.dumps(data).encode('utf-8'))\n data = self.client_socket.recv(1024).decode('utf-8')\n print(os.path.basename(data))\n self.current_dir = os.path.basename(data)\n # ********************cd command***********************#\n\n # ********************mkdir command***********************#\n def mkdir(self, *cmd_list):\n data = {\n \"action\": \"mkdir\",\n \"dirname\": cmd_list[1]\n }\n self.client_socket.sendall(json.dumps(data).encode('utf-8'))\n data = self.client_socket.recv(1024).decode('utf-8')\n # ********************mkdir command***********************#\n\n# *****************************command methods***************************************#\n\n" }, 
{ "alpha_fraction": 0.5752118825912476, "alphanum_fraction": 0.5762711763381958, "avg_line_length": 25.94285774230957, "blob_id": "951708407bdb8edcff912a3227b6d260e01ec372", "content_id": "43a74148fd5fc2adfd8f0a0d09c3c12707546481", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1264, "license_type": "no_license", "max_line_length": 73, "num_lines": 35, "path": "/Ftp详解/server端/core/main.py", "repo_name": "cogitozz/FTP_program", "src_encoding": "UTF-8", "text": "import optparse # parses command-line arguments\nimport socketserver\nfrom conf import settings\nfrom core import server_handler\n\n\n\"\"\"\nThe code in this file only parses the command-line arguments and nothing else;\nwhen the requested feature starts, control moves on to another file\n\"\"\"\n\n\nclass ArgvHandler():\n\n def __init__(self):\n # print(\"ok\")\n self.op = optparse.OptionParser()\n options, args = self.op.parse_args()\n\n self.verify_args(options, args)\n\n\n def verify_args(self, options, args):\n cmd = args[0]\n # next, check whether this class has a method matching cmd, and run it if so.\n # an if-else chain would work, or the candidate values could go into a dict and be looked up there\n if hasattr(self, cmd):\n func = getattr(self, cmd)\n func() # this uses reflection: any method below whose name matches cmd can be called\n\n def start(self):\n print(\"working...\")\n s = socketserver.ThreadingTCPServer((settings.IP, settings.PORT),\n server_handler.ServerHandler)\n # constants live in the settings file, which makes them easy to change\n s.serve_forever()\n\n" }, 
{ "alpha_fraction": 0.6114285588264465, "alphanum_fraction": 0.6171428561210632, "avg_line_length": 27.41666603088379, "blob_id": "4a9b9f286399066eb1f57b23972d0fc9f15dfc46", "content_id": "41c40ea18f66ae5a2c5fc38dab5799f674c194a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 470, "license_type": "no_license", "max_line_length": 61, "num_lines": 12, "path": "/Ftp详解/client端/ftp_client_start.py", "repo_name": "cogitozz/FTP_program", "src_encoding": "UTF-8", "text": "import os, sys\nPATH = os.path.dirname(os.path.abspath(__file__))\nprint(PATH)\nsys.path.append(PATH)\n\nfrom ftp_client import ClientHandler\n\n\nif __name__ == '__main__':\n client_start = ClientHandler() # 1. build the handler object that drives the client-side logic.\n # It also initializes state, e.g. parses the command line and connects to the server\n client_start.interactive() # 2. once connected, interact with the server\n\n\n\n\n\n\n\n\n\n" }, 
{ "alpha_fraction": 0.8235294222831726, "alphanum_fraction": 0.8235294222831726, "avg_line_length": 16, "blob_id": "b7ac54983570b7c2420ea07c7d3738e5fd374542", "content_id": "8afcb2542d2ce3691ac583e2d107ddcd1e5f0a3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 115, "license_type": "no_license", "max_line_length": 20, "num_lines": 3, "path": "/README.md", "repo_name": "cogitozz/FTP_program", "src_encoding": "UTF-8", "text": "# FTP_program\nFTP file upload, resumable transfer, remote command operations, and more\nDownload, registration and other features are still under development\n" } ]
7
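Both sides of the FTP program above speak the same small protocol: a JSON dict whose "action" key is dispatched with hasattr/getattr, exactly as in ServerHandler.handle(). Here is a self-contained sketch of that dispatch loop with no sockets; the Dispatcher class and its method names are illustrative, and the bytes stand in for what recv() would return.

```python
# Stand-alone sketch of the hasattr/getattr command dispatch used by
# ServerHandler.handle() above; names here are illustrative, not from the repo.
import json

class Dispatcher:
    def handle(self, raw: bytes):
        data = json.loads(raw.decode("utf-8"))
        action = data.get("action")
        if action and hasattr(self, action):
            getattr(self, action)(**data)  # the handler receives the whole dict
        else:
            print("Invalid cmd")

    def auth(self, **data):
        print("authenticating", data.get("username"))

Dispatcher().handle(json.dumps(
    {"action": "auth", "username": "yuan", "password": "123"}).encode("utf-8"))
```

Keeping the action name equal to the method name is what lets both client and server add a new command by writing a single method.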
devopsconsulting/python-mutexlock
https://github.com/devopsconsulting/python-mutexlock
183c70318f51d97f28eae3752dcb340752f75557
817967862a464e148560b63198dad7b2763d344f
5bc7d85f361022e3d1a6d697f68f3a5b2b4c3c81
refs/heads/master
2020-05-27T13:42:41.159902
2012-11-20T19:13:34
2012-11-20T19:13:34
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6539074778556824, "alphanum_fraction": 0.6586921811103821, "avg_line_length": 20.620689392089844, "blob_id": "92d71f6d6b5d4aa71025013d2008ff43082f0849", "content_id": "368dfa305909b74896c547a9c5c8f8d51eae2d9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 627, "license_type": "permissive", "max_line_length": 62, "num_lines": 29, "path": "/setup.py", "repo_name": "devopsconsulting/python-mutexlock", "src_encoding": "UTF-8", "text": "\"\"\"\nmutexlock\n============\n\nA python locking implementation using mutex\n\"\"\"\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nversion = '1.0'\n\nsetup(\n name='mutexlock',\n version=version,\n description=\"A python locking implementation using mutex\",\n long_description=__doc__,\n classifiers=[],\n # Get strings from\n # http://pypi.python.org/pypi?%3Aaction=list_classifiers\n keywords='',\n author='Lars van de Kerkhof',\n author_email='[email protected]',\n url='https://github.dtc.avira.com/VDT/mutexlock',\n license='GPL',\n packages=['mutexlock'],\n include_package_data=True,\n zip_safe=False,\n)\n" }, { "alpha_fraction": 0.5528605580329895, "alphanum_fraction": 0.5536514520645142, "avg_line_length": 35.123809814453125, "blob_id": "55fe8b3bd7d9b5ce9741ba5f6530dff5199f1d9d", "content_id": "9424e6c6f4cd53dd292232632a264efbf0948ff5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3793, "license_type": "no_license", "max_line_length": 78, "num_lines": 105, "path": "/mutexlock/__init__.py", "repo_name": "devopsconsulting/python-mutexlock", "src_encoding": "UTF-8", "text": "import errno\nimport os\nimport time\n\n\nclass SemaphoreException(Exception):\n \"\"\"\n An exception that gets raised when an error occurs in the mutex semaphore\n context\n \"\"\"\n\n\nclass mutexlock(object):\n \"\"\"\n A semaphore context manager that uses a temporary file for locking.\n Only one thread or process can get a lock on the file at once.\n\n It can be used to mark a block of code as being executed exclusively\n by some thread. See\n `mutex <http://en.wikipedia.org/wiki/Mutual_exclusion>`_.\n\n usage::\n\n from __future__ import with_statement\n from avira.deploy.lock import mutex\n\n with mutex():\n print \"hi only one thread will be executing this block of code\\\n at a time.\"\n\n Mutex raises an :class:`avira.deploy.lock.SemaphoreException` when it\n has to wait too long to obtain a lock or when it cannot determine how long\n it was waiting.\n\n :param lockfile: The path and name of the pid file used to create the\\\n semaphore.\n :param max_wait: The maximum number of seconds the process should wait to\n obtain the semaphore.\n \"\"\"\n\n def __init__(self, lockfile='/tmp/avira.deploy.lock', max_wait=0):\n # the maximum reasonable time for a process to hold the lock\n self.max_wait = max_wait\n\n # the location of the lock file\n self.lockfile = lockfile\n\n def __enter__(self):\n while True:\n try:\n # if the file exists you can not create it and get an\n # exclusive lock on it. 
this is an atomic operation.\n file_descriptor = os.open(self.lockfile,\n os.O_EXCL | os.O_RDWR | os.O_CREAT)\n # we created the lockfile, so we're the owner\n break\n except OSError as e:\n if e.errno != errno.EEXIST:\n # should not occur\n raise e\n\n # if we got here the file exists so lets see\n # how long we are waiting for it\n try:\n # the lock file exists, try to stat it to get its age\n # and read it's contents to report the owner PID\n file_contents = open(self.lockfile, \"r\")\n file_last_modified = os.path.getmtime(self.lockfile)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise SemaphoreException(\n \"%s exists but stat() failed: %s\" %\n (self.lockfile, e.strerror)\n )\n # we didn't create the lockfile, so it did exist, but it's\n # gone now. Just try again\n continue\n\n # we didn't create the lockfile and it's still there, check\n # its age\n if self.max_wait != 0 and \\\n time.time() - file_last_modified > self.max_wait:\n pid = file_contents.read()\n raise SemaphoreException(\n \"%s has been locked for more than \"\n \"%d seconds by PID %s\" % (\n self.lockfile, self.max_wait, pid\n )\n )\n\n # it's not been locked too long, wait a while and retry\n file_contents.close()\n time.sleep(1)\n\n # if we get here. we have the lockfile. Convert the os.open file\n # descriptor into a Python file object and record our PID in it\n\n file_handle = os.fdopen(file_descriptor, \"w\")\n file_handle.write(\"%d\" % os.getpid())\n file_handle.close()\n\n def __exit__(self, exc_type, exc_value, traceback):\n # Remove the lockfile, releasing the semaphore for other processes\n # to obtain\n os.remove(self.lockfile)\n" }, { "alpha_fraction": 0.734375, "alphanum_fraction": 0.734375, "avg_line_length": 15.25, "blob_id": "3c1000e1b032d9b16429379eb1f0cf60c461eb93", "content_id": "112f443b9c504fd693f88df2ef0bdc5b610b915f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 64, "license_type": "no_license", "max_line_length": 43, "num_lines": 4, "path": "/README.md", "repo_name": "devopsconsulting/python-mutexlock", "src_encoding": "UTF-8", "text": "mutexlock\n=========\n\nA python locking implementation using mutex" } ]
3
10arturoV/curso-programacionATS
https://github.com/10arturoV/curso-programacionATS
3f999d23a78acdd59aedec195f8c04f817d3352e
4cc68341682c5c9301354df0893ce99d127d2406
d353abaa1c4205007d41990d4898559d19ab0640
refs/heads/master
2022-11-06T14:25:25.590544
2020-06-20T17:48:54
2020-06-20T17:48:54
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5546875, "alphanum_fraction": 0.5859375, "avg_line_length": 27.481481552124023, "blob_id": "c7d81789a6028866fa04c9ef17f9d836620fa6d2", "content_id": "15cce331b5257787675147c00e6624c3ae44fc95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 768, "license_type": "no_license", "max_line_length": 41, "num_lines": 27, "path": "/prueba3.py", "repo_name": "10arturoV/curso-programacionATS", "src_encoding": "UTF-8", "text": "sueldo = int(input(\"ingrese su sueldo \"))\nprint(\"su sueldo es \"+str(sueldo))\n\ncat = int(input(\"ingresa tu categoria \"))\n\nif (cat == 1):\n cat1 = (sueldo*.15)\n print(\"el aumento es \"+str(cat1))\n aumento = cat1+sueldo\n print(\"el total es \" + str(aumento))\nelif (cat == 2):\n cat2 = (sueldo * .10)\n print(\"el aumento es \" + str(cat2))\n aumento = cat2 + sueldo\n print(\"el total es \" + str(aumento))\nelif (cat == 3):\n cat3 = (sueldo * .08)\n print(\"el aumento es \" + str(cat3))\n aumento = cat3 + sueldo\n print(\"el total es \" + str(aumento))\nelif (cat == 4):\n cat4 = (sueldo * .07)\n print(\"el aumento es \" + str(cat4))\n aumento = cat4 + sueldo\n print(\"el total es \" + str(aumento))\nelse:\n print(\"ingresa una categoria valida\")" }, { "alpha_fraction": 0.6440251469612122, "alphanum_fraction": 0.6477987170219421, "avg_line_length": 38.75, "blob_id": "249929b3330f43472bd24efa5dbf97e6770deba5", "content_id": "34b8e876e172d1969f0c1b7af5bc5222220d99e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 796, "license_type": "no_license", "max_line_length": 97, "num_lines": 20, "path": "/prueba5.py", "repo_name": "10arturoV/curso-programacionATS", "src_encoding": "UTF-8", "text": "cantidad_total = 0\nif __name__ == '__main__':\n\twhile True:\n\t\tprint((\"El monto de la venta es: \"))\n\t\tmonto_venta = float(input())\n\t\tiva = (monto_venta*.16)\n\t\tprint((\"El IVA es: \"),iva)\n\t\ttotal_pagar = (monto_venta+iva)\n\t\tprint((\"El total a pagar es: \"),total_pagar)\n\t\tprint((\"Ingresa el pago del cliente: \"))\n\t\tpago_cliente = float(input())\n\t\tcambio = (pago_cliente-total_pagar)\n\t\tprint((\"El cambio es: \"),cambio)\n\t\tcantidad_total = (cantidad_total+total_pagar)\n\t\twhile True:# no hay 'repetir' en python\n\t\t\tprint((\"¿Capturar nueva compra? 
(S/N):\"))\n\t\t\ttecla_repetir = input()\n\t\t\tif tecla_repetir==\"s\" or tecla_repetir==\"n\" or tecla_repetir==\"S\" or tecla_repetir==\"N\": break\n\t\tif tecla_repetir==\"n\" or tecla_repetir==\"N\": break\n\tprint(\"La cantidad total de dinero en la caja es: \",cantidad_total)\n" }, { "alpha_fraction": 0.6607142686843872, "alphanum_fraction": 0.6607142686843872, "avg_line_length": 15.142857551574707, "blob_id": "2faa2581a8bb4f19d27ef31b7aca9773fbf8a0b3", "content_id": "a06f9471faaf06dd8ecdbf915340cfe18b7834a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 112, "license_type": "no_license", "max_line_length": 30, "num_lines": 7, "path": "/mdulo1.py", "repo_name": "10arturoV/curso-programacionATS", "src_encoding": "UTF-8", "text": "#modulo\n\ndef saludar(nombre):\n print(\"hola, soy \"+nombre)\n\ndef despedir(nombre):\n print(\"bye bye \"+nombre)" }, { "alpha_fraction": 0.5485893487930298, "alphanum_fraction": 0.5579937100410461, "avg_line_length": 30.799999237060547, "blob_id": "e456203d0b23168020240cf075391cd58008ee65", "content_id": "1baf4ffe910857cbec2b21e92e405bf86d86e1ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 319, "license_type": "no_license", "max_line_length": 46, "num_lines": 10, "path": "/prueba9.py", "repo_name": "10arturoV/curso-programacionATS", "src_encoding": "UTF-8", "text": "a = int(input(\"ingresa 1er numero \"))\nb = int(input(\"ingresa 2do numero \"))\nc = int(input(\"ingresa 3er numero \"))\n\nif a >= b and a >= c:\n print(\"el numero \"+str(a)+\" es mayor\")\nelif b >= a and b >= c:\n print(\"el numero \"+str(b)+\" es mayor\")\nelif c >= a and c >= b:\n print(\"el numero \" + str(c) + \" es mayor\")\n\n" }, { "alpha_fraction": 0.5736095905303955, "alphanum_fraction": 0.5790621638298035, "avg_line_length": 38.565216064453125, "blob_id": "a217734a3107375059883574e23eca091d35b02f", "content_id": "e78dcd56e79fcd5d4231271c16563ead9c9e3fa1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 918, "license_type": "no_license", "max_line_length": 114, "num_lines": 23, "path": "/prueba4.py", "repo_name": "10arturoV/curso-programacionATS", "src_encoding": "UTF-8", "text": "\nproducto = int(input(\"cuantos articulos va a llevar \"))\n\ncontador = 0\ncuentaTo = 0\n\nwhile contador < producto:\n contador = contador + 1\n costo = int(input(\"ingrese el costo del producto \"))\n captura = costo * .16\n total = captura + costo\n print(\"costo del articulo con iva \" + str(total))\n cuentaTo = cuentaTo + total\n print(\"el total es \"+ str(cuentaTo))\n break\n pago = float(input(\"ingrese el pago del cliente \"))\n cambio = pago - cuentaTo\n print(\"su cambio es de \"+ str(cambio))\n while True:\n print((\"¿Capturar nueva compra? 
(S/N):\"))\n tecla_repetir = input()\n if tecla_repetir == \"s\" or tecla_repetir == \"n\" or tecla_repetir == \"S\" or tecla_repetir == \"N\": break\n if tecla_repetir == \"n\" or tecla_repetir == \"N\": break\nprint(\"La cantidad total de dinero en la caja es: \", cuentaTo)\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5285714268684387, "alphanum_fraction": 0.5857142806053162, "avg_line_length": 13.199999809265137, "blob_id": "7f93f033a30c7af61146864db4d6309326506e72", "content_id": "6a104eb54032d98968fd2fea370f49e4e749ca4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 70, "license_type": "no_license", "max_line_length": 23, "num_lines": 5, "path": "/bucle while 2.py", "repo_name": "10arturoV/curso-programacionATS", "src_encoding": "UTF-8", "text": "inicio = 0\n\nwhile inicio < 20:\n print(\"hola mundo\")\n inicio += 1" }, { "alpha_fraction": 0.656521737575531, "alphanum_fraction": 0.6681159138679504, "avg_line_length": 30.31818199157715, "blob_id": "29ffef2b5c55bf141fddb500194396efdfa0f571", "content_id": "c770927a87323fb4764f73cc79cab459ab448e3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 690, "license_type": "no_license", "max_line_length": 67, "num_lines": 22, "path": "/cajero.py", "repo_name": "10arturoV/curso-programacionATS", "src_encoding": "UTF-8", "text": "saldoInicial = 1000\n\nprint(\"bienvenido a cajeros atm\")\nopcion = int(input(\"que operacion desea realizar? \"))\n\nif opcion == 1:\n print(f\"su saldo disponible es {saldoInicial}\")\nelif opcion == 2:\n retiro = float(input(\"ingrese la cantidad que desea retirar \"))\n if retiro > saldoInicial:\n print(\"no cuenta con saldo suficiente\")\n else:\n retiro = saldoInicial - retiro\n print(f\"su saldo disponible es {retiro}\")\nelif opcion == 3:\n deposito = float(input(\"ingrese la cantidad a depositar \"))\n deposito += saldoInicial\n print(f\"su saldo disponible es {deposito}\")\nelif opcion == 4:\n print(\"vuelva pronto\")\nelse:\n print(\"error . 
opcion no valida\")\n\n" }, { "alpha_fraction": 0.6713709831237793, "alphanum_fraction": 0.6975806355476379, "avg_line_length": 28.176469802856445, "blob_id": "e492098cef4f72cf3c893a2eda1e9adebfa14b1e", "content_id": "7fadd73a1596b3223a283475ee1999e5bee3d7af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 496, "license_type": "no_license", "max_line_length": 51, "num_lines": 17, "path": "/prueba 2.py", "repo_name": "10arturoV/curso-programacionATS", "src_encoding": "UTF-8", "text": "par1 = int(input(\"ingrese primer parcial \"))\npar2 = int(input(\"ingrese segundo parcial \"))\npar3 = int(input(\"ingrese tercer parcial \"))\n\nparTot = (par1+par2+par3)/3*.55\nprint(\"el parcial total es \"+str(parTot))\n\nexam = int(input(\"ingresa la calif de tu examen \"))\nexa = exam *.15\nprint(\"calificacion examen es \"+str(exa))\n\npro = int(input(\"calificacion de tu proyecto \"))\nproy = pro *.30\nprint(\"tu proyecto saco \"+str(proy))\n\nfinal = parTot+exa+proy\nprint(\"tu calificacion final es \"+str(final))\n" }, { "alpha_fraction": 0.707317054271698, "alphanum_fraction": 0.7439024448394775, "avg_line_length": 12.5, "blob_id": "8430e36f2141b311937e88bd8cd46fe2a71b0f57", "content_id": "e9ee8c78cb46345191e93569663aff6d7e4a7cc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 82, "license_type": "no_license", "max_line_length": 25, "num_lines": 6, "path": "/programa que llama a modulo.py", "repo_name": "10arturoV/curso-programacionATS", "src_encoding": "UTF-8", "text": "\nimport mdulo1\n\nmdulo1.saludar(\"antonio\")\n\nnombre = \"luis\"\nmdulo1.saludar(nombre)\n" }, { "alpha_fraction": 0.7567567825317383, "alphanum_fraction": 0.7702702879905701, "avg_line_length": 13.800000190734863, "blob_id": "6d6992cc46a0b7c207b41ce3c37a892afc227a08", "content_id": "734a32fa94c169c6ba3ab71f65af05bda7c98b00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 74, "license_type": "no_license", "max_line_length": 36, "num_lines": 5, "path": "/de modulo solo importar una funcion.py", "repo_name": "10arturoV/curso-programacionATS", "src_encoding": "UTF-8", "text": "from mdulo1 import despedir as adios\n\nminombre = \"jorge\"\n\nadios(minombre)\n" }, { "alpha_fraction": 0.5382652878761292, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 34.54545593261719, "blob_id": "e72eea4b90fca7e8d706174ef7ee5af5964beefe", "content_id": "4c231e5956c802f07bac525f5996be0c55d9a99c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 392, "license_type": "no_license", "max_line_length": 76, "num_lines": 11, "path": "/prueba8.py", "repo_name": "10arturoV/curso-programacionATS", "src_encoding": "UTF-8", "text": "a = int(input(\"ingresa un numero \"))\nb = int(input(\"ingresa otro numero \"))\n\nif a%2 == 0 and b%2 == 0:\n print(\"los 2 numeros son pares\")\nelif a%2 == 0 and b%2!=0:\n print(\"el numero \"+str(a) + \" es par y el numero \"+str(b)+\" no es par\")\nelif a%2!= 0 and b%2 == 0:\n print(\"el numero \"+str(b) + \" es par y el numero \"+str(a)+\" no es par\")\nelse:\n print(\"los numeros no son pares\")\n\n" }, { "alpha_fraction": 0.5978391170501709, "alphanum_fraction": 0.5978391170501709, "avg_line_length": 33.70833206176758, "blob_id": "4aaa180f0340444da9fdb0e1888ee2f3b074807b", "content_id": "a1a434b6ae53f8cf5c73bd7d18733b9311292f94", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 833, "license_type": "no_license", "max_line_length": 51, "num_lines": 24, "path": "/calculadora aritmetica.py", "repo_name": "10arturoV/curso-programacionATS", "src_encoding": "UTF-8", "text": "operacion = input(\"que operacion desea realizar? \")\n\nif operacion == \"S\" or operacion == \"s\":\n a = float(input(\"ingrese primer digito \"))\n b = float(input(\"ingrese primer digito \"))\n c = a + b\n print(\"la suma es \"+str(c))\nelif operacion == \"R\" or operacion == \"r\":\n a = float(input(\"ingrese primer digito \"))\n b = float(input(\"ingrese primer digito \"))\n c = a - b\n print(\"la resta es \"+str(c))\nelif operacion == \"M\" or operacion == \"m\":\n a = float(input(\"ingrese primer digito \"))\n b = float(input(\"ingrese primer digito \"))\n c = a * b\n print(\"la multiplicacion es \"+str(c))\nelif operacion == \"D\" or operacion == \"d\":\n a = float(input(\"ingrese primer digito \"))\n b = float(input(\"ingrese primer digito \"))\n c = a / b\n print(\"la division es \"+str(c))\nelse:\n print(\"letra no valida\")\n" }, { "alpha_fraction": 0.6239837408065796, "alphanum_fraction": 0.6382113695144653, "avg_line_length": 40.08333206176758, "blob_id": "7bcfaec22b799dbccb98dd0d8fe7fe22df1b18d3", "content_id": "3317b2e06a4cc1b7fe0e0c87f5685ef2688f3e76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 492, "license_type": "no_license", "max_line_length": 96, "num_lines": 12, "path": "/modulo1.py", "repo_name": "10arturoV/curso-programacionATS", "src_encoding": "UTF-8", "text": "class Coche:\n def __init__(self, marca, color, combustible, cilindrada):\n self.marca = marca\n self.color = color\n self.combustible = combustible\n self.cilindrada = cilindrada\n\n def mostrar_caracteristicas(self):\n print(\"mi coche es marca {} el color es {} usa combustible {} y tiene un cilindraje {} \"\n .format(self.marca, self.color, self.combustible, self.cilindrada))\n \nmedia = lambda nota1, nota2, nota3 : (nota1+nota2+nota3)/3" }, { "alpha_fraction": 0.5376884341239929, "alphanum_fraction": 0.5376884341239929, "avg_line_length": 32.16666793823242, "blob_id": "76d5ad53b940795374d3ad60c7cf758d0b5dddd5", "content_id": "3f6ce426eda85aeebbe1bb8b7e5dd27312b9f65b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 199, "license_type": "no_license", "max_line_length": 60, "num_lines": 6, "path": "/mostar si un caracter es vocal o no.py", "repo_name": "10arturoV/curso-programacionATS", "src_encoding": "UTF-8", "text": "a = str(input(\"ingresa el caracter \")).lower()\n\nif a == \"a\" or a == \"e\" or a == \"i\" or a == \"o\" or a == \"u\":\n print(f\"el caracter {a} si es vocal\")\nelse:\n print(f\"el caracter {a} NO es vocal\")\n" }, { "alpha_fraction": 0.6761904954910278, "alphanum_fraction": 0.6857143044471741, "avg_line_length": 29.14285659790039, "blob_id": "ef43ba90e79440522ab4e977b62eb783f9098f6e", "content_id": "ffaa2f25dc5b9ba12ac10813f06818f08e026565", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 210, "license_type": "no_license", "max_line_length": 53, "num_lines": 7, "path": "/bucle while.py", "repo_name": "10arturoV/curso-programacionATS", "src_encoding": "UTF-8", "text": "import math\n\nnumero = int(input(\"ingresa un numero \"))\nwhile numero < 0:\n print(\"ingresa un numero valido\")\n numero = int(input(\"ingresa un numero \"))\nprint(f\"la raiz cuadrada es 
{math.sqrt(numero):.2f}\")" }, { "alpha_fraction": 0.48073023557662964, "alphanum_fraction": 0.5253549814224243, "avg_line_length": 18.719999313354492, "blob_id": "b1b1be240f82978e4dc43100f22ac4141038f077", "content_id": "6e673fc9e04a521f9e59d10e9448f70b701300fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 493, "license_type": "no_license", "max_line_length": 34, "num_lines": 25, "path": "/prueba7.py", "repo_name": "10arturoV/curso-programacionATS", "src_encoding": "UTF-8", "text": "print(\"ingresa 1er digito\")\nnum1 = int(input())\nprint(\"ingresa 2do digito\")\nnum2 = int(input())\nprint(\"ingresa 3er digito\")\nnum3 = int(input())\n\nif num1 > num2:\n if num1 > num3:\n if num2 > num3:\n Mediana = num2\n else:\n Mediana = num3\n else:\n Mediana = num1\nelse:\n if num1 > num3:\n Mediana = num1\n else:\n if num2 > num3:\n Mediana = num3\n else:\n Mediana = num2\n\nprint(f\"La mediana es {Mediana} \")\n" }, { "alpha_fraction": 0.5698924660682678, "alphanum_fraction": 0.5913978219032288, "avg_line_length": 25.714284896850586, "blob_id": "4deb2f8c54fdd8d2c413108b2ed11db8ce5652ec", "content_id": "3a240b36c2899ff3f071ba3e64bafaf274d2b321", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 186, "license_type": "no_license", "max_line_length": 38, "num_lines": 7, "path": "/Ejercicio 1 – Operación aritmética.py", "repo_name": "10arturoV/curso-programacionATS", "src_encoding": "UTF-8", "text": "a =int(input(\"ingrsa el valor de a \"))\nb =int(input(\"ingrsa el valor de b \"))\nc =int(input(\"ingrsa el valor de c \"))\n\nd =float(a**3 * (b**2 - 2*a*c) / 2*b)\n\nprint(f\"la respuesta es {d}\")" }, { "alpha_fraction": 0.6612903475761414, "alphanum_fraction": 0.7150537371635437, "avg_line_length": 22.375, "blob_id": "1410d9ab63e3457285322baeae9e035e9f7e0acf", "content_id": "1357380e94fc74e74b8f4e50cf10015c5dfdd71d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 186, "license_type": "no_license", "max_line_length": 53, "num_lines": 8, "path": "/programa1.py", "repo_name": "10arturoV/curso-programacionATS", "src_encoding": "UTF-8", "text": "import modulo1\n\ncoche1= modulo1.Coche(\"opel\",\"rojo\",\"gasolina\",\"1.6\")\n\nprint(coche1.mostrar_caracteristicas())\n\nmedia = modulo1.media(8, 8, 8)\nprint(\"nuestra nota media es \"+ str(media))" } ]
18
scastrain/MCOC2021-P0-1
https://github.com/scastrain/MCOC2021-P0-1
5843e7780d1f3709df9f39bdeae50adf38eafa06
1296d68865da0e141b0504f4bd9b6f4b87cc5ea3
2e106cd1df38da9f51a995251a1bf7817fecd7d1
refs/heads/main
2023-07-21T18:55:29.295287
2021-09-03T16:35:58
2021-09-03T16:35:58
392,100,682
0
0
null
2021-08-02T21:35:44
2021-08-02T16:25:56
2021-08-02T16:25:54
null
[ { "alpha_fraction": 0.4245283007621765, "alphanum_fraction": 0.45943397283554077, "avg_line_length": 22.090909957885742, "blob_id": "309fd88d327ff48a4f66cd542865e8fb00354c41", "content_id": "c5c34c5ddb66b35ecf12fb66972d49fa8fd12893", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2120, "license_type": "no_license", "max_line_length": 87, "num_lines": 88, "path": "/Entrega 5/Complejidad_computacional.py", "repo_name": "scastrain/MCOC2021-P0-1", "src_encoding": "UTF-8", "text": "import numpy as np\r\nfrom scipy import linalg\r\nfrom time import perf_counter\r\nfrom numpy.linalg import solve\r\nfrom scipy.sparse import lil_matrix, csc_matrix\r\nfrom scipy.sparse.linalg import spsolve, inv\r\n\r\ndef laplaciana_llena(N,t=np.double):\r\n A=np.identity(N,t)*2\r\n for i in range(N):\r\n for j in range (N):\r\n if i+1==j:\r\n A[i,j]=-1\r\n if i-1==j:\r\n A[i,j]=-1\r\n return A\r\n \r\n\r\ndef laplaciana_dispersa(N,t=np.double):\r\n A=lil_matrix((N,N))\r\n for i in range(N):\r\n for j in range (N):\r\n if i==j:\r\n A[i,j]=2\r\n if i+1==j:\r\n A[i,j]=-1\r\n if i-1==j:\r\n A[i,j]=-1\r\n return csc_matrix(A)\r\n\r\nNm= [2,5,10,16,32,60,130,260,550,1050,2100,4100,4500]\r\n\r\nNumcorridas=10\r\n\r\nfor i in range(Numcorridas):\r\n nombres= [f\"Complejidad_MATMUL_llena{i}.txt\",f\"Complejidad_MATMUL_dispersa{i}.txt\"]\r\n archivos=[open(nombre,\"w\") for nombre in nombres]\r\n \r\n for N in Nm:\r\n ensamblaje = np.zeros((len(archivos)))\r\n solucion = np.zeros ((len(archivos)))\r\n \r\n print (f\"N={N}\")\r\n \r\n \r\n t1=perf_counter()\r\n \r\n \r\n A= laplaciana_llena(N)\r\n B= laplaciana_llena(N)\r\n \r\n\r\n t2=perf_counter()\r\n \r\n C= A@B\r\n \r\n \r\n t3=perf_counter()\r\n \r\n \r\n dt1 = t2 - t1\r\n dt2 = t3 - t2\r\n \r\n ensamblaje[0]= dt1\r\n solucion[0]= dt2\r\n \r\n #CASO2: MATMUL- Matriz dispersa: \r\n \r\n t1=perf_counter()\r\n \r\n A= laplaciana_dispersa(N)\r\n B= laplaciana_dispersa(N)\r\n t2=perf_counter()\r\n \r\n C= A@B\r\n t3=perf_counter()\r\n\r\n dt1 = t2 - t1\r\n dt2 = t3 - t2\r\n \r\n ensamblaje[1]= dt1\r\n solucion[1]= dt2 \r\n \r\n for j in range(len(archivos)):\r\n archivos[j].write(f\"{N} {ensamblaje[j]} {solucion[j]}\\n\")\r\n archivos[j].flush()\r\n \r\n[archivo.close()for archivo in archivos]\r\n" }, { "alpha_fraction": 0.4772079885005951, "alphanum_fraction": 0.5118708610534668, "avg_line_length": 24.452829360961914, "blob_id": "30b27fe0805a5ba8fc167d2a47b2f580686174ab", "content_id": "70ae345c45d458620cdbb94a3c3c85a51fcececb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4213, "license_type": "no_license", "max_line_length": 205, "num_lines": 159, "path": "/Entrega 4/Eigh/Double/timing_eigh_double.py", "repo_name": "scastrain/MCOC2021-P0-1", "src_encoding": "UTF-8", "text": "from time import perf_counter\r\nimport numpy as np\r\nfrom scipy import linalg\r\nfrom scipy.linalg import eigh, inv\r\nfrom numpy import float32, float64\r\n\r\n# se crea la funcion de Laplacian Matrix\r\n\r\ndef laplaciana(N, d= float64):\r\n A=-(np.eye(N,k=-1,dtype=d))+2*(np.eye(N,dtype=d))+-(np.eye(N, k=+1,dtype=d))\r\n return A\r\n\r\n# Tamaño de matrices\r\nNm= [2,5,10,12,15,20,30,40,45,50,55,60,75,100,125,160,190,200,250,300,350, 400, 
500,550,590,600,800]\r\n\r\nNumcorridas=10\r\n\r\nnombres=[\"A_eigh_caso_1.txt\",\"A_eigh_caso_2_F.txt\",\"A_eigh_caso_3_T.txt\",\"A_eigh_caso_4_F.txt\",\"A_eigh_caso_5_T.txt\",\"A_eigh_caso_6_F.txt\",\"A_eigh_caso_7_T.txt\",\"A_eigh_caso_8_F.txt\",\"A_eigh_caso_9_T.txt\"]\r\narchivos=[open(nombre,\"w\") for nombre in nombres]\r\n\r\nfor N in Nm:\r\n dts=np.zeros((Numcorridas,len(archivos)))\r\n print (f\"N={N}\")\r\n \r\n\r\n #Caso 1\r\n for i in range (Numcorridas):\r\n # Se crea matriz laplaciana A:\r\n A=laplaciana(N)\r\n t1=perf_counter()\r\n\r\n A_invB=linalg.eigh(A)\r\n t2=perf_counter()\r\n dt=t2-t1\r\n \r\n # agregar al archivo en segunda columna\r\n dts[i][0]=dt\r\n \r\n \r\n #Caso 2\r\n # Se crea matriz laplaciana A:\r\n A=laplaciana(N)\r\n t1=perf_counter()\r\n\r\n A_invB=linalg.eigh(A, overwrite_a=False, driver=\"ev\")\r\n t2=perf_counter()\r\n dt=t2-t1\r\n \r\n # agregar al archivo en segunda columna\r\n dts[i][1]=dt\r\n \r\n \r\n #Caso 3\r\n # Se crea matriz y vector\r\n A=laplaciana(N)\r\n B=np.ones(N)\r\n t1=perf_counter()\r\n A_invB2=linalg.eigh(A, overwrite_a=True, driver=\"ev\")\r\n t2=perf_counter() \r\n dt=t2-t1\r\n \r\n # agregar al archivo en tercera columna:\r\n dts[i][2]=dt\r\n\r\n\r\n #Caso 4\r\n # Se crea matriz y vector\r\n A=laplaciana(N)\r\n B=np.ones(N)\r\n t1=perf_counter()\r\n A_invB2=linalg.eigh(A, overwrite_a=False, driver=\"evd\")\r\n t2=perf_counter() \r\n dt=t2-t1\r\n \r\n # agregar al archivo en cuarta columna:\r\n dts[i][3]=dt\r\n \r\n \r\n \r\n #Caso 5\r\n # Se crea matriz y vector\r\n A=laplaciana(N)\r\n B=np.ones(N)\r\n t1=perf_counter()\r\n A_invB2=linalg.eigh(A, overwrite_a=True, driver=\"evd\")\r\n t2=perf_counter() \r\n dt=t2-t1\r\n \r\n # agregar al archivo en quinta columna:\r\n dts[i][4]=dt \r\n\r\n #Caso 6\r\n # Se crea matriz laplaciana A:\r\n A=laplaciana(N)\r\n t1=perf_counter()\r\n\r\n A_invB=linalg.eigh(A, overwrite_a=False, driver=\"evr\")\r\n t2=perf_counter()\r\n dt=t2-t1\r\n \r\n # agregar al archivo en segunda columna\r\n dts[i][5]=dt\r\n \r\n \r\n #Caso 7\r\n # Se crea matriz y vector\r\n A=laplaciana(N)\r\n B=np.ones(N)\r\n t1=perf_counter()\r\n A_invB2=linalg.eigh(A, overwrite_a=True, driver=\"evr\")\r\n t2=perf_counter() \r\n dt=t2-t1\r\n \r\n # agregar al archivo en tercera columna:\r\n dts[i][6]=dt\r\n\r\n\r\n #Caso 8\r\n # Se crea matriz y vector\r\n A=laplaciana(N)\r\n B=np.ones(N)\r\n t1=perf_counter()\r\n A_invB2=linalg.eigh(A, overwrite_a=False, driver=\"evx\")\r\n t2=perf_counter() \r\n dt=t2-t1\r\n \r\n # agregar al archivo en cuarta columna:\r\n dts[i][7]=dt\r\n \r\n \r\n \r\n #Caso 9\r\n # Se crea matriz y vector\r\n A=laplaciana(N)\r\n B=np.ones(N)\r\n t1=perf_counter()\r\n A_invB2=linalg.eigh(A, overwrite_a=True, driver=\"evx\")\r\n t2=perf_counter() \r\n dt=t2-t1\r\n \r\n # agregar al archivo en quinta columna:\r\n dts[i][8]=dt\r\n\r\n print (\"dts: \", dts)\r\n \r\n # Se calcula el promedio de los tiempos:\r\n dts_mean=[]\r\n for j in range(len(archivos)):\r\n dts_mean.append(np.mean(dts[:,j]))\r\n \r\n print(\"dts_mean: \", dts_mean)\r\n \r\n \r\n # Se agregan los resultados al archivo de texto:\r\n for j in range(len(archivos)):\r\n archivos[j].write(f\"{N} {dts_mean[j]}\\n\")\r\n archivos[j].flush()\r\n \r\n[archivo.close()for archivo in archivos]\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.4646666646003723, "alphanum_fraction": 0.5720000267028809, "avg_line_length": 20, "blob_id": "504d0d5a29a4be1eb53cb7770ccc3a2fdea660ed", "content_id": "5b3dd86e1c329233ec3f028c43ca7cfafc2a8300", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1504, "license_type": "no_license", "max_line_length": 90, "num_lines": 68, "path": "/Entrega 3/Casos 2/double/Gráficos_inv_caso2_double.py", "repo_name": "scastrain/MCOC2021-P0-1", "src_encoding": "UTF-8", "text": "import matplotlib.pylab as plt\r\n\r\n\r\nNs = []\r\ndts = []\r\nmems = []\r\n\r\ntitulo = \"caso 2 double\"\r\n\r\nfor i in range(10):\t\r\n\tfid = open(f\"rendimiento {titulo}{i}.txt\", \"r\")\t\r\n\r\n\tfor line in fid:\r\n\t\tsl = line.split()\r\n\t\tN = int(sl[0])\r\n\t\tdt = float(sl[1])\r\n\t\tmem = int(sl[2])\r\n\r\n\t\tNs.append(N)\r\n\t\tdts.append(dt)\r\n\t\tmems.append(mem)\r\n\r\n\tfid.close()\r\n\r\n\r\nejed_Tiempo = [1e-4, 1e-3, 1e-2, 1e-1, 1, 10, 60, 600]\r\neje_Tiempo = [\"0.1 ms\",\"1 ms\", \"10 ms\", \"0.1 s\", \"1 s\", \"10 s\", \"1 min\", \"10 min\"]\r\n\r\nejedx_Uso = [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000]\r\nejex_Uso = [\"10\",\"20\", \"50\", \"100\",\"200\", \"500\", \"1000\", \"2000\", \"5000\", \"10000\", \"20000\"]\r\n\r\nejedy_Uso = [1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11]\r\nejey_Uso = [\"1 KB\",\"10 KB\", \"100 KB\", \"1 MB\",\"10 MB\", \"100 MB\", \"1 GB\", \"10 GB\", \"\"]\r\n\r\n\r\nplt.figure()\r\n\r\n#gráfico tiempo transcurrido\r\n\r\nplt.subplot(2, 1, 1) #dos filas 1 columna\r\nplt.title(f\"Desempeño {titulo}\")\r\n\r\nM = 10\r\nfor i in range(M):\r\n\tplt.loglog(Ns[i*M:(i+1)*M], dts[i*M:(i+1)*M], marker=\"o\")\r\nplt.ylabel(\"Tiempo transcurrido (s)\")\r\n#plt.loglog(Ns, dts, marker=\"o\")\r\nplt.xticks(ejedx_Uso, [])\r\nplt.yticks(ejed_Tiempo, eje_Tiempo)\r\n\r\nplt.grid(b=True)\r\n\r\n#gráfico uso de memoria\r\n\r\nplt.subplot(2, 1, 2)\r\n\r\nplt.xlabel(\"Tamaño matriz N\")\r\nplt.ylabel(\"Uso memoria (s)\")\r\nplt.loglog(Ns, mems, marker=\"o\")\r\nplt.xticks(ejedx_Uso, ejex_Uso, rotation=45)\r\nplt.yticks(ejedy_Uso, ejey_Uso)\r\nplt.axhline(y=8*10**9 , linestyle=\"--\", color=\"k\")\r\n\r\nplt.grid(b=True)\r\n\r\n\r\n\r\nplt.show()\r\n\r\n\r\n" }, { "alpha_fraction": 0.49616172909736633, "alphanum_fraction": 0.5281473994255066, "avg_line_length": 25.871429443359375, "blob_id": "fc1953928a0c36be59fd1c9fa8ab0e2635ff3ed1", "content_id": "a27ba7fe7104bb7cb9741178deb9ea7ce7e27217", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3909, "license_type": "no_license", "max_line_length": 208, "num_lines": 140, "path": "/Entrega 4/Solve/timing_solve.py", "repo_name": "scastrain/MCOC2021-P0-1", "src_encoding": "UTF-8", "text": "from time import perf_counter\r\nimport numpy as np\r\nfrom scipy import linalg\r\nfrom scipy.linalg import solve, inv\r\n\r\n# se crea la funcion de Laplacian Matrix\r\nfrom numpy import float32\r\ndef laplaciana(N, d= float32):\r\n A=-(np.eye(N,k=-1,dtype=d))+2*(np.eye(N,dtype=d))+-(np.eye(N, k=+1,dtype=d))\r\n return A\r\n\r\n# Tamaño de matrices\r\nNm= [2,5,10,12,15,20,30,40,45,50,55,60,75,100,125,160,200,250,300,350,500,600,800,900,1000,1500, 2000]\r\n\r\nNumcorridas=10\r\n\r\nnombres=[\"A_invB_inv.txt\",\"A_invB_spSolve.txt\",\"A_invB_spSolve_pos.txt\",\"A_invB_spSolve_symmetric.txt\",\"A_invB_spSolve_overwrite_a.txt\",\"A_invB_spSolve_pos_overwrite_b.txt\",\"A_invB_spSolve_pos_overwrite.txt\"]\r\narchivos=[open(nombre,\"w\") for nombre in nombres]\r\n\r\nfor N in Nm:\r\n dts=np.zeros((Numcorridas,len(archivos)))\r\n print (f\"N={N}\")\r\n \r\n\r\n #FORMA 1 \r\n for i in range (Numcorridas):\r\n # Se crea matriz laplaciana A y se crea un vector de 
unos:\r\n A=laplaciana(N)\r\n B=np.ones(N)\r\n t1=perf_counter()\r\n \r\n # Se invierte la matriz A y se multiplica por el vector B:\r\n A_inv=linalg.inv(A)\r\n A_invB=A_inv@B\r\n t2=perf_counter()\r\n dt=t2-t1\r\n \r\n # agregar al archivo en primera columna\r\n dts[i][0]=dt\r\n \r\n \r\n \r\n #FORMA2: A_invB_spSolve(A,B)\r\n # Se crea matriz laplaciana A:\r\n A=laplaciana(N)\r\n # Se crea un vector de unos:\r\n B=np.ones(N)\r\n t1=perf_counter()\r\n\r\n A_invB=linalg.solve(A,B)\r\n t2=perf_counter()\r\n dt=t2-t1\r\n \r\n # agregar al archivo en segunda columna\r\n dts[i][1]=dt\r\n \r\n \r\n #FORMA 3: A_invB_spSolve_pos(A,B)\r\n # Se crea matriz y vector\r\n A=laplaciana(N)\r\n B=np.ones(N)\r\n t1=perf_counter()\r\n A_invB2=linalg.solve(A,B,assume_a=\"pos\")\r\n t2=perf_counter() \r\n dt=t2-t1\r\n \r\n # agregar al archivo en tercera columna:\r\n dts[i][2]=dt\r\n\r\n\r\n #FORMA 4: A_invB_spSolve_symmetric(A,B)\r\n # Se crea matriz y vector\r\n A=laplaciana(N)\r\n B=np.ones(N)\r\n t1=perf_counter()\r\n A_invB2=linalg.solve(A,B,assume_a=\"sym\")\r\n t2=perf_counter() \r\n dt=t2-t1\r\n \r\n # agregar al archivo en cuarta columna:\r\n dts[i][3]=dt\r\n \r\n \r\n \r\n #FORMA5: A_invB_spSolve_overwrite_a(A,B)\r\n # Se crea matriz y vector\r\n A=laplaciana(N)\r\n B=np.ones(N)\r\n t1=perf_counter()\r\n A_invB2=linalg.solve(A,B,overwrite_a=True)\r\n t2=perf_counter() \r\n dt=t2-t1\r\n \r\n # agregar al archivo en quinta columna:\r\n dts[i][4]=dt\r\n \r\n \r\n \r\n #FORMA6: A_invB_spSolve_pos_overwrite_b(A,B)\r\n # Se crea matriz y vector\r\n A=laplaciana(N)\r\n B=np.ones(N)\r\n t1=perf_counter()\r\n A_invB2=linalg.solve(A,B,overwrite_b=True)\r\n t2=perf_counter() \r\n dt=t2-t1\r\n \r\n # agregar al archivo en sexta columna:\r\n dts[i][5]=dt\r\n \r\n\r\n\r\n #FORMA7: A_invB_spSolve_pos_overwrite(A,B)\r\n # Se crea matriz y vector\r\n A=laplaciana(N)\r\n B=np.ones(N)\r\n t1=perf_counter()\r\n A_invB2=linalg.solve(A,B,overwrite_a=True,overwrite_b=True)\r\n t2=perf_counter() \r\n dt=t2-t1\r\n \r\n # agregar al archivo en sexta columna:\r\n dts[i][6]=dt \r\n\r\n print (\"dts: \", dts)\r\n \r\n # Se calcula el promedio de los tiempos:\r\n dts_mean=[]\r\n for j in range(len(archivos)):\r\n dts_mean.append(np.mean(dts[:,j]))\r\n \r\n print(\"dts_mean: \", dts_mean)\r\n \r\n \r\n # Se agregan los resultados al archivo de texto:\r\n for j in range(len(archivos)):\r\n archivos[j].write(f\"{N} {dts_mean[j]}\\n\")\r\n archivos[j].flush()\r\n \r\n[archivo.close()for archivo in archivos]\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.5116279125213623, "alphanum_fraction": 0.5581395626068115, "avg_line_length": 11.4375, "blob_id": "92e3c7f9fff9032243a5cf1178bd90e5e5e57133", "content_id": "216a12009a4ca73d51162f2d0c97a02d6196f871", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 216, "license_type": "no_license", "max_line_length": 29, "num_lines": 16, "path": "/Entrega 1/timing_matmul.py", "repo_name": "scastrain/MCOC2021-P0-1", "src_encoding": "UTF-8", "text": "\r\nfrom numpy import zeros\r\nfrom time import perf_counter\r\n\r\n#Tamaño\r\nN = 2000\r\nA = zeros((N, N))+1\r\nB = zeros((N, N))+2\r\n\r\n\r\nt1 = perf_counter()\r\nC = A@B\r\nt2 = perf_counter()\r\n\r\ndt = t2 - t1\r\n\r\nprint(f\"dt = {dt} s\")" }, { "alpha_fraction": 0.4515879154205322, "alphanum_fraction": 0.5476374626159668, "avg_line_length": 29.536584854125977, "blob_id": "a2e42c330e88fbed4b8f0ec3c0e5feb1c494bc32", "content_id": "3d2f18c5964be20dfee183e9affc593323411dfe", "detected_licenses": 
[], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1294, "license_type": "no_license", "max_line_length": 205, "num_lines": 41, "path": "/Entrega 4/Eigh/Double/Graficos_eigh_double.py", "repo_name": "scastrain/MCOC2021-P0-1", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\n\r\ndef graficar(Nombres):\r\n # Eje y de tiempo transcurrido:\r\n yticks=[0.0001,0.001,0.01,0.1,1,10,60,600]\r\n yticks_text=[\"0.1 ms\",\"1ms\",\"10ms\",\"0.1 s\",\"1 s\",\"10 s\", \"1 min\", \"10 min\"]\r\n \r\n # Eje x tamaño de matriz:\r\n xticks=[10,20,50,100,200,500,1000,2000,5000,10000,20000]\r\n xticks_text=[\"10\",\"20\",\"50\",\"100\",\"200\",\"500\",\"1000\",\"2000\",\"5000\",\"10000\",\"20000\"]\r\n \r\n plt.figure()\r\n \r\n for nombre in nombres:\r\n data=np.loadtxt(nombre)\r\n Nm=data[:,0]\r\n dts=data[:,1]\r\n \r\n print(\"Ns: \", Nm)\r\n print (\"dts: \", dts)\r\n \r\n plt.loglog(Nm,dts.T,\"-o\", label=nombre)\r\n plt.ylabel(\"Tiempo transcurrido (s)\")\r\n plt.xlabel(\"Tamaño de matriz\")\r\n plt.grid(True)\r\n plt.title(\"Desempeño de Eigh double\")\r\n \r\n plt.xticks(xticks,xticks_text, rotation=45)\r\n plt.yticks(yticks, yticks_text)\r\n \r\n plt.tight_layout()\r\n plt.legend(loc=2,prop={'size': 9}) \r\n plt.show()\r\n \r\n \r\nnombres=[\"A_eigh_caso_1.txt\",\"A_eigh_caso_2_F.txt\",\"A_eigh_caso_3_T.txt\",\"A_eigh_caso_4_F.txt\",\"A_eigh_caso_5_T.txt\",\"A_eigh_caso_6_F.txt\",\"A_eigh_caso_7_T.txt\",\"A_eigh_caso_8_F.txt\",\"A_eigh_caso_9_T.txt\"]\r\n\r\ngraficar(nombres)" }, { "alpha_fraction": 0.4330143630504608, "alphanum_fraction": 0.4678742289543152, "avg_line_length": 21.80487823486328, "blob_id": "8b451c1eb3860427d77aedc4edcca68a91af5f0e", "content_id": "78d6fcb2ec21fe705645b4d1e212f4acb88e2a86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2926, "license_type": "no_license", "max_line_length": 152, "num_lines": 123, "path": "/Entrega 6/Complejidad_computacional.py", "repo_name": "scastrain/MCOC2021-P0-1", "src_encoding": "UTF-8", "text": "import numpy as np\r\nfrom scipy import linalg\r\nfrom time import perf_counter\r\nfrom numpy.linalg import solve\r\nfrom scipy.sparse import lil_matrix, csc_matrix\r\nfrom scipy.sparse.linalg import spsolve, inv\r\n\r\n\r\ndef laplaciana_llena(N,t=np.double):\r\n A=np.identity(N,t)*2\r\n for i in range(N):\r\n for j in range (N):\r\n if i+1==j:\r\n A[i,j]=-1\r\n if i-1==j:\r\n A[i,j]=-1\r\n return A\r\n \r\n\r\ndef laplaciana_dispersa(N,t=np.double):\r\n A=lil_matrix((N,N))\r\n for i in range(N):\r\n for j in range (N):\r\n if i==j:\r\n A[i,j]=2\r\n if i+1==j:\r\n A[i,j]=-1\r\n if i-1==j:\r\n A[i,j]=-1\r\n return csc_matrix(A)\r\n\r\n\r\nNm= [2,5,10,16,32,60,130,260,550,1050,2100,3000,4000]\r\n\r\nNumcorridas=4\r\n\r\n\r\nfor i in range(Numcorridas):\r\n nombres= [f\"Complejidad_SOLVE_llena{i}.txt\",f\"Complejidad_SOLVE_dispersa{i}.txt\",f\"Complejidad_INV_llena{i}.txt\",f\"Complejidad_INV_dispersa{i}.txt\"]\r\n archivos=[open(nombre,\"w\") for nombre in nombres]\r\n \r\n for N in Nm:\r\n ensamblaje = np.zeros((len(archivos)))\r\n solucion = np.zeros ((len(archivos)))\r\n \r\n print (f\"N={N}\")\r\n \r\n \r\n #CASO 1: SOLVE- Matriz llena\r\n\r\n t1=perf_counter()\r\n\r\n A= laplaciana_llena(N)\r\n b = np.ones(N,dtype=np.double)\r\n t2=perf_counter()\r\n \r\n C =solve(A,b)\r\n t3=perf_counter()\r\n\r\n dt1 = t2 - t1\r\n dt2 = t3- t2\r\n \r\n ensamblaje[0]= dt1\r\n solucion[0]= dt2\r\n \r\n \r\n #CASO 2: 
SOLVE-Matriz dispersa\r\n\r\n t1=perf_counter()\r\n\r\n A= laplaciana_dispersa(N)\r\n b = np.ones(N,dtype=np.double)\r\n t2=perf_counter()\r\n \r\n C=spsolve(A,b)\r\n # c=np.linalg.solve\r\n t3=perf_counter()\r\n\r\n dt1= t2 - t1\r\n dt2= t3 - t2\r\n \r\n ensamblaje[1]= dt1\r\n solucion[1]= dt2\r\n \r\n \r\n #CASO 3: INV- Matriz llena:\r\n \r\n t1=perf_counter()\r\n \r\n A= laplaciana_llena(N)\r\n t2=perf_counter()\r\n \r\n A_inv= linalg.inv(A)\r\n t3=perf_counter()\r\n\r\n dt1= t2 - t1\r\n dt2= t3 - t2\r\n \r\n ensamblaje[2]= dt1\r\n solucion[2]= dt2\r\n \r\n \r\n #CASO 4: INV- Matriz dispersa \r\n \r\n t1=perf_counter()\r\n A= laplaciana_dispersa(N)\r\n t2=perf_counter()\r\n \r\n A_inv= inv(A)\r\n t3=perf_counter()\r\n\r\n dt1= t2 - t1\r\n dt2= t3 - t2\r\n \r\n ensamblaje[3]= dt1\r\n solucion[3]= dt2\r\n \r\n # Se agregan los resultados al archivo de texto:\r\n for j in range(len(archivos)):\r\n archivos[j].write(f\"{N} {ensamblaje[j]} {solucion[j]}\\n\")\r\n archivos[j].flush()\r\n \r\n[archivo.close()for archivo in archivos]" }, { "alpha_fraction": 0.43641912937164307, "alphanum_fraction": 0.5686673521995544, "avg_line_length": 18.4375, "blob_id": "a43be338df04c585228fb9da226e2e74128b11c6", "content_id": "24ef2b34ac06c71d65c96abf8aa063070973afd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 984, "license_type": "no_license", "max_line_length": 181, "num_lines": 48, "path": "/Entrega 2/timing_matmul.py", "repo_name": "scastrain/MCOC2021-P0-1", "src_encoding": "UTF-8", "text": "from numpy import zeros, float16, float32, float64\r\nfrom time import perf_counter\r\nimport matplotlib.pylab as plt\r\nimport random\r\n\r\n#Tamaño\r\nN = 1000\r\n\r\nNs = [1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 16, 20, 24, 29, 35, 42, 51, 62, 75, 100, 190, 200, 240, 330, 360, 400, 490, 500, 596, 719, 868, 1048, 1264, 1526, 1842, 2222, 2682, 3237, 3906]\r\n\"\"\"for i in range(27):\r\n\tj = random.randrange(5, 2000)\r\n\tNs.append(j)\r\n\r\n\r\nNs.sort()\r\nNs[0] = random.randint(1, 10)\"\"\"\r\n\r\n\r\ndts = []\r\nmems =[]\r\n\r\nfor i in range(10):\r\n\tfid = open(f\"rendimiento{i}.txt\", \"w\")\r\n\r\n\tfor N in Ns:\r\n\t\t\r\n\r\n\t\tA = zeros((N, N), dtype=float16 )+1\r\n\r\n\t\tB = zeros((N, N))+2\r\n\r\n\r\n\t\tt1 = perf_counter()\r\n\t\tC = A@B\r\n\t\tt2 = perf_counter()\r\n\r\n\t\tuso_memoria_total = A.nbytes + B.nbytes + C.nbytes\r\n\r\n\t\tdt = t2 - t1\r\n\r\n\t\tdts.append(dt)\r\n\t\tmems.append(uso_memoria_total)\r\n\r\n\t\tprint(f\"N = {N} dt = {dt} s mem = {uso_memoria_total} bytes flops = {N**3/dt} flops/s\")\r\n\t\t\r\n\t\tfid.write(f\"{N} {dt} {uso_memoria_total}\\n\")\r\n\r\n\tfid.close()\r\n\r\n" }, { "alpha_fraction": 0.4736842215061188, "alphanum_fraction": 0.5513980388641357, "avg_line_length": 30.7702693939209, "blob_id": "5795c38722a7ee3caecb19d4caa9721d7f7568f9", "content_id": "55cb6c2dfd4a12dd6ddf42b83c5d5d86baedb951", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2433, "license_type": "no_license", "max_line_length": 134, "num_lines": 74, "path": "/Entrega 6/Solve/Gráfico_Solve_Llena.py", "repo_name": "scastrain/MCOC2021-P0-1", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef graficar(nombres):\r\n\r\n yticks=[0.0001,0.001,0.01,0.1,1,10,60,600]\r\n yticks_text=[\"0.1 ms\",\"1ms\",\"10ms\",\"0.1 s\",\"1 s\",\"10 s\", \"1 min\", \"10 min\"]\r\n \r\n \r\n xticks=[10,20,50,100,200,500,1000,2000,5000,10000,20000]\r\n 
xticks_text=[\"10\",\"20\",\"50\",\"100\",\"200\",\"500\",\"1000\",\"2000\",\"5000\",\"10000\",\"20000\"]\r\n xticks_text2=[]\r\n \r\n plt.figure() \r\n ensamblajey = []\r\n soluciony = []\r\n for nombre in nombres:\r\n data = np.loadtxt(nombre)\r\n \r\n Nm = data[:, 0]\r\n ensamblaje = data[:, 1]\r\n solucion = data[:, 2]\r\n \r\n \r\n ensamblaje_max = max(ensamblaje)\r\n Nmmax = max(Nm) \r\n solucion_max = max(solucion)\r\n\r\n ensamblajey= 0*Nm + ensamblaje_max\r\n soluciony=0*Nm + solucion_max\r\n \r\n plt.subplot(2,1,1)\r\n plt.title(\"Complejidad Solve Llena\")\r\n plt.loglog(Nm, ensamblaje.T, \"k-o\", alpha=0.4,markersize=3)\r\n plt.ylabel(\"Tiempo de ensamblado\")\r\n \r\n \r\n plt.subplot(2,1,2)\r\n plt.loglog(Nm, solucion.T, \"k-o\", alpha=0.4,markersize=3)\r\n plt.ylabel(\"Tiempo de solucion\")\r\n plt.xlabel(\"Tamaño matriz \")\r\n \r\n plt.subplot(2,1,1)\r\n plt.plot(Nm,ensamblajey, \"c--\") \r\n plt.loglog(Nm,Nm*(ensamblaje_max/Nmmax),\"y--\")\r\n plt.plot(Nm,Nm**2*(ensamblaje_max/Nmmax**2),\"g--\")\r\n plt.plot(Nm,Nm**3*(ensamblaje_max/Nmmax**3),\"r--\")\r\n plt.plot(Nm,Nm**4*(ensamblaje_max/Nmmax**4),\"m--\")\r\n plt.xticks(xticks,xticks_text2)\r\n plt.yticks(yticks, yticks_text)\r\n plt.ylim([0.000001, 600])\r\n plt.xlim([0, 20000])\r\n \r\n \r\n plt.subplot(2,1,2)\r\n plt.plot(Nm,soluciony, \"c--\",label=\"Constante\") \r\n plt.loglog(Nm,Nm*(solucion_max/Nmmax),\"y--\",label=\"O(N)\")\r\n plt.plot(Nm,Nm**2*(solucion_max/Nmmax**2),\"g--\",label=\"O(N^2)\")\r\n plt.plot(Nm,Nm**3*(solucion_max/Nmmax**3),\"r--\",label=\"O(N^3)\")\r\n plt.plot(Nm,Nm**4*(solucion_max/Nmmax**4),\"m--\",label=\"O(N^4)\")\r\n plt.xticks(xticks,xticks_text, rotation=45)\r\n plt.yticks(yticks, yticks_text)\r\n plt.ylim([0.000001, 600])\r\n plt.xlim([0, 20000])\r\n \r\n \r\n plt.tight_layout()\r\n plt.legend(loc=2,prop={'size': 8}) \r\n plt.show()\r\n \r\n \r\nnombres= [\"Complejidad_SOLVE_llena0.txt\",\"Complejidad_SOLVE_llena1.txt\",\"Complejidad_SOLVE_llena2.txt\",\"Complejidad_SOLVE_llena3.txt\"]\r\ngraficar(nombres) \r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6727800965309143, "alphanum_fraction": 0.7479668259620667, "avg_line_length": 56.65550231933594, "blob_id": "de73a811bcffc2c7f0648395a92714c0e305e462", "content_id": "692fdb4040d38e1f81b30ec1af4646ac44732233", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 12192, "license_type": "no_license", "max_line_length": 669, "num_lines": 209, "path": "/README.md", "repo_name": "scastrain/MCOC2021-P0-1", "src_encoding": "UTF-8", "text": "# MCOC2021-P0-1\n\n# Mi computador principal\n\n* Marca/modelo: Acer A315-41\n* Tipo: Notebook\n* Año adquisición: 2019\n* Procesador:\n * Marca/Modelo: AMD Ryzen 5 3500U\n * Velocidad Base: 2.1 GHz\n * Velocidad Máxima: 3.7 GHz\n * Numero de núcleos: 4 \n * Humero de hilos: 8\n * Arquitectura: AMD \"Zen\"\n * Set de instrucciones: XFR, FMA3, SSE 4.2, AVX2, SMT\n* Tamaño de las cachés del procesador\n * L1: 128KB\n * L2: 512KB\n * L3: 4MB\n* Memoria \n * Total: 4 GB\n * Tipo memoria: DDR4\n * Velocidad 2400 MHz\n * Numero de (SO)DIMM: 4\n* Tarjeta Gráfica\n * Marca / Modelo: AMD Radeon Vega 8\n * Memoria dedicada: No tiene\n * Resolución: 1920 x 1080\n* Disco 1: \n * Marca: Kingston\n * Tipo: SSD\n * Tamaño: 480 GB\n * Particiones: 2\n * Sistema de archivos: SATA, Rev\n\n \n* Dirección MAC de la tarjeta wifi: 3C-91-80-93-76-7D\n* Dirección IP (Interna, del router): 192.168.0.21\n* Dirección IP (Externa, del ISP): 192.168.0.1\n* Proveedor internet: VTR Banda 
Ancha S.A.\n\n# Desempeño MATMUL\n* ![image](https://user-images.githubusercontent.com/88336928/128522536-330b3860-e223-4055-9f2f-75fad39c89d4.png)\n\n* ¿Cómo difiere del gráfico del profesor/ayudante?\nSe puede ver que difiere el tiempo de corrida, a diferencia del profesor la diferencia de tiempo (dts) para ciertos valores varian en ciertos puntos, como por ejemplo, al comienzo. Sobre el uso de la memoria ambos graficos son similares.\n\n* ¿A qué se pueden deber las diferencias en cada corrida?\nSe deben a la diferencia en el tipo de procesador, ya que este tiene un intervalo de ejecucion de tiempo distinto.\n\n* El gráfico de uso de memoria es lineal con el tamaño de matriz, pero el de tiempo transcurrido no lo es ¿porqué puede ser?\nEsto ocurre ya qe cada vez que se ejecuta el código se realiza todo nuevamente, por lo que en cada iteración va a variar por algunos segundo. Esto puede depender de otros programas que esten abiertos al mismo tiempo. En cambio el gráfico de memoria es linea, ya que miestras mas grande la matriz más bytes se utilizaran. \n\n* ¿Qué versión de python está usando?\nversión 3.8\n\n* ¿Qué versión de numpy está usando?\nversión 1.20.1\n\n* Durante la ejecución de su código ¿se utiliza más de un procesador? Muestre una imagen (screenshot) de su uso de procesador durante alguna corrida para confirmar.\n![image](https://user-images.githubusercontent.com/88336928/128525360-e352486f-f0e7-4fe7-abbf-c5ee546bd0b3.png)\nSe utilizan los 4 procesadores y 8 hilos.\n\n# Desempeño de INVERSA\n* Para cada tipo de dato se puede ver en los gráficos:\n![Caso 1 double](https://user-images.githubusercontent.com/88336928/129896762-00aa975c-b884-4b8d-8cef-407cf3af7801.png)\n![Caso 1 single](https://user-images.githubusercontent.com/88336928/129896813-b6cbae76-187a-4b11-99d0-710676f398e1.png)\n![Caso 2 double](https://user-images.githubusercontent.com/88336928/129896839-9dcdf194-5f48-4e94-9843-53ce633d7a29.png)\n![Caso 2 half](https://user-images.githubusercontent.com/88336928/129896861-2ba295c5-889e-4604-8d5d-c7313f8b86a2.png)\n![Caso 2 longdouble](https://user-images.githubusercontent.com/88336928/129896886-86e01d75-bc05-46b6-b010-38c40146fb4f.png)\n![Caso 2 single](https://user-images.githubusercontent.com/88336928/129896906-8c5ea3d3-1eff-4cb4-b176-f5e70db521af.png)\n![Caso 3 double](https://user-images.githubusercontent.com/88336928/129896934-c2c50294-7b5c-45d0-a3bc-ae5eabcc1fce.png)\n![Figure_1](https://user-images.githubusercontent.com/88336928/129896957-26168bd6-b2e4-459b-ae71-dc24c89108f9.png)\n![Caso 3 Longdouble](https://user-images.githubusercontent.com/88336928/129896977-421f9a1b-9b8b-4d0c-95b5-6b5292313bbc.png)\n![Caso 3 single](https://user-images.githubusercontent.com/88336928/129896995-a42fdac8-f09a-4280-be6e-cdd2a509cdee.png)\n\nPara el caso 1 half y longdouble, no se logra ejecutar.\nAdemás se puede ver que se utiliza más memoria a medida que el tamaño de la matriz aumenta.\n\n* ¿Qué algoritmo de inversión cree que utiliza cada método (ver wiki)?\nSe utiliza el algoritmo de Laplacian Matrix, este es un algoritmo con complejidad factorial, lo cual hace que resolver el sistema mediante este método no sea la mejor opción, debido a su demora. Matriz donde las columnas representan a las aristas del grafo y las filas a los vértices. El elemento (i,j) representa que la arista i incide en el vértice j. La diagonal esta compuesta por 2, y las diagonales adyacentes superior e inferior a la central estan compuestas por -1. 
Además se utiliza la Invertible Matrix, para el caso 1 se utiliza la libreria de Numpy y en el caso 2 y 3 se utiliza la libreria Scipy. En ambos casos se resuelve un sistema lineal de ecuaciones.\n\n* ¿Como incide el paralelismo y la estructura de caché de su procesador en el desempeño en cada caso? Justifique su comentario en base al uso de procesadores y memoria observado durante las corridas. \nEl paralelismo es una función que realiza el procesador para ejecutar varias tareas al mismo tiempo, realizando varios cálculos simultáneamente (paralelismo de datos). Los sistemas informáticos suelen hacer uso de cachés, ubicados cerca del procesador que almacenan las copias temporales de los valores de la memoria. Como se mencionó anteriormente el caso que presentó menos desempeño utilizando otras aplicaciones o programas mientras se corria el código fue el caso 1.\n\n\n# Desempeño de Solve y Eigh\n* Haga un comentario completo respecto de todo lo que ve en términos de desempeño en cada problema. \n-\tDesempeño timing solve: para este caso se logo generar una matriz máxima de 2000x2000, no se ogra llegar a una de 10000x10000 ya que el rendimiento de mi computador no permite que esto ocurra en menos de 2 minutos. \n-\tDesempeño timing eigh float y double: para este caso se logo generar una matriz máxima de 800x800, no se ogra llegar a una de 10000x10000. Se puede ver que este es más rápido que el desempeño de eigh double.\n\n* ¿Como es la variabilidad del tiempo de ejecución para cada algoritmo? \n-\tTiene un incremento exponencial a medida que aumentan las matrices. \n* ¿Qué algoritmo gana (en promedio) en cada caso? \n-\tDesempeño timing solve: A_invB_soSolve_pos, la cual es la matriz definida positiva.\n-\tDesempeño timing eigh float: A_eigh_caso_5_T la cual tiene overwrite_a=True y driver=\"evd\"\n-\tDesempeño timing eigh double: : A_eigh_caso_7_T la cual tiene overwrite_a=True y driver=\"evr\n\n* ¿Depende del tamaño de la matriz? \n-\tSi depende del tamaño de la matriz, ya que a mayor matriz el desempeño será “peor”.\n* ¿A que se puede deber la superioridad de cada opción? \n-\tSe debe a la cantidad de acciones que se le pide a cada opción, las cuales difieren del set up por default. \n¿Su computador usa más de un proceso por cada corrida? \n-\tSi, utiliza los cuatro procesadores.\n-\t![image](https://user-images.githubusercontent.com/88336928/130275105-ead7e442-9320-4eda-9b71-bb3f1458d69a.png)\n \n* ¿Que hay del uso de memoria (como crece)? 
\n- A medida que los programas corren, el uso de memoria se mantiene constante.\n![image](https://user-images.githubusercontent.com/88336928/130275128-7e86d41a-9eeb-4f3a-b8ed-b91403b9c865.png)\n\n# Matrices dispersas y complejidad computacional\n* Se realizaron 10 corridas con matrices crecientes hasta un N=4500, se observo que a medida que disminuinan las corridas, el tamaño de matriz podia ser mayor (para 280 s de corrida), pero se decidio mantener la cantidad de corridas que fueron indicadas en las entregas anteriores.\n* Caso 1: Complejidad algorítmica de MATMUL\nSe genero el gráfico para la multiplicación de matrices Laplacianas utilizando el formato matriz llena con datos tipo double:\n![Llena](https://user-images.githubusercontent.com/88336928/130873116-feed7b4a-5d6a-4569-9fc9-5bc1bab0dfd5.png)\nLuego se repitio el mismo procedimiento para matrices dispersas:\n![Dispersa](https://user-images.githubusercontent.com/88336928/130873857-14301980-19a1-4ec9-b3ec-750f3ebc48dd.png)\nSe puede observar que el tiempo de ensamblado es similar, pero se pueden notar diferencias en el tiempo de solucion. Para mayor tamaño de matriz la llena demora más de un segundo, en cambio la dispersa demora menos de 0.1 s.\n* Código de ensamblaje:\n- Matriz llena:\n```\nfrom scipy.sparse import lil_matrix, csc_matrix\nfrom scipy.sparse.linalg import spsolve, inv\nimport numpy as np\n\n def laplaciana_llena(N,t=np.double):\n A=np.identity(N,t)*2\n for i in range(N):\n for j in range (N):\n if i+1==j:\n A[i,j]=-1\n if i-1==j:\n A[i,j]=-1\n return A \n``` \n- Matriz dispersa:\n```\ndef laplaciana_dispersa(N,t=np.double):\n A=lil_matrix((N,N))\n for i in range(N):\n for j in range (N):\n if i==j:\n A[i,j]=2\n if i+1==j:\n A[i,j]=-1\n if i-1==j:\n A[i,j]=-1\n return csc_matrix(A) \n ```\n\n# Matrices dispersas y complejidad computacional\n\n![Inv_Dispersa](https://user-images.githubusercontent.com/88336928/132034277-df108a87-119d-4d94-8c8c-c5cc3aa63aad.png)\n![Inv_Llena](https://user-images.githubusercontent.com/88336928/132034283-944d76e6-e636-4e78-9f59-242c86492777.png)\n![Solve_Dispersa](https://user-images.githubusercontent.com/88336928/132034297-59e05f8d-cfbf-42ec-b979-ead8cc021f14.png)\n![Solve_Llena](https://user-images.githubusercontent.com/88336928/132034300-c1c0f10f-f417-4ba3-bb93-1f1936a5fa69.png)\n\n* Comente las diferencias que ve en el comportamiento de los algoritmos en el caso de matrices llenas y dispersas.\n \n Caso Solve: se puede ver como el tiempo de ensamblado es similar para ambos casos, en cambio el tiempo se solución tiene mayores variaciones. Además, para el caso de matriz llena, se ven discontinuidades a lo largo de las corridas.\n\n Caso Inv: se puede ver como el tiempo de ensamblado varia, la matriz dispersa se demora más que la llena. El tiempo se solución tiene también tiene variaciones, donde se puede ver un salto en el caso de matriz llena.\n\n* ¿Cual parece la complejidad asintótica (para N→∞N→∞) para el ensamblado y solución en ambos casos y porqué?\n \n Caso Solv: Para el tiempo de ensamblado se puede ver un comportamiento de complejidad asintótica correspondiente a O(N2), para ambos casos. Para el tiempo de solución se puede ver un comportamiento de complejidad asintótica correspondiente a O(N) para la matriz dispersa y O(N2) para la matriz llena.\n\n Caso Inv: Para el tiempo de ensamblado se puede ver un comportamiento de complejidad asintótica correspondiente a O(N2), para ambos casos. 
Para el tiempo de solución se puede ver un comportamiento de complejidad asintótica correspondiente a O(N) para la matriz dispersa y O(N2) para la matriz llena.\n\n* ¿Como afecta el tamaño de las matrices al comportamiento aparente?\n \n Cuando se tiene un N menor el tiempo de ensamblado y de solución es mayor, esto se debe al paralelismo, ya que el computador esta haciendo muchas acciones en un inicio. Además, el tiempo se solución se muestra variable a excepción de la matriz inversa dispersa. \n \n* ¿Qué tan estables son las corridas (se parecen todas entre si siempre, nunca, en un rango)?\n El tiempo de ensamblado tiene corridas más estables, en cambio el tiempo de solución se muestra muy variable, por lo tanto, es más inestable para ambos casos.\n \n * Código de ensamblaje:\n\nMatriz llena:\n```\nfrom scipy.sparse import lil_matrix, csc_matrix\nfrom scipy.sparse.linalg import spsolve, inv\nimport numpy as np\n\n def laplaciana_llena(N,t=np.double):\n A=np.identity(N,t)*2\n for i in range(N):\n for j in range (N):\n if i+1==j:\n A[i,j]=-1\n if i-1==j:\n A[i,j]=-1\n return A \n``` \n- Matriz dispersa:\n```\ndef laplaciana_dispersa(N,t=np.double):\n A=lil_matrix((N,N))\n for i in range(N):\n for j in range (N):\n if i==j:\n A[i,j]=2\n if i+1==j:\n A[i,j]=-1\n if i-1==j:\n A[i,j]=-1\n return csc_matrix(A) \n ```\n El codigo de matrices llenas es mucho más comodo que el de dispersas. El segundo caso se demoro menos, pero no fue el más optimo al momento de ensamblar la matriz.\n" }, { "alpha_fraction": 0.45969125628471375, "alphanum_fraction": 0.5634648203849792, "avg_line_length": 18.068965911865234, "blob_id": "a2c445bcad1b966c2a0b717bd1f2e7bbef596a87", "content_id": "8c6024a7041630fee5e8b44b2935e4deccf21d15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1167, "license_type": "no_license", "max_line_length": 84, "num_lines": 58, "path": "/Entrega 3/Casos 3/half/timing_inv_caso3_half.py", "repo_name": "scastrain/MCOC2021-P0-1", "src_encoding": "UTF-8", "text": "from numpy import zeros, float16, float32, float64\r\nfrom time import perf_counter\r\nimport matplotlib.pylab as plt\r\nimport random\r\nfrom scipy import linalg\r\nimport numpy as np\r\n\r\ndef laplaciana_half(N, dtype=np.half):\r\n\tA = zeros((N, N), dtype=dtype)\r\n\r\n\tfor i in range(N):\r\n\t\tA[i,i] = 2\r\n\t\tfor j in range(max(0,i-2),i):\r\n\t\t\tif abs(i-j) == 1:\r\n\t\t\t\tA[i,j] = -1\r\n\t\t\t\tA[j,i] = -1\r\n\r\n\treturn A\r\n\r\n#Tamaño\r\n\r\ntitulo = \"caso 3 half\"\r\na = float16\r\n\r\nNs = [1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 16, 20, 24, 29, 35, 42, 51, \r\n62, 75, 100, 190, 200, 240, 330, 360, 400, 490, 500, 540, 596, 719, 868, 900, 1048, \r\n1264, 1526, 1842, 2222, 2500]\r\n\r\ndts = []\r\nmems =[]\r\n\r\nfor i in range(10):\r\n\tfid = open(f\"rendimiento {titulo}{i}.txt\", \"w\")\r\n\r\n\tfor N in Ns:\r\n\t\t\r\n\t\t\r\n\t\tA = laplaciana_half(N, dtype=a)\r\n\t\tt1 = perf_counter()\r\n\t\t\r\n\t\tAm1 = linalg.inv(A, overwrite_a=True)\r\n\r\n\t\tt2 = perf_counter()\r\n\t\t\r\n\t\tdt = t2 - t1\r\n\t\t#print(f\"{dt}\")\r\n\t\t#exit(0)\r\n\r\n\t\tbytes_total = A.nbytes + Am1.nbytes\r\n\r\n\t\tdts.append(dt)\r\n\t\tmems.append(bytes_total)\r\n\r\n\t\tprint(f\"N = {N} dt = {dt} s mem = {bytes_total} bytes flops = {N**3/dt} flops/s\")\r\n\t\t\r\n\t\tfid.write(f\"{N} {dt} {bytes_total}\\n\")\r\n\r\n\tfid.close()\r\n\r\n" } ]
11
andykit/add-mask-and-goggle
https://github.com/andykit/add-mask-and-goggle
e9900080432136317d2e0e82d79d4bf666edcddb
e3a80fb6d73110114c278867756c196f7063ebc3
f8d7f6b50c79720cade445d4c1227fe0051de759
refs/heads/master
2022-08-21T13:18:50.098206
2020-05-27T16:50:05
2020-05-27T16:50:05
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5431527495384216, "alphanum_fraction": 0.5812687277793884, "avg_line_length": 37.27083206176758, "blob_id": "1ba9284a695af839894b306e6683b67ebf7aa1ce", "content_id": "80ad3b0748beb6525efa88d4ae420f15065d1ed3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3673, "license_type": "permissive", "max_line_length": 84, "num_lines": 96, "path": "/add.py", "repo_name": "andykit/add-mask-and-goggle", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\nName: add.py\nAuthor: Evi1ran\nDate Created: January 24, 2020\nDescription: None\n'''\n\n# built-in imports\nimport cv2\nimport dlib\nimport numpy as np\nimport os\n\ndef add(path, filename, mode, isGoggle):\n img1 = cv2.imread(path)\n if (isGoggle):\n x_min, x_max, y_min, y_max, size = get_eye(img1)\n img2 = cv2.imread('masks/goggle.png', cv2.IMREAD_UNCHANGED)\n img2 = cv2.resize(img2,size)\n alpha_channel = img2[:, :, 3]\n _, mask = cv2.threshold(alpha_channel, 220, 255, cv2.THRESH_BINARY)\n color = img2[:, :, :3]\n img2 = cv2.bitwise_not(cv2.bitwise_not(color, mask=mask))\n rows,cols,channels = img2.shape\n roi = img1[y_min: y_min + rows, x_min:x_min + cols]\n img2gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)\n ret, mask = cv2.threshold(img2gray, 254, 255, cv2.THRESH_BINARY)\n mask_inv = cv2.bitwise_not(mask)\n img1_bg = cv2.bitwise_and(roi,roi,mask = mask)\n img2_fg = cv2.bitwise_and(img2,img2,mask = mask_inv)\n dst = cv2.add(img1_bg,img2_fg)\n img1[y_min: y_min + rows, x_min:x_min + cols] = dst\n x_min, x_max, y_min, y_max, size = get_mouth(img1)\n img2 = cv2.imread('masks/mask' + str(mode) + '.png', cv2.IMREAD_UNCHANGED) \n img2 = cv2.resize(img2,size)\n alpha_channel = img2[:, :, 3]\n _, mask = cv2.threshold(alpha_channel, 220, 255, cv2.THRESH_BINARY)\n color = img2[:, :, :3]\n img2 = cv2.bitwise_not(cv2.bitwise_not(color, mask=mask))\n rows,cols,channels = img2.shape\n roi = img1[y_min: y_min + rows, x_min:x_min + cols]\n img2gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)\n ret, mask = cv2.threshold(img2gray, 254, 255, cv2.THRESH_BINARY)\n mask_inv = cv2.bitwise_not(mask)\n img1_bg = cv2.bitwise_and(roi,roi,mask = mask)\n img2_fg = cv2.bitwise_and(img2,img2,mask = mask_inv)\n dst = cv2.add(img1_bg,img2_fg)\n img1[y_min: y_min + rows, x_min:x_min + cols] = dst\n img_processed = \"static/output/\" + filename\n cv2.imwrite(img_processed, img1)\n output = img_processed\n return output\n\ndef get_mouth(img):\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n detector = dlib.get_frontal_face_detector()\n predictor = dlib.shape_predictor('models/shape_predictor_68_face_landmarks.dat')\n faces = detector(img_gray, 0)\n for k, d in enumerate(faces):\n x = []\n y = []\n height = d.bottom() - d.top()\n width = d.right() - d.left()\n shape = predictor(img_gray, d)\n for i in range(48, 68):\n x.append(shape.part(i).x)\n y.append(shape.part(i).y)\n y_max = (int)(max(y) + height / 3)\n y_min = (int)(min(y) - height / 3)\n x_max = (int)(max(x) + width / 3)\n x_min = (int)(min(x) - width / 3)\n size = ((x_max-x_min),(y_max-y_min))\n return x_min, x_max, y_min, y_max, size\n\ndef get_eye(img):\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n detector = dlib.get_frontal_face_detector()\n predictor = dlib.shape_predictor('models/shape_predictor_68_face_landmarks.dat')\n faces = detector(img_gray, 0)\n for k, d in enumerate(faces):\n x = []\n y = []\n height = d.bottom() - d.top()\n width = d.right() - d.left()\n shape = 
predictor(img_gray, d)\n for i in range(36, 48):\n x.append(shape.part(i).x)\n y.append(shape.part(i).y)\n y_max = (int)(max(y) + height / 3)\n y_min = (int)(min(y) - height / 3)\n x_max = (int)(max(x) + width / 3)\n x_min = (int)(min(x) - width / 3)\n size = ((x_max-x_min),(y_max-y_min))\n return x_min, x_max, y_min, y_max, size" }, { "alpha_fraction": 0.6553459167480469, "alphanum_fraction": 0.6893081665039062, "avg_line_length": 24.238094329833984, "blob_id": "b4f11b1d431a8300839ab5c71e2af50b4f833a22", "content_id": "56eff0bddc81d5c70e0bb2821636c3c5e7a2c1d6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2058, "license_type": "permissive", "max_line_length": 330, "num_lines": 63, "path": "/README.md", "repo_name": "andykit/add-mask-and-goggle", "src_encoding": "UTF-8", "text": "# 给头像添加口罩及护目镜\n\n[![Python 3.7](https://img.shields.io/badge/python-3.7-blue.svg)](https://www.python.org/) [![Flask](https://img.shields.io/badge/flask-v1.1.1-blue)](https://pypi.org/project/Flask/) [![License](https://img.shields.io/github/license/Evilran/add-mask-and-goggle)](https://github.com/Evilran/add-mask-and-goggle/blob/master/LICENSE)\n\n珍爱生命,为预防2020新型冠狀病毒肺炎,请积极佩戴口罩及护目镜。\n\n此项目使用人脸识别自动给头像添加口罩及护目镜,仅为呼吁大家积极佩戴口罩及护目镜,为武汉及奋斗在第一线的医护人员加油!\n\n依赖🐍\n------------------------------------------------------------------\n\n- numpy==1.17.4\n- Flask>=1.0.0\n- requests==2.22.0\n- opencv-python==4.0.0.21\n- dlib==19.17.99\n\n用法😷\n---\n\n仅需一个命令即可简单地运行Web服务器:\n\n```\n$ python3 server.py\n```\n\n然后访问: **127.0.0.1:5000** (端口 5000).\n\n这里支持两种模式,一种是输入URL地址,另外一种是直接上传图片:\n\n![image](https://github.com/Evilran/add-mask-and-goggle/blob/master/images/url.png)\n\n![image](https://github.com/Evilran/add-mask-and-goggle/blob/master/images/upload.png)\n\n\n\n目前口罩支持以下几种类型:\n\n![image](https://github.com/Evilran/add-mask-and-goggle/blob/master/images/mask.png)\n\n## 举个栗子🌰\n\n***原图:***\n\n![image](https://github.com/Evilran/add-mask-and-goggle/blob/master/test/grace_hopper.bmp)\n\n***添加口罩及护目镜:***\n\n![image](https://github.com/Evilran/add-mask-and-goggle/blob/master/images/grace_hopper.bmp)\n\n***原图:***\n\n![image](https://github.com/Evilran/add-mask-and-goggle/blob/master/test/i064qa-mn.jpg)\n\n***添加口罩:***\n\n![image](https://github.com/Evilran/add-mask-and-goggle/blob/master/images/i064qa-mn.jpg)\n\n## 感谢🙏\n\n感谢奋斗在第一线的医护人员,感谢春运中的逆行者!\n\n口罩及护目镜素材来自:[[maskon-wuhan](https://github.com/izumiwing/maskon-wuhan)]@[izumiwing](https://github.com/izumiwing)\n" }, { "alpha_fraction": 0.40963855385780334, "alphanum_fraction": 0.6746987700462341, "avg_line_length": 15.600000381469727, "blob_id": "6314432081f7b2e79a09f47274059d14482ff3cb", "content_id": "23307bebd879f61cc584c5c8a1a17c3f126a40f7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 83, "license_type": "permissive", "max_line_length": 23, "num_lines": 5, "path": "/requirements.txt", "repo_name": "andykit/add-mask-and-goggle", "src_encoding": "UTF-8", "text": "numpy==1.17.4\nFlask>=1.0.0\nrequests==2.22.0\nopencv-python==4.0.0.21\ndlib==19.17.99\n" }, { "alpha_fraction": 0.5558781027793884, "alphanum_fraction": 0.562046468257904, "avg_line_length": 31.821428298950195, "blob_id": "595ea6ea339ec7e8d669d138e343c672596be9c9", "content_id": "1b57f2d48bc271181137c6268cdb798d0a58f938", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2828, "license_type": "permissive", "max_line_length": 82, "num_lines": 
84, "path": "/server.py", "repo_name": "andykit/add-mask-and-goggle", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\nName: server.py\nAuthor: Evi1ran\nDate Created: November 06, 2019\nDescription: None\n'''\n\n# built-in imports\nimport os\nimport re\nimport requests\n\n# third-party imports\nfrom flask import Flask\nfrom flask import request\nfrom flask import render_template\nfrom datetime import timedelta\nfrom add import add\n\napp = Flask(__name__)\n# Cancel image caching\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = timedelta(seconds=1)\nALLOWED_EXTENSIONS = set(['bmp', 'png', 'jpg', 'jpeg'])\nUPLOAD_FOLDER=r'./cache/'\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\[email protected]('/', methods=['GET', 'POST'])\ndef index():\n return render_template('index.html')\n\[email protected]('/url', methods=['GET', 'POST'])\ndef url():\n if request.method == 'POST':\n url = request.form['url']\n mode = (int)(request.form['mask'])\n isGoggle = request.form.get('goggle')\n if re.match(r'^https?:/{2}\\w.+$', url): \n if allowed_file(url):\n filename = url.split('/')[-1]\n path = os.path.join(app.config['UPLOAD_FOLDER'], filename) \n r = requests.get(url)\n if r.status_code == 200:\n c = r.content\n if not c.startswith(b'<!DOCTYPE html>'):\n with open(path, 'wb') as f:\n f.write(c)\n output = add(path, filename, mode, isGoggle)\n return render_template('index.html', output = output)\n else:\n return render_template('index.html', alert = 'URL地址无法识别!')\n else:\n return render_template('index.html', alert = 'URL地址不能访问!')\n else:\n return render_template('index.html', alert = 'URL地址不是图片!')\n else:\n return render_template('index.html', alert = 'URL格式错误!')\n\n else:\n return render_template('index.html')\n\[email protected]('/add', methods=['GET', 'POST'])\ndef search():\n if request.method == 'POST':\n file = request.files['image']\n mode = (int)(request.form['mask'])\n isGoggle = request.form.get('goggle')\n if file and allowed_file(file.filename):\n path = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)\n file.save(path)\n output = add(path, file.filename, mode, isGoggle)\n return render_template('index.html', output = output)\n else:\n return render_template('index.html', alert = '文件类型必须是图片!')\n else:\n return render_template('index.html')\n\nif __name__ == '__main__':\n app.run()" } ]
4
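The add.py overlay above composites a 4-channel PNG onto the photo by thresholding the alpha channel into a binary mask. Below is a minimal independent sketch of the same compositing step (the `blend_png` helper and file paths are my own illustrations, not part of the repository); it uses the alpha channel directly as a blend weight, which keeps the PNG's soft edges instead of the hard cutoff the threshold trick produces:

```python
import cv2
import numpy as np

def blend_png(photo_bgr, png_rgba, x, y):
    # Alpha-blend png_rgba onto photo_bgr with its top-left corner at (x, y).
    h, w = png_rgba.shape[:2]
    roi = photo_bgr[y:y + h, x:x + w].astype(np.float32)
    color = png_rgba[:, :, :3].astype(np.float32)
    # Shape (h, w, 1) so the weight broadcasts across the three BGR channels.
    alpha = png_rgba[:, :, 3:4].astype(np.float32) / 255.0
    photo_bgr[y:y + h, x:x + w] = (alpha * color + (1.0 - alpha) * roi).astype(np.uint8)
    return photo_bgr

photo = cv2.imread("face.jpg")                                 # placeholder input
goggle = cv2.imread("masks/goggle.png", cv2.IMREAD_UNCHANGED)  # keeps the alpha channel
goggle = cv2.resize(goggle, (160, 60))
cv2.imwrite("out.jpg", blend_png(photo, goggle, 40, 80))
```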
HengyueLi/Fortran_Install
https://github.com/HengyueLi/Fortran_Install
aa726a89b0d053dda81a32de7c65733842358d25
8a84d561afd204eecc6fa97767ed91fbf98cdbeb
d3ce3658a996a69b419e47dfa6a68e204563458a
refs/heads/master
2021-04-15T06:59:49.426151
2018-03-30T03:29:15
2018-03-30T03:29:15
126,914,276
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7509578466415405, "alphanum_fraction": 0.7593869566917419, "avg_line_length": 58.318180084228516, "blob_id": "9d86203d0abb035f56b3a2112dbc0c576b351124", "content_id": "1cd6cbc6d41e54874f686cee91fd07f30f21ecda", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2610, "license_type": "permissive", "max_line_length": 535, "num_lines": 44, "path": "/README.md", "repo_name": "HengyueLi/Fortran_Install", "src_encoding": "UTF-8", "text": "# License\nPlease ignore all the License information IN THE SOURCE CODE.\n# Documents\nRecently the documents are not well prepared. The module can do much more than what has been shown in the example code. If one has some problem with how to use it, please leave messages.\n# Preparation --python3.\nThe code used the syntax of Fortran(2003). Therefore an newly version of fortran compiler is required.\nEven though there is only one f90 file in the project, there are (maybe) many dependencies. One should follow steps. This code, in principle, can be used in any OS. But here I write a python script to make the code become easier to use. So a python3 intepretation should have been installed. This script, in principle, can only run on a Unix-like OS. If you use windows, try to make command tool to be used ( basically to make sure the command like:\n\n gfortran main.f90 -o run.exe\n can be called.). If you do not use the script, you can abstract all the f90 file in /Depe (recursively).\n\n# Structure of the code.\nIn the project one may see 5 folders: /Depe, /Incl, /Mods, /Ofil and /Install. When download the project, one need to make sure all the dependencies in /Depe are also downloaded recursively. Otherwise it maybe empty. Especially, there are always some in /Install. If it is empty, one have not downloaded it correctly. The problem may be cuased by the submodule in git. Consider use something like 'git clone --recursive YOUR-GIT-REPO-URL' or other software to download it. Or search 'How to git clone including submodules' on internet.\n\n# Compile\nDirectly run\n\n python3 compile.py\n\n in command line where compile.py is in /Install. In such case, a defult choice of compiler (ifort) is assumed. If you want to use other compiler, say gfortran:\n\n python3 compile.py -c gfortran\n\nOne can check detailes of other options by\n\n python3 compile.py -h\n\n\n\n# How to use.\nAfter the compiling, one can found many '.o' files in /Ofil and '.mod' in /Mods. Only this object files can be used. For instance one may use the lib in his own code 'main.f90', then compile the code by command( ifort for example):\n\n ifort /Ofil/*.o -IMods main.f90 -o run.exe\n\nIn case some modules are lapack dependent, use:\n\n ifort /Ofil/*.o -IMods main.f90 -o run.exe -llapack\n\nOne should notice that the chosen compiler should be the same as the previous one in [Compile](https://github.com/HengyueLi/Fortran_Install#compile). 
The default chosen compiler is ifort as mentioned.\n\n# Obligation (will update):\nPeople whose research is benefited from this code would be asked to CONSIDER to cite the papers below:\n\n*\n" }, { "alpha_fraction": 0.5519769191741943, "alphanum_fraction": 0.5528653860092163, "avg_line_length": 28.233766555786133, "blob_id": "a522bb84e035dcfe4733d96f6e2d04ef9771a45c", "content_id": "931c7590b1eb80d95d1ee5ed573e83084659bb1f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4502, "license_type": "permissive", "max_line_length": 102, "num_lines": 154, "path": "/compile.py", "repo_name": "HengyueLi/Fortran_Install", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n\n\n\nimport argparse,sys,os\n#===============================================================================\n# get interpreter\npython = sys.executable\n#===============================================================================\n\n\n\n\n#===============================================================================\n# get arguments\n#===============================================================================\nparser = argparse.ArgumentParser()\n#--------add options\nparser.add_argument(\"--compiler\", \"-c\", help=\"Set compiler, defualt is ifort\" )\nparser.add_argument(\"--flag\" , \"-f\", help=\"Set flag to compiler\" )\nparser.add_argument(\"--clib\" , \"-l\", help=\"add lib when compilering\",nargs='*')\n\n# read arguments from the command line\nargs = parser.parse_args()\n#----------------initiate arguments---------------\nif args.compiler:\n print(\"Set compiler : %s\" % args.compiler)\n Compiler = args.compiler\nelse:\n #print(\"Set compiler : ifort (defult) \")\n Compiler = 'ifort'\n\nif args.flag:\n print(\"Set flag : %s\" % args.flag)\n Cflag = args.flag\nelse:\n Cflag = \" \"\n\nif args.clib:\n CLib = \" \".join([\" -l\"+jc for jc in args.clib])\nelse:\n CLib = \" \"\n\n\n\n\n\n\n\n\nCompiler = Compiler\nComFlag = Cflag\nPythonArg = \" \".join(sys.argv[1:])\n\n\n\nclass GetListDirectory():\n def __init__(self,dpath):\n self.path = dpath\n\n def GetlistdirName(self):\n return os.listdir(self.path)\n\n def GetlistdirNameNonehidden(self):\n l = self.GetlistdirName()\n r = []\n for jc in l:\n if not jc.startswith('.'):\n r.append(jc)\n return r\n\n def GetListdirNameBySuffix(self,suffix):\n r = []\n l = self.GetlistdirName()\n for jc in l:\n if jc.endswith(suffix):\n r.append(jc)\n return r\n\n def GetFullPath(self,NameList):\n r = []\n for jc in NameList:\n r.append(os.path.join(self.path, jc))\n return r\n\n\ndef MoveFileBySuffix(SourceFolder,DestinyFolder,suffix):\n sources = GetListDirectory(SourceFolder).GetListdirNameBySuffix(suffix)\n for jc in sources:\n os.rename( os.path.join(SourceFolder,jc) , os.path.join(DestinyFolder,jc) )\n\n\n\nFilename = os.path.basename(__file__)\nFilePath = os.path.abspath(__file__)\nInstPath = os.path.dirname(FilePath)\nProjPath = os.path.dirname(InstPath)\n\nModsPath = os.path.join(ProjPath,'Mods')\nOfilPath = os.path.join(ProjPath,'Ofil')\nInclPath = os.path.join(ProjPath,'Incl')\nDepePath = os.path.join(ProjPath,'Depe')\n\n\n\n#--------------------------------\n# pre-compile dependence\n\n# get list of subproject\nSubProjList = GetListDirectory(DepePath).GetlistdirNameNonehidden()\nfor pro in SubProjList:\n SubProPath = os.path.join(DepePath,pro)\n pyfile = os.path.join(SubProPath,'Install',Filename)\n #-----------------------------------------------------------\n # run python file in the sub 
project.\n os.system( python + \" \" + pyfile +\" \" + PythonArg )\n #-----------------------------------------------------------\n # move ofile to current/Ofiles folders\n SubOfil = os.path.join( SubProPath , 'Ofil' )\n MoveFileBySuffix(SourceFolder=SubOfil,DestinyFolder=OfilPath,suffix='.o')\n #-----------------------------------------------------------\n # move all '.mod' files to main project/Mods\n SubMod = os.path.join(SubProPath,'Mods')\n MoveFileBySuffix(SourceFolder=SubMod,DestinyFolder=ModsPath,suffix='.mod')\n\n\n\n\n#--------------------------------\n# scan all o-files\n# f = GetListDirectory(ModsPath)\n# DependentOfiles = f.GetFullPath( f.GetListdirNameBySuffix('.o') )\n# DependentOfilesString = \" \".join(DependentOfiles)\nf = GetListDirectory(OfilPath)\nDependentOfiles = f.GetFullPath( f.GetListdirNameBySuffix('.o') )\nDependentOfilesString = \" \".join(DependentOfiles)\n\n#--------------------------------\n# scan all include files\nf = GetListDirectory(InclPath)\nIflag = \" \".join( [\" -I\" + jc + \" \" for jc in f.GetFullPath( f.GetlistdirNameNonehidden() )] )\n\n#--------------------------------\n# compile\nos.chdir(ProjPath)\nCompileCommand = Compiler +\" -c \"+ ComFlag + \" -IMods \" + Iflag +DependentOfilesString+\" *.f90 \"+CLib\nos.system(CompileCommand)\n#--------------------------------\n# move mods into folder Mods\nMoveFileBySuffix(SourceFolder=ProjPath,DestinyFolder=ModsPath,suffix='.mod')\n#-------------------------------\n# move wanted ofile into Ofil\nMoveFileBySuffix(SourceFolder=ProjPath,DestinyFolder=OfilPath,suffix='.o')\n" } ]
2
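compile.py above recursively builds each submodule in /Depe, then collects the resulting '.o' and '.mod' files into /Ofil and /Mods before the final compile. A condensed sketch of that bookkeeping step (my own illustration with placeholder paths, using pathlib instead of the repository's GetListDirectory helper class):

```python
from pathlib import Path

def move_by_suffix(src: Path, dst: Path, suffix: str) -> None:
    # Move every file in src whose name ends with suffix into dst.
    dst.mkdir(parents=True, exist_ok=True)
    for f in src.glob("*" + suffix):
        f.rename(dst / f.name)

project = Path("Fortran_Install")                   # illustrative project root
move_by_suffix(project, project / "Mods", ".mod")   # compiled Fortran module files
move_by_suffix(project, project / "Ofil", ".o")     # object files used for linking
```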
CTimmerman/CalculAPI
https://github.com/CTimmerman/CalculAPI
623170309ba7b0ab0c7e83a211ea4d98b247c467
37d5adcca8a7c569f8fdc96634b966d9f8745627
737dfaa19e08077b6035293098838a0a98542411
refs/heads/main
2023-06-15T16:25:24.994796
2021-07-12T11:15:00
2021-07-12T11:15:00
379,378,031
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6772152185440063, "alphanum_fraction": 0.7278481125831604, "avg_line_length": 21.571428298950195, "blob_id": "3701bfbdcb5c6ea8b6a4fc97a1c5d04760c7b68d", "content_id": "798996dd698a8e3f9a0d28effb8fc666cc73daec", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 158, "license_type": "permissive", "max_line_length": 46, "num_lines": 7, "path": "/Dockerfile", "repo_name": "CTimmerman/CalculAPI", "src_encoding": "UTF-8", "text": "FROM alpine\nRUN apk add --no-cache python3 py3-pip\nWORKDIR /app\nCOPY . .\nRUN python3 -m pip install -r requirements.txt\nEXPOSE 8888\nCMD [\"python3\", \"app.py\"]\n" }, { "alpha_fraction": 0.5724681615829468, "alphanum_fraction": 0.597331702709198, "avg_line_length": 30.711538314819336, "blob_id": "728ce50163e203e749ce270cee4cdbf4e4d58ca9", "content_id": "d1381726ad3d8aae90ee498f8e32e37989224185", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1649, "license_type": "permissive", "max_line_length": 86, "num_lines": 52, "path": "/app.py", "repo_name": "CTimmerman/CalculAPI", "src_encoding": "UTF-8", "text": "\"\"\"A simple calculator JSON REST API.\"\"\"\n\nimport json, logging, math, re\nfrom wsgiref.simple_server import make_server\n\nfrom pyramid.config import Configurator\n\n\nVALID_PATTERN = (\n f\"^([ 0-9.()^|*/%+-]|{'|'.join(name for name in dir(math) if '_' not in name)})+$\"\n)\nPORT = 8888\n\n\ndef calc(request):\n \"\"\"\n >>> from pyramid.testing import DummyRequest\n >>> calc(DummyRequest(json_body={'q':'pi / (1+1)'}, method='POST')).body\n b'{\"result\": 1.5707963267948966}'\n >>> calc(DummyRequest(json_body={'q':'pi / (1+1) / hax'}, method='POST')).status\n '400 Bad Request'\n \"\"\"\n try:\n q_param = request.params.get(\"q\")\n if q_param:\n equation = json.loads(q_param)[\"q\"]\n else:\n equation = request.json_body[\"q\"]\n logging.debug(f\"Equation: {equation}\")\n if re.match(VALID_PATTERN, equation):\n result = eval(equation, math.__dict__)\n else:\n result = f\"Invalid equation. 
Should match {VALID_PATTERN}.\"\n request.response.status = 400\n except Exception as e:\n result = json.dumps(str(e))\n request.response.status = 400\n\n logging.debug(result)\n request.response.body = json.dumps({\"result\": result}).encode(\"utf8\")\n request.response.content_type = \"application/json\"\n return request.response\n\n\nif __name__ == \"__main__\":\n with Configurator() as config:\n config.add_route(\"calc\", \"/\")\n config.add_view(calc, route_name=\"calc\")\n app = config.make_wsgi_app()\n server = make_server(\"0.0.0.0\", PORT, app)\n logging.debug(f\"Listening on {PORT} for {VALID_PATTERN}\")\n server.serve_forever()\n" }, { "alpha_fraction": 0.5774359107017517, "alphanum_fraction": 0.6830769181251526, "avg_line_length": 30.45161247253418, "blob_id": "e3f6644c527e6b2fb5279378ee35c92e12823971", "content_id": "680d326e2a1e5971f2bcc1eaffa215cde47077dd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 975, "license_type": "permissive", "max_line_length": 375, "num_lines": 31, "path": "/README.md", "repo_name": "CTimmerman/CalculAPI", "src_encoding": "UTF-8", "text": "# CalculAPI\nSimple calculator JSON REST API.\n\n## Usage\n\nValid equations return code 200 and the result:\n\n http://localhost:8888/?q={\"q\":\"sqrt(e)-log(pi)\"}\n\n {\"result\": 0.503991384850728}\n\n http://localhost:8888/?q={\"q\":\"1 - 2 / 6\"}\n\n {\"result\": 0.6666666666666667}\n\nJSON via POST is easier via the terminal:\n\n curl 127.0.0.1:8888 -d {\"q\":\"1 - 2 / 6\"}\n\n {\"result\": 0.6666666666666667}\n\nInvalid equations return code 400 and the reason as result:\n\n http://localhost:8888/?q={\"q\":\"1 - 2 / b6\"}\n\n {\"result\": \"Invalid equation. Should match ^([ 0-9.()^|*/%+-]|acos|acosh|asin|asinh|atan|atan2|atanh|ceil|comb|copysign|cos|cosh|degrees|dist|e|erf|erfc|exp|expm1|fabs|factorial|floor|fmod|frexp|fsum|gamma|gcd|hypot|inf|isclose|isfinite|isinf|isnan|isqrt|ldexp|lgamma|log|log10|log1p|log2|modf|nan|perm|pi|pow|prod|radians|remainder|sin|sinh|sqrt|tan|tanh|tau|trunc)+$.\"}\n\n## Installation\n\n docker build calculapi .\n docker run -dp 8888:8888 calculapi\n" } ]
3
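A hypothetical client for the CalculAPI service above, assuming it is running locally on port 8888 as configured in app.py; the endpoint, JSON shape, and expected result come from the repository's README, while the requests-based client itself is my own sketch:

```python
import requests

# POST a JSON body of the form {"q": "<equation>"} to the root endpoint.
resp = requests.post("http://127.0.0.1:8888/", json={"q": "sqrt(e) - log(pi)"})
print(resp.status_code)       # 200 for a valid equation, 400 otherwise
print(resp.json()["result"])  # e.g. 0.503991384850728
```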
salten13/Challanges
https://github.com/salten13/Challanges
bc605e9065c3de07473dc8eefdb53f8b684d13fa
58829d22f8faa8140daea1d1ee5582fa834e510f
4535059395fca85c9f09411725f4e25cce714379
refs/heads/master
2021-01-23T00:44:36.550516
2017-05-30T18:12:36
2017-05-30T18:12:36
92,836,572
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4029850661754608, "alphanum_fraction": 0.49253731966018677, "avg_line_length": 21.33333396911621, "blob_id": "074c705fbd196325f46dfb160f9a743d5c7beba7", "content_id": "daedbf08d9c438e82747e52d839c6b716cc8276d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "no_license", "max_line_length": 33, "num_lines": 6, "path": "/L1/1.py", "repo_name": "salten13/Challanges", "src_encoding": "UTF-8", "text": "numbers = []\nfor x in range(2000,3201):\n numbers.append(x)\n if x % 7 == 0 and x % 5 != 0:\n print(x, end=',')\nprint('\\n')\n" } ]
1
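The 1.py exercise above collects the numbers in [2000, 3200] that are divisible by 7 but not by 5. An equivalent one-liner (my sketch, not part of the repository) using a generator expression:

```python
print(",".join(str(x) for x in range(2000, 3201) if x % 7 == 0 and x % 5 != 0))
```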
ShabbySam/cataract
https://github.com/ShabbySam/cataract
516a92b5b12ec7e03249cde643ea83d6e41853bc
af9185d2601dd79d74aa905fbf28a670297fedfe
f21fd5fcca60ed695a4190f8ed0a82865bb61aea
refs/heads/master
2023-03-16T03:19:13.697074
2017-12-14T03:45:03
2017-12-14T03:45:03
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5888773202896118, "alphanum_fraction": 0.5956341028213501, "avg_line_length": 30.032258987426758, "blob_id": "214eb1660417ea1deec2ec9695908483da048312", "content_id": "205a9be2cf211ddd89772a813dbaa2351759514f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1932, "license_type": "no_license", "max_line_length": 162, "num_lines": 62, "path": "/src/test/java/com/guazi/cataract/input/DFSchemaTest.java", "repo_name": "ShabbySam/cataract", "src_encoding": "UTF-8", "text": "package com.guazi.cataract.input;\n\nimport java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\n\nimport org.apache.spark.sql.Row;\nimport org.apache.spark.sql.types.DataTypes;\nimport org.apache.spark.sql.types.StructType;\n\nimport com.guazi.cataract.common.DFSchema;\n\nimport junit.framework.TestCase;\nimport net.minidev.json.JSONObject;\n\n/**\n * Unit test for simple App.\n */\npublic class DFSchemaTest \n extends TestCase\n{\n /**\n * Create the test case\n *\n * @param testName name of the test case\n */\n public DFSchemaTest( String testName )\n {\n super( testName );\n }\n\n /**\n * @return the suite of tests being tested\n */\n public void testJson2Row()\n {\n List<String> schemas = new ArrayList<>(Arrays.asList(\"d:parent_id\", \"d:city_name\", \"d:area_name\", \"d:fixed_city_id\", \"d:py\", \"d:city_level\", \"d:pinyin\"));\n \n JSONObject data = new JSONObject();\n data.put(\"d:area_name\", \"东北\");\n data.put(\"d:fixed_city_id\", \"1341\");\n data.put(\"d:py\", \"sy\");\n data.put(\"d:city_level\", \"A\");\n data.put(\"d:pinyin\", \"shenyang\");\n data.put(\"rowkey\", \"141414\");\n data.put(\"d:parent_id\", \"0\");\n data.put(\"d:city_name\", \"沈阳\");\n \n DFSchema schema = new DFSchema(schemas);\n Row row = schema.json2Row(data);\n System.out.println(row);\n StructType structType = new StructType()\n .add(\"parent_id\", DataTypes.StringType, true)\n .add(\"city_name\", DataTypes.StringType, true)\n .add(\"area_name\", DataTypes.StringType, true)\n .add(\"fixed_city_id\", DataTypes.StringType, true)\n .add(\"py\", DataTypes.StringType, true)\n .add(\"city_level\", DataTypes.StringType, true)\n .add(\"pinyin\", DataTypes.StringType, true);\n System.out.println(structType.fieldNames().length);\n }\n}\n" }, { "alpha_fraction": 0.5926327109336853, "alphanum_fraction": 0.6117009520530701, "avg_line_length": 38.11016845703125, "blob_id": "d22db9f76e6423a8d48bd1d430df5608f17f2dae", "content_id": "9697420aa2003cceb537feb9ab72339a4ed724ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4615, "license_type": "no_license", "max_line_length": 243, "num_lines": 118, "path": "/src/test/java/com/guazi/cataract/udf/JsonProcessorTest.java", "repo_name": "ShabbySam/cataract", "src_encoding": "UTF-8", "text": "package com.guazi.cataract.udf;\n\nimport net.minidev.json.JSONArray;\nimport net.minidev.json.JSONUtil;\nimport net.minidev.json.JSONValue;\nimport net.minidev.json.parser.JSONParser;\nimport org.apache.commons.io.IOUtils;\nimport org.junit.Assert;\nimport org.junit.Test;\n\nimport net.minidev.json.JSONObject;\n\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.util.*;\nimport java.util.regex.Matcher;\nimport java.util.regex.Pattern;\n\npublic class JsonProcessorTest {\n\n public JsonProcessorTest() {\n }\n \n @Test\n public void dateformatTest() {\n JsonProcessor processor = new JsonProcessor(new JSONObject(), new JSONObject());\n String fun = 
\"@DATEFORMAT($last_update_time, yyyy-MM-dd HH:mm:ss, yyyyMMdd)\";\n }\n\n @Test\n public void strftimeTest() {\n JsonProcessor processor = new JsonProcessor(new JSONObject(), new JSONObject());\n String time = processor.strftime(\"1471336343\", \"yyyy-MM-dd\");\n Assert.assertEquals(\"2016-08-16\", time);\n\n String stime = processor.strftime(\"1471336343\", \"yyyy-MM-dd hh:mm:ss\");\n Assert.assertEquals(\"2016-08-16 04:32:23\", stime);\n }\n\n public void processColTest() {\n JSONObject broadcast = new JSONObject();\n JSONObject dm1 = new JSONObject();\n dm1.put(\"key1\", \"val1\");\n dm1.put(\"key2\", \"val2\");\n JSONObject dm2 = new JSONObject();\n dm2.put(\"key3\", \"val3\");\n dm2.put(\"key4\", \"val4\");\n broadcast.put(\"dm1\", dm1);\n broadcast.put(\"dm2\", dm2);\n JsonProcessor processor = new JsonProcessor(new JSONObject(), broadcast);\n }\n\n public JSONObject jsonModify(JSONObject o) {\n JSONObject out = (JSONObject) o.clone();\n out.put(\"test\", \"in\");\n return out;\n }\n\n @Test\n public void jsonTest() {\n JSONObject obj = new JSONObject();\n obj.put(\"no\", \"no\");\n JSONObject nobj = jsonModify(obj);\n Assert.assertEquals(\"{\\\"no\\\":\\\"no\\\"}\", obj.toJSONString());\n Assert.assertEquals(\"{\\\"test\\\":\\\"in\\\",\\\"no\\\":\\\"no\\\"}\", nobj.toJSONString());\n }\n\n @Test\n public void parseUDFTest() {\n JsonProcessor jp = new JsonProcessor(new JSONObject(), new JSONObject());\n List<List<String>> list = jp.parseUDF(\"@STRFTIME($d:xxx , %y-%m-%d , strXXX)+ time +@INT ( $aa )\");\n Assert.assertArrayEquals(new Object[]{\"@STRFTIME\", \"$d:xxx\", \"%y-%m-%d\", \"strXXX\"}, list.get(0).toArray());\n Assert.assertArrayEquals(new Object[]{\"\", \"time\"}, list.get(1).toArray());\n Assert.assertArrayEquals(new Object[]{\"@INT\", \"$aa\"}, list.get(2).toArray());\n }\n\n @Test\n public void parseAllTest() {\n String demoJsonStr = \"{\\\"clue_id\\\":\\\"@INT($d:clue_id)\\\",\\\"create_time\\\":\\\"@STRFTIME($d:create_time, %Y-%m-%d)\\\",\\\"source_fk\\\":\\\"@STR($d:source_type) + $d:source_type_code\\\",\\\"location_fk\\\":\\\"@STR($d:city_id) + @STR($d:district_id)\\\"}\";\n JSONObject demoJson = (JSONObject) JSONValue.parse(demoJsonStr);\n JsonProcessor jp = new JsonProcessor(demoJson, new JSONObject());\n Map<String, List<List<String>>> map = jp.getColumnUDF();\n List<String> create_time_0 = new ArrayList<>();\n create_time_0.add(\"@STRFTIME\");\n create_time_0.add(\"$d:create_time\");\n create_time_0.add(\"%Y-%m-%d\");\n List<String> location_fk_0 = new ArrayList<>();\n location_fk_0.add(\"@STR\");\n location_fk_0.add(\"$d:city_id\");\n List<String> location_fk_1 = new ArrayList<>();\n location_fk_1.add(\"@STR\");\n location_fk_1.add(\"$d:district_id\");\n\n List<String> clue_id_0 = new ArrayList<>();\n clue_id_0.add(\"@INT\");\n clue_id_0.add(\"$d:clue_id\");\n List<String> source_fk_0 = new ArrayList<>();\n source_fk_0.add(\"@STR\");\n source_fk_0.add(\"$d:source_type\");\n List<String> source_fk_1 = new ArrayList<>();\n source_fk_1.add(\"\");\n source_fk_1.add(\"$d:source_type_code\");\n Assert.assertArrayEquals(new Object[]{create_time_0}, map.get(\"create_time\").toArray());\n Assert.assertArrayEquals(new Object[]{location_fk_0,location_fk_1}, map.get(\"location_fk\").toArray());\n Assert.assertArrayEquals(new Object[]{clue_id_0}, map.get(\"clue_id\").toArray());\n Assert.assertArrayEquals(new Object[]{source_fk_0,source_fk_1}, map.get(\"source_fk\").toArray());\n\n }\n\n private void checkVarsInUDF(String func,Pattern reg){\n 
Assert.assertEquals(func.contains(\"(\") , false);\n Assert.assertEquals(func.contains(\")\") , false);\n\n Matcher m = reg.matcher(func);\n Assert.assertEquals(m.matches() , true);\n }\n\n}\n" }, { "alpha_fraction": 0.5849031805992126, "alphanum_fraction": 0.5903232097625732, "avg_line_length": 36.917293548583984, "blob_id": "c5b26c159749be45a3714df740892d90c4960326", "content_id": "dc37c28088a682f6ab30b1a2baeec8766f3aaf6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 15129, "license_type": "no_license", "max_line_length": 161, "num_lines": 399, "path": "/src/main/java/com/guazi/cataract/Cataract.java", "repo_name": "ShabbySam/cataract", "src_encoding": "UTF-8", "text": "package com.guazi.cataract;\n\nimport java.io.File;\nimport java.io.FileNotFoundException;\nimport java.io.IOException;\nimport java.util.ArrayList;\nimport java.util.HashMap;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Map.Entry;\nimport java.util.Properties;\n\nimport org.apache.commons.lang.StringUtils;\nimport org.apache.hadoop.conf.Configuration;\nimport org.apache.hadoop.fs.FileSystem;\nimport org.apache.hadoop.fs.LocatedFileStatus;\nimport org.apache.hadoop.fs.Path;\nimport org.apache.hadoop.fs.RemoteIterator;\nimport org.apache.hadoop.hbase.HBaseConfiguration;\nimport org.apache.hadoop.hbase.client.Result;\nimport org.apache.hadoop.hbase.io.ImmutableBytesWritable;\nimport org.apache.hadoop.hbase.mapreduce.TableInputFormat;\nimport org.apache.hadoop.hbase.mapreduce.TableOutputFormat;\nimport org.apache.hadoop.mapreduce.Job;\nimport org.apache.spark.SparkConf;\nimport org.apache.spark.api.java.JavaPairRDD;\nimport org.apache.spark.api.java.JavaRDD;\nimport org.apache.spark.api.java.JavaSparkContext;\nimport org.apache.spark.broadcast.Broadcast;\nimport org.apache.spark.sql.DataFrame;\nimport org.apache.spark.sql.Row;\nimport org.apache.spark.sql.hive.HiveContext;\nimport org.apache.spark.sql.types.DataTypes;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\nimport com.guazi.cataract.common.DFSchema;\nimport com.guazi.cataract.common.JsonConfig;\nimport com.guazi.cataract.common.SqlParser;\nimport com.guazi.cataract.input.HbaseTable2JsonMapper;\nimport com.guazi.cataract.input.Json2RowMapper;\nimport com.guazi.cataract.output.HbaseTableWriter;\nimport com.guazi.cataract.udf.UDFMapper;\n\nimport net.minidev.json.JSONArray;\nimport net.minidev.json.JSONObject;\nimport net.minidev.json.JSONValue;\n\n\npublic class Cataract\n{\n final static Logger logger = LoggerFactory.getLogger(Cataract.class);\n\n private JsonConfig jsonConfig;\n\n private FileSystem fs;\n\n public Cataract(String configFile) throws IOException {\n String path = new File(\"\").getAbsolutePath() + \"/\" + configFile;\n jsonConfig = new JsonConfig(new File(path));\n\n Configuration config = new Configuration();\n config.set(\"fs.default.name\", \"hdfspath\");\n fs = FileSystem.get(config);\n }\n\n public void run(JavaSparkContext jsc, HiveContext hc, Job jobConf, Map<String, String> params) throws Exception {\n JSONObject bc = jsonConfig.getBroadcast();\n final Broadcast<JSONObject> broadcast = (Broadcast<JSONObject>) jsc.broadcast(bc);\n\n JSONObject sources = (JSONObject) jsonConfig.getConfig().get(\"source\");\n\n // begin to table\n for (Entry<String, Object> entry: sources.entrySet()) {\n JSONObject source = (JSONObject) entry.getValue();\n doSource(jsc, hc, source, broadcast, params);\n }\n\n boolean isUnion = false;\n if (params.containsKey(\"union\") && 
params.get(\"union\").equals(\"true\")) {\n isUnion = true;\n }\n\n // sql\n JSONArray arr = (JSONArray) jsonConfig.getConfig().get(\"sql\");\n JSONArray schemaJsonArray = (JSONArray) ((JSONObject)jsonConfig.getConfig().get(\"save\")).get(\"schema\");\n List<String> sqlArray = SqlParser.parse(arr, schemaJsonArray);\n\n DataFrame unionDF = null;\n\n for (String sql: sqlArray) {\n String tableName = null;\n boolean repartition = false;\n String partitionKey = null;\n if (sql.startsWith(\"[table:\")) {\n String header = sql.substring(\"[table:\".length(), sql.indexOf(']'));\n String[] info = StringUtils.split(header, ':');\n tableName = info[0];\n if (info.length == 2) {\n repartition = true;\n partitionKey = info[1];\n }\n sql = sql.substring(sql.indexOf(']') + 1);\n }\n\n if (sql.startsWith(\"[print]\")) {\n sql = sql.substring(\"[print]\".length());\n DataFrame print = hc.sql(sql);\n printDF(print);\n continue;\n }\n\n // Temp table.\n if (tableName != null) {\n DataFrame table = hc.sql(sql);\n if (repartition) table.repartition(table.col(partitionKey));\n table.cache();\n table.registerTempTable(tableName);\n continue;\n }\n\n // Save table\n DataFrame result = hc.sql(sql);\n\n if (isUnion) {\n if (unionDF == null) unionDF = result;\n else unionDF = unionDF.unionAll(result);\n } else {\n doSave(jsc, hc, jobConf, (JSONObject) jsonConfig.getConfig().get(\"save\"), result, params);\n }\n }\n\n // save\n if (isUnion) {\n doSave(jsc, hc, jobConf, (JSONObject) jsonConfig.getConfig().get(\"save\"), unionDF, params);\n }\n\n }\n\n public void doSource(JavaSparkContext jsc, HiveContext hc, JSONObject source, Broadcast<JSONObject> broadcast, Map<String, String> params) throws Exception {\n String type = source.getAsString(\"type\");\n int rddNum = 0;\n\n Object numObj = source.get(\"rdd_num\");\n\n if (numObj instanceof Integer) {\n rddNum = (int) numObj;\n } else if (numObj instanceof String) {\n rddNum = Integer.parseInt((String) numObj);\n }\n\n // LOAD RDD to JSON RDD\n JavaRDD<JSONObject> objRDD = null;\n if (type.equals(\"hbase\")) {\n Configuration conf = HBaseConfiguration.create();\n conf.set(\"hbase.zookeeper.quorum\", \"quorum\");\n conf.set(\"hbase.zookeeper.property.clientPort\", \"2181\");\n conf.set(TableInputFormat.INPUT_TABLE, source.getAsString(\"table\"));\n JavaPairRDD<ImmutableBytesWritable, Result> data = jsc.newAPIHadoopRDD(\n conf, TableInputFormat.class, ImmutableBytesWritable.class,\n Result.class);\n\n// if(rddNum > 0) {\n// // TODO\n//// data = data.par (rddNum);\n// }\n\n objRDD = data.map(new HbaseTable2JsonMapper());\n } else if (type.equals(\"hive\")) {\n String table = source.getAsString(\"table\");\n DataFrame df = null;\n\n String partitionFile = params.get(source.getAsString(\"partition\"));\n\n if (containsFile(partitionFile)) {\n logger.info(\"contains file: \" + partitionFile);\n df = hc.read().format(\"orc\").option(\"basePath\", table).load(partitionFile);\n } else {\n logger.info(\"doesn't contain file: \" + partitionFile);\n df = createEmptyDF(jsc, hc, (JSONArray) source.get(\"df.schema\"));\n }\n// if (rddNum > 0) {\n// df = df.repartition(rddNum);\n// }\n objRDD = df.toJSON().toJavaRDD().map(a -> (JSONObject) JSONValue.parse(a));\n }\n\n // JSON RDD to JSON RDD\n for (Object udfObj : (JSONArray) source.get(\"UDF\")) {\n JSONObject udfConfig = (JSONObject) udfObj;\n objRDD = objRDD.map(new UDFMapper(udfConfig, broadcast.getValue()));\n }\n\n // JSON RDD to Row RDD\n JSONArray arr = (JSONArray) source.get(\"df.schema\");\n DFSchema schema = new 
DFSchema(arr);\n JavaRDD<Row> rowRdd = objRDD.map(new Json2RowMapper(schema));\n\n // Row RDD to DataFrame\n DataFrame df = hc.createDataFrame(rowRdd, DataTypes.createStructType(schema.getStructFields())).cache();\n\n // Register Table\n df.registerTempTable(source.getAsString(\"df.table\"));\n System.out.println(source.getAsString(\"df.table\") + \" schema: \" + df.schema().toString());\n }\n\n public void doSave(JavaSparkContext jsc, HiveContext hc, Job jobConf, JSONObject save, DataFrame df, Map<String, String> params) throws Exception {\n int par = 8;\n if (params.containsKey(\"savePartitions\")) {\n par = Integer.parseInt(params.get(\"savePartitions\"));\n }\n df = df.coalesce(par);\n\n String type = save.getAsString(\"type\");\n if (type.equals(\"hbase\")) {\n saveToHbase(jsc, hc, jobConf, save, df);\n } else if (type.equals(\"hive\")) {\n saveToHive(save, df);\n } else if (type.equals(\"mysql\")) {\n saveToMysql(save, df);\n } else\n throw new Exception(String.format(\"ERROR: %s isn't a valid save type\", type));\n }\n\n public void saveToHbase(JavaSparkContext jsc, HiveContext hc, Job jobConf, JSONObject save, DataFrame df) {\n DFSchema hschema = new DFSchema((JSONArray) save.get(\"schema\"));\n JSONObject config = (JSONObject) save.get(\"hbase\");\n jobConf.getConfiguration().set(TableOutputFormat.OUTPUT_TABLE, save.getAsString(\"table\"));\n jobConf.setOutputFormatClass(TableOutputFormat.class);\n df.toJavaRDD().mapToPair(new HbaseTableWriter(config, hschema)).saveAsNewAPIHadoopDataset(jobConf.getConfiguration());;\n }\n\n public void saveToHive(JSONObject save, DataFrame df) {\n JSONObject hiveConfig = (JSONObject) save.get(\"hive\");\n DFSchema hschema = new DFSchema((JSONArray) save.get(\"schema\"));\n\n System.out.println(\"========[ BEFORE ]========\");\n df.printSchema();\n DataFrame updatedDF = hschema.updateSchema(df);\n System.out.println(\"========[ AFTER ]========\");\n updatedDF.printSchema();\n\n String[] partitions = StringUtils.split(hiveConfig.getAsString(\"partition\"), ',');\n String mode = hiveConfig.getAsString(\"mode\");\n updatedDF.write().mode(mode).partitionBy(partitions).orc(save.getAsString(\"table\"));\n }\n\n public void saveToMysql(JSONObject save, DataFrame df) {\n JSONObject mysqlConfig = (JSONObject) save.get(\"mysql\");\n DFSchema hschema = new DFSchema((JSONArray) save.get(\"schema\"));\n\n System.out.println(\"========[ BEFORE ]========\");\n df.printSchema();\n DataFrame updatedDF = hschema.updateSchema(df);\n System.out.println(\"========[ AFTER ]========\");\n updatedDF.printSchema();\n\n String url = mysqlConfig.getAsString(\"url\");\n String table = mysqlConfig.getAsString(\"table\");\n String mode = mysqlConfig.getAsString(\"mode\");\n String user = mysqlConfig.getAsString(\"user\");\n String password = mysqlConfig.getAsString(\"password\");\n Properties prop = new Properties();\n prop.put(\"user\", user);\n prop.put(\"password\", password);\n\n updatedDF.write().mode(mode).jdbc(url, table, prop);\n }\n\n public boolean containsFile(String path) throws FileNotFoundException, IllegalArgumentException, IOException {\n if (!fs.exists(new Path(path))) {\n return false;\n }\n\n RemoteIterator<LocatedFileStatus> files = fs.listFiles(new Path(path), true);\n while (files.hasNext()) {\n Path p = files.next().getPath();\n String name = p.getName();\n if (!name.startsWith(\".\") &&\n !name.endsWith(\".tmp\") &&\n !name.toLowerCase().equals(\"_success\") &&\n !name.toLowerCase().equals(\"_fail\")) {\n return true;\n }\n }\n return false;\n }\n\n 
public DataFrame createEmptyDF(JavaSparkContext jsc, HiveContext hc, JSONArray schemaArr) {\n JSONObject obj = new JSONObject();\n String whereKey = \"\";\n String whereValue = \"\";\n for (Object schemaObj: schemaArr) {\n String[] tks = StringUtils.split((String) schemaObj, ';');\n if (tks[1].equalsIgnoreCase(\"string\")) {\n obj.put(tks[0], \"1\");\n whereKey = tks[0];\n whereValue = \"'2'\";\n } else if (tks[1].equalsIgnoreCase(\"int\")) {\n obj.put(tks[0], 1);\n } else if (tks[1].equalsIgnoreCase(\"long\")) {\n obj.put(tks[0], 1L);\n } else if (tks[1].equalsIgnoreCase(\"boolean\")) {\n obj.put(tks[0], true);\n } else if (tks[1].equalsIgnoreCase(\"date\")) {\n obj.put(tks[0], \"1990-10-10\");\n } else if (tks[1].equalsIgnoreCase(\"double\")) {\n double d = 1;\n obj.put(tks[0], d);\n } else if (tks[1].equalsIgnoreCase(\"float\")) {\n obj.put(tks[0], 1f);\n } else if (tks[1].equalsIgnoreCase(\"short\")) {\n short s = 1;\n obj.put(tks[0], s);\n } else if (tks[1].equalsIgnoreCase(\"timestamp\")) {\n obj.put(tks[0], 1475983650);\n } else {\n obj.put(tks[0], \"1\");\n }\n }\n\n List<JSONObject> data = new ArrayList<JSONObject>();\n data.add(obj);\n\n DFSchema schema = new DFSchema(schemaArr);\n JavaRDD<Row> rowRdd = jsc.parallelize(data, 1).map(new Json2RowMapper(schema));\n\n DataFrame df = hc.createDataFrame(rowRdd, DataTypes.createStructType(schema.getStructFields()));\n\n String tempTable = \"temptable\";\n df.registerTempTable(tempTable);\n\n List<String> sqlList = new ArrayList<String>();\n sqlList.add(\"select\");\n sqlList.add(String.join(\", \", schema.getSchema()));\n sqlList.add(\"from\");\n sqlList.add(tempTable);\n sqlList.add(\"where\");\n\n sqlList.add(whereKey + \"=\" + whereValue);\n return hc.sql(String.join(\" \", sqlList));\n }\n\n public void printDF(DataFrame df) {\n System.out.println(\"================\");\n df.show();\n }\n\n public static void main( String[] args ) throws Exception\n {\n if (args.length < 1) {\n System.out.println(\"args length must more than 1\");\n System.exit(1);\n }\n\n Map<String, String> params = new HashMap<String, String>();\n\n boolean flag = false;\n for (int i = 1; i < args.length; ++i) {\n String param = args[i];\n String[] tks = StringUtils.split(param, ':');\n if (tks.length != 2) {\n System.out.println(\"param: \" + param + \" length not equals 2.\");\n flag = true;\n continue;\n }\n params.put(tks[0], tks[1]);\n }\n\n if (flag) {\n System.exit(1);\n }\n\n Configuration conf = HBaseConfiguration.create();\n conf.set(\"hbase.zookeeper.quorum\", \"quorum\");\n conf.set(\"hbase.zookeeper.property.clientPort\", \"2181\");\n Job jobConf = Job.getInstance(conf);\n\n SparkConf sparkConf = new SparkConf();\n JavaSparkContext jsc = new JavaSparkContext(sparkConf);\n\n HiveContext hc = new HiveContext(jsc.sc());\n\n int numPartition = 20;\n if (params.containsKey(\"partition\")) {\n numPartition = Integer.parseInt(params.get(\"partition\"));\n } else {\n params.put(\"partition\", Integer.toString(numPartition));\n }\n\n hc.sql(\"set spark.sql.shuffle.partitions=\" + Integer.toString(numPartition));\n\n Cataract cataract = new Cataract(args[0]);\n\n cataract.run(jsc, hc, jobConf, params);\n\n jsc.close();\n }\n}\n" }, { "alpha_fraction": 0.6124078631401062, "alphanum_fraction": 0.627764105796814, "avg_line_length": 31.559999465942383, "blob_id": "a0b5b922cc39a3006c9d9861ff1872af2b71de30", "content_id": "b150b9d6bbbef8146845093e3bb447fc2bb1a3e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", 
"length_bytes": 1628, "license_type": "no_license", "max_line_length": 108, "num_lines": 50, "path": "/src/main/java/com/guazi/cataract/input/HbaseTable2JsonMapper.java", "repo_name": "ShabbySam/cataract", "src_encoding": "UTF-8", "text": "package com.guazi.cataract.input;\n\n\nimport org.apache.hadoop.hbase.Cell;\nimport org.apache.hadoop.hbase.CellUtil;\nimport org.apache.hadoop.hbase.client.Result;\nimport org.apache.hadoop.hbase.io.ImmutableBytesWritable;\nimport org.apache.hadoop.hbase.util.Bytes;\nimport org.apache.spark.api.java.function.Function;\n\nimport net.minidev.json.JSONObject;\nimport scala.Tuple2;\n\n\npublic class HbaseTable2JsonMapper implements Function<Tuple2<ImmutableBytesWritable, Result>, JSONObject> {\n \n private static final long serialVersionUID = 5072906928576802829L;\n\n \n \n @Override\n public JSONObject call(Tuple2<ImmutableBytesWritable, Result> t) throws Exception {\n return singleVersionFormatter(t._1, t._2);\n }\n \n public JSONObject singleVersionFormatter(ImmutableBytesWritable rowkey, Result result) {\n Cell[] cells = result.rawCells();\n // family -> column family -> qualifier\n JSONObject dataMap = new JSONObject();\n \n dataMap.put(\"rowkey\", Bytes.toString(rowkey.get()));\n \n for(Cell kv : cells) {\n byte [] family = CellUtil.cloneFamily(kv);\n String familyStr = Bytes.toString(family);\n byte [] qualifier = CellUtil.cloneQualifier(kv);\n String qualifierStr = Bytes.toString(qualifier);\n \n String key = familyStr + \":\" + qualifierStr;\n \n byte [] value = CellUtil.cloneValue(kv);\n \n \n \n dataMap.put(key, Bytes.toString(value));\n }\n \n return dataMap;\n }\n}\n" }, { "alpha_fraction": 0.6511180996894836, "alphanum_fraction": 0.6605520844459534, "avg_line_length": 32.47368240356445, "blob_id": "8197fc9614740ccedd42ad36f654aa768a41a0ee", "content_id": "92f987b02d859e98dc86fd37ce8228515a69633c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5724, "license_type": "no_license", "max_line_length": 333, "num_lines": 171, "path": "/README.md", "repo_name": "ShabbySam/cataract", "src_encoding": "UTF-8", "text": "### Cataract\nCataract is a tools to format and transfer data from hbase/hive to hbase/hive.\n\n### Workflow\n1. hbase/hive table to JSON RDD\n2. JSON RDD format or UDF computing, to JSON RDD\n3. JSON RDD to DataFrame\n4. Spark Sql on DataFrame\n5. 
Save DataFrame to hbase or hive\n\n### Functions:\n - Mysql relate mapping\n - Some Simple UDFs: @INT, @STR, @DIMENSION, + , etc.\n - Support table join on sql\n - Can read from hbase, hive\n - Can write to hbase, hive\n\n### Configuration\nConfig Example:\n\n```JSON\n{\n \"broadcast\": [\n {\n \"mysql\": \"jdbc:mysql;host;port;db;user;password\",\n \"city\": \"select pinyin, fixed_city_id from bi_city\"\n }\n ],\n \"source\": {\n \"misc_city\": {\n \"type\": \"hbase\",\n \"table\": \"plt:misc_city\",\n \"UDF\": [\n {\n \"short_name\": \"$d:short_name\",\n \"domain\": \"$d:domain\",\n \"location\": \"$d:location\",\n \"pinyin\": \"$d:pinyin\",\n \"city_id\": \"@DIMENSION(city, $d:pinyin)\"\n }\n ],\n \"df.table\": \"misc_city\",\n \"df.schema\": [\n \"short_name;string\",\n \"pinyin;string\",\n \"domain;string\",\n \"location;string\",\n \"city_id;string\"\n ]\n }\n },\n \"sql\": [\n \"select short_name, pinyin, domain, location, city_id from misc_city\"\n ],\n \"save\": {\n \"type\": \"hbase\",\n \"table\": \"test:ncity\",\n \"hbase\": {\n \"cf\": \"d\",\n \"row\": \"pinyin\"\n },\n \"schema\": [\n \"city_id;string\",\n \"pinyin;string\",\n \"short_name;string\",\n \"domain;string\",\n \"location;string\"\n ]\n },\n \"testData\":{\n \"broadcast\":{\n \"city\":{\"city_1\":\"1\",\"city_2\":\"2\"}\n },\n \"d:short_name\":\"short_name\",\n \"d:domain\":\"domain\",\n \"d:location\": \"location\",\n \"d:pinyin\": \"city_1\"\n }\n}\n```\n\n#### broadcast\n- Load mysql table to spark broadcast, prepare for @DIMENSION function.\n- _broadcast_ can scan multiple dbs, multiple tables at the same time, and generate (key, value) pairs for each table.\n- Key is the first column, value is the second in the sql.\n- Each Json in broadcast relate to a db, and each key of the json is a table except \"mysql\" which is a connection string.\n- The key of the json is the table alias which is needed in @DIMENSION\n\n**@DIMENSION(tablename, key)** return the value of the key in broadcast.\n\n#### source\nSource config step 1, 2, 3.\n\n- **source.key** is an alias which is useless.\n- **source.key.type** defines the type of table, can be hbase or hive\n- **source.key.table** defines the path of the table, can be hbase table name or hive hdfs basic path.\n- **source.key.partition** is only needed if the type is hive. The value is a alias of the parameter which is given when you run the spark job. e.g. if the value is _citypartition_, in your running command, you should set _citypartion:/user/hive/external/year=2016/..._, and the value of citypartiton is the path you want to process.\n- **source.key.UDF** configs step 3 in the workflow. The key in it is the added key of json, and the value in it is the compute logic.\n- **source.key.df.table** the alias of table in memory, which is needed in step 5.\n- **source.key.df.schema** the schema of the table in memory, content column name and type.\n\n#### UDF\n```shell\n# Get the fk of dimension\n@DIMENSION(dimensionName, $first, ...)\n\n# Cast everything to int if possible\n@INT($col)\n\n# Cast everything to String if possible\n@STR($col)\n\n# Format timestamp string to date string\n@STRFTIME($col, yyyy-MM-dd)\n\n# Get timestamp string of date string\n@STRPTIME($col, yyyy-MM-dd)\n\n# Format date from one type to another\n@DATEFORMAT($col, yyyyMMdd, yyyy-MM-dd)\n```\n\n#### sql\nSQL config step 4. The value of sql is a list. 
The tools will union all sql results together.\n\n#### save\nSAVE config step 5.\n\n- **save.type** same as source\n- **save.table** same as source\n- **save.hbase** only needed when type is hbase, need to config _cf_ and _row_, _cf_ refer to columnfamily, _row_ refer to the column you want to set as rowkey.\n- **save.hive** only needed when type is hive, need to config _partition_ and _mode_, _partition_ is partition keys divided by ',', _mode_ is the mode to save, append or overwrite.\n- **schema** same as source.\n\n#### test\nTest should be performed after completion of the configuration.\n\n- **testData.broadcast.key** The test data of broadcast\n- **testData.rowDatas** The test data , mast contains all of the column in UDF parms\n\n##### check contents\n1. source.TABLE_NAME.table mast be included\n2. if source.TABLE_NAME.type equals hive mast have key \"partition\"\n3. UDF check\n 1. every funcitons mast match with regular expressions \"@[\\S\\s]*?\\(([\\s\\S]*?)\\)\"\n 2. every variable mast match with regular expressions \"\\$[a-zA-Z0-9\\_\\-:]+\"\n 3. every text mast match with regular expressions \"[a-zA-Z0-9\\_\\-:%]+\"\n 4. every variable **Not** have \"@\" AND \"(\" AND \")\"\n 5. if type equals \"hbase\" , every reference variable mast start width \"d:\" or \"rowkey\"\n 6. dimension : broadcast mast contain keys in param\n4. source.TABLE_NAME.df.table mast be included\n5. source.TABLE_NAME.df.schema mast be included\n6. save.schema mast be included\n7. if save.type equqls \"hive\" ,\"partition\" mast be included\n8. if save.type equals \"hbase\" , \"cf\" AND \"row\" mast be included\n\n### run test json\n```shell\n java -cp cataract-0.0.1-SNAPSHOT-jar-with-dependencies.jar com.guazi.cataract.JsonConfigCheck '/path/to/demo.json'\n```\n### Build\n```shell\nmvn clean package\n```\n\n### Run the tools\nPut your config file in the same folder of jar, and run\n\n```shell\nspark-submit --files yourconfig.json --driver-class-path /path/to/guava-14.0.jar --class com.guazi.cataract.Cataract cataract-0.0.1-SNAPSHOT-jar-with-dependencies.jar yourconfig.json citypartion:/user/hive/external/year=2016/month=08/\n```\n" }, { "alpha_fraction": 0.5795363783836365, "alphanum_fraction": 0.5843325257301331, "avg_line_length": 35.08654022216797, "blob_id": "1180d92a53cc8aa0cd4e6e59d979b5fbc6ee3b68", "content_id": "4add64eb9fd33eb1de5b93f729f28dd84ffb5364", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3753, "license_type": "no_license", "max_line_length": 112, "num_lines": 104, "path": "/src/main/java/com/guazi/cataract/common/SqlParser.java", "repo_name": "ShabbySam/cataract", "src_encoding": "UTF-8", "text": "package com.guazi.cataract.common;\n\n/**\n * Created by wangxueqiang on 16-9-21.\n */\n\nimport java.util.ArrayList;\nimport java.util.List;\nimport java.util.StringJoiner;\n\nimport net.minidev.json.JSONArray;\nimport net.minidev.json.JSONObject;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\npublic class SqlParser {\n final static Logger logger = LoggerFactory.getLogger(SqlParser.class);\n final static String OP_SELECT = \"select\";\n final static String OP_WHERE = \"where\";\n final static String OP_FROM = \"from\";\n final static String DELIMITER = \";\";\n\n public static List<String> parse(JSONArray sqlJsonArray, JSONArray schemaJsonArray) throws Exception {\n logger.info(\"parse sql of JSONObject\");\n ArrayList<String> ret = new ArrayList<>();\n List<String[]> schema = parseJsonArray(schemaJsonArray, 
DELIMITER);\n for (Object sqlObj : sqlJsonArray) {\n // this is a json dictionary\n if (sqlObj instanceof JSONObject) {\n ret.add(generateSql(sqlObj, schema));\n } else if (sqlObj instanceof JSONArray) {\n ret.add(concatSql(sqlObj));\n } else { // this is a sql string\n ret.add(sqlObj.toString().trim());\n }\n }\n logger.info(\"finish parse sql of JSONObject\");\n return ret;\n }\n\n // generate one executable sql from a json object\n private static String generateSql(Object sql, List<String[]> schema) throws Exception {\n logger.debug(\"generate sql: {}\", sql.toString());\n JSONObject sqlObj = (JSONObject) sql;\n JSONObject fieldsObj = (JSONObject) sqlObj.get(OP_SELECT);\n StringJoiner sjSql = new StringJoiner(\" \");\n StringJoiner sjSelect = new StringJoiner(\", \");\n\n // generate the select part\n sjSql.add(OP_SELECT);\n for (String[] strs : schema) {\n if (strs.length < 2) {\n logger.error(\"Entry length is less than 2 in schema, field: {}\", strs[0]);\n throw new Exception(String.format(\"Entry length is less than 2 in schema, field: %s\", strs[0]));\n }\n String field = strs[0];\n String value;\n if (strs.length >= 3)\n value = strs[2];\n else {\n logger.warn(\"Entry length is 2: field: {}\", strs[0]);\n value = strs[0];\n }\n // use user defined value for field\n if (fieldsObj.containsKey(field))\n value = (String) fieldsObj.get(field);\n sjSelect.add(value);\n }\n sjSql.add(sjSelect.toString());\n // generate the from part\n if (sqlObj.containsKey(OP_FROM)) {\n sjSql.add(OP_FROM).add((String) sqlObj.get(OP_FROM));\n } else {\n logger.error(\"From clause is missing\");\n throw new Exception(\"From clause is missing\");\n }\n\n // generate the where part\n if (sqlObj.containsKey(OP_WHERE)) {\n sjSql.add(OP_WHERE).add((String) sqlObj.get(OP_WHERE));\n }\n\n return sjSql.toString().trim();\n }\n\n // concat sql from a json array\n private static String concatSql(Object sql) throws Exception {\n logger.debug(\"concat sql: {}\", sql.toString());\n StringBuilder sb = new StringBuilder();\n JSONArray sqlArray = (JSONArray) sql;\n for (Object obj : sqlArray) {\n sb.append((String) obj);\n }\n return sb.toString().trim();\n }\n\n private static List<String[]> parseJsonArray(JSONArray array, String delimiter) {\n List<String[]> ret = new ArrayList<>();\n for (Object obj : array) {\n ret.add(((String) obj).split(delimiter));\n }\n return ret;\n }\n}\n" }, { "alpha_fraction": 0.5795354843139648, "alphanum_fraction": 0.5852322578430176, "avg_line_length": 37.03333282470703, "blob_id": "53b4f622c98d544b66e41baf662037b039cf12e7", "content_id": "1902bf863d531e21f05f5807fdde18868bc1b7ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4564, "license_type": "no_license", "max_line_length": 133, "num_lines": 120, "path": "/src/main/java/com/guazi/cataract/common/MySQLHelper.java", "repo_name": "ShabbySam/cataract", "src_encoding": "UTF-8", "text": "package com.guazi.cataract.common;\n\nimport com.mchange.v2.c3p0.ComboPooledDataSource;\n\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\nimport java.beans.PropertyVetoException;\nimport java.sql.*;\nimport java.util.List;\n\npublic class MySQLHelper {\n private ComboPooledDataSource dataSource;\n private String driver = \"com.mysql.jdbc.Driver\";\n\n final Logger logger = LoggerFactory.getLogger(MySQLHelper.class);\n\n public MySQLHelper(String connectionString) throws ClassNotFoundException, SQLException {\n init(connectionString);\n }\n\n public void init(String connectionString) 
throws ClassNotFoundException, SQLException {\n logger.info(\"being to init connection\");\n Class.forName(driver);\n String[] tks = connectionString.split(\";\");\n dataSource = new ComboPooledDataSource();\n dataSource.setUser(tks[4]);\n dataSource.setPassword(tks[5]);\n dataSource.setJdbcUrl(tks[0] + \"://\" + tks[1] + \":\" + tks[2] + \"/\" + tks[3] + \"?autoReconnect=true&characterEncoding=UTF-8\");\n dataSource.setInitialPoolSize(1);\n dataSource.setMinPoolSize(1);\n dataSource.setMaxPoolSize(10);\n dataSource.setMaxStatements(50);\n dataSource.setMaxIdleTime(600);\n\n //http://www.databasesandlife.com/automatic-reconnect-from-hibernate-to-mysql/\n dataSource.setIdleConnectionTestPeriod(600);\n dataSource.setTestConnectionOnCheckout(true);\n dataSource.setBreakAfterAcquireFailure(true);\n logger.info(\"init connection finished\");\n }\n\n public Connection connect() throws SQLException {\n logger.info(\"begin connecting\");\n Connection conn = dataSource.getConnection();\n logger.info(\"connecting finished\");\n return conn;\n }\n\n public ResultSet read(Connection conn, String sql) throws SQLException {\n Statement statement = conn.createStatement();\n ResultSet ret = statement.executeQuery(sql);\n return ret;\n }\n\n public void close() throws SQLException {\n dataSource.close();\n }\n\n public boolean write(String sql, Object... objects) throws SQLException {\n Connection conn = dataSource.getConnection();\n try {\n conn.setAutoCommit(true); // Can't call commit when autocommit=true, so conn.commit(true) is commented\n PreparedStatement preparedStatement = conn.prepareStatement(sql);\n int n = 0;\n for (Object obj : objects) {\n ++n;\n if (obj instanceof String) {\n preparedStatement.setString(n, (String) obj);\n } else if (obj instanceof Integer) {\n preparedStatement.setInt(n, (Integer) obj);\n } else if (obj instanceof Date) {\n preparedStatement.setDate(n, (Date) obj);\n } else if (obj instanceof Double) {\n preparedStatement.setDouble(n, (Double) obj);\n } else {\n return false;\n }\n }\n preparedStatement.executeUpdate();\n // conn.commit();\n return true;\n } catch (SQLException e) {\n logger.error(\"got SQLException:\" + e.getMessage());\n return false;\n } finally {\n conn.close();\n }\n }\n\n public int[] writeBatch(String sql, List<List<Object>> lines) throws SQLException {\n Connection conn = dataSource.getConnection();\n try {\n conn.setAutoCommit(true);\n PreparedStatement preparedStatement = conn.prepareStatement(sql);\n for (List<Object> line : lines) {\n int n = 0;\n for (Object obj : line) {\n ++n;\n if (obj instanceof String) {\n preparedStatement.setString(n, (String) obj);\n } else if (obj instanceof Integer) {\n preparedStatement.setInt(n, (Integer) obj);\n } else if (obj instanceof Date) {\n preparedStatement.setDate(n, (Date) obj);\n } else if (obj instanceof Double) {\n preparedStatement.setDouble(n, (Double) obj);\n }\n }\n preparedStatement.addBatch();\n }\n return preparedStatement.executeBatch();\n } catch (SQLException e) {\n logger.error(\"got SQLException:\" + e.getMessage());\n } finally {\n conn.close();\n }\n return null;\n }\n}\n" }, { "alpha_fraction": 0.5751110911369324, "alphanum_fraction": 0.5786666870117188, "avg_line_length": 25.785715103149414, "blob_id": "67abad32aab72011758645607ae061957628610f", "content_id": "f24358476e4816f8c5c1565c0d7f729c5b59feef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1125, "license_type": "no_license", "max_line_length": 67, "num_lines": 42, 
"path": "/src/main/java/com/guazi/cataract/input/FilterMapper.java", "repo_name": "ShabbySam/cataract", "src_encoding": "UTF-8", "text": "package com.guazi.cataract.input;\n\n\nimport java.util.HashSet;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Set;\n\nimport org.apache.spark.api.java.function.Function;\n\nimport net.minidev.json.JSONObject;\n\n\npublic class FilterMapper implements Function<JSONObject, String> {\n private static final long serialVersionUID = 1L;\n private Set<String> columns;\n \n public FilterMapper(List<String> columns) {\n this.columns = new HashSet<String>(columns);\n }\n \n @Override\n public String call(JSONObject t) throws Exception {\n JSONObject newObj = new JSONObject();\n for (Map.Entry<String, Object> entry : t.entrySet()) {\n String key = entry.getKey();\n \n if (!this.columns.contains(key)) {\n continue;\n }\n \n String[] tks = key.split(\":\");\n if (tks.length == 1) {\n newObj.put(key, entry.getValue());\n } else if (tks.length == 2) {\n newObj.put(tks[1], entry.getValue());\n }\n }\n return newObj.toJSONString();\n }\n \n}\n" }, { "alpha_fraction": 0.6774628758430481, "alphanum_fraction": 0.7354925870895386, "avg_line_length": 91.5, "blob_id": "1046ed704dac996d4dea4da054e98ec4568f8b2c", "content_id": "63aa61cd7cf3dd84a9180f504b7e026a4271d809", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 741, "license_type": "no_license", "max_line_length": 459, "num_lines": 8, "path": "/src/test/resources/sqlparser_test.sh", "repo_name": "ShabbySam/cataract", "src_encoding": "UTF-8", "text": "spark-submit \\\n --driver-memory 6g \\\n --executor-memory 10g \\\n --files sqlparser_test.json \\\n --driver-class-path $HADOOP_HOME/spark_lib/datanucleus-api-jdo-3.2.6.jar:$HADOOP_HOME/spark_lib/datanucleus-core-3.2.10.jar:$HADOOP_HOME/spark_lib/datanucleus-rdbms-3.2.9.jar:$HADOOP_HOME/spark_lib/guava-14.0.jar:$HADOOP_HOME/spark_lib/hbase-client-1.1.5.jar:$HADOOP_HOME/spark_lib/hbase-common-1.1.5.jar:$HADOOP_HOME/spark_lib/hbase-server-1.1.5.jar:$HADOOP_HOME/spark_lib/mysql-connector-java-5.1.34.jar:$HADOOP_HOME/spark_lib/hbase-protocol-1.1.5.jar \\\n --class com.guazi.cataract.Cataract cataract-0.0.1-SNAPSHOT-jar-with-dependencies.jar \\\n sqlparser_test.json \\\n cars:/user/hive/external/cars/year=2016/month=08/day=28\n\n" }, { "alpha_fraction": 0.5124784111976624, "alphanum_fraction": 0.517589271068573, "avg_line_length": 34.11888122558594, "blob_id": "c899a68c339e25612aacbcc5fe3ee80d110781c3", "content_id": "59e96888f5edd5224c651d975c67188d0a9164ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 15066, "license_type": "no_license", "max_line_length": 150, "num_lines": 429, "path": "/src/main/java/com/guazi/cataract/udf/JsonProcessor.java", "repo_name": "ShabbySam/cataract", "src_encoding": "UTF-8", "text": "package com.guazi.cataract.udf;\n\nimport java.io.Serializable;\nimport java.text.NumberFormat;\nimport java.text.ParseException;\nimport java.text.SimpleDateFormat;\nimport java.util.ArrayList;\nimport java.util.Calendar;\nimport java.util.Date;\nimport java.util.HashMap;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Map.Entry;\nimport java.util.TimeZone;\n\nimport org.apache.hadoop.util.StringUtils;\n\nimport net.minidev.json.JSONArray;\nimport net.minidev.json.JSONObject;\n\npublic class JsonProcessor implements Serializable {\n private static final long serialVersionUID = -7983086796740792122L;\n\n// private 
Map<String, Function> functions;\n\n// private Map<String, SimpleDateFormat> dateFormats;\n\n private JSONObject config;\n\n private JSONObject broadcast;\n\n private final static TimeZone tz = TimeZone.getTimeZone(\"GMT+8\");\n\n private Map<String, List<List<String>>> columnUDF;\n\n\n public JsonProcessor(JSONObject config, JSONObject broadcast) {\n this.config = config;\n this.broadcast = broadcast;\n parseAll();\n }\n\n public JSONObject process(JSONObject data) {\n JSONObject out = (JSONObject) data.clone();\n\n for (Entry<String, Object> entry : config.entrySet()) {\n String col = entry.getKey();\n out.put(col, processCol(data, col));\n }\n return out;\n }\n\n public Object processCol(JSONObject data, String col) {\n List<List<String>> udfParams = columnUDF.get(col);\n List<Object> result = new ArrayList<Object>();\n for (List<String> udfParam : udfParams) {\n String funName = udfParam.get(0);\n\n List<Object> params = new ArrayList<Object>();\n for (int idx = 1; idx < udfParam.size(); ++idx) {\n String param = udfParam.get(idx);\n if (param.startsWith(\"$\")) {\n params.add(data.get(param.substring(1)));\n } else {\n params.add(param);\n }\n }\n\n if (funName.equals(\"\")) {\n result.add(params.get(0));\n } else if (funName.equalsIgnoreCase(\"@STR\")) {\n result.add(toString(params.get(0)));\n } else if (funName.equalsIgnoreCase(\"@INT\")) {\n result.add(toInt(params.get(0)));\n } else if (funName.equalsIgnoreCase(\"@RJUST\")) {\n result.add(rjust(params.get(0), params.get(1)));\n } else if (funName.equalsIgnoreCase(\"@STRFTIME\")) {\n result.add(strftime((String) params.get(0), (String) params.get(1)));\n } else if (funName.equalsIgnoreCase(\"@STRPTIME\")) {\n result.add(strptime((String) params.get(0), (String) params.get(1)));\n } else if (funName.equalsIgnoreCase(\"@DIMENSION\")) {\n result.add(dimension(params));\n } else if (funName.equalsIgnoreCase(\"@DATEFORMAT\")) {\n result.add(dateFormat((String) params.get(0), (String) params.get(1), (String) params.get(2)));\n } else if (funName.equalsIgnoreCase(\"@IF\")) {\n result.add(ifFunction(params.get(0), (String) params.get(1), (String) params.get(2), (String) params.get(3), (String) params.get(4)));\n } else if (funName.equalsIgnoreCase(\"@CONTAINS\")) {\n result.add(isContains((String) params.get(0), (String) params.get(1)));\n } else if (funName.equalsIgnoreCase(\"@RANGE\")) {\n result.add(range(params));\n }\n }\n\n if (result.isEmpty()) return null;\n if (result.get(0) instanceof String) {\n String buf = \"\";\n for (Object obj : result) {\n buf += (String) obj;\n }\n return buf;\n }\n\n if (result.get(0) instanceof Integer) {\n int buf = 0;\n for (Object obj : result) {\n buf += (int) obj;\n }\n return buf;\n }\n\n if (result.get(0) instanceof Float) {\n float buf = 0;\n for (Object obj : result) {\n buf += (float) obj;\n }\n return buf;\n }\n\n if (result.get(0) instanceof Long) {\n long buf = 0;\n for (Object obj : result) {\n buf += (long) obj;\n }\n return buf;\n }\n\n if (result.get(0) instanceof Double) {\n double buf = 0;\n for (Object obj : result) {\n buf += (double) obj;\n }\n return buf;\n }\n\n return null;\n }\n\n public String toString(Object obj) {\n String res = \"\";\n if (obj instanceof String) {\n res = (String) obj;\n } else if (obj instanceof Integer) {\n res = Integer.toString((int) obj);\n } else if (obj instanceof Float) {\n res = Float.toString((float) obj);\n } else if (obj instanceof Long) {\n res = Long.toString((long) obj);\n } else if (obj instanceof Double) {\n res = Double.toString((double) 
obj);\n } else if (obj instanceof Short) {\n res = Short.toString((short) obj);\n }\n return res;\n }\n\n public int toInt(Object obj) {\n int res = 0;\n if (obj instanceof String) {\n res = Integer.valueOf((String) obj);\n } else if (obj instanceof Integer) {\n res = (int) obj;\n }\n return res;\n }\n\n public void parseAll() {\n columnUDF = new HashMap<String, List<List<String>>>();\n for (Entry<String, Object> entry : config.entrySet()) {\n String col = entry.getKey();\n String cal = (String) entry.getValue();\n columnUDF.put(col, parseUDF(cal));\n }\n }\n\n private List<String> parseFun(String single) {\n List<String> oneFunction = new ArrayList<>();\n if (single.startsWith(\"@\")) {\n int leftBracket = single.indexOf(\"(\");\n int rightBracket = single.lastIndexOf(\")\");\n\n oneFunction.add(single.substring(0, leftBracket).trim());\n String params = single.substring(leftBracket + 1, rightBracket);\n\n for (String param : StringUtils.split(params, ',')) {\n oneFunction.add(param.trim());\n }\n } else {\n oneFunction.add(\"\");\n oneFunction.add(single);\n }\n return oneFunction;\n }\n\n public List<List<String>> parseUDF(String udf) {\n List<List<String>> udfList = new ArrayList<>();\n if (udf != null && udf.contains(\"+\")) {\n String[] udfSplit = StringUtils.split(udf, '+');\n for (String s : udfSplit) {\n udfList.add(parseFun(s.trim()));\n }\n } else {\n udfList.add(parseFun(udf));\n }\n return udfList;\n }\n\n public String strftime(String ts, String format) {\n SimpleDateFormat dateFormat = null;\n dateFormat = new SimpleDateFormat(format);\n dateFormat.setTimeZone(tz);\n\n long t = Long.parseLong(ts);\n if (ts.length() == 10) t *= 1000;\n\n return dateFormat.format(new Date(t));\n }\n\n\n public String strptime(String date, String dataFormat) {\n SimpleDateFormat df = null;\n df = new SimpleDateFormat(dataFormat);\n df.setTimeZone(tz);\n\n Date dt = null;\n try {\n dt = df.parse(date);\n } catch (ParseException e) {\n e.printStackTrace();\n }\n\n Calendar c = Calendar.getInstance();\n c.setTime(dt);\n long time = c.getTimeInMillis();\n return Long.toString(time);\n }\n\n public String dateFormat(String date, String from, String to) {\n SimpleDateFormat fromDF = null;\n fromDF = new SimpleDateFormat(from);\n fromDF.setTimeZone(tz);\n\n SimpleDateFormat toDF = null;\n toDF = new SimpleDateFormat(to);\n toDF.setTimeZone(tz);\n\n Date dt = null;\n try {\n dt = fromDF.parse(date);\n } catch (ParseException e) {\n e.printStackTrace();\n }\n\n if (dt == null) {\n System.out.println(\"DataFormat error: \");\n System.out.println(\"Date: \" + date);\n System.out.println(\"From: \" + from);\n System.out.println(\"To: \" + to);\n return \"\";\n }\n\n return toDF.format(dt);\n }\n\n public Object dimension(List<Object> params) {\n String name = (String) params.get(0);\n JSONArray mappings = (JSONArray) broadcast.get(name);\n Object o = null;\n for (int i = 1; i < params.size(); ++i) {\n Object param = params.get(i);\n JSONObject mapping = (JSONObject) mappings.get(i - 1);\n o = mapping.get(param);\n if (o != null) break;\n }\n return o;\n }\n\n public String rjust(Object data, Object len) {\n String format = \"%1$,0\" + (String) len + \"d\";\n if (data instanceof Integer) {\n return String.format(format, (int) data);\n } else if (data instanceof String) {\n return String.format(format, Integer.parseInt((String) data));\n } else {\n System.out.println(\"RJUST failed, data: \" + data + \", len: \" + len);\n }\n return data.toString();\n }\n\n private Object ifFunction(Object originData, String 
method, String compareData, Object trueResult, Object falseResult) {\n boolean flag = false;\n if (\"eq\".equals(method)) {\n // ==\n if (originData instanceof String) {\n flag = originData.equals(compareData);\n } else if (originData instanceof Number) {\n NumberFormat nf = NumberFormat.getInstance();\n Number compareNumber = null;\n try {\n compareNumber = nf.parse(compareData);\n flag = ((Number) originData).doubleValue() == compareNumber.doubleValue();\n } catch (ParseException e) {\n e.printStackTrace();\n }\n }\n } else if (\"ne\".equals(method)) {\n // !=\n if (originData instanceof String) {\n flag = !originData.equals(compareData);\n } else if (originData instanceof Number) {\n NumberFormat nf = NumberFormat.getInstance();\n Number compareNumber = null;\n try {\n compareNumber = nf.parse(compareData);\n flag = ((Number) originData).doubleValue() != compareNumber.doubleValue();\n } catch (ParseException e) {\n e.printStackTrace();\n }\n }\n } else if (\"lt\".equals(method)) {\n // <\n if (originData instanceof String) {\n flag = ((String) originData).compareTo(compareData) < 0;\n } else if (originData instanceof Number) {\n NumberFormat nf = NumberFormat.getInstance();\n Number compareNumber = null;\n try {\n compareNumber = nf.parse(compareData);\n flag = ((Number) originData).doubleValue() < compareNumber.doubleValue();\n } catch (ParseException e) {\n e.printStackTrace();\n }\n }\n } else if (\"le\".equals(method)) {\n // <=\n if (originData instanceof String) {\n flag = ((String) originData).compareTo(compareData) <= 0;\n } else if (originData instanceof Number) {\n NumberFormat nf = NumberFormat.getInstance();\n Number compareNumber = null;\n try {\n compareNumber = nf.parse(compareData);\n flag = ((Number) originData).doubleValue() <= compareNumber.doubleValue();\n } catch (ParseException e) {\n e.printStackTrace();\n }\n }\n } else if (\"gt\".equals(method)) {\n // >\n if (originData instanceof String) {\n flag = ((String) originData).compareTo(compareData) > 0;\n } else if (originData instanceof Number) {\n NumberFormat nf = NumberFormat.getInstance();\n Number compareNumber = null;\n try {\n compareNumber = nf.parse(compareData);\n flag = ((Number) originData).doubleValue() > compareNumber.doubleValue();\n } catch (ParseException e) {\n e.printStackTrace();\n }\n }\n } else if (\"ge\".equals(method)) {\n // >=\n if (originData instanceof String) {\n flag = ((String) originData).compareTo(compareData) >= 0;\n } else if (originData instanceof Number) {\n NumberFormat nf = NumberFormat.getInstance();\n Number compareNumber = null;\n try {\n compareNumber = nf.parse(compareData);\n flag = ((Number) originData).doubleValue() >= compareNumber.doubleValue();\n } catch (ParseException e) {\n e.printStackTrace();\n }\n }\n } else if (\"contains\".equals(method)) {\n flag = originData.toString().contains(compareData);\n }\n\n if (\"NULL\".equals(trueResult)) {\n trueResult = null;\n }\n if (\"NULL\".equals(falseResult)) {\n falseResult = null;\n }\n return flag ? trueResult : falseResult;\n }\n\n private int isContains(String source, String str) {\n if (source != null && str != null) {\n return source.contains(str) ? 
1 : 0;\n } else {\n return 0;\n }\n }\n\n public Object range(List<Object> params) {\n String name = (String) params.get(0);\n JSONArray mappings = (JSONArray) broadcast.get(name);\n Object o = null;\n NumberFormat nf = NumberFormat.getInstance();\n for (int i = 1; i < params.size(); ++i) {\n Object param = params.get(i);\n JSONObject mapping = (JSONObject) mappings.get(i - 1);\n for (Object obj : mapping.keySet()) {\n String key = obj.toString();\n String[] minMax = key.split(\"_\");\n Number min = null;\n Number max = null;\n try {\n min = nf.parse(minMax[0]);\n max = nf.parse(minMax[1]);\n Number input = nf.parse(param.toString());\n if (min.doubleValue() <= input.doubleValue()\n && max.doubleValue() >= input.doubleValue()) {\n o = mapping.get(key);\n }\n } catch (ParseException e) {\n e.printStackTrace();\n }\n }\n if (o != null) break;\n }\n return o;\n }\n\n public Map<String, List<List<String>>> getColumnUDF() {\n return columnUDF;\n }\n}\n" }, { "alpha_fraction": 0.5483973622322083, "alphanum_fraction": 0.55474454164505, "avg_line_length": 29.89215660095215, "blob_id": "c96ba49f21aca397f2c76bacd767d2fd75d40ac2", "content_id": "78546f65a2d0ebc43ddf786cc18f70fd48a3deb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3151, "license_type": "no_license", "max_line_length": 105, "num_lines": 102, "path": "/script/cataract_runner.py", "repo_name": "ShabbySam/cataract", "src_encoding": "UTF-8", "text": "#/usr/bin/python\n# -*- coding: utf-8 -*-\n# Author: Peng Chao\n# Copyright:\n# Date:\n# Distributed under terms of the license.\n\nimport sys\nimport os\nimport getopt\nfrom ConfigParser import SafeConfigParser\n\n\nclass CataractCommand(object):\n def __init__(self, configfile):\n self.config = SafeConfigParser()\n self.config.read(configfile)\n self.spark_params = ['spark-submit']\n self.cataract_params = []\n\n def read_config(self):\n self.set_spark()\n self.set_dependencies('jars')\n self.set_dependencies('files')\n self.set_cataract()\n\n def parse_cmd(self, argv):\n option_params = ['partition=', 'union=', 'save-partition=', 'jar=',\n 'deploy-mode=', 'executor-cores=', 'executor-memory=',\n 'num-executors=', 'driver-memory=']\n\n cataract_part = {}\n spark_part = {}\n files = {}\n\n try:\n opts, argv = getopt.getopt(argv, \"t:c:j:\", option_params)\n except getopt.GetoptError as e:\n print(\"Usage: %s -i 2016082015 -o /user/hive/external/test/ -p partition_nums\" % sys.argv[0])\n sys.exit(2)\n\n for opt, arg in opts:\n if opt == '-t':\n input_time = arg\n elif opt == '-o':\n hive_path = arg\n elif opt == '-p':\n partitions = int(arg)\n\n def set_spark(self):\n env = dict(self.config.items('spark'))\n self.set_spark_param(env, 'master', 'local[4]')\n self.set_spark_param(env, 'deploy-mode', None)\n self.set_spark_param(env, 'name', None)\n\n self.set_spark_param(env, 'executor-cores', None)\n self.set_spark_param(env, 'executor-memory', '2g')\n self.set_spark_param(env, 'num-executors', '5')\n self.set_spark_param(env, 'driver-memory', '4g')\n\n self.cataract_params.append(env.get('jar', None))\n self.cataract_params.append(env.get('class', 'com.guazi.cataract.Cataract'))\n\n def set_dependencies(self, part):\n values = []\n for f in self.read_values(part):\n values.append(f)\n\n if len(values) != 0:\n self.spark_params.append(\"--\" + part)\n self.spark_params.append(','.join(values))\n\n def set_cataract(self):\n env = dict(self.config.items('cataract'))\n for k, v in env.items():\n if v == '':\n continue\n 
self.cataract_params.append(k + \":\" + v)\n\n def read_values(self, section):\n return [x[1] for x in self.config.items(section)]\n\n def set_spark_param(self, env, key, default):\n # default is None: don't add this key\n dv = env.get(key, default)\n if dv is not None or default is not None:\n self.spark_params.append('--' + key)\n self.spark_params.append(dv)\n\n def get_command(self):\n cmds = []\n cmds.extend(self.spark_params)\n cmds.extend(self.cataract_params)\n return ' '.join(cmds)\n\n\nif __name__ == '__main__':\n cmd = CataractCommand(sys.argv[1])\n cmd.read_config()\n cmd_str = cmd.get_command()\n print cmd_str\n os.system(cmd_str)\n" } ]
11
janithwanni/alpha-zero-data-storm-2
https://github.com/janithwanni/alpha-zero-data-storm-2
511ddbd6cb2942a795fef35c246791060f9f60b5
f40a271ca7e929881b04593bb0d0e9f962562e36
689749763bfc37d4ed0dff596fd23b5d4e76fe01
refs/heads/master
2023-03-22T21:39:24.580680
2021-03-14T06:33:59
2021-03-14T06:33:59
346,534,079
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.603554368019104, "alphanum_fraction": 0.6192754507064819, "avg_line_length": 27.076923370361328, "blob_id": "e29d0378e2cef0fc75e337e593af834d8015587e", "content_id": "d5c7db095ae1431cf563881205c90f9472ab3c9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 1463, "license_type": "no_license", "max_line_length": 97, "num_lines": 52, "path": "/notebooks/day_2/2-Oversample_EDA.Rmd", "repo_name": "janithwanni/alpha-zero-data-storm-2", "src_encoding": "UTF-8", "text": "---\ntitle: \"2-Oversample_EDA\"\nauthor: \"Janith Wanniarachchi\"\ndate: \"3/12/2021\"\noutput: html_document\n---\n\n```{r setup, include=FALSE}\nknitr::opts_chunk$set(echo = TRUE)\nlibrary(tidyverse)\n```\n\n```{r}\ntrain_os <- read_csv(\"../../data/processed/oversampled/train_oversample.csv\") %>% \n mutate_if(is.character,factor) %>% \n mutate(Reservation_Status = factor(Reservation_Status,\n levels=c(\"Check-In\",\"Canceled\",\"No-Show\")))\n```\n\n```{r}\ntable(train_os$Reservation_Status)\n```\n\n```{r}\nskimr::skim(train_os)\n```\n\n```{r}\ncat_cols <- train_os %>% select_if(is.factor) %>% colnames()\nmap_dfr(cat_cols, function(x){\n pval <- table(train_os[[x]],train_os$Reservation_Status) %>% summary() %>% .$p.value \n return(list(column = x,rejected = pval < 0.05))\n}) %>% filter(rejected == TRUE)\n```\n\n```{r}\n# GGally::ggpairs(train_os %>% select_if(is.factor),aes(color=Reservation_Status),progress=FALSE)\n# ggsave(\"output/pair_oversampled.png\",width = 841,height = 594,units = \"mm\")\n```\n\n```{r}\n# cols <- c(train_os %>% select_if(~ !is.factor(.x)) %>% colnames(),\"Reservation_Status\")\n# GGally::ggpairs(train_os %>% select(all_of(cols)),\n# aes(color=Reservation_Status),progress=FALSE)\n# ggsave(\"output/pair_oversampled.png\",width = 841,height = 594,units = \"mm\")\n```\n\n```{r}\nlibrary(ggfortify)\npca_res <- prcomp(train_os %>% select_if(is.numeric), scale. 
= TRUE)\nautoplot(pca_res, data = train_os, colour = 'Reservation_Status')\n```\n\n\n\n" }, { "alpha_fraction": 0.6353626847267151, "alphanum_fraction": 0.640544056892395, "avg_line_length": 52.27586364746094, "blob_id": "8b51fabc1d7d70a202b270ad9af1ca43e7a4ea17", "content_id": "2395b7c107673f11928f28c89a02f6d78615e534", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1544, "license_type": "no_license", "max_line_length": 122, "num_lines": 29, "path": "/notebooks/day_1/auto_ml_attempt.py", "repo_name": "janithwanni/alpha-zero-data-storm-2", "src_encoding": "UTF-8", "text": "from autogluon.tabular import TabularDataset, TabularPredictor\nimport pandas as pd\nfrom datetime import datetime\n\n# train_df = pd.read_csv(\"../../data/processed/train_preproc.csv\")\ntrain_data = TabularDataset(\"../../data/processed/oversampled/train_valid_feat_eng_oversample.csv\")\n# train_data = train_data.drop([\"Age\",\"Room_Rate\",\"Discount_Rate\"],axis=\"columns\")\nsave_path = \"models_oversample_valid\"\npredictor = TabularPredictor(label=\"Reservation_Status\",path=save_path,eval_metric=\"f1_macro\").fit(train_data,\n time_limit=7200,\n presets=\"best_quality\")\n\nvalid_data = TabularDataset(\"../../data/processed/valid_preproc.csv\")\ny_test = valid_data.loc[:,\"Reservation_Status\"]\nvalid_data = valid_data.drop([\"Reservation_Status\"],axis=\"columns\")\n\ny_pred = predictor.predict(valid_data)\nperf = predictor.evaluate_predictions(y_true=y_test, y_pred=y_pred, auxiliary_metrics=True)\nprint(perf)\n\ntest_data = TabularDataset(\"../../data/processed/test_preproc.csv\")\ntest_preds = predictor.predict(test_data)\n\ntest_df = pd.read_csv(\"../../data/processed/test_preproc.csv\")\ntest_df[\"Reservation_Status\"] = test_preds\ntest_df = test_df.replace({\"Reservation_Status\":{\"Check-In\":1,\"Canceled\":2,\"No-Show\":3}})\ntest_df = test_df.loc[:,[\"Reservation-id\",\"Reservation_Status\"]]\n\ntest_df.to_csv(\"../../data/submissions/automl_\"+str(datetime.now())+\".csv\",index=False)" }, { "alpha_fraction": 0.6886290907859802, "alphanum_fraction": 0.6907545328140259, "avg_line_length": 43.85714340209961, "blob_id": "18c01803e439181290e7f3a2e1ab0c7ee19f058b", "content_id": "772e16264f7f8a240a9017cee3c4bcaa281a612a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 941, "license_type": "no_license", "max_line_length": 116, "num_lines": 21, "path": "/notebooks/day_2/oversample_selected_features.py", "repo_name": "janithwanni/alpha-zero-data-storm-2", "src_encoding": "UTF-8", "text": "from imblearn.over_sampling import SMOTENC\nimport pandas as pd\n\ndef oversample(dataframe: pd.DataFrame, cat_feats):\n X = dataframe.drop(\"Reservation_Status\",axis=\"columns\")\n y = dataframe.loc[:,\"Reservation_Status\"]\n smote_enc = SMOTENC(categorical_features=cat_feats,random_state=42)\n X_res, y_res = smote_enc.fit_resample(X,y)\n out_df = X_res.copy(deep=True)\n out_df[\"Reservation_Status\"] = y_res\n return out_df\n\ntrain_df = pd.read_csv(\"../../data/processed/train_preproc.csv\")\ntrain_df = train_df.loc[:,[\"Reservation_Status\",\"N_Minors\",\"Total_PAX\",\n \"Income_number\",\"Cost\",\"Cost_Income\",\"Lag\",\"Meal_Type\"]]\n\ncategorical_columns = [\"Meal_Type\"]\n\ncat_feats = [train_df.drop(\"Reservation_Status\",axis=\"columns\").columns.get_loc(col) for col in categorical_columns]\nos_df = 
oversample(train_df,cat_feats)\nos_df.to_csv(\"../../data/processed/oversampled/train_feat_eng_oversample.csv\",index=False)" }, { "alpha_fraction": 0.716946005821228, "alphanum_fraction": 0.7243947982788086, "avg_line_length": 40.346153259277344, "blob_id": "95be0b4dcd10dacbd1800202052c6bf1e88fb525", "content_id": "cebe3c7c3b708c6507932d00035c6e365b2dd437", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1074, "license_type": "no_license", "max_line_length": 88, "num_lines": 26, "path": "/notebooks/day_2/xgboost_attempt.py", "repo_name": "janithwanni/alpha-zero-data-storm-2", "src_encoding": "UTF-8", "text": "import xgboost as xgb\nimport pandas as pd\nfrom sklearn.metrics import f1_score, confusion_matrix\n\ntrain_df = pd.read_csv(\"../../data/processed/oversampled/train_feat_eng_oversample.csv\")\nvalid_df = pd.read_csv(\"../../data/processed/valid_preproc.csv\")\ntest_df = pd.read_csv(\"../../data/processed/test_preproc.csv\")\n\ntrain_X = pd.get_dummies(train_df.drop(\"Reservation_Status\",axis=\"columns\"))\n# multi:softmax needs integer class labels, not one-hot columns\ntrain_Y = train_df.loc[:,\"Reservation_Status\"].astype(\"category\").cat.codes\n\nvalid_X = pd.get_dummies(valid_df.drop(\"Reservation_Status\",axis=\"columns\"))\nvalid_Y = valid_df.loc[:,\"Reservation_Status\"].astype(\"category\").cat.codes\n\nxg_train = xgb.DMatrix(train_X,train_Y,enable_categorical=True)\nxg_valid = xgb.DMatrix(valid_X,valid_Y,enable_categorical=True)\n\nparam = {\"eta\":0.1,\"max_depth\":6,\"nthread\":4,\"num_class\":3,\"objective\":\"multi:softmax\"}\n\nwatchlist = [(xg_train,'train'),(xg_valid,'test')]\nnum_round = 5\nbst = xgb.train(param,xg_train,num_round,watchlist)\npred = bst.predict(xg_valid)\n\nprint(f1_score(y_true=valid_Y,y_pred=pred,average=\"macro\"))\nprint(confusion_matrix(y_true=valid_Y,y_pred=pred))" }, { "alpha_fraction": 0.6595744490623474, "alphanum_fraction": 0.695035457611084, "avg_line_length": 25.5, "blob_id": "68ef79ffe11ecf585f1f3fec1ef4ee6017513416", "content_id": "26eebe630d6b22e8beb88c1c195cf8474b75a004", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 1269, "license_type": "no_license", "max_line_length": 158, "num_lines": 46, "path": "/notebooks/day_3/model 4.Rmd", "repo_name": "janithwanni/alpha-zero-data-storm-2", "src_encoding": "UTF-8", "text": "---\r\ntitle: \"svm_combined\"\r\nauthor: \"Ann\"\r\ndate: \"3/13/2021\"\r\noutput: html_document\r\n---\r\n\r\n```{r}\r\nknitr::opts_chunk$set(warning = FALSE,message = FALSE)\r\n```\r\n\r\n```{r}\r\nlibrary(tidyverse)\r\nlibrary(e1071)\r\nlibrary(MLmetrics)\r\n```\r\n\r\n```{r}\r\ndf_trainValidate <- read.csv(\"C://Users//Asus//OneDrive//Desktop//DataStorm2021//train_valid_feat_eng_oversample.csv\", stringsAsFactors = TRUE, header = TRUE)\r\ndf_train <-read.csv(\"C://Users//Asus//OneDrive//Desktop//DataStorm2021//train_feat_eng_oversample.csv\", stringsAsFactors = TRUE, header = TRUE)\r\ndf_validate <-read.csv(\"C://Users//Asus//OneDrive//Desktop//DataStorm2021//valid_preproc.csv\", stringsAsFactors = TRUE, header = TRUE)\r\n\r\n\r\n```\r\n\r\n```{r}\r\nmodel1 <- svm(Reservation_Status ~.,df_train, cost = 5, type = \"nu-classification\")\r\n\r\nmodel2 <- svm(Reservation_Status ~.,df_trainValidate, cost = 5, type = \"nu-classification\")\r\n\r\nvalidate1 <- df_validate\r\nvalidate2 <- df_validate\r\n\r\nvalidate1$Pred <- predict(model1, validate1)\r\nvalidate2$Pred <- predict(model2, validate2)\r\n\r\ntable(validate1$Pred, validate1$Reservation_Status)\r\ntable(validate2$Pred, 
validate2$Reservation_Status)\r\n\r\n\r\nF1_Score(validate1$Reservation_Status, validate1$Pred)\r\nF1_Score(validate2$Reservation_Status, validate2$Pred)\r\n\r\n\r\n\r\n```\r\n\r\n\r\n" }, { "alpha_fraction": 0.6205059885978699, "alphanum_fraction": 0.6404793858528137, "avg_line_length": 21.75757598876953, "blob_id": "b63144e32203f4cb2a7b47ba4700892ba5585efe", "content_id": "800c9fa64f866d57b46f60b3716aa947ae870b6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 2253, "license_type": "no_license", "max_line_length": 95, "num_lines": 99, "path": "/notebooks/day_1/2-EDA.Rmd", "repo_name": "janithwanni/alpha-zero-data-storm-2", "src_encoding": "UTF-8", "text": "---\ntitle: \"2-EDA\"\nauthor: \"Janith Wanniarachchi\"\ndate: \"3/11/2021\"\noutput: html_document\n---\n\n```{r setup, include=FALSE}\nknitr::opts_chunk$set(echo = TRUE)\nlibrary(tidyverse)\n```\n\n```{r}\ntrain <- read_csv(\"../../data/processed/train_preproc.csv\")\ntest <- read_csv(\"../../data/processed/test_preproc.csv\")\nvalid <- read_csv(\"../../data/processed/valid_preproc.csv\")\n```\n\n```{r}\nglimpse(train)\n```\n\n```{r}\ntable(train$Income)\n\ntrain_temp <- train %>% \n mutate(Income_number = case_when(\n Income == \"<25K\" ~ 25000,\n Income == \">100K\" ~ 100000,\n Income == \"25K --50K\" ~ 37500,\n Income == \"50K -- 100K\" ~ 75000\n ),\n Cost = Duration * Room_Rate,\n Cost_Income = Cost / Income_number,\n Visit_Cancel = case_when(\n Visted_Previously == \"Yes\" & Previous_Cancellations == \"Yes\" ~ \"Visit_n_Cancel\",\n Visted_Previously == \"Yes\" & Previous_Cancellations == \"No\" ~ \"Visit_n_No_Cancel\",\n Visted_Previously == \"No\" & Previous_Cancellations == \"Yes\" ~ \"No_Visit_n_Cancel\",\n Visted_Previously == \"No\" & Previous_Cancellations == \"No\" ~ \"No_Visit_n_No_Cancel\",\n ),\n Total_PAX = Adults + Babies + Children,\n N_Minors = Babies + Children)\n```\n\n```{r}\ntable(train_temp$N_Minors)\n```\n\n\n# Lag ~ Response ANOVA\n```{r}\nggplot(train_temp,aes(x = Reservation_Status,y = Lag))+geom_boxplot()+theme_minimal()\n```\n\n```{r}\naov(Lag ~ Reservation_Status,data=train_temp) %>% summary.lm()\n```\n\nThere is no effect by Lag\n\n# Cost ~ Response ANOVA\n```{r}\nggplot(train_temp,aes(x = Reservation_Status,y = Cost))+geom_boxplot(alpha=0.1)+theme_minimal()\n```\n\n```{r}\naov(Cost ~ Reservation_Status,data=train_temp) %>% summary.lm()\n```\n\nCost has no effect\n\n# Cost_Income ~ Response ANOVA\n```{r}\nggplot(train_temp,aes(x = Reservation_Status,y = Cost_Income))+geom_boxplot()+theme_minimal()\n```\n```{r}\naov(Cost_Income ~ Reservation_Status,data=train_temp) %>% summary.lm()\n```\n\n# Visit Cancel ~ Response Chi Squared\n```{r}\ntable(train_temp$Visit_Cancel,train_temp$Reservation_Status) %>% summary()\n```\n\nVisit Cancel has an effect\n\n# Adults Babies Children With Chi Square\n```{r}\ntable(train_temp$Total_PAX,train_temp$Reservation_Status) %>% summary()\n```\n\n```{r}\ntable(train_temp$N_Minors,train_temp$Reservation_Status) %>% summary()\n```\n\n\n```{r}\nunique(train_temp$Room_Rate)\n```\n" }, { "alpha_fraction": 0.5849858522415161, "alphanum_fraction": 0.6076487302780151, "avg_line_length": 21.74193572998047, "blob_id": "a50cf71d28ffc21e5e02b2e080149b46a6316473", "content_id": "bdc04a1974e14f9202e42622780d5797ccb4cb5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 706, "license_type": "no_license", "max_line_length": 87, "num_lines": 31, "path": 
"/notebooks/day_2/1-Boruta.Rmd", "repo_name": "janithwanni/alpha-zero-data-storm-2", "src_encoding": "UTF-8", "text": "---\ntitle: \"1-Boruta\"\nauthor: \"Janith Wanniarachchi\"\ndate: \"3/12/2021\"\noutput: html_document\n---\n\n```{r setup, include=FALSE}\nknitr::opts_chunk$set(echo = TRUE)\nlibrary(tidyverse)\nlibrary(Boruta)\n```\n\n```{r}\ntrain_os <- read_csv(\"../../data/processed/train_preproc.csv\") %>% \n mutate_if(is.character,factor) %>% \n mutate(Reservation_Status = factor(Reservation_Status,\n levels=c(\"Check-In\",\"Canceled\",\"No-Show\")))\n```\n\n```{r}\nboruta_output <- Boruta(Reservation_Status ~ .,data = train_os,doTrace=100,pValue=0.05)\n```\n\n```{r}\nboruta_output %>% plot(cex.axis=.7, las=2,xlab=\"\")\n```\n\n```{r}\nboruta_output$finalDecision[boruta_output$finalDecision == \"Confirmed\"] %>% names()\n```\n\n" }, { "alpha_fraction": 0.6012433171272278, "alphanum_fraction": 0.6134546995162964, "avg_line_length": 27.865385055541992, "blob_id": "14df724ea98a6dc3ec7826a197cfb6d7a61789bc", "content_id": "284722d111ce2f73218ef7fc6d4fdd2945267263", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 4504, "license_type": "no_license", "max_line_length": 90, "num_lines": 156, "path": "/notebooks/day_1/1-EDA.Rmd", "repo_name": "janithwanni/alpha-zero-data-storm-2", "src_encoding": "UTF-8", "text": "---\ntitle: \"1-EDA\"\nauthor: \"Janith Wanniarachchi\"\ndate: \"3/11/2021\"\noutput: html_document\n---\n\n```{r}\nknitr::opts_chunk$set(warning = FALSE,message = FALSE)\n```\n\n```{r}\nlibrary(tidyverse)\nlibrary(rpart)\nlibrary(randomForest)\nlibrary(MLmetrics)\n```\n\n\n```{r}\npreprocess <- function(tib,test=FALSE){\n date_columns <- c(\"Expected_checkin\",\"Expected_checkout\",\"Booking_date\")\n tib <- tib %>% \n mutate_at(vars(contains(date_columns)),~ as.Date(.x,format=\"%m/%d/%Y\")) %>% \n mutate_if(is.character,factor) %>% \n mutate(Lag = as.numeric(Expected_checkin - Booking_date,units=\"days\"),\n Duration = as.numeric(Expected_checkout - Expected_checkin,units=\"days\")) %>% \n select(-all_of(date_columns)) %>% \n mutate(Income_number = case_when(\n Income == \"<25K\" ~ 25000,\n Income == \">100K\" ~ 100000,\n Income == \"25K --50K\" ~ 37500,\n Income == \"50K -- 100K\" ~ 75000\n ),\n Cost = Duration * Room_Rate,\n Cost_Income = Cost / Income_number,\n Visit_Cancel = factor(case_when(\n Visted_Previously == \"Yes\" & Previous_Cancellations == \"Yes\" ~ \"Visit_n_Cancel\",\n Visted_Previously == \"Yes\" & Previous_Cancellations == \"No\" ~ \"Visit_n_No_Cancel\",\n Visted_Previously == \"No\" & Previous_Cancellations == \"Yes\" ~ \"No_Visit_n_Cancel\",\n Visted_Previously == \"No\" & Previous_Cancellations == \"No\" ~ \"No_Visit_n_No_Cancel\",\n )),\n Total_PAX = Adults + Babies + Children,\n N_Minors = Babies + Children) \n # cat_cols <- tib %>% select_if(is.factor) %>% colnames()\n # cat_cols_keep <- c(\"Meal_Type\",\"Deposit_type\",\"Reservation_Status\")\n # cat_cols_rmv <- cat_cols[!(cat_cols %in% cat_cols_keep)]\n # final_vars <- c(\"Meal\")\n # tib <- tib %>% select(all_of())\n if(!test){\n tib <- tib %>% \n select(-`Reservation-id`) %>% \n mutate(Reservation_Status = factor(Reservation_Status,\n levels=c(\"Check-In\",\"Canceled\",\"No-Show\")))\n }\n tib \n}\n```\n\n```{r}\ntrain <- read_csv(\"../../data/Hotel-A-train.csv\") %>% preprocess()\ntest <- read_csv(\"../../data/Hotel-A-test.csv\") %>% preprocess(test=TRUE)\nvalid <- read_csv(\"../../data/Hotel-A-validation.csv\") %>% preprocess()\n# train 
%>% write_csv(\"../../data/processed/train_preproc.csv\")\n# test %>% write_csv(\"../../data/processed/test_preproc.csv\")\n# valid %>% write_csv(\"../../data/processed/valid_preproc.csv\")\n```\n\n```{r}\nglimpse(train)\n```\n\n\n```{r}\ntable(train$Reservation_Status) / nrow(train)\ntable(valid$Reservation_Status) / nrow(valid)\n```\n\n```{r}\nskimr::skim(train)\n```\n\n```{r}\ncat_cols <- train %>% select_if(is.factor) %>% colnames()\nmap_dfr(cat_cols, function(x){\n pval <- table(train[[x]],train$Reservation_Status) %>% summary() %>% .$p.value \n return(list(column = x,rejected = pval < 0.05))\n}) %>% filter(rejected == TRUE)\n```\n\n```{r}\n# cols <- c(train %>% select_if(~ is.factor(.x)) %>% colnames(),\"Reservation_Status\")\n# GGally::ggpairs(train %>% select(all_of(cols)),\n# aes(color=Reservation_Status,alpha=0.2),progress = FALSE)\n# ggsave(\"outputs/pair_plot_colored.jpg\",width = 841,height = 594,units = \"mm\")\n```\n\n```{r}\nlibrary(ggfortify)\npca_res <- prcomp(train %>% select_if(is.numeric), scale. = TRUE)\nautoplot(pca_res, data = train, colour = 'Reservation_Status')\n```\n\n\n```{r}\n# TODO insert evaluation pipeline here\n```\n\n```{r}\nmodel <- randomForest(Reservation_Status ~ .,data=train)\n```\n\n```{r}\nplot_importances <- function(model){\n imp <- importance(model)\n imp %>% \n as_tibble() %>% \n mutate(variable = factor(rownames(imp))) %>% \n arrange(-MeanDecreaseGini) %>% \n mutate(variable = fct_reorder(variable,MeanDecreaseGini)) %>% \n ggplot(aes(x = variable,y=MeanDecreaseGini)) + \n geom_col()+\n coord_flip()\n}\nplot_importances(model)\n```\n\n\n```{r}\nevaluate <- function(y_preds,y_true){\n print(ConfusionMatrix(y_preds,y_true))\n print(F1_Score(y_preds,y_true))\n print(Precision(y_preds,y_true))\n print(Recall(y_preds,y_true))\n print(caret::confusionMatrix(y_preds,reference=y_true,mode=\"everything\"))\n}\n```\n\n```{r}\ny_preds <- predict(model,newdata=valid %>% select(-Reservation_Status))\nevaluate(y_preds,valid$Reservation_Status)\n```\n\n\n```{r}\nmake_submission <- function(test_preds,name){\n submission <- tibble(`Reservation-id` = test$`Reservation-id`,\n Reservation_status = test_preds)\n submission %>% write_csv(paste(\"../../data/submissions/\",name,\"-\",Sys.time(),\".csv\"))\n}\n```\n\n```{r}\ntest_preds <- predict(model,newdata = test)\nmake_submission(as.numeric(test_preds),\"naive\")\n```\n\n" }, { "alpha_fraction": 0.6823869347572327, "alphanum_fraction": 0.6843118667602539, "avg_line_length": 39, "blob_id": "266956143a7f7d506d1245201f142a0848bcd17c", "content_id": "944fb988a4a6f09ea41b45d41a4a21fe101ab1dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1039, "license_type": "no_license", "max_line_length": 110, "num_lines": 26, "path": "/notebooks/day_3/generate_oversample_with_validation.py", "repo_name": "janithwanni/alpha-zero-data-storm-2", "src_encoding": "UTF-8", "text": "from imblearn.over_sampling import SMOTENC\nimport pandas as pd\n\n\ndef oversample(dataframe: pd.DataFrame, cat_feats):\n X = dataframe.drop(\"Reservation_Status\",axis=\"columns\")\n y = dataframe.loc[:,\"Reservation_Status\"]\n smote_enc = SMOTENC(categorical_features=cat_feats,random_state=42)\n X_res, y_res = smote_enc.fit_resample(X,y)\n out_df = X_res.copy(deep=True)\n out_df[\"Reservation_Status\"] = y_res\n return out_df\n\n\ntrain_df = pd.read_csv(\"../../data/processed/train_preproc.csv\")\nvalid_df = pd.read_csv(\"../../data/processed/valid_preproc.csv\")\ndf = 
pd.concat([train_df,valid_df],axis=\"rows\")\n\ndf = df.loc[:,[\"Reservation_Status\",\"N_Minors\",\"Total_PAX\",\n \"Income_number\",\"Cost\",\"Cost_Income\",\"Lag\",\"Meal_Type\"]]\n\ncategorical_columns = [\"Meal_Type\"]\n\ncat_feats = [df.drop(\"Reservation_Status\",axis=\"columns\").columns.get_loc(col) for col in categorical_columns]\nos_df = oversample(df,cat_feats)\nos_df.to_csv(\"../../data/processed/oversampled/train_valid_feat_eng_oversample.csv\",index=False)" }, { "alpha_fraction": 0.642042338848114, "alphanum_fraction": 0.6474741697311401, "avg_line_length": 29.180328369140625, "blob_id": "205796fad26cd52f0097014b1cd7db314ca2fe27", "content_id": "601d72967a28b7f53c7fca1b8398272cf75f0bec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 1841, "license_type": "no_license", "max_line_length": 92, "num_lines": 61, "path": "/notebooks/day_2/1-RandomForest.Rmd", "repo_name": "janithwanni/alpha-zero-data-storm-2", "src_encoding": "UTF-8", "text": "---\ntitle: \"1-RandomForest\"\nauthor: \"Janith Wanniarachchi\"\ndate: \"3/11/2021\"\noutput: html_document\n---\n\n```{r setup, include=FALSE}\nknitr::opts_chunk$set(echo = TRUE)\n```\n\n```{r}\nlibrary(tidyverse)\nlibrary(randomForest)\nlibrary(MLmetrics)\ntrain <- read_csv(\"../../data/processed/oversampled/train_oversample.csv\") %>% \n mutate_if(is.character,factor) %>% \n mutate(Reservation_Status = factor(Reservation_Status,\n levels=c(\"Check-In\",\"Canceled\",\"No-Show\"))) %>% \n select(Reservation_Status,N_Minors,Total_PAX,Income_number,Cost,Cost_Income,Lag,Meal_Type)\nvalid <- read_csv(\"../../data/processed/valid_preproc.csv\") %>% \n mutate_if(is.character,factor) %>% \n mutate(Reservation_Status = factor(Reservation_Status,\n levels=c(\"Check-In\",\"Canceled\",\"No-Show\"))) %>% \n select(Reservation_Status,N_Minors,Total_PAX,Income_number,Cost,Cost_Income,Lag,Meal_Type)\n```\n\n```{r}\nmodel <- randomForest(Reservation_Status ~ .,data=train)\n```\n```{r}\nplot_importances <- function(model){\n imp <- importance(model)\n imp %>% \n as_tibble() %>% \n mutate(variable = factor(rownames(imp))) %>% \n arrange(-MeanDecreaseGini) %>% \n mutate(variable = fct_reorder(variable,MeanDecreaseGini)) %>% \n ggplot(aes(x = variable,y=MeanDecreaseGini)) + \n geom_col()+\n coord_flip()\n}\nplot_importances(model)\n```\n\n```{r}\nevaluate <- function(y_preds,y_true){\n cm <- caret::confusionMatrix(y_preds,reference=y_true,mode=\"everything\")\n print(ConfusionMatrix(y_preds,y_true))\n print(cm)\n # print(F1_Score_macro_weighted(y_preds,y_true))\n # print(Precision(y_preds,y_true))\n # print(Recall(y_preds,y_true))\n print(mean(cm$byClass[,\"F1\"],na.rm=TRUE))\n}\n```\n\n```{r}\ny_preds <- predict(model,newdata=valid %>% select(-Reservation_Status))\nevaluate(y_preds,valid$Reservation_Status)\n```\n" }, { "alpha_fraction": 0.6940298676490784, "alphanum_fraction": 0.6958954930305481, "avg_line_length": 47.727272033691406, "blob_id": "7d5a753b5b7812dfa69297fc1fdc679bb5d14e50", "content_id": "3d0f6ea298b10e0fd005d3a788cac6104b20ba02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1072, "license_type": "no_license", "max_line_length": 116, "num_lines": 22, "path": "/notebooks/day_1/oversampler.py", "repo_name": "janithwanni/alpha-zero-data-storm-2", "src_encoding": "UTF-8", "text": "from imblearn.over_sampling import SMOTENC\nimport pandas as pd\nfrom typing import List\n\ndef oversample(dataframe: pd.DataFrame, cat_feats: 
List[int]):\n X = dataframe.drop(\"Reservation_Status\",axis=\"columns\")\n y = dataframe.loc[:,\"Reservation_Status\"]\n smote_enc = SMOTENC(categorical_features=cat_feats,random_state=42)\n X_res, y_res = smote_enc.fit_resample(X,y)\n out_df = X_res.copy(deep=True)\n out_df[\"Reservation_Status\"] = y_res\n return out_df\n\n\ntrain_df = pd.read_csv(\"../../data/processed/train_preproc.csv\")\ncategorical_columns = [\"Gender\",\"Ethnicity\",\"Educational_Level\",\"Income\",\"Country_region\",\"Hotel_Type\",\n \"Meal_Type\",\"Visted_Previously\",\"Previous_Cancellations\",\"Deposit_type\",\"Booking_channel\",\n \"Required_Car_Parking\",\"Use_Promotion\",\"Visit_Cancel\"]\n\ncat_feats = [train_df.drop(\"Reservation_Status\",axis=\"columns\").columns.get_loc(col) for col in categorical_columns]\nos_df = oversample(train_df,cat_feats)\nos_df.to_csv(\"../../data/processed/oversampled/train_oversample.csv\",index=False)\n" } ]
11
komosam/django-deployment
https://github.com/komosam/django-deployment
5d994ed6bb3ed430aa6d09cb91ef154ef1a86bfa
1ee2f5ed998b77dbe910c213961f91168b26955b
1bc2494aadd177ddbc2f13007cfa521097047727
refs/heads/master
2020-04-17T02:50:02.567538
2019-01-17T03:46:22
2019-01-17T03:46:22
166,154,840
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.699999988079071, "avg_line_length": 24.714284896850586, "blob_id": "e10080f9bf688b6c23b73fd887d0d1b287bb1d51", "content_id": "60f04d2a2faceac6e2baffd7d87000c33304c6dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 180, "license_type": "no_license", "max_line_length": 41, "num_lines": 7, "path": "/learningtemplates/bikes/forms.py", "repo_name": "komosam/django-deployment", "src_encoding": "UTF-8", "text": "from django import forms\nfrom bikes.models import registrations\n\nclass registration_form(forms.ModelForm):\n class Meta():\n model=registrations\n fields = '__all__'\n" }, { "alpha_fraction": 0.7633333206176758, "alphanum_fraction": 0.7733333110809326, "avg_line_length": 32.33333206176758, "blob_id": "3a68c7ea8e93e2d6f3c3e093d05aff3862bbe984", "content_id": "01fc866e362a0934c2b15f20df80ae8da56307b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 300, "license_type": "no_license", "max_line_length": 62, "num_lines": 9, "path": "/learningtemplates/bikes/models.py", "repo_name": "komosam/django-deployment", "src_encoding": "UTF-8", "text": "from django.db import models\n#from datetime import datetime\nfrom django.utils import timezone\n# Create your models here.\n\nclass registrations(models.Model):\n username = models.CharField(max_length=200)\n email = models.EmailField()\n created_date = models.DateTimeField(default=timezone.now)\n" }, { "alpha_fraction": 0.68282550573349, "alphanum_fraction": 0.68282550573349, "avg_line_length": 26.769229888916016, "blob_id": "e343a8cc2d60057611fb391b56eb3b867c3df471", "content_id": "824c0280d39ee905083e2caa45b70a019baab96f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 722, "license_type": "no_license", "max_line_length": 67, "num_lines": 26, "path": "/learningtemplates/bikes/views.py", "repo_name": "komosam/django-deployment", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom bikes.forms import registration_form\n# Create your views here.\n\ndef index(request):\n return render(request,'bikes_temp/index.html')\n\n#def others(request):\n #form = registration_form()\n\n #if request.method == 'POST':\n #if form.is_valid():\n #form = registration_form(request.POST)\n #form.save(commit=True)\n #return index(request)\n #else: print ('form invalid')\n #return render(request,'bikes_temp/others.html',{'form':form })\n\ndef base(request):\n return render(request,'bikes_temp/base.html')\n\ndef other(request):\n return render(request,'bikes_temp/other.html')\n\ndef brand(request):\n return render(request,'bikes_temp/brand.html')\n" }, { "alpha_fraction": 0.6548672318458557, "alphanum_fraction": 0.6548672318458557, "avg_line_length": 17.83333396911621, "blob_id": "b60b96ca9b72ec75a20d8026fd999b2ed545d6c9", "content_id": "cc2c34ea77936e261c21c7c6aa6d73a39ea81d67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 226, "license_type": "no_license", "max_line_length": 45, "num_lines": 12, "path": "/learningtemplates/bikes/urls.py", "repo_name": "komosam/django-deployment", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom bikes import views\n\n\napp_name = 'bikes'\n\n\nurlpatterns = [\n path('brand/', views.brand,name='brand'),\n path('base/',views.base,name='base'),\n path('other/',views.other,name='other')\n]\n" } ]
4
michaelpbAD/Dual-Net-Gaming-with-Python
https://github.com/michaelpbAD/Dual-Net-Gaming-with-Python
5aeb397e3d5715301cd24b38a4af621fa414cc53
31de70314da9e9ff81246c88a5da7873e926116c
206d655455208b7653cf258e929f2dbb529e32c1
refs/heads/master
2021-05-14T23:01:55.029436
2017-12-09T16:28:12
2017-12-09T16:28:12
105,742,230
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.526770293712616, "alphanum_fraction": 0.5401074886322021, "avg_line_length": 39.52191162109375, "blob_id": "f4cb66ba11d85636c6595b1a83ec4796d0da996b", "content_id": "ade99d9ae3d9db5c89ad12b18952ddb8077bab02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10422, "license_type": "no_license", "max_line_length": 119, "num_lines": 251, "path": "/VierOpEenRij.py", "repo_name": "michaelpbAD/Dual-Net-Gaming-with-Python", "src_encoding": "UTF-8", "text": "\"\"\" Vier op een rij - Client\"\"\"\r\nimport pygame\r\nfrom PodSixNet.Connection import ConnectionListener, connection\r\nfrom time import sleep\r\n\r\nclass VierOpEenRijGame(ConnectionListener):\r\n    def Network_close(self, data):\r\n        exit()\r\n\r\n    def Network_connected(self, data):\r\n        print(\"Connected to the server.\")\r\n\r\n    def Network_error(self, data):\r\n        print(\"Error connecting to the server.\")\r\n        exit()\r\n\r\n    def Network_disconnected(self, data):\r\n        print(\"Disconnected from the server.\")\r\n        exit()\r\n\r\n    def Network_nickname(self,data):\r\n        self.playerNaam[data[\"playerNR\"]-1] = data[\"nickname\"]\r\n\r\n    def Network_startgame(self, data):\r\n        self.running = True\r\n        self.num = data[\"player\"]\r\n        self.gameid = data[\"gameid\"]\r\n        self.playerAantal = data[\"playerAantal\"]\r\n        self.boardBoxH = data[\"boardBoxH\"]\r\n        self.boardBoxW = data[\"boardBoxW\"]\r\n\r\n        # define game board dimensions\r\n        self.board = [[0 for x in range(self.boardBoxW)] for y in range(self.boardBoxH)]\r\n\r\n        #adjust the screen dimensions to the game size\r\n        # gameboard dimensions px\r\n        self.boardH = self.boardBoxH * self.boxD - (self.boardBoxH - 1) * self.boxB + self.boxB * 4\r\n        self.boardW = self.boardBoxW * self.boxD - (self.boardBoxW - 1) * self.boxB + self.boxB * 4\r\n        # score board height\r\n        self.panelH = 200\r\n        # window dimensions\r\n        self.width = self.boardW\r\n        self.height = self.boardH + self.boxD + self.panelH\r\n        # score board width\r\n        self.panelW = self.width\r\n        # initialize the screen with windows dimensions\r\n        self.screen = pygame.display.set_mode((self.width, self.height))\r\n        pygame.display.set_caption(\"Vier op een rij\")\r\n\r\n    def Network_place(self, data):\r\n        # get attributes\r\n        self.pijlx = data[\"pijlx\"]\r\n        K_DOWN = data[\"K_DOWN\"]\r\n\r\n        if K_DOWN == True and self.board[0][self.pijlx] == 0:\r\n            self.board[0][self.pijlx] = self.playerTurn\r\n            self.playerTurn = data[\"playerTurn\"]\r\n            self.pijl = self.playerBox[self.playerTurn - 1]\r\n\r\n    def Network_win(self, data):\r\n        self.wint = data[\"speler\"]\r\n        self.scorePlayer = data[\"score\"]\r\n\r\n    def Network_boardWipe(self, data):\r\n        self.wint = data[\"wint\"]\r\n        self.board = data[\"board\"]\r\n        self.playerTurn = data[\"playerTurn\"]\r\n        self.pijl = self.playerBox[self.playerTurn - 1]\r\n\r\n    # initialize VierOpEenRijGame\r\n    def __init__(self, socket, nickname):\r\n        pygame.init()\r\n        pygame.font.init()\r\n        # dimensions tiles game board\r\n        self.boardBoxH = 7\r\n        self.boardBoxW = 14\r\n\r\n        # box dimensions and border\r\n        self.boxD = 50 # px length square side\r\n        self.boxB = int(self.boxD / 10) # px border square\r\n\r\n        # gameboard dimensions px\r\n        self.boardH = self.boardBoxH * self.boxD - (self.boardBoxH - 1) * self.boxB + self.boxB * 4\r\n        self.boardW = self.boardBoxW * self.boxD - (self.boardBoxW - 1) * self.boxB + self.boxB * 4\r\n\r\n        # score board height\r\n        self.panelH = 200\r\n        # window dimensions\r\n        self.width = self.boardW\r\n        self.height = self.boardH + 
self.boxD + self.panelH\r\n        # score board width\r\n        self.panelW = self.width\r\n\r\n        # initialize the screen with windows dimensions\r\n        self.screen = pygame.display.set_mode((self.width, self.height))\r\n        pygame.display.set_caption(\"Vier op een rij\")\r\n\r\n        # initialize pygame clock\r\n        self.clock = pygame.time.Clock()\r\n        self.initGraphics()\r\n\r\n        # define game board dimensions\r\n        self.board = [[0 for x in range(self.boardBoxW)] for y in range(self.boardBoxH)]\r\n\r\n        # define who starts\r\n        self.playerTurn = 1\r\n\r\n        # define player names\r\n        self.playerNaam = [\"speler1\", \"speler2\", \"speler3\", \"speler4\"]\r\n        # define player colors\r\n        self.playerBox = [self.greenBox, self.blueBox, self.redBox, self.yellowBox]\r\n        # define scores\r\n        self.scorePlayer = [0, 0, 0, 0]\r\n        self.wint = 0\r\n\r\n        # define pijl\r\n        self.pijl = self.playerBox[self.playerTurn - 1]\r\n        self.pijlx = 0\r\n        self.pijly = 0\r\n\r\n        # try to connect\r\n        try:\r\n            self.Connect((socket[0], int(socket[1])))\r\n        except:\r\n            pass\r\n\r\n        self.gameid = None\r\n        self.num = None\r\n        self.running = False\r\n        # wait until game starts\r\n        while not self.running:\r\n            self.Pump()\r\n            connection.Pump()\r\n            sleep(0.001)\r\n        # determine attributes from player #\r\n        self.playerNR = self.num + 1\r\n        self.playerNaam[self.num] = \"me > \"+nickname\r\n        connection.Send({\"action\": \"nickname\", \"nickname\": nickname, \"gameid\": self.gameid, \"playerNR\": self.playerNR})\r\n\r\n    # initialize graphics images\r\n    def initGraphics(self):\r\n        self.legeBox = pygame.transform.scale(pygame.image.load(\"img/legeBox.png\"), (self.boxD, self.boxD))\r\n        self.greenBox = pygame.transform.scale(pygame.image.load(\"img/greenBox.png\"), (self.boxD, self.boxD))\r\n        self.blueBox = pygame.transform.scale(pygame.image.load(\"img/blueBox.png\"), (self.boxD, self.boxD))\r\n        self.redBox = pygame.transform.scale(pygame.image.load(\"img/redBox.png\"), (self.boxD, self.boxD))\r\n        self.yellowBox = pygame.transform.scale(pygame.image.load(\"img/yellowBox.png\"), (self.boxD, self.boxD))\r\n        #self.scorePanel = pygame.transform.scale(pygame.image.load(\"img/scorePanel.png\"), (self.panelW, self.panelH))\r\n\r\n    # update game\r\n    def update(self):\r\n        connection.Pump()\r\n        self.Pump()\r\n        # sleep to make the game 60 fps\r\n        self.clock.tick(60)\r\n        # clear the screen\r\n        self.screen.fill((255, 255, 255))\r\n        self.drawBoard()\r\n        self.drawPanel()\r\n\r\n        # fill the board with the winner's color\r\n        if self.wint != 0:\r\n            for x in range(self.boardBoxW):\r\n                self.board[0][x] = self.wint\r\n\r\n        # update the screen\r\n        pygame.display.flip()\r\n\r\n        # events/key press\r\n        self.eventAndKeys()\r\n\r\n    # handling events and key presses\r\n    def eventAndKeys(self):\r\n        for event in pygame.event.get():\r\n            # quit if the quit button was pressed\r\n            if event.type == pygame.QUIT:\r\n                pygame.display.quit()\r\n                exit()\r\n            # key press\r\n            if event.type == pygame.KEYDOWN and self.playerTurn == self.playerNR:\r\n                # move pijl left on left arrow key\r\n                if event.key == pygame.K_LEFT:\r\n                    if 0 < self.pijlx:\r\n                        self.pijlx -= 1\r\n                        connection.Send(\r\n                            {\"action\": \"place\", \"playerTurn\": self.playerTurn, \"pijlx\": self.pijlx, \"K_DOWN\": False,\r\n                             \"gameid\": self.gameid, \"playerNR\": self.playerNR})\r\n                # move pijl right on right arrow key\r\n                if event.key == pygame.K_RIGHT:\r\n                    if self.pijlx < (self.boardBoxW - 1):\r\n                        self.pijlx += 1\r\n                        connection.Send(\r\n                            {\"action\": \"place\", \"playerTurn\": self.playerTurn, \"pijlx\": self.pijlx, \"K_DOWN\": False,\r\n                             \"gameid\": 
self.gameid, \"playerNR\": self.playerNR})\r\n                # place box on enter or down arrow key\r\n                if (event.key == pygame.K_KP_ENTER or event.key == pygame.K_DOWN) and self.board[0][self.pijlx] == 0:\r\n                    connection.Send(\r\n                        {\"action\": \"place\", \"playerTurn\": self.playerTurn, \"pijlx\": self.pijlx, \"K_DOWN\": True,\r\n                         \"gameid\": self.gameid, \"playerNR\": self.playerNR})\r\n\r\n    # draw dropped boxes, game board and pijl\r\n    def drawBoard(self):\r\n        # drop box\r\n        for x in range(self.boardBoxW):\r\n            for y in range(self.boardBoxH - 1):\r\n                if self.board[y][x] != 0:\r\n                    if self.board[y + 1][x] == 0:\r\n                        self.board[y + 1][x] = self.board[y][x]\r\n                        self.board[y][x] = 0\r\n        # draw game board\r\n        for x in range(self.boardBoxW):\r\n            for y in range(self.boardBoxH):\r\n                if self.board[y][x] == 0:\r\n                    self.screen.blit(self.legeBox, [(self.boxB * 2) + ((x) * self.boxD) - self.boxB * x,\r\n                                                    self.boxD + (self.boxB * 2) + ((y) * self.boxD) - self.boxB * y])\r\n                if self.board[y][x] != 0:\r\n                    self.screen.blit(self.playerBox[self.board[y][x] - 1],\r\n                                     [(self.boxB * 2) + ((x) * self.boxD) - self.boxB * x,\r\n                                      self.boxD + (self.boxB * 2) + ((y) * self.boxD) - self.boxB * y])\r\n        # place pijl\r\n        self.screen.blit(self.pijl, ((self.boxB * 2) + ((self.pijlx) * self.boxD) - self.boxB * self.pijlx,\r\n                                     (self.boxB * 2) + ((self.pijly) * self.boxD) - self.boxB * self.pijly))\r\n    # draw the score panel\r\n    def drawPanel(self):\r\n        panelP = self.height - self.panelH\r\n        # panel background color or image\r\n        # self.screen.blit(self.scorePanel, [0, panelP])\r\n        pygame.draw.rect(self.screen, (0, 0, 0), [0, panelP, self.panelW, self.panelH])\r\n        # print Player Score Labels\r\n        x, y = 0, panelP\r\n        for i in range(self.playerAantal):\r\n            if (self.width / 2) > 300:\r\n                if i % 2 == 0:\r\n                    x = 25\r\n                    y += 35\r\n                else:\r\n                    x = (self.width / 2) + 25\r\n            else:\r\n                x = 25\r\n                y += 35\r\n            self.printPlayerScoreLabel(x, y, self.playerBox[i], self.playerNaam[i], self.scorePlayer[i])\r\n    # print player scores\r\n    def printPlayerScoreLabel(self, x, y, icon, naam, score):\r\n        myfont = pygame.font.SysFont(None, 42)\r\n\r\n        fScore = myfont.render(str(score), 1, (255, 255, 255))\r\n        fNaam = myfont.render(str(naam), 1, (255, 255, 255))\r\n\r\n        wNaam, hNaam = fNaam.get_size()\r\n        self.screen.blit(pygame.transform.scale(icon, (25, 25)), (x, y))\r\n        self.screen.blit(fNaam, (x + 50, y))\r\n        self.screen.blit(fScore, (x + 250, y))\r\n" }, { "alpha_fraction": 0.7037037014961243, "alphanum_fraction": 0.7283950448036194, "avg_line_length": 42.97142791748047, "blob_id": "393cef9d1cfc9dc1a6c46c51ca02cc5ef8c19bd8", "content_id": "9d83cbcf34595f38f40ba6717bc7783d2240ac94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1539, "license_type": "no_license", "max_line_length": 215, "num_lines": 35, "path": "/README.MD", "repo_name": "michaelpbAD/Dual-Net-Gaming-with-Python", "src_encoding": "UTF-8", "text": "# Dual-Net-Gaming-with-Python\n## Documentation \"how to git\":\n1. Clone repository with your favourite Git GUI or CLI. (I use GitKraken)\n2. Make a LOCAL branch of the last MASTER commit called e.g. \"develop\" (don't push that to remote repository)\n3. Select your local branch: that's where you will be working.\n4. When a feature (or your work) is done:\n * Stage changes you want to stage\n * Commit changes (with a meaningful title and description)\n5. Go to MASTER branch\n6. Pull from remote\n7. Merge DEVELOP into MASTER\n * solve merge conflicts\n8. Push commit\n9. Go back to step 3\n10. 
COMMUNICATE WITH YOUR COLLEAGUES\n\n# Dual gaming with Python\nWe are going to make \"VierOpEenRij\" in python with the following libraries:\n* PodSixNet: https://github.com/chr15m/PodSixNet/tree/python3\n* pygame: http://pygame.org/news\n* tkinter: https://wiki.python.org/moin/TkInter, http://www.tkdocs.com/tutorial/index.html, https://docs.python.org/3.5/library/tkinter.html, http://effbot.org/tkinterbook/tkinter-events-and-bindings.htm#protocols\n* re (regular expressions): https://docs.python.org/3.5/library/re.html\n* the \"sleep\" function of the module \"time\": https://docs.python.org/3/library/time.html\n\n# How to play this game?\n1. Install Python 3.5 or 3.6\n2. Install \"pip\" for python 3.5 or 3.6 (for Linux: \"pip3\")\n3. \n * Windows: pip install pygame\n * Linux: sudo pip3 install pygame\n4. If you don't have \"tkinter\", install it as well\n5.\n * Windows: tkinter ships with the python.org installer\n * Linux: sudo apt-get install python3-tk\n6. python3 start.py\n" }, { "alpha_fraction": 0.730715274810791, "alphanum_fraction": 0.739130437374115, "avg_line_length": 22.700000762939453, "blob_id": "82a4ef3c6180aaee1c7aff781eabce8fb05f7be7", "content_id": "66d0ffbc756ea260f5445547e7738be049dc7d3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 713, "license_type": "no_license", "max_line_length": 89, "num_lines": 30, "path": "/testing material/vieropeenrijClient.py", "repo_name": "michaelpbAD/Dual-Net-Gaming-with-Python", "src_encoding": "UTF-8", "text": "\n\n# connect to the server - optionally pass hostname and port like: (\"mccormick.cx\", 31425)\n\n\nfrom PodSixNet.Connection import ConnectionListener\n\nclass MyNetworkListener(ConnectionListener):\n\n\tdef Network(self, data):\n\t\tprint('network data:', data)\n\n\tdef Network_connected(self, data):\n\t\tprint(\"connected to the server\")\n\n\tdef Network_error(self, data):\n\t\tprint(\"error:\", data['error'][1])\n\n\tdef Network_disconnected(self, data):\n\t\tprint(\"disconnected from the server\")\n\n\tdef Network_myaction(self, data):  # listener callbacks are instance methods and need self\n\t\tprint(\"myaction:\", data)\n\n\t\n\n\nclass MyPlayerListener(ConnectionListener):\n\n\tdef Network_numplayers(self, data):  # listener callbacks are instance methods and need self\n\t\t# update gui element displaying the number of currently connected players\n\t\tprint(data['players'])\n" }, { "alpha_fraction": 0.5832312107086182, "alphanum_fraction": 0.5927923321723938, "avg_line_length": 32.99166488647461, "blob_id": "54a6ad93926708cbea5e2f40aee17074fedf522a", "content_id": "ba2c5931c1fac7a5b15fe13672ab3014070d2424", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4079, "license_type": "no_license", "max_line_length": 148, "num_lines": 120, "path": "/testing material/vieropeenrijServer.py", "repo_name": "michaelpbAD/Dual-Net-Gaming-with-Python", "src_encoding": "UTF-8", "text": "from time import sleep\nfrom PodSixNet.Server import Server\nfrom PodSixNet.Channel import Channel\n\nclass ClientChannel(Channel):\n    def Network(self, data):\n        self.gameid = data[\"gameid\"]\n        print(data)\n\n    def Network_placeBox(self, data):\n        # deconsolidate all of the data from the dictionary\n        playerTurn = data[\"playerTurn\"]\n        pijlx = data[\"pijlx\"]\n        playerNR = data[\"playerNR\"]\n        # id of game given by server at start of game\n        self.gameid = data[\"gameid\"]\n        # tells server to place box\n        self._server.placeBox(playerTurn, pijlx, data, self.gameid, playerNR)\n\n    def Network_movePijl(self, data):\n        pijlx = data[\"pijlx\"]\n        self.gameid = data[\"gameid\"]\n        
self._server.movePijl(pijlx, self.gameid, data)\n\n def Close(self):\n self._server.close(self.gameid)\n\nclass vieropeenrijServer(Server):\n channelClass = ClientChannel\n\n def __init__(self, *args, **kwargs):\n Server.__init__(self, *args, **kwargs)\n self.games = []\n self.queue = None\n self.currentIndex = 0\n self.numPlayers = 0\n\n def Connected(self, channel, addr):\n self.numPlayers += 1\n print('new connection:', channel)\n print(self.queue)\n if self.queue == None:\n self.currentIndex += 1\n channel.gameid = self.currentIndex\n self.queue = Game(channel, self.currentIndex)\n else:\n channel.gameid = self.currentIndex\n self.queue.player[(self.numPlayers-1)%self.queue.playerAantal] = channel\n\n if self.numPlayers > 1 and self.numPlayers%self.queue.playerAantal == 0:\n for i in range(self.queue.playerAantal):\n self.queue.player[i].Send({\"action\": \"startgame\", \"player\": i, \"gameid\": self.queue.gameid,\"playerAantal\": self.queue.playerAantal})\n self.games.append(self.queue)\n for a in self.games:\n print(a)\n self.queue = None\n\n def movePijl(self,pijlx,gameid, data):\n game = [a for a in self.games if a.gameid == gameid]\n if len(game) == 1:\n game[0].movePijl(pijlx, data)\n\n def placeBox(self, playerTurn, pijlx, data, gameid, playerNR):\n game = [a for a in self.games if a.gameid == gameid]\n if len(game) == 1:\n game[0].placeBox(playerTurn, pijlx, data, playerNR)\n def close(self,gameid):\n try:\n game = [a for a in self.games if a.gameid == gameid][0]\n for i in range(game.playerAantal):\n game.player[i].Send({\"action\": \"close\", \"gameid\": gameid})\n except:\n pass\n\nclass Game: # controleren\n def __init__(self, player0, currentIndex):\n # whose turn\n self.Turn = 1\n self.playerAantal = 2\n # dimensions tiles game board\n self.boardBoxH = 7\n self.boardBoxW = 14\n # define game board dimensions\n self.board = [[0 for x in range(self.boardBoxW)] for y in range(self.boardBoxH)]\n # initialize the players including the one who started the game\n self.player=[player0,None,None,None]\n # gameid of game\n self.gameid = currentIndex\n\n def movePijl(self, pijlx, data):\n for i in range(self.playerAantal):\n self.player[i].Send(data)\n\n def placeBox(self, playerTurn, pijlx, data, playerNR):\n # make sure it's their turn\n if playerNR == self.Turn:\n # and self.board[0][pijlx]==0:\n # self.board[0][pijlx]=self.playerTurn\n if self.playerAantal > self.Turn:\n self.Turn += 1\n else:\n self.Turn = 1\n data[\"playerTurn\"] = self.Turn\n # send data and turn data to each player\n for i in range(self.playerAantal):\n self.player[i].Send(data)\n\n\nprint(\"STARTING SERVER ON LOCALHOST\")\nserver = vieropeenrijServer(localaddr=(\"LOCALHOST\", 31425))\n\nwhile 1:\n server.Pump()\n sleep(0.01)\n\n\n # def updateServer():\n # print(\"Clock is ticking\")\n # vieropenrijServer.Pump()\n # sleep(0.0001)\n" }, { "alpha_fraction": 0.7748344540596008, "alphanum_fraction": 0.7748344540596008, "avg_line_length": 32.55555725097656, "blob_id": "ff4da49339bdc8a46d0313657bdbdf0054643984", "content_id": "97777c562bd94c3a49439caeffbd95eebfaac26d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 302, "license_type": "no_license", "max_line_length": 70, "num_lines": 9, "path": "/start.py", "repo_name": "michaelpbAD/Dual-Net-Gaming-with-Python", "src_encoding": "UTF-8", "text": "\"\"\" This python script starts screen_joinorhost.py (object) \"\"\"\n# import screen_joinorhost.py\nimport screen_joinorhost\n\n# make object to initialize 
the window for joining or hosting a server\nstart = screen_joinorhost.joinorhost()\n# keep updating object\nwhile not start.closedWindow:\n start.update()\n" }, { "alpha_fraction": 0.5118188261985779, "alphanum_fraction": 0.522292971611023, "avg_line_length": 38.03314971923828, "blob_id": "c6207656b05e8d5bde6dff4e8cbc36a1eadd1682", "content_id": "f6484179f78ab9188ddf052920659e2f313580bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7065, "license_type": "no_license", "max_line_length": 219, "num_lines": 181, "path": "/vieropeenrijserver.py", "repo_name": "michaelpbAD/Dual-Net-Gaming-with-Python", "src_encoding": "UTF-8", "text": "\"\"\" Vier op een rij - Server\"\"\"\nfrom time import sleep\nfrom PodSixNet.Server import Server\nfrom PodSixNet.Channel import Channel\n\nclass ClientChannel(Channel):\n def Network(self, data):\n print(data)\n\n def Network_place(self, data):\n # deconsolidate all of the data from the dictionary\n playerTurn = data[\"playerTurn\"]\n pijlx = data[\"pijlx\"]\n K_DOWN = data[\"K_DOWN\"]\n playerNR = data[\"playerNR\"]\n\n # id of game given by server at start of game\n self.gameid = data[\"gameid\"]\n\n # tells server to place line\n self._server.placeLine(playerTurn, pijlx, K_DOWN, data, self.gameid, playerNR)\n\n def Network_nickname(self, data):\n self._server.nickname(data)\n\n def Close(self):\n self._server.close(self.gameid)\n\nclass vieropeenrijServer(Server):\n def __init__(self, maxPlayers,*args, **kwargs):\n Server.__init__(self, *args, **kwargs)\n self.maxPlayers = maxPlayers\n self.games = []\n self.queue = None\n self.currentIndex = 0\n self.numPlayers = 0\n # verplicht voor de module PodSixNet\n channelClass = ClientChannel\n\n def Connected(self, channel, addr):\n self.numPlayers += 1\n print('new connection:', channel)\n\n if self.queue == None:\n self.currentIndex += 1\n channel.gameid = self.currentIndex\n self.queue = Game(channel, self.currentIndex, self.maxPlayers)\n elif self.numPlayers == 2:\n channel.gameid = self.currentIndex\n self.queue.player[1] = channel\n elif self.numPlayers == 3:\n channel.gameid = self.currentIndex\n self.queue.player[2] = channel\n elif self.numPlayers == 4:\n channel.gameid = self.currentIndex\n self.queue.player[3] = channel\n\n if self.numPlayers >= self.queue.playerAantal:\n for i in range(self.queue.playerAantal):\n self.queue.player[i].Send({\"action\": \"startgame\", \"player\": i, \"gameid\": self.queue.gameid, \"playerAantal\": self.queue.playerAantal, \"boardBoxH\": self.queue.boardBoxH, \"boardBoxW\": self.queue.boardBoxW})\n\n self.games.append(self.queue)\n self.queue = None\n self.numPlayers=0\n\n def placeLine(self, playerTurn, pijlx, K_DOWN, data, gameid, playerNR):\n game = [a for a in self.games if a.gameid == gameid]\n if len(game) == 1:\n game[0].placeLine(playerTurn, pijlx, K_DOWN, data, playerNR)\n\n def tick(self):\n for game in self.games:\n if game.wint != 0 or game.draw:\n game.wint = 0\n game.Turn = 1\n game.draw = False\n game.board = [[0 for x in range(game.boardBoxW)] for y in range(game.boardBoxH)]\n sleep(2)\n for i in range(game.playerAantal):\n game.player[i].Send(\n {\"action\": \"boardWipe\", \"board\": game.board, \"playerTurn\": game.Turn, \"wint\": game.wint})\n self.Pump()\n\n\n def close(self, gameid):\n try:\n game = [a for a in self.games if a.gameid == gameid][0]\n for i in range(game.playerAantal):\n game.player[i].Send({\"action\": \"close\"})\n except:\n pass\n\n def nickname(self, data):\n game = [a for 
a in self.games if a.gameid == data[\"gameid\"]][0]\n for i in range(game.playerAantal):\n if i != data[\"playerNR\"]-1:\n game.player[i].Send({\"action\": \"nickname\", \"playerNR\": data[\"playerNR\"], \"nickname\": data[\"nickname\"]})\n\n\nclass Game(object):\n def __init__(self, player0, currentIndex, maxPlayers):\n # whose turn\n self.Turn = 1\n self.playerAantal = maxPlayers\n # dimensions tiles game board\n self.boardBoxH = 7\n self.boardBoxW = 14\n # define game board dimensions\n self.board = [[0 for x in range(self.boardBoxW)] for y in range(self.boardBoxH)]\n # initialize the players including the one who started the game\n self.player = [player0, None, None, None]\n self.scorePlayer = [0, 0, 0, 0]\n self.wint = 0\n self.draw = False\n\n # gameid of game\n self.gameid = currentIndex\n\n def placeLine(self, playerTurn, pijlx, K_DOWN, data, playerNR):\n # make sure it's their turn\n if playerNR == self.Turn:\n if K_DOWN == True and self.board[0][pijlx] == 0:\n # plaats box\n self.board[0][pijlx] = self.Turn\n # volgende speler\n if self.playerAantal > self.Turn:\n self.Turn += 1\n else:\n self.Turn = 1\n data[\"playerTurn\"] = self.Turn\n # send data and turn data to each player\n for i in range(self.playerAantal):\n self.player[i].Send(data)\n\n self.dropBox()\n self.controle()\n if self.wint != 0:\n self.scorePlayer[self.wint - 1] += 1\n for i in range(self.playerAantal):\n self.player[i].Send({\"action\": \"win\", \"speler\": self.wint, \"score\": self.scorePlayer})\n\n def dropBox(self):\n for x in range(self.boardBoxW):\n for y in range(self.boardBoxH - 1):\n if self.board[y][x] != 0:\n if self.board[y + 1][x] == 0:\n self.board[y + 1][x] = self.board[y][x]\n self.board[y][x] = 0\n\n def controle(self):\n # controle gebeurt alleen (y,x) (0,+),(+,0),(+,+),(+,-)\n geenNull = True\n for y in range(self.boardBoxH):\n for x in range(self.boardBoxW):\n if self.board[y][x] != 0:\n var = self.board[y][x]\n # horizontale controle\n if x < (self.boardBoxW - 3):\n if var == self.board[y][x + 1] and var == self.board[y][x + 2] and var == self.board[y][x + 3]:\n self.wint = var\n\n # verticale controle\n if y < (self.boardBoxH - 3):\n if var == self.board[y + 1][x] and var == self.board[y + 2][x] and var == self.board[y + 3][x]:\n self.wint = var\n\n # rechts naar beneden controle\n if y < (self.boardBoxH - 3) and x < (self.boardBoxW - 3):\n if var == self.board[y + 1][x + 1] and var == self.board[y + 2][x + 2] and var == \\\n self.board[y + 3][x + 3]:\n self.wint = var\n\n # links naar beneden controle\n if y < (self.boardBoxH - 3) and x > 2:\n if var == self.board[y + 1][x - 1] and var == self.board[y + 2][x - 2] and var == \\\n self.board[y + 3][x - 3]:\n self.wint = var\n # controleer of het gameboard lege vakken bevat\n else:\n geenNull=False\n self.draw = geenNull\n" }, { "alpha_fraction": 0.628654956817627, "alphanum_fraction": 0.6409356594085693, "avg_line_length": 40.70731735229492, "blob_id": "83205fa3ec20e5c6faa9228ee0c444425ac7b403", "content_id": "bb796a1b18b9e16e2c2805d854102813fc439894", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1710, "license_type": "no_license", "max_line_length": 124, "num_lines": 41, "path": "/screen_hostserver.py", "repo_name": "michaelpbAD/Dual-Net-Gaming-with-Python", "src_encoding": "UTF-8", "text": "\"\"\" Form with tkinter: hosting the server \"\"\"\n# import modules\nfrom tkinter import *\nfrom tkinter import ttk\nfrom time import sleep\n\nclass screenServer():\n def __init__(self, 
socket, maxPlayers):\n self.closedWindow = False\n # create window\n self.root = Tk()\n self.root.title(\"Vier op een rij: Server\")\n self.root.resizable(False, False)\n # make frame to show widgets in\n self.serverframe = ttk.Frame(self.root, padding=\"80 80 80 80\")\n self.serverframe.grid(column=0, row=0, sticky=(N, W, E, S))\n self.serverframe.columnconfigure(0, weight=1)\n self.serverframe.rowconfigure(0, weight=1)\n ttk.Label(self.serverframe, text=\"Running the server...\").grid(column=2, row=1, sticky=(W, E))\n # import vieropeenrijserver\n import vieropeenrijserver\n # make object from server class with arguments maxPlayers and socket = localaddr\n self.hosting = vieropeenrijserver.vieropeenrijServer(maxPlayers, localaddr=(socket[0], int(socket[1])))\n # protocol handler for checking if window gets closed by clicking (WM_DELETE_WINDOW) and will do function on_closing\n self.root.protocol(\"WM_DELETE_WINDOW\", self.on_closing)\n\n def on_closing(self):\n # ask the user if he wants to quit?\n if messagebox.askokcancel(\"Quit\", \"Do you want to quit?\"):\n # start.py loops until closedWindow = True\n self.closedWindow = True\n # close the window\n self.root.destroy()\n\n def update(self):\n # update window\n self.root.update()\n # check for sockets / data / buffers\n self.hosting.Pump()\n sleep(0.01)\n self.hosting.tick()\n" }, { "alpha_fraction": 0.6343065500259399, "alphanum_fraction": 0.6554744243621826, "avg_line_length": 33.25, "blob_id": "ff37e3e6aa1249e388d5e9a0e562ccadc90932ac", "content_id": "3be0f404d39536b8a8d09ee42211bc9b7388645d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2740, "license_type": "no_license", "max_line_length": 98, "num_lines": 80, "path": "/testing material/bootscreen.py", "repo_name": "michaelpbAD/Dual-Net-Gaming-with-Python", "src_encoding": "UTF-8", "text": "\"\"\" Form with tkinter: nickname, server selection \"\"\"\n# import tkinter / ttk for GUI\nfrom tkinter import *\nfrom tkinter import ttk\n# import regex to search for IP adress\nimport re\nimport pygame\nfrom VierOpEenRij import *\n\ngstart=False\n# checking IP adress\n\ndef checkIp(*args):\n isIp = ip.get()\n print(isIp + \" : \" + str(len(isIp)))\n if len(isIp) < 8 or len(isIp) > 15:\n print(\"This is not an IP address.\") # need to generate error\n else:\n patIp = re.compile(r'\\d{2,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}')\n matchIp = patIp.search(isIp)\n if matchIp == None or matchIp.group() != isIp:\n print(\"This is not a right address\")\n else:\n print(matchIp.group())\n print(\"join server\")\n # exec(open(\"./VierOpEenRij.py\").read()) # experimental, not the right way\n\n global bg\n bg=VierOpEenRijGame() # init__ is called right here\n global gstart\n gstart=True\n\n# checking if server can be hosted\ndef hostServer(*args):\n server = Tk()\n print(\"host server\")\n server.title(\"Vier op een rij: Server\") # title of window\n serverframe = ttk.Frame(server, padding=\"80 80 80 80\") # padding of frame\n serverframe.grid(column=0, row=0, sticky=(N, W, E, S)) # grid layout\n serverframe.columnconfigure(0, weight=1)\n serverframe.rowconfigure(0, weight=1)\n ttk.Label(serverframe, text=\"Running the server....\").grid(column=2, row=1, sticky=(W, E))\n\n\n# root =\nroot = Tk()\nroot.title(\"Vier op een rij: Client or Server\") # title of window\nmainframe = ttk.Frame(root, padding=\"80 80 80 80\") # padding of frame\nmainframe.grid(column=0, row=0, sticky=(N, W, E, S)) # grid layout\nmainframe.columnconfigure(0, 
weight=1)\nmainframe.rowconfigure(0, weight=1)\n\n# tkinter variable for entry (input field)\nip = StringVar()\n\n# label for input field\nttk.Label(mainframe, text=\"Server IP-adress:\").grid(column=2, row=1, sticky=(W, E))\n# text input ipaddress\nip_entry = ttk.Entry(mainframe, width=20, textvariable=ip)\nip_entry.grid(column=2, row=2, sticky=(N, W, E, S)) # layout text input field ipaddress\nttk.Button(mainframe, text=\"Join server\", command=checkIp).grid(column=2, row=3, sticky=(W, E))\n\n# \"or\"-label\nttk.Label(mainframe, text=\"or\").grid(column=2, row=4, sticky=(W, E))\n\n# button for hosting the server\nttk.Button(mainframe, text=\"Host server\", command=hostServer).grid(column=2, row=5, sticky=(W, E))\n\n# loop through all child of the frame and add padding to x and y\nfor child in mainframe.winfo_children():\n child.grid_configure(padx=10, pady=10)\n\n# focus on ip text field when started\nip_entry.focus()\n\n# loop for GUI\nwhile 1:\n root.update()\n if gstart==True and (not bg.stopped):\n bg.update()\n" }, { "alpha_fraction": 0.5728916525840759, "alphanum_fraction": 0.5836389064788818, "avg_line_length": 45.523529052734375, "blob_id": "8f801dfbfca03b21e8c2ee39e44dcf4dce887d36", "content_id": "8fa51211ef49375da67ca7b20c61e5b328f89fc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7909, "license_type": "no_license", "max_line_length": 165, "num_lines": 170, "path": "/screen_joinorhost.py", "repo_name": "michaelpbAD/Dual-Net-Gaming-with-Python", "src_encoding": "UTF-8", "text": "\"\"\" Form with tkinter: join host or host server \"\"\"\n# import modules\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import ttk\nimport re\n\nclass joinorhost():\n def __init__(self):\n self.closedWindow = False\n self.hostS = None\n self.playVierOpEenRij = None\n # ============================ START FORM JOIN OR HOST SERVER =================================\n # make a window\n self.root = Tk()\n # title of window\n self.root.title(\"Vier op een rij: Client or Server\")\n self.root.resizable(False, False)\n\n # making new derived styles\n s = ttk.Style()\n s.configure('vieropeenrij.TFrame', background='#1ABC9C')\n s.configure('vieropeenrij.TLabel', background='#1ABC9C')\n\n # frame is part of window (for showing form elements)\n mainframe = ttk.Frame(self.root, padding=\"80 80 80 80\", style=\"vieropeenrij.TFrame\") # padding of frame\n mainframe.grid(column=0, row=0, sticky=(N, W, E, S)) # grid layout\n mainframe.columnconfigure(0, weight=1)\n mainframe.rowconfigure(0, weight=1)\n\n # tkinter variables for entries, spinbox\n self.socket = StringVar()\n self.nickname = StringVar()\n self.socketServer = StringVar()\n self.maxPlayers = StringVar()\n\n # label for text entry\n ttk.Label(mainframe, text=\"Server IP-adress: Server port\", style=\"vieropeenrij.TLabel\").grid(column=2, row=1,\n sticky=(W, E))\n # text entry for \"socket\" server to joing\n socketEntry = ttk.Entry(mainframe, width=20, textvariable=self.socket)\n socketEntry.grid(column=3, row=1, sticky=(N, W, E, S))\n # label for nickname\n ttk.Label(mainframe, text=\"Nickname:\", style=\"vieropeenrij.TLabel\").grid(column=2, row=2, sticky=(W, E))\n # text entry for nickname\n nicknameEntry = ttk.Entry(mainframe, width=20, textvariable=self.nickname)\n nicknameEntry.grid(column=3, row=2, sticky=(N, W, E, S))\n # button for function joinServer\n ttk.Button(mainframe, text=\"Join server\", command=self.joinServer).grid(column=3, row=3, sticky=(W, E))\n\n # 
\"or\"-label\n ttk.Label(mainframe, text=\"OR\", style=\"vieropeenrij.TLabel\").grid(column=2, row=4, sticky=(W, E))\n\n # label for text entry server ip and port\n ttk.Label(mainframe, text=\"Your PC's IP-adress: Server port\", style=\"vieropeenrij.TLabel\").grid(column=2, row=5,\n sticky=(W, E))\n # entry for \"socketServer\"\n serverEntry = ttk.Entry(mainframe, width=15, textvariable=self.socketServer)\n serverEntry.grid(column=3, row=5, sticky=(N, W, E, S))\n # label for maximum number of players in a game\n ttk.Label(mainframe, text=\"Maximum number of players in a game:\", style=\"vieropeenrij.TLabel\").grid(column=2, row=6, sticky=(W, E))\n # spinbox for \"maxplayers\"\n Spinbox(mainframe, from_=2, to=4, textvariable=self.maxPlayers, width=3).grid(column=3, row=6, sticky=(W))\n # button for hosting the server, function hostServer\n ttk.Button(mainframe, text=\"Host server\", command=self.hostServer).grid(column=3, row=7, sticky=(W, E))\n\n # loop through all child of the frame and add padding to x and y\n for child in mainframe.winfo_children():\n child.grid_configure(padx=10, pady=10)\n\n # focus on text entry \"socketEntry\" when started\n socketEntry.focus()\n # ============================ END FORM JOIN OR HOST SERVER ===================================\n # protocol handler (interaction between application and window manager) for checking if window gets closed (WM_DELETE_WINDOW) and will do function on_closing\n self.root.protocol(\"WM_DELETE_WINDOW\", self.on_closing)\n\n def joinServer(self):\n # get socket out of text entry and check if it is valid\n checkedSocket = self.checkSocket(self.socket.get())\n # get nickname out of text entry\n nickname = self.nickname.get().strip()\n # check can't be false and nickname can't be empty\n if not (checkedSocket and nickname != \"\"):\n messagebox.showerror(\"Error\", \"No empty nickname allowed.\")\n return False\n else:\n # close the window\n self.root.destroy()\n # import VierOpEenRij.py\n import VierOpEenRij\n print(\"Joining server at: \" + checkedSocket[0] + \" : \" + checkedSocket[1] + \" as \" + nickname)\n # join server by making an object from VierOpEenRijGame with arguments: checkedSocket and nickname\n self.playVierOpEenRij = VierOpEenRij.VierOpEenRijGame(checkedSocket, nickname)\n\n def hostServer(self):\n # get socket out of text entry and check if it is valid\n checkedSocket = self.checkSocket(self.socketServer.get())\n # try saving maxPlayers as an int\n try:\n maxPlayers = int(self.maxPlayers.get())\n except:\n maxPlayers = 0\n # checkedSocket can't be false and maxPlayers must be 2,3 or 4\n if checkedSocket and (maxPlayers == 2 or maxPlayers == 3 or maxPlayers == 4):\n # close the window\n self.root.destroy()\n # import screen_server.py\n import screen_hostserver\n print(\"Hosting server at: \" + checkedSocket[0] + \" : \" + checkedSocket[\n 1] + \" with maximum players in a game \" + str(maxPlayers))\n # hosting the server with arguments: checkedSocket, maxPlayers\n self.hostS = screen_hostserver.screenServer(checkedSocket, maxPlayers)\n else:\n messagebox.showerror(\"Error\", \"Maximum players is 2, 3 or 4.\")\n return False\n\n # check if socket entered is valid\n def checkSocket(self, socket):\n try:\n # split socket if possible\n isIp, isPort = socket.split(\":\")\n except:\n messagebox.showerror(\"Error\", \"Format is IP:Port\")\n return False\n\n # lenth of IP adress may not be smaller than 7 or higher than 15\n if len(isIp) < 7 or len(isIp) > 15:\n messagebox.showerror(\"Error\", \"This can not be a valid IP 
address.\")\n return False\n else:\n # check if pattern of IP is valid (3 dots with groups of 1 to 3 digits\n patIp = re.compile(r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}')\n matchIp = patIp.search(isIp)\n if matchIp == None or matchIp.group() != isIp:\n messagebox.showerror(\"Error\", \"This can not be a valid IP address.\")\n return False\n try:\n # check if port is negative\n if int(isPort) != abs(int(isPort)):\n messagebox.showerror(\"Error\", \"Not a valid port number.\")\n return False\n except:\n messagebox.showerror(\"Error\", \"Not a valid port number.\")\n return False\n # return the socket\n return [isIp, isPort]\n\n def on_closing(self):\n # ask the user if he wants to quit?\n if messagebox.askokcancel(\"Quit\", \"Do you want to quit?\"):\n # start.py loops until closedWindow = True\n self.closedWindow = True\n # close the window\n self.root.destroy()\n\n # update GUI 1 time\n def update(self):\n try:\n self.root.update()\n except:\n pass\n # only update when object exists\n if self.hostS != None:\n if self.hostS.closedWindow == False:\n self.hostS.update()\n else:\n self.closedWindow = True\n # only update when object exists\n if self.playVierOpEenRij != None:\n self.playVierOpEenRij.update()\n" } ]
9
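Annotation: the win detection in this repo's vieropeenrijserver.py (the controle() method above) scans every occupied cell and only checks four directions (right, down, down-right, down-left), which is sufficient because any four-in-a-row is always found from its topmost/leftmost stone. Below is a minimal standalone sketch of the same idea, assuming the repo's board encoding (a list of rows, 0 for empty, player numbers 1-4 for stones); the helper name find_winner is illustrative and not part of the repository:

def find_winner(board):
    # board[y][x]: 0 = empty, otherwise the number (1-4) of the owning player
    height, width = len(board), len(board[0])
    # direction vectors (dy, dx): right, down, down-right, down-left
    directions = ((0, 1), (1, 0), (1, 1), (1, -1))
    for y in range(height):
        for x in range(width):
            player = board[y][x]
            if player == 0:
                continue
            for dy, dx in directions:
                end_y, end_x = y + 3 * dy, x + 3 * dx
                # skip directions whose fourth stone would fall off the board
                if not (0 <= end_y < height and 0 <= end_x < width):
                    continue
                if all(board[y + i * dy][x + i * dx] == player for i in range(4)):
                    return player
    return 0  # no four-in-a-row found

Compared with the unrolled if-blocks in controle(), the direction table removes the duplicated bounds checks while keeping the same O(width * height) scan.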
gogobd/vq-vae-2
https://github.com/gogobd/vq-vae-2
f042c801547ecb432d7760f2162201f9fc8ca530
af7c7f91d89f610788a7bb04f39b138ab7045a6c
f6845adea64e263e0b18f944804b5a5cd45c5a69
refs/heads/master
2022-07-15T14:01:06.885241
2020-05-20T05:55:39
2020-05-20T05:55:39
264,475,687
0
0
null
2020-05-16T16:12:41
2020-04-24T10:05:49
2019-08-19T12:42:49
null
[ { "alpha_fraction": 0.4691457748413086, "alphanum_fraction": 0.4799245595932007, "avg_line_length": 31.2608699798584, "blob_id": "088dd221a524817816567e9209a68444f664f15e", "content_id": "8542bbe276aceab0dcfd552a59212bdc6e507f1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3711, "license_type": "no_license", "max_line_length": 99, "num_lines": 115, "path": "/vq_vae_2/examples/hierarchical/data.py", "repo_name": "gogobd/vq-vae-2", "src_encoding": "UTF-8", "text": "\"\"\"\nData loading pipeline.\n\nAll data should be stored as images in a single directory.\n\"\"\"\n\nimport os\nimport random\n\nfrom PIL import Image\nimport numpy as np\nimport torch\n\n\nclass SwipeCropper(object):\n \n def __init__(self, image, wr, hr):\n self.image = image\n self.wr = wr\n self.hr = hr\n \n def __iter__(self):\n return self\n \n def tiles(self):\n wr = self.wr\n hr = self.hr\n w, h, c = self.image.shape\n if (h < hr) or (w < wr):\n print(\"Image too small.\")\n return\n if wr == w and hr == h:\n yield self.image\n return\n hd = (hr - (h % hr)) / ( h // hr )\n wd = (wr - (w % wr)) / ( w // wr )\n for hn in range(h//hr + 1):\n for wn in range(w//wr + 1):\n h0 = int((hn * hr) - (hn * hd))\n w0 = int((wn * wr) - (wn * wd))\n h1 = int(h0 + hr)\n w1 = int(w0 + wr)\n yield(self.image[w0:w1, h0:h1, :])\n\n \ndef load_tiled_images(dir_path, batch_size=8, width=128, height=128):\n images = load_single_images_uncropped(dir_path)\n batch = []\n while True:\n try:\n image = next(images)\n cropper = SwipeCropper(np.array(image), width, height)\n tiles = cropper.tiles()\n while True:\n try:\n tile = next(tiles)\n batch.append(tile)\n if len(batch) == batch_size:\n batch = np.array(batch)\n batch = torch.from_numpy(batch).permute(0, 3, 1, 2).contiguous()\n batch = batch.float() / 255\n yield batch\n batch = []\n except StopIteration:\n break\n except StopIteration:\n continue\n\n \ndef load_images(dir_path, batch_size=16):\n images = load_single_images(dir_path)\n while True:\n batch = np.array([next(images) for _ in range(batch_size)])\n batch = torch.from_numpy(batch).permute(0, 3, 1, 2).contiguous()\n batch = batch.float() / 255\n yield batch\n\n\ndef load_single_images(dir_path):\n while True:\n with os.scandir(dir_path) as listing:\n for entry in listing:\n if not (entry.name.endswith('.png') or entry.name.endswith('.jpg')):\n continue\n try:\n img = Image.open(entry.path)\n except OSError:\n # Ignore corrupt images.\n continue\n width, height = img.size\n scale = IMAGE_SIZE / min(width, height)\n img = img.resize((round(scale * width), round(scale * height)))\n img = img.convert('RGB')\n tensor = np.array(img)\n row = random.randrange(tensor.shape[0] - IMAGE_SIZE + 1)\n col = random.randrange(tensor.shape[1] - IMAGE_SIZE + 1)\n yield tensor[row:row + IMAGE_SIZE, col:col + IMAGE_SIZE]\n\n \ndef load_single_images_uncropped(dir_path, randomized=True):\n while True:\n listing = list(os.scandir(dir_path))\n if randomized:\n random.shuffle(list(listing))\n for entry in listing:\n if not os.path.splitext(entry.name)[1].lower() in Image.registered_extensions().keys():\n continue\n try:\n img = Image.open(entry.path)\n except OSError:\n # Ignore corrupt images.\n continue\n img = img.convert('RGB')\n tensor = np.array(img)\n yield tensor\n\n" }, { "alpha_fraction": 0.6378854513168335, "alphanum_fraction": 0.6766519546508789, "avg_line_length": 29.675676345825195, "blob_id": "496aad2de70c758caadd8343e738f6f3803351ec", "content_id": 
"ce57430865d3c975532f9668e91e57f9a18f23c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 1135, "license_type": "no_license", "max_line_length": 90, "num_lines": 37, "path": "/Dockerfile", "repo_name": "gogobd/vq-vae-2", "src_encoding": "UTF-8", "text": "FROM nvidia/cuda:latest\n\n# Install system dependencies\nRUN apt-get update \\\n && DEBIAN_FRONTEND=noninteractive apt-get install -y \\\n build-essential \\\n curl \\\n wget \\\n git \\\n unzip \\\n screen \\\n vim \\\n net-tools \\\n && apt-get clean\n\n# Install python miniconda3 + requirements\nENV MINICONDA_HOME /opt/miniconda\nENV PATH ${MINICONDA_HOME}/bin:${PATH}\nRUN wget -q https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \\\n && chmod +x Miniconda3-latest-Linux-x86_64.sh \\\n && ./Miniconda3-latest-Linux-x86_64.sh -b -p \"${MINICONDA_HOME}\" \\\n && rm Miniconda3-latest-Linux-x86_64.sh\nRUN conda update -n base -c defaults conda\n\n# JupyterLab\nRUN conda install -c conda-forge jupyterlab ipywidgets nodejs\n\n# Project\nCOPY . /vq-vae-2\nWORKDIR /vq-vae-2\nRUN conda install pytorch torchvision cudatoolkit=10.1 -c pytorch -y\n\n# Start container in notebook mode\nCMD SHELL=/bin/bash jupyter lab --no-browser --ip 0.0.0.0 --port 8888 --allow-root\n\n# docker build -t vq-vae-2 .\n# docker run -v /host/directory/data:/data -p 8888:8888 --ipc=host --gpus all -it vq-vae-2\n" } ]
2
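Annotation: the SwipeCropper in this repo's data.py covers an arbitrary image with fixed-size tiles by letting neighbouring tiles overlap just enough that the last tile ends exactly at the image border. A small sketch of that offset computation in isolation, assuming integer sizes with tile <= length (the helper name tile_offsets is illustrative, not part of the repository):

def tile_offsets(length, tile):
    # number of whole tiles that fit; SwipeCropper iterates length // tile + 1 positions
    n = length // tile
    if n == 0:
        return []   # image smaller than the tile: nothing to emit
    if length == tile:
        return [0]  # exact fit, single tile (mirrors the repo's special case)
    # spread the shortfall (tile - length % tile) evenly as overlap between tiles
    overlap = (tile - (length % tile)) / n
    return [int(i * tile - i * overlap) for i in range(n + 1)]

# e.g. tile_offsets(300, 128) -> [0, 86, 172]; each offset + 128 stays within 300,
# and the last tile spans 172..300, ending exactly at the border.

Note one quirk reproduced from the repo: when tile divides length exactly (and length > tile), the loop still emits n + 1 overlapping offsets rather than n disjoint ones.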
FORSEN-ROCK/other
https://github.com/FORSEN-ROCK/other
3c1e26a87132cef9fcec6c3de1a5d30e668ec676
62ccbcea4afcfdef6e037af627bc718d8a4cc139
2bbc8ae29d02d023becadcfd60b3e4739f820a93
refs/heads/master
2021-05-12T18:15:27.015313
2018-07-31T21:22:58
2018-07-31T21:22:58
117,063,078
3
0
null
2018-01-11T07:01:15
2018-01-11T07:35:50
2018-01-13T14:28:43
Python
[ { "alpha_fraction": 0.5814332365989685, "alphanum_fraction": 0.5895765423774719, "avg_line_length": 29.725000381469727, "blob_id": "466737ca791f52f667bdeb5ed7ffb73295971059", "content_id": "76278810dcc8a36081a78abbd87d0a63ad9c2431", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1228, "license_type": "no_license", "max_line_length": 200, "num_lines": 40, "path": "/parserHH/parserResumeId.py", "repo_name": "FORSEN-ROCK/other", "src_encoding": "UTF-8", "text": "import urllib.request as urllib\n\n\nfrom bs4 import BeautifulSoup\n\n\ndef getSearchingResults(searchText):\n countRecord = 0\n namberPage = 0\n listOfResumesId = []\n while True:\n searchSpeak = \"https://hh.ru/search/resume?area=1&clusters=true&text=%s&pos=full_text&logic=normal&exp_period=all_time&order_by=relevance&area=1&clusters=true&page=%i\" %(searchText,namberPage)\n \n connect = urllib.urlopen(searchSpeak)\n content = connect.read()\n soupTree = BeautifulSoup(content, 'html.parser')\n connect.close()\n notFound = soupTree.find('div', {'class': 'error-content-wrapper'})\n if(notFound == None):\n formPersons = soupTree.findAll('tr',{'itemscope': 'itemscope'})\n for item in formPersons:\n listOfResumesId.append(item.find('a',{'itemprop':'jobTitle'}))##['data-hh-resume-hash'])\n countRecord += 1\n # for debag\n if(countRecord >= debugeSize):\n break\n else:\n break\n namberPage += 1\n \n return listOfResumesId\n \n \n \n \nif __name__ == '__main__':\n \n debugeSize = 200\n listId = getSearchingResults(\"Siebel\")\n print(listId)" }, { "alpha_fraction": 0.5824915766716003, "alphanum_fraction": 0.5855996012687683, "avg_line_length": 21.85207176208496, "blob_id": "f07137896fbf5d45d15e84acad01b18ba1771d4f", "content_id": "a2a95b442c8fc6190b893c99e6eb4b60b19f46c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3861, "license_type": "no_license", "max_line_length": 71, "num_lines": 169, "path": "/bypass_hh/bypass_hh.py", "repo_name": "FORSEN-ROCK/other", "src_encoding": "UTF-8", "text": "\"\"\" Custom Seance objects for recrut sites\n\"\"\"\nimport time\nimport requests\n\n\nfrom selenium import webdriver\n\n\nclass BaseException(Exception):\n def __init__(self, message):\n self.message = message\n\n\nclass URLError(BaseException):\n pass\n\n\nclass SeanceError(BaseException):\n pass\n\n\nclass SeanceValueError(BaseException):\n pass\n#class BypassBase(object):\n# pass\n\n\n#class BypassBaseAuth(object):\n# auth_url = None\n# login_field_class = None\n# pass_field_class = None\n\n# def __init__(self):\n# self\n\nclass BrowserSeanceBase(object):\n\n def __init__(self):\n self.browser = webdriver.Chrome()\n\n def __del__(self):\n browser = getattr(self, 'browser', None)\n\n if browser:\n browser.close()\n\n def _set_url(self, url=None):\n if not url:\n raise URLError(\"URL it's bad: %s\" %(url))\n\n browser = getattr(self, 'browser', None)\n\n if not browser:\n raise SeanceError(\"Not active connect\")\n\n browser.get(url)\n time.sleep(1)\n\n status = self._status_code()\n\n #if status != 200:\n # raise SeanceError(\"Respons status code: %s\" %(status))\n\n def _get_html(self):\n browser = getattr(self, 'browser', None)\n\n if not browser:\n raise SeanceError(\"Not active connect\")\n\n return browser.page_source\n\n def _screenshot(self):\n browser = getattr(self, 'browser', None)\n\n if not browser:\n raise SeanceError(\"Not active connect\")\n\n return browser.screen()\n\n def _convert_to_pdf(self):\n \"\"\"Take 
thees object\n Returned current page as bayts in format pdf\n \"\"\"\n pass\n\n def _get_element(self, target_name):\n element_name = getattr(self, \"target_\" + target_name, None)\n\n if element_name:\n browser = getattr(self, 'browser', None)\n element = browser.find_element_by_name(element_name)\n else:\n element = None\n\n return element\n\n def _set_element_value(self, element, value):\n\n if not value:\n raise SeanceValueError(\"Velue is empty!\")\n\n element.send_keys(value)\n\n def _send_form_element(self, element):\n element.submit()\n\n def _status_code(self):\n url = self.browser.current_url\n connect = requests.get(url)\n connect.close()\n return connect.status_code\n\n def request_get(self, url):\n return self._set_url(url)\n\n def html(self):\n return self._get_html()\n\n def screenshot(self):\n return self._screenshot()\n\n def pdf(self):\n return self._convert_to_pdf()\n\n def status_code(self):\n return self._status_code()\n\n\nclass BrowserSeanceHh(BrowserSeanceBase):\n\n target_login = \"username\"\n target_password = \"password\"\n\n def __init__(self, login=None, password=None):\n\n if not (login and password):\n raise SeanceError(\n \"Login or password is None (%s, %s)\" %(login, password)\n )\n\n super(BrowserSeanceHh, self).__init__()\n self.auth = self._auth(login, password)\n\n def _auth(self, login, password):\n browser = getattr(self, \"browser\", None)\n\n if not browser:\n raise SeanceError(\"Bad Seance\")\n\n self.request_get(\"https://hh.ru/account/login?backurl=%2F\")\n login_element = self._get_element(\"login\")\n password_element = self._get_element(\"password\")\n self._set_element_value(login_element, login)\n self._set_element_value(password_element, password)\n self._send_form_element(password_element)\n status = self.status_code()\n\n if status != 200:\n auth_flag = False\n else:\n auth_flag = True\n\n return auth_flag\n\n\nif __name__ == '__main__':\n test = BrowserSeanceHh('1', '2')\n time.sleep(10)" }, { "alpha_fraction": 0.5716134905815125, "alphanum_fraction": 0.5736939311027527, "avg_line_length": 41.58070755004883, "blob_id": "40a64b16c1cf21d24343eeaf5b833004319462d7", "content_id": "8779cbded436dd0a718a42827f85ac69f642d4cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21956, "license_type": "no_license", "max_line_length": 249, "num_lines": 508, "path": "/offset recrut/services.py", "repo_name": "FORSEN-ROCK/other", "src_encoding": "UTF-8", "text": "import urllib.request as urllib\nfrom urllib.parse import quote\nfrom bs4 import BeautifulSoup\nimport lxml\nimport requests\nfrom .models import Domain, SearchObject, VailidValues, SchemaParsing, Expression, Credentials, RequestHeaders, SessionData, CredentialsData, ResumeLink, SearchResult\nfrom random import random\nfrom re import findall\n\nclass LoginServer(object):\n def __init__(self, domain):\n self.domain = domain\n self.authSession = requests.Session()\n self.__credentialsOrigen__ = None\n self.__authData__ = None\n self.__sessionHeaders__ = None\n self.__sessinCookies__ = None\n\n def __nullCookies__(self):\n if(not self.__sessinCookies__):\n self.__sessinCookies__ = SessionData.objects.filter(credentials=self.__credentialsOrigen__)\n if(len(self.__sessinCookies__) <= 0):\n return False\n else:\n for cookie in self.__sessinCookies__:\n if(cookie.cookieValue):\n return False\n return True\n\n def __notCookies__(self):\n if(not self.__sessinCookies__):\n self.__sessinCookies__ = 
SessionData.objects.filter(credentials=self.__credentialsOrigen__)\n\n if(len(self.__sessinCookies__) <= 0):\n return True\n else:\n return False\n\n def connect(self):\n pass \n #preRequest = requests.Request(\"POST\", self.__credentialsOrigen__.testLink)\n #requestSession= self.authSession.prepare_request(preRequest)\n #testRequest = self.authSession.send(requestSession)\n #if(testRequest.status_code == 200):\n # return True\n #else:\n # return False\n\n def authOut(self):\n SessionData.objects.all().delete()\n #if(not self.__sessinCookies__):\n # self.__sessinCookies__ = SessionData.objects.filter(credentials=self.__credentialsOrigen__)\n #else:\n # for cookie in self.__sessinCookies__:\n # cookie.cookieValue = None\n # cookie.save()\n\n def cookiesDell(self):\n sessionDat = SessionData.objects.filter(credentials=self.__credentialsOrigen__)##.delete()\n for item in sessionDat:\n item.delete()\n ##return True \n\n def authLogin(self):\n self.__credentialsOrigen__ = Credentials.objects.get(domain=self.domain)\n self.__sessionHeaders__ = RequestHeaders.objects.filter(credentials=self.__credentialsOrigen__)\n\n self.authSession.headers = {item.sectionName : item.body for item in self.__sessionHeaders__}\n\n if(self.__nullCookies__() or self.__notCookies__()):\n ##Рассмотреть случай, когда не входим в условие\n loginGet = requests.Request(\"GET\", self.__credentialsOrigen__.loginLink)\n preRequest = self.authSession.prepare_request(loginGet)\n loginRequest = self.authSession.send(preRequest)\n\n if(loginRequest.status_code != 200):\n raise ValueError(\"Bad Credentials!\")\n\n securityTokin = self.authSession.cookies.get_dict()\n self.__authData__ = CredentialsData.objects.filter(credentials=self.__credentialsOrigen__)\n\n for itemAuth in self.__authData__:\n if(not itemAuth.value):\n itemAuth.value = securityTokin.get(itemAuth.name)\n\n authData = {item.name : item.value for item in self.__authData__}\n loginPost = requests.Request(\"POST\", self.__credentialsOrigen__.loginLink, data=authData)\n authPreReguest = self.authSession.prepare_request(loginPost)\n authRequest = self.authSession.send(authPreReguest)\n\n if(authRequest.status_code != 200):\n raise ValueError(\"Bad authData!\")\n\n authCookies = self.authSession.cookies.get_dict()\n for cookie in authCookies:\n SessionData.objects.update_or_create(cookieName=cookie, credentials=self.__credentialsOrigen__, defaults={'cookieValue': authCookies[cookie]},)\n if(not (self.__nullCookies__() and self.__notCookies__())):\n sessionCookies = SessionData.objects.filter(credentials=self.__credentialsOrigen__)\n for cookie in sessionCookies:\n self.authSession.cookies.set(cookie.cookieName, cookie.cookieValue)\n\n ##if(not self.connect()): \n ##self.cookiesDell()\n ##self.authOut()\n ##self.authLogin()\n\nclass ResumeMeta(object):\n def __init__(self, domain,**kwargs):\n self.domain = domain\n self.pay = kwargs.get('pay')\n self.age = kwargs.get('age')\n self.jobExp = kwargs.get('jobExp')\n self.lastJob = kwargs.get('lastJob')\n self.jobTitle = kwargs.get('jobTitle')\n self.gender = kwargs.get('gender')\n self.link = kwargs.get('link') # в системе\n self.origenLink = kwargs.get('origen') # на сайте источнике локальная\n self.previewLink = kwargs.get('preview') # предварительный просмотр из системы на источнике\n\n def setAttr(self, attrName, attrVal):\n if(attrName in self.__dict__):\n self.__dict__[attrName] = attrVal\n\n def createLink(self):\n if(not self.origenLink):\n return\n else:\n self.previewLink = self.domain.rootUrl + self.origenLink\n 
self.link = '/search/result/resume/' + self.previewLink\n\n #def __str__(self):\n # format = '\\n'\n # for attrName in self.__dict__:\n # row = '%s = %s\\n' %(attrName, self.__dict__[attrName])\n # format += row\n # return format\n\nclass ResumeData(object):\n def __init__(self, domain, **kwargs):\n self.domain = domain\n self.firstName = kwargs.get('firstName')\n self.lastName = kwargs.get('lastName')\n self.middleName = kwargs.get('middleNam')\n self.phone = kwargs.get('phone')\n self.email = kwargs.get('email')\n self.location = kwargs.get('location')\n self.education = kwargs.get('education')\n self.experience = kwargs.get('expJob')\n self.gender = kwargs.get('gender')\n\n def get(self, attrName):\n if(attrName not in self.__dict__):\n return None\n else:\n return self.__dict__[attrName]\n\n def __str__(self):\n format = '\\n'\n for attrName in self.__dict__:\n row = '%s = %s\\n' %(attrName, self.__dict__[attrName])\n format += row\n return format\n\n def setAttr(self, attrName, attrVal):\n if(attrName in self.__dict__):\n self.__dict__[attrName] = attrVal\n\n def validGender(self):\n validValueList = VailidValues.objects.filter(domain=self.domain, context=\"RESUME\")\n validValueDict = {item.rawValue : item.validValue for item in validValueList}\n ##print('validValueDict>>',validValueDict)\n ##print('self.gender>>',self.gender)\n self.gender = validValueDict.get(self.gender)\n\nclass OrigenUrl(object):\n def __init__(self,domain=None, url=None):\n self.__domain__ = domain\n self.__url__ = url\n self.__iterNum__ = 0\n self.pattern = None\n\n def setDomain(self, domainName=None):\n if((not self.__url__) and domainName):\n self.__domain__ = Domain.objects.get(domainName=domainName)\n if(self.__url__ and (not domainName)):\n domainName = findall(r'\\w{0,4}\\.?\\w+\\.ru', self.__url__)\n self.__domain__ = Domain.objects.get(domainName=domainName)\n else:\n raise TypeError(\"can not specify Domain and Url at the same time!\")\n\n def getDomain(self, isObject=False):\n if(isObject):\n return self.__domain__\n else:\n return self.__domain__.domainName\n\n def setUrlOrPattern(self, url=None, **kwargs):\n if((not url) and self.__domain__):\n #Load Pattern\n self.pattern = SearchObject().getPattern(domain=self.__domain__,mode=kwargs.get('mode'),ageFrom=kwargs.get('ageFrom'),ageTo=kwargs.get('ageTo'),salaryFrom=kwargs.get('salaryFrom'),salaryTo=kwargs.get('salaryTo'),gender=kwargs.get('gen'))\n if(url and self.__domain__):\n domainName = findall(r'\\w{0,4}\\.?\\w+\\.ru', url)[0]\n if(domainName != self.__domain__.domainName):\n ##print('doaminName>>',domainName,'__domain__.domainName>>',self.__domain__.domainName)\n raise ValueError(\"A link can be reinstalled within the same domain\")\n else:\n self.__url__ = url\n\n def getUrl(self):\n return self.__url__\n\n def getIterationNum(self):\n if(not self.pattern):\n raise ValueError(\"Patern link is not defained\")\n return self.__iterNum__\n\n def setIterationNum(self, iterationNum=0):\n if(not self.pattern):\n raise ValueError(\"Pattern link is not defained\")\n self.__iterNum__ = iterationNum\n\n def createLink(self, dataParm):\n if(not self.pattern):\n raise ValueError(\"Pattern link is not defained\")\n\n self.__iterNum__ = self.pattern.startPosition\n listParamtrs = self.pattern.parametrs.split(',')\n link = self.pattern.link\n ##print(dataParm)\n ##print(self.pattern.parametrs.split(','))\n ##print(self.pattern.domain.domainName)\n ##print('>>',dataParm['ageto'])\n for parametr in listParamtrs:\n ##print('233 >>', len(listParamtrs))\n 
##print('===>',parametr,parametr.lower(),dataParm.get(parametr.strip().lower()))\n link = link.replace(parametr, dataParm.get(parametr.strip().lower()))\n\n ##print('237 >>', link)\n self.__url__ = link \n\n def createSearchLink(self, dataParm, data):\n dataParm.setdefault('heash_search', str(int(random()*10**15)))\n valList = VailidValues.objects.filter(domain=self.__domain__, context='SEARCH')\n for item in valList:\n if((data.get('gender') == item.rawValue) and item.criterionName == 'gender'):\n data['gender'] = item.validValue\n if((data.get('ageFrom') == item.rawValue) and item.criterionName == 'ageFrom'):\n data['ageFrom'] = item.validValue\n if((data.get('ageTo') == item.rawValue) and item.criterionName == 'ageTo'):\n data['ageTo'] = item.validValue\n\n self.setUrlOrPattern(mode=data.get('searchMode'),ageFrom=data.get('ageFrom'),ageTo=data.get('ageTo'),salaryFrom=data.get('salaryFrom'),salaryTo=data.get('salaryTo'),gen=data.get('gender'))\n self.createLink(dataParm)\n\n def nextOrStartIteration(self):\n ##print('Enter>>')\n if((not self.pattern) and self.__url__):\n raise TypeError(\"Object url have is not iteration\")\n else:\n link = self.__url__.replace(self.pattern.iterator, str(self.__iterNum__))\n self.__iterNum__ += self.pattern.iterStep\n ##print('link>>', link)\n ##print('self.__iterNum__>>',self.__iterNum__)\n\n return link\n\nclass OrigenRequest(object):\n def __init__(self,domain=None, linkObj=None):\n self.domain = domain \n self.link = linkObj\n self.__auth__ = None\n\n def getLink(self, isObject=False):\n if(isObject):\n return self.link\n else:\n return self.link.getUrl()\n\n def setLink(self,domain=None,url=None):\n if(not url and not domain):\n return None\n else:\n self.link = OrigenUrl(domain, url)\n\n def setLinkObj(self, linkObj):\n if(linkObj):\n self.link = linkObj\n\n def request(self):\n if(not self.__auth__):\n self.__auth__ = LoginServer(self.domain)\n try:\n self.__auth__.authLogin()\n except ValueError:\n self.__auth__.authOut()\n self.__auth__.authLogin()\n ##else:\n ##if(not self.__auth__.connect()):\n ## self.__auth__.authLogin()\n ## print('------------------')\n ## print('self.__auth__.connect()>>',self.__auth__.connect())\n ## print('>>Not connect!!<<')\n if(self.link.pattern):\n searchLink = self.link.nextOrStartIteration()\n else:\n searchLink = self.link.getUrl()\n\n try:\n connectRequest = requests.Request('GET',searchLink)\n connectSession = self.__auth__.authSession.prepare_request(connectRequest)\n content = self.__auth__.authSession.send(connectSession)\n outContent = content.text\n content.close()\n except UnboundLocalError:\n outContent = None\n\n return outContent\n\nclass OriginParsing(object):\n def __init__(self, domain, limit, context, search_card=None):\n self.domain = domain\n self.context = context\n self.search_card = search_card\n self.__limit__ = int(limit)\n self.countResume = 0\n self.__notFound__ = None\n self.__bodyResponse__ = None\n self.__schema__ = None\n\n def createResumeLink(self, url):\n self.link.setUrlOrPattern(url)\n\n def generalSchem(self):\n prserSchem = []\n parserList = SchemaParsing.objects.filter(domain=self.domain, context=self.context, inactive=False)##,parSchemaParsing__isnull=False)\n for item in parserList:\n if(item.target == 'error' or item.target == 'bodyResponse'):\n pass\n else:\n row = []\n expression = Expression.objects.filter(SchemaParsing=item).order_by('seqOper')\n row.append(item)\n if(expression): \n for expItem in expression:\n row.append(expItem)\n prserSchem.append(row)\n 
self.__schema__ = prserSchem\n\n def setErrorTarget(self):\n self.__notFound__ = SchemaParsing.objects.get(domain=self.domain, context=self.context, target='error')\n\n def setBodyResponceTarget(self):\n self.__bodyResponse__ = SchemaParsing.objects.get(domain=self.domain, context=self.context, target='bodyResponse')\n\n def parser(self, schema_parsing, tree):\n ##Вот эту дерминку не мешало бы переписать по человечески\n ##без дублирования кода, а то это какая-то кантуженная рекурсия\n if(schema_parsing.parSchemaParsing):\n tree = self.parser(schema_parsing.parSchemaParsing, tree)\n if(tree):\n if(schema_parsing.notAttr):\n listTags = tree.findAll(schema_parsing.tagName)\n for itemTag in listTags:\n if(not itemTag.attrs):\n return itemTag\n\n elif(schema_parsing.sequens):\n listTags = tree.findAll(schema_parsing.tagName, {schema_parsing.attrName : schema_parsing.attrVal})\n if(len(listTags) > schema_parsing.sequens - 1): ##изменил >= на >\n return listTags[schema_parsing.sequens - 1]\n else:\n return None\n else:\n return tree.find(schema_parsing.tagName, {schema_parsing.attrName : schema_parsing.attrVal})\n else:\n if(tree):\n if(schema_parsing.notAttr):\n listTags = tree.findAll(schema_parsing.tagName)\n for itemTag in listTags:\n if(not itemTag.attrs):\n return itemTag\n\n elif(schema_parsing.sequens):\n listTags = tree.findAll(schema_parsing.tagName, {schema_parsing.attrName : schema_parsing.attrVal})\n if(len(listTags) > schema_parsing.sequens - 1): ##изменил >= на >\n return listTags[schema_parsing.sequens - 1]\n else:\n return None\n else:\n return tree.find(schema_parsing.tagName, {schema_parsing.attrName : schema_parsing.attrVal})\n\n def executeExpression(self, expression_parsing, parameter):\n if(not parameter):\n return None\n\n if((expression_parsing.split)):\n parameter = parameter.split(expression_parsing.split)\n\n if((expression_parsing.shearTo or expression_parsing.shearFrom) and type(parameter) is not dict):\n parameter = parameter[expression_parsing.shearFrom : expression_parsing.shearTo]\n\n if((expression_parsing.regexp) and type(parameter) is str):\n parameter = findall(expression_parsing.regexp, parameter)\n\n if(expression_parsing.sequence):## or expression_parsing.split):\n ##Так как сделана дружественная индексация от 1 до N\n seque = expression_parsing.sequence - 1\n if(seque < len(parameter)):\n parameter = parameter[seque]\n\n if((expression_parsing.join) and type(parameter) is list):\n parameter = expression_parsing.join.join(parameter)\n\n return parameter\n\n def parsingResume(self, responseContent):\n if responseContent is None:\n return None\n\n tree = BeautifulSoup(responseContent,'html.parser')\n resultList = [] \n pastRecord = 0 #Счетчик просмотренных резюме\n notFound = tree.find(self.__notFound__.tagName, {self.__notFound__.attrName : self.__notFound__.attrVal})\n if(not notFound):\n resumes = tree.findAll(self.__bodyResponse__.tagName,{self.__bodyResponse__.attrName : self.__bodyResponse__.attrVal})\n for resumeItem in resumes:\n record = {}\n for rowParse in self.__schema__:\n for itemOper in rowParse:\n if(type(itemOper) is SchemaParsing):\n parameter = self.parser(itemOper, resumeItem)\n if((itemOper.target != 'origenLink')and parameter):\n parameter = parameter.get_text()\n elif(parameter):\n parameter = parameter['href']\n if(type(itemOper) is Expression):\n parameter = self.executeExpression(itemOper, parameter)\n\n if(type(itemOper) is SchemaParsing):\n record.setdefault(itemOper.target, parameter)\n else:\n\n 
record.setdefault(itemOper.SchemaParsing.target, parameter)\n ##resumeRecord.setAttr(itemOper.target, parameter)\n\n if(type(itemOper) is SchemaParsing):\n ##resumeRecord.setAttr(itemOper.target, parameter)\n ##print('itemOper.target>>',itemOper.target)\n record.setdefault(itemOper.target, parameter)\n else:\n ##resumeRecord.setAttr(itemOper.SchemaParsing.target, parameter)\n record.setdefault(itemOper.SchemaParsing.target, parameter)\n previewLink = self.domain.rootUrl + record.get('origenLink')\n record.setdefault('URL', previewLink)\n\n check_result = SearchResult.objects.filter(url=record.get( 'URL')).count()\n\n if not check_result:\n print('self.search_card>>', self.search_card)\n search_record = SearchResult.objects.create(search_card=self.search_card, domain=self.domain, pay=record.get('pay'), age=record.get('age'), \n jobExp=record.get('jobExp'), lastJob=record.get('lastJob'), jobTitle=record.get('jobTitle'), gender=record.get('gender'), url=record.get('URL'))\n\n search_record.save()\n else:\n search_record = SearchResult.objects.get(url=record.get('URL'))\n\n check_link = ResumeLink.objects.filter(url=search_record.url).count()\n\n if not check_link:\n resultList.append(search_record)\n self.countResume += 1\n else:\n print('>>',record)\n pastRecord += 1\n\n if(pastRecord == self.domain.itemRecord or self.countResume == self.__limit__):\n return resultList\n else:\n return None\n\n\n def parserResume(self, responseContent):\n tree = BeautifulSoup(responseContent,'html.parser')\n notFound = tree.find(self.__notFound__.tagName, {self.__notFound__.attrName : self.__notFound__.attrVal})\n if(not notFound):\n resumeRecord = ResumeData(self.domain)\n for rowParse in self.__schema__:\n ##Перебор по строкам схемы парсера\n for itemOper in rowParse:\n if(type(itemOper) is SchemaParsing):\n parameter = self.parser(itemOper, tree)\n if(parameter):\n parameter = parameter.get_text()\n\n if(type(itemOper) is Expression):\n parameter = self.executeExpression(itemOper, parameter)\n\n if(type(parameter) is str):\n parameter = parameter.strip()\n\n if(type(itemOper) is SchemaParsing):\n resumeRecord.setAttr(itemOper.target, parameter)\n else:\n resumeRecord.setAttr(itemOper.SchemaParsing.target, parameter)\n ##print('resumeRecord line-473>>',resumeRecord)\n resumeRecord.validGender()\n ##print('resumeRecord line-475>>',resumeRecord)\n return resumeRecord" }, { "alpha_fraction": 0.5235385298728943, "alphanum_fraction": 0.5387135744094849, "avg_line_length": 43.274810791015625, "blob_id": "cf2dc24e5350586a0b01f8170844d0bc13691463", "content_id": "3b2731f15961c15f078ee6b6e13d767afb607938", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5799, "license_type": "no_license", "max_line_length": 77, "num_lines": 131, "path": "/offset recrut/data_transfer.py", "repo_name": "FORSEN-ROCK/other", "src_encoding": "UTF-8", "text": "'''\n Save result in another database.\n'''\n\ndef save_candidate(**kwargs):\n import os\n import datetime\n from sqlalchemy import create_engine\n from sqlalchemy.orm import sessionmaker\n from sqlalchemy.ext.declarative import declarative_base\n\n connect_string = 'oracle+cx_oracle://%s:%s@%s' %(os.getenv(''),\n os.getenv(''),\n os.getenv(''))\n engine = create_engine(connect_string,\n exclude_tablespaces=[\"DATA01\", \"SOME_TABLESPACE\"])\n Base = declarative_base(bind=engine)\n Session = sessionmaker(bind=engine)\n Candidate = candidate_maker(Base)\n session = Session()\n time = datetime.datetime.utcnow()\n row_id = 
'HR-' + str(time.minute) + ':' + str(time.second)\n    cand = Candidate(row_id=row_id,\n                     created_by=kwargs.get('created_by', '0-1'),\n                     last_upd_by=kwargs.get('last_upd_by', '0-1'),\n                     education=kwargs.get('education', ''),\n                     email=kwargs.get('email', ''),\n                     experience=kwargs.get('experience', ''),\n                     first_name=kwargs.get('first_name', ''),\n                     gender=kwargs.get('gender', ''),\n                     job_vacancy=kwargs.get('job_vacancy', ''),\n                     last_name=kwargs.get('last_name', ''),\n                     mid_name=kwargs.get('mid_name', ''),\n                     phone=kwargs.get('phone', ''),\n                     region=kwargs.get('region', ''),\n                     source=kwargs.get('source', ''),\n                     auto_flg=kwargs.get('auto_flg', 'N'))\n    session.add(cand)\n    session.commit()\n    session.close()  # close this session; Session.close_all is a deprecated classmethod\n    return True\n\ndef candidate_maker(cls):\n    import datetime\n    # SQLAlchemy exports CHAR (not Char), and Integer takes no length argument\n    from sqlalchemy import CHAR, Column, Date, Integer, String\n\n    class Candidate(cls):\n        __tablename__ = 'CX_CANDIDATE'\n        __table_args__ = {\n            'schema': 'SIEBEL',\n            'extend_existing': True,\n            'implicit_returning': False,\n        }\n        row_id = Column(String(15), primary_key=True)\n        created = Column(Date)\n        created_by = Column(String(15))\n        last_upd = Column(Date)\n        last_upd_by = Column(String(15))\n        modification_num = Column(Integer)\n        conflict_id = Column(String(15))\n        db_last_upd = Column(Date)\n        db_last_upd_src = Column(String(50))\n        education = Column(String(50))\n        email = Column(String(50))\n        experience = Column(String(50))\n        first_name = Column(String(50))\n        gender = Column(String(10))\n        job_vacancy = Column(String(50))\n        last_name = Column(String(50))\n        mid_name = Column(String(50))\n        phone = Column(String(50))\n        position = Column(String(50))\n        region = Column(String(50))\n        skype = Column(String(50))\n        source = Column(String(50))\n        status_cd = Column(String(50))\n        auto_flg = Column(CHAR(1))\n        communication = Column(Integer)\n        computer_text = Column(String(250))\n        experience_text = Column(String(250))\n        interest_text = Column(String(250))\n        internet_flg = Column(CHAR(1))\n        making_decis = Column(Integer)\n        recomended1 = Column(CHAR(1))\n        recomended2 = Column(CHAR(1))\n        recomended3 = Column(CHAR(1))\n        result = Column(String(250))\n        result_orient = Column(Integer)\n        stress_block = Column(Integer)\n        want_work = Column(Integer)\n\n        def __init__(self, **kwargs):\n            self.row_id = kwargs.get('row_id')\n            self.created = datetime.datetime.utcnow()\n            self.created_by = kwargs.get('created_by', '0-1')\n            self.last_upd = datetime.datetime.utcnow()\n            self.last_upd_by = kwargs.get('last_upd_by', '0-1')\n            self.modification_num = kwargs.get('modification_num', '0')\n            self.conflict_id = kwargs.get('conflict_id', '0')\n            self.db_last_upd = datetime.datetime.utcnow()\n            self.db_last_upd_src = kwargs.get('db_last_upd_src', '')\n            self.education = kwargs.get('education')\n            self.email = kwargs.get('email')\n            self.experience = kwargs.get('experience')\n            self.first_name = kwargs.get('first_name')\n            self.gender = kwargs.get('gender')\n            self.job_vacancy = kwargs.get('job_vacancy')\n            self.last_name = kwargs.get('last_name')\n            self.mid_name = kwargs.get('mid_name')\n            self.phone = kwargs.get('phone')\n            self.position = kwargs.get('position', '')\n            self.region = kwargs.get('region')\n            self.skype = kwargs.get('skype', '')\n            self.source = kwargs.get('source')\n            self.status_cd = kwargs.get('status_cd', '')\n            self.auto_flg = kwargs.get('auto_flg')\n            self.communication = kwargs.get('communication', '')\n            self.computer_text = kwargs.get('computer_text', '')\n            self.experience_text = kwargs.get('experience_text', '')\n            self.interest_text = 
\ndef candidate_maker(cls):\n    import datetime\n    ## SQLAlchemy's fixed-width type is CHAR (not 'Char'), and Integer takes no length argument\n    from sqlalchemy import Column, Integer, String, Date, CHAR\n\n    class Candidate(cls):\n        __tablename__ = 'CX_CANDIDATE'\n        __table_args__ = {\n            'schema': 'SIEBEL',##\n            'extend_existing': True,\n            'implicit_returning': False,\n        }\n        row_id = Column(String(15), primary_key=True)\n        created = Column(Date)\n        created_by = Column(String(15))\n        last_upd = Column(Date)\n        last_upd_by = Column(String(15))\n        modification_num = Column(Integer)\n        conflict_id = Column(String(15))\n        db_last_upd = Column(Date)\n        db_last_upd_src = Column(String(50))\n        education = Column(String(50))\n        email = Column(String(50))\n        experience = Column(String(50))\n        first_name = Column(String(50))\n        gender = Column(String(10))\n        job_vacancy = Column(String(50))\n        last_name = Column(String(50))\n        mid_name = Column(String(50))\n        phone = Column(String(50))\n        position = Column(String(50))\n        region = Column(String(50))\n        skype = Column(String(50))\n        source = Column(String(50))\n        status_cd = Column(String(50)) \n        auto_flg = Column(CHAR(1))\n        communication = Column(Integer)\n        computer_text = Column(String(250))\n        experience_text = Column(String(250))\n        interest_text = Column(String(250))\n        internet_flg = Column(CHAR(1))\n        making_decis = Column(Integer)\n        recomended1 = Column(CHAR(1))\n        recomended2 = Column(CHAR(1))\n        recomended3 = Column(CHAR(1))\n        result = Column(String(250))\n        result_orient = Column(Integer)\n        stress_block = Column(Integer)\n        want_work = Column(Integer)\n\n        def __init__(self, **kwargs):\n            self.row_id = kwargs.get('row_id')\n            self.created = datetime.datetime.utcnow()\n            self.created_by = kwargs.get('created_by', '0-1')\n            self.last_upd = datetime.datetime.utcnow()\n            self.last_upd_by = kwargs.get('last_upd_by', '0-1')\n            self.modification_num = kwargs.get('modification_num', '0')\n            self.conflict_id = kwargs.get('conflict_id', '0')\n            self.db_last_upd = datetime.datetime.utcnow()\n            self.db_last_upd_src = kwargs.get('db_last_upd_src', '')\n            self.education = kwargs.get('education')\n            self.email = kwargs.get('email')\n            self.experience = kwargs.get('experience')\n            self.first_name = kwargs.get('first_name')\n            self.gender = kwargs.get('gender')\n            self.job_vacancy = kwargs.get('job_vacancy')\n            self.last_name = kwargs.get('last_name')\n            self.mid_name = kwargs.get('mid_name')\n            self.phone = kwargs.get('phone')\n            self.position = kwargs.get('position', '')\n            self.region = kwargs.get('region')\n            self.skype = kwargs.get('skype', '')\n            self.source = kwargs.get('source')\n            self.status_cd = kwargs.get('status_cd', '')\n            self.auto_flg = kwargs.get('auto_flg')\n            self.communication = kwargs.get('communication', '')\n            self.computer_text = kwargs.get('computer_text', '')\n            self.experience_text = kwargs.get('experience_text', '')\n            self.interest_text = kwargs.get('interest_text', '')\n            self.internet_flg = kwargs.get('internet_flg', 'N')\n            self.making_decis = kwargs.get('making_decis', '')\n            self.recomended1 = kwargs.get('recomended1', 'N')\n            self.recomended2 = kwargs.get('recomended2', 'N')\n            self.recomended3 = kwargs.get('recomended3', 'N')\n            self.result = kwargs.get('result', '')\n            self.result_orient = kwargs.get('result_orient', '')\n            self.stress_block = kwargs.get('stress_block', '')\n            self.want_work = kwargs.get('want_work', '')\n\n    return Candidate" }, { "alpha_fraction": 0.5718317627906799, "alphanum_fraction": 0.5766299962997437, "avg_line_length": 40.69411849975586, "blob_id": "788d4314e11a12b6c6f874dc06564d6bfc57d51f", "content_id": "8477b43ef59ed5dbfa929b0220e0ffed62a8d8c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3672, "license_type": "no_license", "max_line_length": 178, "num_lines": 85, "path": "/Seizure/script/background.js", "repo_name": "FORSEN-ROCK/other", "src_encoding": "WINDOWS-1251", "text": "//chrome.contextMenus.onClicked.addListener(function(e,t){var n,r,o;return r=e.menuItemId[0],n=e.menuItemId.slice(1),r=getType(r),o=getValue(e,t,r),openTab(n,r,o,t)})\n/*chrome.contextMenus.onClicked.addListener(function(event,tab){\n    var n,r,o;\n    var testLog = {\n        frameId : event.frameId,\n        menuItemId : event.menuItemId,\n        menuItemId : event.menuItemId,\n    }\n    console.log(testLog)})\n*/\n\nchrome.extension.onMessage.addListener(function(request) {\n    console.log(request);\n\tif(request.resumeId) { // check that the message comes from the expected window and script\n\t\tlocalStorage['request.origin'] = request.origin;\n        localStorage['request.resumeId'] = request.resumeId;\n        console.log('Url: ', request.origin);\n        console.log('id: ', request.resumeId);\n    }\n});\n\nfunction genericOnClick() {\n    //return function(){\n    var origin = localStorage['request.origin'];\n    switch(localStorage['request.origin']) {\n        case \"https://hh.ru\":\n            var request = new XMLHttpRequest();\n            \n            //request.open(\"GET\", \"http://api.hh.ru/resumes/resumeId\".replace(\"resumeId\",localStorage['request.resumeId']),true);\n            request.open(\"GET\", \"https://hh.ru/search/resume?&area=1&clusters=true&text=Siebel&pos=full_text&logic=normal&exp_period=all_time&items_on_page=100&page=0\",true);\n            //request.setRequestHeader(\"User-Agent\", \"hh-recommender\");\n            //console.log(\"http://api.hh.ru/resumes/resumeId\".replace(\"resumeId\",localStorage['request.resumeId']));\n            //request.send();\n            request.onreadystatechange = function(){\n                if(request.readyState == 4) {\n                    console.log(request.responseText);\n                    //var responseFromHh = request.responseText; // expected to be a JSON string with the resume \n                    //var dataParse = JSON.parse(responseFromHh); // here we get the resume as a JS object\n                    //console.log(dataParse);\n                }\n            }\n            request.send();\n            break;\n        case \"https://www.superjob.ru\":\n            //var request = new XMLHttpRequest();\n            //request.open(\"GET\", \"\thttps://api.superjob.ru/2.0/resumes/:id/\".replase(\":id\",localStorage['request.resumeId']),true);\n            //if(request.readyState == 4) {\n            //    var responseFromHh = request.responseText; // expected to be a JSON string with the resume \n            //    var dataParse = JSON.parse(responseFromHh); // here we get the resume as a JS object\n            //}\n            break;\n        case \"https://www.rabota.ru\":\n            break;\n        case \"http://www.job-mo.ru\":\n            break;\n        case \"https://career.ru\":\n            break;\n        case \"https://job.ru\":\n            break;\n    }\n    //else if(localStorage['request.origin']\n    //};\n    //}\n\n\n    /*return function(info, tab) {\n        // The srcUrl property is only 
available for image elements.\n var url = 'info.html#' + info.srcUrl;\n // Create a new window to the info page.\n chrome.windows.create({ url: url, width: 520, height: 660 });\n */\n //};\n};\n//chrome.contextMenus.onClicked.addListener(genericOnClick);\n/*\n\n});\n*/\n\n\nvar parent = chrome.contextMenus.create({\"title\": \"Test Seizure\", \"onclick\": genericOnClick});\n\nchrome.browserAction.onClicked.addListener(function(tab) {\n chrome.tabs.create({url:chrome.extension.getURL(\"option.html\")});\n});" }, { "alpha_fraction": 0.502711296081543, "alphanum_fraction": 0.5030303001403809, "avg_line_length": 39.20512771606445, "blob_id": "7f5627ee89e52e8e3ff7fb1cf2789c791676c16c", "content_id": "72e423477aaf7429bad6b73dfc843e88809682f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3135, "license_type": "no_license", "max_line_length": 118, "num_lines": 78, "path": "/Seizure/script/content.js", "repo_name": "FORSEN-ROCK/other", "src_encoding": "UTF-8", "text": "window.addEventListener(\"contextmenu\", function(event) {\n switch(window.location.origin) {\n case \"https://hh.ru\":\n var parent = event.target.closest(\"tr[itemscope='itemscope']\");\n if(parent) {\n var id = parent.dataset.hhResumeHash;\n }else{\n return;\n }\n break;\n case \"https://www.superjob.ru\":\n var parent = event.target.closest(\"div[class='ResumeListElementNew js-resume-item']\");\n if(parent) {\n containerResumeId = parent.querySelector(\"div[class='sj_block m_t_3 ng-isolate-scope ng-hide']\");\n if(containerResumeId) {\n var id = containerResumeId.getAttribute(\"resume-id\");\n }\n }\n else {\n return;\n }\n break;\n case \"https://www.rabota.ru\":\n var target = event.target;\n var containerResumeId;\n if(target.matches(\"div[class='b-center__box resum_rez_item resum_rez_active']\")) {\n containerResumeId = target;\n }\n else if(target.matches(\"div[class='b-center__box resum_rez_item ']\")){\n containerResumeId = target;\n }\n else {\n if(target.closest(\"div[class='b-center__box resum_rez_item resum_rez_active']\")){\n containerResumeId = target.closest(\"div[class='b-center__box resum_rez_item resum_rez_active']\");\n }else {\n containerResumeId = target.closest(\"div[class='b-center__box resum_rez_item ']\");\n }\n }\n if(!containerResumeId){\n return;\n }\n var id = containerResumeId.dataset.resumeId;\n break;\n case \"http://www.job-mo.ru\":\n var container = event.target.closest(\"tr\");\n var hrefResumeId = container.querySelector(\"a[target='_blank']\");\n if(!hrefResumeId) {\n hrefResumeId = container.previousElementSibling.querySelector(\"a[target='_blank']\");\n }\n if(!hrefResumeId) {\n return;\n }\n var id = hrefResumeId.getAttribute(\"href\");\n break;\n case \"http://www.avito.ru\":\n console.log(\"dwkd;lwkde\");\n //var container = event.target.closest(\"tr\");\n //var hrefResumeId = container.querySelector(\"a[target='_blank']\");\n //if(!hrefResumeId) {\n // hrefResumeId = container.previousElementSibling.querySelector(\"a[target='_blank']\");\n //}\n //if(!hrefResumeId) {\n // return;\n //}\n anchor.click(function() {chrome.tabs.create({url:\"searchPageWidget.html\"});});\n var id = \"http://www.avito.ru\";\n break;\n //case \"https://career.ru\":\n //handlerk\n // break;\n //case \"https://job.ru\":\n //handler\n // break;\n }\n if(id){\n chrome.extension.sendMessage({resumeId: id,origin: window.location.origin});\n }\n});" }, { "alpha_fraction": 0.6009378433227539, "alphanum_fraction": 0.6034741997718811, "avg_line_length": 
31.108901977539062, "blob_id": "368d761ce27a6d7cab3162e2aa0368462d34a522", "content_id": "83b4d2b56f7b420e67d768bfa2c9e8301b632f74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33921, "license_type": "no_license", "max_line_length": 82, "num_lines": 1056, "path": "/parser_xml/data_parser.py", "repo_name": "FORSEN-ROCK/other", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport codecs\nimport datetime\nimport re\nimport xml.etree.ElementTree as etree\nimport argparse\nimport threading\n\nPATH_EVENTS_SOURCE = r'D:\\git-project\\parser_xml\\events'\nPATH_CNDIDATES_SOURCE = r'D:\\git-project\\parser_xml\\candidates'\nPATH_SAVE_EVENTS = r'D:\\git-project\\parser_xml\\flet_events'\nPATH_SAVE_CANDIDATES = r'D:\\git-project\\parser_xml\\flet_candidates'\nPATH_VACANCY_SOURCE = r'D:\\git-project\\parser_xml\\vacancy'\nPATH_VACANCY_SAVE = r'D:\\git-project\\parser_xml\\flet_vacancy'\nPATH_DIVISION_SOURCE = r'D:\\git-project\\parser_xml\\division'\nPATH_DIVIVISION_SAVE = r'D:\\git-project\\parser_xml\\flet_division'\nPATH_VACANCY_CAND_SAVE = r'D:\\git-project\\parser_xml\\flet_candidate_vacanct'\nPATH_SAVE_CANDIDATES_ERROR_LOG = r'D:\\git-project\\parser_xml\\candidates_error_log'\n\n\ndef set_files_path():\n global PATH_EVENTS_SOURCE\n global PATH_CNDIDATES_SOURCE\n global PATH_SAVE_EVENTS\n global PATH_SAVE_CANDIDATES\n global PATH_VACANCY_SOURCE\n global PATH_VACANCY_SAVE\n global PATH_DIVISION_SOURCE\n global PATH_DIVIVISION_SAVE\n global PATH_VACANCY_CAND_SAVE\n global PATH_SAVE_CANDIDATES_ERROR_LOG\n\n PATH_EVENTS_SOURCE = os.getenv('PATH_EVENTS_SOURCE')\n PATH_CNDIDATES_SOURCE = os.getenv('PATH_CNDIDATES_SOURCE')\n PATH_SAVE_EVENTS = os.getenv('PATH_SAVE_EVENTS')\n PATH_SAVE_CANDIDATES = os.getenv('PATH_SAVE_CANDIDATES')\n PATH_VACANCY_SOURCE = os.getenv('PATH_VACANCY_SOURCE')\n PATH_VACANCY_SAVE = os.getenv('PATH_VACANCY_SAVE')\n PATH_DIVISION_SOURCE = os.getenv('PATH_DIVISION_SOURCE')\n PATH_DIVIVISION_SAVE = os.getenv('PATH_DIVIVISION_SAVE')\n PATH_VACANCY_CAND_SAVE = os.getenv('PATH_VACANCY_CAND_SAVE')\n PATH_SAVE_CANDIDATES_ERROR_LOG = os.getenv(\n 'PATH_SAVE_CANDIDATES_ERROR_LOG'\n )\n\n\ndef get_files_path():\n print(\"PATH_EVENTS_SOURCE = %s\" % (PATH_EVENTS_SOURCE))\n print(\"PATH_CNDIDATES_SOURCE = %s\" % (PATH_CNDIDATES_SOURCE))\n print(\"PATH_SAVE_EVENTS = %s\" % (PATH_SAVE_EVENTS))\n print(\"PATH_SAVE_CANDIDATES = %s\" % (PATH_SAVE_CANDIDATES))\n print(\"PATH_VACANCY_SOURCE = %s\" % (PATH_VACANCY_SOURCE))\n print(\"PATH_VACANCY_SAVE = %s\" % (PATH_VACANCY_SAVE))\n print(\"PATH_DIVISION_SOURCE = %s\" % (PATH_DIVISION_SOURCE))\n print(\"PATH_DIVIVISION_SAVE = %s\" % (PATH_DIVIVISION_SAVE))\n print(\"PATH_VACANCY_CAND_SAVE = %s\" % (PATH_VACANCY_CAND_SAVE))\n print(\"PATH_SAVE_CANDIDATES_ERROR_LOG =%s\" %(\n PATH_SAVE_CANDIDATES_ERROR_LOG\n ))\n\n\nclass BaseException(Exception):\n def __init__(self, message):\n self.message = message\n\n\nclass ParserError(BaseException):\n pass\n\n\nclass BaseParserXML(object):\n\n def _get_target(self, target_name, xml_content,\n return_xml_element=False):\n \"\"\"Find target value or element in xml tree\n Takes xml tree and target_name\n Returned value\n \"\"\"\n target_attr = getattr(self, 'target_' + target_name, None)\n\n if not target_attr:\n raise ParserError(\"No goal %s specified\" % target_name)\n\n if xml_content:\n target_tag = xml_content.find(target_attr)\n else:\n target_tag = ''\n\n if target_tag is not None:\n if return_xml_element:\n result = target_tag\n 
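## when return_xml_element is False the element's text payload is returned instead\n            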
else:\n result = target_tag.text\n else:\n result = ''\n\n return result\n\n def _get_more_target(self, target_name, xml_content,\n returned_xml_elements=False):\n \"\"\"Find all target value\n Takes target name and xml content\n Returned list values\n \"\"\"\n target_attr = getattr(self, 'target_' + target_name, None)\n\n if not target_attr:\n raise ParserError(\"No goal %s specified\" % target_name)\n\n if xml_content:\n tag_list = xml_content.findall(target_attr)\n else:\n tag_list = []\n\n if tag_list:\n\n if not returned_xml_elements:\n value_list = [\n tag.text for tag in tag_list\n ]\n else:\n value_list = tag_list\n else:\n value_list = []\n\n return value_list\n\n\nclass EventParse(BaseParserXML):\n target_id = 'id'\n target_type_id = 'type_id'\n # target_is_derived = 'is_derived'\n target_date = 'date'\n target_vacancy_id = 'vacancy_id'\n target_candidate_id = 'candidate_id'\n target_comment = 'comment'\n target_creation_date = 'creation_date'\n target_contact_phones_desc = 'contact_phones_desc'\n\n # target_is_rr_poll = 'is_rr_poll'\n\n def get_id(self, xml_content):\n return self._get_target('id', xml_content)\n\n def get_type_id(self, xml_content):\n return self._get_target('type_id', xml_content)\n\n def get_date(self, xml_content):\n return self._get_target('date', xml_content)\n\n def get_vacancy_id(self, xml_content):\n return self._get_target('vacancy_id', xml_content)\n\n def get_candidate_id(self, xml_content):\n return self._get_target('candidate_id', xml_content)\n\n def get_comment(self, xml_content):\n data_raw = self._get_target('comment', xml_content)\n data = data_raw.replace('\\r\\n', '')\n data = data.replace('\\r', '')\n data = data.replace('\\n', '')\n data = data.replace('\\t', ' ')\n data = re.findall(\n '[^#\\d+][A-Za-zА-ЯЁа-яё.\\d\\-\\_\\:\\s]+',\n data\n )\n data_string = str(' ').join(data)\n data_string = data_string.replace(';', ',')\n return data_string\n\n def get_creation_date(self, xml_content):\n return self._get_target('creation_date', xml_content)\n\n def get_contact_phones_desc(self, xml_content):\n data_raw = self._get_target('contact_phones_desc', xml_content)\n data = data_raw.replace(';', ',')\n return data\n\n\nclass CandidateParser(BaseParserXML):\n target_id = 'id'\n target_code = 'code'\n target_first_name = 'firstname'\n target_last_name = 'lastname'\n target_middle_name = 'middlename'\n target_is_candidate = 'is_candidate'\n target_gender = 'gender_id'\n target_birth = 'birth_date'\n target_age = 'age'\n target_homme_phone = 'home_phone'\n target_phone = 'mobile_phone'\n target_email = 'email'\n target_email_2 = 'email2'\n target_creation_date = 'creation_date'\n target_last_mod_date = 'last_mod_date'\n target_entrance = 'entrance_type_id'\n target_source = 'source_id'\n target_city = 'location_id'\n target_salary = 'salary'\n target_uni_salary = 'uni_salary'\n target_vacancy_id = 'vacancy_id'\n target_main_vacancy_id = 'main_vacancy_id'\n target_main_vac_div = 'main_vacancy_division_id'\n\n def get_id(self, xml_content):\n return self._get_target('id', xml_content)\n\n def get_code(self, xml_content):\n return self._get_target('code', xml_content)\n\n def get_first_name(self, xml_content):\n return self._get_target('first_name', xml_content)\n\n def get_last_name(self, xml_content):\n return self._get_target('last_name', xml_content)\n\n def get_middle_name(self, xml_content):\n return self._get_target('middle_name', xml_content)\n\n def get_is_candidate(self, xml_content):\n return self._get_target('is_candidate', xml_content)\n\n 
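## each get_* wrapper below resolves its XML tag name from the matching\n    ## target_* class attribute via getattr inside _get_target\n    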
def get_gender(self, xml_content):\n return self._get_target('gender', xml_content)\n\n def get_birth(self, xml_content):\n return self._get_target('birth', xml_content)\n\n def get_age(self, xml_content):\n return self._get_target('age', xml_content)\n\n def get_homme_phone(self, xml_content):\n raw = self._get_target('homme_phone', xml_content)\n data = raw.replace(';', ',')\n return data\n\n def get_phone(self, xml_content):\n raw = self._get_target('phone', xml_content)\n data = raw.replace(';', ',')\n return data\n\n def get_email(self, xml_content):\n return self._get_target('email', xml_content)\n\n def get_email_2(self, xml_content):\n return self._get_target('email_2', xml_content)\n\n def get_creation_date(self, xml_content):\n return self._get_target('creation_date', xml_content)\n\n def get_last_mod_date(self, xml_content):\n return self._get_target('last_mod_date', xml_content)\n\n def get_entrance(self, xml_content):\n return self._get_target('entrance', xml_content)\n\n def get_source(self, xml_content):\n return self._get_target('source', xml_content)\n\n def get_city(self, xml_content):\n return self._get_target('city', xml_content)\n\n def get_salary(self, xml_content):\n return self._get_target('salary', xml_content)\n\n def get_uni_salary(self, xml_content):\n return self._get_target('uni_salary', xml_content)\n\n def get_vacancy_id(self, xml_content):\n return self._get_target('vacancy_id', xml_content)\n\n def get_main_vacancy_id(self, xml_content):\n return self._get_target('main_vacancy_id', xml_content)\n\n def get_main_vac_div(self, xml_content):\n return self._get_target('main_vac_div', xml_content)\n\n\nclass VacancyParser(BaseParserXML):\n target_id = 'id'\n target_name = 'name'\n target_code = 'code'\n target_division = 'division_id'\n target_created = 'start_date'\n target_final_candidate = 'final_candidate_id'\n target_source = 'final_candidate_source_id'\n target_city = 'location_id'\n target_position_name = 'position_name'\n target_comment = 'comment'\n target_records = 'records'\n target_mass_vacancy = 'is_mass_vacancy'\n target_mp_vacancy = 'is_mp_vacancy'\n\n target_record = 'record'\n target_event_date = 'date'\n target_type = 'type_id'\n target_status = 'state_id'\n\n def get_id(self, xml_content):\n return self._get_target('id', xml_content)\n\n def get_name(self, xml_content):\n return self._get_target('name', xml_content)\n\n def get_code(self, xml_content):\n return self._get_target('code', xml_content)\n\n def get_division(self, xml_content):\n return self._get_target('division', xml_content)\n\n def get_created(self, xml_content):\n return self._get_target('created', xml_content)\n\n def get_final_candidate(self, xml_content):\n return self._get_target('final_candidate', xml_content)\n\n def get_source(self, xml_content):\n return self._get_target('source', xml_content)\n\n def get_city(self, xml_content):\n return self._get_target('city', xml_content)\n\n def get_position_name(self, xml_content):\n return self._get_target('position_name', xml_content)\n\n def get_comment(self, xml_content):\n raw = self._get_target('comment', xml_content)\n data = raw.replace('\\r\\n', '')\n data = data.replace('\\r', '')\n data = data.replace('\\n', '')\n data = data.replace('\\t', ' ')\n data = re.findall(\n '[^#\\d+][A-Za-zА-ЯЁа-яё\\.\\d\\-\\_\\:\\s]+',\n data\n )\n data_string = str(' ').join(data)\n data_string = data_string.replace(';', ',')\n return data_string\n\n def get_mass_vacancy(self, xml_content):\n raw = self._get_target('mass_vacancy', 
xml_content)\n\n if raw == '':\n data = 0\n else:\n data = raw\n\n return data\n\n def get_mp_vacancy(self, xml_content):\n raw = self._get_target('mp_vacancy', xml_content)\n\n if raw == '':\n data = 0\n else:\n data = raw\n\n return data\n\n def get_status(self, xml_content):\n return self._get_target('status', xml_content)\n\n def get_history(self, xml_content):\n tag_records = self._get_target('records', xml_content,\n return_xml_element=True)\n tag_list = self._get_more_target('record', tag_records,\n returned_xml_elements=True)\n status_list = []\n\n for tag in tag_list:\n status_dict = {}\n event_date = self._get_target('event_date', tag)\n type = self._get_target('type', tag)\n status = self._get_target('status', tag)\n\n status_dict.setdefault('date', event_date)\n status_dict.setdefault('type', type)\n status_dict.setdefault('status', status)\n\n status_list.append(status_dict)\n\n return status_list\n\n\nclass VacancyCandidates(BaseParserXML):\n target_vacansie = 'id'\n target_candidate = 'multi_final_candidate_id'\n\n def get_vacancy(self, xml_content):\n return self._get_target('vacansie', xml_content)\n\n def get_candidates(self, xml_content):\n return self._get_more_target('candidate', xml_content)\n\n\nclass DivisionParser(BaseParserXML):\n target_id = 'id'\n target_parent_id = 'parent_id'\n target_created = 'creation_date'\n target_last_update = 'last_mod_date'\n target_code = 'code'\n target_name = 'name'\n target_type = 'type_id'\n target_hce = 'hce'\n target_eid = 'eid'\n\n def get_id(self, xml_content):\n return self._get_target('id', xml_content)\n\n def get_parent_id(self, xml_content):\n return self._get_target('parent_id', xml_content)\n\n def get_created(self, xml_content):\n return self._get_target('created', xml_content)\n\n def get_last_update(self, xml_content):\n return self._get_target('last_update', xml_content)\n\n def get_code(self, xml_content):\n return self._get_target('code', xml_content)\n\n def get_name(self, xml_content):\n return self._get_target('name', xml_content)\n\n def get_type(self, xml_content):\n return self._get_target('type', xml_content)\n\n def get_hce(self, xml_content):\n return self._get_target('hce', xml_content)\n\n def get_eid(self, xml_content):\n return self._get_target('eid', xml_content)\n\n\ndef save_error(error_list):\n date = datetime.datetime.utcnow()\n date_performance = date.strftime('%d_%m_%G')\n name = 'error_log_%s.csv' %(date_performance)\n file = codecs.open(PATH_SAVE_CANDIDATES_ERROR_LOG + '\\\\' +\n name,\n mode='bw', encoding='Windows-1251')\n for error in error_list:\n data_str = '%s\\n' % (error)\n file.write(data_str)\n file.close()\n\n\ndef event_parser(cls, xml_content):\n if not cls:\n raise ParserError(\"Required argument is None\")\n\n if not xml_content:\n raise ParserError(\"XML is None\")\n\n event = {}\n\n event_id = cls.get_id(xml_content)\n event.setdefault('id', event_id)\n\n type_id = cls.get_type_id(xml_content)\n event.setdefault('type_id', type_id)\n\n date = cls.get_date(xml_content)\n event.setdefault('date', date)\n\n vacancy_id = cls.get_vacancy_id(xml_content)\n event.setdefault('vacancy_id', vacancy_id)\n\n candidate_id = cls.get_candidate_id(xml_content)\n event.setdefault('candidate_id', candidate_id)\n\n comment = cls.get_comment(xml_content)\n event.setdefault('comment', comment)\n\n creation_date = cls.get_creation_date(xml_content)\n event.setdefault('creation_date', creation_date)\n\n contact_phones_desc = cls.get_contact_phones_desc(xml_content)\n 
event.setdefault('contact_phones_desc', contact_phones_desc)\n\n return event\n\n\ndef event_save(event_list):\n date = datetime.datetime.utcnow()\n date_performance = date.strftime('%d_%m_%G')\n name = 'event_result_%s.csv' % (date_performance)\n file = codecs.open(PATH_SAVE_EVENTS + '\\\\' + name,\n mode='bw', encoding='Windows-1251')\n file.write(\"id;type;date;vacancy_id;candidate_id;creation_date;\" +\n \"contact_phones_desc;comment\\n\")\n for event in event_list:\n data_str = \"%s;%s;%s;%s;%s;%s;%s;%s\\n\" % (\n event['id'],\n event['type_id'],\n event['date'],\n event['vacancy_id'],\n event['candidate_id'],\n event['creation_date'],\n event['contact_phones_desc'],\n event['comment']\n )\n file.write(data_str)\n file.close()\n\n\ndef event_scan():\n event_xml_parser = EventParse()\n event_list = []\n for path_dir, dirs, files in os.walk(PATH_EVENTS_SOURCE):\n for file in files:\n file_source = path_dir + '\\\\' + file\n tree = etree.parse(file_source)\n root = tree.getroot()\n event = event_parser(event_xml_parser, root)\n event_list.append(event)\n\n event_save(event_list)\n\n\ndef candidate_parser(cls, xml_content):\n if not cls:\n raise ParserError(\"Required argument is None\")\n\n if not xml_content:\n raise ParserError(\"XML is None\")\n\n candidate = {}\n\n candidate_id = cls.get_id(xml_content)\n candidate.setdefault('id', candidate_id)\n\n candidate_code = cls.get_code(xml_content)\n candidate.setdefault('code', candidate_code)\n\n candidate_first_name = cls.get_first_name(xml_content)\n candidate.setdefault('first_name', candidate_first_name)\n\n candidate_last_name = cls.get_last_name(xml_content)\n candidate.setdefault('last_name', candidate_last_name)\n\n candidate_middle_name = cls.get_middle_name(xml_content)\n candidate.setdefault('middle_name', candidate_middle_name)\n\n candidate_gender = cls.get_gender(xml_content)\n candidate.setdefault('gender', candidate_gender)\n\n candidate_flag = cls.get_is_candidate(xml_content)\n candidate.setdefault('is_candidate', candidate_flag)\n\n candidate_birth = cls.get_birth(xml_content)\n candidate.setdefault('birth', candidate_birth)\n\n candidate_age = cls.get_age(xml_content)\n candidate.setdefault('age', candidate_age)\n\n candidate_homme_phone = cls.get_homme_phone(xml_content)\n candidate.setdefault('homme_phone', candidate_homme_phone)\n\n candidate_phone = cls.get_phone(xml_content)\n candidate.setdefault('phone', candidate_phone)\n\n candidate_email = cls.get_email(xml_content)\n candidate.setdefault('email', candidate_email)\n\n candidate_email_2 = cls.get_email_2(xml_content)\n candidate.setdefault('email_2', candidate_email_2)\n\n candidate_create = cls.get_creation_date(xml_content)\n candidate.setdefault('creation_date', candidate_create)\n\n candidate_last_mod = cls.get_last_mod_date(xml_content)\n candidate.setdefault('last_mod_date', candidate_last_mod)\n\n candidate_entrance = cls.get_entrance(xml_content)\n candidate.setdefault('entrance', candidate_entrance)\n\n candidate_source = cls.get_source(xml_content)\n candidate.setdefault('source', candidate_source)\n\n candidate_city = cls.get_city(xml_content)\n candidate.setdefault('city', candidate_city)\n\n candidate_salary = cls.get_salary(xml_content)\n candidate.setdefault('salary', candidate_salary)\n\n candidate_uni_salary = cls.get_uni_salary(xml_content)\n candidate.setdefault('uni_salary', candidate_uni_salary)\n\n candidate_vacancy = cls.get_vacancy_id(xml_content)\n candidate.setdefault('vacancy_id', candidate_vacancy)\n\n candidate_main_vacancy = 
cls.get_main_vacancy_id(xml_content)\n    candidate.setdefault('main_vacancy_id', candidate_main_vacancy)\n\n    candidate_main_vac_div = cls.get_main_vac_div(xml_content)\n    candidate.setdefault('main_vac_div', candidate_main_vac_div)\n\n    return candidate\n\n\ndef candidate_save(candidate_list):\n    date = datetime.datetime.utcnow()\n    date_performance = date.strftime('%d_%m_%G')\n    name = 'candidate_result_%s.csv' % (date_performance)\n    file = codecs.open(PATH_SAVE_CANDIDATES + '\\\\' + name,\n                       mode='bw', encoding='Windows-1251')\n    format_str = \"%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;\"\n    format_str += \"%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s\\n\"\n    file.write(\"id;code;first_name;last_name;middle_name;gender;\" +\n               \"is_candidate;birth;age;homme_phone;phone;email;\" +\n               \"email_2;creation_date;last_mod_date;entrance;\" +\n               \"source;city;salary;uni_salary;vacancy_id;\" +\n               \"main_vacancy_id;main_vacancy_division_id\\n\")\n    for candidate in candidate_list:\n        data_str = format_str % (\n            candidate['id'],\n            candidate['code'],\n            candidate['first_name'],\n            candidate['last_name'],\n            candidate['middle_name'],\n            candidate['gender'],\n            candidate['is_candidate'],\n            candidate['birth'],\n            candidate['age'],\n            candidate['homme_phone'],\n            candidate['phone'],\n            candidate['email'],\n            candidate['email_2'],\n            candidate['creation_date'],\n            candidate['last_mod_date'],\n            candidate['entrance'],\n            candidate['source'],\n            candidate['city'],\n            candidate['salary'],\n            candidate['uni_salary'],\n            candidate['vacancy_id'],\n            candidate['main_vacancy_id'],\n            candidate['main_vac_div']\n        )\n        file.write(data_str)\n    file.close()\n\n\ndef clear_attachments(file_source):\n    file_in = codecs.open(file_source, mode='br')\n    data = file_in.read()\n    file_in.close()\n    reg_exp = b'<attachments>.+</attachments>'\n    attachments = re.findall(reg_exp, data)\n    try:\n        attachments_sub_str = attachments[0]\n    except IndexError:\n        attachments_sub_str = None\n\n    if attachments_sub_str:\n        data = data.replace(attachments_sub_str, b'')\n        file_out = codecs.open(file_source, mode='bw')\n        file_out.write(data)\n        file_out.close()\n        execution = True\n    else:\n        execution = False\n\n    return execution\n\ndef clear(file_source):\n    file_in = codecs.open(file_source, mode='br')\n    data = file_in.read()\n    file_in.close()\n    pattern_1 = b'<attachments>'\n    pattern_2 = b'</attachments>'\n    first_in = data.find(pattern_1)\n    last_in = data.find(pattern_2) + len(pattern_2)\n    data_1 = data[:first_in]\n    data_2 = data[last_in:]\n\n    if data_1 and data_2:\n        data = b''.join([data_1, data_2])\n        file_out = codecs.open(file_source, mode='bw')\n        file_out.write(data)\n        file_out.close()\n        execution = True\n    else:\n        execution = False\n\n    return execution\n\ndef candidate_scan(stream=0):\n    candidate_xml_parser = CandidateParser()\n    candidate_list = []\n    error_list = []\n    for path_dir, dirs, files in os.walk(PATH_CNDIDATES_SOURCE):\n        for file in files:\n            file_source = PATH_CNDIDATES_SOURCE + '\\\\' + file\n            try:\n                tree = etree.parse(file_source)\n            except etree.ParseError:\n                execution = clear(file_source)\n\n                if execution:\n                    tree = etree.parse(file_source)\n                else:\n                    error_list.append(file)\n                    ## skip this file: 'tree' would otherwise be unbound (or\n                    ## stale from the previous iteration) when repair fails\n                    continue\n\n            root = tree.getroot()\n            candidate = candidate_parser(candidate_xml_parser, root)\n            candidate_list.append(candidate)\n\n    candidate_save(candidate_list)\n\n    if len(error_list) >= 1:\n        save_error(error_list)\n\ndef vacancy_parser(cls, xml_content):\n    if not cls:\n        raise ParserError(\"Required argument is None\")\n\n    if not xml_content:\n        raise ParserError(\"XML is None\")\n\n    id = cls.get_id(xml_content)\n    name = 
cls.get_name(xml_content)\n code = cls.get_code(xml_content)\n division = cls.get_division(xml_content)\n created = cls.get_created(xml_content)\n final_candidate = cls.get_final_candidate(xml_content)\n source = cls.get_source(xml_content)\n city = cls.get_city(xml_content)\n position_name = cls.get_position_name(xml_content)\n comment = cls.get_comment(xml_content)\n is_mass_vacancy = cls.get_mass_vacancy(xml_content)\n is_mp_vacancy = cls.get_mp_vacancy(xml_content)\n\n history = cls.get_history(xml_content)\n\n if len(history) < 1:\n # print(\"Allert! vacancy_id = %s\" %(id))\n status = cls.get_status(xml_content)\n create_history = {}\n create_history.setdefault('date', created)\n create_history.setdefault('status', status)\n create_history.setdefault('type', '')\n history.append(create_history)\n\n for item in history:\n item.setdefault('id', id)\n item.setdefault('name', name)\n item.setdefault('code', code)\n item.setdefault('division', division)\n item.setdefault('created', created)\n item.setdefault('final_candidate', final_candidate)\n item.setdefault('source', source)\n item.setdefault('city', city)\n item.setdefault('position_name', position_name)\n item.setdefault('comment', comment)\n item.setdefault('mass_vacancy', is_mass_vacancy)\n item.setdefault('mp_vacancy', is_mp_vacancy)\n\n return history\n\n\ndef vacancy_save(vacancy_list):\n date = datetime.datetime.utcnow()\n date_performance = date.strftime('%d_%m_%G')\n name = 'vacancy_result_%s.csv' % (date_performance)\n file = codecs.open(PATH_VACANCY_SAVE + '\\\\' + name,\n mode='bw', encoding='Windows-1251')\n file.write(\"id;name;code;division;created;final_candidate;\" +\n \"source;city;position_name;status;date;type;\" +\n \"comment;mass_vacancy;mp_vacancy\\n\")\n for vacancy in vacancy_list:\n data_str = \"%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s\\n\" % (\n vacancy['id'],\n vacancy['name'],\n vacancy['code'],\n vacancy['division'],\n vacancy['created'],\n vacancy['final_candidate'],\n vacancy['source'],\n vacancy['city'],\n vacancy['position_name'],\n vacancy['status'],\n vacancy['date'],\n vacancy['type'],\n vacancy['comment'],\n vacancy['mass_vacancy'],\n vacancy['mp_vacancy']\n )\n # data = data_str.encode('Windows-1251')\n file.write(data_str)\n file.close()\n\n\ndef vacancy_scan():\n vacancy_xml_parser = VacancyParser()\n vacancy_candidate_xmal_parser = VacancyCandidates()\n vacancy_list = []\n vacancy_candidates_list = []\n for path_dir, dirs, files in os.walk(PATH_VACANCY_SOURCE):\n for file in files:\n file_source = path_dir + '\\\\' + file\n tree = etree.parse(file_source)\n root = tree.getroot()\n vacancy = vacancy_parser(vacancy_xml_parser, root)\n vacancy_candidates = vacancy_candidates_parser(\n vacancy_candidate_xmal_parser,\n root\n )\n vacancy_list += vacancy\n vacancy_candidates_list += vacancy_candidates\n\n vacancy_save(vacancy_list)\n vacancy_candidates_save(vacancy_candidates_list)\n\n\ndef vacancy_candidates_parser(cls, xml_content):\n if not cls:\n raise ParserError(\"Required argument is None\")\n\n if not xml_content:\n raise ParserError(\"XML is None\")\n\n vacabcy_candidates_list = []\n vacancy = cls.get_vacancy(xml_content)\n candidates_list = cls.get_candidates(xml_content)\n for candidate in candidates_list:\n vacancy_candidates = {}\n vacancy_candidates.setdefault('vacancy', vacancy)\n vacancy_candidates.setdefault('candidate', candidate)\n vacabcy_candidates_list.append(vacancy_candidates)\n return vacabcy_candidates_list\n\n\ndef vacancy_candidates_save(vacancy_candidates_list):\n 
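\"\"\"Write one 'vacancy;candidate' pair per row to a dated windows-1251 CSV.\"\"\"\n    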
date = datetime.datetime.utcnow()\n    date_performance = date.strftime('%d_%m_%G')\n    name = 'vacancy_candidates_result_%s.csv' % (date_performance)\n    file = codecs.open(PATH_VACANCY_CAND_SAVE + '\\\\' + name,\n                       mode='bw', encoding='Windows-1251')\n    file.write(\"vacancy;candidate\\n\")\n    for vacancy in vacancy_candidates_list:\n        data_str = \"%s;%s\\n\" % (vacancy['vacancy'], vacancy['candidate'])\n        # data = data_str.encode('Windows-1251')\n        file.write(data_str)\n    file.close()\n\n\ndef division_parser(cls, xml_content):\n    if not cls:\n        raise ParserError(\"Required argument is None\")\n\n    if not xml_content:\n        raise ParserError(\"XML is None\")\n\n    division = {}\n\n    id = cls.get_id(xml_content)\n    division.setdefault('id', id)\n\n    parent_id = cls.get_parent_id(xml_content)\n    division.setdefault('parent_id', parent_id)\n\n    created = cls.get_created(xml_content)\n    division.setdefault('created', created)\n\n    last_update = cls.get_last_update(xml_content)\n    division.setdefault('last_update', last_update)\n\n    code = cls.get_code(xml_content)\n    division.setdefault('code', code)\n\n    name = cls.get_name(xml_content)\n    division.setdefault('name', name)\n\n    type = cls.get_type(xml_content)\n    division.setdefault('type', type)\n\n    hce = cls.get_hce(xml_content)\n    division.setdefault('hce', hce)\n\n    eid = cls.get_eid(xml_content)\n    division.setdefault('eid', eid)\n\n    return division\n\n\ndef division_save(division_list):\n    date = datetime.datetime.utcnow()\n    date_performance = date.strftime('%d_%m_%G')\n    name = 'division_result_%s.csv' % (date_performance)\n    file = codecs.open(PATH_DIVIVISION_SAVE + '\\\\' + name,\n                       mode='bw',\n                       encoding='Windows-1251')\n    file.write(\"id;parent_id;created;last_update;code;name;type;\" +\n               \"hce;eid\\n\")\n    for division in division_list:\n        data_str = \"%s;%s;%s;%s;%s;%s;%s;%s;%s\\n\" % (\n            division['id'],\n            division['parent_id'],\n            division['created'],\n            division['last_update'],\n            division['code'],\n            division['name'],\n            division['type'],\n            division['hce'],\n            division['eid']\n        )\n        file.write(data_str)\n    file.close()\n\n\ndef division_scan():\n    division_xml_parser = DivisionParser()\n    division_list = []\n    for path_dir, dirs, files in os.walk(PATH_DIVISION_SOURCE):\n        for file in files:\n            file_source = path_dir + '\\\\' + file\n            try:\n                tree = etree.parse(file_source)\n            except etree.ParseError:\n                continue\n            root = tree.getroot()\n            division = division_parser(division_xml_parser, root)\n            division_list.append(division)\n\n    division_save(division_list)\n\n\ndef correction_files():\n    error_log = codecs.open(PATH_SAVE_CANDIDATES + '\\\\' +\n                            'error_log.csv',\n                            mode='br', encoding='Windows-1251')\n    data_error_log = error_log.read()\n    errors = data_error_log.split('\\n')\n    for error_file in errors:\n        # execution = clear(PATH_CNDIDATES_SOURCE + '\\\\' +\n        #                   error_file)\n        execution = add_header_xml(PATH_SAVE_CANDIDATES_ERROR_LOG + '\\\\' +\n                                   error_file)\n        print(error_file, '-', execution)\n\n\ndef add_header_xml(file_source):\n    file_open = True\n    try:\n        file_in = open(file_source, 'br')\n    except FileNotFoundError:\n        file_open = False\n\n    if file_open:\n        data_in = file_in.read()\n        file_in.close()\n        header_str = b'<?xml version=\"1.0\" encoding=\"windows-1251\"?>\\n'\n        have_header = data_in.find(header_str)\n        ## prepend the XML declaration only when it is missing (find returns -1)\n        if have_header == -1:\n            file_out = open(file_source, 'bw')\n            file_out.write(header_str + data_in)\n            file_out.close()\n        return True\n\n\nif __name__ == '__main__':\n    sys_args_parse = argparse.ArgumentParser(\n        description=('This utility parses xml-files from ' +\n                     'e-staff and converts them to csv-files')\n    )\n    
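## Example invocation (illustrative): python data_parser.py -usp -can -e\n    ## where -usp makes the utility read its paths from environment variables first\n    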
sys_args_parse.add_argument(\n        '-usp', '--using_system_preference',\n        help='Use the file paths defined in the system environment',\n        default=False,\n        action='store_true'\n    )\n    sys_args_parse.add_argument(\n        '-ssp', '--show_system_preference',\n        help='Show the file paths defined in the system environment',\n        default=False,\n        action='store_true'\n    )\n    sys_args_parse.add_argument(\n        '-all', '--all',\n        help='Parse all categories of source files',\n        default=False,\n        action='store_true'\n    )\n    sys_args_parse.add_argument(\n        '-can', '--candidates',\n        help='Parse only candidate files',\n        default=False,\n        action='store_true'\n    )\n    sys_args_parse.add_argument(\n        '-e', '--event',\n        help='Parse only event files',\n        default=False,\n        action='store_true'\n    )\n    sys_args_parse.add_argument(\n        '-v', '--vacancy',\n        help='Parse only vacancy files',\n        default=False,\n        action='store_true'\n    )\n    sys_args_parse.add_argument(\n        '-d', '--division',\n        help='Parse only division files',\n        default=False,\n        action='store_true'\n    )\n    sys_args_parse.add_argument(\n        '-cor', '--correct',\n        help='Only correct the source files',\n        default=False,\n        action='store_true'\n    )\n    cmd_arg = sys_args_parse.parse_args(sys.argv[1:])\n\n    if cmd_arg.using_system_preference:\n        set_files_path()\n\n    if cmd_arg.show_system_preference:\n        get_files_path()\n\n    if cmd_arg.all:\n        thread_1 = threading.Thread(target=vacancy_scan)\n        thread_2 = threading.Thread(target=event_scan)\n        thread_3 = threading.Thread(target=candidate_scan)\n        ##thread_4 = threading.Thread(target=division_scan)\n\n        thread_1.daemon = True\n        thread_2.daemon = True\n        thread_3.daemon = True\n        ##thread_4.daemon = True\n\n        thread_1.start()\n        thread_2.start()\n        thread_3.start()\n        ##thread_4.start()\n\n        ##correction_files()\n        ## busy-wait until every worker thread has finished (is_alive: Python 3.9+ spelling)\n        while True:\n            if not (thread_1.is_alive() or thread_2.is_alive() or\n                    thread_3.is_alive()):\n                break\n\n    if cmd_arg.candidates and not cmd_arg.all:\n        candidate_scan()\n    if cmd_arg.event and not cmd_arg.all:\n        event_scan()\n    if cmd_arg.vacancy and not cmd_arg.all:\n        vacancy_scan()\n    if cmd_arg.division:\n        division_scan()\n    if cmd_arg.correct:\n        correction_files()\n" }, { "alpha_fraction": 0.5150054693222046, "alphanum_fraction": 0.51995849609375, "avg_line_length": 36.60091781616211, "blob_id": "27672c3df9314c347a876b4672b0fc6588b65a02", "content_id": "6bdd1b7c1a1381a4ce7fe8f8bb5af5613f811fe6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 81970, "license_type": "no_license", "max_line_length": 513, "num_lines": 2180, "path": "/parserHH/parser_classes.py", "repo_name": "FORSEN-ROCK/other", "src_encoding": "UTF-8", "text": "import re\nimport time\nimport json\nimport datetime\nimport urllib.request as urllib\nimport requests\n\nfrom requests import HTTPError\n## NOTE: the next line shadows requests' HTTPError with urllib's version\nfrom urllib.error import HTTPError, URLError\nfrom random import random\nfrom bs4 import BeautifulSoup\nfrom bs4 import NavigableString\n\n\nclass BaseException(Exception):  ## NOTE: shadows the built-in BaseException\n    def __init__(self, message):\n        self.message = message\n\n\nclass ParserError(BaseException):\n    pass\n\n\nclass SearchError(BaseException):\n    pass\n\n\nclass EndParserError(BaseException):\n    pass\n\n\nclass ExpressionError(BaseException):\n    pass\n\n\nclass Expression:\n    def __init__(self, **kwargs):\n        self.tag = kwargs.get('tag')\n        self.attribute = kwargs.get('attribute')\n        self.value = kwargs.get('value')\n\n    def __str__(self):\n        format_str = '<tag = %s, attribute = %s, value = %s>' %(\n            self.tag, self.attribute, self.value)\n        return format_str\n
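\n## Usage sketch (illustrative): Expression(tag='a', attribute='class', value='title')\n## describes a node that is later matched via html.find(expr.tag, {expr.attribute: expr.value}).\n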
\n\n# Parser from search\nclass BaseParserSearchHTML(object):\n    root_url = None\n\n    container_title_resume = None\n    container_url = None\n    container_last_update = None\n    container_body = None\n\n    target_title_resume = Expression()\n    target_url = Expression()\n    target_last_update = Expression()\n    target_body = Expression()\n\n    def _get_container(self, container_name, html):\n        \"\"\"Base container getter.\n        Takes name of container element and html tree node\n        \"\"\"\n        #html_tree = BeautifulSoup(html, 'html.parser')\n        container_attr = getattr(self, 'container_' + container_name, None)\n        if container_attr:\n            container_html = html.find(\n                container_attr.tag,\n                {container_attr.attribute: container_attr.value}\n            )\n        else:\n            container_html = html\n        return container_html\n\n    def _get_more_target(self, target_name, html):\n        \"\"\"Base targets getter.\n        Takes name of target element.\n        Returns the matching HTML nodes.\n        \"\"\"\n        target_attr = getattr(self, 'target_' + target_name, None)\n        \n        if not target_attr:\n            raise ExpressionError(\"No goal %s specified\" % target_name)\n        \n        if html: \n            elements = html.findAll(\n                target_attr.tag,\n                {target_attr.attribute: target_attr.value}\n            )\n        else:\n            elements = None\n        \n        return elements \n\n    def get_container_title_resume(self, html):\n        return self._get_container('title_resume', html)\n\n    def get_container_url(self, html):\n        return self._get_container('url', html)\n\n    def get_container_last_update(self, html):\n        return self._get_container('last_update', html)\n\n    def get_container_body(self, html):\n        html_tree = BeautifulSoup(html, 'html.parser')\n        return self._get_container('body', html_tree)\n\n    def _get_target(self, target_name, html, return_html_node=False):\n        \"\"\"Base target getter.\n        Takes name of target element and html tree node\n        \"\"\"\n        target_attr = getattr(self, 'target_' + target_name, None)\n\n        if not target_attr:\n            raise ExpressionError(\"No goal %s specified\" % target_name)\n        \n        if html: \n            element = html.find(\n                target_attr.tag,\n                {target_attr.attribute: target_attr.value}\n            )\n        else:\n            element = None\n\n        if element:\n            if not return_html_node:\n                result = element.get_text()\n            else:\n                result = element\n        else:\n            result = None\n\n        return result\n\n    def get_body(self, html):\n        return self._get_more_target('body', html)\n\n    def get_title_resume(self, html):\n        return self._get_target('title_resume', html)\n\n    def get_url(self, html):\n        element_html = self._get_target('url', html, return_html_node=True)\n        root = self.root_url\n        \n        if element_html:\n            if root:\n                url = root + element_html['href']\n            else:\n                url = element_html['href']\n        else:\n            url = None\n        return url\n\n    def get_last_update(self, html):\n        return self._get_target('last_update', html)\n\n\n# Parser for resume\nclass BaseParserResumeHTML(object):\n    find_cache = None##\n    find_hash = None  ## cache consulted by _get_position_element below\n\n    container_head = None\n    container_gender = None\n    container_phone = None\n    container_email = None\n    container_city = None\n    container_metro_station = None\n    container_salary = None##\n    container_age = None##\n    container_length_of_work = None##\n    container_experience = None##\n    container_degree_of_education = None##\n    container_education = None\n    container_full_name = None\n    container_key_words = None\n\n    target_gender = Expression()\n    target_phone = Expression()\n    target_email = Expression()\n    target_city = Expression()\n    target_metro_station = Expression()\n    target_salary = Expression()##\n    target_age = Expression()##\n    target_length_of_work = Expression()##\n    target_experience = Expression()##\n    
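## the remaining target_* placeholders are overridden by the site-specific\n    ## subclasses below (e.g. HhParserResume, SuperjobParserResume)\n    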
target_experience_period = Expression()##\n target_experience_text = Expression()##\n target_last_position = Expression()##\n target_organization_name = Expression()##\n target_degree_of_education = Expression()##\n target_education = Expression()##\n target_education_year = Expression()##\n target_education_name = Expression()##\n target_education_profession = Expression()##\n target_first_name = Expression()\n target_last_name = Expression()\n target_middle_name = Expression()\n target_key_words = Expression()\n\n def _get_container(self, container_name, html):\n \"\"\"Base container getter.\n Takes name of container element and html tree node\n \"\"\"\n html_tree = BeautifulSoup(html, 'html.parser')\n container_attr = getattr(self, 'container_' + container_name, None)\n if container_attr:\n container_html = html_tree.find(\n container_attr.tag,\n {container_attr.attribute: container_attr.value}\n )\n else:\n container_html = html_tree\n return container_html\n \n def _get_target(self, target_name, html, return_html_node=False):\n \"\"\"Base target getter.\n Takes name of target element and html tree node\n \"\"\"\n target_attr = getattr(self, 'target_' + target_name, None)\n\n if not target_attr:\n raise ExpressionError(\"No goal %s specified\" % target_name)\n \n if html: \n element = html.find(\n target_attr.tag,\n {target_attr.attribute: target_attr.value}\n )\n else:\n element = None\n\n if element:\n if not return_html_node:\n result = element.get_text()\n else:\n result = element\n else:\n result = None\n\n return result\n \n def _get_more_target(self, target_name, html):\n \"\"\"Base targets getter.\n Takes name of target element\n Returned HTML nodes\n \"\"\"\n target_attr = getattr(self, 'target_' + target_name, None)\n\n if not target_attr:\n raise ExpressionError(\"No goal %s specified\" % target_name)\n\n if html: \n elements = html.findAll(\n target_attr.tag,\n {target_attr.attribute: target_attr.value}\n )\n else:\n elements = None\n\n return elements\n\n def _get_children_elements(self, html_element):\n \"\"\"Base method for get children element html_element\n Takes parent HTML - node\n Returned HTML - list children elements \n \"\"\"\n child_list = [\n child_item for child_item in html_element.children if\n child_item is not NavigableString\n ]\n return child_list\n\n def _get_position_element(self, target_name, html, position=0, \n hash_flag=False):\n \"\"\"Base method get element from a given position\n For the case of repeated repetition\n Takes HTML, target_name and Position\n Returned HTML - node\n \"\"\"\n if not self.find_hash:\n elements_list = self._get_more_target(target_name, html)\n\n if hash_flag:\n self.find_hash = elements_list\n\n else:\n elements_list = self.find_hash\n\n try:\n element = elements_list[position]\n except IndexError:\n element = None\n return element\n\n def _get_table_value(self, html_table, row=0, column=0):\n \"\"\"Base method get target position (row, column) for table\n Takes HTML - Table\n Returned target element from a given position\n \"\"\"\n row_elements = self._get_children_elements(html_table)\n column_elements = [\n self._get_children_elements(item) for item in\n row_elements\n ]\n \n try:\n column_element = column_elements[row][column]\n except IndexError:\n column_element = None\n\n if column_element:\n value = column_element.get_text()\n else:\n value = None\n\n return value\n\n def _get_list_value(self, html_list, siquence_numder=0):\n \"\"\"Base method get value for HTML - list\n Takes HTML - list\n Returned item-list 
value\n \"\"\"\n item_list = self._get_children_elements(html_list)\n try:\n item_list_tag = item_list[siquence_numder]\n except IndexError:\n item_list_tag = None\n \n if item_list_tag:\n value = item_list_tag.get_text()\n else:\n value = None\n \n return value\n\n def _get_name_part(self, html, target_name, name_index):\n full_name = self._get_target(target_name, html)\n \n if full_name:\n try:\n name = full_name.split(' ')[name_index]\n except IndexError:\n name = None\n else:\n name = None\n\n return name\n\n def get_container_degree_of_education(self, html):\n return self._get_container('degree_of_education', html)\n\n def get_container_salary(self, html):\n return self._get_container('salary', html)\n\n def get_container_age(self, html):\n return self._get_container('age', html)\n\n def get_container_length_of_work(self, html):\n return self._get_container('length_of_work', html)\n\n def get_container_gender(self, html):\n return self._get_container('gender', html)\n\n def get_container_phone(self, html):\n return self._get_container('phone', html)\n\n def get_container_email(self, html):\n return self._get_container('email', html)\n\n def get_container_city(self, html):\n return self._get_container('city', html)\n\n def get_container_metro_station(self, html):\n return self._get_container('metro_station', html)\n\n def get_container_education(self, html):\n return self._get_container('education', html)\n\n def get_container_experience(self, html):\n return self._get_container('experience', html)\n\n def get_container_full_name(self, html):\n return self._get_container('full_name', html)\n\n def get_container_key_words(self, html):\n return self._get_container('key_words', html)\n\n def get_salary(self, html):\n list_numder = self._get_target('salary', html)\n if list_numder:\n salary_list = re.findall(r'\\d+', list_numder)\n salary_str = str().join(salary_list)\n salary = salary_str\n else:\n salary = None\n return salary\n\n def get_age(self, html):\n list_numder = self._get_target('age', html)\n if list_numder:\n age_list = re.findall(r'\\d{2}', list_numder)\n age_str = str().join(age_list)\n age = age_str\n else:\n age = None\n return age\n\n def get_gender(self, html):\n return self._get_target('gender', html)\n\n def get_phone(self, html):\n phone = self._get_target('phone', html)\n if phone: \n list_number = re.findall(r'\\d{0,11}', phone)\n phone_number = str().join(list_number)\n number = '8' + phone_number[1:11]\n else:\n number = None\n return number\n\n def get_email(self, html):\n return self._get_target('email', html)\n\n def get_city(self, html):\n return self._get_target('city', html)\n\n def get_metro_station(self, html):\n return self._get_target('metro_station', html)\n\n def get_education(self, html):\n education_bloks = self._get_more_target('education', html)\n education_list = []\n \n if education_bloks:\n for item in education_bloks:\n education_item = {\n target_name: self._get_target(target_name, item) for\n target_name in ('education_year', 'education_name',\n 'education_profession')\n }\n education_list.append(education_item)\n\n return education_list\n\n def get_degree_of_education(self, html):\n return self._get_target('degree_of_education', html)\n\n def get_length_of_work(self, html):\n return self._get_target('length_of_work', html)\n\n def get_experience(self, html):\n \"\"\"Method for get all experience data \n \"\"\"\n experience_bloks = self._get_more_target('experience', html)\n experience_list = []\n \n if experience_bloks:\n for item in 
experience_bloks:\n experience_item = {\n target_name: self._get_target(target_name, item) for\n target_name in ('experience_text', 'last_position',\n 'experience_period', 'organization_name')##\n }\n experience_list.append(experience_item)\n\n return experience_list\n\n def get_firts_name(self, html):\n return self._get_name_part(html, 'first_name', 1)\n\n def get_last_name(self, html):\n return self._get_name_part(html, 'last_name', 0)\n\n def get_middle_name(self, html):\n return self._get_name_part(html, 'middle_name', 2)\n\n def get_key_words(self, html): \n elements = self._get_more_target('key_words', html)\n key_words_list = []\n \n if elements:\n for item_element in elements:\n key_word = item_element.get_text()\n key_words_list.append(key_word)\n \n return key_words_list\n\n\nclass BaseParserSearchAPI(object):\n container_error = None\n container_body = None\n container_title_resume = None\n container_salary = None\n container_age = None\n container_experience = None\n container_last_position = None\n container_organization_name = None\n container_url = None\n container_last_update = None\n\n target_error = None\n target_body = None\n target_title_resume = None\n target_salary = None\n target_age = None\n target_experience = None\n target_last_position = None\n target_organization_name = None\n target_url = None\n target_last_update = None\n\n def get_container_error(self, json_contant):\n json_data = json.loads(json_contant)\n if self.container_error is not None:\n container = json_data[self.container_error]\n else:\n container = json_data\n return container\n\n def get_container_body(self, json_contant):\n json_data = json.loads(json_contant)\n if self.container_body is not None:\n container = json_data[self.container_body]\n else:\n container = json_data\n return container\n\n def get_container_title_resume(self, json_contant):\n if self.container_title_resume is not None:\n container = json_contant[self.container_title_resume]\n else:\n container = json_contant\n return container\n\n def get_container_salary(self, json_contant):\n if self.container_salary is not None:\n container = json_contant[self.container_salary]\n else:\n container = json_contant\n return container\n\n def get_container_age(self, json_contant):\n if self.container_age is not None:\n container = json_contant[self.container_age]\n else:\n container = json_contant\n return container\n\n def get_container_experience(self, json_contant):\n if self.container_experience is not None:\n container = json_contant[self.container_experience]\n else:\n container = json_contant\n return container\n\n def get_container_last_position(self, json_contant):\n if self.container_last_position is not None:\n container = json_contant[self.container_last_position]\n else:\n container = json_contant\n return container\n\n def get_container_organization_name(self, json_contant):\n if self.container_organization_name is not None:\n container = json_contant[self.container_organization_name]\n else:\n container = json_contant\n return container\n\n def get_container_url(self, json_contant):\n if self.container_url is not None:\n container = json_contant[self.container_url]\n else:\n container = json_contant\n return container\n\n def get_container_last_update(self, json_contant):\n if self.container_last_update is not None:\n container = json_contant[self.container_last_update]\n else:\n container = json_contant\n return container\n\n def get_error(self, json_contant):\n if self.target_error is not None:\n error = 
json_contant[self.target_error]\n else:\n error = None\n return error\n\n def get_body(self, json_contant):\n body = json_contant[self.target_body]\n return body\n\n def get_title_resume(self, json_contant):\n title_resume = json_contant[self.target_title_resume]\n return title_resume\n\n def get_salary(self, json_contant):\n salary = json_contant[self.target_salary]\n return salary\n\n def get_age(self, json_contant):\n age = json_contant[self.target_age]\n return age\n\n def get_experience(self, json_contant):\n experience = json_contant[self.target_experience]\n return experience\n\n def get_last_position(self, json_contant):\n last_position = json_contant[self.target_last_position]\n return last_position\n\n def get_organization_name(self, json_contant):\n organization_name = json_contant[self.target_organization_name]\n return organization_name\n\n def get_url(self, json_contant):\n url = json_contant[self.target_url]\n return url\n\n def get_last_update(self, json_contant):\n last_update = json_contant[self.target_last_update]\n return last_update\n\n\n#class BaseParserResumeAPI(object):\ndef parser_search(cls=None, html=None):\n if (cls is None) or (html is None):\n raise ParserError(\"Missing parsing class or html body\")\n\n body_html = cls.get_container_body(html)\n bodys = cls.get_body(body_html)\n\n if not bodys:\n raise ParserError(\"End sequence\")\n\n resumes = []\n for body_item in bodys:\n resume = {}\n title_resume_html = cls.get_container_title_resume(body_item)\n title_resume = cls.get_title_resume(title_resume_html)\n resume.setdefault('title_resume', title_resume)\n url_html = cls.get_container_url(body_item)\n url = cls.get_url(url_html)\n resume.setdefault('url', url)\n last_update_html = cls.get_container_last_update(body_item)\n last_update = cls.get_last_update(last_update_html)\n resume.setdefault('last_update', last_update)\n resumes.append(resume)\n return resumes\n\ndef parser_resume(cls=None, html=None):\n if (cls is None) or (html is None):\n raise ParserError(\"Missing parsing class or html body\")\n\n resume_data = {}\n\n salary_html = cls.get_container_salary(html)\n salary = cls.get_salary(salary_html)\n resume_data.setdefault('salary', salary)\n\n age_html = cls.get_container_age(html)\n age = cls.get_age(age_html)\n resume_data.setdefault('age', age)\n\n gender_html = cls.get_container_gender(html)\n gender = cls.get_gender(gender_html)\n resume_data.setdefault('gender', gender)\n\n city_html = cls.get_container_city(html)\n city = cls.get_city(city_html)\n resume_data.setdefault('city', city)\n\n metro_station_html = cls.get_container_metro_station(html)\n metro_station = cls.get_metro_station(metro_station_html)\n resume_data.setdefault('metro_station', metro_station)\n\n phone_html = cls.get_container_phone(html)\n try:\n phone = cls.get_phone(phone_html)\n except IndexError:\n phone = None\n except ExpressionError:\n phone = None\n resume_data.setdefault('phone', phone)\n\n email_html = cls.get_container_email(html)\n try:\n email = cls.get_email(email_html)\n except ExpressionError:\n email = None\n resume_data.setdefault('email', email)\n\n degree_of_education_html = cls.get_container_degree_of_education(html)\n degree_of_education = cls.get_degree_of_education(\n degree_of_education_html\n )\n resume_data.setdefault('degree_of_education', degree_of_education)\n\n education_html = cls.get_container_education(html)\n education = cls.get_education(education_html)\n resume_data.setdefault('education', education)\n\n lentgh_of_work_html = 
cls.get_container_length_of_work(html)\n    lentgh_of_work = cls.get_length_of_work(lentgh_of_work_html)\n    ## the 'lentgh_of_work' key spelling is kept for compatibility with existing consumers\n    resume_data.setdefault('lentgh_of_work', lentgh_of_work)\n    \n    experience_html = cls.get_container_experience(html)\n    experience = cls.get_experience(experience_html)\n    resume_data.setdefault('experience', experience)\n\n    full_name_html = cls.get_container_full_name(html)\n\n    try:\n        first_name = cls.get_firts_name(full_name_html)\n    except ExpressionError:\n        first_name = None\n    resume_data.setdefault('first_name', first_name)\n\n    try:\n        last_name = cls.get_last_name(full_name_html)\n    except ExpressionError:\n        last_name = None\n    resume_data.setdefault('last_name', last_name)\n\n    try:\n        middle_name = cls.get_middle_name(full_name_html)\n    except ExpressionError:\n        middle_name = None\n    resume_data.setdefault('middle_name', middle_name)\n\n    key_words_html = cls.get_container_key_words(html)\n    key_words = cls.get_key_words(key_words_html)\n    resume_data.setdefault('key_words', key_words)\n\n    return resume_data\n\n\n# Custom classes for recruiting sites.\nclass HhParserSearch(BaseParserSearchHTML):\n    root_url = 'https://hh.ru'\n\n    container_body = Expression(tag='table',\n                                attribute='data-qa',\n                                value='resume-serp__results-search')\n\n    target_title_resume = Expression(tag='a',\n                                     attribute='itemprop',\n                                     value='jobTitle')\n    target_url = Expression(tag='a',\n                            attribute='itemprop',\n                            value='jobTitle')\n    target_last_update = Expression(tag='span',\n                                    attribute='class',\n                                    value='output__tab m-output__date')\n    target_body = Expression(tag='tr',\n                             attribute='itemscope',\n                             value='itemscope')\n\n\nclass HhParserResume(BaseParserResumeHTML):\n    container_education = Expression(tag='div',\n                                     attribute='data-qa',\n                                     value='resume-block-education')\n    container_degree_of_education = Expression(tag='div',\n                                               attribute='data-qa',\n                                               value='resume-block-education')\n    container_experience = Expression(tag='div',\n                                      attribute='data-qa',\n                                      value='resume-block-experience')\n    container_length_of_work = Expression(tag='div',\n                                          attribute='data-qa',\n                                          value='resume-block-experience')\n    container_full_name = Expression(tag='div',\n                                     attribute='class',\n                                     value='resume-header-name')\n    container_gender = Expression(tag='div',\n                                  attribute='class',\n                                  value='resume-header-block')\n    container_phone = Expression(tag='div',\n                                 attribute='itemprop',\n                                 value='contactPoints')\n    container_email = Expression(tag='div',\n                                 attribute='itemprop',\n                                 value='contactPoints')\n    container_city = Expression(tag='span',\n                                attribute='itemprop',\n                                value='address')\n    container_metro_station = Expression(tag='span',\n                                         attribute='itemprop',\n                                         value='address')\n\n    target_gender = Expression(tag='span',\n                               attribute='itemprop',\n                               value='gender')\n    target_phone = Expression(tag='span',\n                              attribute='itemprop',\n                              value='telephone')\n    target_email = Expression(tag='a',\n                              attribute='itemprop',\n                              value='email')\n    target_city = Expression(tag='span',\n                             attribute='itemprop',\n                             value='addressLocality')\n    target_metro_station = Expression(tag='span',\n                                      attribute='data-qa',\n                                      value='resume-personal-metro')\n    target_salary = Expression(tag='span',\n                               attribute='class',\n                               value='resume-block__salary')\n    target_age = Expression(tag='span',\n                            attribute='data-qa',\n                            value='resume-personal-age')\n    target_length_of_work = Expression(\n        tag='span',\n        attribute='class',\n        value='resume-block__title-text resume-block__title-text_sub'\n    )\n    target_degree_of_education = Expression(\n        tag='span',\n        attribute='class',\n        value='resume-block__title-text resume-block__title-text_sub'\n    )\n    
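## hh.ru exposes data-qa attributes as relatively stable automation hooks;\n    ## the selector values in this class are a snapshot and may drift with redesigns\n    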
target_education = Expression(tag='div',\n attribute='class',\n value='resume-block-item-gap')\n target_education_year = Expression(\n tag='div',\n attribute='class',\n value='bloko-column bloko-column_s-2 bloko-column_m-2 bloko-column_l-2'\n )\n target_education_name = Expression(tag='div',\n attribute='data-qa',\n value='resume-block-education-name')\n target_education_profession = Expression(\n tag='div',\n attribute='data-qa',\n value='resume-block-education-organization'\n ) \n target_experience = Expression(tag='div',\n attribute='class',\n value='resume-block-item-gap')\n target_experience_period = Expression(\n tag='div',\n attribute='class',\n value='resume-block__experience-timeinterval'\n )\n target_experience_text = Expression(\n tag='div',\n attribute='data-qa',\n value='resume-block-experience-description'\n )\n target_last_position = Expression(\n tag='div',\n attribute='data-qa',\n value='resume-block-experience-position'\n )\n target_organization_name = Expression(tag='div',\n attribute='itemprop',\n value='name')\n target_first_name = target_middle_name = target_last_name = Expression(\n tag='h1', attribute='itemprop', value='name')\n target_key_words = Expression(\n tag='span',\n attribute='class',\n value='bloko-tag bloko-tag_inline bloko-tag_countable Bloko-TagList-Tag'\n )\n\n\nclass ZarplataParserSearch(BaseParserSearchAPI):\n container_experience = 'work_time_total'\n container_last_position = 'jobs'\n container_organization_name = 'jobs'\n\n target_body = 'resumes'\n target_title_resume = 'header'\n target_salary = 'wanted_salary_rub'\n target_age = 'age'\n target_experience = 'year'\n target_last_position = 'title'\n target_organization_name = 'title'\n target_url = 'url'\n target_last_update = 'mod_date'\n\n def get_last_position(self, json_contant):\n container = json_contant[0]['position']\n last_position = container[self.target_last_position]\n return last_position\n\n def get_organization_name(self, json_contant):\n container = json_contant[0]['company']\n organization_name = container[self.target_organization_name]\n return organization_name\n\n def get_url(self, json_contant):\n local_url = json_contant[self.target_url]\n url = 'https://www.zarplata.ru' + local_url\n return url\n\n\nclass SuperjobParserSearch(BaseParserSearchHTML):\n container_last_update = Expression(\n tag='div',\n attribute='class',\n value='sj_block m_b_2 ResumeListElementNew_history'\n ) \n\n target_title_resume = Expression(tag='a',\n attribute='target',\n value='_blank')\n target_url = Expression(tag='a',\n attribute='target',\n value='_blank')\n target_last_update = Expression(tag='span',\n attribute='class',\n value='sj_text m_small')\n target_body = Expression(tag='div',\n attribute='class',\n value='ResumeListElementNew js-resume-item')\n '''\n def get_salary(self, html):\n if html:\n list_elements = html.findAll(\n self.target_salary.tag,\n {self.target_salary.attribute, sself.target_salary}\n )\n if list_elements:\n try:\n element_salary = list_elements[0]\n salary_inner = element_salary.get_text()\n salary_list = re.findall(r'\\d+', salary_inner)\n salary_str = str().join(salary_list)\n salary = salary_str\n except IndexError:\n salary = None\n else:\n salary = None\n\n return salary\n '''\n\n def get_age(self, html):\n if html:\n list_elements = html.findAll(\n self.target_age.tag,\n {self.target_age.attribute, self.target_age.value}\n )\n if list_elements:\n try:\n element_age = list_elements[1]\n age_inner = element_age.get_text()\n age_list = re.findall(r'\\d{2}', 
age_inner)\n age_str = str().join(age_list)\n age = age_str\n except IndexError:\n age = None\n else:\n age = None\n \n return age\n\n def get_experience(self, html):\n if html:\n list_elements = html.findAll(\n self.target_experience.tag,\n {self.target_experience.attribute, self.target_experience.value}\n )\n if list_elements:\n try:\n element_experience = list_elements[-1]\n experience = element_experience.get_text()\n except IndexError:\n experience = None\n else:\n experience = None\n return experience\n\n def get_last_position(self, html):\n raw_data = self._get_target('last_position', html)\n if raw_data:\n list_char = re.findall(\n \"[^<span class='sj_match_highlight'>][^</span>]\",\n raw_data\n )\n last_position = str().join(list_char)\n else:\n last_position = None\n \n return last_position\n\n\nclass SuperjobParserResume(BaseParserResumeHTML):\n container_salary = Expression(tea='div',\n attribute='class',\n value='ResumeMainHRNew_content')\n container_age = Expression(tea='div',\n attribute='class',\n value='ResumeMainHRNew_content')\n container_degree_of_education = Expression(tea='div',\n attribute='class',\n value='ResumeMainHRNew_content')\n container_lentgh_of_work = Expression(tag='div',\n attribute='class',\n value='sj_block m_b_2 sj_h3')\n container_gender = Expression(tea='div',\n attribute='class',\n value='ResumeMainHRNew_content')\n container_phone = Expression(tea='div',\n attribute='class',\n value='ResumeMainHRNew_content')\n container_email = Expression(tea='div',\n attribute='class',\n value='ResumeMainHRNew_content')\n container_city = Expression(tea='div',\n attribute='class',\n value='ResumeMainHRNew_content')\n container_metro_station = Expression(\n tea='div',\n attribute='class',\n value='ResumeMainHRNew_content'\n )\n container_education = Expression(tea='div',\n attribute='class',\n value='ResumeMainHRNew_content')\n container_experience = Expression(tea='div',\n attribute='class',\n value='ResumeDetailsNew_row')\n container_full_name = Expression(tea='div',\n attribute='class',\n value='ResumeMainHRNew_content')\n container_key_words = None\n\n target_salary = Expression(tag='span',\n attribute='class',\n value='h_font_weight_medium')\n target_age = Expression(tag='div')\n target_gender = Expression(tag='div')\n target_phone = Expression(tag='div',\n attribute='class',\n value='m_t_2')\n target_email = Expression(tag='div',\n attribute='class',\n value='m_t_0')\n target_city = Expression(tag='div')\n target_metro_station = Expression(tag='div')\n target_degree_of_education = Expression(tag='div',\n attribute='class',\n value='sj_block m_b_2 sj_h3')\n target_education = Expression(tag='div',\n attribute='class',\n value='ResumeDetailsNew_row')\n target_education_year = Expression(tag='div',\n attribute='class',\n value='ResumeDetailsNew_left')\n target_education_name = Expression(tag='div',\n attribute='class',\n value='h_font_weight_medium')\n target_education_profession = Expression(tag='div')\n target_length_of_work = Expression(tag='div',\n attribute='class',\n value='sj_block m_b_2 sj_h3')\n target_experience = Expression(tag='div',\n attribute='class',\n value='sj_block m_b_2')\n target_experience_period = Expression(\n tag='div',\n attribute='class',\n value='ResumeDetailsNew_left h_word_wrap_break_word'\n )\n target_experience_text = Expression(tag='div',\n attribute='class',\n value='sj_block m_t_2')\n target_last_position = Expression(tag='div',\n attribute='class',\n value='h_font_weight_medium')\n target_organization_name = 
Expression(tag='div')\n target_first_name = Expression(tag='div',\n attribute='class',\n value='sj_h3')\n target_last_name = Expression(tag='div',\n attribute='class',\n value='sj_h3')\n target_middle_name = Expression(tag='div',\n attribute='class',\n value='sj_h3')\n target_key_words = Expression(tag='div',\n attribute='class',\n value='h_word_wrap_break_word')\n\n\nclass AvitoParserSearch(BaseParserSearchHTML):\n root_url = 'https://www.avito.ru'\n \n target_title_resume = Expression(tag='a',\n attribute='class',\n value='item-description-title-link')\n\n target_url = Expression(tag='a',\n attribute='class',\n value='item-description-title-link')\n target_last_update = Expression(tag='div',\n attribute='class',\n value='date c-2')\n target_body = Expression(tag='div',\n attribute='class',\n value='description item_table-description')\n\n #def _get_position_target(slf, target_name, html, \n # element_position, target_position):\n # \"\"\"\n # Base method for pars case:\n # <ul>\n # <li class=\"...\">...</li>\n # .......................\n # <li class=\"...\">...</li>\n # .......................\n # <li class=\"...\">...</li>\n # </ul>\n # \"\"\"\n # elemts = self._get_more_target(target_name, html)\n # \n # if elemts:\n # try:\n # target_element = elemts[element_position]\n # except IndexError:\n # target_element = None\n # \n # if target_element:\n # target_value = target_element.get_text()\n # list_value = target_value.split(',')\n # try:\n # target_data = list_value[target_position]\n # except IndexError:\n # target_data = None\n # else:\n # target_data = None\n # else:\n # target_data = None\n # \n # return target_data\n \n #def get_age(self, html):\n # target_element = self._get_position_target('age', html, 0, 1)\n # \n # if target_element:\n # age_list = re.findall(r'\\d{2}', target_element)\n # age_str = str().join(age_list)\n # age = age_str\n # else:\n # age = None\n # \n # return age\n \n #def get_experience(self, html):\n # target_element = self._get_position_target('experience', html, 0, 2)\n # \n # if target_element:\n # experience_list = re.findall(r'\\d{2}', target_element)\n # experience_str = str().join(experience_list)\n # experience = experience_str\n # else:\n # experience = None\n # \n # return experience\n # \n #def get_url(self, html): \n # element_html = self._get_target('url', html, return_html_node=True)\n # if element_html:\n # local_url = element_html['href']\n # url = 'https://www.avito.ru' + local_url\n # else:\n # url = None\n # return url\n\n\nclass AvitoParserResume(BaseParserResumeHTML):\n container_gender = Expression(\n tag='div',\n attribute='class',\n value='item-params item-params_type-one-colon'\n )\n container_phone = Expression(tag='div',\n attribute='class',\n value='item-view-right')\n container_email = Expression(tag='div',\n attribute='class',\n value='item-view-right')\n container_city = Expression(tag='div',\n attribute='class',\n value='item-view-right')\n container_metro_station = Expression(tag='div',\n attribute='class',\n value='item-view-right')\n container_education = Expression(\n tag='div',\n attribute='class',\n value='item-params item-params_type-one-colon'\n )\n container_salary = Expression(tag='div',\n attribute='class',\n value='item-view-right')\n container_age = Expression(tag='div',\n attribute='class',\n value='item-params item-params_type-one-colon')\n container_length_of_work = Expression(\n tag='div',\n attribute='class',\n value='item-params item-params_type-one-colon'\n )\n container_experience = None##\n 
container_degree_of_education = Expression(\n tag='div',\n attribute='class',\n value='item-params item-params_type-one-colon'\n )\n container_full_name = None\n container_key_words = Expression(tag='div',\n attribute='itemprop',\n value='description')\n\n target_gender = Expression(\n tag='li',\n attribute='class',\n value='item-params-list-item'\n )\n target_phone = None\n target_email = None\n target_city = Expression(\n tag='div',\n attribute='class',\n value='seller-info-value')\n target_metro_station = Expression(tag='div',\n attribute='class',\n value='seller-info-value')\n target_first_name = None\n target_last_name = None\n target_middle_name = None\n target_salary = Expression(\n tag='span',\n attribute='class',\n value='price-value-string js-price-value-string')##\n target_age = Expression(tag='ul',\n attribute='class',\n value='item-params-list')\n target_lentgh_of_work = Expression(tag='ul',\n attribute='class',\n value='item-params-list')\n target_degree_of_education = Expression(tag='ul',\n attribute='class',\n value='item-params-list')\n target_experience = Expression(tag='div',\n attribute='class',\n value='resume-params')##\n target_experience_period = Expression(tag='div',\n attribute='class',\n value='resume-params-work-date')\n target_experience_text = Expression(tag='p',\n attribute='class',\n value='resume-params-text')\n target_last_position = Expression(tag='div',\n attribute='class',\n value='resume-params-title')##\n target_organization_name = Expression(tag='div',\n attribute='class',\n value='resume-params-title')\n \n target_education = Expression(tag='div',\n attribute='class',\n value='resume-params')\n target_education_year = Expression(tag='td',\n attribute='class',\n value='resume-params-left')##\n target_education_name = Expression(tag='div',\n attribute='class',\n value='resume-params-title')##\n target_education_profession = Expression(tag='p',\n attribute='class',\n value='resume-params-text')##\n\n target_key_words = Expression(tag='p')\n\n\nclass RabotaParserSearch(BaseParserSearchHTML):\n container_last_update = Expression(\n tag='div',\n attribute='class',\n value='h-box-wrapper__centercol'\n )\n\n target_title_resume = Expression(tag='a',\n attribute='target',\n value='_blank')\n target_url = Expression(tag='a',\n attribute='target',\n value='_blank')\n target_last_update = Expression(\n tag='p',\n attribute='class',\n value='box-wrapper__descr_12grey mt_10'\n )\n target_body = Expression(tag='div',\n attribute='class',\n value='h-box-wrapper')\n\n\nclass RabotaParserResume(BaseParserResumeHTML):\n container_metro_station = Expression(tag='div',\n attribute='id',\n value='resume_metro_list')\n container_education = Expression(tag='div',\n attribute='class',\n value='w100 res-card-tbl')\n container_experience = Expression(tag='div',\n attribute='class',\n value='b-main-info b-experience-list')\n container_full_name = None\n container_key_words = None\n container_salary = Expression(tag='div',\n attribute='class',\n value='b-main-info')\n container_age = None##\n container_length_of_work = Expression(tag='div',\n attribute='class',\n value='b-main-info')\n container_experience = None##\n container_degree_of_education = None##\n\n target_gender = Expression(tag='p',\n attribute='class',\n value='b-sex-age')\n target_phone = None\n target_email = None\n target_city = Expression(tag='p',\n attribute='class',\n value='b-city-info mt_10')\n target_metro_station = Expression(tag='span',\n attribute='class',\n value='name longname')\n target_first_name = 
None\n target_last_name = None\n target_middle_name = None\n target_key_words = None\n \n target_salary = Expression(tag='span',\n attribute='class',\n value='text_24 salary nobr')##\n target_age = Expression(tag='p',\n attribute='class',\n value='b-sex-age')\n target_lentgh_of_work = Expression(tag='span',\n attribute='class',\n value='text_18 bold exp-years')##\n target_experience = Expression(tag='div',\n attribute='class',\n value='res-card-tbl-row')\n target_experience_period = Expression(tag='span',\n attribute='class',\n value='gray9_text')##\n target_experience_text = Expression(tag='p',\n attribute='class',\n value='lh_20 p-res-exp')\n target_last_position = Expression(tag='p',\n attribute='class',\n value='last-position-name')\n target_organization_name = Expression(tag='p',\n attribute='class',\n value='company-name')\n target_degree_of_education = Expression(tag='span',\n attribute='class',\n value='bold edu-type')\n target_education = Expression(tag='div',\n attribute='class',\n value='res-card-tbl-row')\n target_education_profession = Expression(\n tag='div',\n attribute='class',\n value='mt_5 gray9_text profes-info'\n )\n target_education_year = Expression(tag='div',\n attribute='class',\n value='edu-year')\n target_education_name = Expression(tag='p',\n attribute='class',\n value='mt_4 lh_20 school-name')##\n\n def _get_choice_target(self, target_name, html,\n psition, separator=','):\n data_str = self._get_target(target_name, html)\n \n if data_str:\n data_list = data_str.split(separator)\n try:\n value = data_list[psition]\n except IndexError:\n value = None\n else:\n value = None\n \n return value\n \n def get_age(self, html):\n age_str = self._get_choice_target('age', html, 0)\n \n if age_str:\n age_list = re.findall('\\d{2}', age_str)\n try:\n age = age_list[0]\n except IndexError:\n age = None\n else:\n age = None\n \n return age\n \n def get_gender(self, html):\n gender_str = self._get_choice_target('gender', html, 1)\n \n if gender_str:\n gender_list = re.findall('\\S', gender_str)\n gender = str().join(gender_list)\n else:\n gender = None\n \n return gender\n\n\nclass FarpostParserSearch(BaseParserSearchHTML):\n root_url = 'https://www.farpost.ru'\n\n container_title_resume = Expression(tag='div',\n attribute='class',\n value='priceCell')\n container_last_update = Expression(tag='div',\n attribute='class',\n value='priceCell')\n\n target_title_resume = Expression(tag='a',\n attribute='class',\n value='bulletinLink')\n target_url = Expression(tag='a',\n attribute='class',\n value='bulletinLink')\n target_last_update = Expression(tag='td',\n attribute='class',\n value='dateCell')\n target_body = Expression(tag='tr',\n attribute='class',\n value='bull-item')\n\n\nclass FarpostParserResume(BaseParserResumeHTML):\n container_error = None\n container_head = None\n container_gender = None\n container_phone = None\n container_email = None\n container_city = None\n container_metro_station = None\n container_education = None\n container_experience = None\n container_full_name = None\n container_key_words = None\n\n target_error = Expression(tag='div',\n attribute='class',\n value='notificationPlate')\n target_gender = Expression(tag='span',\n attribute='data-field',\n value='sex-maritalStatus-hasChildren'\n )\n target_phone = Expression(tag='div',\n attribute='class',\n value='new-contacts__td new-contact__phone')\n target_email = Expression(tag='a',\n attribute='class',\n value='new-contact__email')\n target_city = Expression(tag='span',\n attribute='data-field',\n 
value='district')\n target_metro_station = Expression()\n target_education = Expression()\n target_experience = Expression()\n target_first_name = None\n target_last_name = None\n target_middle_name = None\n target_key_words = Expression(tag='p',\n attribute='data-field',\n value='resumeSkills')\n\n\nclass RabotavgorodeParserSearch(BaseParserSearchHTML):\n target_title_resume = Expression(tag='a',\n attribute='target',\n value='_blank')\n target_url = Expression(tag='a',\n attribute='target',\n value='_blank')\n target_last_update = Expression(tag='div',\n attribute='class',\n value='date')\n target_body = Expression(tag='div',\n attribute='class',\n value='info')\n\n\nclass RabotavgorodeParserResume(BaseParserResumeHTML):\n container_error = None\n container_head = None\n container_gender = None\n container_phone = None\n container_email = None\n container_city = None\n container_metro_station = None\n container_education = None\n container_experience = None\n container_full_name = None\n container_key_words = None\n\n target_error = None\n target_gender = Expression(tag='td')\n target_phone = Expression(tag='td')\n target_email = Expression(tag='td')\n target_city = Expression(tag='td')\n target_metro_station = Expression(tag='td')\n target_education = Expression(tag='td')\n target_experience = Expression(tag='td')\n target_first_name = Expression(tag='td')\n target_last_name =Expression(tag='td')\n target_middle_name = Expression(tag='td')\n target_key_words = Expression(tag='td')\n\n\nclass SearchBase(object):\n search_pattern = None\n search_iterator = None\n search_text = None\n search_step = 1\n search_start = 0\n\n source = None\n preview = False\n search_class = None\n resume_class = None\n\n def generate_search_url(self, search_str):\n pattern = getattr(self, 'search_pattern', None)\n iterator = getattr(self, 'search_iterator', None)\n search_url = pattern %(search_str)\n search_url += iterator\n return search_url\n\n def next_step(self, search_url=None, namber_page=0):\n if not search_url:\n raise SearchError(\"search_url is None\")\n return search_url %(namber_page)\n \n def search(self, search_str=None, session=None, resume_list=[], \n reload_error_flag=False, update_flad=False,\n debug_flag=False):\n\n if not(reload_error_flag and update_flad):\n\n if not search_str:\n raise SearchError(\"Search_str is None\")\n\n search_url = self.generate_search_url(search_str) \n search_pars = getattr(self, 'search_class', None)\n search_step = getattr(self, 'search_step', None)\n source_name = getattr(self, 'source', None)\n preview_flag = getattr(self, 'preview', None)\n namber_page = getattr(self, 'search_start', None)\n\n if not session:\n session = requests.Session()\n\n while True:\n search_speak = self.next_step(search_url, namber_page)\n request_object = requests.Request('GET', search_speak)\n request = session.prepare_request(request_object)\n responce = session.send(request)\n content_html = responce.text\n responce.close()\n\n if responce.status_code != 200:\n break\n\n try:\n begin_pars = time.time()\n resumes = parser_search(cls=search_pars, \n html=content_html)\n end_pars = time.time()\n spent = end_pars - begin_pars\n except ParserError:\n break\n resume_list += resumes\n namber_page += search_step\n \n if debug_flag:\n print('namber_page = %i, spent = %f' %(namber_page, \n spent))\n if namber_page >= search_step * 10:\n break\n\n resume_data_list = []\n resume_error_list = []\n\n if len(resume_list) > 0:\n resume_pars = getattr(self, 'resume_class', None)\n for resume in 
resume_list:\n resume_url = resume['url']\n resume_reque_obj = requests.Request('GET', resume_url)\n resume_request = session.prepare_request(resume_reque_obj)\n try:\n resume_responce = session.send(resume_request)\n except HTTPError:\n resume_error_list += resume\n continue\n resume_html = resume_responce.text\n resume_responce.close()\n try:\n resume_data = parser_resume(cls=resume_pars, html=resume_html)\n except ParserError:\n resume_error_list += resume\n continue\n resume_data.setdefault('title_resume', resume['title_resume'])\n resume_data.setdefault('url', resume['url'])\n resume_data.setdefault('last_update', resume['last_update'])\n resume_data.setdefault('source', source_name)\n resume_data.setdefault('preview', preview_flag)\n resume_data_list.append(resume_data)\n\n return resume_data_list, resume_error_list\n\n\nclass SearchHh(SearchBase):\n search_pattern = 'https://hh.ru/search/resume?exp_period=all_time&'\n search_pattern += 'order_by=relevance&text=%s&pos=full_text&logic='\n search_pattern += 'normal&clusters=true'\n search_iterator = '&page=%i'\n search_step = 1\n search_start = 0\n\n source = 'hh.ru'\n preview = False\n search_class = HhParserSearch()\n resume_class = HhParserResume()\n\n\nclass SearchSj(SearchBase):\n search_pattern = 'https://www.superjob.ru/resume/search_resume.html?'\n search_pattern += 'sbmit=1&c[]=1&keywords[0][srws]=7&keywords[0]'\n search_pattern += '[skwc]=and&keywords[0][keys]=%s'\n search_iterator = '&search_hesh=%i&main=1&page=%i'\n search_step = 1\n search_start = 0\n\n source = 'www.superjob.ru'\n preview = True\n search_class = SuperjobParserSearch()\n resume_class = SuperjobParserResume()\n\n def next_step(self, search_url=None, namber_page=0):\n if not search_url:\n raise SearchError(\"search_url is None\")\n random_sequence = random()*10**15\n hesh = int(random_sequence)\n return search_url %(hesh, namber_page)\n\n\nclass SearchAvito(SearchBase):\n search_pattern = 'https://www.avito.ru/rossiya/rezume?p=%i&q=%s'\n search_iterator = None\n search_step = 1\n search_start = 0\n\n source = 'www.avito.ru'\n preview = True\n search_class = AvitoParserSearch()\n resume_class = AvitoParserResume()\n\n def generate_search_url(self, search_str):\n self.search_text = search_str\n return getattr(self, 'search_pattern', None)\n\n def next_step(self, search_url=None, namber_page=0):\n if not search_url:\n raise SearchError(\"search_url is None\")\n\n search_str = getattr(self, 'search_text', None)\n return search_url %(namber_page, search_str)\n\n\n'''\ndef search_sj(search_str, session=None, resume_list=[], \n reload_error_flag=False, update_flad=False):\n if not(reload_error_flag and update_flad):\n search_url = 'https://www.superjob.ru/resume/search_resume.html?'\n search_url += 'sbmit=1&c[]=1&keywords[0][srws]=7&keywords[0]'\n search_url += '[skwc]=and&keywords[0][keys]=%s'\n search_url = search_url %(search_str)\n search_url += '&search_hesh=%i&main=1&page=%i'\n class_sj_search = SuperjobParserSearch()\n namber_page = 0\n\n if not session:\n session = requests.Session()\n\n while True:\n random_sequence = random()*10**15\n hesh = int(random_sequence)\n search_peak = search_url %(hesh, namber_page)\n request_object = requests.Request('GET', search_peak)\n request = session.prepare_request(request_object)\n responce = session.send(request)\n content_html = responce.text\n responce.close()\n\n if responce.status_code != 200:\n break\n\n try:\n resumes = parser_search(cls=class_sj_search, html=content_html)\n except ParserError:\n break\n 
resume_list += resumes\n namber_page += 1\n\n if len(resume_list) > 0:\n resume_data_list = []\n resume_error_list = []\n class_sj = SuperjobParserResume()\n for resume in resume_list:\n resume_url = resume['url']\n resume_reque_obj = requests.Request('GET', resume_url)\n resume_request = session.prepare_request(resume_reque_obj)\n try:\n resume_responce = session.send(resume_request)\n except HTTPError:\n resume_error_list += resume\n continue\n resume_html = resume_responce.text\n resume_responce.close()\n try:\n resume_data = parser_resume(cls=class_sj, html=html_content)\n except ParserError:\n resume_error_list += resume\n continue\n resume_data.setdefault('title_resume', resume['title_resume'])\n resume_data.setdefault('url', resume['url'])\n resume_data.setdefault('last_update', resume['last_update'])\n resume_data_list += resume_data\n\n return resume_data_list, resume_error_list \n\ndef search_avito(search_str, session=None, resume_list=[], \n reload_error_flag=False, update_flad=False):\n\n if not(reload_error_flag and update_flad):\n search_url = 'https://www.avito.ru/rossiya/rezume?p=%i&q=%s'\n class_avito_search = AvitoParserSearch()\n namber_page = 0\n\n if not session:\n session = requests.Session()\n\n while True:\n search_peak = search_url %(namber_page, search_str)\n request_object = requests.Request('GET', search_peak)\n request = session.prepare_request(request_object)\n responce = session.send(request)\n content_html = responce.text\n responce.close()\n\n if responce.status_code != 200:\n break\n\n try:\n resumes = parser_search(cls=class_hh_search, \n html=content_html)\n except ParserError:\n break\n resume_list += resumes\n namber_page += 1\n\n if len(resume_list) > 0:\n resume_data_list = []\n resume_error_list = []\n class_avito = AvitoParserResume()\n for resume in resume_list:\n resume_url = resume['url']\n resume_reque_obj = requests.Request('GET', resume_url)\n resume_request = session.prepare_request(resume_reque_obj)\n try:\n resume_responce = session.send(resume_request)\n except HTTPError:\n resume_error_list += resume\n continue\n resume_html = resume_responce.text\n resume_responce.close()\n try:\n resume_data = parser_resume(cls=class_avito, html=resume_html)\n except ParserError:\n resume_error_list += resume\n continue\n resume_data.setdefault('title_resume', resume['title_resume'])\n resume_data.setdefault('url', resume['url'])\n resume_data.setdefault('last_update', resume['last_update'])\n resume_data_list += resume_data\n\n return resume_data_list, resume_error_list\n\ndef search_Rabota(search_str, session=None, resume_list=[], \n reload_error_flag=False, update_flad=False):\n\n if not(reload_error_flag and update_flad):\n search_url = 'https://www.rabota.ru/v3_searchResumeByParamsResults'\n search_url += '.html?action=search&area=v3_searchResumeByParams'\n search_url += 'Results&p=-2005&w=&qk[0]=%s&qot[0]=1&qsa[0][]=1&sf='\n search_url += '&st=&cu=2&krl[]=3&krl[]=4&krl[]=284&krl[]=25&'\n search_url += 'krl[]=328&krl[]=231&krl[]=248&krl[]=250&krl[]=395&'\n search_url += 'krl[]=299&af=&at=&sex=&eylo=&t2l=&la=&id=22847743'\n search_url = search_url %(search_str)\n search_url += '&start=%i'\n class_rabota_search = RabotaParserSearch()\n namber_page = 0\n\n if not session:\n session = requests.Session()\n\n while True:\n search_peak = search_url %(namber_page)\n request_object = requests.Request('GET', search_peak)\n request = session.prepare_request(request_object)\n responce = session.send(request)\n content_html = responce.text\n 
responce.close()\n\n if responce.status_code != 200:\n break\n\n try:\n resumes = parser_search(cls=class_rabota_search, \n html=content_html)\n except ParserError:\n break\n resume_list += resumes\n namber_page += 1\n\n if len(resume_list) > 0:\n resume_data_list = []\n resume_error_list = []\n class_rabota = RabotaParserResume()\n for resume in resume_list:\n resume_url = resume['url']\n resume_reque_obj = requests.Request('GET', resume_url)\n resume_request = session.prepare_request(resume_reque_obj)\n try:\n resume_responce = session.send(resume_request)\n except HTTPError:\n resume_error_list += resume\n continue\n resume_html = resume_responce.text\n resume_responce.close()\n try:\n resume_data = parser_resume(cls=class_rabota,\n html=resume_html)\n except ParserError:\n resume_error_list += resume\n continue\n resume_data.setdefault('title_resume', resume['title_resume'])\n resume_data.setdefault('url', resume['url'])\n resume_data.setdefault('last_update', resume['last_update'])\n resume_data_list += resume_data\n\n return resume_data_list, resume_error_list\n'''\n\nif __name__ == '__main__':\n '''\n url = 'https://hh.ru/resume/e9bb81ccff0337fea50039ed1f577a68444648'\n connect = urllib.urlopen(url)\n html_content = connect.read()\n connect.close()\n class_hh = HhParserResume()\n data = parser_resume(cls=class_hh, html=html_content)\n print(data)\n #\n PATH_FILE = 'D:\\git-project\\parserHH\\ResumesIdBase'\n namber_page = 0\n resume_list = []\n class_hh_search = HhParserSearch()\n begin_all_time = time.time()\n begin_ = time.time()\n while True:\n begin_time = time.time()\n searchSpeak = \"https://hh.ru/search/resume?text=SQL&logic=normal&pos=full_text&exp_period=all_time&order_by=relevance&area=1&clusters=true&page=%i\" %(namber_page)\n try:\n connect = urllib.urlopen(searchSpeak)\n except HTTPError:\n content_html = connect.read()\n connect.close()\n break\n content_html = connect.read()\n connect.close()\n try:\n resumes = parser_search(cls=class_hh_search, html=content_html)\n except ParserError:\n break\n resume_list += resumes\n end_time = time.time()\n spent = end_time - begin_time\n print('namber_page = %i, spent = %f' %(namber_page, spent))\n if namber_page >= 50:\n break\n namber_page += 1\n end_all_time = time.time()\n spent_all = (end_all_time - begin_all_time) / 60\n print('spent >>', spent_all)\n print('Count = ', len(resume_list))\n resume_index = 0\n error_stak = 0\n date = datetime.datetime.utcnow()\n date_performance = date.strftime('%d%m%G%H%M')\n search_file = PATH_FILE + '\\search_%s.csv' %(date_performance)\n error_file = PATH_FILE + '\\error_%s.csv' %(date_performance)\n file_error = open(error_file, 'bw')\n begin_resumes = time.time()\n for resume in resume_list:\n begin_resume = time.time()\n try:\n connect = urllib.urlopen(resume['url'])\n except URLError:\n error_stak += 1\n error_str = '%s;\\n' %(resume['url'])\n format = error_str.encode('utf-8')\n file_error.write(format)\n continue\n html_content = connect.read()\n connect.close()\n class_hh = HhParserResume()\n data = parser_resume(cls=class_hh, html=html_content)\n resume.setdefault('gender', data['gender'])\n resume.setdefault('first_name', data['first_name'])\n resume.setdefault('last_name', data['last_name'])\n resume.setdefault('middle_name', data['middle_name'])\n resume.setdefault('phone', data['phone'])\n resume.setdefault('email', data['email'])\n resume.setdefault('city', data['city'])\n resume.setdefault('metro_station', data['metro_station'])\n resume.setdefault('education', 
data['education'])\n resume.setdefault('experience', data['experience'])\n end_resume = time.time()\n spant_resume = (end_resume - begin_resume) / 60\n print('Resume - %i, spent = %f' %(resume_index,\n spant_resume))\n resume_index += 1\n end_resumes = time.time()\n file_error.close()\n spent_resumes = (end_resumes - begin_resumes) / 60\n print('Spent >>', spent_resumes)\n end_ = time.time()\n spent_ = (end_ - begin_) / 60\n print('All spent time = %f' %(spent_))\n file_search = open(search_file, 'bw')\n for record_item in resume_list:\n try:\n data_str = '%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;\\n' %(\n record_item['title_resume'],\n record_item['salary'],\n record_item['age'],\n record_item['experience'],\n record_item['last_position'],\n record_item['organization_name'],\n record_item['url'],\n record_item['last_update'],\n record_item['gender'],\n record_item['first_name'],\n record_item['last_name'],\n record_item['middle_name'],\n record_item['phone'],\n record_item['email'],\n record_item['city'],\n record_item['metro_station'],\n record_item['education'])\n format = data_str.encode('utf-8')\n file_search.write(format)\n except KeyError:\n continue\n file_search.close()\n print('Count error = %i' %(error_stak))\n #\n resume_list = []\n class_zp_search = ZarplataParserSearch()\n begin_all_time = time.time()\n date_performance = date.strftime('%d%m%G%H%M')\n PATH_FILE = 'D:\\git-project\\parserHH\\ResumesIdBase'\n search_file = PATH_FILE + '\\search_%s.csv' %(date_performance)\n searchSpeak =\"https://api.zp.ru/v1/resumes/?geo_id=1177&limit=100&q=Siebel&state=1\"\n connect = urllib.urlopen(searchSpeak)\n content_html = connect.read()\n connect.close()\n resumes = parser_search(cls=class_zp_search, html=content_html)\n resume_list += resumes\n end_all_time = time.time()\n spent_all = (end_all_time - begin_all_time) / 60\n print('spent >>', spent_all)\n print('Count = ', len(resume_list))\n file_search = open(search_file, 'bw')\n for record_item in resume_list:\n print(record_item['age'])\n data_str = '%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n' %(\n record_item['title_resume'],\n record_item['salary'],\n record_item['age'],\n record_item['experience'],\n record_item['last_position'],\n record_item['organization_name'],\n record_item['url'],\n record_item['last_update'])\n format = data_str.encode('utf-8')\n file_search.write(format)\n file_search.close()\n \n PATH_FILE = 'D:\\git-project\\parserHH\\ResumesIdBase'\n namber_page = 0\n resume_list = []\n class_hh_search = SuperjobParserSearch()\n begin_all_time = time.time()\n begin_ = time.time()\n while True:\n random_sequence = random()*10**15\n hesh = int(random_sequence)\n begin_time = time.time()\n searchSpeak = 'https://www.superjob.ru/resume/search_resume.html?sbmit=1&t[]=4&keywords[0][srws]=7&keywords[0][skwc]=and&keywords[0][keys]=Siebel&search_hesh=%i&main=1&page=%i' %(hesh, namber_page)\n try:\n connect = urllib.urlopen(searchSpeak)\n except HTTPError:\n content_html = connect.read()\n connect.close()\n break\n content_html = connect.read()\n connect.close()\n try:\n resumes = parser_search(cls=class_hh_search, html=content_html)\n except ParserError:\n break\n resume_list += resumes\n end_time = time.time()\n spent = end_time - begin_time\n print('namber_page = %i, spent = %f' %(namber_page, spent))\n if namber_page >= 50:\n break\n namber_page += 1\n end_all_time = time.time()\n spent_all = (end_all_time - begin_all_time) / 60\n print('spent >>', spent_all)\n print('Count = ', len(resume_list))\n resume_index = 0\n 
error_stak = 0\n date = datetime.datetime.utcnow()\n date_performance = date.strftime('%d%m%G%H%M')\n search_file = PATH_FILE + '\\search_%s.csv' %(date_performance)\n error_file = PATH_FILE + '\\error_%s.csv' %(date_performance)\n file_error = open(error_file, 'bw')\n #begin_resumes = time.time()\n #for resume in resume_list:\n #begin_resume = time.time()\n #try:\n # connect = urllib.urlopen(resume['url'])\n #except URLError:\n # error_stak += 1\n # error_str = '%s;\\n' %(resume['url'])\n # format = error_str.encode('utf-8')\n # file_error.write(format)\n # continue\n #html_content = connect.read()\n #connect.close()\n #class_hh = HhParserResume()\n #data = parser_resume(cls=class_hh, html=html_content)\n #resume.setdefault('gender', data['gender'])\n #resume.setdefault('first_name', data['first_name'])\n #resume.setdefault('last_name', data['last_name'])\n #resume.setdefault('middle_name', data['middle_name'])\n #resume.setdefault('phone', data['phone'])\n #resume.setdefault('email', data['email'])\n #resume.setdefault('city', data['city'])\n #resume.setdefault('metro_station', data['metro_station'])\n #resume.setdefault('education', data['education'])\n #resume.setdefault('experience', data['experience'])\n #end_resume = time.time()\n #spant_resume = (end_resume - begin_resume) / 60\n #print('Resume - %i, spent = %f' %(resume_index,\n # spant_resume))\n #resume_index += 1\n #end_resumes = time.time()\n #file_error.close()\n #spent_resumes = (end_resumes - begin_resumes) / 60\n #print('Spent >>', spent_resumes)\n end_ = time.time()\n spent_ = (end_ - begin_) / 60\n print('All spent time = %f' %(spent_))\n file_search = open(search_file, 'bw')\n for record_item in resume_list:\n try:\n data_str = '%s;%s;%s;%s;%s;%s;%s;%s;\\n' %(\n record_item['title_resume'],\n record_item['salary'],\n record_item['age'],\n record_item['experience'],\n record_item['last_position'],\n record_item['organization_name'],\n record_item['url'],\n record_item['last_update']\n )\n format = data_str.encode('utf-8')\n file_search.write(format)\n except KeyError:\n continue\n file_search.close()\n print('Count error = %i' %(error_stak))\n'''\n session = requests.Session()\n '''\n session.headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36', 'Accept-Language': 'ru-RU,ru;q=0.8,en-US;q=0.6,en;q=0.4', 'Host': 'hh.ru', 'Content-Encoding': 'gzip', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate, br', 'Connection': 'keep-alive', 'Cache-Control':'max-age=0', 'DNT':'1', 'Referer':'https://hh.ru/login', 'Upgrade-Insecure-Requests':'1'\n }\n \n session.headers = {\n 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Language': 'ru-RU,ru;q=0.8,en-US;q=0.6,en;q=0.4',\n 'Content-Encoding': 'gzip, deflate, br',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36 OPR/47.0.2631.80',\n 'Upgrade-Insecure-Requests': '1',\n 'Connection': 'keep-alive',\n 'Host': 'www.superjob.ru',\n 'Referer': 'https://www.superjob.ru/'\n }\n '''\n session.headers = {\n 'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',\n 'Accept-Language': 'ru-RU,ru;q=0.8,en-US;q=0.6,en;q=0.4',\n 'Content-Encoding': 'gzip',\n 'Accept': 
'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Connection': 'keep-alive',\n 'DNT': '1',\n 'Origin': 'https://www.avito.ru',\n 'Cache-Control': 'no-cache',\n 'Host': 'www.avito.ru',\n 'Referer': 'https://www.avito.ru/profile/login?next=%2Fprofile',\n 'Upgrade-Insecure-Requests': '1'\n }\n #search_hh = SearchHh()\n #search_sj = SearchSj()\n search_avito = SearchAvito()\n #data, error = search_hh.search('Siebel', session=session, debug_flag=True)\n #data, error = search_sj.search('Siebel', session=session, debug_flag=True)\n relod_resume_avito = [\n {\n 'title_resume': 'codec',\n 'url': 'https://www.avito.ru/moskva/rezume/upravlyayuschiy_menedzher_administrator_1114512030',\n 'last_update': '22.01.2018'\n }\n ]\n data, error = search_avito.search('Siebel', session=session,\n resume_list=relod_resume_avito,\n update_flad=True,\n debug_flag=True)\n #print('---------------')\n #print(data)\n #print(error)\n #print('---------------')\n for item in data:\n print('<<<---------->>>')\n print(item)\n #print(data[1]['experience'], error)\n" }, { "alpha_fraction": 0.7450980544090271, "alphanum_fraction": 0.7450980544090271, "avg_line_length": 35.57143020629883, "blob_id": "edd0fb9c3ee2f1cbd2eba95b87285af524e0e121", "content_id": "db7ad7fdf9cdc1aa8fc5058c8a40c42e2a1e7365", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 255, "license_type": "no_license", "max_line_length": 69, "num_lines": 7, "path": "/Seizure/popup (2).js", "repo_name": "FORSEN-ROCK/other", "src_encoding": "UTF-8", "text": "chrome.browserAction.onClicked.addListener(function(tab) {\n chrome.tabs.create({url:chrome.extension.getURL(\"option.html\")});\n});\n\ndocument.addEventListener(\"click\", function(tab) {\n chrome.tabs.create({url:chrome.extension.getURL(\"option.html\")});\n};" }, { "alpha_fraction": 0.6071873307228088, "alphanum_fraction": 0.6135029196739197, "avg_line_length": 34.80573272705078, "blob_id": "eda611561c25ecfc93ac8b955278665a033dd53c", "content_id": "4b5aba4199908195d60faf95cb15de757fdf3d74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 11683, "license_type": "no_license", "max_line_length": 151, "num_lines": 314, "path": "/Seizure/searchLogec.js", "repo_name": "FORSEN-ROCK/other", "src_encoding": "WINDOWS-1251", "text": "//Канцептуальный дерьмокод\n\n//Хз на сколько верен данный подход, но сделаем так\n\n//Предопределенные объекты парсеры( регулярные вырожения)\n\nvar parserForHh = {\n pageNamber: /.+\\&page=/g,\n linkResumes: /\\/resume\\/\\w+/g,\n bodyInfo: \"tr[itemscope='itemscope']\",//\"div[class='output__main']\",//пока так нужно корректировать \n update: \"div[class='output__addition']\",//пока так нужно корректировать \n resumeLinkInnerHtml: \"a[itemprop='jobTitle']\",//пока так нужно корректировать \n};\nvar parserForZarplata = {\n pageNamber: /.+\\&page=/g,\n linkResumes: null,\n bodyInfo: \"div[class='ui grid']\",\n update: \"div[style='margin-top: 5px;']\",\n resumeLinkInnerHtml: \"a[target='_blank']\"//<a href=\"http:\\/\\/hr.zarplata.ru\\/resumes\\/\\?.+\">.+?<\\/a>/g\n};\nvar parserForSuperjob = {\n pageNamber: /.+\\&page=/g,\n linkResumes: null,\n bodyInfo: \"div[class='ResumeListElementNew js-resume-item']\",//>.+?/g,//пока так нужно корректировать sj_block m_b_1 sj_panel \n update: \"div[class='h_color_gray ResumeListElementNew_updated']\",\n resumeLinkInnerHtml: 
\"a[target='_blank']\"//<a .+? href=\"https:\\/\\/www.superjob.ru\\/resume\\/.+\">.+?<\\/a>/g\n};\nvar parserForRabota = {\n pageNamber: /.+\\&page=/g,\n linkResumes: null,\n bodyInfo: \"div[class='h-box-wrapper']\",//<div class=\"sj_flex_col2\">.+?/g,//пока так нужно корректировать \n update: \"p[class='box-wrapper__descr_12grey mt_10']\",\n resumeLinkInnerHtml: \"a[target='_blank']\"//<a .+? href=\"https:\\/\\/www.superjob.ru\\/resume\\/.+\">.+?<\\/a>/g\n};\nvar parserForAvito = {\n pageNamber: /.+\\&page=/g,\n linkResumes: /\\/moskva\\/rezume\\/\\w+/g,\n bodyInfo: \"div[class='description item_table-description']\",//<div class=\"sj_flex_col2\">.+?/g,//пока так нужно корректировать \n update: \"div[class='clearfix ']\",\n resumeLinkInnerHtml: \"a[class='item-description-title-link']\"\n};\n\nvar parseLayout = {\n hh: parserForHh,\n zarplata: parserForZarplata,\n superjob: parserForSuperjob,\n avito: parserForAvito,\n rabota: parserForRabota\n};\n\nvar LinkHh = {\n link: \"https://hh.ru/search/resume?area=1&clusters=true&text=searchSpec&pos=full_text&logic=normal&exp_period=all_time\"\n};\n\nvar LinkZarplata = {\n link: \"https://www.zarplata.ru/resume?q=uri+searchSpec\"\n};\n\nvar LinkSuperjob = {\n link: \"https://www.superjob.ru/resume/search_resume.html?sbmit=1&t[]=4&keywords[0][srws]=7&keywords[0][skwc]=and&keywords[0][keys]=searchSpec\"\n};\n\nvar LinkRabota = {\n link: \"https://www.rabota.ru/v3_searchResumeByParamsResults.html?action=search&area=v3_searchResumeByParamsResults&p=-2005&w=&qk%5B0%5D=searchSpec\"\n};\n\nvar LinkAvito = {\n link: \"https://www.avito.ru/moskva/rezume?s=2&q=searchSpec\"\n};\n\nvar links = {\n hh:LinkHh,\n zarplata:LinkZarplata,\n superjob:LinkSuperjob,\n rabota:LinkRabota,\n avito:LinkAvito\n};\n//Сиё вынести в отдельный файл\n\nfunction generalSerchSpeack(option) { //mainCriterion: object; addCriteria: object\n mainCriterion = option.mainCriterion || null;\n searchScheme = option.searchScheme || null;\n page = option.page || null;\n \n if(searchScheme == \"\" || searchScheme == '') {\n return null;\n }\n \n if((!mainCriterion && searchScheme)&&(searchScheme && page)) {\n //var reg = /.+\\&page=/g;\n var search = searchScheme.match(reg);\n search += String(page);\n return search;\n }\n \n var search = searchScheme;\n for(key in mainCriterion){\n if(mainCriterion[key] == 'none') {\n continue;\n }\n \n search += \"&\" + key + \"=\" + mainCriterion[key];\n }\n \n //search += \"&items_on_page=100&page=0\";\n return search;\n};\n\nfunction handlerResponse(responseText) {\n return function(){\n rex = /<div class=\"sticky-container\">\\.+<\\/div>/g;\n contein = String(responseText).match(rex);\n console.info(contein);\n \n };\n};\n\n//По идее здесь только обработчики в се остальное вынести \n\nfunction focusHandler(event) {\n var target = event.target;\n if(target.dataset.entry != undefined){\n target.value = ''; \n }\n};\n\nfunction clicHandler(event) {\n var target = event.target;\n if(target.dataset.command == undefined){\n return;\n }\n search = target.form[0].value;\n if(search == \"\"){\n return;\n }\n var limitSample = 0^(document.forms[0].limitField.value);\n if(limitSample == \"\" && (limitSample < 0 || limitSample > 100)){\n limitSample = 20;\n }\n console.log(search);\n searchRun(search, limitSample);\n //handlerResponse(testrex);\n};\n/*\nfunction searchRun(searchSpec, limitSample) {\n var searchLinks = Object.assign({},links);\n for( var key in searchLinks){\n searchLinks[key].link = searchLinks[key].link.replace(\"searchSpec\", searchSpec);\n }\n 
var rexp = /\\w{4,5}:\\/\\/(\\w+|www\\.\\w+)\\.ru/g;\n var rexLink = /\\/resume\\/\\w+/g;\n var quantityRecord = 20;\n for(var key in searchLinks) {\n //var key = 'hh';\n var domain = searchLinks[key].link.match(rexp);\n console.log(domain);\n request = new XMLHttpRequest();\n request.open(\"GET\", searchLinks[key].link, false);\n request.send(null); \n var answer = document.body.querySelector(\"#listAnswer\");\n var buff = document.createDocumentFragment();\n if(request.readyState == 4 && request.status == 200) {\n var div = document.createElement('div');\n div.innerHTML = request.responseText;\n var resumeRecords = div.querySelectorAll(parseLayout[key].bodyInfo);\n if(resumeRecords.length >= limitSample) {\n quantityRecord = limitSample;\n } \n else{\n quantityRecord = resumeRecords.length;\n }\n for(var i = 0; i < quantityRecord; i++){\n var item = document.createElement(\"il\");\n var link = document.createElement(\"a\");\n linkResume = resumeRecords[i].querySelector(parseLayout[key].resumeLinkInnerHtml);\n console.log(linkResume.href);\n var updataLink = domain + linkResume.href.match(parseLayout[key].linkResumes);\n linkResume.href = updataLink;\n console.log(updataLink);\n item.innerHTML = resumeRecords[i].innerHTML;\n buff.appendChild(item);\n }\n }\n answer.appendChild(buff);\n }\n};\n*/\nfunction getSearchParametrs(){\n var parentForm = document.forms[0];\n var searchSpec = parentForm.searchField.value;\n var limitSample = 0^parentForm.limitField.value;\n if(limitSample == \"\" || limitSample <= 0) {\n limitSample = 20;\n }\n else if(limitSample > 100){\n limitSample = 100;\n }\n if(searchSpec.indexOf(\" \",1) != -1){\n searchSpec = searchSpec.replace(\" \", \"+\");\n }\n return {searchSpec:searchSpec, limitSample:limitSample};\n};\n\nfunction generalLinkOject(searchSpec){\n var linckObject = JSON.parse(JSON.stringify(links));\n for( var key in linckObject){\n linckObject[key].link = linckObject[key].link.replace(\"searchSpec\", searchSpec);\n }\n return linckObject;\n};\n\nfunction searchingRequest(linckObject){\n var searchingResponse = {};\n for(var key in linckObject) {\n request = new XMLHttpRequest();\n request.open(\"GET\", linckObject[key].link, false);\n request.send(null); \n if(request.readyState == 4 && request.status == 200) {\n searchingResponse[key] = request.responseText;\n request.abort();\n }\n }\n return searchingResponse;\n};\nfunction showeResult(responseObject, linckObject, DOMContainerResult, limitSample){\n if(!responseObject || !DOMContainerResult){\n return;\n }\n \n var rexDomain = /\\w{4,5}:\\/\\/(\\w+|www\\.\\w+)\\.ru/g, rexLinkResume = /\\/resume\\/\\w+/g;\n var buff = document.createDocumentFragment();\n var count = 0;\n for(var key in responseObject){//Цикл по обекту ответов\n var domain = linckObject[key].link.match(rexDomain);\n var div = document.createElement('div');\n div.innerHTML = responseObject[key];\n //console.log(key + \" = > \" + responseObject[key]);\n var resumeRecords = div.querySelectorAll(parseLayout[key].bodyInfo);\n \n if(resumeRecords.length >= limitSample) {\n quantityRecord = limitSample;\n } \n else{\n quantityRecord = resumeRecords.length;\n }\n \n for(var i = 0; i < quantityRecord; i++){\n count++;\n var itemList = document.createElement(\"il\");\n var itemDiv = document.createElement(\"div\");\n var link = document.createElement(\"a\");\n if(domain != \"https://www.rabota.ru\" && domain != \"https://www.superjob.ru\"){\n linkResume = resumeRecords[i].querySelector(parseLayout[key].resumeLinkInnerHtml);\n var updataLink = 
domain + linkResume.href.match(parseLayout[key].linkResumes);\n linkResume.href = updataLink;\n }\n itemDiv.innerHTML = resumeRecords[i].innerHTML;\n itemDiv.className = \"itemList\";\n itemList.appendChild(itemDiv);\n buff.appendChild(itemList);\n /*if(i == quantityRecord - 1){\n var buttonContainer = document.createElement(\"li\");\n var nextResult = document.createElement(\"input\");\n nextResult.type = \"button\";\n nextResult.value = \"Еще результаты из данного источника\";\n nextResult.dataset.command = \"nextResult\";\n buttonContainer.appendChild(nextResult);//innerHTML = nextResult;\n buff.appendChild(buttonContainer);\n }*/\n\n \n }\n DOMContainerResult.appendChild(buff)\n console.log(count);\n }\n};\n\nfunction handlerRunButton(event){\n if(event.target.dataset.command == \"ranSearch\"){\n var searchingResults = document.body.querySelector(\"#searchingResults\");//по идее тоже должны называться результатами\n //var buttonRun = document.body.querySelector(\"input[name='run']\");\n var searchSpec = getSearchParametrs().searchSpec;\n var limitSample = getSearchParametrs().limitSample;\n if(searchSpec != \"\"){\n if(searchingResults.children.length != 0){\n console.log(searchingResults.children.length);\n while(searchingResults.children.length){\n searchingResults.removeChild(searchingResults.lastChild);\n }\n }\n var linckObject = generalLinkOject(searchSpec);\n var searchingResponse = searchingRequest(linckObject);\n showeResult(searchingResponse, linckObject, searchingResults, limitSample);\n event.target.value = \"Поиск\";\n }\n \n }\n}\n\n\n\n\ndocument.addEventListener(\"focus\",focusHandler);\n//document.forms[0].run.addEventListener(\"click\",handlerRunButton);\n/*document.addEventListener(\"click\", function(event) {\n if(event.target.dataset.command == \"ranSearch\"){\n event.target.value = \"Идет поиск....\";\n }\n});\n*/\ndocument.addEventListener(\"click\",handlerRunButton);\n\n\n//document.addEventListener(\"click\",clicHandler);" }, { "alpha_fraction": 0.4845360815525055, "alphanum_fraction": 0.5047864317893982, "avg_line_length": 36.73611068725586, "blob_id": "3519e3987d2970e07b49511f4dd7daa659d19cd2", "content_id": "12903efa1a691298bc209728a5df73a8507493e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2889, "license_type": "no_license", "max_line_length": 78, "num_lines": 72, "path": "/load_card/Generate_load_file.py", "repo_name": "FORSEN-ROCK/other", "src_encoding": "UTF-8", "text": "import datetime\n\nfrom random import random\n\n\ndef generate_dtata(default_str, pattern_data_str,\n path_file='D:\\git-project\\load_card\\card_file',\n len_card_num=16,\n len_phone_num=11,\n emission_type='6062',\n unequ_phone=True,\n default_phone='88005553535',\n count_card=100):\n count = 0\n date = datetime.datetime.utcnow()\n date_performance = date.strftime('%G%m%d%H%M%S')\n file_name = path_file + '\\ext_contact_%s.dat' %(date_performance)\n card_file = open(file_name, 'bw')\n default_str += '\\r\\n'\n data_str = default_str.encode('utf-8')\n card_file.write(data_str)\n\n while True:\n unequ_float = random()*10**5\n unequ_id = int(unequ_float)\n person_name = 'PERSON_%i' %(unequ_id)\n length_number = len_card_num - len(emission_type)\n num_card_float = random()*10**length_number\n num_card_int = int(num_card_float)\n num_card_str = emission_type + str(num_card_int)\n \n if len(num_card_str) < len_card_num:\n ofset = len_card_num - len(num_card_str)\n num_card_str += date_performance[:ofset]\n \n if unequ_phone: \n 
num_phone_float = random()*10**len_phone_num\n num_phone_int = int(num_phone_float)\n num_phone_str = str(num_phone_int)\n \n if len(num_phone_str) < len_phone_num:\n ofset = len_phone_num - len(num_phone_str)\n num_phone_str += date_performance[:ofset]\n \n else:\n num_phone_str = default_phone\n \n data = pattern_data_str %(num_card_str, \n person_name, \n person_name, \n person_name, \n num_phone_str)\n data += '\\r\\n'\n data_str = data.encode('utf-8')\n card_file.write(data_str)\n \n if count >= count_card:\n break\n\n count += 1\n card_file.close()\n return True\n\nif __name__ == '__main__':\n default_str = ('#№ карты|Фамилия|Имя|Отчество|Пол|Дата рождения|Индекс|' +\n 'Область|Населенный пункт|Улица|Дом|Корпус|Квартира|' +\n 'Номер телефона|e-mail|Согласие на рассылку|' +\n 'Дата заполнения анкеты|Хэш|EAN|Название партнера')\n pattern_data_str = ('%s|%s|%s|%s|1|23/12/1988||||||||' + \n '%s|[email protected]|1|16/01/2018|?||Газпромбанк')\n complit_flag = generate_dtata(default_str, pattern_data_str, \n emission_type='6821', unequ_phone=False)" }, { "alpha_fraction": 0.6873315572738647, "alphanum_fraction": 0.7035040259361267, "avg_line_length": 40.33333206176758, "blob_id": "6b7c6354b80453cee6e86397d758a4b49d8a606a", "content_id": "eee14b1c5763ecd43c84e3d7639deafac6b32b76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 371, "license_type": "no_license", "max_line_length": 78, "num_lines": 9, "path": "/Seizure/popup.js", "repo_name": "FORSEN-ROCK/other", "src_encoding": "UTF-8", "text": "chrome.browserAction.onClicked.addListener(function(tab) {\n chrome.tabs.create({url:chrome.extension.getURL(\"option.html\")});\n});\n\ndocument.addEventListener(\"click\", function(tab) {\n chrome.tabs.create({url:\"background.html\"});\n //chrome.tabs.create({url:\"searchPageWidget.html\"});\n //chrome.windows.create({ url + \"option.html\", width: 520, height: 660 });\n});" } ]
12
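The resume-parser module archived in the record above is built around a declarative pattern: each site-specific class lists `Expression(tag=..., attribute=..., value=...)` selectors, and shared `_get_target`-style helpers resolve them against the fetched page. Note that several lookups there pass a set literal, e.g. `{self.target_age.attribute, self.target_age.value}`, where the `findAll`/`get_text` API in use (which matches BeautifulSoup's) expects an attrs dict such as `{attribute: value}`, so those filters are unlikely to match what was intended. Below is a minimal, self-contained sketch of the corrected pattern; `Expression` is modeled here as a namedtuple, and the class and selector names (`SimpleParser`, `target_title`, `title-link`) are illustrative assumptions, not taken from the source:

    from collections import namedtuple
    from bs4 import BeautifulSoup

    Expression = namedtuple('Expression', ['tag', 'attribute', 'value'])

    class SimpleParser:
        # Declarative selector, as in the site-specific classes above.
        target_title = Expression(tag='a', attribute='class', value='title-link')

        def _get_target(self, name, html):
            expr = getattr(self, 'target_%s' % name)
            # attrs must be a dict ({attribute: value}), not a set literal.
            node = html.find(expr.tag, {expr.attribute: expr.value})
            return node.get_text(strip=True) if node else None

    if __name__ == '__main__':
        soup = BeautifulSoup(
            '<div><a class="title-link" href="/r/1">Siebel developer</a></div>',
            'html.parser')
        print(SimpleParser()._get_target('title', soup))  # Siebel developer

Keeping the selectors declarative, as the archived module does, means adding a new source site is mostly a matter of defining a new class of `Expression` constants rather than writing new traversal logic.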
passive-anarchy/python-pi-example
https://github.com/passive-anarchy/python-pi-example
f94e54b0143ec3e45ecdfa7d3ef993e3ca9ed642
7ec5bcbf16820fe60cb95818c52aad3d0fa7bdc2
c58d60385582b3cce0e7262c9f82fb8ba37c98f7
refs/heads/main
2023-08-31T22:53:47.155684
2021-10-30T20:27:52
2021-10-30T20:27:52
422,601,031
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7749999761581421, "alphanum_fraction": 0.7749999761581421, "avg_line_length": 39, "blob_id": "a40ad77f1f163b9dc9fec8a8bb1a4deed8bf5ecd", "content_id": "6229a448cd69d820b063b6dd5a18b76e7aabe25a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 80, "license_type": "no_license", "max_line_length": 59, "num_lines": 2, "path": "/README.md", "repo_name": "passive-anarchy/python-pi-example", "src_encoding": "UTF-8", "text": "# python-pi-example\nthis is an example to demonstrate the use of git on Debian.\n" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.692307710647583, "avg_line_length": 12, "blob_id": "56ae273f644a64bbaab782488f60040d82a952cc", "content_id": "c66f0c21e80616ebb491b1ceeb107dec8e64bfc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26, "license_type": "no_license", "max_line_length": 20, "num_lines": 2, "path": "/main.py", "repo_name": "passive-anarchy/python-pi-example", "src_encoding": "UTF-8", "text": "print(\"Hello World\")\n#add\n" } ]
2
department-of-general-services/fmd_archibus_dashboard
https://github.com/department-of-general-services/fmd_archibus_dashboard
68ff2ca7b6f1a18cd02d0e1ec82389678eca7791
3153c317421799c9d39bf931ef3a0ce63c5d9fea
b634c58e50b00ae022062d5dab02d8478e6d9746
refs/heads/master
2023-07-18T09:45:18.694551
2021-09-03T18:07:25
2021-09-03T18:07:25
300,392,903
1
0
null
2020-10-01T19:02:10
2021-07-27T18:29:49
2021-09-03T18:07:25
TSQL
[ { "alpha_fraction": 0.7581047415733337, "alphanum_fraction": 0.7605984807014465, "avg_line_length": 43.44444274902344, "blob_id": "4f2b4ddf54dd6943cfc4fc27c33c9bb0a2933029", "content_id": "6959a5a7484aa838ffb3bb18f3642d5f102e8ed2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 401, "license_type": "no_license", "max_line_length": 227, "num_lines": 9, "path": "/README.md", "repo_name": "department-of-general-services/fmd_archibus_dashboard", "src_encoding": "UTF-8", "text": "# FMD Dashboard: V3\nContains XML files used for dashboarding in Archibus.\n\nTo create the dashboard from this directory, run:\n ```\n $ python wrap_for_prod.py\n ```\n\nThis will remove the backdating from SQL that is needed to test the dashboard and create a zipped archive in your /Documents folder. This zipped archive must be ported to the production environment to build the dashboard there. \n" }, { "alpha_fraction": 0.5817888975143433, "alphanum_fraction": 0.5858178734779358, "avg_line_length": 30.820512771606445, "blob_id": "776459862deb8c5db81464745469b960232fba92", "content_id": "627662bc002c60282334a61d0219dd399f7f14fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1241, "license_type": "no_license", "max_line_length": 67, "num_lines": 39, "path": "/backdate_for_test.py", "repo_name": "department-of-general-services/fmd_archibus_dashboard", "src_encoding": "UTF-8", "text": "from pathlib import Path\nimport re\n\n\ndef get_files(extensions):\n all_files = []\n for ext in extensions:\n all_files.extend(Path.cwd().glob(ext))\n return(all_files)\n\n\ndef loop_through_axvw_and_js(prod_dir, to_replace, replace_with):\n file_paths = get_files(['*.axvw', '*.js'])\n for file_path in file_paths:\n print(file_path.name)\n with open(file_path, \"r\") as f:\n axvw_text = f.read()\n prod_text = re.sub(to_replace, replace_with, axvw_text)\n with open(prod_dir / file_path.name, \"w\") as f:\n f.write(prod_text)\n return\n\n\ndef loop_through_axvw(prod_dir, to_replace, replace_with):\n for file_path in prod_dir.glob('*.axvw'):\n print(file_path.name)\n with open(file_path, \"r\") as f:\n axvw_text = f.read()\n # print(axvw_text[0:20])\n prod_text = re.sub(to_replace, replace_with, axvw_text)\n with open(prod_dir / file_path.name, \"w\") as f:\n f.write(prod_text)\n\n\nif __name__ == '__main__':\n prod_dir = Path.cwd() / \"kpis_dashboard\"\n to_replace = re.escape(\"DateAdd(year, -2, getDate())\")\n replace_with = r\"DateAdd(month, -1, getDate())\"\n loop_through_axvw(prod_dir, to_replace, replace_with)\n" }, { "alpha_fraction": 0.700486421585083, "alphanum_fraction": 0.7129951119422913, "avg_line_length": 22.983333587646484, "blob_id": "f0197917589a28df3b6e448a84e99f81819c1a98", "content_id": "2e11af5d3c04db0be88f47747b2509be5b48dd40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1439, "license_type": "no_license", "max_line_length": 94, "num_lines": 60, "path": "/backlog_dashboard/balt-bldgops-backlog-drilldown.js", "repo_name": "department-of-general-services/fmd_archibus_dashboard", "src_encoding": "UTF-8", "text": "function onDrillDownBacklogByPrimaryType(item) {\n\tvar panel = View.panels.get(\"chartDrillDown_backlog_by_primary_type\");\n\n\tpanel.addParameter('summaryValueForThisGroup', item.selectedChartData['wrhwr.primary_type']);\n\tpanel.refresh();\n\n\tpanel.showInWindow({\n\t\twidth: 800,\n\t\theight: 
300\n\t});\n\n}\n\n\nfunction onDrillDownBacklogBySupervisor(item) {\n\tvar panel = View.panels.get(\"chartDrillDown_backlog_by_supervisor\");\n\n\tpanel.addParameter('summaryValueForThisGroup', item.selectedChartData['wrhwr.supervisor']);\n\tpanel.refresh();\n\n\tpanel.showInWindow({\n\t\twidth: 800,\n\t\theight: 300\n\t});\n\n}\n\nfunction onDrillDownBacklogByStatus(item) {\n\tvar panel = View.panels.get(\"chartDrillDown_backlog_by_status\");\n\n\tconsole.log(item.selectedChartData['wrhwr.status'])\n\n\tif (item.selectedChartData['wrhwr.status'] === 'Assigned to Work Order') {\n\t\tbrief_status = 'AA'\n\t}\n\telse if (item.selectedChartData['wrhwr.status'] === 'Completed') {\n\t\tbrief_status = 'Com'\n\t}\n\telse if (item.selectedChartData['wrhwr.status'] === 'Closed') {\n\t\tbrief_status = 'Clo'\n\t}\n\telse if (item.selectedChartData['wrhwr.status'] === 'Issued and In Process') {\n\t\tbrief_status = 'I'\n\t}\n\telse if (item.selectedChartData['wrhwr.status'] === 'Stopped') {\n\t\tbrief_status = 'S'\n\t}\n\telse {\n\t\tbrief_status = item.selectedChartData['wrhwr.status']\n\t}\n\n\tpanel.addParameter('summaryValueForThisGroup', brief_status);\n\tpanel.refresh();\n\n\tpanel.showInWindow({\n\t\twidth: 800,\n\t\theight: 300\n\t});\n\n}\n" }, { "alpha_fraction": 0.7232502102851868, "alphanum_fraction": 0.7425583004951477, "avg_line_length": 20.084745407104492, "blob_id": "ee0974c5e07a1173363cb52f4301f0fd32060d9f", "content_id": "f350782dd17a26c9b2e96663bda70266d3052d1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1243, "license_type": "no_license", "max_line_length": 105, "num_lines": 59, "path": "/fmd_dashboard/balt-bldgops-fmd-divisional-drilldown.js", "repo_name": "department-of-general-services/fmd_archibus_dashboard", "src_encoding": "UTF-8", "text": "function onDrillDownCompleteToClose(item) {\n\tvar panel = View.panels.get(\"chartDrillDown_complete_to_close\");\n\n\tpanel.addParameter('summaryValueForThisGroup', item.selectedChartData['wrhwr.calendar_month_complete']);\n\n\tpanel.refresh();\n\n\tpanel.showInWindow({\n\t\twidth: 800,\n\t\theight: 300\n\t});\n\n}\n\n\nfunction onDrillDownByTimespan(item) {\n\tvar panel = View.panels.get(\"chartDrillDown_metrics\");\n\n\tpanel.addParameter('summaryTimespan', item.selectedChartData['wrhwr.timespan']);\n\tpanel.addParameter('summaryAction', item.selectedChartData['wrhwr.action']);\n\n\tpanel.refresh();\n\n\tpanel.showInWindow({\n\t\twidth: 800,\n\t\theight: 300\n\t});\n\n}\n\nfunction onDrillDownByCalMonth(item) {\n\tvar panel = View.panels.get(\"chartDrillDown_monthly_volume\");\n\n\tpanel.addParameter('summaryMonth', item.selectedChartData['wrhwr.calendar_month']);\n\tpanel.addParameter('summaryAction', item.selectedChartData['wrhwr.action']);\n\n\tpanel.refresh();\n\n\tpanel.showInWindow({\n\t\twidth: 800,\n\t\theight: 300\n\t});\n\n}\n\n\nfunction onDrillDownByTrade(item) {\n\tvar panel = View.panels.get(\"chartDrillDown_aging_by_trade\");\n\n\tpanel.addParameter('summaryValueForThisGroup', item.selectedChartData['wrhwr.trade']);\n\n\tpanel.refresh();\n\n\tpanel.showInWindow({\n\t\twidth: 800,\n\t\theight: 300\n\t});\n\n}" }, { "alpha_fraction": 0.5747076869010925, "alphanum_fraction": 0.5751407742500305, "avg_line_length": 27.158536911010742, "blob_id": "3fb9ccb823461665a210848a15b995d5525c0c16", "content_id": "8b18cfdc71df7eccfa7d9b1eab5a878d5fb0800d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2309, 
"license_type": "no_license", "max_line_length": 78, "num_lines": 82, "path": "/wrap_for_prod.py", "repo_name": "department-of-general-services/fmd_archibus_dashboard", "src_encoding": "UTF-8", "text": "from pathlib import Path\nimport re\nfrom datetime import datetime\nfrom shutil import make_archive\n\n\ndef del_dir_contents(root):\n for p in root.iterdir():\n if p.is_dir():\n del_dir_contents(p)\n else:\n p.unlink()\n for p in root.iterdir():\n if p.is_dir():\n p.rmdir()\n return\n\n\ndef set_up_dir(zip_dir, view_dir):\n if zip_dir.exists():\n del_dir_contents(zip_dir)\n else:\n zip_dir.mkdir()\n view_dir.mkdir()\n return\n\n\ndef get_files(path, extensions):\n all_files = []\n for ext in extensions:\n all_files.extend(path.glob(ext))\n return(all_files)\n\n\ndef loop_through_axvw_and_js(test_dir, prod_dir, to_replace, replace_with):\n file_paths = get_files(test_dir, ['*.axvw', '*.js', '*.sql'])\n for file_path in file_paths:\n print(file_path.name)\n with open(file_path, \"r\") as f:\n axvw_text = f.read()\n prod_text = re.sub(to_replace, replace_with, axvw_text)\n if test_dir.name == \"dashboards\":\n with open(prod_dir / file_path.name, \"w\") as f:\n f.write(prod_text)\n elif test_dir.parent.name == \"dashboards\":\n subdir = prod_dir / test_dir.name\n if not subdir.exists():\n subdir.mkdir()\n with open(subdir / file_path.name, \"w\") as f:\n f.write(prod_text)\n\n return\n\n\ndef zip_up_dash(zip_dir, view_dir):\n date_string = datetime.now().strftime(\"%Y%m%d\")\n zip_filename = f\"fmd_dashboard_{date_string}\"\n zip_path = zip_dir / zip_filename\n make_archive(zip_path, 'zip', view_dir)\n return\n\n\nif __name__ == \"__main__\":\n zip_dir = Path(r\"C:\\\\Users\\\\sa.james\\Documents\\\\fmd_dashboard\")\n view_dir = zip_dir / \"views\"\n set_up_dir(zip_dir, view_dir)\n\n to_replace = re.escape(r\"DateAdd(month, -1, getDate())\")\n replace_with = r\"getDate()\"\n test_dir = Path.cwd()\n loop_through_axvw_and_js(test_dir, view_dir, to_replace, replace_with)\n \n dash_dirs = [test_dir / \"backlog_dashboard\", \n test_dir / \"kpis_dashboard\",\n test_dir / \"fmd_dashboard\"]\n\n for dash_dir in dash_dirs: \n loop_through_axvw_and_js(dash_dir, view_dir, to_replace, replace_with)\n\n zip_up_dash(zip_dir, view_dir)\n\n print(\"Work completed.\")\n" }, { "alpha_fraction": 0.7346938848495483, "alphanum_fraction": 0.7551020383834839, "avg_line_length": 23.5, "blob_id": "0eb9ed0384da98425a326b454583fe7f5db32aa4", "content_id": "eedec9cb9a5c5e4882bf88df49cecf2754368192", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 294, "license_type": "no_license", "max_line_length": 105, "num_lines": 12, "path": "/kpis_dashboard/balt-bldgops-divisional-drilldown.js", "repo_name": "department-of-general-services/fmd_archibus_dashboard", "src_encoding": "UTF-8", "text": "function onDrillDownCMsByCalendarMonth(item) {\n\tvar panel = View.panels.get(\"chartDrillDown_pct_cms_ontime\");\n\n\tpanel.addParameter('summaryValueForThisGroup', item.selectedChartData['wrhwr.calendar_month_complete']);\n\tpanel.refresh();\n\n\tpanel.showInWindow({\n\t\twidth: 800,\n\t\theight: 300\n\t});\n\n}\n" } ]
6
UL-FRI-Zitnik/BSNLP-2021-Shared-Task
https://github.com/UL-FRI-Zitnik/BSNLP-2021-Shared-Task
257a4dd7632d0c83b540ef664bf800911f1a6287
5163242ff2e0b41d760afb3b9e9f4508f8dbaac6
116e4f583e70e1e40cfcfd1c2843fdb66b52108e
refs/heads/master
2023-03-13T06:52:40.400640
2021-03-04T16:36:34
2021-03-04T16:36:34
344,717,610
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.596707820892334, "alphanum_fraction": 0.596707820892334, "avg_line_length": 23.299999237060547, "blob_id": "a7e098116bceed32e74a6047b4e97d1abf67ebf8", "content_id": "0438e187c5702d3f1ade14ed258469c43c8a7bd6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 243, "license_type": "no_license", "max_line_length": 51, "num_lines": 10, "path": "/src/utils/utils.py", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "import os\n\n\ndef list_dir(dirpath: str) -> (list, list):\n files, dirs = [], []\n for dpath, dnames, fnames in os.walk(dirpath,):\n files.extend(fnames)\n dirs.extend(dnames)\n break\n return sorted(dirs), sorted(files)\n" }, { "alpha_fraction": 0.5666064023971558, "alphanum_fraction": 0.5762507319450378, "avg_line_length": 38.816001892089844, "blob_id": "27f036832b85b4303f0a1b092d54016310b28de8", "content_id": "9cfec94047f6e8e212c8c6315096179862e2e81c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9954, "license_type": "no_license", "max_line_length": 156, "num_lines": 250, "path": "/src/eval/predict.py", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "import json\nimport logging\nimport sys\nimport torch\nimport pandas as pd\nimport numpy as np\n\nfrom seqeval.metrics import f1_score, precision_score, recall_score, accuracy_score, classification_report\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler\nfrom keras.preprocessing.sequence import pad_sequences\nfrom transformers import AutoTokenizer, AutoModelForTokenClassification\nfrom transformers import PreTrainedModel, pipeline\nfrom collections import defaultdict\nfrom operator import itemgetter\nfrom tqdm import tqdm\n\nfrom src.utils.load_dataset import LoadBSNLP\n\n\nlogging.basicConfig(\n stream=sys.stdout,\n level=logging.INFO,\n format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s'\n)\nlogger = logging.getLogger('MakePrediction')\n\n\nclass ExtractPredictions:\n def __init__(\n self,\n tag2code: dict,\n code2tag: dict,\n model_path: str = f'./data/models/bert-base-multilingual-cased-other',\n ):\n \"\"\"\n A class to extract all the NE predictions from a given tokens\n :param model_path: path to a HuggingFace-transformers pre-trained model for the NER task, such as BERT Base Multilingual (Un)Cased\n \"\"\"\n self.model_path = model_path\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.model = AutoModelForTokenClassification.from_pretrained(\n model_path,\n output_attentions=False,\n output_hidden_states=False,\n num_labels=len(tag2code),\n label2id=tag2code,\n id2label=code2tag,\n ).to(self.device)\n self.tokenizer = AutoTokenizer.from_pretrained(\n model_path,\n from_pt=True,\n do_lower_case=False,\n use_fast=False\n )\n self.BATCH_SIZE = 32\n self.MAX_LENGTH = 128\n\n def convert_input(\n self,\n input_data: pd.DataFrame,\n tag2code: dict,\n ) -> (DataLoader, list):\n all_ids = []\n ids = [] # sentence ids\n tokens = [] # sentence tokens\n token_ids = [] # converted sentence tokens\n tags = [] # NER tags\n\n for (doc, sentence), data in input_data.groupby([\"docId\", \"sentenceId\"]):\n sentence_tokens = []\n sentence_tags = []\n sentence_ids = []\n for id, word_row in data.iterrows():\n word_tokens = self.tokenizer.tokenize(str(word_row[\"text\"]))\n sentence_tokens.extend(word_tokens)\n 
sentence_tags.extend([tag2code[word_row[\"ner\"]]] * len(word_tokens))\n token_id_str = f'{doc};{sentence};{word_row[\"tokenId\"]}'\n all_ids.append(token_id_str)\n token_id = len(all_ids) - 1\n sentence_ids.extend([token_id] * len(word_tokens))\n if len(sentence_tokens) != len(sentence_tags) != len(sentence_ids):\n raise Exception(\"Inconsistent output!\")\n ids.append(sentence_ids)\n tokens.append(sentence_tokens)\n sentence_token_ids = self.tokenizer.convert_tokens_to_ids(sentence_tokens)\n token_ids.append(sentence_token_ids)\n tags.append(sentence_tags)\n # padding is required to spill the sentence tokens in case there are sentences longer than 128 words\n # or to fill in the missing places to 128 (self.MAX_LENGTH)\n ids = torch.as_tensor(pad_sequences(\n ids,\n maxlen=self.MAX_LENGTH,\n dtype=\"long\",\n value=-1,\n truncating=\"post\",\n padding=\"post\"\n )).to(self.device)\n token_ids = torch.as_tensor(pad_sequences(\n token_ids,\n maxlen=self.MAX_LENGTH,\n dtype=\"long\",\n value=0.0,\n truncating=\"post\",\n padding=\"post\"\n )).to(self.device)\n tags = torch.as_tensor(pad_sequences(\n tags,\n maxlen=self.MAX_LENGTH,\n dtype=\"long\",\n value=tag2code[\"PAD\"],\n truncating=\"post\",\n padding=\"post\"\n )).to(self.device)\n masks = torch.as_tensor(np.array([[float(token != 0.0) for token in sentence] for sentence in token_ids])).to(self.device)\n data = TensorDataset(ids, token_ids, masks, tags)\n sampler = RandomSampler(data)\n return DataLoader(data, sampler=sampler, batch_size=self.BATCH_SIZE), all_ids\n\n def translate(\n self,\n predictions: list,\n labels: list,\n tokens: list,\n sent_ids: list,\n tag2code: dict,\n code2tag: dict,\n all_ids: list\n ) -> (list, list, list, list):\n translated_predictions, translated_labels, translated_tokens, translated_sentences = [], [], [], []\n for preds, labs, toks, ids in zip(predictions, labels, tokens, sent_ids):\n sentence_predictions, sentence_labels, sentence_tokens, sentence_ids = [], [], [], []\n for p, l, t, i in zip(preds, labs, toks, ids):\n if l == tag2code[\"PAD\"]:\n continue\n if p == tag2code[\"PAD\"]:\n logger.info(f\"PREDICTED `PAD`! 
{p}, {l}, {t}, {i}\")\n continue\n sentence_tokens.append(t)\n sentence_predictions.append(code2tag[p])\n sentence_labels.append(code2tag[l])\n sentence_ids.append(all_ids[i])\n translated_tokens.append(sentence_tokens)\n translated_predictions.append(sentence_predictions)\n translated_labels.append(sentence_labels)\n translated_sentences.append(sentence_ids)\n return translated_predictions, translated_labels, translated_tokens, translated_sentences\n\n def test(\n self,\n data: DataLoader,\n all_ids: list,\n tag2code: dict,\n code2tag: dict,\n ) -> (dict, pd.DataFrame):\n eval_loss = 0.\n eval_steps, eval_examples = 0, 0\n eval_ids, eval_tokens, eval_predictions, eval_labels = [], [], [], []\n self.model.eval()\n for batch in data:\n batch_ids, batch_tokens, batch_masks, batch_tags = tuple(t.to(self.device) for t in batch)\n with torch.no_grad():\n outputs = self.model(\n batch_tokens,\n attention_mask=batch_masks,\n labels=batch_tags\n )\n logits = outputs[1].detach().cpu().numpy()\n label_ids = batch_tags.to('cpu').numpy()\n toks = batch_tokens.to('cpu').numpy()\n sentence_ids = batch_ids.to('cpu').numpy()\n\n eval_loss += outputs[0].mean().item()\n toks = [self.tokenizer.convert_ids_to_tokens(sentence) for sentence in toks]\n eval_tokens.extend(toks)\n eval_predictions.extend([list(p) for p in np.argmax(logits, axis=2)])\n eval_labels.extend(label_ids)\n eval_ids.extend(sentence_ids)\n\n eval_examples += batch_tokens.size(0)\n eval_steps += 1\n eval_loss = eval_loss / eval_steps\n flatten = lambda x: [j for i in x for j in i]\n\n predicted_tags, valid_tags, tokens, sentence_ids = self.translate(eval_predictions, eval_labels, eval_tokens, eval_ids, tag2code, code2tag, all_ids)\n\n # for st, sp, sv, vi in zip(tokens, predicted_tags, valid_tags, sentence_ids):\n # for t, p, v, i in zip(st, sp, sv, vi):\n # logger.info(f\"row = {t}, {p}, {v}, {i}\")\n\n predicted_data = pd.DataFrame(data={\n 'sentence_id': flatten(sentence_ids),\n 'tokens': flatten(tokens),\n 'predicted_tag': flatten(predicted_tags),\n 'valid_tag': flatten(valid_tags),\n })\n\n if len([tag for sent in valid_tags for tag in sent if tag[:2] in ['B-', 'I-']]) == 0:\n valid_tags.append([\"O\"])\n predicted_tags.append([\"B-ORG\"])\n\n\n scores = {\n \"loss\": eval_loss,\n \"acc\": accuracy_score(valid_tags, predicted_tags),\n \"f1\": f1_score(valid_tags, predicted_tags),\n \"p\": precision_score(valid_tags, predicted_tags),\n \"r\": recall_score(valid_tags, predicted_tags),\n \"report\": classification_report(valid_tags, predicted_tags),\n }\n\n return scores, predicted_data\n\n def __merge_data(self,\n data: pd.DataFrame,\n pred_data: pd.DataFrame,\n ) -> pd.DataFrame:\n data['calcNER'] = ''\n for sent_id, sent_data in pred_data.groupby('sentence_id'):\n ids = sent_id.split(';')\n did = ids[0]\n sid = int(ids[1])\n tid = int(ids[2])\n max_cat = max(sent_data['predicted_tag'].value_counts().to_dict().items(), key=itemgetter(1))[0]\n data.loc[(data['docId'] == did) & (data['sentenceId'] == sid) & (data['tokenId'] == tid), 'calcNER'] = max_cat\n return data\n\n def predict(self,\n data: pd.DataFrame,\n tag2code: dict,\n code2tag: dict,\n ) -> (dict, pd.DataFrame):\n in_data, ids = self.convert_input(data, tag2code)\n scores, pred_data = self.test(in_data, ids, tag2code, code2tag)\n merged = self.__merge_data(data, pred_data)\n return scores, merged\n\n\nif __name__ == '__main__':\n # model_path = f'./data/models/bert-base-multilingual-cased-other'\n model_path = 
'./data/runs/run_2021-02-17T11:42:19_slo-models/models/sloberta-1.0-bsnlp-2021-5-epochs'\n tag2code, code2tag = LoadBSNLP(lang='sl', year='2021', merge_misc=False).encoding()\n logger.info(f'{tag2code}')\n logger.info(f'{code2tag}')\n loader = LoadBSNLP(lang=\"sl\", year='2021', merge_misc=False)\n predictor = ExtractPredictions(model_path)\n data = loader.test()\n scores, pred_data = predictor.predict(data, tag2code, code2tag)\n logger.info(f'{json.dumps(scores, indent=4)}')\n logger.info(f'\\n{scores[\"report\"]}')\n logger.info(f'\\n{pred_data}')\n" }, { "alpha_fraction": 0.7786885499954224, "alphanum_fraction": 0.7786885499954224, "avg_line_length": 16.428571701049805, "blob_id": "46e291ce396dfafe5efcc9a4c835c7c4975e9703", "content_id": "dab4a0df103c12acdd9a5850c523300a47c699e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 122, "license_type": "no_license", "max_line_length": 37, "num_lines": 7, "path": "/bin/exec-setup.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\necho \"Installing python dependencies\"\npip install -r requirements.txt\n\napt update\napt install -y default-jre\n" }, { "alpha_fraction": 0.7577319741249084, "alphanum_fraction": 0.7749140858650208, "avg_line_length": 40.57143020629883, "blob_id": "ffa0e7fe0bfa7e07578910a35d56ebee9a81b1f9", "content_id": "7a930949890d6871f517472fe8801e56116216a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 582, "license_type": "no_license", "max_line_length": 99, "num_lines": 14, "path": "/bin/singularity-commands.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "# build a new container:\nsingularity build <local_target>.sif docker://<URL>\n# e.g.\nsingularity build ./containers/container.sif docker://pytorch/pytorch:1.6.0-cuda10.1-cudnn7-runtime\n\n# install dependencies within the container\nsingularity exec ./containers/container.sif pip install -r requirements.txt\n\n# run the container\nsingularity run --nv ./containers/sing-container.sif\n\n# submit a job to SLURM using a singularity container\nsbatch -p compute --gres=gpu:1 --parsable ./bin/run-singularity-container.sh\nsbatch -p compute -c 10 --parsable ./bin/run-singularity-container.sh\n" }, { "alpha_fraction": 0.5917490124702454, "alphanum_fraction": 0.5971924066543579, "avg_line_length": 40.80239486694336, "blob_id": "74a8e9fb041797663e9f6cb61e82b757f53c2c43", "content_id": "5b167acbde97381a041ef307ca243e4f314f8baa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6981, "license_type": "no_license", "max_line_length": 201, "num_lines": 167, "path": "/src/eval/model_eval.py", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "import json\nimport argparse\nimport tqdm\nimport logging\nimport sys\nimport pandas as pd\nimport random\n\nfrom collections import defaultdict\n\nfrom src.eval.predict import ExtractPredictions\nfrom src.utils.load_documents import LoadBSNLPDocuments\nfrom src.utils.load_dataset import LoadBSNLP\nfrom src.utils.update_documents import UpdateBSNLPDocuments\nfrom src.utils.utils import list_dir\n\npd.set_option('display.max_rows', None)\npd.set_option('display.max_columns', None)\n\n\nlogging.basicConfig(\n stream=sys.stdout,\n level=logging.DEBUG,\n format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s'\n)\nlogger = 
logging.getLogger('TrainEvalModels')\n\nDEBUG = False\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--lang', type=str, default='all')\n parser.add_argument('--year', type=str, default='all')\n parser.add_argument('--merge-misc', action='store_true')\n parser.add_argument('--run-path', type=str, default=None)\n return parser.parse_args()\n\n\ndef group_sentences(document: list) -> dict:\n sentences = defaultdict(lambda: \"\")\n for token in document:\n sentences[token['sentenceId']] = f\"{sentences[token['sentenceId']]} {token['text']}\"\n return dict(sentences)\n\ndef get_label_dicts(path: str) -> (dict, dict):\n with open(f'{path}/config.json') as f:\n config = json.load(f)\n code2tag = {int(k): v for k, v in config['id2label'].items()}\n return config['label2id'], code2tag\n\n\ndef looper(\n run_path: str,\n clang: str,\n model: str,\n year: str,\n categorize_misc: bool = False,\n) -> dict:\n loader = LoadBSNLPDocuments(lang=clang, year=year) \n\n model_name = model.split('/')[-1]\n logger.info(f\"Predicting for {model_name}\")\n model_path = f'{run_path}/models/{model}'\n \n tag2code, code2tag = get_label_dicts(model_path)\n misctag2code, misccode2tag = {}, {}\n\n logger.info(f\"tag2code: {tag2code}\")\n logger.info(f\"code2tag: {code2tag}\")\n\n misc_model, _ = list_dir(f'{run_path}/misc_models')\n if categorize_misc:\n logger.info(f\"Using misc model: {misc_model[0]}\")\n misctag2code, misccode2tag = get_label_dicts(f'{run_path}/misc_models/{misc_model[0]}')\n logger.info(f\"misctag2code: {misctag2code}\")\n logger.info(f\"misccode2tag: {misccode2tag}\")\n\n predictor = ExtractPredictions(model_path=model_path, tag2code=tag2code, code2tag=code2tag)\n pred_misc = None if not categorize_misc else ExtractPredictions(model_path=f'./{run_path}/misc_models/{misc_model[0]}', tag2code=misctag2code, code2tag=misccode2tag)\n\n updater = UpdateBSNLPDocuments(lang=clang, year=year, path=f'{run_path}/predictions/bsnlp/{model_name}')\n predictions = {}\n data = loader.load_merged()\n tdset = tqdm.tqdm(data.items(), desc=\"Dataset\")\n scores = []\n for dataset, langs in tdset:\n tdset.set_description(f'Dataset: {dataset}')\n tlang = tqdm.tqdm(langs.items(), desc=\"Language\")\n predictions[dataset] = {}\n for lang, docs in tlang:\n predictions[dataset][lang] = {}\n tlang.set_description(f'Lang: {tlang}')\n for docId, doc in tqdm.tqdm(docs.items(), desc=\"Docs\"):\n to_pred = pd.DataFrame(doc['content'])\n if categorize_misc:\n # categorize the PRO and EVT to MISC, as the model only knows about it\n to_pred.loc[to_pred['ner'].isin(['B-PRO', 'B-EVT']), 'ner'] = f'B-MISC'\n to_pred.loc[to_pred['ner'].isin(['I-PRO', 'I-EVT']), 'ner'] = f'I-MISC'\n doc_scores, pred_data = predictor.predict(to_pred, tag2code, code2tag)\n doc_scores['id'] = f'{lang};{docId}'\n scores.append(doc_scores)\n # if pred_misc is not None and :\n if categorize_misc and len(pred_data.loc[pred_data['calcNER'].isin(['B-MISC', 'I-MISC'])]) > 0:\n misc_data = pd.DataFrame(doc['content'])\n if len(misc_data.loc[~(misc_data['ner'].isin(['B-MISC', 'I-MISC']))]) > 0:\n # randomly choose a category for (B|I)-MISC category\n cat = random.choice(['PRO', 'EVT'])\n misc_data.loc[(misc_data['ner'] == 'B-MISC'), 'ner'] = f'B-{cat}'\n misc_data.loc[(misc_data['ner'] == 'I-MISC'), 'ner'] = f'I-{cat}'\n misc_data.loc[~(misc_data['ner'].isin(['B-PRO', 'B-EVT', 'I-PRO', 'I-EVT'])), 'ner'] = 'O'\n _, misc_pred = pred_misc.predict(misc_data, misctag2code, misccode2tag)\n # pred_data['ner'] = 
pd.DataFrame(doc['content'])['ner']\n # update the entries\n # update wherever there is misc in the original prediction\n pred_data.loc[pred_data['calcNER'].isin(['B-MISC', 'I-MISC']), 'calcNER'] = misc_pred.loc[pred_data['calcNER'].isin(['B-MISC', 'I-MISC']), 'calcNER']\n # update wherever the new predictor made a prediction\n pred_data.loc[misc_pred['calcNER'].isin(['B-PRO', 'B-EVT', 'I-PRO', 'I-EVT']), 'calcNER'] = misc_pred.loc[misc_pred['calcNER'].isin(['B-PRO', 'B-EVT', 'I-PRO', 'I-EVT']), 'calcNER']\n doc['content'] = pred_data.to_dict(orient='records')\n miscs = [r['calcNER'] for r in doc['content'] if r['calcNER'] in ['B-MISC', 'I-MISC']]\n if len(miscs) > 0:\n raise Exception(f\"STILL MORE MISCS??? {docId}, {miscs}\")\n predictions[dataset][lang][docId] = pred_data.loc[~(pred_data['calcNER'] == 'O')].to_dict(orient='records')\n updater.update_merged(data)\n logger.info(f\"Done predicting for {model_name}\")\n return {\n 'model': model_name,\n 'preds': predictions,\n }, scores\n\n\ndef main():\n args = parse_args()\n run_path = args.run_path if args.run_path is not None else \"./data/models/\"\n lang = args.lang\n year = args.year\n merge_misc = args.merge_misc\n\n print(f\"Run path: {run_path}\")\n print(f\"Langs: {lang}\")\n print(f\"Year: {year}\")\n print(f\"Merge misc: {merge_misc}\")\n\n models, _ = list_dir(f'{run_path}/models')\n logger.info(f\"Models to predict: {json.dumps(models, indent=4)}\")\n\n # tmodel = tqdm.tqdm(list(map(lambda x: (run_path, lang, x), models)), desc=\"Model\")\n # predictions = pool.map(looper, tmodel)\n # predictions = list(map(looper, tmodel))\n predictions = []\n doc_scores = {}\n for model in tqdm.tqdm(models, desc=\"Model\"):\n logger.info(f\"Model: {model}\")\n preds, scores = looper(run_path, lang, model,year, merge_misc)\n predictions.append(preds)\n doc_scores[model]= scores\n # logger.info(predictions)\n \n with open(f'{run_path}/all_predictions.json', 'w') as f:\n json.dump(predictions, f)\n with open(f'{run_path}/all_scores.json', 'w') as f:\n json.dump(predictions, f)\n logger.info(\"Done.\")\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.7412587404251099, "alphanum_fraction": 0.7412587404251099, "avg_line_length": 27.600000381469727, "blob_id": "996a86ca110250312a85948b2e3c75edc3456c3c", "content_id": "55c2d6e075851bd38f4000affae26cb2b350b7c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 143, "license_type": "no_license", "max_line_length": 63, "num_lines": 5, "path": "/bin/exec-clustering.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -euo pipefail\n\necho \"Starting the BSNLP clustering process...\"\nPYTHONPATH=. python src/matching/match_dedupe-all-chars.py \"$@\"\n" }, { "alpha_fraction": 0.7251908183097839, "alphanum_fraction": 0.7251908183097839, "avg_line_length": 25.200000762939453, "blob_id": "32fb1ab4707700307bc2a2d3a116d70610c7f46b", "content_id": "9eb1d6e309b031e657fdeb778abb8cb8b407b9db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 131, "license_type": "no_license", "max_line_length": 55, "num_lines": 5, "path": "/bin/exec-annotate.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -euo pipefail\n\necho \"Merging and annotating the files ...\"\nPYTHONPATH=. 
python src/transform/annotate_docs.py \"$@\"\n" }, { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 25.399999618530273, "blob_id": "b624a037d4c726252f85331eda2dc0aaf62e25dc", "content_id": "286e3c7ef4e402631b5d3e974519889880176a91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 132, "license_type": "no_license", "max_line_length": 52, "num_lines": 5, "path": "/bin/exec-pred.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -euo pipefail\n\necho \"Starting the BERT model prediction process...\"\nPYTHONPATH=. python src/eval/model_eval.py \"$@\"\n" }, { "alpha_fraction": 0.5316859483718872, "alphanum_fraction": 0.5395243763923645, "avg_line_length": 32.60268020629883, "blob_id": "0710f3084bd93952aa0436d46c59cff80179dea2", "content_id": "0b05d7228581aaf860fed8893dcc5faa0643c432", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7527, "license_type": "no_license", "max_line_length": 133, "num_lines": 224, "path": "/src/utils/load_dataset.py", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport pyconll\nimport numpy as np\n\nfrom src.utils.utils import list_dir\nfrom typing import Union\n\n# pd.set_option('display.max_rows', None) # only for debugging purposes\n\n\nclass LoadDataset:\n def __init__(self, base_fname: str, format: str, print_debug: bool = False):\n self.base_fname = base_fname\n self.data_format = format\n self.print_debug = print_debug\n\n def load(self, dset: str) -> pd.DataFrame:\n return pd.DataFrame()\n\n def train(self) -> pd.DataFrame:\n return pd.DataFrame()\n\n def dev(self) -> pd.DataFrame:\n \"\"\"\n This is the validation data\n \"\"\"\n return pd.DataFrame()\n\n def test(self) -> pd.DataFrame:\n return pd.DataFrame()\n\n def load_all(self) -> pd.DataFrame:\n return pd.concat([\n self.train(),\n self.dev(),\n self.test()\n ])\n\n def encoding(self) -> (dict, dict):\n data = self.train()\n possible_tags = np.append(data[\"ner\"].unique(), [\"PAD\"])\n tag2code = {tag: code for code, tag in enumerate(possible_tags)}\n code2tag = {val: key for key, val in tag2code.items()}\n return tag2code, code2tag\n\n\nclass LoadSSJ500k(LoadDataset):\n def __init__(self):\n super().__init__(\n \"data/datasets/ssj500k/\",\n \"conll\"\n )\n\n def load(self, dset: str) -> pd.DataFrame:\n raw_data = pyconll.load_from_file(f\"{self.base_fname}{dset}_ner.conllu\")\n data = []\n for id, sentence in enumerate(raw_data):\n for word in sentence:\n if word.upos == 'PROPN': # check if the token is a NER\n annotation = list(word.misc.keys())[0]\n data.append({\"docId\": \"xxx\", \"text\": word.form, \"sentenceId\": id, \"ner\": annotation.upper()})\n # NOTE: we cannot use the just <TYPE> annotation without `B-` (begin) or `I-` (inside) `<TYPE>`\n # because we would not be compliant with the CoNLL format\n else:\n data.append({\"docId\": \"xxx\", \"text\": word.form, \"sentenceId\": id, \"ner\": \"O\"})\n return pd.DataFrame(data)\n\n def train(self) -> pd.DataFrame:\n return self.load('train')\n\n def dev(self) -> pd.DataFrame:\n return self.load('dev')\n\n def test(self) -> pd.DataFrame:\n return self.load('test')\n\n\nclass LoadBSNLP(LoadDataset):\n available_langs = ['bg', 'cs', 'pl', 'ru', 'sl', 'uk']\n datasets = {\n \"2017\": [\"ec\", \"trump\"],\n \"2021\": [\"asia_bibi\", \"brexit\", \"nord_stream\", 
\"other\", \"ryanair\"],\n \"all\": [\"ec\", \"trump\", \"asia_bibi\", \"brexit\", \"nord_stream\", \"other\", \"ryanair\"],\n \"test_2021\": [\"covid-19\", \"us_election_2020\"],\n }\n\n def __init__(\n self,\n lang: str = 'all',\n year: str = 'all',\n data_set: str = 'all',\n exclude: Union[str, None] = None,\n merge_misc: bool = True,\n misc_data_only: bool = False,\n print_debug: bool = False\n ):\n super().__init__(\n \"data/datasets/bsnlp\",\n \"csv\",\n print_debug=print_debug,\n )\n # assert year\n if year not in self.datasets:\n raise Exception(f\"Invalid year chosen: {year}\")\n\n # assert dataset\n if data_set in self.datasets[year]:\n self.data_set = [data_set]\n elif data_set == 'all':\n self.data_set = self.datasets[year]\n else:\n raise Exception(f\"Invalid dataset chosen: {data_set}\")\n\n if exclude is not None:\n if print_debug: print(f\"Excluding {exclude}\")\n self.data_set = [ds for ds in self.data_set if ds != exclude]\n\n if not self.data_set:\n raise Exception(f\"Empty data set chosen? {self.data_set}\")\n\n # assert language\n if lang in self.available_langs:\n self.langs = [lang]\n elif lang == 'all':\n self.langs = self.available_langs\n else:\n raise Exception(f\"Invalid language option: {lang}\")\n\n self.random_state = 42\n self.merge_misc = merge_misc\n if merge_misc and misc_data_only:\n print(\"WARNING: weird combination? merge misc and misc data only?\")\n self.misc_data_only = misc_data_only\n\n def load(self, dset: str) -> pd.DataFrame:\n dirs, _ = list_dir(self.base_fname)\n data = pd.DataFrame()\n for dataset in dirs:\n if dataset not in self.data_set:\n continue\n for lang in self.langs:\n fname = f\"{self.base_fname}/{dataset}/splits/{lang}/{dset}_{lang}.csv\"\n try:\n df = pd.read_csv(f\"{fname}\")\n except:\n if self.print_debug: print(f\"[{dataset}] skipping {lang}.\")\n continue\n df['sentenceId'] = df['docId'].astype(str) + ';' + df['sentenceId'].astype('str') # + '-' + df['tokenId'].astype(str)\n if self.merge_misc:\n df['ner'] = df['ner'].map(lambda x: x.replace(\"PRO\", \"MISC\").replace(\"EVT\", \"MISC\"))\n if self.misc_data_only:\n df['ner'] = df['ner'].map(lambda x: \"O\" if x[2:] in [\"PER\", \"LOC\", \"ORG\"] else x)\n data = pd.concat([data, df])\n return data\n\n def train(self) -> pd.DataFrame:\n return self.load('train')\n\n def dev(self) -> pd.DataFrame:\n \"\"\"\n This is the validation data\n \"\"\"\n return self.load('dev')\n\n def test(self) -> pd.DataFrame:\n return self.load('test')\n\n\nclass LoadCombined(LoadDataset):\n def __init__(self, loaders: list):\n super().__init__(\n f\"combined_datasets:{','.join([l.base_fname for l in loaders])}\",\n \"csv\"\n )\n self.random_state = 42\n self.loaders = loaders\n\n def load(self, set: str) -> pd.DataFrame:\n return pd.DataFrame()\n\n def train(self) -> pd.DataFrame:\n data = pd.DataFrame()\n for loader in self.loaders:\n loader_data = loader.train()\n data = pd.concat([data, loader_data])\n return data\n\n def dev(self) -> pd.DataFrame:\n data = pd.DataFrame()\n for loader in self.loaders:\n loader_data = loader.dev()\n data = pd.concat([data, loader_data])\n return data\n\n def test(self) -> pd.DataFrame:\n data = pd.DataFrame()\n for loader in self.loaders:\n loader_data = loader.test()\n data = pd.concat([data, loader_data])\n return data\n\n\nif __name__ == '__main__':\n loader = LoadBSNLP(lang=\"all\", year='2021', merge_misc=False)\n # loader = LoadSSJ500k()\n # loader = LoadCombined([LoadBSNLP(\"sl\"), LoadSSJ500k()])\n tag2code, code2tag = loader.encoding()\n 
print(f\"tag2code: {tag2code}\")\n print(f\"code2tag: {code2tag}\")\n\n train_data = loader.train()\n # print(train_data.head(10))\n print(f\"Train data: {train_data.shape[0]}, NERs: {train_data.loc[train_data['ner'] != 'O'].shape[0]}\")\n print(train_data['ner'].value_counts())\n print(train_data.value_counts())\n # print(train_data)\n \n dev_data = loader.dev()\n print(f\"Validation data: {dev_data.shape[0]}, NERs: {dev_data.loc[dev_data['ner'] != 'O'].shape[0]}\")\n print(dev_data['ner'].value_counts())\n \n test_data = loader.test()\n print(f\"Test data: {test_data.shape[0]}, NERs: {test_data.loc[test_data['ner'] != 'O'].shape[0]}\")\n print(test_data['ner'].value_counts())\n" }, { "alpha_fraction": 0.6001326441764832, "alphanum_fraction": 0.6034482717514038, "avg_line_length": 27.452829360961914, "blob_id": "650f62dcafe2b65de56b2ff3c5b04214206b0cf9", "content_id": "938c1c4b7d37a7d8e74a01becc439d2764e7df34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3016, "license_type": "no_license", "max_line_length": 77, "num_lines": 106, "path": "/src/transform/create_splits.py", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "import os\nimport json\nimport pandas as pd\nimport pathlib\nimport shutil\n\nfrom src.utils.utils import list_dir\nfrom sklearn.model_selection import train_test_split\n\n# TODO: add different seed option\nrandom_state = 42\nTRAIN_SIZE = 0.8\n\n\ndef join_docs(path: str, docs: list) -> pd.DataFrame:\n joined = pd.DataFrame()\n for doc in docs:\n df = pd.read_csv(f'{path}/{doc[\"merged_fname\"]}')\n joined = pd.concat([joined, df])\n return joined\n\n\ndef copy_annotations(\n docs: list,\n path: str,\n):\n print(f\"Copying annotations to {path}\")\n pathlib.Path(path).mkdir(parents=True, exist_ok=True)\n for doc in docs:\n old_doc_path = pathlib.Path(doc['annotated'])\n ann_name = doc[\"ann_fname\"]\n if ann_name[-4:] != '.out':\n ann_name = f'{ann_name}.out'\n new_doc_path = pathlib.Path(f'{path}/{ann_name}')\n shutil.copy(old_doc_path, new_doc_path)\n\n\ndef join_files(files: list, docs: list) -> list:\n for doc in docs:\n joined = False\n for file in files:\n if file[:-4] == doc['raw_fname'][:-4]:\n doc['merged_fname'] = file\n joined = True\n break\n if not joined:\n print(f\"[ERROR] No merged file for {doc}\")\n return docs\n\n\ndef create_split(\n dataset_dir: str,\n lang: str,\n docs: list,\n split_path: str,\n) -> None:\n path = f\"{dataset_dir}/merged/{lang}\"\n out_path = f\"{dataset_dir}/splits/{lang}/\"\n dataset_name = dataset_dir.split('/')[-1]\n print(path)\n _, files = list_dir(path)\n joined = join_files(files, docs)\n train_docs, test_docs = train_test_split(\n joined,\n train_size=TRAIN_SIZE,\n random_state=random_state,\n )\n train_docs, val_docs = train_test_split(\n joined,\n test_size=TRAIN_SIZE * 0.1,\n random_state=random_state,\n )\n # print(len(files), len(train_docs), len(val_docs), len(test_docs))\n train_data = join_docs(path, train_docs)\n val_data = join_docs(path, val_docs)\n test_data = join_docs(path, test_docs)\n\n if not os.path.exists(out_path):\n os.mkdir(out_path)\n print(f\"Saving to: {out_path}\")\n train_data.to_csv(f'{out_path}/train_{lang}.csv', index=False)\n val_data.to_csv(f'{out_path}/dev_{lang}.csv', index=False)\n test_data.to_csv(f'{out_path}/test_{lang}.csv', index=False)\n\n copy_annotations(train_docs, f'{split_path}/train/{dataset_name}/{lang}')\n copy_annotations(val_docs, f'{split_path}/dev/{dataset_name}/{lang}')\n 
copy_annotations(test_docs, f'{split_path}/test/{dataset_name}/{lang}')\n\n\ndef create_splits(\n datasets: dict,\n split_path: str\n) -> None:\n for dataset, langs in datasets.items():\n for lang, docs in langs.items():\n create_split(dataset, lang, docs, split_path)\n\n\ndef main():\n split_path = './data/datasets/bsnlp_splits'\n datasets = json.load(open('./data/results/dataset_pairs.json'))\n create_splits(datasets, split_path)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6866952776908875, "alphanum_fraction": 0.6909871101379395, "avg_line_length": 22.299999237060547, "blob_id": "c8c126283f7994dd5348d7a441c2f8d729deb59a", "content_id": "eaf41652bee79b407223c915e8f8012b68f120b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 233, "license_type": "no_license", "max_line_length": 61, "num_lines": 10, "path": "/bin/run-container.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nCONTAINER_IMAGE_PATH=\"$PWD/containers/pytorch-image-new.sqfs\"\n\nsrun \\\n\t--gpus=1\\\n\t--container-image \"$CONTAINER_IMAGE_PATH\" \\\n\t--container-save \"$CONTAINER_IMAGE_PATH\" \\\n\t--container-mounts .:/workspace \\\n\t--pty bash -l\n" }, { "alpha_fraction": 0.7333333492279053, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 26, "blob_id": "26ffce2b2a987d638442a4a3eddfa2c998470fc2", "content_id": "ea91e82f1a191de368d282fef129f5690c0677d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 135, "license_type": "no_license", "max_line_length": 55, "num_lines": 5, "path": "/bin/exec-join-clusters.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -euo pipefail\n\necho \"Starting the BSNLP clustering process...\"\nPYTHONPATH=. 
python src/utils/join_pred_cluster.py \"$@\"\n" }, { "alpha_fraction": 0.47629138827323914, "alphanum_fraction": 0.48370862007141113, "avg_line_length": 32.40707778930664, "blob_id": "f046e94d6c598dba1bca8f78609d9c716038bba1", "content_id": "ff6cfeda2f4d20ff60eadab8b2ccc198045fca56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3775, "license_type": "no_license", "max_line_length": 96, "num_lines": 113, "path": "/src/utils/load_documents.py", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "import json\nimport pandas as pd\nfrom typing import Callable\n\nfrom src.utils.utils import list_dir\n\n\nclass LoadDocuments:\n def __init__(self, path):\n self.path = path\n\n\nclass LoadBSNLPDocuments(LoadDocuments):\n def __init__(\n self,\n year: str = 'all',\n lang: str = 'all',\n path: str = './data/datasets/bsnlp',\n ) -> None:\n super(LoadBSNLPDocuments, self).__init__(\n path=path\n )\n datasets = {\n \"2017\": [\"ec\", \"trump\"],\n \"2021\": [\"asia_bibi\", \"brexit\", \"nord_stream\", \"other\", \"ryanair\"],\n \"all\": [\"ec\", \"trump\", \"asia_bibi\", \"brexit\", \"nord_stream\", \"other\", \"ryanair\"],\n \"test_2021\": [\"covid-19\", \"us_election_2020\"],\n }\n if year not in datasets:\n raise Exception(f\"Invalid subset chosen: {year}\")\n self.dirs = datasets[year]\n available_langs = ['bg', 'cs', 'pl', 'ru', 'sl', 'uk']\n if lang in available_langs:\n self.langs = [lang]\n elif lang == 'all':\n self.langs = available_langs\n else:\n raise Exception(\"Invalid language option.\")\n\n def load(\n self,\n ftype: str,\n fun: Callable # NOTE: all functions must return `dict` type with `docId` available\n ) -> dict:\n data = {}\n for dataset in self.dirs:\n data[dataset] = {}\n for lang in self.langs:\n data[dataset][lang] = {}\n path = f'{self.path}/{dataset}/{ftype}/{lang}'\n _, files = list_dir(path)\n for fname in files:\n result = fun(f'{path}/{fname}')\n result['fname'] = fname\n data[dataset][lang][result['docId']] = result\n return data\n\n def load_raw(self) -> dict:\n def raw_loader(fpath: str) -> dict:\n data = {}\n with open(fpath) as f:\n lines = f.readlines()\n data['docId'] = lines[0].strip()\n data['lang'] = lines[1].strip()\n data['created'] = lines[2].strip()\n data['url'] = lines[3].strip()\n data['title'] = lines[4].strip()\n content = ' '.join([line.strip() for line in lines[4:]])\n data['content'] = content\n return data\n return self.load('raw', raw_loader)\n\n def load_merged(self) -> dict:\n def merged_loader(fpath: str) -> dict:\n df = pd.read_csv(fpath, dtype={'docId': str, 'clID': str}).to_dict(orient='records')\n docId = df[0]['docId']\n return {\n 'docId': docId,\n 'content': df\n }\n return self.load('merged', merged_loader)\n\n def load_predicted(self, folder: str = 'predicted') -> dict:\n def predicted_loader(fpath: str) -> dict:\n df = pd.read_csv(fpath)\n docId = df.iloc[0]['docId']\n return {\n 'docId': docId,\n 'content': df\n }\n return self.load(folder, predicted_loader)\n\n def load_annotated(self):\n def annotated_loader(fpath: str) -> dict:\n docId = open(fpath).readline().strip()\n data = pd.read_csv(\n fpath,\n header=None,\n skiprows=[0],\n delimiter='\\t',\n names=['Mention', 'Base', 'Category', 'clID']\n )\n return {\n 'docId': docId,\n 'content': data.to_dict(orient='records'),\n }\n return self.load('annotated', annotated_loader)\n\n\nif __name__ == '__main__':\n doc_loader = LoadBSNLPDocuments(lang='sl')\n res = doc_loader.load_annotated()\n 
print(json.dumps(res, indent=4))\n" }, { "alpha_fraction": 0.49817296862602234, "alphanum_fraction": 0.5035322904586792, "avg_line_length": 36.318180084228516, "blob_id": "ada2dee22c76630c2007677b352b979a3d68ec76", "content_id": "363745f663b362f7eb890391bcca77ac1068d8bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4105, "license_type": "no_license", "max_line_length": 94, "num_lines": 110, "path": "/src/utils/update_documents.py", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "import pandas as pd\n\nfrom typing import Callable\nfrom pathlib import Path\n\n\nclass UpdateDocuments:\n def __init__(self, path):\n self.path = path\n\n\nclass UpdateBSNLPDocuments(UpdateDocuments):\n def __init__(\n self,\n year: str = 'all',\n lang: str = 'all',\n path: str = './data/datasets/bsnlp',\n ) -> None:\n super(UpdateBSNLPDocuments, self).__init__(\n path=path\n )\n datasets = {\n \"2017\": [\"ec\", \"trump\"],\n \"2021\": [\"asia_bibi\", \"brexit\", \"nord_stream\", \"other\", \"ryanair\"],\n \"all\": [\"ec\", \"trump\", \"asia_bibi\", \"brexit\", \"nord_stream\", \"other\", \"ryanair\"],\n \"test_2021\": [\"covid-19\", \"us_election_2020\"],\n }\n if year not in datasets:\n raise Exception(f\"Invalid subset chosen: {year}\")\n self.dirs = datasets[year]\n available_langs = ['bg', 'cs', 'pl', 'ru', 'sl', 'uk']\n if lang in available_langs:\n self.langs = [lang]\n elif lang == 'all':\n self.langs = available_langs\n else:\n raise Exception(\"Invalid language option.\")\n\n def __update(\n self,\n ftype: str,\n data: dict,\n fun: Callable\n ) -> None:\n for dataset, langs in data.items():\n if dataset not in self.dirs:\n raise Exception(f\"Unrecognized dataset: {dataset}\")\n for lang, documents in langs.items():\n if lang not in self.langs:\n raise Exception(f\"Unrecognized language: {lang}\")\n path = f'{self.path}/{dataset}/{ftype}/{lang}'\n Path(path).mkdir(parents=True, exist_ok=True)\n for docId, content in documents.items():\n fun(f'{path}/{content[\"fname\"]}', content)\n\n def update_merged(self, new_data) -> None:\n def update_merged(fpath: str, doc: dict) -> None:\n df = pd.DataFrame(doc['content'])\n df.to_csv(fpath, index=False)\n self.__update('predicted', new_data, update_merged)\n\n def update_clustered(self, new_data) -> None:\n def update_merged(fpath: str, doc: dict) -> None:\n doc['content'].to_csv(fpath, index=False)\n self.__update('clustered', new_data, update_merged)\n\n def __merge_records(\n self,\n nes: pd.DataFrame\n ) -> pd.DataFrame:\n \"\"\"\n Merges the NEs in the form of the expected output\n :param nes:\n :return:\n \"\"\"\n nes = nes.to_dict(orient='records')\n merged = []\n for i, ne in enumerate(nes):\n if ne['calcNER'].startswith('I-'):\n continue\n j = i + 1\n while j < len(nes) and not nes[j]['calcNER'].startswith('B-'):\n ne['text'] = f'{ne[\"text\"]} {nes[j][\"text\"]}'\n ne['calcLemma'] = f'{ne[\"calcLemma\"]} {nes[j][\"calcLemma\"]}'\n j += 1\n ne['calcNER'] = ne['calcNER'][2:]\n merged.append(ne)\n return pd.DataFrame(merged)\n\n def update_predicted(self, new_data) -> None:\n def update_predicted(fpath: str, doc: dict) -> None:\n df = doc['content']\n if 'calcLemma' not in df.columns:\n print(f\"MISSING LEMMA: `{fpath}`\")\n df['calcLemma'] = 'xxx'\n if 'calcClId' not in df.columns:\n print(f\"MISSING caclClId in `{fpath}`\")\n df['calcClId'] = 'xxx'\n if 'calcNer' in df.columns:\n df = df.rename(columns={'calcNer': 'calcNER'})\n df = df[['text', 
'calcLemma', 'calcNER', 'calcClId']]\n if len(df.loc[df['calcNER'].isna()]) > 0:\n df.loc[df['calcNER'].isna(), 'calcNER'] = 'O'\n df = df.loc[~df['calcNER'].isin(['O'])]\n df = self.__merge_records(df)\n df = df.drop_duplicates(subset=['text'])\n with open(f'{fpath}.out', 'w') as f:\n f.write(f'{doc[\"docId\"]}\\n')\n df.to_csv(f, sep='\\t', header=False, index=False)\n self.__update('', new_data, update_predicted)\n" }, { "alpha_fraction": 0.7080292105674744, "alphanum_fraction": 0.7153284549713135, "avg_line_length": 26.399999618530273, "blob_id": "8fd40563aa379dac2caa147ce5967c0db89c868f", "content_id": "9038dceb3687b85bdef1169aab5bf4e56e244a35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 137, "license_type": "no_license", "max_line_length": 59, "num_lines": 5, "path": "/bin/exec-l1o.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -euo pipefail\n\necho \"Starting the Leave-One-Out (L1O) training process...\"\nPYTHONPATH=. python src/train/trainer.py \"$@\"\n" }, { "alpha_fraction": 0.5460895895957947, "alphanum_fraction": 0.5591015219688416, "avg_line_length": 32.80555725097656, "blob_id": "55a82a5c981dcda5155ddf0103745cc086d57a3c", "content_id": "6736ecc1537fe7ef1458ac11ca1d7d22b78e85a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7301, "license_type": "no_license", "max_line_length": 202, "num_lines": 216, "path": "/data/datasets/ssj500k/prep.py", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "import xml.etree.ElementTree as ET\nimport random\nrandom.seed(42)\ndef next_sent():\n conllu=open('ssj500k.conllu/ssj500k-ud-morphology.conllu')\n sent=[]\n for line in conllu:\n if not line.startswith('#'):\n if line.strip()=='':\n yield sent\n sent=[]\n else:\n line=line.split('\\t')\n sent.append([line[3],line[5]])\nget_next_sent=next_sent()\nget_next_sent=next_sent()\ntree=ET.parse('ssj500k-en.TEI/ssj500k-en.body.xml')\nroot=tree.getroot()\ntrain=[]\ndev=[]\ntest=[]\ntrain_jos=[]\ndev_jos=[]\ntest_jos=[]\ntrain_ud=[]\ndev_ud=[]\ntest_ud=[]\ntrain_ner=[]\ndev_ner=[]\ntest_ner=[]\ntrain_text=open('train.txt','w')\ndev_text=open('dev.txt','w')\ntest_text=open('test.txt','w')\ntrain_jos_text=open('train_jos.txt','w')\ndev_jos_text=open('dev_jos.txt','w')\ntest_jos_text=open('test_jos.txt','w')\ntrain_ud_text=open('train_ud.txt','w')\ndev_ud_text=open('dev_ud.txt','w')\ntest_ud_text=open('test_ud.txt','w')\ntrain_ner_text=open('train_ner.txt','w')\ndev_ner_text=open('dev_ner.txt','w')\ntest_ner_text=open('test_ner.txt','w')\ndo_ner=True\nfor doc in root.iter('{http://www.tei-c.org/ns/1.0}div'):\n rand=random.random()\n if rand<0.8:\n pointer=train\n pointer_text=train_text\n pointer_ud=train_ud\n pointer_ud_text=train_ud_text\n pointer_jos=train_jos\n pointer_jos_text=train_jos_text\n pointer_ner_text=train_ner_text\n pointer_ner=train_ner\n elif rand<0.9:\n pointer=dev\n pointer_text=dev_text\n pointer_ud=dev_ud\n pointer_ud_text=dev_ud_text\n pointer_jos=dev_jos\n pointer_jos_text=dev_jos_text\n pointer_ner=dev_ner\n pointer_ner_text=dev_ner_text\n else:\n pointer=test\n pointer_text=test_text\n pointer_ud=test_ud\n pointer_ud_text=test_ud_text\n pointer_jos=test_jos\n pointer_jos_text=test_jos_text\n pointer_ner=test_ner\n pointer_ner_text=test_ner_text\n for p in doc.iter('{http://www.tei-c.org/ns/1.0}p'):\n #print p.attrib\n if 
p.attrib['{http://www.w3.org/XML/1998/namespace}id']=='ssj500.2653':\n do_ner=False\n for element in p:\n if element.tag.endswith('s'):\n sent_id=element.attrib['{http://www.w3.org/XML/1998/namespace}id']\n sentence=element\n text=''\n tokens=[]\n ners=[]\n uposfeats=get_next_sent.next()\n jos=None\n ud=None\n for element in sentence:\n if element.tag[-3:]=='seg':\n if element.attrib['type']=='name':\n ner=element.attrib['subtype']\n else:\n ner=None\n for idx,subelement in enumerate(element):\n text+=subelement.text\n if not subelement.tag.endswith('}c'):\n if subelement.tag.endswith('w'):\n lemma=subelement.attrib['lemma']\n else:\n lemma=subelement.text\n if do_ner:\n if ner is not None:\n if idx==0:\n ners.append('B-'+ner)\n else:\n ners.append('I-'+ner)\n else:\n ners.append('O')\n tokens.append([subelement.text,lemma,subelement.attrib['ana'].split(':')[1]])\n if element.tag[-2:] not in ('pc','}w','}c'):\n if element.tag[-7:]=='linkGrp':\n if element.attrib['type']=='UD-SYN':\n ud=[]\n for subelement in element:\n label=subelement.attrib['ana'].split(':')[1]\n head,dep=subelement.attrib['target'].split(' ')\n head=head.split('.')[-1]\n if head[0]!='t':\n head='0'\n else:\n head=head[1:]\n ud.append((head,label))\n elif element.attrib['type']=='JOS-SYN':\n jos=[]\n for subelement in element:\n label=subelement.attrib['ana'].split(':')[1]\n head,dep=subelement.attrib['target'].split(' ')\n head=head.split('.')[-1]\n if head[0]!='t':\n head='0'\n else:\n head=head[1:]\n jos.append((head,label))\n continue\n text+=element.text\n if not element.tag.endswith('}c'):\n if element.tag.endswith('w'):\n lemma=element.attrib['lemma']\n else:\n lemma=element.text\n tokens.append([element.text,lemma,element.attrib['ana'].split(':')[1]])\n if do_ner:\n ners.append('O')\n tokens=[a+b for a,b in zip(tokens,uposfeats)]\n pointer.append((sent_id,text,tokens))\n pointer_text.write(text.encode('utf8'))\n if ud!=None:\n pointer_ud.append((sent_id,text,tokens,ud))\n pointer_ud_text.write(text.encode('utf8'))\n if jos!=None:\n pointer_jos.append((sent_id,text,tokens,jos))\n pointer_jos_text.write(text.encode('utf8'))\n if do_ner:\n pointer_ner.append((sent_id,text,tokens,ners))\n pointer_ner_text.write(text.encode('utf8'))\n else:\n pointer_text.write(element.text.encode('utf8'))\n if ud!=None:\n pointer_ud_text.write(element.text.encode('utf8'))\n if jos!=None:\n pointer_jos_text.write(element.text.encode('utf8'))\n if do_ner:\n pointer_ner_text.write(element.text.encode('utf8'))\n pointer_text.write('\\n')\n if ud!=None:\n pointer_ud_text.write('\\n')\n if jos!=None:\n pointer_jos_text.write('\\n')\n if do_ner:\n pointer_ner_text.write('\\n')\n #pointer_text.write('\\n')\n\ndef write_list(lst,fname,synt=False,ner=False):\n f=open(fname,'w')\n for el in lst:\n if not synt and not ner:\n sid,text,tokens=el\n elif ner:\n sid,text,tokens,nes=el\n else:\n sid,text,tokens,dep=el\n f.write('# sent_id = '+sid+'\\n')\n f.write('# text = '+text.encode('utf8')+'\\n')\n for idx,token in enumerate(tokens):\n if not synt and not ner:\n f.write(str(idx+1)+'\\t'+token[0].encode('utf8')+'\\t'+token[1].encode('utf8')+'\\t'+token[3]+'\\t'+token[2]+'\\t'+token[4]+'\\t_\\t_\\t_\\t_\\n')\n elif synt:\n f.write(str(idx+1)+'\\t'+token[0].encode('utf8')+'\\t'+token[1].encode('utf8')+'\\t'+token[3]+'\\t'+token[2]+'\\t'+token[4]+'\\t'+dep[idx][0].encode('utf8')+'\\t'+dep[idx][1].encode('utf8')+'\\t_\\t_\\n')\n else:\n 
f.write(str(idx+1)+'\\t'+token[0].encode('utf8')+'\\t'+token[1].encode('utf8')+'\\t'+token[3]+'\\t'+token[2]+'\\t'+token[4]+'\\t_\\t_\\t_\\t'+nes[idx].encode('utf8')+'\\n')\n f.write('\\n')\n f.close()\n\nwrite_list(train,'train.conllu')\nwrite_list(dev,'dev.conllu')\nwrite_list(test,'test.conllu')\nwrite_list(train_jos,'train_jos.conllu',True)\nwrite_list(dev_jos,'dev_jos.conllu',True)\nwrite_list(test_jos,'test_jos.conllu',True)\nwrite_list(train_ud,'train_ud.conllu',True)\nwrite_list(dev_ud,'dev_ud.conllu',True)\nwrite_list(test_ud,'test_ud.conllu',True)\nwrite_list(train_ner,'train_ner.conllu',ner=True)\nwrite_list(dev_ner,'dev_ner.conllu',ner=True)\nwrite_list(test_ner,'test_ner.conllu',ner=True)\ntrain_text.close()\ndev_text.close()\ntest_text.close()\ntrain_ud_text.close()\ndev_ud_text.close()\ntest_ud_text.close()\ntrain_jos_text.close()\ndev_jos_text.close()\ntest_jos_text.close()\ntrain_ner_text.close()\ndev_ner_text.close()\ntest_ner_text.close()" }, { "alpha_fraction": 0.7894737124443054, "alphanum_fraction": 0.7894737124443054, "avg_line_length": 21.16666603088379, "blob_id": "c6f617079689ab178ef166463ae9abd31413f007", "content_id": "b64af5b3cf1e0fa7400b61850769c9e4fa255548", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 133, "license_type": "no_license", "max_line_length": 60, "num_lines": 6, "path": "/bin/compile-checker.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -euo pipefail\n\n# compile the consistency check provided by BSNLP organizers\n\njavac ./src/utils/ConsistencyCheck.java\n" }, { "alpha_fraction": 0.5285171270370483, "alphanum_fraction": 0.7072243094444275, "avg_line_length": 16.53333282470703, "blob_id": "0186dbe93ad75330c8b0e2044c7d8f234c17fbb3", "content_id": "c9a23e79beda83859f70d7c42e187338f1e4ae1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 263, "license_type": "no_license", "max_line_length": 27, "num_lines": 15, "path": "/requirements.txt", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "pandas==1.2.1\nclassla==0.0.11\nstanza==1.2\nfuzzywuzzy==0.18.0\nscikit-learn==0.24.1\nmatplotlib==3.3.4\ndedupe==2.0.6\npyconll==3.0.3\ntensorflow==2.3.1\ntensorflow-estimator==2.3.0\ntransformers==3.5.1\nkeras==2.4.3\nkeras-preprocessing==1.1.2\nseqeval==1.2.2\ntorch==1.7.0\n" }, { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 23.200000762939453, "blob_id": "ccd6614f7548eabab6051f85a45670977d8bf6de", "content_id": "22fe2004a62118101b0f62125338ad01dd00e82d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 121, "license_type": "no_license", "max_line_length": 55, "num_lines": 5, "path": "/bin/exec-splits.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -euo pipefail\n\necho \"Creating dataset splits...\"\nPYTHONPATH=. 
python src/transform/create_splits.py \"$@\"\n" }, { "alpha_fraction": 0.6829268336296082, "alphanum_fraction": 0.7044476270675659, "avg_line_length": 25.80769157409668, "blob_id": "d46d2ba7b194f9ca922ee14f589ac3afeee85593", "content_id": "228a3683b53476e404fb538fdd6dca2339855982", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 697, "license_type": "no_license", "max_line_length": 78, "num_lines": 26, "path": "/bin/run-bert-l1o.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#SBATCH --nodes=1\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=8\n#SBATCH --gpus-per-task=1\n#SBATCH --time=3-00:00:00\n#SBATCH --output=logs/NER-l1o-%J.out\n#SBATCH --error=logs/NER-l1o-%J.err\n#SBATCH --job-name=\"NER-l1o\"\n\nset -euo pipefail\n\nCONTAINER_IMAGE_PATH=\"$PWD/containers/pytorch-image-new.sqfs\"\n\necho \"$SLURM_JOB_ID -> Training the model...\"\n\n# the following command opens a bash terminal of an already existing container\n# with the current directory (.) mounted\nsrun \\\n --container-image \"$CONTAINER_IMAGE_PATH\" \\\n --container-mounts \"$PWD\":/workspace,/shared/datasets/rsdo:/data \\\n --container-entrypoint /workspace/bin/exec-l1o.sh\n\necho \"$SLURM_JOB_ID -> Done.\"\n\n#wait\n" }, { "alpha_fraction": 0.7037814855575562, "alphanum_fraction": 0.7153361439704895, "avg_line_length": 33, "blob_id": "02d87a71c902d65b3388a68e6a31b58d67b26cc9", "content_id": "21a175deae74287cad8f3daf703519e3325a1ac8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 952, "license_type": "no_license", "max_line_length": 150, "num_lines": 28, "path": "/bin/run-eval.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#SBATCH --nodes=1\n#SBATCH --ntasks=1\n#SBATCH --mem=5GB\n#SBATCH --cpus-per-task=4\n#SBATCH --time=3-00:00:00\n#SBATCH --output=logs/BSNLP-eval-%J.out\n#SBATCH --error=logs/BSNLP-eval-%J.err\n#SBATCH --job-name=\"BSNLP-eval\"\n\nCONTAINER_IMAGE_PATH=\"$PWD/containers/pytorch-image-new.sqfs\"\n\necho \"$SLURM_JOB_ID -> Generating the clusters for the model...\"\n\nBUNDLE=\"multi_all\"\nDIR_PREFIX=\"data/evals/$SLURM_JOB_ID-$BUNDLE\"\nmkdir -p \"$DIR_PREFIX/reports\"\nmkdir -p \"$DIR_PREFIX/error-logs\"\nmkdir -p \"$DIR_PREFIX/summaries\"\n\n# the following command opens a bash terminal of an already existing container\n# with the current directory (.) 
mounted\nsrun \\\n --container-image \"$CONTAINER_IMAGE_PATH\" \\\n --container-mounts \"$PWD\":/workspace,/shared/datasets/rsdo:/data \\\n --container-entrypoint /workspace/bin/exec-eval.sh \"java-eval/data-$BUNDLE\" \"$DIR_PREFIX/reports\" \"$DIR_PREFIX/error-logs\" \"$DIR_PREFIX/summaries\"\n\necho \"$SLURM_JOB_ID -> Done.\"\n" }, { "alpha_fraction": 0.5751495361328125, "alphanum_fraction": 0.5786390900611877, "avg_line_length": 44.59090805053711, "blob_id": "ef094be8a66989f6f2a5874030d928dcbf54138c", "content_id": "2b9ce71ab06c4547344a5100cd050e171243481f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8024, "license_type": "no_license", "max_line_length": 156, "num_lines": 176, "path": "/src/transform/annotate_docs.py", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "import json\nimport pathlib\nimport stanza\nimport classla\nimport pandas as pd\nfrom fuzzywuzzy import fuzz\n\nDOWNLOAD_RESOURCES = False\nLOWEST_SIMILARITY = 85\n\n\ndef split_documents(dataset_files: dict, tokenizers: dict):\n warnings = []\n files_processed = 0\n for dataset in dataset_files:\n if dataset not in ['./data/datasets/bsnlp/covid-19', './data/datasets/bsnlp/us_election_2020']:\n print(f\"Skipping {dataset}\")\n continue\n for lang in dataset_files[dataset]:\n print(f'Dataset: {dataset}, Language: {lang}')\n merged_path = f'{dataset}/merged/{lang}'\n if not pathlib.Path(merged_path).exists():\n pathlib.Path(merged_path).mkdir(parents=True, exist_ok=True)\n for file in dataset_files[dataset][lang]:\n sentences, document_id = split_document(file['raw'], tokenizers[lang], lang)\n annotated_document, warns = annotate_document(sentences, file['annotated'], document_id, tokenizers[lang], lang)\n warnings.extend(warns)\n doc_name = f\"{file['raw'].split('/')[-1][:-3]}csv\"\n merged_fname = f'{merged_path}/{doc_name}'\n annotated_document.to_csv(merged_fname, index=False)\n files_processed += 1\n print(f'Files processed: {files_processed}.')\n print(f'Number of warnings occured: {len(warnings)}.')\n json.dump(warnings, open('./data/results/merge_warnings.json', 'w'), indent=4)\n\n\ndef convert_sentences(raw_sentences, lang):\n sentences = []\n for sentence in raw_sentences:\n tokens = []\n for token in sentence.tokens:\n if len(token.words) > 1:\n print(f\"MORE WORDS: {token.words}\")\n tokens.append({\n \"id\": token.index if lang in ['sl', 'bg'] else token.id[0],\n \"text\": ''.join([w.text for w in token.words]),\n \"calcLemma\": ' '.join([w.lemma for w in token.words if w.lemma is not None]),\n \"upos\": ' '.join([w.xpos for w in token.words if w.xpos is not None]),\n \"xpos\": ' '.join([w.upos for w in token.words if w.upos is not None]),\n })\n sentences.append(tokens)\n return sentences\n\n\ndef split_document(document_path: str, tokenizer, lang: str):\n document_lines = open(document_path, encoding='utf-8-sig').readlines()\n document_id = document_lines[0].strip()\n content = ' '.join(document_lines[4:])\n doc = tokenizer(content)\n # sentences = [sentence.to_dict() for sentence in doc.sentences] if lang != 'sl' else convert_sentences(doc.sentences)\n sentences = convert_sentences(doc.sentences, lang)\n return sentences, document_id\n\n\ndef tokenize_mention(mention: str, tokenizer, lang: str) -> list:\n # just for slo\n tokenized = [i for s in convert_sentences(tokenizer(mention).sentences, lang) for i in s]\n return [t['text'] for t in tokenized]\n\n\ndef sort_by_mention_length(data: pd.DataFrame) -> 
pd.DataFrame:\n sorted_vals = data['Mention'].str.len().sort_values().index\n return data.reindex(sorted_vals).reset_index(drop=True)\n\n\ndef annotate_document(sentences: list, annotations_path: str, document_id: str, tokenizer, lang) -> (pd.DataFrame, list):\n # print(tf\"Work on {annotations_path}\")\n try:\n anns = pd.read_csv(annotations_path, names=['Mention', 'Base', 'Category', 'clID'], skiprows=[0], sep='\\t')\n ann_df = sort_by_mention_length(anns)\n except:\n print(f\"CAN'T LOAD {annotations_path}\")\n anns = pd.DataFrame()\n ann_df = pd.DataFrame(columns=['Mention', 'Base', 'Category', 'clID'])\n # return pd.DataFrame(), []\n # a hack to first look for shorter matches if mentions\n # are substrings, e.g. komisija vs Evropska Komisija\n\n warnings = []\n if len(ann_df['Mention'].unique()) != len(ann_df.index):\n print(\"Duplicate mentions!\")\n warnings.append({\n \"msg\": \"Duplicate mentions found!\",\n \"doc\": annotations_path,\n })\n annotations = ann_df.to_dict('records')\n annotated_tokens = []\n for sent_id, sentence in enumerate(sentences):\n for token in sentence:\n token['ner'] = 'O'\n token['lemma'] = ''\n token['clID'] = ''\n token['sentenceId'] = sent_id\n token['docId'] = document_id\n annotated_tokens.append(token)\n\n used_annotations = 0\n for annotation in annotations:\n ann_pieces = tokenize_mention(annotation['Mention'], tokenizer, lang)\n matched = 0\n for token_id, token in enumerate(annotated_tokens):\n first_ratio = fuzz.ratio(ann_pieces[0].lower(), token['text'].lower())\n if first_ratio >= LOWEST_SIMILARITY:\n if token_id + len(ann_pieces) > len(annotated_tokens):\n continue\n all_ratio = [fuzz.ratio(ann.lower(), annotated_tokens[token_id + i]['text'].lower()) for i, ann in enumerate(ann_pieces)]\n if len([r for r in all_ratio if r >= LOWEST_SIMILARITY]) != len(ann_pieces):\n continue\n f_ner = True\n matched_tokens = [annotated_tokens[token_id + i]['text'] for i, _ in enumerate(ann_pieces)]\n lemma = tokenize_mention(str(annotation[\"Base\"]), tokenizer, lang)\n for i, _ in enumerate(ann_pieces):\n t = annotated_tokens[token_id + i]\n t['ner'] = f\"{'B' if f_ner else 'I'}-{annotation['Category']}\"\n if not lemma:\n warnings.append({\n \"msg\": \"BASE FORM DOES NOT MATCH MENTION\",\n \"doc\": annotations_path,\n \"lemma\": annotation['Base'],\n \"ner\": annotation['Mention'],\n \"matched\": matched_tokens\n })\n print(f\"[WARNING] LEMMA DOES NOT MATCH\")\n lemma = ['PAD']\n t['lemma'] = lemma.pop(0)\n t['clID'] = annotation[\"clID\"]\n f_ner = False if f_ner else f_ner\n matched += 1\n if matched == 0:\n warnings.append({\n \"msg\": \"Annotation not matched!\",\n \"doc\": annotations_path,\n \"annotation\": annotation,\n })\n used_annotations += 1 if matched > 0 else 0\n\n if used_annotations != len(annotations):\n print(f\"[WARNING] UNUSED ANNOTATIONS: {used_annotations}/{len(annotations)}\")\n warnings.append({\n \"msg\": f\"ALTERED ITEMS ({used_annotations}) NOT EQUAL TO ANNOTATIONS ({len(annotations)})\",\n \"doc\": annotations_path,\n \"num_altered\": used_annotations,\n \"num_annotations\": len(annotations)\n })\n sentence_df = pd.DataFrame(annotated_tokens)\n sentence_df = sentence_df.rename(columns={'id': 'tokenId'})\n sentence_df = sentence_df[['docId', 'sentenceId', 'tokenId', 'text', 'lemma', 'calcLemma', 'upos', 'xpos', 'ner', 'clID']] # leaving out 'misc' for now\n return sentence_df, warnings\n\n\nif __name__ == '__main__':\n datasets_files = json.load(open('./data/results/dataset_pairs.json'))\n languages = set([lang for dataset in 
datasets_files for lang in datasets_files[dataset].keys()])\n print(languages)\n processors = 'tokenize,pos,lemma'\n if DOWNLOAD_RESOURCES: # do it once on a new system\n for lang in languages:\n lang = lang if lang != 'ua' else 'uk'\n print(f'Downloading {lang}...')\n stanza.download(lang, processors=processors)\n classla.download('sl')\n classla.download('bg')\n tokenizers = {lang: stanza.Pipeline(lang=lang if lang != 'ua' else 'uk', processors=processors) for lang in languages}\n tokenizers['sl'] = classla.Pipeline('sl', processors=processors)\n tokenizers['bg'] = classla.Pipeline('bg', processors=processors)\n split_documents(datasets_files, tokenizers)\n" }, { "alpha_fraction": 0.51507568359375, "alphanum_fraction": 0.5195796489715576, "avg_line_length": 36.70283126831055, "blob_id": "75be2b9e5ba134057ee743573f241b9cdedfc6ce", "content_id": "b611e9437b2b0562b7d7683bf182669aabbfd951", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7993, "license_type": "no_license", "max_line_length": 134, "num_lines": 212, "path": "/src/analyze/main.py", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "import os\nimport json\nimport pandas as pd\nimport sys\nimport logging\n\nfrom collections import defaultdict\n\n\nlogging.basicConfig(\n stream=sys.stdout,\n level=logging.DEBUG,\n format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s'\n)\nlogger = logging.getLogger('main')\n\n\ndef list_all_files(dirpath: str) -> (list, list, dict):\n files, dirs = [], []\n stats = {\n 'test': {\n \"numFiles\": 0,\n \"dirs\": {\n \"annotated\": defaultdict(list),\n \"raw\": defaultdict(list),\n }\n },\n 'sample': {\n \"numFiles\": 0,\n \"dirs\": {\n \"annotated\": defaultdict(list),\n \"raw\": defaultdict(list),\n }\n },\n 'train': {\n \"numFiles\": 0,\n \"dirs\": {\n \"annotated\": defaultdict(list),\n \"raw\": defaultdict(list),\n }\n },\n }\n for dpath, dnames, fnames in os.walk(dirpath,):\n whole_path_dirs = [f'{dpath}/{dname}' for dname in dnames]\n whole_path_files = [f'{dpath}/{fname}' for fname in fnames]\n dirs.extend(whole_path_dirs)\n files.extend(whole_path_files)\n if fnames:\n if 'test' in dpath:\n stats['test']['numFiles'] += len(whole_path_files)\n if \"annotated\" in dpath:\n stats['test']['dirs']['annotated'][dpath[-2:].lower()].extend(whole_path_files)\n else:\n stats['test']['dirs']['raw'][dpath[-2:].lower()].extend(whole_path_files)\n\n elif 'sample' in dpath:\n stats['sample']['numFiles'] += len(whole_path_files)\n if \"annotated\" in dpath:\n stats['sample']['dirs']['annotated'][dpath[-2:].lower()].extend(whole_path_files)\n else:\n stats['sample']['dirs']['raw'][dpath[-2:].lower()].extend(whole_path_files)\n else:\n stats['train']['numFiles'] += len(whole_path_files)\n if \"annotated\" in dpath:\n stats['train']['dirs']['annotated'][dpath[-2:].lower()].extend(whole_path_files)\n else:\n stats['train']['dirs']['raw'][dpath[-2:].lower()].extend(whole_path_files)\n return sorted(dirs), sorted(files), stats\n\n\ndef list_datasets(datasets: list) -> dict:\n dataset_files = {}\n for dataset in datasets:\n dataset_files[dataset] = {}\n languages_raw = sorted(os.listdir(f'{dataset}/raw'))\n try:\n languages_ann = sorted(os.listdir(f'{dataset}/annotated'))\n except:\n languages_ann = languages_raw\n for lang_id, lang in enumerate(languages_raw):\n base_raw = f'{dataset}/raw/{lang}'\n base_ann = f'{dataset}/annotated/{lang}'\n\n raw_files = sorted(os.listdir(base_raw))\n try:\n 
ann_files = sorted(os.listdir(base_ann))\n except:\n ann_files = raw_files\n for r, a in zip(raw_files, ann_files):\n digits_r = ''.join([d for d in r if d.isdigit()])\n digits_a = ''.join([d for d in r if d.isdigit()])\n if digits_a != digits_r:\n print(f'NO MATCH:\\n{base_raw}/{r}\\n{base_ann}/{a}')\n dataset_files[dataset][languages_ann[lang_id]] = [\n {\n 'raw': f'{base_raw}/{r}',\n 'annotated': f'{base_ann}/{a}',\n 'raw_fname': r, 'ann_fname': a\n }\n for r, a in zip(raw_files, ann_files)\n ]\n return dataset_files\n\n\ndef aggregate_nes(stats: dict) -> dict:\n ne_stats = {}\n atts = ['Mention', 'Base', 'Category', 'clID']\n all_data = {att: pd.DataFrame() for att in atts}\n for dataset, data in stats.items():\n ne_stats[dataset] = {}\n for lang, files in data['dirs']['annotated'].items():\n ne_stats[dataset][lang] = {}\n lang_data = pd.DataFrame()\n for file in files:\n file_nes = pd.read_csv(file, header=None, skiprows=[0], delimiter='\\t', names=['Mention', 'Base', 'Category', 'clID'])\n lang_data = pd.concat([lang_data, file_nes], ignore_index=True)\n for att in atts:\n counts = pd.DataFrame(lang_data[att].value_counts())\n ne_stats[dataset][lang][att] = counts.to_json()\n counts.reset_index(inplace=True)\n counts = counts.rename(columns={'index': att, att:'Count'})\n all_data[att] = pd.concat([all_data[att], counts], ignore_index=True)\n counts.to_csv(f'./data/stats/{dataset}-{lang}-{att}.csv', index=False)\n for att in atts:\n counts = all_data[att].groupby([att]).agg(['sum'])\n counts.reset_index(inplace=True)\n counts.columns = [att, 'Count']\n counts.to_csv(f'./data/stats/{dataset}-{att}.csv', index=False)\n return ne_stats\n\n\ndef raw_doc_info(fname: str) -> dict:\n file_info = {}\n with open(fname, encoding='utf-8-sig') as f:\n lines = f.readlines()\n file_info['id'] = lines[0].strip()\n file_info['lang'] = lines[1].strip()\n file_info['created'] = lines[2].strip()\n file_info['url'] = lines[3].strip()\n file_info['title'] = lines[4].strip()\n content = ' '.join(lines[5:]).strip()\n file_info['contentLength'] = len(content)\n file_info['numWords'] = len(content.split(' '))\n return file_info\n\n\ndef ann_doc_info(fname: str) -> dict:\n file_info = {}\n ne_categories = ['PER', 'ORG', 'LOC', 'EVT', 'PRO']\n with open(fname, encoding='utf-8-sig') as f:\n lines = f.readlines()\n file_info['id'] = lines[0].strip()\n df = pd.read_csv(fname, names=['Mention', 'Base', 'Category', 'clID'], skiprows=[0], sep='\\t')\n file_info['NEcount'] = len(df.index)\n cat_counts = df['Category'].value_counts()\n for cat in ne_categories:\n file_info[cat] = cat_counts[cat] if cat in cat_counts else 0\n file_info['UniqueCLIDs'] = len(df['clID'].unique())\n return file_info\n\n\ndef get_doc_info(stats: dict) -> dict:\n dataset_raw = []\n dataset_ann = []\n for dataset, data in stats.items():\n for lang, files in data['dirs']['raw'].items():\n for file in files:\n info = raw_doc_info(file)\n info['dataset_dir'] = dataset\n info['lang'] = lang\n info['fpath'] = file\n dataset_raw.append(info)\n for lang, files in data['dirs']['annotated'].items():\n for file in files:\n info = ann_doc_info(file)\n info['dataset_dir'] = dataset\n info['lang'] = lang\n info['fpath'] = file\n dataset_ann.append(info)\n raw_df = pd.DataFrame(dataset_raw)\n raw_df.to_csv(\"./data/results/file_raw_stats.csv\")\n\n ann_df = pd.DataFrame(dataset_ann)\n ann_df.to_csv(\"./data/results/file_ne_stats.csv\")\n\n return {\n \"raw\": raw_df,\n \"ann\": ann_df,\n }\n\n\nif __name__ == '__main__':\n datasets = [\n 
'./data/datasets/bsnlp/ec',\n './data/datasets/bsnlp/trump',\n # 2019 data is updated for the 2021 challenge, so these are obsolete\n # './data/datasets/bsnlp/sample',\n # './data/datasets/bsnlp/training',\n # './data/datasets/bsnlp/nord_stream',\n # './data/datasets/bsnlp/ryanair',\n './data/datasets/bsnlp/asia_bibi',\n './data/datasets/bsnlp/brexit',\n './data/datasets/bsnlp/nord_stream',\n './data/datasets/bsnlp/other',\n './data/datasets/bsnlp/ryanair',\n './data/datasets/bsnlp/covid-19',\n './data/datasets/bsnlp/us_election_2020',\n ]\n dataset_files = list_datasets(datasets)\n logger.info('Done.')\n with open('./data/results/dataset_pairs.json', 'w') as f:\n json.dump(dataset_files, f, indent=4)\n" }, { "alpha_fraction": 0.6752577424049377, "alphanum_fraction": 0.6969072222709656, "avg_line_length": 30.29032325744629, "blob_id": "3fb912b11ffe28e2e48b56ee57817a258dd96399", "content_id": "7361e133d90623742ed74b83c9187d225097b20b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 970, "license_type": "no_license", "max_line_length": 110, "num_lines": 31, "path": "/bin/run-setup.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#SBATCH --nodes=1\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=8\n#SBATCH --gpus-per-task=0\n#SBATCH --time=5:00\n#SBATCH --output=logs/NER-BERT-setup-%J.out\n#SBATCH --error=logs/NER-BERT-setup-%J.err\n#SBATCH --job-name=\"NER-BERT-setup\"\n\nset -euo pipefail\n\nCONTAINER_IMAGE_PATH=\"$PWD/containers/pytorch-image-new.sqfs\"\n\nif [ ! -e \"$CONTAINER_IMAGE_PATH\" ]; then\n echo \"Creating the container image at $CONTAINER_IMAGE_PATH...\"\n # xantipa uses Singularity containers, we need the appropriate image\n # singularity build ./containers/sing-container.sif docker://pytorch/pytorch:1.6.0-cuda10.1-cudnn7-runtime\n\n srun \\\n --container-image pytorch/pytorch:1.7.0-cuda11.0-cudnn8-runtime \\\n --container-save \"$CONTAINER_IMAGE_PATH\" \\\n --container-mounts \"$PWD\":/workspace \\\n --container-entrypoint /workspace/bin/exec-setup.sh\n\n echo \"Image is created.\"\nelse\n echo \"Using existing image from $CONTAINER_IMAGE_PATH.\"\nfi\n\n#wait\n" }, { "alpha_fraction": 0.7138728499412537, "alphanum_fraction": 0.7456647157669067, "avg_line_length": 33.599998474121094, "blob_id": "859c9d9b318a4ff2dc01250a7fe061a678f9bc50", "content_id": "170575609447ed4d798498ebd9e19136ff858fde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 346, "license_type": "no_license", "max_line_length": 116, "num_lines": 10, "path": "/bin/run-checker.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -euo pipefail\n\n# Takes 2 args:\n# 1. path to directory containing submission files\n# 2. 
path to file where the report should be generated\n# Example usage [from repo root]:\n# ./bin/run-checker.sh data/challenge/2021/brexit/annotated/sl/ data/consistency_reports/report_2021-sl-brexit.txt\n\njava src/utils/ConsistencyCheck \"$@\"\n" }, { "alpha_fraction": 0.514648973941803, "alphanum_fraction": 0.5363354086875916, "avg_line_length": 39.686851501464844, "blob_id": "c473c4ee7a8421f60337164ac47b694488813eec", "content_id": "f553691169341b33263010ff261749df534be80f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23517, "license_type": "no_license", "max_line_length": 118, "num_lines": 578, "path": "/src/train/crosloeng.py", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport torch\nimport random\nimport os\nimport sys\nimport logging\nimport argparse\nimport transformers\nimport pathlib\n\nfrom datetime import datetime\nfrom tqdm import trange, tqdm\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler\nfrom transformers import AutoTokenizer, AutoModelForTokenClassification, AdamW\nfrom transformers import get_linear_schedule_with_warmup, PreTrainedModel\nfrom keras.preprocessing.sequence import pad_sequences\nfrom seqeval.metrics import f1_score, precision_score, recall_score, accuracy_score, classification_report\nfrom matplotlib import pyplot as plt\nfrom itertools import product\n\nfrom src.train.model import Model\nfrom src.utils.load_dataset import LoadSSJ500k, LoadBSNLP, LoadCombined\nfrom src.utils.utils import list_dir\n\nlogging.basicConfig(\n stream=sys.stdout,\n level=logging.DEBUG,\n format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s'\n)\nlogger = logging.getLogger('TrainEvalModels')\n\n\nclass BertModel(Model):\n def __init__(\n self,\n tag2code,\n code2tag,\n output_model_path: str, # this is the output dir\n output_model_fname: str, # this is the output file name\n tune_entire_model: bool,\n epochs: int = 3,\n max_grad_norm: float = 1.0,\n input_model_path: str = 'data/models/cro-slo-eng-bert', # this is a directory\n use_test: bool = False\n ):\n super().__init__()\n self.input_model_path = input_model_path\n self.output_model_path = output_model_path\n self.output_model_fname = output_model_fname\n self.use_test = use_test\n\n logger.info(f\"Output model at: {output_model_path}\")\n\n logger.info(f\"Tuning entire model: {tune_entire_model}\")\n self.tune_entire_model = tune_entire_model\n\n self.tokenizer = AutoTokenizer.from_pretrained(\n self.input_model_path,\n from_pt=True,\n do_lower_case=False,\n use_fast=False,\n\n )\n self.MAX_LENGTH = 128 # max input length\n self.BATCH_SIZE = 32 # max input length\n self.epochs = epochs\n self.max_grad_norm = max_grad_norm\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n logger.info(f\"Using device: {self.device}\")\n self.tag2code, self.code2tag = tag2code, code2tag\n logger.info(f\"tags: {self.tag2code.keys()}\")\n self.save_weights = False\n\n def convert_input(self, input_data: pd.DataFrame):\n tokens = []\n tags = [] # NER tags\n\n if 'docId' not in input_data.columns:\n input_data['docId'] = 'xxx'\n\n for (_, sentence), data in input_data.groupby([\"docId\", \"sentenceId\"]):\n sentence_tokens = []\n sentence_tags = []\n for id, word_row in data.iterrows():\n word_tokens = self.tokenizer.tokenize(str(word_row[\"text\"]))\n sentence_tokens.extend(word_tokens)\n 
sentence_tags.extend([self.tag2code[word_row[\"ner\"]]] * len(word_tokens))\n\n sentence_ids = self.tokenizer.convert_tokens_to_ids(sentence_tokens)\n tokens.append(sentence_ids)\n tags.append(sentence_tags)\n # padding is required to spill the sentence tokens in case there are sentences longer than 128 words\n # or to fill in the missing places to 128 (self.MAX_LENGTH)\n tokens = torch.as_tensor(pad_sequences(\n tokens,\n maxlen=self.MAX_LENGTH,\n dtype=\"long\",\n value=0.0,\n truncating=\"post\",\n padding=\"post\"\n )).to(self.device)\n tags = torch.as_tensor(pad_sequences(\n tags,\n maxlen=self.MAX_LENGTH,\n dtype=\"long\",\n value=self.tag2code[\"PAD\"],\n truncating=\"post\",\n padding=\"post\"\n )).to(self.device)\n masks = torch.as_tensor(np.array([[float(token != 0.0) for token in sentence] for sentence in tokens])).to(\n self.device)\n data = TensorDataset(tokens, masks, tags)\n sampler = RandomSampler(data)\n return DataLoader(data, sampler=sampler, batch_size=self.BATCH_SIZE)\n\n def convert_output(self):\n pass\n\n def train(\n self,\n data_loaders: dict\n ):\n logger.info(f\"Loading the pre-trained model `{self.input_model_path}`...\")\n model = AutoModelForTokenClassification.from_pretrained(\n self.input_model_path,\n num_labels=len(self.tag2code),\n label2id=self.tag2code,\n id2label=self.code2tag,\n output_attentions=False,\n output_hidden_states=False\n )\n\n model = model.to(self.device)\n optimizer, loss = None, None\n\n for dataset, dataloader in data_loaders.items():\n logger.info(f'Training on `{dataset}`')\n # hack to use entire dataset, leaving the validation data intact\n td = pd.concat([dataloader.train(), dataloader.test()]) if self.use_test else dataloader.train()\n model, optimizer, loss = self.__train(model, train_data=td,\n validation_data=dataloader.dev())\n\n out_fname = f\"{self.output_model_path}/{self.output_model_fname}\"\n logger.info(f\"Saving the model at: {out_fname}\")\n model.save_pretrained(out_fname)\n self.tokenizer.save_pretrained(out_fname)\n logger.info(\"Done!\")\n\n def __train(\n self,\n model,\n train_data: pd.DataFrame,\n validation_data: pd.DataFrame\n ):\n logger.info(\"Loading the training data...\")\n train_data = self.convert_input(train_data)\n logger.info(\"Loading the validation data...\")\n validation_data = self.convert_input(validation_data)\n\n if self.tune_entire_model:\n model_parameters = list(model.named_parameters())\n no_decay = ['bias', 'gamma', 'beta']\n optimizer_parameters = [\n {\n 'params': [p for n, p in model_parameters if not any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.01\n },\n {\n 'params': [p for n, p in model_parameters if any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.0\n }\n ]\n else:\n model_parameters = list(model.named_parameters())\n optimizer_parameters = [{\"params\": [p for n, p in model_parameters]}]\n\n optimizer = AdamW(\n optimizer_parameters,\n lr=3e-5,\n eps=1e-8\n )\n\n total_steps = len(train_data) * self.epochs\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=0,\n num_training_steps=total_steps\n )\n\n # ensure reproducibility\n # TODO: try out different seed values\n seed_val = 42\n random.seed(seed_val)\n np.random.seed(seed_val)\n torch.manual_seed(seed_val)\n torch.cuda.manual_seed_all(seed_val)\n\n training_loss, validation_loss, loss = [], [], None\n logger.info(f\"Training the model for {self.epochs} epochs...\")\n for _ in trange(self.epochs, desc=\"Epoch\"):\n model.train()\n total_loss = 0\n # train:\n for step, batch 
in tqdm(enumerate(train_data), desc='Batch'):\n batch_tokens, batch_masks, batch_tags = tuple(t.to(self.device) for t in batch)\n\n # reset the grads\n model.zero_grad()\n\n outputs = model(\n batch_tokens,\n attention_mask=batch_masks,\n labels=batch_tags\n )\n\n loss = outputs[0]\n loss.backward()\n total_loss += loss.item()\n\n # preventing exploding gradients\n torch.nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=self.max_grad_norm)\n\n # update the parameters\n optimizer.step()\n\n # update the learning rate (lr)\n scheduler.step()\n\n avg_epoch_train_loss = total_loss / len(train_data)\n logger.info(f\"Avg train loss = {avg_epoch_train_loss:.4f}\")\n training_loss.append(avg_epoch_train_loss)\n\n # validate:\n model.eval()\n val_loss, val_acc, val_f1, val_p, val_r, val_report = self.__test(model, validation_data)\n validation_loss.append(val_loss)\n logger.info(f\"Validation loss: {val_loss:.4f}\")\n logger.info(f\"Validation accuracy: {val_acc:.4f}, P: {val_p:.4f}, R: {val_r:.4f}, F1 score: {val_f1:.4f}\")\n logger.info(f\"Classification report:\\n{val_report}\")\n\n fig, ax = plt.subplots()\n ax.plot(training_loss, label=\"Traing loss\")\n ax.plot(validation_loss, label=\"Validation loss\")\n ax.legend()\n ax.set_title(\"Model Loss\")\n ax.set_ylabel(\"Loss\")\n ax.set_xlabel(\"Epoch\")\n fig.savefig(f\"{self.output_model_path}/{self.output_model_fname}-loss.png\")\n return model, optimizer, loss\n\n def translate(self, predictions: list, labels: list, tokens) -> (list, list, list):\n translated_predictions, translated_labels, translated_tokens = [], [], []\n for preds, labs, toks in zip(predictions, labels, tokens):\n sentence_predictions, sentence_labels, sentence_tokens = [], [], []\n for p, l, t in zip(preds, labs, toks):\n if l == self.tag2code[\"PAD\"]:\n continue\n sentence_tokens.append(t)\n sentence_predictions.append(self.code2tag[p])\n sentence_labels.append(self.code2tag[l])\n translated_tokens.append(sentence_tokens)\n translated_predictions.append(sentence_predictions)\n translated_labels.append(sentence_labels)\n return translated_predictions, translated_labels, translated_tokens\n\n def __test(self, model: PreTrainedModel, data: DataLoader) -> (float, float, float, float, float, str):\n eval_loss = 0.\n eval_steps, eval_examples = 0, 0\n tokens, eval_predictions, eval_labels = [], [], []\n model.eval()\n for batch in tqdm(data):\n batch_tokens, batch_masks, batch_tags = tuple(t.to(self.device) for t in batch)\n with torch.no_grad():\n outputs = model(\n batch_tokens,\n attention_mask=batch_masks,\n labels=batch_tags\n )\n logits = outputs[1].detach().cpu().numpy()\n label_ids = batch_tags.to('cpu').numpy()\n toks = batch_tokens.to('cpu').numpy()\n\n eval_loss += outputs[0].mean().item()\n batch_toks = [self.tokenizer.convert_ids_to_tokens(sentence) for sentence in toks]\n tokens.extend(batch_toks)\n eval_predictions.extend([list(p) for p in np.argmax(logits, axis=2)])\n eval_labels.extend(label_ids)\n\n eval_examples += batch_tokens.size(0)\n eval_steps += 1\n\n eval_loss = eval_loss / eval_steps\n\n predicted_tags, valid_tags, tokens = self.translate(eval_predictions, eval_labels, tokens)\n\n score_acc = accuracy_score(valid_tags, predicted_tags)\n score_f1 = f1_score(valid_tags, predicted_tags)\n score_p = precision_score(valid_tags, predicted_tags)\n score_r = recall_score(valid_tags, predicted_tags)\n report = classification_report(valid_tags, predicted_tags)\n\n return eval_loss, score_acc, score_f1, score_p, score_r, report\n\n def test(self, 
test_data: pd.DataFrame) -> (float, float, float):\n if not (os.path.exists(self.output_model_path) and os.path.isdir(self.output_model_path)):\n raise Exception(f\"A model with the given parameters has not been trained yet,\"\n f\" or is not located at `{self.output_model_path}`.\")\n models, _ = list_dir(self.output_model_path)\n models = [model_fname for model_fname in models if model_fname.startswith(self.output_model_fname)]\n print(\"Models:\", models)\n if not models:\n raise Exception(f\"There are no trained models with the given criteria: `{self.output_model_fname}`\")\n\n logger.info(\"Loading the testing data...\")\n test_data = self.convert_input(test_data)\n avg_acc, avg_f1, avg_p, avg_r, reports = [], [], [], [], []\n for model_fname in models:\n logger.info(f\"Loading {model_fname}...\")\n model = AutoModelForTokenClassification.from_pretrained(\n f\"{self.output_model_path}/{model_fname}\",\n num_labels=len(self.tag2code),\n label2id=self.tag2code,\n id2label=self.code2tag,\n output_attentions=False,\n output_hidden_states=False\n )\n model = model.to(self.device)\n _, acc, f1, p, r, report = self.__test(model, test_data)\n avg_acc.append(acc)\n avg_f1.append(f1)\n avg_p.append(p)\n avg_r.append(r)\n logger.info(f\"Testing P: {p:.4f}, R: {r:.4f}, F1: {f1:.4f}\")\n logger.info(f\"Testing classification report:\\n{report}\")\n logger.info(f\"Average accuracy: {np.mean(avg_acc):.4f}\")\n f1 = np.mean(avg_f1)\n p = np.mean(avg_p)\n r = np.mean(avg_r)\n logger.info(f\"Average P: {p:.4f}, R: {r:.4f}, F1: {f1:.4f}\")\n return p, r, f1\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--train', action='store_true')\n parser.add_argument('--train-iterations', type=int, default=1)\n parser.add_argument('--train-bundle', type=str, default=\"slo_misc-only\")\n parser.add_argument('--epochs', type=int, default=3)\n parser.add_argument('--test', action='store_true')\n parser.add_argument('--run-path', type=str, default=None)\n parser.add_argument('--full-finetuning', action='store_true')\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n global JOB_ID\n JOB_ID = os.environ['SLURM_JOB_ID'] if 'SLURM_JOB_ID' in os.environ else None\n logger.info(f\"Training new NER models\")\n logger.info(f\"SLURM_JOB_ID = {JOB_ID}\")\n logger.info(f\"Training: {args.train}\")\n logger.info(f\"Train iterations: {args.train_iterations}\")\n logger.info(f\"Train bundle: {args.train_bundle}\")\n logger.info(f\"Epochs: {args.epochs}\")\n logger.info(f\"Full finetuning: {args.full_finetuning}\")\n logger.info(f\"Testing: {args.test}\")\n logger.info(f\"Torch version {torch.__version__}\")\n logger.info(f\"Transformers version {transformers.__version__}\")\n\n train_bundles = {\n \"slo_misc\": {\n \"models\": [\n \"cro-slo-eng-bert\",\n \"bert-base-multilingual-cased\",\n \"bert-base-multilingual-uncased\",\n \"sloberta-1.0\",\n \"sloberta-2.0\",\n ],\n \"train\": {\n \"ssj500k-bsnlp2017-iterative\": {\n \"ssj500k\": LoadSSJ500k(),\n \"bsnlp-2017\": LoadBSNLP(lang='sl', year='2017'),\n },\n \"ssj500k-bsnlp-2017-combined\": {\n \"combined\": LoadCombined([LoadSSJ500k(), LoadBSNLP(lang='sl', year='2017')]),\n },\n \"ssj500k-bsnlp-2021-iterative\": {\n \"ssj500k\": LoadSSJ500k(),\n \"bsnlp2021\": LoadBSNLP(lang='sl', year='2021'),\n },\n \"ssj500k-bsnlp-2021-combined\": {\n \"combined\": LoadCombined([LoadSSJ500k(), LoadBSNLP(lang='sl', year='2021')]),\n },\n \"ssj500k-bsnlp-all-iterative\": {\n \"ssj500k\": LoadSSJ500k(),\n \"bsnlp2017\": LoadBSNLP(lang='sl', 
year='all'),\n },\n \"ssj500k-bsnlp-all-combined\": {\n \"combined\": LoadCombined([LoadSSJ500k(), LoadBSNLP(lang='sl', year='all')]),\n },\n \"ssj500k\": {\n \"ssj500k\": LoadSSJ500k(),\n },\n \"bsnlp-2017\": {\n \"bsnlp-2017\": LoadBSNLP(lang='sl', year='2017'),\n },\n \"bsnlp-2021\": {\n \"bsnlp-2021\": LoadBSNLP(lang='sl', year='2021'),\n },\n \"bsnlp-all\": {\n \"bsnlp-all\": LoadBSNLP(lang='sl', year='all'),\n },\n },\n \"test\": {\n \"ssj500k\": LoadSSJ500k(),\n \"bsnlp-2017\": LoadBSNLP(lang='sl', year='2017'),\n \"bsnlp-2021\": LoadBSNLP(lang='sl', year='2021'),\n \"bsnlp-all\": LoadBSNLP(lang='sl', year='all')\n },\n },\n \"slo_misc-submission\": {\n \"models\": [\n \"cro-slo-eng-bert\",\n \"sloberta-1.0\",\n \"sloberta-2.0\",\n ],\n \"train\": {\n \"ssj500k-bsnlp-2021-iterative\": {\n \"ssj500k\": LoadSSJ500k(),\n \"bsnlp2021\": LoadBSNLP(lang='sl', year='2021'),\n },\n \"bsnlp-all\": {\n \"bsnlp-all\": LoadBSNLP(lang='sl', year='all'),\n },\n },\n \"test\": {\n \"bsnlp-2021\": LoadBSNLP(lang='sl', year='2021'),\n },\n },\n \"slo_misc-only-submission\": {\n \"models\": [\n \"sloberta-1.0\",\n ],\n \"train\": {\n \"bsnlp-2021\": {\n \"bsnlp-2021\": LoadBSNLP(lang='sl', year='2021', merge_misc=False, misc_data_only=True),\n }\n },\n \"test\": {\n \"bsnlp-2021\": LoadBSNLP(lang='sl', year='2021', merge_misc=False, misc_data_only=True),\n },\n },\n \"slo_misc-only\": {\n \"models\": [\n \"cro-slo-eng-bert\",\n \"bert-base-multilingual-cased\",\n \"bert-base-multilingual-uncased\",\n \"sloberta-1.0\",\n \"sloberta-2.0\",\n ],\n \"train\": {\n \"bsnlp-2021\": {\n \"bsnlp-2021\": LoadBSNLP(lang='sl', year='2021', merge_misc=False, misc_data_only=True),\n }\n },\n \"test\": {\n \"bsnlp-2021\": LoadBSNLP(lang='sl', year='2021', merge_misc=False, misc_data_only=True),\n },\n },\n \"slo_all\": {\n \"models\": [\n \"cro-slo-eng-bert\",\n \"bert-base-multilingual-cased\",\n \"bert-base-multilingual-uncased\",\n \"sloberta-1.0\",\n \"sloberta-2.0\",\n ],\n \"train\": {\n \"bsnlp-2021\": {\n \"bsnlp-2021\": LoadBSNLP(lang='sl', year='2021', merge_misc=False),\n }\n },\n \"test\": {\n \"bsnlp-2021\": LoadBSNLP(lang='sl', year='2021', merge_misc=False),\n }\n },\n \"multilang_all\": {\n \"models\": [\n \"bert-base-multilingual-cased\",\n ],\n \"train\": {\n 'bsnlp-2021-bg': {'bsnlp-2021-bg': LoadBSNLP(lang='bg', year='2021', merge_misc=False)},\n 'bsnlp-2021-cs': {'bsnlp-2021-cs': LoadBSNLP(lang='cs', year='2021', merge_misc=False)},\n 'bsnlp-2021-pl': {'bsnlp-2021-pl': LoadBSNLP(lang='pl', year='2021', merge_misc=False)},\n 'bsnlp-2021-ru': {'bsnlp-2021-ru': LoadBSNLP(lang='ru', year='2021', merge_misc=False)},\n 'bsnlp-2021-sl': {'bsnlp-2021-sl': LoadBSNLP(lang='sl', year='2021', merge_misc=False)},\n 'bsnlp-2021-uk': {'bsnlp-2021-uk': LoadBSNLP(lang='uk', year='2021', merge_misc=False)},\n 'bsnlp-2021-all': {'bsnlp-2021-all': LoadBSNLP(lang='all', year='2021', merge_misc=False)},\n },\n \"test\": {\n \"bsnlp-2021-bg\": LoadBSNLP(lang='bg', year='2021', merge_misc=False),\n \"bsnlp-2021-cs\": LoadBSNLP(lang='cs', year='2021', merge_misc=False),\n \"bsnlp-2021-pl\": LoadBSNLP(lang='pl', year='2021', merge_misc=False),\n \"bsnlp-2021-ru\": LoadBSNLP(lang='ru', year='2021', merge_misc=False),\n \"bsnlp-2021-sl\": LoadBSNLP(lang='sl', year='2021', merge_misc=False),\n \"bsnlp-2021-uk\": LoadBSNLP(lang='uk', year='2021', merge_misc=False),\n \"bsnlp-2021-all\": LoadBSNLP(lang='all', year='2021', merge_misc=False),\n }\n }\n }\n\n chosen_bundle = args.train_bundle\n if chosen_bundle not in 
train_bundles:\n raise Exception(f\"Invalid bundle chosen: {chosen_bundle}\")\n\n bundle = train_bundles[chosen_bundle]\n models = bundle['models']\n train_data = bundle['train']\n test_data = bundle['test']\n\n if not args.run_path:\n run_time = datetime.now().isoformat()[:-7] # exclude the ms\n run_path = f'./data/runs/run_{JOB_ID if JOB_ID is not None else run_time}_{chosen_bundle}'\n else:\n run_path = args.run_path\n run_time = run_path.split('/')[-1][4:]\n\n pathlib.Path(run_path).mkdir(parents=True, exist_ok=True)\n pathlib.Path(f'{run_path}/models').mkdir(parents=True, exist_ok=True)\n logger.info(f'Running path: `{run_path}`, run time: `{run_time}`')\n\n tag2code, code2tag = list(test_data.values())[0].encoding()\n\n test_f1_scores = []\n for model_name, fine_tuning in product(models, [True, False]):\n logger.info(f\"Working on model: `{model_name}`...\")\n for train_bundle, loaders in train_data.items():\n bert = BertModel(\n tag2code=tag2code,\n code2tag=code2tag,\n epochs=args.epochs,\n input_model_path=f'./data/models/{model_name}',\n output_model_path=f'{run_path}/models',\n output_model_fname=f'{model_name}-{train_bundle}'\n f\"{'-finetuned' if fine_tuning else ''}\"\n f'-{args.epochs}-epochs',\n tune_entire_model=fine_tuning,\n use_test=True,\n )\n\n if args.train:\n logger.info(f\"Training data bundle: `{train_bundle}`\")\n bert.train(loaders)\n\n if args.test:\n for test_dataset, dataloader in test_data.items():\n logger.info(f\"Testing on `{test_dataset}`\")\n p, r, f1 = bert.test(test_data=dataloader.test())\n test_f1_scores.append({\n \"model_name\": model_name,\n \"fine_tuned\": fine_tuning,\n \"train_bundle\": train_bundle,\n \"epochs\": args.epochs,\n \"test_dataset\": test_dataset,\n \"precision_score\": p,\n \"recall_score\": r,\n \"f1_score\": f1\n })\n logger.info(f\"[{train_bundle}][{test_dataset}] P = {p:.4f}, R = {r:.4f}, F1 = {f1:.4f}\")\n if args.test:\n scores = pd.DataFrame(test_f1_scores)\n scores.to_csv(f'{run_path}/training_scores-{chosen_bundle}-{JOB_ID}.csv', index=False)\n logger.info(f'Entire training suite is done.')\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.7044673562049866, "alphanum_fraction": 0.7731958627700806, "avg_line_length": 35.375, "blob_id": "2dc07bfb0d72dc51457b0c523bd56925163a16a7", "content_id": "d9e5f82dab216733fcfa9a622a2d4d5ef747a5c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 291, "license_type": "no_license", "max_line_length": 113, "num_lines": 8, "path": "/data/datasets/ssj500k/fetch.py", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "import urllib2\nresponse=urllib2.urlopen('https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1210/ssj500k-en.TEI.zip')\narchive=response.read()\nfile=open('ssj500k-en.TEI.zip','w')\nfile.write(archive)\nfile.close()\nimport zipfile\nzipfile.ZipFile('ssj500k-en.TEI.zip').extractall('.')\n" }, { "alpha_fraction": 0.6522449254989624, "alphanum_fraction": 0.6587755084037781, "avg_line_length": 32.08108139038086, "blob_id": "199cacea5589a3764609f4b00dcc838d0e6e7ee6", "content_id": "cdf582d9b4d33e38a01203737c0d9013f734756c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1225, "license_type": "no_license", "max_line_length": 101, "num_lines": 37, "path": "/src/utils/prepare_output.py", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "import argparse\nimport pandas as 
pd\n\nfrom src.utils.load_documents import LoadBSNLPDocuments\nfrom src.utils.update_documents import UpdateBSNLPDocuments\nfrom src.utils.utils import list_dir\n\n\ndef parser_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--lang', type=str, default='all')\n parser.add_argument('--year', type=str, default='2021')\n parser.add_argument('--run-path', type=str, default=None)\n return parser.parse_args()\n\n\ndef convert_files(\n run_path: str,\n lang: str = 'sl',\n year: str = '2021',\n) -> None:\n dirs, _ = list_dir(f'{run_path}/predictions/bsnlp')\n for dir in dirs:\n print(f\"Working on {dir}\")\n loader = LoadBSNLPDocuments(year=year, lang=lang, path=f'{run_path}/predictions/bsnlp/{dir}')\n updater = UpdateBSNLPDocuments(year=year, lang=lang, path=f'{run_path}/out/{dir}')\n data = loader.load_predicted(folder='clustered')\n # data = loader.load_predicted()\n updater.update_predicted(data)\n\n\nif __name__ == '__main__':\n args = parser_args()\n print(f'Run path: {args.run_path}')\n print(f'Lang: {args.lang}')\n print(f'Year: {args.year}')\n convert_files(args.run_path, lang=args.lang, year=args.year)\n\n" }, { "alpha_fraction": 0.688632607460022, "alphanum_fraction": 0.7215815782546997, "avg_line_length": 49.58333206176758, "blob_id": "96e715eee99a6b2f9ba345755a58e749faf3a49c", "content_id": "17a2353b03424d7d57bfb8d340198fbc8bb50285", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1214, "license_type": "no_license", "max_line_length": 263, "num_lines": 24, "path": "/bin/run-join-clusters.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#SBATCH --nodes=1\n#SBATCH --ntasks=1\n#SBATCH --mem=32GB\n#SBATCH --cpus-per-task=8\n#SBATCH --time=3-00:00:00\n#SBATCH --output=logs/BSNLP-join-clusters-%J.out\n#SBATCH --error=logs/BSNLP-join-clusters-%J.err\n#SBATCH --job-name=\"BSNLP-join-clusters\"\n\nCONTAINER_IMAGE_PATH=\"$PWD/containers/pytorch-image-new.sqfs\"\n\necho \"$SLURM_JOB_ID -> Generating the clusters for the model...\"\n\n# the following command opens a bash terminal of an already existing container\n# with the current directory (.) 
mounted\nsrun \\\n --container-image \"$CONTAINER_IMAGE_PATH\" \\\n --container-mounts \"$PWD\":/workspace,/shared/datasets/rsdo:/data \\\n --container-entrypoint /workspace/bin/exec-join-clusters.sh --year \"test_2021\" --lang \"sl\" --pred-path \"./data/runs/run_2668_slo_misc-submission\" --cluster-path \"./data/deduper/runs/run_2508\"\n # --container-entrypoint /workspace/bin/exec-join-clusters.sh --test --tsh 0.35 --train-chars --run-path \"./data/deduper/runs/run_2605\" --data-path \"./data/runs/run_l1o_2551/predictions/bsnlp/bert-base-multilingual-cased-bsnlp-exclude-none-finetuned-5-epochs\"\n # --container-entrypoint /workspace/bin/exec-clustering.sh --test --tsh 0.35 --train\n\necho \"$SLURM_JOB_ID -> Done.\"\n" }, { "alpha_fraction": 0.694597601890564, "alphanum_fraction": 0.7089305520057678, "avg_line_length": 32.592594146728516, "blob_id": "c64e19575dc255a9d443530ac4505a2a7774f954", "content_id": "bdc411a851f6a3bfe78f1827045ec277d6db88dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 907, "license_type": "no_license", "max_line_length": 122, "num_lines": 27, "path": "/bin/run-bert-train.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#SBATCH --nodes=1\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=8\n#SBATCH --gpus-per-task=1\n#SBATCH --time=3-00:00:00\n#SBATCH --output=logs/NER-BERT-train-%J.out\n#SBATCH --error=logs/NER-BERT-train-%J.err\n#SBATCH --job-name=\"NER-BERT-train\"\n\nset -euo pipefail\n\nCONTAINER_IMAGE_PATH=\"$PWD/containers/pytorch-image-new.sqfs\"\n\necho \"$SLURM_JOB_ID -> Training the model...\"\n\n# the following command opens a bash terminal of an already existing container\n# with the current directory (.) mounted\nsrun \\\n --container-image \"$CONTAINER_IMAGE_PATH\" \\\n --container-mounts \"$PWD\":/workspace,/shared/datasets/rsdo:/data \\\n --container-entrypoint /workspace/bin/exec-bert.sh --train --epochs 5 --test --train-bundle \"slo_misc-only-submission\"\n # --container-entrypoint /workspace/bin/exec-bert.sh --train --epochs 5 --test --train-bundle \"slo_misc-submission\"\n\necho \"$SLURM_JOB_ID -> Done.\"\n\n#wait\n" }, { "alpha_fraction": 0.7053571343421936, "alphanum_fraction": 0.7053571343421936, "avg_line_length": 21.399999618530273, "blob_id": "28d4ee30fbc1053e7d4798b732388419971095c5", "content_id": "4489db96abb25ea5715a689c183fc136d1bdd29d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 112, "license_type": "no_license", "max_line_length": 44, "num_lines": 5, "path": "/bin/exec-main.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -euo pipefail\n\necho \"Obtaining file structure ...\"\nPYTHONPATH=. 
python src/analyze/main.py \"$@\"\n" }, { "alpha_fraction": 0.6944797039031982, "alphanum_fraction": 0.7078045606613159, "avg_line_length": 33.011112213134766, "blob_id": "d93e30093d079a68a07261340f173602c745d317", "content_id": "28050e56846e9c9f434bba09a5618cc5d026e946", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 3152, "license_type": "no_license", "max_line_length": 139, "num_lines": 90, "path": "/java-eval/readme.txt", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "====================================\r\nBSNLP-NER 2019 shared task evaluator\r\n====================================\r\n\r\nThe package contains a system response validation script and an evaluation script that allows to \r\ncompare the performance of various system runs.\r\n\r\n1. VALIDATION\r\n=============\r\n\r\nUsage: java -cp bsnlp-ner-evaluator-19.0.4.jar sigslav.ConsistencyCheck [path-to-files] [out-file]\r\n\r\n- path-to-file - a path to a directory containing the files to be validated (e.g. data/golden/trump/cs)\r\n- out-file - an output file, which contains the validation info\r\n\r\n\r\n2. EVALUATION\r\n=============\r\n\r\nUsage: java -cp bsnlp-ner-evaluator-19.0.4.jar sigslav.BNEvaluator [path-to-data] [path-to-reports] [path-to-error-log] [path-to-summaries]\r\n\r\n- path-to-data - data with gold annotations and system outputs (e.g. data)\r\n- path-to-reports - en empty directory, where per-system evaluation reports will be saved (e.g. reports)\r\n- path-to-error-log - en empty directory, where the per-system per-evaluation metric error reports will be saved (e.g. error-logs)\r\n- path-to-summaries - en empty directory, where the system results' summaries will be saved (a .csv file) (e.g. summaries)\r\n\r\nExample: java -cp bsnlp-ner-evaluator-19.0.4.jar sigslav.BNEvaluator data reports error-logs summaries\r\n\r\nData should be organized as follows:\r\n\r\ndata\r\n- golden\r\n- - trump\r\n- - - cs\r\n- - - - file_103.txt\r\n- - - - file_104.txt\r\n- - - - ...\r\n- - - sk\r\n- - - ru\r\n- - - ...\r\n- - eu\r\n- system-1\r\n- - trump\r\n- - - cs\r\n- - - sk\r\n- - - ru\r\n- - - ...\r\n- - eu\r\n- system-2\r\n...\r\n\r\nThe document IDs are taken from the file (the first line). The file names are not important.\r\n\r\nResults:\r\n\r\n1) named entity recognition\r\n\r\n1a) Relaxed evaluation, partial match: an entity mentioned in a given document is\r\nconsidered to be extracted correctly if the system response includes at\r\nleast one annotation of a named mention of this entity (regardless whether\r\nthe extracted mention is base form).\r\nEven partial match counts.\r\n\r\n1b) Relaxed evaluation, exact match: an entity mentioned in a given document is\r\nconsidered to be extracted correctly if the system response includes at\r\nleast one annotation of a named mention of this entity (regardless whether\r\nthe extracted mention is base form).\r\nThe full string have to be matched.\r\n\r\n1c) Strict evaluation: the system response should include exactly one\r\nannotation for each unique form of a named mention of an entity that is\r\nreferred to in a given document, i.e., capturing and listing all variants\r\nof an entity is required. 
Partial matches are errors.\r\n\r\n2) Name normalisation\r\n\r\nTaking all mentions, but only those that need to be normalized on both sides (golden and system annotations).\r\n\r\n3) Coreference resolution (identifying mentions of the same entity)\r\n\r\nComputed by the LEA metric: www.aclweb.org/anthology/P16-1060.pdf\r\nNote: the importance of an entity is taken as log2 (number of mentions).\r\n\r\n3a) at document level\r\n\r\n3b) at single-language level\r\n\r\n3c) at crosslingual level\r\n\r\nOnly cross-lingual links are considered by the metric. Entity weighting stays the same (based on the number of entity mentions). \r\n" }, { "alpha_fraction": 0.658085823059082, "alphanum_fraction": 0.7023102045059204, "avg_line_length": 46.34375, "blob_id": "6187ec6f90d6048be166f5fdb6706aa5f000c890", "content_id": "022249e2f454dd6b6b5964a354df881b2149cd04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1515, "license_type": "no_license", "max_line_length": 169, "num_lines": 32, "path": "/bin/run-bert-pred.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#SBATCH --nodes=1\n#SBATCH --ntasks=1\n#SBATCH --mem-per-cpu=16GB\n#SBATCH --cpus-per-task=16\n#SBATCH --gpus-per-task=2\n#SBATCH --time=3-00:00:00\n#SBATCH --output=logs/NER-BERT-pred-%J.out\n#SBATCH --error=logs/NER-BERT-pred-%J.err\n#SBATCH --job-name=\"NER-BERT-pred\"\n\nset -euo pipefail\n\nCONTAINER_IMAGE_PATH=\"$PWD/containers/pytorch-image-new.sqfs\"\n\necho \"$SLURM_JOB_ID -> Predicting from the model...\"\n\n# the following command opens a bash terminal of an already existing container\n# with the current directory (.) mounted\nsrun \\\n --container-image \"$CONTAINER_IMAGE_PATH\" \\\n --container-mounts \"$PWD\":/workspace,/shared/datasets/rsdo:/data \\\n --container-entrypoint /workspace/bin/exec-pred.sh --lang \"sl\" --year \"test_2021\" --run-path \"./data/runs/run_2668_slo_misc-submission\" --merge-misc # all slo models\n # --container-entrypoint /workspace/bin/exec-pred.sh --lang \"all\" --year \"test_2021\" --run-path \"./data/runs/run_l1o_2551\" # all models submission\n # --container-entrypoint /workspace/bin/exec-pred.sh --lang sl --merge-misc --run-path \"./data/runs/run_2499_slo_misc\"\n # --container-entrypoint /workspace/bin/exec-pred.sh --lang all --run-path \"./data/runs/run_2497_multilang_all\"\n # --container-entrypoint /workspace/bin/exec-pred.sh --lang sl --run-path \"./data/runs/run_2021-02-19T08:02:08_slo-misc-models\"\n # --container-entrypoint /workspace/bin/exec-pred.sh --lang sl --run-path \"./data/runs/run_2021-02-17T11:42:19_slo-models\"\n\necho \"$SLURM_JOB_ID -> Done.\"\n\n#wait\n" }, { "alpha_fraction": 0.6735357642173767, "alphanum_fraction": 0.7071583271026611, "avg_line_length": 39.08695602416992, "blob_id": "713cfc5c9337cbe8a11aa954d242f170906b3193", "content_id": "9975aa9be89092d8795b66a0ee3534d0b82b3996", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 922, "license_type": "no_license", "max_line_length": 141, "num_lines": 23, "path": "/bin/run-output.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#SBATCH --nodes=1\n#SBATCH --ntasks=1\n#SBATCH --mem=256GB\n#SBATCH --cpus-per-task=64\n#SBATCH --time=3-00:00:00\n#SBATCH --output=logs/BSNLP-output-%J.out\n#SBATCH --error=logs/BSNLP-output-%J.err\n#SBATCH 
--job-name=\"BSNLP-output\"\n\nCONTAINER_IMAGE_PATH=\"$PWD/containers/pytorch-image-new.sqfs\"\n\necho \"$SLURM_JOB_ID -> Generating the output files for the models...\"\n\n# the following command opens a bash terminal of an already existing container\n# with the current directory (.) mounted\nsrun \\\n --container-image \"$CONTAINER_IMAGE_PATH\" \\\n --container-mounts \"$PWD\":/workspace,/shared/datasets/rsdo:/data \\\n --container-entrypoint /workspace/bin/exec-output.sh --lang \"sl\" --year \"test_2021\" --run-path \"./data/runs/run_2668_slo_misc-submission\"\n # --container-entrypoint /workspace/bin/exec-output.sh --lang \"all\" --year \"test_2021\" --run-path \"./data/runs/run_l1o_2551\"\n\necho \"$SLURM_JOB_ID -> Done.\"\n" }, { "alpha_fraction": 0.5507786273956299, "alphanum_fraction": 0.5612728595733643, "avg_line_length": 34.590362548828125, "blob_id": "c7ddb7bfc148b35437f457b56c0d9683fc609b4c", "content_id": "f787f880dee5f8a863fdabc93a331c7bbf5ad107", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2954, "license_type": "no_license", "max_line_length": 102, "num_lines": 83, "path": "/src/train/trainer.py", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport logging\nimport pandas as pd\nimport pathlib\n\nfrom tqdm import tqdm\nfrom datetime import datetime\n\nfrom src.train.crosloeng import BertModel\nfrom src.utils.load_dataset import LoadBSNLP\n\nlogging.basicConfig(\n stream=sys.stdout,\n level=logging.DEBUG,\n format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s'\n)\nlogger = logging.getLogger('TrainL1OStrategy')\n\nrun_time = datetime.now().isoformat()[:-7] # exclude the ms\nJOB_ID = os.environ['SLURM_JOB_ID'] if 'SLURM_JOB_ID' in os.environ else None\nrun_path = f'./data/runs/run_l1o_{JOB_ID if JOB_ID is not None else run_time}'\npathlib.Path(run_path).mkdir(parents=True, exist_ok=True)\npathlib.Path(f'{run_path}/models').mkdir(parents=True, exist_ok=True)\n\ndef main():\n epochs = 5\n fine_tuning = True\n model_name = 'bert-base-multilingual-cased'\n test_scores = []\n for excluded_dataset in tqdm(LoadBSNLP.datasets['2021'], desc='Excluded Dataset'):\n excluded_dataset = 'none'\n logger.info(f\"Excluding {excluded_dataset}\")\n train_bundle = f'bsnlp-exclude-{excluded_dataset}'\n train_datasets = {\n train_bundle: LoadBSNLP(\n lang='all',\n year='2021',\n merge_misc=False,\n # exclude=excluded_dataset\n )\n }\n # test_dataset = LoadBSNLP(\n # lang='all',\n # year='2021',\n # data_set=excluded_dataset,\n # merge_misc=False,\n # )\n tag2code, code2tag = train_datasets[train_bundle].encoding()\n bert = BertModel(\n tag2code=tag2code,\n code2tag=code2tag,\n epochs=epochs,\n input_model_path=f'./data/models/{model_name}',\n output_model_path=f'{run_path}/models',\n output_model_fname=f'{model_name}-{train_bundle}'\n f\"{'-finetuned' if fine_tuning else ''}\"\n f'-{epochs}-epochs',\n tune_entire_model=fine_tuning,\n use_test=True,\n )\n logger.info(f\"Training data bundle: `{train_bundle}`\")\n bert.train(train_datasets)\n # logger.info(f\"Testing on `{excluded_dataset}`\")\n # p, r, f1 = bert.test(test_data=test_dataset.load_all())\n # test_scores.append({\n # \"model_name\": model_name,\n # \"fine_tuned\": fine_tuning,\n # \"train_bundle\": train_bundle,\n # \"epochs\": epochs,\n # \"test_dataset\": excluded_dataset,\n # \"precision_score\": p,\n # \"recall_score\": r,\n # \"f1_score\": f1\n # })\n # 
logger.info(f\"[{train_bundle}][{excluded_dataset}] P = {p:.4f}, R = {r:.4f}, F1 = {f1:.4f}\")\n        break\n    scores = pd.DataFrame(test_scores)\n    scores.to_csv(f'{run_path}/training_scores-L1O-{JOB_ID}.csv', index=False)\n\n\nif __name__ == '__main__':\n    main()\n" }, { "alpha_fraction": 0.731148362159729, "alphanum_fraction": 0.7454410791397095, "avg_line_length": 57.81159591674805, "blob_id": "29707c76af33073d695c987e3130a5a7d5f4925a", "content_id": "eff48d9bb2af60feeb263848e5753921cbd3d0b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4058, "license_type": "no_license", "max_line_length": 414, "num_lines": 69, "path": "/README.md", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "# BSNLP Shared task 2021\n*Laboratory for Data Technologies, University of Ljubljana*\n\nShared task official [website](http://bsnlp.cs.helsinki.fi/shared-task.html)\n\nThe guidelines are [here](http://bsnlp.cs.helsinki.fi/System_response_guidelines-1.2.pdf).\n\n## Setup\n\nPut the following models in the `data/models/` directory:\n- bert-base-multilingual-cased [link](https://huggingface.co/bert-base-multilingual-cased)\n- bert-base-multilingual-uncased [link](https://huggingface.co/bert-base-multilingual-uncased)\n- cro-slo-eng-bert [link](https://www.clarin.si/repository/xmlui/handle/11356/1330)\n- sloberta-1.0 [link](https://www.clarin.si/repository/xmlui/handle/11356/1387)\n- sloberta-2.0 [link](https://www.clarin.si/repository/xmlui/handle/11356/1397)\n\n### Running locally\n```\npip install -r requirements.txt\n```\nRun the `./bin/exec-*.sh` scripts.\n\n### Running on a SLURM-enabled cluster\nThese instructions assume that you will be running on an NVIDIA-enabled cluster, with [enroot](https://github.com/nvidia/enroot) containers.\n```\nsbatch ./bin/run-setup.sh\n```\nAfterwards, run the `./bin/run-*.sh` scripts as required.\n\nShould you need to run the code in a singularity-enabled SLURM cluster, take a look at [this file](./bin/singularity-commands.sh) and how to [run it](./bin/run-singularity.sh).\n\n## Order of execution\n\n1. run [`src/analyze/main.py`](./src/analyze/main.py) ([script](./bin/exec-main.sh)) to get the dataset file structure\n2. run [`src/transform/annotate_docs.py`](./src/transform/annotate_docs.py) ([script](./bin/exec-annotate.sh)) to tokenize the dataset and to obtain the lemmas. This will generate the `data/bsnlp/<dataset-name>/merged/<lang>` files\n3. run [`src/transform/create_splits.py`](src/transform/create_splits.py) ([script](./bin/exec-splits.sh)). This will split the dataset into training, validation, and test sets for each language, and store them into `data/bsnlp/<dataset-name>/merged/<lang>/(dev|test|train)_{lang}.csv`. Note: the split is performed on sentences. Each sentence is chosen at random, to preserve the context of all the named entities\n4. run [`src/train/crosloeng.py`](./src/train/crosloeng.py) ([script](./bin/run-bert-train.sh)) to train the models.\n5. run [`src/eval/model_eval.py`](./src/eval/model_eval.py) ([script](./bin/run-bert-pred.sh)) to generate the predictions of the trained models. Results are stored in `./data/runs/run_<JOB_ID>/`.\n6. run [`src/matching/match_dedupe.py`](./src/matching/match_dedupe.py) ([script](./bin/run-dedupe.sh)) to obtain the NE linkage. Results stored in `./data/deduper/runs/run_<JOB_ID>/` (self-created)\n7. run (TODO) to merge the results from the entity linking and the NER tasks.\n8. 
run [`src/utils/prepare_output.py`](./src/utils/prepare_output.py) ([script](./bin/exec-output.sh)) to generate the output files in BSNLP-compliant format\n9. run [`bin/run-eval.sh`](./bin/run-eval.sh), which is a wrapper of the evaluation script provided by BSNLP organizers to obtain final results. More details can be found [here](./java-eval/readme.txt).\n\nMind the arguments you pass into the scripts, for more details look at the `parse_args` functions in the respective `.py` files, such as [here](./src/train/crosloeng.py).\n\n## Algorithm (open setting):\n\n* INPUT\n    * Text documents from web including online media. Each collection is about a certain event or entity.\n* OUTPUT \n    * Recognized entities for each document without indexes. Each entity *lemmatized* and *linked between documents and languages*. Tasks: NERC (PER, ORG, LOC, EVT, PRO), name lemmatization, entity matching\n    * NOTE: evaluation is case-insensitive, i.e. test data in lower case.\n\nPart-based submissions are also taken into account.\n\n## Evaluation:\n\n* Relaxed\n* Strict\n    * Exactly one annotation per entity instance (deduplication) \n\n## Dataset format\n\n```\nNamed-entity-mention <TAB> base-form <TAB> category <TAB> cross-lingual ID\n```\n## Live documentation\n\n[https://drive.google.com/drive/folders/1Zr4dIuEnBmE4yOvSdph7MQc_gnu85ISZ](https://drive.google.com/drive/folders/1Zr4dIuEnBmE4yOvSdph7MQc_gnu85ISZ)\n" }, { "alpha_fraction": 0.7459016442298889, "alphanum_fraction": 0.7786885499954224, "avg_line_length": 29.5, "blob_id": "20b35d7817c601f657ad262f67a4b8d8325c554d", "content_id": "00cc3ec060917078e3df847e7b3d078ea9ee8d12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 122, "license_type": "no_license", "max_line_length": 90, "num_lines": 4, "path": "/java-eval/consistency-check.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -euo pipefail\n\njava -cp bsnlp-ner-evaluator-19.0.4.jar sigslav.ConsistencyCheck data/golden/brexit/cs out\n" }, { "alpha_fraction": 0.5842490792274475, "alphanum_fraction": 0.5842490792274475, "avg_line_length": 22.7391300201416, "blob_id": "ba038fccf3552c2691fdc1f3ceed9dad8d6bd16d", "content_id": "58a0d040b78cc908ba72b123d2f85ffa62f939f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 546, "license_type": "no_license", "max_line_length": 74, "num_lines": 23, "path": "/src/train/model.py", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "from typing import Any\n\nimport pandas as pd\n\n\nclass Model:\n    def __init__(self) -> None:\n        pass\n\n    def convert_input(self, input_data: pd.DataFrame) -> Any:\n        \"\"\"\n        Convert the data to the correct input format for the model\n        By default, we assume that it is already in the correct format\n        :param input_data:\n        :return:\n        \"\"\"\n        return input_data\n\n    def train(self, data_loaders: dict):\n        pass\n\n    def test(self, test_data: pd.DataFrame) -> (float, float, float):\n        pass\n" }, { "alpha_fraction": 0.5769230723381042, "alphanum_fraction": 0.5828402638435364, "avg_line_length": 13.083333015441895, "blob_id": "74d5f3da627cf65a82aa13d7b6e00388a733c1a4", "content_id": "172088db3c2aee8535985aa987064d376c1f13f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 338, "license_type": "no_license", "max_line_length": 31, "num_lines": 24, "path": "/Pipfile", "repo_name": 
"UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "[[source]]\nname = \"pypi\"\nurl = \"https://pypi.org/simple\"\nverify_ssl = true\n\n[dev-packages]\n\n[packages]\npandas = \"*\"\nclassla = \"*\"\nstanza = \"*\"\nfuzzywuzzy = \"*\"\nsklearn = \"*\"\nscikit-learn = \"*\"\nsnap-stanford = \"*\"\nnetworkx = \"*\"\nmatplotlib = \"*\"\ndedupe = \"*\"\npyconll = \"*\"\nseqeval = \"*\"\ntensorflow = \"*\"\n\n[requires]\npython_version = \"3.7\"\n" }, { "alpha_fraction": 0.6886792182922363, "alphanum_fraction": 0.7264150977134705, "avg_line_length": 25.5, "blob_id": "b2c1a5b34e275dde5e874bc9162d57c251249dd7", "content_id": "43952dcfe3cb72f30e8939388835ee2acc049d12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 106, "license_type": "no_license", "max_line_length": 74, "num_lines": 4, "path": "/bin/exec-eval.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -euo pipefail\n\njava -cp java-eval/bsnlp-ner-evaluator-19.0.4.jar sigslav.BNEvaluator \"$@\"\n" }, { "alpha_fraction": 0.7048872113227844, "alphanum_fraction": 0.731203019618988, "avg_line_length": 34.46666717529297, "blob_id": "c767397be83019048ea958bd3c30633baa4cc077", "content_id": "7cf50dd50dc6c0d10078b64283ed3b6782469bcc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 532, "license_type": "no_license", "max_line_length": 107, "num_lines": 15, "path": "/bin/run-singularity.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#SBATCH --nodes=1\n#SBATCH --ntasks=1\n#SBATCH --mem=64GB\n#SBATCH --cpus-per-task=16\n#SBATCH --time=3-00:00:00\n#SBATCH --output=logs/BSNLP-cluster-%J.out\n#SBATCH --error=logs/BSNLP-cluster-%J.err\n#SBATCH --job-name=\"BSNLP-cluster\"\n\nCONTAINER_IMAGE_PATH=\"$PWD/containers/container.sif\"\n\n# singularity run --nv $CONTAINER_IMAGE_PATH bin/exec-bert.sh --full-finetuning --epochs 5 --test # --train\nsingularity run --nv $CONTAINER_IMAGE_PATH bin/exec-clustering.sh\n# singularity run --nv $CONTAINER_IMAGE_PATH bin/exec-pred.sh\n" }, { "alpha_fraction": 0.7203390002250671, "alphanum_fraction": 0.7203390002250671, "avg_line_length": 22.600000381469727, "blob_id": "d6681f64d9a4ee0551412e21e345611eaf5d3273", "content_id": "3ed7eab29d9aaa8a7eaacd6e134240b909b4b89c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 118, "license_type": "no_license", "max_line_length": 52, "num_lines": 5, "path": "/bin/exec-output.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -euo pipefail\n\necho \"Generating output files...\"\nPYTHONPATH=. 
python src/utils/prepare_output.py \"$@\"\n" }, { "alpha_fraction": 0.5923135876655579, "alphanum_fraction": 0.5965411067008972, "avg_line_length": 35.44257736206055, "blob_id": "14654d3448d30a6630ea56a413850849d81a28e7", "content_id": "b8ac63c59e66692a1de7209255c035d9ec0add21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13010, "license_type": "no_license", "max_line_length": 200, "num_lines": 357, "path": "/src/matching/match_dedupe.py", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "import sys\nimport os\nimport argparse\nimport json\nimport pathlib\nimport pandas as pd\nimport logging\n\nfrom tqdm import tqdm\nfrom dedupe import Dedupe, StaticDedupe, console_label\nfrom fuzzywuzzy import fuzz\nfrom datetime import datetime\nfrom collections import defaultdict\nfrom itertools import combinations, product\nfrom random import choices, random\nfrom typing import Iterable, Callable\n\nfrom src.utils.utils import list_dir\n\n\nlogging.basicConfig(\n stream=sys.stdout,\n level=logging.DEBUG,\n format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s'\n)\nlogger = logging.getLogger('DedupeMatching')\n\nBASE_FNAME: str = \"./data/deduper\"\nrun_time = datetime.now().isoformat()[:-7] # exclude the ms\nJOB_ID = os.environ['SLURM_JOB_ID'] if 'SLURM_JOB_ID' in os.environ else run_time\nRUN_BASE_FNAME = f\"{BASE_FNAME}/runs/run_{JOB_ID}\"\nDATA_PATH = f\"./data/datasets/bsnlp\"\nNER_FIELD = 'calcNER'\nRELEVANT_LANGS: list = ['bg', 'cs', 'pl', 'ru', 'sl', 'uk']\n\n# Dedup configuration variables\nSEARCH_CLOSEST: bool = True\nCHOOSE_K: int = 2 # determines how many samples of equivalent values to choose\nCLUSTER_THRESHOLD: float = 0.65\nDEDUPE_CORES_USED: int = 63\ndedupe_variables: list = [\n # document structure: docId,sentenceId,tokenId,text,lemma,calcLemma,upos,xpos,ner,clID\n # variables to consider:\n {\"field\": \"text\", \"type\": \"String\"},\n {\"field\": \"calcLemma\", \"type\": \"String\"},\n {\"field\": \"upos\", \"type\": \"String\"},\n {\"field\": \"xpos\", \"type\": \"String\"},\n {\"field\": \"ner\", \"type\": \"String\"},\n]\n\n\ndef merge_nes(\n nes: list\n) -> list:\n \"\"\"\n Merges the NEs in the form of the expected output\n :param nes:\n :return:\n \"\"\"\n merged = []\n for i, ne in enumerate(nes):\n if ne[NER_FIELD].startswith('I-'):\n continue\n j = i + 1\n ne['numTokens'] = 1\n while j < len(nes) and not nes[j][NER_FIELD].startswith('B-'):\n ne['text'] = f'{ne[\"text\"]} {nes[j][\"text\"]}'\n ne['lemma'] = f'{ne[\"lemma\"]} {nes[j][\"lemma\"]}'\n ne['calcLemma'] = f'{ne[\"calcLemma\"]} {nes[j][\"calcLemma\"]}'\n ne['sentenceId'] = f'{ne[\"sentenceId\"]}:{nes[j][\"sentenceId\"]}'\n ne['tokenId'] = f'{ne[\"tokenId\"]}:{nes[j][\"tokenId\"]}'\n ne['upos'] = f'{ne[\"upos\"]}:{nes[j][\"upos\"]}'\n ne['xpos'] = f'{ne[\"xpos\"]}:{nes[j][\"xpos\"]}'\n if nes[j][\"clID\"] != ne['clID']:\n print(f\"Inconsistent cluster ids: {nes[j]['clID']} vs {ne['clID']}, NE: {ne}\")\n ne['numTokens'] += 1\n j += 1\n ne[NER_FIELD] = ne[NER_FIELD][2:]\n merged.append(ne)\n return merged\n\n\ndef load_nes(\n datasets: list,\n) -> (dict, dict):\n documents = {}\n doc_alphabet = {}\n # doc_alphabet = defaultdict(dict)\n for dataset in datasets:\n dataset_name = dataset.split('/')[-1]\n if dataset_name not in ['covid-19', 'us_election_2020']:\n print(f\"Skipping {dataset_name}\")\n continue\n documents[dataset_name] = {}\n doc_alphabet[dataset_name] = defaultdict(dict)\n langs, _ = 
list_dir(f'{dataset}/predicted')\n for lang in langs:\n if lang.lower() not in RELEVANT_LANGS:\n logger.info(f\"Skipping {dataset_name}/{lang}\")\n continue\n documents[dataset_name][lang] = {}\n logger.info(f'Extracting from: {dataset}/{lang}')\n ne_path = f'{dataset}/predicted/{lang}'\n _, files = list_dir(ne_path)\n for file in files:\n df = pd.read_csv(f'{ne_path}/{file}', dtype={'docId': str, 'sentenceId': str, 'tokenId': str, 'clID': str,'text': str,'lemma': str,'calcLemma': str,'upos': str,'xpos': str,'ner': str})\n df['lang'] = lang\n df = df.fillna('N/A')\n records = merge_nes(df.loc[~(df[NER_FIELD] == 'O')].to_dict(orient='records'))\n for item in records:\n dkey = f\"{lang};{item['docId']};{item['sentenceId']};{item['tokenId']};{item['text']}\"\n fchar = item['text'][0].upper()\n if dkey in doc_alphabet[dataset_name][fchar]:\n raise Exception(f\"[doc_alphabet] COLLISION!!! {dkey}\")\n doc_alphabet[dataset_name][fchar][dkey] = item\n if dkey in documents[dataset_name][lang]:\n raise Exception(f\"[documents] COLLISION!!! {dkey}\")\n documents[dataset_name][lang][dkey] = item\n return {\n \"normal\": documents,\n \"alphabetized\": doc_alphabet,\n }\n\n\ndef load_data(\n clear_cache: bool = False\n) -> (dict, dict):\n cache_path = f'{RUN_BASE_FNAME}/cached_data.json'\n cached_file = pathlib.Path(cache_path)\n if not clear_cache and cached_file.exists() and cached_file.is_file():\n mod_time = datetime.fromtimestamp(cached_file.stat().st_mtime)\n logger.info(f\"Using cached data from `{cache_path}`, last modified at: `{mod_time.isoformat()}`\")\n with open(cache_path) as f:\n return json.load(f)\n # datasets = json.load(open(\"./data/results/dataset_pairs.json\"))\n datasets, _ = list_dir(DATA_PATH)\n datasets = [f'{DATA_PATH}/{dataset}' for dataset in datasets]\n data = load_nes(datasets)\n with open(cache_path, 'w') as f:\n logger.info(f\"Storing cached data at: {cache_path}\")\n json.dump(data, f)\n return data\n\n\ndef get_clustered_ids(\n clustered: Iterable\n) -> list:\n return [{\n \"clusterId\": i,\n \"ners\": [\n {\n 'id': cid,\n 'score': float(score)\n } for cid, score in zip(ids, scores)\n ]\n } for i, (ids, scores) in enumerate(clustered)]\n\n\ndef generate_training_examples(\n data: dict,\n) -> dict:\n positive_examples = defaultdict(list)\n matches = []\n distinct = []\n\n for key, value in data.items():\n positive_examples[value['clID']].append(value)\n\n for key, values in positive_examples.items():\n # logger.info(f\"{key} ({len(values)}): {values}\")\n use_items = choices(values, k=CHOOSE_K)\n for comb in combinations(use_items, 2):\n matches.append(comb)\n\n clids = positive_examples.keys()\n for comb in combinations(clids, 2):\n # skip some combination with a 1/2 probability\n if not SEARCH_CLOSEST and random() < 0.5: # toss a fair coin\n # logger.info(\"Skipping...\")\n continue\n d1 = choices(positive_examples[comb[0]], k=CHOOSE_K)\n d2 = choices(positive_examples[comb[1]], k=CHOOSE_K)\n for (i1, i2) in product(d1, d2):\n if SEARCH_CLOSEST:\n if fuzz.ratio(i1['text'].lower(), i2['text'].lower()) >= 70:\n # logger.info(f\"Similar are: {i1['text']}, {i2['text']}\")\n distinct.append((i1, i2))\n else:\n distinct.append((i1, i2))\n\n return {\n 'distinct': distinct,\n 'match': matches\n }\n\n\ndef data_looper(\n data: dict,\n call_fun: Callable,\n mapper: dict,\n train_all: bool = False,\n) -> Callable:\n chunk_size = 50\n def loop_through():\n for dataset, langs in data.items():\n for lang, items in langs.items():\n try:\n logger.info(f\"size of items for 
`{dataset}/{lang}`: {len(items)}\")\n                    keys = list(items.keys())\n                    for i, chunk_keys in enumerate([keys[x:x+chunk_size] for x in range(0, len(keys), chunk_size)]):\n                        chunk = {k:items[k] for k in chunk_keys}\n                        call_fun(dataset, f'{lang}-{i}', chunk, mapper)\n                except Exception as e:\n                    logger.error(f\"ERROR OCCURRED WHEN WORKING ON {dataset}/{lang}, {e}\")\n            if train_all:\n                try:\n                    call_fun(dataset, \"all\", {k:v for lang, docs in langs.items() for k, v in docs.items()}, mapper)\n                except Exception as e:\n                    logger.error(f\"ERROR OCCURRED WHEN WORKING ON {dataset}/all, {e}\")\n    return loop_through\n\n\ndef train(\n    dataset: str,\n    lang: str,\n    items: dict,\n    mapper: dict,\n) -> None:\n    logger.info(f\"Training on `{dataset}/{lang}`\")\n\n    # prepare training examples: generate matches and distinct cases\n    td = generate_training_examples(items)\n    train_path = f'{RUN_BASE_FNAME}/{dataset}'\n    pathlib.Path(train_path).mkdir(parents=True, exist_ok=True)\n    train_data_fname = f'{train_path}/train-{lang}.json'\n    with open(train_data_fname, 'w') as tf:\n        json.dump(td, tf)\n\n    ## alternatively, manually label the training data\n    ## the above code generates the training examples, so it is automating this step\n    # console_label(deduper)\n\n    # create a dedupe instance with chosen variables and number of cores to be used\n    deduper = Dedupe(variable_definition=dedupe_variables, num_cores=DEDUPE_CORES_USED)\n\n    # load the training data and prepare for training\n    with open(train_data_fname) as tf:\n        deduper.prepare_training(data=items, training_file=tf)\n\n    # train the deduper\n    deduper.train()\n\n    # store the learned settings\n    learned_settings_fname = f'{train_path}/learned_settings-{lang}.bin'\n    with open(learned_settings_fname, 'wb') as ts:\n        deduper.write_settings(ts)\n\n\ndef cluster_data(\n    dataset: str,\n    lang: str,\n    items: dict,\n    mapper: dict\n) -> None:\n    logger.info(f\"Clustering `{dataset}/{lang}`\")\n    data_set_folder = f'{RUN_BASE_FNAME}/{dataset}/'\n    pathlib.Path(data_set_folder).mkdir(parents=True, exist_ok=True)\n    lang_id = lang.split('-')[0]\n    clusters_report_fname = f'{RUN_BASE_FNAME}/{dataset}/clusters_report-{lang}.txt'\n    if pathlib.Path(clusters_report_fname).exists():\n        logger.info(f\"Dataset: `{dataset}/{lang}` is already processed, skipping...\")\n        return\n\n    learned_settings_fname = f'{RUN_BASE_FNAME}/{mapper[dataset]}/learned_settings-{lang_id}.bin'\n    settings_file = pathlib.Path(learned_settings_fname)\n    if not (settings_file.exists() or settings_file.is_file()):\n        logger.info(f\"Settings file `{learned_settings_fname}` does not exist or it's not a file.\")\n        return\n\n    # load the learned settings\n    with open(learned_settings_fname, 'rb') as f:\n        deduper = StaticDedupe(f, num_cores=DEDUPE_CORES_USED)\n\n    # cluster the data\n    clustered = deduper.partition(items, threshold=CLUSTER_THRESHOLD)\n\n    with open(clusters_report_fname, 'w') as f:\n        for clid, (rec, score) in enumerate(clustered):\n            print(f\"{clid}: {','.join(rec)}\", file=f)\n\n    clustered_data_fname = f'{RUN_BASE_FNAME}/{dataset}/clusters-{lang}.json'\n    clusters = get_clustered_ids(clustered)\n    with open(clustered_data_fname, 'w') as f:\n        json.dump(clusters, fp=f, indent=4)\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--closest', action='store_true')\n    parser.add_argument('--train', action='store_true')\n    parser.add_argument('--train-chars', action='store_true')\n    parser.add_argument('--train-all', action='store_true')\n    parser.add_argument('--test', action='store_true')\n    
parser.add_argument('--run-path', type=str, default=None)\n parser.add_argument('--data-path', type=str, default=None)\n parser.add_argument('--tsh', type=float, default=None)\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n \n global RUN_BASE_FNAME, SEARCH_CLOSEST, CLUSTER_THRESHOLD, JOB_ID, DATA_PATH\n RUN_BASE_FNAME = args.run_path if args.run_path is not None else RUN_BASE_FNAME\n DATA_PATH = args.data_path if args.data_path is not None else DATA_PATH\n pathlib.Path(RUN_BASE_FNAME).mkdir(parents=True, exist_ok=True)\n \n CLUSTER_THRESHOLD = args.tsh if args.tsh is not None else CLUSTER_THRESHOLD\n\n SEARCH_CLOSEST = args.closest\n\n logger.info(\"Running Dedupe Entity Matching\")\n logger.info(f\"SLURM_JOB_ID = {JOB_ID}\")\n logger.info(f\"Run path = {RUN_BASE_FNAME}\")\n logger.info(f\"Number of cores = {DEDUPE_CORES_USED}\")\n logger.info(f\"Dedupe threshold = {CLUSTER_THRESHOLD}\")\n logger.info(f\"Choose k = {CHOOSE_K}\")\n logger.info(f\"Closest string search: {SEARCH_CLOSEST}\")\n logger.info(f\"Train on chars: {args.train_chars}\")\n logger.info(f\"Train on all datasets: {args.train_all}\")\n logger.info(f\"Train: {args.train}\")\n logger.info(f\"Test: {args.test}\")\n\n logger.info(\"Loading the data...\")\n data = load_data()\n data = data['alphabetized'] if args.train_chars else data['normal']\n\n predict_from = {\n 'covid-19': 'ryanair',\n 'us_election_2020': 'brexit',\n }\n\n trainer = data_looper(data, train, train_all=args.train_all, mapper=predict_from)\n if args.train:\n logger.info(\"Training on the data...\")\n trainer()\n\n clusterer = data_looper(data, cluster_data, mapper=predict_from)\n if args.test:\n logger.info(\"Clustering the data...\")\n clusterer()\n\n logger.info(\"Done!\")\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5458955764770508, "alphanum_fraction": 0.5544342994689941, "avg_line_length": 37.170372009277344, "blob_id": "88df6247694f681cb1800fedf1dda0e3549cc4b3", "content_id": "bc27e4eb8160f56b5a817c92e13b1b5ed70e7183", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5153, "license_type": "no_license", "max_line_length": 130, "num_lines": 135, "path": "/src/utils/join_pred_cluster.py", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "import sys\nimport json\nimport glob\nimport pathlib\nimport logging\nimport argparse\nimport pandas as pd\n\nfrom collections import defaultdict\n\nfrom src.utils.load_dataset import LoadBSNLP\nfrom src.utils.load_documents import LoadBSNLPDocuments\nfrom src.utils.update_documents import UpdateBSNLPDocuments\nfrom src.utils.utils import list_dir\n\nlogging.basicConfig(\n stream=sys.stdout,\n level=logging.DEBUG,\n format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s'\n)\nlogger = logging.getLogger('TrainEvalModels')\n\n# pred_path = 'data/runs/run_2497_multilang_all'\npred_path = './data/runs/run_l1o_2551'\ncluster_path = 'data/deduper/runs/run_2508'\n\n\ndef load_clusters(\n path: str\n) -> (dict, dict):\n clusters = {}\n ne_map = {}\n n_clusters = 0\n for dataset in LoadBSNLP.datasets['test_2021']:\n df_clusters = pd.DataFrame()\n ne_map[dataset] = defaultdict(list)\n for fname in glob.glob(f'{path}/{dataset}/clusters-*.json'):\n fcluster = json.load(open(fname))\n nes = []\n for cluster in fcluster:\n for ne in cluster['ners']:\n try:\n ids = ne['id'].split(';')\n for sid, tid, t in zip(ids[2].split(':'), ids[3].split(':'), ids[4].split(' ')):\n item = {\n 
'clusterId': f'{n_clusters}-{cluster[\"clusterId\"]}',\n 'lang': ids[0],\n 'docId': ids[1],\n 'sentenceId': int(sid),\n 'tokenId': int(tid),\n 'text': t,\n }\n ne_key = f'{ids[0]};{ids[1]};{sid};{tid}'\n if ne_key in ne_map[dataset]:\n logger.info(f\"Double occurrence: {ne_key}\")\n ne_map[dataset][ne_key].append(f'{n_clusters}-{cluster[\"clusterId\"]}')\n nes.append(item)\n except Exception as e:\n logger.error(f\"ERROR OCCURRED {ne}, {e}\")\n n_clusters += 1\n df_clusters = pd.concat([df_clusters, pd.DataFrame(nes)])\n clusters[dataset] = df_clusters\n logger.info(f\"Clusters: {clusters}\")\n logger.info(f\"Map: {ne_map}\")\n return clusters, ne_map\n\n\ndef update_clusters(data: dict, ne_map: dict):\n for dataset, langs in data.items():\n missed = 0\n all_nes = 0\n for lang, docs in langs.items():\n for docId, doc in docs.items():\n doc['content']['calcClId'] = 'xxx'\n for i, row in doc['content'].iterrows():\n if row['calcNER'] != 'O':\n all_nes += 1\n ne_key = f'{lang};{row[\"docId\"]};{row[\"sentenceId\"]};{row[\"tokenId\"]}'\n if ne_key not in ne_map[dataset]:\n if row['calcNER'] != 'O':\n missed += 1\n continue\n doc['content'].loc[i, 'calcClId'] = ne_map[dataset][ne_key][0]\n logger.info(f\"[{dataset}] Missed {missed}/{all_nes} [{missed/all_nes:.3f}]\")\n return data\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--pred-path', type=str, default=None)\n parser.add_argument('--cluster-path', type=str, default=None)\n parser.add_argument('--year', type=str, default='2021')\n parser.add_argument('--lang', type=str, default='all')\n return parser.parse_args()\n\n\ndef main():\n global pred_path, cluster_path\n args = parse_args()\n pred_path = args.pred_path if args.pred_path is not None else pred_path\n cluster_path = args.cluster_path if args.cluster_path is not None else cluster_path\n year = args.year\n lang = args.lang\n\n logger.info(f\"Predictions path: {pred_path}\")\n logger.info(f\"Clusters path: {pred_path}\")\n logger.info(f\"Year: {year}\")\n logger.info(f\"Language: {lang}\")\n\n path = pathlib.Path(pred_path)\n if not path.exists() or not path.is_dir():\n raise Exception(f\"Path does not exist or is not a directory: `{pred_path}`\")\n path = pathlib.Path(cluster_path)\n if not path.exists() or not path.is_dir():\n raise Exception(f\"Path does not exist or is not a directory: `{cluster_path}`\")\n\n logger.info(\"Loading the clusters...\")\n clusters, ne_map = load_clusters(cluster_path)\n\n models, _ = list_dir(f'{pred_path}/predictions/bsnlp')\n for model in models:\n logger.info(f\"Loading the documents for model `{model}`...\")\n data = LoadBSNLPDocuments(year='test_2021', lang=lang, path=f'{pred_path}/predictions/bsnlp/{model}').load_predicted()\n\n logger.info(f\"[{model}] Merging the cluster data into the prediction data\")\n updated = update_clusters(data, ne_map)\n\n logger.info(f\"[{model}] Persisting the changes...\")\n UpdateBSNLPDocuments(year='test_2021', lang=lang, path=f'{pred_path}/predictions/bsnlp/{model}').update_clustered(updated)\n\n logger.info(\"Done.\")\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.7519999742507935, "alphanum_fraction": 0.7839999794960022, "avg_line_length": 30.25, "blob_id": "35840fe44ebaaba107839c09b9f96a09e7f0df49", "content_id": "037dc7c6b0a384a7c80e81d2ec45e83abdc2dc7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 125, "license_type": "no_license", "max_line_length": 93, "num_lines": 4, "path": 
"/java-eval/evaluator.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -euo pipefail\n\njava -cp bsnlp-ner-evaluator-19.0.4.jar sigslav.BNEvaluator data reports error-logs summaries\n" }, { "alpha_fraction": 0.7130434513092041, "alphanum_fraction": 0.7130434513092041, "avg_line_length": 22, "blob_id": "890102b20322b3b43ad25e253f7d81f7b5a3dea9", "content_id": "15479b738130f5eebad36e77383325eb3a8ade79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 115, "license_type": "no_license", "max_line_length": 47, "num_lines": 5, "path": "/bin/exec-bert.sh", "repo_name": "UL-FRI-Zitnik/BSNLP-2021-Shared-Task", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -euo pipefail\n\necho \"Starting the BERT process...\"\nPYTHONPATH=. python src/train/crosloeng.py \"$@\"\n" } ]
46
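The evaluation notes embedded in the record above describe scoring coreference with the LEA metric and weighting each entity by log2 of its mention count. As a rough illustration only (the function below is not from the repository, and the input representation of entities as sets of mention ids is an assumption), here is a minimal sketch of LEA-style recall with that weighting:

```python
# Minimal sketch of LEA recall with log2 entity importance, per the notes above.
# The input representation (entities as sets of mention ids) is an assumption.
from math import log2

def links(n: int) -> float:
    # number of distinct mention pairs (coreference links) in an entity of size n
    return n * (n - 1) / 2

def lea_recall(gold: list, system: list) -> float:
    # gold/system: lists of entities, each entity given as a set of mention ids
    num = den = 0.0
    for g in gold:
        if len(g) < 2:                # singleton entities carry no links
            continue
        importance = log2(len(g))    # the notes take importance = log2(#mentions)
        resolved = sum(links(len(g & s)) for s in system)
        num += importance * resolved / links(len(g))
        den += importance
    return num / den if den else 0.0

# e.g. lea_recall([{1, 2, 3}], [{1, 2}, {3}]) -> 0.333... (1 of 3 links recovered)
```

Swapping the roles of `gold` and `system` gives the precision analogue; for the cross-lingual variant mentioned in the notes, only links that span two languages would enter the link counts.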
protos123/reportessimon
https://github.com/protos123/reportessimon
1e4b8c07489d234732e7ef0c91af6cd66380826d
99bf08ab2ce2205cd7c1416f80e2ed7adbb60b9c
6c390105b451c24526bd204de20969e6297dc437
refs/heads/master
2020-03-25T00:53:58.905412
2018-03-06T22:03:12
2018-03-06T22:03:12
143,211,334
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6160973906517029, "alphanum_fraction": 0.6320857405662537, "avg_line_length": 48.14285659790039, "blob_id": "27cb749b76737d1ba29cf6e2a3acf2cf80be6d6f", "content_id": "6bb7f5453915d819225d03d805a5e0362156b949", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5504, "license_type": "no_license", "max_line_length": 120, "num_lines": 112, "path": "/aud_cuentas_pricing_JIRA.py", "repo_name": "protos123/reportessimon", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport psycopg2 as db\nimport logging\nimport sys\nimport datetime\nimport queries as qp\n\ntry:\n conn = db.connect(dbname='pol_v4', user='readonly', host='172.18.35.22', password='YdbLByGopWPS4zYi8PIR')\n cursor = conn.cursor()\nexcept:\n logging.error('Cannot connect to database. Please run this script again')\n sys.exit()\n\n\ndef listacuentas(today):\n tomorrow = today + datetime.timedelta(days=1)\n logging.warning('Searching account id with changes on %(today)s', {'today':str(today)})\n cursor.execute(\"\"\"SELECT c.cuenta_id FROM audit.cuenta_aud c\n inner join audit.revision_auditoria ra ON c.rev = ra.revision_id \n INNER JOIN pps.cuenta pc on (c.cuenta_id=pc.cuenta_id)\n WHERE (to_timestamp(fecha_revision/1000))>=%(today)s and (to_timestamp(fecha_revision/1000))<%(tomorrow)s\n and pc.fecha_creacion<%(today)s order by c.cuenta_id\"\"\",{'today':today,'tomorrow':tomorrow})\n cuentas = pd.DataFrame(cursor.fetchall())\n if len(cuentas)!=0:\n cuentas.columns = ['cuenta_id']\n cuentas = cuentas.drop_duplicates()\n logging.warning('Search finished: %(cantidad)s have received changes', {'cantidad':len(cuentas)})\n return cuentas\n\n# ----------------------------------------------------------------------------------------------------------------------\n# CONTROL DE CAMBIOS CUENTAS:\n# CORRE POR EL LISTADO DE CUENTAS TRAYENDO LA CONSULTA DE LOS CAMBIOS A LA FECHA Y ANEXANDO EL ULTIMO CAMBIO\n# ANTERIOR A LA FECHA DE EJECUCION DEL REPORTE.\n# RETORNA UN DATAFRAME CON EL LISTADO DE TODOS LOS CAMBIOS PARA EL LISTADO DE CUENTAS.\ndef controlcambioscuentas(cuentas,today):\n\n cuentas = cuentas['cuenta_id'].tolist()\n temp = []\n tomorrow=today + datetime.timedelta(days=1)\n logging.warning('Starting check for %(today)s', {'today':str(today)})\n audcuentas = pd.DataFrame()\n for index in range(0, len(cuentas)):\n cid = cuentas[index] # Indexar cuentas Id\n logging.info('Checking account %(cid)s', {'cid':cid})\n # Ejecutar consulta de fecha actual\n cursor.execute(\"\"\"SELECT to_timestamp(fecha_revision/1000) as \"fec_cambio\", c.rev, uw.email, c.cuenta_id, \n c.nombre, c.perfil_usuario_id, c.grupo_perfil_cobranza_id\n FROM audit.cuenta_aud c\n inner join audit.revision_auditoria ra ON (c.rev = ra.revision_id)\n inner join pps.usuario_web uw on (c.usuario_modificacion_id=uw.usuario_web_id)\n WHERE (to_timestamp(fecha_revision/1000))>=%(today)s and (to_timestamp(fecha_revision/1000))<%(tomorrow)s\n and c.cuenta_id=%(cid)s\"\"\", {'cid': cid,'today':today,'tomorrow':tomorrow})\n for i in xrange(cursor.rowcount):\n temp.append(cursor.fetchone())\n # Ejecutar consulta de cambio inmediatamente anterior\n cursor.execute(\"\"\"SELECT to_timestamp(fecha_revision/1000) as \"fec_cambio\", c.rev, uw.email, c.cuenta_id, \n c.nombre, c.perfil_usuario_id, c.grupo_perfil_cobranza_id\n FROM audit.cuenta_aud c\n inner join audit.revision_auditoria ra ON (c.rev = ra.revision_id)\n inner join pps.usuario_web uw on 
(c.usuario_modificacion_id=uw.usuario_web_id)\n WHERE (to_timestamp(fecha_revision/1000))<%(today)s and c.cuenta_id=%(cid)s \n order by \"fec_cambio\" desc limit 1\"\"\", {'cid': cid, 'today':today})\n i = 0\n for i in xrange(cursor.rowcount):\n temp.append(cursor.fetchone())\n cambios = pd.DataFrame(temp,columns = ['fecha_rev', 'rev_id', 'email', 'cuenta_id', 'nombre_cuenta',\n 'perfil_usuario_id','grupo_perfil_cobranza'])\n cambios['fecha_rev'] = pd.to_datetime(cambios.fecha_rev)\n cambios = cambios.sort_values(by='fecha_rev')\n cambiosord2 = cambios.shift()\n cambiosord2.columns = ['fecha_revprev', 'rev_idprev', 'emailprev', 'cuenta_idprev',\n 'nombre_cuenta_prev', 'perfil_usuario_idprev','grupo_perfil_cobranzaprev']\n df = pd.concat([cambios, cambiosord2], axis=1)\n # En caso de que sea un primer cambio, ejecutar script de identificacion\n if len(df)!=0:\n df.drop(df.index[0], inplace=True)\n df['cambio'] = np.where(df['perfil_usuario_id'] != df['perfil_usuario_idprev'], 'perfilcobranza', None)\n df = df[df.cambio.notnull()]\n audcuentas = audcuentas.append(df, ignore_index=True)\n logging.info('Found %(numbers)s changes',{'numbers':len(df)})\n\n # Limpiar variables iterativas\n df = df.iloc[0:0]\n cambios=cambios.iloc[0:0]\n cambiosord2=cambiosord2.iloc[0:0]\n temp=[]\n logging.info('Account Id %(cid)s finished',{'cid':cid})\n # Eliminar Columnas innecesarias\n logging.warning('Process Finished. Proceeding to concatenate and save')\n return audcuentas\n\n# ----------------------------------------------------------------------------------------------------------------------\n\nstart = datetime.date(2018, 01, 01)\nend = datetime.date(2018, 01, 31)\ndays = end - start\npricingchanges = pd.DataFrame()\n\nfor x in xrange(0,days.days+1):\n date = start + datetime.timedelta(days=x)\n accounts = listacuentas(date)\n if len(accounts)!=0:\n cambios = controlcambioscuentas(accounts,date)\n pricingchanges = pricingchanges.append(cambios, ignore_index=True)\n cambios = cambios.iloc[0:0]\n\nfilename = 'Pricing_changes_Nov-2017_Jan-2018.xlsx'\nwriter = pd.ExcelWriter(filename)\npricingchanges.to_excel(writer)\nwriter.save()\n" }, { "alpha_fraction": 0.7876105904579163, "alphanum_fraction": 0.7964601516723633, "avg_line_length": 13.25, "blob_id": "ec036e20d8f0bb9ae9725d1ab6737f00364eb7db", "content_id": "1cddff8d71aff32e7570eeb2394569f9643446c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "no_license", "max_line_length": 53, "num_lines": 8, "path": "/test-arena.py", "repo_name": "protos123/reportessimon", "src_encoding": "UTF-8", "text": "import os\nimport pandas as pd\nimport datetime\n\n\ndate=datetime.date.today()-datetime.timedelta(days=1)\n\nprint date" }, { "alpha_fraction": 0.7225490212440491, "alphanum_fraction": 0.7382352948188782, "avg_line_length": 28.823530197143555, "blob_id": "d8c1bbd67a86af196532c3faad30faf2bbfe67ba", "content_id": "d6d2fbeeab6c051532cc98c8cddb5dd0482e5e68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1020, "license_type": "no_license", "max_line_length": 99, "num_lines": 34, "path": "/bank-acc-reports-engine.py", "repo_name": "protos123/reportessimon", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport psycopg2 as db\nimport logging\nimport smtplib\nimport ssl\nimport os\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\nimport datetime\nimport queries as qp\n\ntry:\n conn = 
db.connect(dbname='pol_v4', user='readonly', host='172.18.35.22', password='YdbLByGopWPS4zYi8PIR')\n    cursor = conn.cursor()\nexcept:\n    logging.error('Cannot connect to database. Please run this script again')\n    sys.exit()\n\ndate=datetime.date.today()-datetime.timedelta(days=1)\ncambioscuentas = pd.DataFrame()\naccounts = qp.listacuentasbancos(date)\nif len(accounts) != 0:\n    cambios = qp.controlcambioscuentasbancos(accounts, date)\n    cambioscuentas = cambioscuentas.append(cambios, ignore_index=True)\n    cambios = cambios.iloc[0:0]\n\n\nfilename = 'Bank_Accounts_Changes_' + str(date) + ('.xlsx')\nwriter = pd.ExcelWriter(filename,options={'remove_timezone': True})\ncambioscuentas.to_excel(writer)\nwriter.save()\nlogging.warning('Saved in file %(filename)s. Process completed',{'filename':filename})\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.717632532119751, "alphanum_fraction": 0.7336621284484863, "avg_line_length": 28, "blob_id": "59360a8e45100e64fd1beeed06c99a7152e89f15", "content_id": "e306379682aa76b19e1f4486af1a02f03327816a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 811, "license_type": "no_license", "max_line_length": 99, "num_lines": 28, "path": "/accounts-reports-engine.py", "repo_name": "protos123/reportessimon", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport psycopg2 as db\nimport logging\nimport sys\nimport datetime\nimport queries as qp\ntry:\n    conn = db.connect(dbname='pol_v4', user='readonly', host='172.18.35.22', password='YdbLByGopWPS4zYi8PIR')\n    cursor = conn.cursor()\nexcept:\n    logging.error('Cannot connect to database. Please run this script again')\n    sys.exit()\n\naccounts = qp.listacuentas()\ntoday = datetime.date.today() - datetime.timedelta(days=1)\ncambios = pd.DataFrame()\n\n# Ejecutar script de control de cambios\nif len(accounts)!=0:\n    cambios = qp.controlcambioscuentas(accounts, today)\n\n# Guardar Cambios en archivo de Excel\nfilename = 'Accounts_Report_' + str(today) + ('.xlsx')\nwriter = pd.ExcelWriter(filename)\ncambios.to_excel(writer)\nwriter.save()\n\nlogging.warning('Saved in file %(filename)s. Process completed',{'filename':filename})" }, { "alpha_fraction": 0.7007299065589905, "alphanum_fraction": 0.7007299065589905, "avg_line_length": 20.6842098236084, "blob_id": "306d8a924a3addc5826d9ba599af0af2fccdcc5", "content_id": "df1404fc1410eac373401ac8d169ef508619ad41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 411, "license_type": "no_license", "max_line_length": 104, "num_lines": 19, "path": "/README.md", "repo_name": "protos123/reportessimon", "src_encoding": "UTF-8", "text": "# README #\n\nThis README would normally document whatever steps are necessary to get your application up and running.\n\n### What is this repository for? ###\n\nReporting engine for changes in pricing and banking information for Operations Leadership\n\n### How do I get set up? ###\n\n\n### Contribution guidelines ###\n\n\n### Who do I talk to? 
###\n\n* Repo Owner: Jesus Rincon\n* Product Owner: German Giraldo\n* Team: German Giraldo, Viviana Rodriguez, Jesus Rincon" }, { "alpha_fraction": 0.6791979670524597, "alphanum_fraction": 0.7059314846992493, "avg_line_length": 25.04347801208496, "blob_id": "20178667dd9b146eb3ee6d602fe4bbd2db958674", "content_id": "b83f27845cb30d3a7a6610424718d474db718658", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1197, "license_type": "no_license", "max_line_length": 99, "num_lines": 46, "path": "/cambios-pricing-especificos.py", "repo_name": "protos123/reportessimon", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport psycopg2 as db\nimport logging\nimport sys\nimport datetime\nimport queries as qp\nimport os\ntry:\n    conn = db.connect(dbname='pol_v4', user='readonly', host='172.18.35.22', password='YdbLByGopWPS4zYi8PIR')\n    cursor = conn.cursor()\nexcept:\n    logging.error('Cannot connect to database. Please run this script again')\n    sys.exit()\n\naccounts=pd.DataFrame([500509],columns=['cuenta_id'])\nprint accounts\n\nfor x in range(1,32):\n    today =datetime.date(2017, 12, x)\n    cambios = qp.controlcambioscuentas(accounts,today)\n    # Guardar Cambios en archivo de Excel\n    filename = 'Pricing_Report_' + str(today) + ('.xlsx')\n    writer = pd.ExcelWriter(filename)\n    cambios.to_excel(writer)\n    writer.save()\n    logging.warning('Saved in file %(filename)s. Process completed',{'filename':filename})\n\npath= os.getcwd()\nfiles =os.listdir(path)\nprint files\n\nfiles_xlsx=[f for f in files if f[-4:] == 'xlsx']\nprint files_xlsx\n\ndf=pd.DataFrame()\n\nfor f in files_xlsx:\n    data=pd.read_excel(f,'Sheet1')\n    df = df.append(data)\n\ndf=df.reset_index(drop= True)\nfilename='Pricing_Changes_December_2017.xlsx'\nwriter = pd.ExcelWriter(filename)\ndf.to_excel(writer)\nwriter.save()" }, { "alpha_fraction": 0.6974359154701233, "alphanum_fraction": 0.7064102292060852, "avg_line_length": 22.66666603088379, "blob_id": "06d6c913a3cc71b93985f7cb3bcdd82fe949a85", "content_id": "fb636c9bbe97eba40040373ac52c25f2d051f99a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 780, "license_type": "no_license", "max_line_length": 112, "num_lines": 33, "path": "/dev-test.py", "repo_name": "protos123/reportessimon", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport psycopg2 as db\nimport logging\nimport sys\nimport datetime\nimport queries as qp\nimport os\n\npath= os.getcwd()\nfiles =os.listdir(path)\nprint files\n\nfiles_xlsx=[f for f in files if f[-4:] == 'xlsx']\nprint files_xlsx\n\ndf=pd.DataFrame()\n\nfor f in files_xlsx:\n    data=pd.read_excel(f,'Sheet1')\n    df = df.append(data)\n\ndf=df.reset_index(drop= True)\n\n#df=df[['fecha_rev', 'rev_id', 'email', 'usuario_id', 'nombre_comercio','nombre_contacto', 'url','domicilio',\n#       'fecha_revprev','rev_idprev', 'emailprev', 'usuario_idprev', 'nombre_comercioprev','nombre_contactoprev',\n#       'urlprev', 'domicilioprev']]\nprint df\n\nfilename='Pricing_Changes_January_2017.xlsx'\nwriter = pd.ExcelWriter(filename)\ndf.to_excel(writer)\nwriter.save()" }, { "alpha_fraction": 0.5954269766807556, "alphanum_fraction": 0.6140924096107483, "avg_line_length": 37.28571319580078, "blob_id": "fb4c8b56ae214bcdf00f1584686c1733172b8015", "content_id": "7fe2d8bb4fc485043408793de6f9f0a8698f8dc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2143, "license_type": 
"no_license", "max_line_length": 129, "num_lines": 56, "path": "/user-reports-engine.py", "repo_name": "protos123/reportessimon", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport psycopg2 as db\nimport logging\nimport sys\nimport datetime\nimport queries as qp\ntry:\n    conn = db.connect(dbname='pol_v4', user='readonly', host='172.18.35.22', password='YdbLByGopWPS4zYi8PIR')\n    cursor = conn.cursor()\nexcept:\n    logging.error('Cannot connect to database. Please run this script again')\n    sys.exit()\n\nstart = datetime.date(2017, 6, 1)  # assumed start of the reporting window: the original never defined `start`\nend = datetime.date(2017, 6, 30)\ndays = end - start\nuserchanges = pd.DataFrame()\n\nfor x in xrange(0,days.days+1):\n    date = start + datetime.timedelta(days=x)\n    users = qp.listausuarios(date)\n    if len(users)!=0:\n        cambios = qp.controlcambios(users,date)\n        userchanges = userchanges.append(cambios, ignore_index=True)\n        cambios = cambios.iloc[0:0]\n\nfilename = 'User_Changes_06_30.xlsx'\nwriter = pd.ExcelWriter(filename)\nuserchanges.to_excel(writer)\nwriter.save()\n\n\n    # for x in range(1,31):\n#     today = datetime.date(2017, 4, x)\n#     users = qp.listausuarios(today)\n#     # Ejecutar script de control de cambios\n#     if len(users)!=0:\n#         cambios = qp.controlcambios(users,today)\n#         # Guardar Cambios en archivo de Excel\n#         filename = 'Users_Report_BR_' + str(today) + ('.xlsx')\n#         writer = pd.ExcelWriter(filename)\n#         cambios.to_excel(writer)\n#         writer.save()\n#     else:\n#         cambios = pd.DataFrame(columns=['fecha_rev', 'rev_id', 'email', 'usuario_id', 'nombre_comercio',\n#                                         'nombre_contacto', 'url',\n#                                         'domicilio'])\n#         cambiosord2=pd.DataFrame(columns= ['fecha_revprev', 'rev_idprev', 'emailprev', 'usuario_idprev', 'nombre_comercioprev',\n#                                            'nombre_contactoprev', 'urlprev', 'domicilioprev'])\n#         filename = 'Users_Report_BR_' + str(today) + ('.xlsx')\n#         audcambios = pd.concat([cambios, cambiosord2], axis=1)\n#         writer = pd.ExcelWriter(filename)\n#         audcambios.to_excel(writer)\n#         writer.save()\n#         logging.warning('Saved in file %(filename)s. Process completed',{'filename':filename})" }, { "alpha_fraction": 0.5901757478713989, "alphanum_fraction": 0.5995063781738281, "avg_line_length": 57.90425491333008, "blob_id": "35fbdf838965a9915efc5edfc9aa188b72bd674c", "content_id": "94634457b4c5e07ceb84eed73944d9a23ac6b877", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16612, "license_type": "no_license", "max_line_length": 162, "num_lines": 282, "path": "/queries.py", "repo_name": "protos123/reportessimon", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport pandas as pd\nimport psycopg2 as db\nimport logging\nimport sys\nimport datetime\nimport numpy as np\n\n# Configurar Logger\nlogging.basicConfig(filename='queries.log', filemode='w', level=logging.DEBUG)\n\n# Intentar Conexion a BD\ntry:\n    conn = db.connect(dbname='pol_v4', user='readonly', host='172.18.35.22', password='YdbLByGopWPS4zYi8PIR')\n    cursor = conn.cursor()\nexcept:\n    logging.error('Cannot connect to database. 
Please run this script again')\n sys.exit()\n\n# ----------------------------------------------------------------------------------------------------------------------\n\n# Listado de usuarios que recibieron un cambio el dia actual\ndef listausuarios(today):\n tomorrow = today + datetime.timedelta(days=1)\n logging.warning('Searching user id with changes on %(today)s', {'today':str(today)})\n cursor.execute(\"\"\"SELECT u.usuario_id FROM audit.usuario_aud u\n inner join audit.revision_auditoria ra ON u.rev = ra.revision_id \n inner join pps.usuario pu on (u.usuario_id=pu.usuario_id)\n WHERE (to_timestamp(fecha_revision/1000))>=%(today)s and \n (to_timestamp(fecha_revision/1000))<%(tomorrow)s and pu.fecha_Creacion<%(today)s\n order by u.usuario_id\"\"\",{'today':today,'tomorrow':tomorrow})\n usuarios = pd.DataFrame(cursor.fetchall())\n if len(usuarios) != 0:\n usuarios.columns = ['usuario_id']\n usuarios = usuarios.drop_duplicates()\n logging.warning('Search finished: %(cantidad)s have received changes', {'cantidad':len(usuarios)})\n return usuarios\n# ----------------------------------------------------------------------------------------------------------------------\n\n# CONTROL DE CAMBIOS:\n# CORRE POR EL LISTADO DE USUARIOS TRAYENDO LA CONSULTA DE LOS CAMBIOS A LA FECHA Y ANEXANDO EL ULTIMO CAMBIO\n# ANTERIOR A LA FECHA DE EJECUCION DEL REPORTE.\n# RETORNA UN DATAFRAME CON EL LISTADO DE TODOS LOS CAMBIOS PARA EL LISTADO DE USUARIOS.\n\ndef controlcambios(usuarios,today):\n\n usuarios = usuarios['usuario_id'].tolist()\n temp = []\n tomorrow = today + datetime.timedelta(days=1)\n logging.warning('Starting check for %(today)s', {'today':str(today)})\n audcambios = pd.DataFrame()\n for index in range(0, len(usuarios)):\n uid = usuarios[index] # Indexar usuarios Id\n logging.info('Checking user %(uid)s', {'uid':uid})\n # Ejecutar consulta de fecha actual\n cursor.execute(\"\"\"SELECT to_timestamp(fecha_revision/1000) as \"fec_cambio\", u.rev, uw.email, \n u.usuario_id, u.nombres, u.nombre_contacto, u.url, u.direccion\n FROM audit.usuario_aud u \n inner join audit.revision_auditoria ra ON (u.rev = ra.revision_id) \n inner join pps.usuario_web uw on (u.usuario_modificacion_id=uw.usuario_web_id)\n WHERE (to_timestamp(fecha_revision/1000))>=%(today)s and (to_timestamp(fecha_revision/1000))<%(tomorrow)s\n and u.usuario_id=%(uid)s\"\"\", {'uid': uid,'today':today,'tomorrow':tomorrow})\n for i in xrange(cursor.rowcount):\n temp.append(cursor.fetchone())\n # Ejecutar consulta de cambio inmediatamente anterior\n cursor.execute(\"\"\"SELECT to_timestamp(fecha_revision/1000) as \"fec_cambio\", u.rev,uw.email, u.usuario_id, u.nombres, u.nombre_contacto, u.url, u.direccion\n FROM audit.usuario_aud u\n inner join audit.revision_auditoria ra ON u.rev = ra.revision_id \n inner join pps.usuario_web uw on (u.usuario_modificacion_id=uw.usuario_web_id)\n WHERE (to_timestamp(fecha_revision/1000))<%(today)s and u.usuario_id=%(uid)s\n order by \"fec_cambio\" desc limit 1\"\"\", {'uid': uid, 'today':today})\n i = 0\n for i in xrange(cursor.rowcount):\n temp.append(cursor.fetchone())\n cambios = pd.DataFrame(temp,columns = ['fecha_rev', 'rev_id', 'email', 'usuario_id', 'nombre_comercio', 'nombre_contacto', 'url',\n 'domicilio'])\n cambios['fecha_rev'] = pd.to_datetime(cambios.fecha_rev)\n cambios = cambios.sort_values(by='fecha_rev')\n cambiosord2 = cambios.shift()\n cambiosord2.columns = ['fecha_revprev', 'rev_idprev', 'emailprev', 'usuario_idprev', 'nombre_comercioprev',\n 'nombre_contactoprev', 
'urlprev','domicilioprev']\n df = pd.concat([cambios, cambiosord2], axis=1)\n # En caso de que sea un primer cambio, ejecutar script de identificacion\n if len(df)!=0:\n df.drop(df.index[0], inplace=True)\n df['cambio'] = np.where(df['nombre_contacto'] != df['nombre_contactoprev'], 'nombre_contacto',\n np.where(df['nombre_comercio'] != df['nombre_comercioprev'], 'nombre_comercio',\n np.where(df['url'] != df['urlprev'], 'url',\n np.where(df['domicilio'] != df['domicilioprev'], 'domicilio', None))))\n df = df[df.cambio.notnull()]\n audcambios = audcambios.append(df, ignore_index=True)\n logging.info('Found %(numbers)s changes',{'numbers':len(df)})\n\n # Limpiar variables iterativas\n df = df.iloc[0:0]\n cambios=cambios.iloc[0:0]\n cambiosord2=cambiosord2.iloc[0:0]\n temp=[]\n logging.info('User Id %(uid)s finished',{'uid':uid})\n # Eliminar Columnas innecesarias\n logging.warning('Process Finished. Proceeding to concatenate and save')\n return audcambios\n# ----------------------------------------------------------------------------------------------------------------------\n\n# Listado de cuentas que recibieron un cambio el dia actual\ndef listacuentas():\n tomorrow = datetime.date.today()\n today = datetime.date.today() - datetime.timedelta(days=1)\n logging.warning('Searching account id with changes on %(today)s', {'today':str(today)})\n cursor.execute(\"\"\"SELECT c.cuenta_id FROM audit.cuenta_aud c\n inner join audit.revision_auditoria ra ON c.rev = ra.revision_id \n INNER JOIN pps.cuenta pc on (c.cuenta_id=pc.cuenta_id)\n WHERE (to_timestamp(fecha_revision/1000))>=%(today)s and (to_timestamp(fecha_revision/1000))<%(tomorrow)s\n and pc.fecha_creacion<%(today)s order by c.cuenta_id\"\"\",{'today':today,'tomorrow':tomorrow})\n cuentas = pd.DataFrame(cursor.fetchall())\n if len(cuentas)!=0:\n cuentas.columns = ['cuenta_id']\n cuentas = cuentas.drop_duplicates()\n logging.warning('Search finished: %(cantidad)s have received changes', {'cantidad':len(cuentas)})\n return cuentas\n\n# ----------------------------------------------------------------------------------------------------------------------\n# CONTROL DE CAMBIOS CUENTAS:\n# CORRE POR EL LISTADO DE CUENTAS TRAYENDO LA CONSULTA DE LOS CAMBIOS A LA FECHA Y ANEXANDO EL ULTIMO CAMBIO\n# ANTERIOR A LA FECHA DE EJECUCION DEL REPORTE.\n# RETORNA UN DATAFRAME CON EL LISTADO DE TODOS LOS CAMBIOS PARA EL LISTADO DE CUENTAS.\ndef controlcambioscuentas(cuentas,today):\n\n cuentas = cuentas['cuenta_id'].tolist()\n temp = []\n tomorrow=today + datetime.timedelta(days=1)\n logging.warning('Starting check for %(today)s', {'today':str(today)})\n audcuentas = pd.DataFrame()\n for index in range(0, len(cuentas)):\n cid = cuentas[index] # Indexar cuentas Id\n logging.info('Checking account %(cid)s', {'cid':cid})\n # Ejecutar consulta de fecha actual\n cursor.execute(\"\"\"SELECT to_timestamp(fecha_revision/1000) as \"fec_cambio\", c.rev, uw.email, c.cuenta_id, \n c.nombre, c.perfil_usuario_id, c.grupo_perfil_cobranza_id\n FROM audit.cuenta_aud c\n inner join audit.revision_auditoria ra ON (c.rev = ra.revision_id)\n inner join pps.usuario_web uw on (c.usuario_modificacion_id=uw.usuario_web_id)\n WHERE (to_timestamp(fecha_revision/1000))>=%(today)s and (to_timestamp(fecha_revision/1000))<%(tomorrow)s\n and c.cuenta_id=%(cid)s\"\"\", {'cid': cid,'today':today,'tomorrow':tomorrow})\n for i in xrange(cursor.rowcount):\n temp.append(cursor.fetchone())\n # Ejecutar consulta de cambio inmediatamente anterior\n cursor.execute(\"\"\"SELECT 
to_timestamp(fecha_revision/1000) as \"fec_cambio\", c.rev, uw.email, c.cuenta_id, \n c.nombre, c.perfil_usuario_id, c.grupo_perfil_cobranza_id\n FROM audit.cuenta_aud c\n inner join audit.revision_auditoria ra ON (c.rev = ra.revision_id)\n inner join pps.usuario_web uw on (c.usuario_modificacion_id=uw.usuario_web_id)\n WHERE (to_timestamp(fecha_revision/1000))<%(today)s and c.cuenta_id=%(cid)s \n order by \"fec_cambio\" desc limit 1\"\"\", {'cid': cid, 'today':today})\n i = 0\n for i in xrange(cursor.rowcount):\n temp.append(cursor.fetchone())\n cambios = pd.DataFrame(temp,columns = ['fecha_rev', 'rev_id', 'email', 'cuenta_id', 'nombre_cuenta',\n 'perfil_usuario_id','grupo_perfil_cobranza'])\n cambios['fecha_rev'] = pd.to_datetime(cambios.fecha_rev)\n cambios = cambios.sort_values(by='fecha_rev')\n cambiosord2 = cambios.shift()\n cambiosord2.columns = ['fecha_revprev', 'rev_idprev', 'emailprev', 'cuenta_idprev',\n 'nombre_cuenta_prev', 'perfil_usuario_idprev','grupo_perfil_cobranzaprev']\n df = pd.concat([cambios, cambiosord2], axis=1)\n # En caso de que sea un primer cambio, ejecutar script de identificacion\n if len(df)!=0:\n df.drop(df.index[0], inplace=True)\n df['cambio'] = np.where(df['perfil_usuario_id'] != df['perfil_usuario_idprev'], 'perfilcobranza', None)\n df = df[df.cambio.notnull()]\n audcuentas = audcuentas.append(df, ignore_index=True)\n logging.info('Found %(numbers)s changes',{'numbers':len(df)})\n\n # Limpiar variables iterativas\n df = df.iloc[0:0]\n cambios=cambios.iloc[0:0]\n cambiosord2=cambiosord2.iloc[0:0]\n temp=[]\n logging.info('Account Id %(cid)s finished',{'cid':cid})\n # Eliminar Columnas innecesarias\n logging.warning('Process Finished. Proceeding to concatenate and save')\n return audcuentas\n\n# ----------------------------------------------------------------------------------------------------------------------\n# ----------------------------------------------------------------------------------------------------------------------\n# ----------------------------------------------------------------------------------------------------------------------\n\n\n# Listado de cuentas bancarias que recibieron un cambio el dia actual\ndef listacuentasbancos(today):\n tomorrow = today + datetime.timedelta(days=1)\n logging.warning('Searching account id with changes on %(today)s', {'today':str(today)})\n cursor.execute(\"\"\"SELECT db.cuenta_id FROM audit.datos_bancarios_aud db\n inner join audit.revision_auditoria ra ON db.rev = ra.revision_id \n INNER JOIN pps.cuenta pdb on (db.cuenta_id=pdb.cuenta_id)\n WHERE (to_timestamp(fecha_revision/1000))>=%(today)s and (to_timestamp(fecha_revision/1000))<%(tomorrow)s \n and pdb.fecha_creacion<%(today)s\n order by db.cuenta_id\"\"\",{'today':today,'tomorrow':tomorrow})\n cbancos = pd.DataFrame(cursor.fetchall())\n if len(cbancos)!=0:\n cbancos.columns = ['cuenta_id']\n cbancos = cbancos.drop_duplicates()\n logging.warning('Search finished: %(cantidad)s have received changes', {'cantidad':len(cbancos)})\n #print cbancos\n return cbancos\n\n# ----------------------------------------------------------------------------------------------------------------------\n# CONTROL DE CAMBIOS CUENTAS BANCOS:\n# CORRE POR EL LISTADO DE CUENTAS TRAYENDO LA CONSULTA DE CAMBIOS EN CUENTAS BANCARIAS A LA FECHA Y ANEXANDO E\n# L ULTIMO CAMBIO ANTERIOR A LA FECHA DE EJECUCION DEL REPORTE.\n# RETORNA UN DATAFRAME CON EL LISTADO DE TODOS LOS CAMBIOS PARA EL LISTADO DE CUENTAS QUE TUVIERON CAMBIOS EN CUENTA\n# BANCARIA.\ndef 
controlcambioscuentasbancos(cbancos,today):\n\n cbancos = cbancos['cuenta_id'].tolist()\n temp = []\n tomorrow=today + datetime.timedelta(days=1)\n logging.warning('Starting check for %(today)s', {'today':str(today)})\n audcuentas = pd.DataFrame()\n for index in range(0, len(cbancos)):\n cidb = cbancos[index] # Indexar cuentas Id\n logging.info('Checking account %(cidb)s', {'cidb':cidb})\n # Ejecutar consulta de fecha actual\n cursor.execute(\"\"\"SELECT to_timestamp(fecha_revision/1000) as \"fec_cambio_db\", db.rev, uw.email, \n db.cuenta_id, db.numero_cuenta_recaudo, db.titular_cuenta_recaudo, db.documento_titular_cuenta_recaudo,\n db.titular_cr_tipo_documento, db.tipo_cuenta_recaudo, db.banco_id, db.swift, db.pais_cuenta_recaudo\n FROM audit.datos_bancarios_aud db\n inner join audit.revision_auditoria ra ON (db.rev = ra.revision_id)\n inner join pps.usuario_web uw on (db.usuario_modificacion_id=uw.usuario_web_id)\n WHERE (to_timestamp(fecha_revision/1000))>=%(today)s and (to_timestamp(fecha_revision/1000))<%(tomorrow)s\n and db.cuenta_id=%(cid)s\"\"\", {'cid': cidb,'today':today,'tomorrow':tomorrow})\n for i in xrange(cursor.rowcount):\n temp.append(cursor.fetchone())\n # Ejecutar consulta de cambio inmediatamente anterior\n cursor.execute(\"\"\"SELECT to_timestamp(fecha_revision/1000) as \"fec_cambio_db\", db.rev, \n uw.email, db.cuenta_id, db.numero_cuenta_recaudo, db.titular_cuenta_recaudo, db.documento_titular_cuenta_recaudo,\n db.titular_cr_tipo_documento, db.tipo_cuenta_recaudo, db.banco_id, db.swift, db.pais_cuenta_recaudo\n FROM audit.datos_bancarios_aud db\n inner join audit.revision_auditoria ra ON (db.rev = ra.revision_id)\n inner join pps.usuario_web uw on (db.usuario_modificacion_id=uw.usuario_web_id)\n WHERE (to_timestamp(fecha_revision/1000))<%(today)s and db.cuenta_id=%(cidb)s\n order by \"fec_cambio_db\" desc limit 1\"\"\", {'cidb': cidb, 'today':today})\n i = 0\n for i in xrange(cursor.rowcount):\n temp.append(cursor.fetchone())\n cambios = pd.DataFrame(temp,columns = ['fecha_rev', 'rev_id', 'email', 'cuenta_id', 'numero_cuenta_rec',\n 'titular_cuenta_rec','dni_titular_cuenta','dni_tipo_titular_cuenta',\n 'tipo_cuenta_rec','banco_id','swift','pais_cuenta_rec'])\n cambios['fecha_rev'] = pd.to_datetime(cambios.fecha_rev)\n cambios = cambios.sort_values(by='fecha_rev')\n cambiosord2 = cambios.shift()\n cambiosord2.columns = ['fecha_rev_prev', 'rev_id_prev', 'email_prev', 'cuenta_id_prev', 'numero_cuenta_rec_prev',\n 'titular_cuenta_rec_prev','dni_titular_cuenta_prev','dni_tipo_titular_cuenta_prev',\n 'tipo_cuenta_rec_prev','banco_id_prev','swift_prev','pais_cuenta_rec_prev']\n df = pd.concat([cambios, cambiosord2], axis=1)\n # En caso de que sea un primer cambio, ejecutar script de identificacion\n if len(df)!=0:\n df.drop(df.index[0], inplace=True)\n df['cambio'] = np.where(df['numero_cuenta_rec'] != df['numero_cuenta_rec_prev'], 'numero_cuenta',\n np.where(df['titular_cuenta_rec'] != df['titular_cuenta_rec_prev'], 'titular_cuenta',\n np.where(df['dni_titular_cuenta'] != df['dni_titular_cuenta_prev'], 'dni_titular',\n np.where(df['dni_tipo_titular_cuenta'] != df['dni_tipo_titular_cuenta_prev'], 'tipo_dni_titular',\n np.where(df['tipo_cuenta_rec'] != df['tipo_cuenta_rec_prev'], 'tipo_cuenta',\n np.where(df['banco_id'] != df['banco_id_prev'], 'banco_id',\n np.where(df['swift'] != df['swift_prev'], 'swift',\n np.where(df['pais_cuenta_rec'] != df['pais_cuenta_rec_prev'], 'pais_cuenta', None\n ))))))))\n df = df[df.cambio.notnull()]\n audcuentas = audcuentas.append(df, 
ignore_index=True)\n logging.info('Found %(numbers)s changes',{'numbers':len(df)})\n\n # Limpiar variables iterativas\n df = df.iloc[0:0]\n cambios=cambios.iloc[0:0]\n cambiosord2=cambiosord2.iloc[0:0]\n temp=[]\n logging.info('Account Id %(cid)s finished',{'cid':cidb})\n # Eliminar Columnas innecesarias\n logging.warning('Process Finished. Proceeding to concatenate and save')\n return audcuentas\n\n# ----------------------------------------------------------------------------------------------------------------------\n\n" }, { "alpha_fraction": 0.6865839958190918, "alphanum_fraction": 0.7271702289581299, "avg_line_length": 25.84848403930664, "blob_id": "7e86e06d9a43431975f1669f4e9a2c8fac543e11", "content_id": "99d394d430a628c458d04752f196eef2f1e64c19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 887, "license_type": "no_license", "max_line_length": 99, "num_lines": 33, "path": "/pricing-consulta-cuentas.py", "repo_name": "protos123/reportessimon", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport psycopg2 as db\nimport logging\nimport sys\nimport datetime\nimport queries as qp\nimport os\ntry:\n conn = db.connect(dbname='pol_v4', user='readonly', host='172.18.35.22', password='YdbLByGopWPS4zYi8PIR')\n cursor = conn.cursor()\nexcept:\n logging.error('Cannot connect to database. Please run this script again')\n sys.exit()\n\naccounts = pd.DataFrame([500509],columns=['cuenta_id'])\nprint accounts\n\nstart = datetime.date(2017,01,01)\nend = datetime.date(2017,12,31)\ndays = end-start\npricing = pd.DataFrame()\nfor x in xrange(0,days.days):\n date = start + datetime.timedelta(days=x)\n cambios = qp.controlcambioscuentas(accounts,date)\n pricing = pricing.append(cambios, ignore_index=True)\n cambios = cambios.iloc[0:0]\n\n\nfilename='Nombre de archivo a guardar.xlsx'\nwriter = pd.ExcelWriter(filename)\npricing.to_excel(writer)\nwriter.save()\n\n" } ]
10
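The change-detection idiom in the `queries.py` functions above (`controlcambioscuentas`, `listacuentasbancos`, `controlcambioscuentasbancos`) is the same each time: order one account's audit rows by revision timestamp, pair every row with its predecessor via `DataFrame.shift()`, and record the first watched column that differs. A standalone sketch of that idiom follows; the frame layout and column names are illustrative, not the module's API.

```python
import pandas as pd

def diff_revisions(revs: pd.DataFrame, watched_cols) -> pd.DataFrame:
    """Keep rows whose watched columns differ from the previous revision."""
    revs = revs.sort_values("fecha_rev").reset_index(drop=True)
    prev = revs.shift().add_suffix("_prev")                  # pair row i with row i - 1
    both = pd.concat([revs, prev], axis=1).iloc[1:].copy()   # row 0 has no predecessor
    # tag each row with the first watched column that changed, else None
    both["cambio"] = both.apply(
        lambda r: next((c for c in watched_cols if r[c] != r[c + "_prev"]), None),
        axis=1,
    )
    return both[both["cambio"].notnull()]
```

Written this way, watching another column is one more list entry instead of another level of the nested `np.where` ladder; the one-query-per-account loop could likewise collapse into a single windowed (LAG) SQL query, at the cost of the per-account logging the module does.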
derhaodh/pdf-renamer
https://github.com/derhaodh/pdf-renamer
ac2dfc4750f11a199b1fe9cfedfa4643aa87d35f
7d0463c8fc8414e7fd11d091ecd984c012bbd53d
c8537a78a8b0c3416ba50ff4c608504800b2b2d6
refs/heads/main
2023-08-27T22:37:09.580346
2021-11-05T13:27:57
2021-11-05T13:27:57
404,412,625
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7976190447807312, "alphanum_fraction": 0.7976190447807312, "avg_line_length": 41, "blob_id": "11adf5c627489bb94af1639c5f78b1c7b5ec4fce", "content_id": "565556997ae26220ff170825b65eab7b508f75e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 84, "license_type": "no_license", "max_line_length": 69, "num_lines": 2, "path": "/README.md", "repo_name": "derhaodh/pdf-renamer", "src_encoding": "UTF-8", "text": "# Pdf renamer\nA simple pdf renamer that rename the filename based on the field data\n" }, { "alpha_fraction": 0.5986308455467224, "alphanum_fraction": 0.6014198660850525, "avg_line_length": 31.016260147094727, "blob_id": "6b733de6913290e59ba5b1ffa76ec60e2c74caf6", "content_id": "32da89f7857fd50c7fce5def11e53f4dea9aa60e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3944, "license_type": "no_license", "max_line_length": 132, "num_lines": 123, "path": "/pdf_renamer.py", "repo_name": "derhaodh/pdf-renamer", "src_encoding": "UTF-8", "text": "import pdftotext\nimport re\nimport sys\nimport os\nfrom os import walk\nimport argparse\nfrom pathlib import Path\nimport logging\n\n\ntry:\n import colorlog\nexcept ImportError:\n pass\nclass Company:\n def __init__(self, name):\n self.name = name\n self.number_of_appearance = 1\n\ndef setup_logging():\n root = logging.getLogger()\n root.setLevel(logging.DEBUG)\n format = '%(asctime)s - %(levelname)-8s - %(message)s'\n date_format = '%Y-%m-%d %H:%M:%S'\n if 'colorlog' in sys.modules and os.isatty(2):\n cformat = '%(log_color)s' + format\n f = colorlog.ColoredFormatter(cformat, date_format,\n log_colors = { 'DEBUG' : 'reset', 'INFO' : 'reset',\n 'WARNING' : 'bold_yellow', 'ERROR': 'bold_red',\n 'CRITICAL': 'bold_red' })\n else:\n f = logging.Formatter(format, date_format)\n ch = logging.StreamHandler()\n ch.setFormatter(f)\n root.addHandler(ch)\n\n\n\n\ndef pdf_rename(file_name, path):\n folder = Path(path)\n file_to_open = folder / file_name\n pdf_to_text(file_to_open, folder)\n\ndef pdf_to_text(file_name, dest_folder):\n\n with open(file_name, \"rb\") as f:\n pdf = pdftotext.PDF(f)\n\n text = pdf[0]\n content = [line.split(':') for line in text.splitlines()]\n extract_company_name(content, file_name, dest_folder)\n\ndef extract_company_name(content, file_name, dest_folder): \n ## remove items that is less than 2 in the list\n for x in content:\n if len(x) != 2:\n content.remove(x)\n\n company_name = None\n ## Extract content that has \"To\" keyword\n for x in content:\n ## get the company name if the first item has keyword \"To \"\n if x[0].startswith('To'):\n company_name = x[1]\n if(company_name not in [x.name for x in global_company]):\n global_company.append(Company(name=company_name))\n else:\n index = findCompanyByName(company_name)\n global_company[index].number_of_appearance += 1\n \n\n if company_name is None:\n output_msg = f'{os.path.basename(file_name)} does not contain keyword \"To\"'\n log.error(output_msg)\n else: \n ##remove whitespace \n ##company_name = company_name.strip()\n ##search company in the global list\n index = findCompanyByName(company_name)\n company_name = global_company[index].name.strip()\n number_of_appearance = global_company[index].number_of_appearance\n if(number_of_appearance == 1):\n rename_file(company_name, file_name, dest_folder)\n else:\n rename_file(newfile_name=company_name + \" \"+str(number_of_appearance), ori_file_name=file_name, 
dest_folder=dest_folder)\n\n\ndef rename_file(newfile_name,ori_file_name, dest_folder):\n target_file_name = os.path.join(dest_folder/\"new\", f'{newfile_name}.pdf')\n if not os.path.exists(dest_folder/\"new\"):\n os.makedirs(dest_folder/\"new\")\n os.rename(ori_file_name, target_file_name)\n output_msg = f' {os.path.basename(ori_file_name)} has been successfully renamed as {newfile_name}'\n log.info(output_msg)\n\ndef findCompanyByName(name):\n for i in range(len(global_company)):\n if global_company[i].name == name:\n return i\n\nif __name__ == \"__main__\":\n setup_logging()\n global_company = []\n log = logging.getLogger(__name__)\n parser = argparse.ArgumentParser(description='A simple pdf renamer')\n parser.add_argument(\"-p\")\n\n args = parser.parse_args()\n path = args.p\n\n f = []\n for (dirpath, dirnames, filenames) in walk(path):\n if any(f.endswith('.pdf') for f in filenames):\n f.extend(filenames)\n break\n else:\n log.error(f'{filenames} does not have pdf extension')\n if len(f) == 0:\n log.error('No file is found')\n sys.exit()\n for x in f:\n pdf_rename(x, path)\n \n\n" }, { "alpha_fraction": 0.5151515007019043, "alphanum_fraction": 0.6969696879386902, "avg_line_length": 15.5, "blob_id": "d078d6864e5df592b60e22ec65af52c2a037cf08", "content_id": "22aeec4086db388100a00e67ca9b88d09d6ce5e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 33, "license_type": "no_license", "max_line_length": 16, "num_lines": 2, "path": "/requirements.txt", "repo_name": "derhaodh/pdf-renamer", "src_encoding": "UTF-8", "text": "colorlog==6.4.1\npdftotext==2.2.0\n" } ]
3
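`pdf_renamer.py` above deduplicates company names with a `Company` class plus the linear `findCompanyByName` scan. A `collections.Counter` captures the same rule, first file keeps the bare name and later ones get a numeric suffix, without the scan (a sketch, not the script's interface):

```python
from collections import Counter

_seen = Counter()

def unique_name(company: str) -> str:
    """First occurrence keeps the bare name; repeats become 'Name 2', 'Name 3', ..."""
    base = company.strip()
    _seen[base] += 1
    n = _seen[base]
    return base if n == 1 else f"{base} {n}"
```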
ramok/usb-can-ch340
https://github.com/ramok/usb-can-ch340
3b7b95a93f196e8b3b9337f49fb1bef60981a1ea
0d5a27c247507f6204da2cdd536251f4ff1e84fc
b8cb7e9fe395f8164efee8675202afb8d9e9f71e
refs/heads/master
2021-01-18T05:30:55.308919
2016-05-10T17:25:10
2016-05-13T00:41:42
45,265,395
0
0
null
2015-10-30T17:14:35
2015-10-30T16:11:05
2015-10-30T14:57:45
null
[ { "alpha_fraction": 0.3861227035522461, "alphanum_fraction": 0.4118213951587677, "avg_line_length": 29.8217830657959, "blob_id": "eeeaf42e44b896f14971fca2f1ca25225821c59a", "content_id": "66038c044d5c0e386b057c27e1d983d1124e934b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3113, "license_type": "no_license", "max_line_length": 83, "num_lines": 101, "path": "/canshell.py", "repo_name": "ramok/usb-can-ch340", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport usb_can_ch340 as can\nimport threading\nimport time\nimport sys\nimport select\nimport re\n\nclass candriver(threading.Thread):\n \n USBCAN = None\n \n def __init__(self):\n threading.Thread.__init__(self)\n self.USBCAN = can.USBCAN (500000, \"standard\", \"normal\", timeout=0.005)\n self.USBCAN.flush()\n \n def run(self):\n self.starttime=time.time()\n while 1:\n \n inputready = select.select([sys.stdin, self.USBCAN.canport], [], [])[0]\n\n for s in inputready:\n if s == sys.stdin:\n line = sys.stdin.readline()\n if line == \"\\n\":\n continue\n\n line = line.strip(\"\\n\")\n\n try:\n if \"send \" in line and \"#\" in line:\n self.cansend(line.split(\"send \")[-1])\n elif \"#\" in line:\n self.cansend(line)\n elif line == \"s\":\n self.USBCAN.bus_status()\n print (\"Bus Status: {}\".format(self.USBCAN.Buserrors))\n elif not line:\n return\n else:\n print (\"wrong input line\")\n except:\n print (\"Error: \", sys.exc_info()[0])\n\n if s == self.USBCAN.canport:\n self.USBCAN.rec()\n while len(self.USBCAN.Message) > 0:\n acttime = time.time()\n message = self.USBCAN.Message.pop(0)\n print( \" (\" + \"{:.6f}\".format(acttime) +\n \") can0 RX - - \" + message[\"ID\"][-3:] +\n \" [\"+ str(message[\"length\"]) + \"] \" +\n ' '.join('{:02x}'.format(x) for x in message[\"data\"]) )\n \n \n def cansend(self, adddata):\n addr = adddata.split( \"#\" ) [0]\n data = adddata.split( \"#\" ) [1]\n \n while len(addr) < 8:\n addr = \"0\" + addr\n \n data = re.sub('\\.', '', data)\n \n self.USBCAN.send(addr, data)\n acttime = time.time()\n \n data = re.findall('.{2}', data)\n \n print( \" (\" + \"{:.6f}\".format(acttime) +\n \") can0 TX - - \" + addr[-3:] +\n \" [\"+ str(len(data)) + \"] \" +\n \" \".join(data))\n \n def close(self):\n self.USBCAN.close()\n \n def __del__(self):\n self.close()\n\nif __name__ == \"__main__\":\n \n canthread=candriver()\n #canthread.cansend( \"000#8100\" )\n canthread.start()\n \n # USBCAN=can.USBCAN(500000,\"Standard\",\"normal\")\n # try:\n # #USBCAN.set_IDfilter([\"00000584\",\"00000604\"])\n # USBCAN.send(\"11111111\",\"123456\")\n # #time.sleep(2)\n # print(USBCAN.rec(10))\n # print(USBCAN.Message)\n # USBCAN.flush()\n # print(USBCAN.Message)\n # finally:\n # USBCAN.close()\n" } ]
1
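`canshell.py` above waits on two inputs at once, interactive stdin commands and frames arriving on the CAN serial port, with a single blocking `select.select()` call. Its loop reduces to this skeleton (the two callbacks are placeholders; passing file objects to `select` like this assumes a POSIX platform, since on Windows `select` only accepts sockets):

```python
import select
import sys

def pump(port, on_command, on_frame):
    """Dispatch whichever source becomes readable first (sketch)."""
    while True:
        readable, _, _ = select.select([sys.stdin, port], [], [])
        for src in readable:
            if src is sys.stdin:
                line = sys.stdin.readline()
                if not line:          # EOF on stdin ends the pump
                    return
                on_command(line.strip())
            else:
                on_frame(port)        # bytes are waiting on the adapter
```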
caoqiming/four-ducks
https://github.com/caoqiming/four-ducks
9aa7541ce1225488f34d6d4b4359e9c4a29e5be8
6bf7e53153e67aac7dec240d76035017587df10e
958436a02449af2dc48d4e0dd98fce8fc75b8bc0
refs/heads/master
2020-12-04T00:00:42.539899
2020-01-03T06:57:38
2020-01-03T06:57:38
231,531,036
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.48961424827575684, "alphanum_fraction": 0.5242334604263306, "avg_line_length": 22.119047164916992, "blob_id": "5ba715a032e8cac9aabdc7debe488a2c01c57887", "content_id": "5917186d3c4af4f304b98dae39c05e5b4bafad58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1107, "license_type": "no_license", "max_line_length": 55, "num_lines": 42, "path": "/_4ducks.py", "repo_name": "caoqiming/four-ducks", "src_encoding": "UTF-8", "text": "import math\r\nimport random\r\ndef is_close_to(d1,d2):#判断d2是否在d1开始顺时针方向pi的范围内,在则返回true\r\n if d1<math.pi:\r\n if d2>d1 and d2<d1+math.pi:\r\n return True\r\n else:\r\n if d2>d1 or d2<d1-math.pi:\r\n return True\r\n return False\r\n\r\ndef check_ducks(ducks):#输入4个角度,判断是否在一个半圆内\r\n for i in range(0,len(ducks)):\r\n all_close_to_i=1\r\n for j in range(0,len(ducks)):\r\n if i!=j:#不判断自身\r\n if not is_close_to(ducks[i],ducks[j]):\r\n all_close_to_i=0\r\n break\r\n if all_close_to_i==1:\r\n return True\r\n return False\r\n\r\ndef release_ducks(n):\r\n Ducks=[]\r\n for i in range(0,n):\r\n Ducks.append(random.uniform(0,2*math.pi))\r\n return Ducks\r\n\r\n\r\n\r\ndef main():\r\n ans_yes=0.0\r\n runtime=10000000\r\n for i in range(0,runtime):\r\n Ducks=release_ducks(4)#这里修改鸭子数\r\n if check_ducks(Ducks):\r\n ans_yes+=1\r\n print(ans_yes/runtime)\r\n\r\nif __name__==\"__main__\":\r\n main()" } ]
1
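`_4ducks.py` above estimates, by simulation, a probability with a known closed form: n points placed independently and uniformly on a circle all fall within some common semicircle with probability n / 2**(n-1), so the four-duck run should converge on 4/8 = 0.5. A one-line check to compare the estimate against:

```python
def semicircle_probability(n: int) -> float:
    """P(all n uniform points on a circle lie in some common semicircle)."""
    return n / 2 ** (n - 1)

assert semicircle_probability(4) == 0.5   # the value the 10**7-trial estimate approaches
```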
nazim164/Python-Code
https://github.com/nazim164/Python-Code
39902c0fd3798ec7d08a5bff4b3d17d9b1d3fc59
54f9109ebdfd49a3191bedf6aaa0076281df0cd8
f2491b83a9224346131fb8fb0339e49bc8c3a745
refs/heads/master
2023-06-14T04:52:28.824324
2021-07-12T06:20:13
2021-07-12T06:20:13
385,144,467
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5581395626068115, "alphanum_fraction": 0.6151162981987, "avg_line_length": 27.633333206176758, "blob_id": "e3d2619b754b256789bda363f95bc06151d7ded1", "content_id": "9b13b5b3d69a7cb92e3ebf1d1274a0f3e2108fc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 860, "license_type": "no_license", "max_line_length": 67, "num_lines": 30, "path": "/Mark.py", "repo_name": "nazim164/Python-Code", "src_encoding": "UTF-8", "text": "name=(input(\"Enter Student Name :\"))\nroll=int(input(\"Enter Student Roll No :\"))\ns1=int(input(\"Enter S1 Marks :\"))\ns2=int(input(\"Enter S2 Marks :\"))\ns3=int(input(\"Enter S3 Marks :\"))\ns4=int(input(\"Enter S4 Marks :\"))\ns5=int(input(\"Enter S5 Marks :\"))\ntotal=s1+s2+s3+s4+s5\nper=total/5\n\nif s1>=40 and s2>=40 and s3>=40 and s4>=40 and s5>=40 and per>=50 :\n result=\"Pass\"\nelse :\n result=\"Fail\"\nif per<50 and result==\"Fail\" :\n Grade=\"*******\"\nelif per<=50 and per>65 and result==\"Pass\" :\n Grade=\"C\"\nelif per<=65 and per>75 and result==\"Pass\":\n Grade =\"B\"\nelif per>=75 and per<85 and result==\"Pass\":\n Grade=\"A\"\nelif per>=85 and result==\"Pass\":\n Grade =\"A+\"\nelse :\n Grade=\"*****\"\n \nprint(\"Student Name Is :\",name,\"Student Roll No Is :\",roll)\nprint(\"Total Is :\",total,\"Percentage Is :\",per)\nprint(\"Result Is :\",result,\"Grade Is :\",Grade)\n\n" }, { "alpha_fraction": 0.5664621591567993, "alphanum_fraction": 0.5971370339393616, "avg_line_length": 21.272727966308594, "blob_id": "d8177babf61e6ac88ee9706c83cbd30af1ff0165", "content_id": "67cced5e1e8cf2384c5bf7c996d60825756390da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 489, "license_type": "no_license", "max_line_length": 42, "num_lines": 22, "path": "/String.py", "repo_name": "nazim164/Python-Code", "src_encoding": "UTF-8", "text": "str=(input(\"Enter Your String :\"))\nnewstr=''\ncount1=0\ncount2=0\ncount3=0\n\nfor i in str:\n if (i.isupper())==True:\n count1+=1\n newstr+=(i.lower())\n elif (i.islower())==True:\n count2+=1\n newstr+=(i.upper())\n elif(i.isspace())==True:\n count3+=1\n newstr+=i\n\nprint(\"In Orignal String \")\nprint(\"Uppar Case String Is :\",count1)\nprint(\"Lower Case String Is :\",count2)\nprint(\"Spaces In String Is :\",count3)\nprint(\"After Changing The Cases :\",newstr)" }, { "alpha_fraction": 0.6871165633201599, "alphanum_fraction": 0.6871165633201599, "avg_line_length": 31.799999237060547, "blob_id": "49068c9ed3cc50fd85f9b9ff2696dfd694e8ed96", "content_id": "377d0f8d849c34d2750f9e14a05b37ca150b2356", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 163, "license_type": "no_license", "max_line_length": 41, "num_lines": 5, "path": "/first.py", "repo_name": "nazim164/Python-Code", "src_encoding": "UTF-8", "text": "a=int(input(\"Enter First Number :\"))\nb=int(input(\"Enter Second Number :\"))\nc=int(input(\"Enter Third Number :\"))\nsum=a+b+c\nprint(\"Addtion Of Three Number Is :\",sum)" }, { "alpha_fraction": 0.6125289797782898, "alphanum_fraction": 0.6334106922149658, "avg_line_length": 26, "blob_id": "734a43fe03edbecdc8e6866f054b292727c00db9", "content_id": "fa94f26591aa82c2e2db0bac1888fc382aa1f537", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 431, "license_type": "no_license", "max_line_length": 65, "num_lines": 16, "path": "/vowels.py", "repo_name": "nazim164/Python-Code", 
"src_encoding": "UTF-8", "text": "str=(input(\"Enter A String :\"))\nvowel=0\nconsonant=0\nfor i in str:\n if i=='a' or i=='e' or i=='i' or i=='o' or i=='u':\n vowel+=1\n else:\n consonant+=1\n\nprint(\"Number Of Wovels :\",vowel,\"Number Of Consonant\",consonant)\n\nstr1=(input(\"Enter The String :\"))\nprint(\"Orignal String :\",str1)\nprint(\"UpperCase String :\",str1.upper())\nprint(\"LowerCase String :\",str1.lower())\nprint(\"Capatalize String :\",str1.capitalize())" }, { "alpha_fraction": 0.5125628113746643, "alphanum_fraction": 0.6532663106918335, "avg_line_length": 21.22222137451172, "blob_id": "98a584446511ce482e97cc5c2b0f77172807be41", "content_id": "fe06da9ebe11d1ff5b5e5645e030ebfb7d58b798", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 199, "license_type": "no_license", "max_line_length": 60, "num_lines": 9, "path": "/list.py", "repo_name": "nazim164/Python-Code", "src_encoding": "UTF-8", "text": "list=[10,\"Nazim\",100,20,30,\"Sumit\",\"Aniket\",\"Sohail\",1,5,32]\n'''''\nlist.append(\"Apple\")\nlist.remove(\"Nazim\")\n'''\nlist1=[\"Banana\",\"Cherry\",\"Apple\"]\nlist2=[10,20,30,40,5]\nlist3=list1+list2\nprint(list3)" }, { "alpha_fraction": 0.446958988904953, "alphanum_fraction": 0.4900990128517151, "avg_line_length": 37.08108139038086, "blob_id": "f2f7d2d59c861c26eaebe1b7036c769691b1f353", "content_id": "190bfb26cd0952920f6458759923390e8bbac6a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1414, "license_type": "no_license", "max_line_length": 136, "num_lines": 37, "path": "/markwithclassfunction.py", "repo_name": "nazim164/Python-Code", "src_encoding": "UTF-8", "text": "\n\n\n\nclass Myclass ():\n def show(self):\n \n name=(input(\"Enter Student Name :\"))\n roll=int(input(\"Enter Student Roll No :\"))\n s1=int(input(\"Enter Student Subject S1 :\"))\n s2=int(input(\"Enter Student Subject S2 :\"))\n s3=int(input(\"Enter Student Subject S3 :\"))\n s4=int(input(\"Enter Student Subject S4 :\"))\n s5=int(input(\"Enter Student Subject S5 :\"))\n\n total=s1+s2+s3+s4+s5\n per=total/5\n if s1>=40 and s2>=40 and s3>40 and s4>=40 and s5>40 and per>=50 :\n result=\"Pass\"\n else :\n result=\"Fail\"\n if per<50 and result==\"Fail\" :\n Grade=\"****\"\n elif per<=50 and per>65 and result==\"Paass\" :\n Grade=\"C\"\n elif per>=65 and per<75 and result==\"Pass\":\n Grade=\"B\"\n elif per>=75 and per<85 and result==\"Pass\":\n Grade=\"A\"\n elif per>=85 and result==\"Pass\":\n Grade=\"A+\"\n else:\n \n Grade=\"*******\"\n print(\"Student Name : \",name,\"!!!!!\",\"Student Roll No :\",roll)\n print(\"Subject S1 :\",s1,\"!!!!!\",\"Subject S2 :\",s2,\"!!!!!\",\"Subject S3 :\",s3,\"!!!!!\",\"Subject S4 :\",s4,\"!!!!!\",\"Subject S5 :\",s5)\n print(\"Total Of Student :\",total,\"!!!!!\",\"Percentage Of Student :\",per)\n print(\"Student Result :\",result,\"!!!!!\",\"Student Grade :\",Grade)\n\nm1=Myclass()\nm1.show()\n\n" }, { "alpha_fraction": 0.5957446694374084, "alphanum_fraction": 0.5957446694374084, "avg_line_length": 19.94444465637207, "blob_id": "dd2a8c0afd72641f22c22f9868447655b508be0a", "content_id": "2d6e9605c6dccca3df7813d670aed1445966dfb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 376, "license_type": "no_license", "max_line_length": 37, "num_lines": 18, "path": "/find.py", "repo_name": "nazim164/Python-Code", "src_encoding": "UTF-8", "text": "a=int(input(\"Enter First Number 
:\"))\nb=int(input(\"Enter Second Number :\"))\nc=int(input(\"Enter Third Number :\"))\n\nif a>b and a>c :\n print(\"A Is Greter\")\nelif b>a and b>c :\n print(\"B Is Greater\")\nelif c>a and c>b :\n print(\"C Is Greater\")\n\n\nif a<b and a<c :\n print(\"A Is Lowest\")\nelif b<a and b<c :\n print(\"B Is Lowest\")\nelif c<a and c<b :\n print(\"C Is Lowest\")" }, { "alpha_fraction": 0.48701298236846924, "alphanum_fraction": 0.5324675440788269, "avg_line_length": 13.699999809265137, "blob_id": "24460c59194c445078026a3336896e695f752851", "content_id": "eda452bf83faabac3081fb39ff90a3760fd986a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 154, "license_type": "no_license", "max_line_length": 25, "num_lines": 10, "path": "/largest.py", "repo_name": "nazim164/Python-Code", "src_encoding": "UTF-8", "text": "\na=12\nb=130\nc=15\n\nif a>b and a>c :\n print(\"A Is Largest\")\nif b>a and b>c :\n print(\"B Is Largest\")\nif c>a and c>b :\n print(\"C is Largest\")\n\n\n " }, { "alpha_fraction": 0.39344263076782227, "alphanum_fraction": 0.44262295961380005, "avg_line_length": 15.285714149475098, "blob_id": "b21ccaabcefc474636563f1906f5feb311ab03e0", "content_id": "cf7cc02e1c7fce537fcb0ce6b415410884b76d0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 122, "license_type": "no_license", "max_line_length": 24, "num_lines": 7, "path": "/func.py", "repo_name": "nazim164/Python-Code", "src_encoding": "UTF-8", "text": "def f1() :\n print(\"F1 Function\")\ndef f2():\n print(\"F2 Function\")\n f1()\nif __name__=='__main__':\n f2()\n \n " }, { "alpha_fraction": 0.6300366520881653, "alphanum_fraction": 0.6410256624221802, "avg_line_length": 23.909090042114258, "blob_id": "9d56e73150900a2675df82d8ea4869d0116a7ee0", "content_id": "50b3dbfd3a52959eac426ddda7fcf572811dd939", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 273, "license_type": "no_license", "max_line_length": 39, "num_lines": 11, "path": "/bill.py", "repo_name": "nazim164/Python-Code", "src_encoding": "UTF-8", "text": "itemame=(input(\" Item Name :\"))\nprice=int(input(\" Price :\"))\nQaunt=int(input(\" Qauntity :\"))\n\ntotal=price*Qaunt\ndis=total*0.05\nnetprice=total-dis\nprint(\"!!!!!!!!!!!!!!!! 
TOTAL BILL IS\")\nprint(\"Total Is :\",total)\nprint(\"Discount Is :\",dis)\nprint(\"NetPrice Is :\",netprice)" }, { "alpha_fraction": 0.42822086811065674, "alphanum_fraction": 0.42822086811065674, "avg_line_length": 26.100000381469727, "blob_id": "1568e737db50e2309401e25d8db68b82cfdde18d", "content_id": "94da37dd5a4ea65bd7f8ed0e33097b2f0469a433", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 815, "license_type": "no_license", "max_line_length": 53, "num_lines": 30, "path": "/grlrwithclass.py", "repo_name": "nazim164/Python-Code", "src_encoding": "UTF-8", "text": "class Myclass() :\n def show(self) :\n \n \n a=int(input(\"Enter First Number :\"))\n b=int(input(\"Enter Second Number :\"))\n c=int(input(\"Enter Third Number :\"))\n if a>b and a>c :\n print(\"A Is Greater !!!!\")\n elif b>a and b>c :\n print(\"B Is Greater !!!\")\n else :\n print(\"C Is Greaterc!!!\")\n def name(self):\n x=int(input(\"Enter First Number :\"))\n y=int(input(\"Enter Second Number :\"))\n z=int(input(\"Enter Third Number\"))\n if x<y and x<z :\n print(\"X Is Lowest \")\n elif y<x and y<z :\n print(\"Y Is Lowest \")\n else :\n print(\"Z Is Lowest\")\n\n\n \n\nnext=Myclass()\nnext.show()\nnext.name()\n\n\n" }, { "alpha_fraction": 0.5076923370361328, "alphanum_fraction": 0.6846153736114502, "avg_line_length": 31.75, "blob_id": "a72437a12402ecdadd6fd87fbeddfe92667a43af", "content_id": "4b7023aabf5419edd1934fcc95859b19b0d92236", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 130, "license_type": "no_license", "max_line_length": 45, "num_lines": 4, "path": "/dict.py", "repo_name": "nazim164/Python-Code", "src_encoding": "UTF-8", "text": "data={101:\"Nazim\",102:\"Aniket\",103:\"Piyush\"}\nprint(data[101])\ndata1={\"Nazim\":101,\"Aniket\":102,\"Piyush\":103}\nprint(data1[\"Aniket\"])" }, { "alpha_fraction": 0.6816479563713074, "alphanum_fraction": 0.6816479563713074, "avg_line_length": 37.28571319580078, "blob_id": "ea550a16455ef827d07bd43169dcc15405a78704", "content_id": "7cd73bd9a9fa179c4039ea06f9823e8eaa8d58e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 267, "license_type": "no_license", "max_line_length": 49, "num_lines": 7, "path": "/len.py", "repo_name": "nazim164/Python-Code", "src_encoding": "UTF-8", "text": "# Find Data Type Of Variable\nname=(input(\"Enter Your Name :\"))\nage=int(input(\"Enter Your Age :\"))\nvalue=float(input(\"Enter Your Floating Value :\"))\nprint(\"A Type Of Name Is :\",type(name))\nprint(\"A Type Of Age Is :\",type(age))\nprint(\"A Type Of value Is :\",type(value))" }, { "alpha_fraction": 0.4931506812572479, "alphanum_fraction": 0.5068492889404297, "avg_line_length": 27.399999618530273, "blob_id": "3bab5a5bc0ca17f82b7b17cd515062c2ef475228", "content_id": "48d122fdb84b970a756ebe709843b6d53094a898", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 292, "license_type": "no_license", "max_line_length": 61, "num_lines": 10, "path": "/voterwithclassfunction.py", "repo_name": "nazim164/Python-Code", "src_encoding": "UTF-8", "text": "class Show():\n def dis(self):\n name=(input(\"Enter Voter Name :\"))\n age=int(input(\"Enter Voter Age :\"))\n if age>=18 :\n print(\"You Are Eligible For Vote\")\n else :\n print(\"Sorry .... 
You Are Not Eligible For Vote\")\nf1=Show()\nf1.dis()\n " }, { "alpha_fraction": 0.5333333611488342, "alphanum_fraction": 0.550000011920929, "avg_line_length": 23, "blob_id": "bbb10da013e79d29930b3d0a7e0f2dc399be4d12", "content_id": "b401f5d19975acf7d48926fb7f30f42da284d32f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 120, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/class.py", "repo_name": "nazim164/Python-Code", "src_encoding": "UTF-8", "text": "class computer :\n def config(self) :\n print(\"My Name Is Nazim\")\n com1=computer()\n com1.config()\n" }, { "alpha_fraction": 0.6015228629112244, "alphanum_fraction": 0.6015228629112244, "avg_line_length": 19.789474487304688, "blob_id": "86610a415bc4ee97b71d1f9f8ee4332d6730f52a", "content_id": "7d0766c63f21fb8f40edc8ce3f7266b851e6bb59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 394, "license_type": "no_license", "max_line_length": 37, "num_lines": 19, "path": "/LargeUser.py", "repo_name": "nazim164/Python-Code", "src_encoding": "UTF-8", "text": "from abc import ABCMeta\n\n\na=int(input(\"Enter First Number :\"))\nb=int(input(\"Enter Second Number :\"))\nc=int(input(\"Enter Third Number :\"))\n\nif a>b and a>c :\n print(\"A Is Greater\")\nif b>a and b>c :\n print(\"B Is Greater\")\nif c>a and c>b :\n print(\"C Is Greater\")\nif a<b and a<c :\n print(\"A Is Lowest\")\nif b<a and b<c :\n print(\"B Is Lowest\")\nif c<a and c<b :\n print(\"C Is Lowest \")" }, { "alpha_fraction": 0.6242424249649048, "alphanum_fraction": 0.678787887096405, "avg_line_length": 32.20000076293945, "blob_id": "3cba64018260323cfb061ff4b79fa83e610c9223", "content_id": "193b13fcf7511c5fb7061dd709450a3e70952f66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 165, "license_type": "no_license", "max_line_length": 52, "num_lines": 5, "path": "/concate.py", "repo_name": "nazim164/Python-Code", "src_encoding": "UTF-8", "text": "str1=(input(\"Enter First String :\"))\nstr2=(input(\"Enter Second String :\"))\nstr3=(input(\"Enter Third String :\"))\n\nprint(\"Concate All String :\",(str1*3),str2*3,str3*3)" }, { "alpha_fraction": 0.551578938961029, "alphanum_fraction": 0.5705263018608093, "avg_line_length": 30.700000762939453, "blob_id": "6fbf8920cb17b30952c673aeeebff466dd18babe", "content_id": "57802072f1aca86c035372b6b54cd37edcac9542", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 950, "license_type": "no_license", "max_line_length": 69, "num_lines": 30, "path": "/meter.py", "repo_name": "nazim164/Python-Code", "src_encoding": "UTF-8", "text": "class Myclass():\n def show(args):\n \n\n name=(input(\"Enter Customer Name :\"))\n meter_no=int(input(\"Enter Meter No :\"))\n initail_r=float(input(\"Enter Initial Riding :\"))\n final_r=float(input(\"Enter Final Riding :\"))\n total_unit=final_r-initail_r\n type_of_cus=(input(\"Enter Type Of Customer :\"))\n\n if type_of_cus==\"Residential\":\n total_bil=total_unit*3.50\n elif type_of_cus==\"Agriculture\":\n total_bil=total_unit*2.50\n elif type_of_cus==\"Commercial\":\n total_bil=total_unit*6.50\n if total_bil>=8000:\n charge=total_bil*0.10\n total_bil=total_bil+charge\n\n\n\n print(\"Custmer Name :\",name,\"Meter No :\",meter_no)\n print(\"Final Riding :\",final_r,\"Initial Riding : \",initail_r)\n print(\"Type Of Customer :\",type_of_cus)\n print(\"Total Unit 
:\",total_unit,\"Total Bill :\",total_bil)\n\nm1=Myclass()\nm1.show()" }, { "alpha_fraction": 0.5038759708404541, "alphanum_fraction": 0.5348837375640869, "avg_line_length": 13.333333015441895, "blob_id": "9aa52516d877822f26b117ac6694c7ef44de8e82", "content_id": "7f43336bf5299430f4c0d8f467a34fab0ca6dc79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 258, "license_type": "no_license", "max_line_length": 41, "num_lines": 18, "path": "/get.py", "repo_name": "nazim164/Python-Code", "src_encoding": "UTF-8", "text": "def add() :\n x=10\n y=20\n sum=x+y\n print(sum)\nadd()\nadd()\ndef wifi():\n x=int(input(\"Enter First Number :\"))\n y=int(input(\"Enter Second Number :\"))\n result=x+y\n print(result)\nwifi()\ndef real(y):\n x=10\n z=x+y\n print(z)\nreal(10)\n" } ]
19
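Several scripts in the record above have inverted range checks; in `Mark.py`, for instance, `per<=50 and per>65` and `per<=65 and per>75` can never be true, so the C and B grades are unreachable. Checking thresholds from the top down removes the contradictions (the A and A+ boundaries come from the script; the B/C cutoffs are a guess at its intent):

```python
def grade(per: float, passed: bool) -> str:
    """Top-down grade ladder: the first threshold met wins."""
    if not passed:
        return "*******"
    if per >= 85:
        return "A+"
    if per >= 75:
        return "A"
    if per >= 65:
        return "B"
    return "C"
```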
vitoo22/showmylikes
https://github.com/vitoo22/showmylikes
86f62f308749a1ab6ee08f1529922388951d44f3
b4e1cba279561e26e2b8431c47ef9e4402c4c97f
1f793aa4172e6e298f7ba803218fe2a5055cf39e
refs/heads/master
2016-09-14T11:36:24.249022
2016-05-23T12:10:15
2016-05-23T12:10:15
59,207,925
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7149758338928223, "alphanum_fraction": 0.7185990214347839, "avg_line_length": 32.119998931884766, "blob_id": "ffaacdddc31662160d80f634b44d1fb8e974ab44", "content_id": "5d49f77e6a1e2933b31359fda4027b9431fc7c62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 828, "license_type": "no_license", "max_line_length": 95, "num_lines": 25, "path": "/app.py", "repo_name": "vitoo22/showmylikes", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request, redirect, url_for\nfrom get_instagrams_token import *\nimport urllib.request\nimport json\n\n\napp = Flask(__name__)\n\[email protected]('/')\ndef hello_world():\n return render_template('home.html')\n\[email protected]('/authorization-completed/', defaults={'code': ''})\[email protected]('/authorization-completed/<code>')\ndef authorization_completed(code):\n\tcode = request.args.get('code')\n\taccess_token = api.exchange_code_for_access_token(code)\n\turl = 'https://api.instagram.com/v1/users/self/media/liked?access_token=' + access_token[0]\n\tresponse = urllib.request.urlopen(url)\n\tstring = response.read().decode('utf-8')\n\tjson_data = json.loads(string)\n\treturn render_template('authorization-completed.html', code=access_token, json_data=json_data)\n\nif __name__ == '__main__':\n app.run(debug=True)\n" }, { "alpha_fraction": 0.6333333253860474, "alphanum_fraction": 0.7969697117805481, "avg_line_length": 29, "blob_id": "c222158b2e72d6a025493c39993e1553a15cf980", "content_id": "83aa33f216e9df316f8255daa16b86dc0b2f105b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 330, "license_type": "no_license", "max_line_length": 95, "num_lines": 11, "path": "/get_instagrams_token.py", "repo_name": "vitoo22/showmylikes", "src_encoding": "UTF-8", "text": "from instagram.client import InstagramAPI\n\nclient_id = 'acb84376812747f0b6086b14e905332b'\n\nclient_secret = 'b638f8a09d28480d8e20541a2c51bafc'\n\nredirect_uri = 'http://127.0.0.1:5000/authorization-completed/'\n\nscope = 'public_content'\n\napi = InstagramAPI(client_id=client_id, client_secret=client_secret, redirect_uri=redirect_uri)\n" } ]
2
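`get_instagrams_token.py` above ships a live `client_id`/`client_secret` pair inside the repository; the usual pattern, even for a demo, is to read credentials from the environment so secrets never enter version control. A sketch (the variable names are illustrative; note also that the v1 Instagram API and the `python-instagram` client used here have since been retired):

```python
import os

# Fail fast at import time if the deployment forgot to set the credentials.
CLIENT_ID = os.environ["INSTAGRAM_CLIENT_ID"]
CLIENT_SECRET = os.environ["INSTAGRAM_CLIENT_SECRET"]
REDIRECT_URI = os.environ.get(
    "INSTAGRAM_REDIRECT_URI", "http://127.0.0.1:5000/authorization-completed/"
)
```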
sfali16/udacity-python
https://github.com/sfali16/udacity-python
1b9071d743795446b4ae11aa8b980a9435696241
9763ab26f4e18d0318174490821a13bf8b3826b9
5b2e1c81afcc0e4e4f0a7f1661576653c05a2e89
refs/heads/master
2021-01-02T22:48:33.173710
2017-08-05T03:23:30
2017-08-05T03:23:30
99,396,011
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8214285969734192, "alphanum_fraction": 0.8214285969734192, "avg_line_length": 41, "blob_id": "5a9b6bd1cc266da8c4e67b87d70d35db89ecc864", "content_id": "de6206b2d5ad46708845581a5c54c41743eeafbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 84, "license_type": "no_license", "max_line_length": 66, "num_lines": 2, "path": "/README.md", "repo_name": "sfali16/udacity-python", "src_encoding": "UTF-8", "text": "# udacity-python\nrepository to share code with the group in machine learning course\n" }, { "alpha_fraction": 0.708108127117157, "alphanum_fraction": 0.7405405640602112, "avg_line_length": 15.909090995788574, "blob_id": "f97a252e6f8429f1ecc7651f13d6f007af5f0596", "content_id": "7d6d3dcde316b11639de5ff8acacb35f98d42cae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 185, "license_type": "no_license", "max_line_length": 42, "num_lines": 11, "path": "/hellopythonworld/src/PyJSGUIModule.py", "repo_name": "sfali16/udacity-python", "src_encoding": "UTF-8", "text": "'''\nCreated on Jul 30, 2017\n\n@author: Faraz\n'''\nimport pyjs\nfrom pyjamas.ui.RootPanel import RootPanel\nfrom pyjamas.ui.RootPanel import Label\n\nl = Label('Hello pyjs')\nRootPanel().add(l)" }, { "alpha_fraction": 0.517793595790863, "alphanum_fraction": 0.5284697413444519, "avg_line_length": 29.5, "blob_id": "cf0cbdb24f3a9af532adc3469d4ade2bf3bf6369", "content_id": "e86817061389a100f88d24f7879b7432eb9f0d9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1124, "license_type": "no_license", "max_line_length": 73, "num_lines": 36, "path": "/hellopythonworld/src/BinarySearch.py", "repo_name": "sfali16/udacity-python", "src_encoding": "UTF-8", "text": "'''\nCreated on Jul 30, 2017\n\n@author: Faraz\n'''\n\nclass BinarySearch(object):\n '''\n This is a binary search class created to learn python\n '''\n def __init__(self, array):\n '''\n Constructor\n '''\n self.array = array\n \n def search(self, target):\n ''' search the given list for a particular target. 
return -1 if\n target element is not found in the list, or returns the index in\n the list at which element is found'''\n self.array.sort()\n return self.binarySearch( 0, len(self.array)-1, target)\n \n def binarySearch(self, lo, high, target):\n pivot = (lo+high)/2\n# print(\"binarySearch called with low=\" + str(lo) + \", high=\" \n# + str(high) + \", pivot= \" + str(pivot))\n array = self.array\n if ( lo > high ):\n return -1\n if ( array[pivot] == target):\n return pivot\n elif ( array[pivot] < target ):\n return self.binarySearch( pivot+1, high, target)\n else:\n return self.binarySearch( lo, pivot, target)\n \n \n" }, { "alpha_fraction": 0.6527777910232544, "alphanum_fraction": 0.6643518805503845, "avg_line_length": 23, "blob_id": "d2fcab96f02c7ae2719b2170604da13a464ddf3f", "content_id": "7e782de15da74f02cfd697f861d6608ae584a151", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 432, "license_type": "no_license", "max_line_length": 62, "num_lines": 18, "path": "/MachineLearningUdacity/src/ClassifyNB.py", "repo_name": "sfali16/udacity-python", "src_encoding": "UTF-8", "text": "'''\nCreated on Aug 2, 2017\n\n@author: Faraz\n'''\n\n\ndef classify(features_train, labels_train): \n ### import the sklearn module for GaussianNB\n ### create classifier\n ### fit the classifier on the training features and labels\n ### return the fit classifier\n ### your code goes here!\n \n from sklearn import naive_bayes\n clf = naive_bayes.GaussianNB()\n clf.fit( features_train, labels_train )\n return clf\n" }, { "alpha_fraction": 0.5170876383781433, "alphanum_fraction": 0.570579469203949, "avg_line_length": 23.962963104248047, "blob_id": "82e3ae0e6febe231342da6881b182295409adc4b", "content_id": "c58c98bb41e19e41d84541f8d7e0d7dfc46d2b6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 673, "license_type": "no_license", "max_line_length": 90, "num_lines": 27, "path": "/hellopythonworld/test/PointTest.py", "repo_name": "sfali16/udacity-python", "src_encoding": "UTF-8", "text": "'''\nCreated on Jul 30, 2017\n\n@author: Faraz\n'''\nimport unittest\nfrom PointFile import Point\n\n\nclass PointTest(unittest.TestCase):\n\n def testEquals(self):\n p1 = Point(1,3)\n p2 = Point(1,3)\n print( p1 )\n print( \"p2={0}\".format(p2))\n self.assertFalse( p1 is p2, \"p1 is not p2, so this should be false\")\n self.assertEqual( p1, p2, \"p1 and p2 should be equal\" )\n pass\n\n def testAdd(self):\n p1 = Point(1,2)\n p2 = Point(3,4)\n self.assertEquals( Point(4,6), p1.add(p2), \"Not adding the two points \" + p1 + p2)\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()" }, { "alpha_fraction": 0.6315315365791321, "alphanum_fraction": 0.6576576828956604, "avg_line_length": 23.065217971801758, "blob_id": "7c15fc8436e3cc5d963470573ea6257e4e35c0f2", "content_id": "2b0d4d2a020f7f8e6ef2970bb53455c725e31c9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1110, "license_type": "no_license", "max_line_length": 68, "num_lines": 46, "path": "/hellopythonworld/src/FirstModule.py", "repo_name": "sfali16/udacity-python", "src_encoding": "UTF-8", "text": "'''\nCreated on Jul 29, 2017\n\n@author: Faraz\n'''\ndef add(a,b):\n return a+b\n\ndef addFixedValue(a):\n y=5\n return y+a\n\nprint( addFixedValue(6) )\nprint( add(6,5) )\n\nmyString = \"Lars\"\nprint( myString + \" 6\")\nprint( myString[0] )\nfor i in range(1,5):\n if i 
<= 3:\n print(i)\n else:\n print(i*2)\nisNone = myString is None\nprint( \"is myString None: {0}\".format(isNone) )\n# myString = input( \"Enter your name:\")\nprint(myString + \"licious, Faraz loves you\" )\n\nmyString = \"audacity\"\nprint( myString[1].upper()+myString[2:])\nprint( \"finding: \" + str(myString[1:].find('a') ))\n\nmylist = [\"Linux\", \"Mac OS\" , \"Windows\"]\n# Print the first list element\nprint(\"printing 0th element \" + mylist[0])\n# Print the last element\n# Negativ values starts the list from the end\nprint(\"printing -1 element\" + mylist[-1]) # should print out Windows\n# Sublist - first and second element\nprint(\"printing elements 0:2 {0}\".format( mylist[0:2]) )\n# Add elements to the list\nmylist.append(\"Android\")\n# Print the content of the list\nprint(\"Print full contents of the list:\")\nfor element in mylist:\n print(element)\n\n\n\n" }, { "alpha_fraction": 0.6054053902626038, "alphanum_fraction": 0.6594594717025757, "avg_line_length": 17.600000381469727, "blob_id": "f2018eb537d410a69e8339d22ba03d966e0f5f09", "content_id": "57cb502e38335bca4ecc454f48fe1fa15656dff7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 185, "license_type": "no_license", "max_line_length": 75, "num_lines": 10, "path": "/hellopythonworld/src/FilesModule.py", "repo_name": "sfali16/udacity-python", "src_encoding": "UTF-8", "text": "'''\nCreated on Jul 30, 2017\n\n@author: Faraz\n'''\nf = open('/Users/Faraz/eclipse/pyjs-pyjs-07f54ad/examples/HelloWorld', 'r')\nprint( f )\nfor line in f:\n print( line.rstrip())\nf.close()" }, { "alpha_fraction": 0.48148149251937866, "alphanum_fraction": 0.4933333396911621, "avg_line_length": 19.96875, "blob_id": "80d2c6feafb5b15dc77b1ba39d2d06b924fe7002", "content_id": "2c51295f7cbc4f88b529a66372f8446c94bbdd67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 675, "license_type": "no_license", "max_line_length": 67, "num_lines": 32, "path": "/hellopythonworld/src/PointFile.py", "repo_name": "sfali16/udacity-python", "src_encoding": "UTF-8", "text": "'''\nCreated on Jul 30, 2017\n\n@author: Faraz\n'''\nfrom Carbon.Aliases import false\n\nclass Point(object):\n '''\n This is a class created by Faraz to learn how to use python. 
It\n represents a Point with an x and y coordinate\n '''\n\n\n def __init__(self, x=0, y=0):\n '''\n Constructor\n '''\n self.x = x\n self.y = y\n \n def __str__(self):\n return ( \"x: \" + str(self.x) + \", y: \" + str( self.y ) ) \n \n def __eq__(self, other):\n equal = (self.x == other.x) and (self.y == other.y)\n return equal\n \n def add(self, other):\n self.x += other.x\n self.y += other.y\n return self\n " }, { "alpha_fraction": 0.6239520907402039, "alphanum_fraction": 0.6347305178642273, "avg_line_length": 25.125, "blob_id": "3c882f5b44288491843d78c0092c64e1e8f1d831", "content_id": "d307d45cd52ae6ecd41e6580ffb068bdeae724bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 835, "license_type": "no_license", "max_line_length": 96, "num_lines": 32, "path": "/hellopythonworld/test/WebCrawlerTest.py", "repo_name": "sfali16/udacity-python", "src_encoding": "UTF-8", "text": "'''\nCreated on Jul 30, 2017\n\n@author: Faraz\n'''\nimport unittest\nfrom WebCrawler import WebCrawler\n\n\nclass Test(unittest.TestCase):\n\n\n\n def setUp(self):\n self.crawler = WebCrawler()\n\n def testWebCrawler(self):\n page = ('<div id=\"top_bin\"><div id=\"top_content\" class=\"width960\">'\n'<div class=\"udacity float-left\"><a href=\"http://udacity.com\">')\n self.crawler.findUrlsInPage(page)\n \n self.crawler.findUrlsInPage( '<a href=\"http://udacity.com\">Hello world</a>')\n pass\n \n def testWebCrawlerFile(self):\n# filename = \"/Users/faraz/eclipse/python_workspace/hellopythonworld/udacity-source.htm\"\n filename = \"../udacity-source.htm\"\n self.crawler.findUrlsInFile( filename )\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()" }, { "alpha_fraction": 0.6245059370994568, "alphanum_fraction": 0.6472331881523132, "avg_line_length": 28.794116973876953, "blob_id": "bed3536bcb0f33cd004d06f36135dd39646fea8e", "content_id": "e5bff2feef12a2f153801b3c7f2d463c5bbff521", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1012, "license_type": "no_license", "max_line_length": 78, "num_lines": 34, "path": "/hellopythonworld/test/BinarySearchTest.py", "repo_name": "sfali16/udacity-python", "src_encoding": "UTF-8", "text": "'''\nCreated on Jul 30, 2017\n\n@author: Faraz\n'''\nimport unittest\nfrom BinarySearch import BinarySearch\n\nclass BinarySearchTest(unittest.TestCase):\n\n def searchForTarget(self, target, expectedIndex):\n self.searchArrayForTarget(None, target, expectedIndex)\n \n def searchArrayForTarget(self, list1, target, expectedIndex):\n \"\"\"\n Test the search function by searching the list provided for the target\n and ensure that the expectedIndex is what the search returns\n \"\"\"\n if ( list1 is not None ):\n self.testObject = BinarySearch(list1)\n self.assertEquals(expectedIndex, self.testObject.search(target))\n\n def testSearch(self):\n self.searchArrayForTarget([1, 2, 3], 3, 2)\n self.searchForTarget(4, -1)\n self.searchForTarget(2, 1)\n self.searchArrayForTarget([1], 3, -1)\n self.searchForTarget(1, 0)\n pass\n\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()" }, { "alpha_fraction": 0.4766584634780884, "alphanum_fraction": 0.488943487405777, "avg_line_length": 24.1875, "blob_id": "552518960e50614de234a3c84eb3e7c71828e436", "content_id": "560fd4059d08f2ba26fd69b8e3700244f535b9ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
814, "license_type": "no_license", "max_line_length": 60, "num_lines": 32, "path": "/hellopythonworld/src/WebCrawler.py", "repo_name": "sfali16/udacity-python", "src_encoding": "UTF-8", "text": "'''\nCreated on Jul 30, 2017\n\n@author: Faraz\n'''\n\nclass WebCrawler(object):\n '''\n classdocs\n '''\n def __init__(self):\n '''\n Constructor\n '''\n \n def findUrlsInPage(self, page):\n lastIndex = 0\n urlList = []\n while ( page.find(\"<a href=\", lastIndex) != -1):\n print( \"lastIndex=\" + str(lastIndex))\n urlIndex = page.find( \"<a href=\", lastIndex) + 9\n endIndex = page.find('\\\"', urlIndex)\n url = page[ urlIndex: endIndex ]\n# print(\"SFA_DEBUG url=\" + url)\n urlList.append( url )\n lastIndex = (endIndex+1)\n \n print( urlList )\n \n def findUrlsInFile(self, filename):\n fileObj = open( filename, 'r')\n self.findUrlsInPage( fileObj.read() )\n " }, { "alpha_fraction": 0.4714285731315613, "alphanum_fraction": 0.4952380955219269, "avg_line_length": 11.411765098571777, "blob_id": "6ae2c02f9848aad378777034a1667ba6de49bd6e", "content_id": "cac0517bc1702b48d82f1f33b7c3bee683291e8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 210, "license_type": "no_license", "max_line_length": 31, "num_lines": 17, "path": "/hellopythonworld/src/FileUtils.py", "repo_name": "sfali16/udacity-python", "src_encoding": "UTF-8", "text": "'''\nCreated on Aug 1, 2017\n\n@author: Faraz\n'''\n\nclass FileUtils(object):\n '''\n classdocs\n '''\n\n\n def __init__(self, params):\n '''\n Constructor\n '''\n def readFile(self, name):" } ]
12
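`BinarySearch.py` above recurses with `binarySearch(lo, pivot, target)` in its greater-than branch, so a one-element range whose value exceeds the target recurses on the same (lo, high) pair until the recursion limit: searching `[1]` for `0` never returns a result (the repo's tests happen to probe only the other branches). Shrinking the upper bound past the pivot terminates; iteratively:

```python
def binary_search(arr, target) -> int:
    """Index of target in sorted arr, or -1 if absent."""
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        mid = (lo + hi) // 2        # // keeps mid an int on Python 3
        if arr[mid] == target:
            return mid
        if arr[mid] < target:
            lo = mid + 1
        else:
            hi = mid - 1            # not mid: that is the non-terminating case
    return -1
```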
KevinSe/snap-py
https://github.com/KevinSe/snap-py
80605416827123d59153ce0c1e4b7d0b6d581129
3eb34ea3972f9d293da5afa2bca8e0dffc8039ab
62926499eab7034e942fa85a1e24d0f4fe879319
refs/heads/master
2016-08-12T19:22:31.856446
2016-02-08T11:16:27
2016-02-08T11:16:27
51,013,197
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.41203704476356506, "alphanum_fraction": 0.43611112236976624, "avg_line_length": 30.764705657958984, "blob_id": "16dcd09d75ed3ea28628e2b3b9e216389d08ef92", "content_id": "2a9272c2495622682902a3cf08ed061d4ef1c972", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1080, "license_type": "permissive", "max_line_length": 80, "num_lines": 34, "path": "/trend_k.py", "repo_name": "KevinSe/snap-py", "src_encoding": "UTF-8", "text": "__author__ = \"Kevin Serru\"\n___date___ = 03 / 02 / 2016\n\nimport numpy as np\n\ndef trend_k(data):\n \"\"\"\n This function takes an array of numbers and for each index tracks the number\n of indices since last up (positive) or down (negative)\n :param data: Array of numbers\n :return: Array of ints\n \"\"\"\n trend = []\n trend_i = 0\n up = 1\n for i in range(1, len(data)):\n delta = data[i] - data[i-1]\n if delta > 0:\n if up == 0: # if trend was decreasing\n trend_i = 1 # 1 index growth\n up = 1 # trend growing\n else:\n trend_i += 1\n elif delta == 0:\n trend_i = 0\n up = 0 # trend stabilized\n else:\n if up == 1: # if trend was increasing\n trend_i = -1 # 1 index decline\n up = 0 # trend decreasing\n else:\n trend_i -= 1\n trend.append(trend_i)\n return np.array(trend)\n" }, { "alpha_fraction": 0.7400000095367432, "alphanum_fraction": 0.7400000095367432, "avg_line_length": 24, "blob_id": "95c607dcbce1fcc5721087ee67afbf1487f3b552", "content_id": "7c32d98888c30c9def063dc269dcfb6c2fe2b63f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 50, "license_type": "permissive", "max_line_length": 39, "num_lines": 2, "path": "/README.md", "repo_name": "KevinSe/snap-py", "src_encoding": "UTF-8", "text": "# snap-py\nPieces of python code I'd like to store\n" }, { "alpha_fraction": 0.49789029359817505, "alphanum_fraction": 0.5569620132446289, "avg_line_length": 21.571428298950195, "blob_id": "5326f58574cf39835591b39a4df80ab5e22ca077", "content_id": "53d729ffc99558a5d761ba517cda0af5bec667ac", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 474, "license_type": "permissive", "max_line_length": 94, "num_lines": 21, "path": "/sort_arrays.py", "repo_name": "KevinSe/snap-py", "src_encoding": "UTF-8", "text": "__author__ = \"Kevin Serru\"\n___date___ = 08 / 02 / 2016\n\nimport numpy as np\n\ndef sort_arrays(self, A, B):\n \"\"\"\n This function takes two numpy arrays of the same dimension and sorts A against B's values \n :return: Array\n \"\"\"\n sorted_A = np.array([a for (b,a) in sorted(zip(B,A))])\n return sorted_A\n\nA = np.array([1,5,3,6])\nB = np.array([7,4,8,1])\nsorted_A = sort_arrays(A, B)\n\nprint(A, B, sorted_A)\n# A = [1, 5, 3, 6]\n# B = [7, 4, 8, 1]\n# sorted_A = [6, 5, 1, 3]\n" } ]
3
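`sort_arrays.py` above sorts through `sorted(zip(B, A))`, which compares tuples: whenever two values of B tie, Python falls back to comparing the paired elements of A, dragging A's values into the sort key. `np.argsort` states the intent directly and keeps ties stable:

```python
import numpy as np

def sort_by(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Reorder a by ascending values of b (ties in b keep their input order)."""
    return a[np.argsort(b, kind="stable")]

# Reproduces the example printed at the bottom of the file:
assert (sort_by(np.array([1, 5, 3, 6]), np.array([7, 4, 8, 1]))
        == np.array([6, 5, 1, 3])).all()
```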
NumEconCopenhagen/projects-2020-skm
https://github.com/NumEconCopenhagen/projects-2020-skm
2fcf8e8ac0d1ad1c5c882986208f151cb35eee11
15ab71681fd876918ccc63f332cf3e635d6245b2
eb6e30c0f459a1e2b79737691117b2109c603359
refs/heads/master
2021-03-05T00:43:25.890077
2020-05-15T17:23:15
2020-05-15T17:23:15
246,081,184
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7771084308624268, "alphanum_fraction": 0.7981927990913391, "avg_line_length": 43.266666412353516, "blob_id": "b0fd4c2b1a8415a86a7e7284f642db06a76d3de5", "content_id": "7f68520fed7c64a364b16843bc9cf37a741e669f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 664, "license_type": "permissive", "max_line_length": 212, "num_lines": 15, "path": "/dataproject/README.md", "repo_name": "NumEconCopenhagen/projects-2020-skm", "src_encoding": "UTF-8", "text": "# Data analysis project\n\nOur project is titled Project 2 and is about income and employment where we exploit to employment rates: the employment rate of the total workforce and the employment rate of descendants of non-western immigrants\n\nThe **results** of the project can be seen from running Employment_project.ipynb \n\nThis **loades two datasets**:\n\n1. Income data from statistikbanekn.dk/INDKP107\n1. Employment data from statistikbanekn.dk/RAS200\n\n**Dependencies:** Apart from a standard Anaconda Python 3 installation, the project requires the following installations:\n\n1. `pip install pandas-datareader`\n1. `pip install git+https://github.com/elben10/pydst`\n" }, { "alpha_fraction": 0.5749129056930542, "alphanum_fraction": 0.6292682886123657, "avg_line_length": 32.093021392822266, "blob_id": "2616b373c06eb8251a9fb5bbd1d3542df2bf144c", "content_id": "de099798c7bf6531a5d124d5fbf7dc615361a696", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1435, "license_type": "permissive", "max_line_length": 152, "num_lines": 43, "path": "/inauguralproject/inauguralproject.py", "repo_name": "NumEconCopenhagen/projects-2020-skm", "src_encoding": "UTF-8", "text": "import numpy as np\nimport math\nfrom scipy import optimize\n\n\n##Defining the utility function\ndef u_func(l, w, epsilon, m, nu, tau0, tau1, kappa):\n return math.log10( m+w*l-(tau0*w*l+tau1*max(w*l-kappa,0)) ) - nu*l**(1+1/epsilon)/(1+1/epsilon)\n\n# objective funciton (to minimize)\ndef value_of_choice(l, w, epsilon, m, nu, tau0, tau1, kappa):\n return -u_func(l, w, epsilon, m, nu, tau0, tau1, kappa)\n\n#Consumption function\ndef c_func(l, w, epsilon=0.3, m=1, tau0=0.4, tau1=0.1, kappa=0.4):\n return m+w*l-(tau0*w*l+tau1*max(w*l-kappa,0)) \n\n\nnp.random.seed(42)\nN2 = 100 #OBS! 
Was supposed to be 10K, but code took forever to run in Q5\nw_vec2 = np.random.uniform(0.5,1.5,N2)\nl_vec2 = np.empty(N2)\nt_vec = np.empty(N2)\n\n# function for calculating taxes paid by each individual\ndef t_func(l, w, epsilon, m, nu, tau0, tau1, kappa):\n return tau0*w*l+tau1*max(w*l-kappa,0)\n\n# funtion for returning total tax revenue\ndef tax_rev(epsilon, m, nu, tau0, tau1, kappa):\n for i,w in enumerate(w_vec2):\n w = w_vec2[i]\n sol_case1 = optimize.minimize_scalar(value_of_choice,method='bounded',bounds=(0,1),args=(w, epsilon, m, nu, tau0, tau1, kappa))\n l_vec2[i] = sol_case1.x\n l=l_vec2[i]\n t_vec[i] = t_func(l,w,epsilon, m, nu, tau0, tau1, kappa)\n return sum(t_vec)\n\ndef obj_func(x):\n tau0=x[0]\n tau1=x[1]\n kappa=x[2]\n return -tax_rev(0.1,1,10,tau0, tau1, kappa)\n\n \n\n \n" }, { "alpha_fraction": 0.7978339195251465, "alphanum_fraction": 0.8007220029830933, "avg_line_length": 61.90909194946289, "blob_id": "545022bdfebecd597d52b4f6919175482d7ffe1b", "content_id": "b6cff23e7f62b87a2a6e92e2bdd5814cf9ec0ce9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1385, "license_type": "permissive", "max_line_length": 271, "num_lines": 22, "path": "/README.md", "repo_name": "NumEconCopenhagen/projects-2020-skm", "src_encoding": "UTF-8", "text": "# Project portfolio\n\n## Inaugural Project\nIn this project we maximize the utility of.. and solve for the optimal tax rates of the politician.\n\n## Data project.\nIn this project we investigate the employment rate of the danish population and the subpopulation made up by decendants of non-western immigrants. We further merge in a dataserie describing disposable income.\n\nThe data is cleaned and sorted at regional level\n\nFinally we plot the series in interactive plots on. Fro these plots it becomes visible that the employment rates were affected by the financial crisis in 2008, which was also expected.\n\n## Model project.\nIn this project we set up and plot the IS-LM-PC model, which is used instead of the AS-AD model in some newer macro books.\n\nWe display how the central bank can adjust the real interest rate to overcome inflation due to a positive output gab.\n\nwe introduce a utility function for the Central Bank as we expect the central bank to care not only for the inflation rate but also unemployment. This allows us to solve for the optimal real interest rate when both unemployment and inflation rate is considered by the CB.\n\nFrom the optimal real interest rate we solve for the optimal level of outpus, inflation rate and unemployment rate. \n\nFinallly we plot the real interest rate for different values of the inflation target, to graph how that affects the utility.\n\n" }, { "alpha_fraction": 0.792833149433136, "alphanum_fraction": 0.7973124384880066, "avg_line_length": 58.53333282470703, "blob_id": "1f47f3cc2da37ebdbe1db37352ca0d4b79d66de1", "content_id": "ad1c7c4326a4d2f7dc3d86dbeeda97231bc2dc2d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 893, "license_type": "permissive", "max_line_length": 163, "num_lines": 15, "path": "/inauguralproject/README.md", "repo_name": "NumEconCopenhagen/projects-2020-skm", "src_encoding": "UTF-8", "text": "# Inaugural project\n\n#We have tried to solve the consumer maximization problem by relying on existing code from the lectures and the exercise sets.\n\n#For question 1 we have constructed a function that solves the maximization problem. 
The code is inspired by the third lecture on how to #optimize, print and plot.\n\n#From problem set 1 we have inspiration to plot the the optimal labour supply as a function of wage as well as the optimal consumption as a #function of w.\n\n#In question 3 we calculate the total tax revenue given the optimal values of labour supply, consumption and uniformly distributed wage #draws.\n\n#Finally we recalculate the total tax revenue given a new value of epsilon.\n\nThe **results** of the project can be seen from running inauguralproject copy.ipynb.\n\n**Dependencies:** Apart from a standard Anaconda Python 3 installation, the project requires no further packages.\n" }, { "alpha_fraction": 0.7964959740638733, "alphanum_fraction": 0.7978436946868896, "avg_line_length": 91.75, "blob_id": "7d91fc25dba3d1bb7336a3fb1da383e6cea74880", "content_id": "c8ea11e92506eb059c2b4b09b8fac037739d95ed", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 742, "license_type": "permissive", "max_line_length": 333, "num_lines": 8, "path": "/modelproject/README.md", "repo_name": "NumEconCopenhagen/projects-2020-skm", "src_encoding": "UTF-8", "text": "# Model analysis project\n\nOur project is titled **IS-LM-PC Model** which is a basic macroeconomic model that has replaced the AS-AD model in some newer macroeconomic textbooks. It describes the medium run by combining the IS-LM model with the Phillips curve, thus creating a relation between inflation, unemployment, the output gap, and interest rate policy. \nThe goal of the project is to optimize the interest rate chosen by the central bank, whose utility function reflect that they take bouth the inflation and unemployment into account.\n\nThe **results** of the project can be seen from running [ISLMPC.ipynb](ISLM.ipynb).\n\n**Dependencies:** Apart from a standard Anaconda Python 3 installation, the project requires no further packages.\n" } ]
5
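A note on the tax snippet in the record above: `obj_func` negates `tax_rev` so that a minimizer can maximize revenue over `(tau0, tau1, kappa)`, but the notebook fragment stops before any optimizer is called, and the `value_of_choice` it depends on is defined elsewhere in that repository. The sketch below shows only the negate-and-minimize pattern on a self-contained toy function; the revenue surface, its peak, and the starting point are illustrative stand-ins, not the project's actual model.

```python
# Minimal sketch of the "minimize the negative to maximize" pattern used by obj_func above.
# toy_revenue is a made-up stand-in for tax_rev, chosen so the answer is easy to verify.
from scipy import optimize

def toy_revenue(x):
    tau0, tau1, kappa = x
    # concave surface with its maximum at (0.4, 0.1, 0.5)
    return -((tau0 - 0.4) ** 2 + (tau1 - 0.1) ** 2 + (kappa - 0.5) ** 2)

def objective(x):
    return -toy_revenue(x)  # scipy minimizes, so flip the sign

result = optimize.minimize(objective, x0=[0.5, 0.2, 0.7], method="Nelder-Mead")
print(result.x)  # approximately [0.4, 0.1, 0.5]
```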
Joe-ob/Shopping_Cart
https://github.com/Joe-ob/Shopping_Cart
6daa6db9078ef81946106412f9e9fea5e2d2a963
bc8b90dc37efdbe428786e072404ce10b4ef773f
f2993c833f2841f047f98cbf85b81cf39f2696e9
refs/heads/main
2023-03-12T17:38:49.360091
2021-02-25T01:32:11
2021-02-25T01:32:11
340,959,210
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7684346437454224, "alphanum_fraction": 0.7697283029556274, "avg_line_length": 69.36363983154297, "blob_id": "458d860959f407ae360de50a894277b89ce29e97", "content_id": "e5a9cea7d13b2eeb6536bd1025be941d06dca3ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 773, "license_type": "no_license", "max_line_length": 240, "num_lines": 11, "path": "/README.md", "repo_name": "Joe-ob/Shopping_Cart", "src_encoding": "UTF-8", "text": "# Shopping_Cart\n\nDownload the project from github and open the shopping cart program in your terminal (For a Mac, click 'Respository' in the upper left and then click 'Open in Terminal'). In the Command Line, type \"python shopping_cart.py\" and press ENTER. \n\nYou will be prompted to enter the id number or scan the barcode of the item. \n\nThe program pulls product information from the url of the Products.csv file we used in class, if changes need to be made to the product selection, the updates need to go on the same url.\n\nOnce you have logged all of the items to be purchased, enter the id: \"0\" into the program to finish.\n\nThen, your receipt will print: the time and date, the market's location, the items you ordered, the subtotal, the sales tax, and your total cost!" }, { "alpha_fraction": 0.6082781553268433, "alphanum_fraction": 0.6339403986930847, "avg_line_length": 48.105690002441406, "blob_id": "22c9715571164c0386774f17d4d70d12df912b1f", "content_id": "41567518d733d5bc1ec6876ec70136eb6af19022", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6040, "license_type": "no_license", "max_line_length": 159, "num_lines": 123, "path": "/shopping_cart.py", "repo_name": "Joe-ob/Shopping_Cart", "src_encoding": "UTF-8", "text": "# shopping_cart.py\n#This will be displayed on the top of the receipt\nimport datetime\nnow=datetime.datetime.now()\n\n#This downloads the csv file filled with product information\nimport pandas as pd\nproducts_df = pd.read_csv(\"https://raw.githubusercontent.com/prof-rossetti/intro-to-python/master/data/products.csv\")\n\n\n#This part converts the dataframe to a dictionary so the code can process the information easier\nproducts=products_df.to_dict('records')\n\n\n # FYI: this wget command is a terminal command, NOT python\n # ... in colab, we can execute terminal commands by prefixing them with an exclamation point\n # ... 
students are not responsible for knowing terminal commands like this\n #!wget -q $url \n\n#products = [\n# {\"id\":1, \"name\": \"Chocolate Sandwich Cookies\", \"department\": \"snacks\", \"aisle\": \"cookies cakes\", \"price\": 3.50},\n# {\"id\":2, \"name\": \"All-Seasons Salt\", \"department\": \"pantry\", \"aisle\": \"spices seasonings\", \"price\": 4.99},\n# {\"id\":3, \"name\": \"Robust Golden Unsweetened Oolong Tea\", \"department\": \"beverages\", \"aisle\": \"tea\", \"price\": 2.49},\n# {\"id\":4, \"name\": \"Smart Ones Classic Favorites Mini Rigatoni With Vodka Cream Sauce\", \"department\": \"frozen\", \"aisle\": \"frozen meals\", \"price\": 6.99},\n# {\"id\":5, \"name\": \"Green Chile Anytime Sauce\", \"department\": \"pantry\", \"aisle\": \"marinades meat preparation\", \"price\": 7.99},\n# {\"id\":6, \"name\": \"Dry Nose Oil\", \"department\": \"personal care\", \"aisle\": \"cold flu allergy\", \"price\": 21.99},\n# {\"id\":7, \"name\": \"Pure Coconut Water With Orange\", \"department\": \"beverages\", \"aisle\": \"juice nectars\", \"price\": 3.50},\n# {\"id\":8, \"name\": \"Cut Russet Potatoes Steam N' Mash\", \"department\": \"frozen\", \"aisle\": \"frozen produce\", \"price\": 4.25},\n# {\"id\":9, \"name\": \"Light Strawberry Blueberry Yogurt\", \"department\": \"dairy eggs\", \"aisle\": \"yogurt\", \"price\": 6.50},\n# {\"id\":10, \"name\": \"Sparkling Orange Juice & Prickly Pear Beverage\", \"department\": \"beverages\", \"aisle\": \"water seltzer sparkling water\", \"price\": 2.99},\n# {\"id\":11, \"name\": \"Peach Mango Juice\", \"department\": \"beverages\", \"aisle\": \"refrigerated\", \"price\": 1.99},\n# {\"id\":12, \"name\": \"Chocolate Fudge Layer Cake\", \"department\": \"frozen\", \"aisle\": \"frozen dessert\", \"price\": 18.50},\n# {\"id\":13, \"name\": \"Saline Nasal Mist\", \"department\": \"personal care\", \"aisle\": \"cold flu allergy\", \"price\": 16.00},\n# {\"id\":14, \"name\": \"Fresh Scent Dishwasher Cleaner\", \"department\": \"household\", \"aisle\": \"dish detergents\", \"price\": 4.99},\n# {\"id\":15, \"name\": \"Overnight Diapers Size 6\", \"department\": \"babies\", \"aisle\": \"diapers wipes\", \"price\": 25.50},\n# {\"id\":16, \"name\": \"Mint Chocolate Flavored Syrup\", \"department\": \"snacks\", \"aisle\": \"ice cream toppings\", \"price\": 4.50},\n# {\"id\":17, \"name\": \"Rendered Duck Fat\", \"department\": \"meat seafood\", \"aisle\": \"poultry counter\", \"price\": 9.99},\n# {\"id\":18, \"name\": \"Pizza for One Suprema Frozen Pizza\", \"department\": \"frozen\", \"aisle\": \"frozen pizza\", \"price\": 12.50},\n# {\"id\":19, \"name\": \"Gluten Free Quinoa Three Cheese & Mushroom Blend\", \"department\": \"dry goods pasta\", \"aisle\": \"grains rice dried goods\", \"price\": 3.99},\n# {\"id\":20, \"name\": \"Pomegranate Cranberry & Aloe Vera Enrich Drink\", \"department\": \"beverages\", \"aisle\": \"juice nectars\", \"price\": 4.25}\n#] # based on data from Instacart: https://www.instacart.com/datasets/grocery-shopping-2017\n\n#This part defines the to_usd function, which converts float to currency\ndef to_usd(my_price):\n \"\"\"\n Converts a numeric value to usd-formatted string, for printing and display purposes.\n\n Param: my_price (int or float) like 4000.444444\n\n Example: to_usd(4000.444444)\n\n Returns: $4,000.44\n \"\"\"\n return f\"${my_price:,.2f}\" #> $12,000.71\n\n\n# TODO: write some Python code here to produce the desired output\n\n#Defining list a, which collects the item's prices\na=[]\n\n#Defining list b, which 
collects the item's names\nb=[]\n\n#Defining list c, which collects the item's prices as USD to be displayed\nc=[]\n\n#Establishes user input and repeats prompt endlessly until the user enters 0\nnumb=int(input(\"Please scan the item or if you are finished, enter '0': \"))\nwhile numb != 0:\n count=0\n for item in products:\n if numb==item[\"id\"]:\n #The append function saves the items checked out by the user\n a.append(item[\"price\"])\n b.append(item[\"name\"])\n count=count+1\n if count==0:\n numb=int(input(\"Hey, are you sure that product identifier is correct? Please try again!\"))\n #This if statement makes sure you entered a product id that is in the product csv file\n else:\n numb=int(input(\"Please scan the item or if you are finished, enter '0': \"))\n\n\n \n\n#This prints the receipt\nelse: \n print(\"-------------------\") \n print(\"-------------------\") \n#This calls the imported date/time function\n print(now.strftime(\"%Y-%m-%d %H:%M:%S\")) \n print(\"Joe's Market\") \n print(\"3700 O Street NW, Washington, DC 20007\")\n print(\"JoesMarket.com\")\n print(\"-------------------\") \n print(\"-------------------\")\n#The for loop converts the prices to USD so they can be displayed that way \n for item in a:\n dollar_price=to_usd(item)\n#I separate them into list C so I can still do calculations with list A\n c.append(dollar_price)\n#This prints the items bought and their price\n for item_b, item_c in zip(b, c):\n print(item_b, item_c)\n#This calculates the subtotal\n sum_prices=(sum(a)) \n print(\"-------------------\")\n print(\"Your subtotal is \", to_usd(sum_prices))\n # \n#Since sum_prices is saved as a float, I use it to calculate the total with tax\n tax_perc=.0875\n tax=sum_prices*tax_perc\n tax_as_dollars=to_usd(tax)\n print(\"Sales Tax: \", tax_as_dollars)\n#Once Sales Tax is calculated, I add it to the subtotal and present the final cost\n Total_Cost=sum_prices+tax\n print(\"Your total cost is\", to_usd(Total_Cost))\n print(\"-------------------\")\n#Thank you message\n print(\"Thank you for shopping at Joe's Market, come again!\")\n print(\"-------------------\") \n print(\"-------------------\") " } ]
2
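The cart script in the record above pairs `pandas.read_csv` with `DataFrame.to_dict('records')` and then scans the resulting list for a matching `id`. A self-contained illustration of that lookup pattern, using inline sample rows instead of the remote Products.csv so it runs offline:

```python
# The to_dict('records') lookup pattern from shopping_cart.py, with inline sample rows.
import pandas as pd

df = pd.DataFrame(
    {"id": [1, 2, 3], "name": ["Cookies", "Salt", "Oolong Tea"], "price": [3.50, 4.99, 2.49]}
)
products = df.to_dict("records")  # -> list of plain dicts, one per row

requested_id = 2
match = next((p for p in products if p["id"] == requested_id), None)
print(match["name"] if match else "unknown product id")  # Salt
```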
MatSalm/Finc599CardGame
https://github.com/MatSalm/Finc599CardGame
cf2cd710871ec2745d13de6a728b5268a5fe16b0
377374c8883a3177d9934bdbcd13bcfd8ec56018
8c711261f6437bc651032de7a18858c23044456a
refs/heads/master
2020-04-19T01:12:33.175561
2019-01-27T22:52:29
2019-01-27T22:52:29
167,866,305
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6460155844688416, "alphanum_fraction": 0.6735770106315613, "avg_line_length": 25.816667556762695, "blob_id": "eaffb9dfe4b008cf890e93c46c20a2bda81663a5", "content_id": "3017702c3a6cdccd5c1f0343548454c7e6c3dfe0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8349, "license_type": "no_license", "max_line_length": 293, "num_lines": 300, "path": "/FINC599CardGame.py", "repo_name": "MatSalm/Finc599CardGame", "src_encoding": "UTF-8", "text": "BREAKING DOWN PROBLEMS & THOUGHT PROCESS\r\n\r\nConsider a modified version of the card game “War”. In this version, there are two players who are dealt 10 cards each, numbered 1 through 10.\r\n\r\n\"OK. I need to make a game with two players with 10 cards each\r\nfrom 1 through 10. how do I write this?\"\r\n\r\nWhen i was writing this initially I did it with the manual list, [1,2,3,4....], then figured i could use list(range(1,11)) - 11 cos it will only run until 10 as per python rules. but then i figured what if we want to dictate the number of cards at the start of the game? Thus.. coming up with:\r\n\r\n\tplayer_1 = list(range(1,(number_of_cards+1)))\r\n\tplayer_2 = list(range(1,(number_of_cards+1)))\r\n\r\nI wrote this down as it is without placing it in a function or whatever.\r\njust do it line per line and then piece them together like a puzzle after.\r\n\r\n----------------------\r\n\r\nIn each round of the game, player A and player B play a random card from their deck.\r\n\r\n\"OK I need to extract a random number from a list. how do i do that?\"\r\n\r\nFound out about the random module online and decided to import that and put it at the top of my code.\r\n\r\n\timport random\r\n\r\nnow i need to name these new variables that i plan to randomly pick\r\nout from the list.\r\n\r\n\tpick_p1=random.choice(player_1)\r\n\tpick_p2=random.choice(player_2)\r\n\r\n----------------------\r\nThe player who plays the higher card takes back their own\r\ncard, as well as the card of their opponent.\r\n\r\n\"OK I need to compare the 2 picks with if,then statements.\"\r\n\r\n\t\tif pick_p1>pick_p2:\r\n\r\n\t\telif pick_p1<pick_p2:\r\n\r\n\t\telif pick_p1==pick_p2:\r\n\r\n\"OK I need to figure out what I should make this fucking program do\r\ngiven those statements\"\r\n\r\nI need to (1)Remove pick from losing player and (2)append the pick into the winning players deck.\r\n\r\n\t\tif pick_p1>pick_p2:\r\n\t\t\tplayer_1.append(pick_p2)\r\n\t\t\tplayer_2.remove(pick_p2)\r\n\t\telif pick_p1<pick_p2:\r\n\t\t\tplayer_1.remove(pick_p1)\r\n\t\t\tplayer_2.append(pick_p1)\r\n\t\telif pick_p1==pick_p2:\r\n\r\n-------------------\r\n\r\nIn the event of a tie, each player gets their card back...\r\n\r\n\"OK I need to do this coin flip thing for a tie (based on prof's email)\"\r\n\r\nFor this coin flip, i can probably use a list [1,2]...\r\n\r\n\t\tcoin_flip = [1,2]\r\n\r\nHave the program choose from the list randomly:\r\n\r\n\t\tresult = random.choice(coin_flip)\r\n\r\nand make an if,then statement for the results of winner(same rules as above):\r\n\r\n\t\t\tif result==1:\r\n\t\t\t\tplayer_1.append(pick_p1)\r\n\t\t\t\tplayer_2.remove(pick_p1)\r\n\t\t\telif result==2:\r\n\t\t\t\tplayer_1.remove(pick_p1)\r\n\t\t\t\tplayer_2.append(pick_p1)\r\n\r\ncode now turns into this:\r\n\t\tpick_p1=random.choice(player_1)\r\n\t\tpick_p2=random.choice(player_2)\r\n\r\n\t\tif pick_p1>pick_p2:\r\n\t\t\tplayer_1.append(pick_p2)\r\n\t\t\tplayer_2.remove(pick_p2)\r\n\t\telif 
pick_p1<pick_p2:\r\n\t\t\tplayer_1.remove(pick_p1)\r\n\t\t\tplayer_2.append(pick_p1)\r\n\t\telif pick_p1==pick_p2:\r\n\t\t\tcoin_flip = [1,2]\r\n\t\t\tresult = random.choice(coin_flip)\r\n\t\t\tif result==1:\r\n\t\t\t\tplayer_1.append(pick_p1)\r\n\t\t\t\tplayer_2.remove(pick_p1)\r\n\t\t\telif result==2:\r\n\t\t\t\tplayer_1.remove(pick_p1)\r\n\t\t\t\tplayer_2.append(pick_p1)\r\n\r\n------------------\r\n...and it goes back into their pile randomly\r\n\r\n\"OK I need to randomize the cards\"\r\n\r\nI tried to put the pick in randomly but i figured this would be easier.\r\nI stumbled upon the randomize code while looking this up.\r\n\r\n\t\trandom.shuffle(player_1)\r\n\t\trandom.shuffle(player_2)\r\n\r\ninsert that after all if statements and the code looks like this:\r\n\r\n\t\tpick_p1=random.choice(player_1)\r\n\t\tpick_p2=random.choice(player_2)\r\n\r\n\t\tif pick_p1>pick_p2:\r\n\t\t\tplayer_1.append(pick_p2)\r\n\t\t\tplayer_2.remove(pick_p2)\r\n\t\t\trandom.shuffle(player_1)\r\n\t\t\trandom.shuffle(player_2)\r\n\t\telif pick_p1<pick_p2:\r\n\t\t\tplayer_1.remove(pick_p1)\r\n\t\t\tplayer_2.append(pick_p1)\r\n\t\t\trandom.shuffle(player_1)\r\n\t\t\trandom.shuffle(player_2)\r\n\t\telif pick_p1==pick_p2:\r\n\t\t\tcoin_flip = [1,2]\r\n\t\t\tresult = random.choice(coin_flip)\r\n\t\t\tif result==1:\r\n\t\t\t\tplayer_1.append(pick_p1)\r\n\t\t\t\tplayer_2.remove(pick_p1)\r\n\t\t\t\trandom.shuffle(player_1)\r\n\t\t\t\trandom.shuffle(player_2)\r\n\t\t\telif result==2:\r\n\t\t\t\tplayer_1.remove(pick_p1)\r\n\t\t\t\tplayer_2.append(pick_p1)\r\n\t\t\t\trandom.shuffle(player_1)\r\n\t\t\t\trandom.shuffle(player_2)\r\n\r\nI could have probably ended here but i was curious as to who wins so\r\nI put this code in:\r\n\r\n\tif len(player_1)==0:\r\n\t\tplayer_2_wins.append(1)\r\n\t\t#print('PLAYER 2 WINS!')\r\n\telse:\r\n\t\tplayer_1_wins.append(1)\r\n\t\t#print('PLAYER 1 WINS!')\r\n\r\nand put a list at the very top to consolidate:\r\n\r\n\tplayer_1_wins = list()\r\n\tplayer_2_wins = list()\r\n\r\nIts just to calculate whos list is empty and that's the loser.\r\nAppend (1) into the list and then i added all 1s in list after to see\r\ntotal wins of player.\r\n\r\n\tprint('player 1 wins',' ', sum(player_1_wins),' ', 'times')\r\n\tprint('player 2 wins',' ', sum(player_2_wins),' ', 'times')\r\n\r\n---------------\r\nA.\r\nWrite a series of functions that simulates the game\r\n\r\n\"AHM OK so this needs to be in a function\"\r\n\r\nI initially put this all in a while loop but now it says function,\r\nI put the while loop in the function. this is where the puzzle thing comes in.\r\n\r\ncode is now:\r\n\r\nimport random\r\n\r\nsimulations = int(input(\"Welcome to WAR! 
How many times do you want to run the simulation?:\"))\r\nnumber_of_cards = int(input(\"How many cards per player?:\"))\r\nplayer_1_wins = list()\r\nplayer_2_wins = list()\r\n\r\ndef war_game():\r\n\tplayer_1 = list(range(1,(number_of_cards+1)))\r\n\tplayer_2 = list(range(1,(number_of_cards+1)))\r\n\trounds = 0\r\n\r\n\twhile player_1 and player_2:\r\n\t\tpick_p1=random.choice(player_1)\r\n\t\tpick_p2=random.choice(player_2)\r\n\r\n\t\tif pick_p1>pick_p2:\r\n\t\t\tplayer_1.append(pick_p2)\r\n\t\t\tplayer_2.remove(pick_p2)\r\n\t\t\trandom.shuffle(player_1)\r\n\t\t\trandom.shuffle(player_2)\r\n\t\telif pick_p1<pick_p2:\r\n\t\t\tplayer_1.remove(pick_p1)\r\n\t\t\tplayer_2.append(pick_p1)\r\n\t\t\trandom.shuffle(player_1)\r\n\t\t\trandom.shuffle(player_2)\r\n\t\telif pick_p1==pick_p2:\r\n\r\n\t\t\tcoin_flip = [1,2]\r\n\t\t\tresult = random.choice(coin_flip)\r\n\t\t\tif result==1:\r\n\t\t\t\tplayer_1.append(pick_p1)\r\n\t\t\t\tplayer_2.remove(pick_p1)\r\n\t\t\t\trandom.shuffle(player_1)\r\n\t\t\t\trandom.shuffle(player_2)\r\n\t\t\telif result==2:\r\n\t\t\t\tplayer_1.remove(pick_p1)\r\n\t\t\t\tplayer_2.append(pick_p1)\r\n\t\t\t\trandom.shuffle(player_1)\r\n\t\t\t\trandom.shuffle(player_2)\r\n\r\n\r\n\tif len(player_1)==0:\r\n\t\tplayer_2_wins.append(1)\r\n\telse:\r\n\t\tplayer_1_wins.append(1)\r\n\r\nfor i in range(simulations):\r\n\twar_game()\r\n\r\n\r\nAdded the (for i) statement to run the function. and added\r\nvariable (simulations) up top to dictate how many times i want to run it.\r\n\r\nthe input function is used when you want the user to input\r\na value and used that.\r\n\r\n--------------\r\n\r\ncalculate the number of rounds each game takes.\r\n\r\n\"OK I need to make a counter for every loop the function makes\"\r\n\r\nadded this at the top of the function:\r\n\r\n\trounds = 0\r\n\r\nadded this at the end of the funtion:\r\n\r\n\trounds+=1\r\n\r\nI will also need to consolidate the number of rounds per simulation in\r\na list\r\n-------------\r\nRun your program 10,000 times and record each result.\r\n\r\nthis is where the input code comes in handy.\r\nI already put this code in so it should be ok\r\n\r\nput this at the very top of the code cos i want it outside of the\r\ndef function:\r\n\r\n\ttotal_rounds = list()\r\n\r\nand this one at the bottom:\r\n\r\n\ttotal_rounds.append(rounds)\r\n-------------\r\nTime how long the simulation takes.\r\n\r\n\"OK I need to time it\"\r\n\r\nfound the code of time. put this at the start to record time it starts:\r\n\tt0 = time.time()\r\n\r\nput this at the very end to record time code hits this line:\r\n\tt1 = time.time()\r\n\r\nsubtract!\r\n\ttotal_time= t1-t0\r\n\r\n-------------\r\nPlot a histogram of the distribution of the number of rounds played. Make sure there is a title, legend, and labeled axes.\r\n\r\nplt.hist(total_rounds, bins=20)\r\nplt.title('WAR Histogram of Rounds')\r\nplt.xlabel('Number of Rounds')\r\nplt.ylabel('Frequency')\r\nplt.show()\r\n\r\n\r\n------------\r\nC.\r\nRe-run the program for a game with 25 distinct cards. 
How long did this simulation take?\r\nDoes it vary linearly with respect to the 10 card\r\ngame?\r\n\r\njust changed the input numbers so i dont have to write code again w 25 cards.\r\n\r\n\r\n------------\r\n\r\nAfter that it's a matter of debugging and maybe fixing indents and stuff.\r\nNotice that my code has a lot of print statements i have disabled.\r\n\r\nThis helped a lot when i was starting out the code and it made me see\r\nwhat was pick 1 and pick 2 and if it compared correctly.\r\nand if number of rounds was working good.\r\n" }, { "alpha_fraction": 0.7439024448394775, "alphanum_fraction": 0.8292682766914368, "avg_line_length": 40, "blob_id": "23a63aa0627b8abf264ab6384283c898f62e670d", "content_id": "c1a0daf97962e39f22fbbe7cbdb31babe1db4dbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 82, "license_type": "no_license", "max_line_length": 63, "num_lines": 2, "path": "/README.md", "repo_name": "MatSalm/Finc599CardGame", "src_encoding": "UTF-8", "text": "# Finc599CardGame\nThis is a repository for FINC599 simulated card game in Python3\n" } ]
2
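The notes in the record above describe timing the whole batch of simulations with `time.time()` and collecting per-game round counts in a list. A compact, self-contained version of just that timing-and-tallying skeleton; the game body here is a trivial stand-in for the full `war_game` function shown in the notes:

```python
# Timing-and-tallying skeleton described in the notes: start clock, run games, stop clock.
import random
import time

total_rounds = []

def war_game():
    # stand-in for the real game loop in the notes; records a fake round count
    total_rounds.append(random.randint(10, 60))

t0 = time.time()
for _ in range(10000):
    war_game()
t1 = time.time()
print(f"{len(total_rounds)} games, {t1 - t0:.3f} seconds total")
```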
ashwin-2001/FITC__TCET_contest
https://github.com/ashwin-2001/FITC__TCET_contest
f58069611b5313c2a7a4127d1368f37339917253
3846a53f0def765a745804b9f1413fece553de93
f75f2afda396a8c7bc932086cc624896cca227b0
refs/heads/main
2023-04-03T09:16:06.267331
2021-04-11T07:09:49
2021-04-11T07:09:49
356,781,531
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5435203313827515, "alphanum_fraction": 0.591876208782196, "avg_line_length": 20.54166603088379, "blob_id": "d418b6470b40b6d241c03d514a7992d5229b3baa", "content_id": "8a81f313fa8eacd64cfc9d3460fcbf5901b8710a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 517, "license_type": "no_license", "max_line_length": 55, "num_lines": 24, "path": "/q3.py", "repo_name": "ashwin-2001/FITC__TCET_contest", "src_encoding": "UTF-8", "text": "# Python3 Program to find\n# best buying and selling days\n\ndef productBuySell(price, n):\n if price!=sorted(price,reverse=True):\n low=price[0]\n high=price[1]\n for i in price:\n if i<low:low=i\n if i>high:high=i\n\n print(\"Buy on day: \", price.index(low)+1, \"\\t\",\n \"Sell on day: \", price.index(high)+1)\n\n\n\n# Driver code\n\n# product prices on consecutive days\nprice = [100, 180, 260, 310, 40, 535, 695]\nn = len(price)\n\n# Fucntion call\nproductBuySell(price, n)\n" }, { "alpha_fraction": 0.5773195624351501, "alphanum_fraction": 0.5876288414001465, "avg_line_length": 19.64285659790039, "blob_id": "7ba1cf08ad787f111347d2b03c47e0a2e9b34859", "content_id": "014b03723b6657ea51aabb1b51cd9189103fda8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 291, "license_type": "no_license", "max_line_length": 34, "num_lines": 14, "path": "/q1.py", "repo_name": "ashwin-2001/FITC__TCET_contest", "src_encoding": "UTF-8", "text": "no_of_lines = int(input())\nlst = []\nfor i in range(no_of_lines):\n c = int(input())\n lst.append(str(c))\n\noutput_list = [no_of_lines]\nfor x in lst:\n occurances = 0\n for t in x:\n if t==\"5\":occurances+=1\n output_list.append(occurances)\nfor p in output_list:\n print(p)\n\n\n" } ]
2
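Taking the global minimum and maximum, as `q3.py` in the record above does, only finds a valid trade when the cheapest day happens to precede the most expensive one, which holds for the sample `price` list but not in general. A hedged sketch of the standard single-pass alternative, tracking the cheapest day seen so far:

```python
# Single-pass alternative to q3.py's global min/max approach:
# track the cheapest day seen so far and the best profit found.
def best_buy_sell(price):
    buy = sell = 0
    cheapest = 0  # index of the lowest price seen so far
    for i in range(1, len(price)):
        if price[i] - price[cheapest] > price[sell] - price[buy]:
            buy, sell = cheapest, i
        if price[i] < price[cheapest]:
            cheapest = i
    return buy + 1, sell + 1  # 1-based days, matching q3.py's output

print(best_buy_sell([100, 180, 260, 310, 40, 535, 695]))  # (5, 7)
```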
jessex/wikilinks
https://github.com/jessex/wikilinks
f0188b29fcded9ab63dab4137df6f358ab284c16
7bce2cf79bdce15e95de692eda9fe1f165c9324a
a266bc1c6689fe3e196e925d0bb8335e1daa29b5
refs/heads/master
2016-09-08T01:47:31.271634
2011-03-14T22:55:30
2011-03-14T22:55:30
1,376,649
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6778905987739563, "alphanum_fraction": 0.6827008724212646, "avg_line_length": 34.037498474121094, "blob_id": "307145204b5ec098f919cbe9d3ebf4f9a303ef04", "content_id": "d5cd035f4a3e7a5c9461819a792f6fa27db8e28c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5613, "license_type": "no_license", "max_line_length": 154, "num_lines": 160, "path": "/wikilinks.py", "repo_name": "jessex/wikilinks", "src_encoding": "UTF-8", "text": "import urllib2, re\nfrom sys import argv, exit\nfrom time import sleep\n\n#namespaces of Wikipedia pages which we wish to ignore\nnamespaces = [\"Wikipedia:\", \"Category:\", \"Talk:\", \"File:\", \"Portal:\", \"User:\", \n\"WT:\", \"MediaWiki:\", \"Template:\", \"Image:\", \"WP:\", \"Wikipedia_talk:\", \"Help:\",\n\"Book:\", \"Media:\", \"Project:\", \"Project_talk:\", \"Image_talk:\", \"User_talk:\",\n\"Thread:\", \"Summary:\", \"Thread_talk:\", \"Summary_talk:\", \"Book_talk:\", \n\"MediaWiki_talk:\", \"Template_talk:\", \"Help_talk:\", \"Portal_talk:\", \"Special:\",\n\"Category_talk:\"]\n\nuser_agent = \"wikilinks/1.0\" #Wikipedia denies urllib2's default user agent\n\nre_link = re.compile(\"<a\\s*href=['\\\"](.*?)['\\\"].*?>\") #pulls links from HTML\nre_wiki = re.compile(\"/wiki/.+\") #cares only for links to Wikipedia articles\n#re_next = re.compile(\"/w/index.php\\?title=Special:WhatLinksHere/.*namespace=0.*limit=500.*from=.*back=.*\") #finds links to another page of incoming links\nre_next = re.compile(\"\\|\\s*<a\\s*href=['\\\"](.*?)['\\\"].*?>next 500\")\nre_input = re.compile(\"http://en.wikipedia.org/wiki/.+\")\n\nto_file = False\n\n#proper arguments usage: \n#wikilinks.py , http://en.wikipedia.org/wiki/whatever , OPTIONAL: -f filename\ndef process_args(args):\n\tglobal to_file, out_file\n\t\n\t#validate the starting article input\n\tinput = args[1]\n\tif re_input.match(input) == None:\n\t\tprint \"Start link must be from English Wikipedia's default namespace\"\n\t\texit(\"Must be of the form: http://en.wikipedia.org/wiki/...\")\n\telse:\n\t\tvalid = True\n\t\tfor ns in namespaces:\n\t\t\tif ns in input:\n\t\t\t\tvalid = False\n\t\t\t\tbreak\n\t\tif not valid:\n\t\t\tprint \"Start link must be from English Wikipedia's main namespace\"\n\t\t\texit(\"For more information on namespaces, \" \\\n\t\t\t\"visit: http://en.wikipedia.org/wiki/Wikipedia:Namespace\")\n\t#check if writing output to a file\n\tif len(args) > 2:\n\t\tif args[2] == \"-f\":\n\t\t\tif len(args) > 3:\n\t\t\t\tout_file = args[3]\n\t\t\t\tto_file = True\n\t\t\telse:\n\t\t\t\texit(\"Must include file name after the file flag '-f'\")\n\t\telse:\n\t\t\texit(\"Usage: Wikipedia_article [-f output_file]\")\n\treturn input\n\t\n\n#pass in a url and receive the page source HTML\ndef page_html(url, verbose=True):\n\trequest = urllib2.Request(url) #craft our request for the input page\n\trequest.add_header(\"User-Agent\", user_agent)\n\tif verbose:\n\t\tprint \"Requesting page...\", url\n\n\ttry:\n\t\tresponse = urllib2.urlopen(request) #send GET request to server\n\texcept IOError, e: #HTTPError extends URLError which extends IOError\n\t\tif hasattr(e, \"reason\"): #URLError\n\t\t\tprint \"Could not reach server: \", e.reason\n\t\telif hasattr(e, \"code\"): #HTTPError \n\t\t\tprint \"Could not fulfill request: \", e.code\n\t\texit(0)\n\telse: #received a good, valid response\n\t\treturn response.read() #page source\n\t\t\n#get a list of the links to all outgoing articles from the passed HTML page\ndef 
outgoing_articles(html, verbose=True):\n\tif verbose:\n\t\tprint \"Searching for outgoing Wikipedia articles...\"\n\t\t\n\tlinks = re_link.findall(html) #all outgoing links from input page\n\toutlinks = [] #all outgoing links within Wikipedia\n\tfor link in links:\n\t\tif link.lower() == article.lower(): #skip circular links (to self)\n\t\t\tcontinue\n\t\telif link == \"/wiki/Main_Page\": #on every page, not article\n\t\t\tcontinue\n\t\tif re_wiki.match(link) != None:\n\t\t\tmain_ns = True\n\t\t\tfor ns in namespaces: #check for non-main namespaces\n\t\t\t\tif ns in link:\n\t\t\t\t\tmain_ns = False\n\t\t\t\t\tbreak\n\t\t\tif main_ns:\n\t\t\t\tif link not in outlinks:\n\t\t\t\t\toutlinks.append(link)\n\treturn outlinks\n\n#get a list of all articles which link to the article with the given title\ndef incoming_articles(title, verbose=True):\n\tif verbose:\n\t\tprint \"Searching for incoming Wikipedia articles...\"\n\t\t\n\tlink = \"http://en.wikipedia.org/w/index.php?title=Special:WhatLinksHere/\"\n\tlink += title + \"&limit=500&namespace=0\"\n\twhatlinkshere = page_html(link)\n\t\n\tinlinks = []\n\tmore = True\n\twhile more: #scroll through all incoming links\n\t\tinlinks += outgoing_articles(whatlinkshere, verbose=False)\n\t\tnext_page = re_next.findall(whatlinkshere)\n\t\tif not next_page:\n\t\t\tmore = False\n\t\telse:\n\t\t\tnewlink = (\"http://en.wikipedia.org\" + next_page[0]).replace(\"&amp;\",\"&\")\n\t\t\tsleep(1) #wait one second before next request (politeness)\n\t\t\twhatlinkshere = page_html(newlink)\n\treturn inlinks\n\n\t\n\nif __name__ == \"__main__\":\n\tif len(argv) < 2:\n\t\texit(\"Please include a starting Wikipedia article link\")\n\n\tinput = process_args(argv) #form: http://en.wikipedia.org/wiki/whatever\n\tarticle = input.replace(\"http://en.wikipedia.org\", \"\") #form: /wiki/whatever\n\ttitle = article.replace(\"/wiki/\", \"\") #form: whatever\n\t\n\t#valid starting article at this point, beginning crawl of page\n\tprint \"Starting article:\", input\n\n\thtml = page_html(input)\n\toutlinks = outgoing_articles(html)\n\tsleep(1) #wait one second before next request (politeness)\n\tinlinks = incoming_articles(title)\n\n\t#have list of all articles pointed to by start article at this point\n\tif not to_file: #printing output to screen\n\t\tprint \"\\n%d outgoing links from starting article\" % len(outlinks)\n\t\tif outlinks:\n\t\t\tprint \"Outgoing articles:\"\n\t\t\tfor link in outlinks:\n\t\t\t\tprint \"http://en.wikipedia.org%s\" % link\n\t\tprint \"\\n%d incoming links to starting article\" % len(inlinks)\n\t\tif inlinks:\n\t\t\tprint \"Incoming articles:\"\n\t\t\tfor link in inlinks:\n\t\t\t\tprint \"http://en.wikipedia.org%s\" % link\n\t\t\t\t\n\telse: #writing output to file\n\t\tf = open(out_file, \"w\")\n\t\toutput = \"Start: %s\\nOutgoing(%d):\\n\" % (input, len(outlinks))\n\t\tfor link in outlinks:\n\t\t\toutput += \"http://en.wikipedia.org%s\\n\" % link\n\t\toutput += \"Incoming(%d):\\n\" % len(inlinks)\n\t\tfor link in inlinks:\n\t\t\toutput += \"http://en.wikipedia.org%s\\n\" % link\n\t\tf.write(output)\n\t\tf.close()\n\t\tprint \"Output written to file: '%s'\" % out_file\n\n\t\t\n\t\n\n" } ]
1
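`wikilinks.py` in the record above is Python 2 code (`urllib2`, print statements). For readers on Python 3, the custom User-Agent request it builds maps onto `urllib.request` roughly as follows; this is a sketch of the request pattern only, not a port of the whole crawler, and it performs a live HTTP request when run:

```python
# Python 3 counterpart of the urllib2 User-Agent request built in wikilinks.py.
import urllib.request

url = "http://en.wikipedia.org/wiki/Main_Page"  # any article URL works here
req = urllib.request.Request(url, headers={"User-Agent": "wikilinks/1.0"})
try:
    with urllib.request.urlopen(req) as response:
        html = response.read().decode("utf-8", errors="replace")
    print(len(html), "characters of page source")
except IOError as e:  # URLError/HTTPError both subclass IOError, as in the script
    print("request failed:", e)
```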
springle/elevator
https://github.com/springle/elevator
358fa53f9b5411aecab0b4156ead8a3daf0e0bc0
5acea955998495196c6b88cd75c4abdebfc624c4
3334ff34b1d270de84c6d3e6e00cb31c40765b73
refs/heads/master
2021-05-03T20:40:53.085911
2016-11-04T17:22:49
2016-11-04T17:22:49
71,406,158
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3301805555820465, "alphanum_fraction": 0.40171968936920166, "avg_line_length": 30.603260040283203, "blob_id": "f21c7b7a855190da97dd3a60ff39445c839edbee", "content_id": "bc6481f5d74e6866c0b6a845f0ffdc4f4b510565", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5815, "license_type": "no_license", "max_line_length": 88, "num_lines": 184, "path": "/elevator_control_system_tests.py", "repo_name": "springle/elevator", "src_encoding": "UTF-8", "text": "import unittest\nfrom elevator_control_system import ElevatorControlSystem\n\nclass TestElevatorControlSystem(unittest.TestCase):\n\tdef test_init(self):\n\t\t\"\"\"\n\t\tMake sure ElevatorControlSystem correctly initializes with 16 elevators and 16 floors.\n\t\t\"\"\"\n\t\tecs = ElevatorControlSystem(16, 16)\n\t\tself.assertEqual(ecs.status(), [(0, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t\t (1, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t\t (2, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t\t (3, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t\t (4, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t\t (5, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t\t (6, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t\t (7, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t\t (8, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t\t (9, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t\t (10, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t\t (11, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t\t (12, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t\t (13, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t\t (14, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t\t (15, 0, [], [], 0)])\n\n\tdef test_update_order(self):\n\t\t\"\"\"\n\t\tMake sure several requests are handled correctly by the up_queue.\n\t\tNote that this simulation is impractical because the pickup algorithm\n\t\t\twould have distributed people more evenly among the elevators; however,\n\t\t\tit is useful to make sure a single elevator orders several requests\n\t\t\tin the correct order.\n\t\t\"\"\"\n\t\tecs = ElevatorControlSystem(16, 16)\n\t\tecs.update(0, 5)\n\t\tecs.update(0, 4)\n\t\tecs.update(0, 2)\n\t\tecs.update(0, 8)\n\t\tself.assertEqual(ecs.status(), [(0, 0, [2, 5, 4, 8], [], 1), \\\n\t\t\t\t\t\t\t\t\t\t(1, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(2, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(3, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(4, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(5, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(6, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(7, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(8, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(9, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(10, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(11, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(12, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(13, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(14, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(15, 0, [], [], 0)])\n\n\tdef test_duplicates(self):\n\t\t\"\"\"\n\t\tMake sure no extra effort is wasted on a duplicate request.\n\t\t\"\"\"\n\t\tecs = ElevatorControlSystem(16, 16)\n\t\tecs.update(1, 5)\n\t\tecs.update(1, 5)\n\t\tself.assertEqual(ecs.status(), [(0, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(1, 0, [5], [], 1), \\\n\t\t\t\t\t\t\t\t\t\t(2, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(3, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(4, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(5, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(6, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(7, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(8, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(9, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(10, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(11, 
0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(12, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(13, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(14, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(15, 0, [], [], 0)])\n\n\tdef test_pickup(self):\n\t\t\"\"\"\n\t\tMake sure the system correctly distributes a set of pickup requests.\n\t\tGiven the requests below, we check to make sure:\n\t\t\t(1) The system correctly penalizes based on elevator direction\n\t\t\t(2) The individual elevators order requests correctly\n\t\t\t(3) Duplicate requests are consolidated\n\t\t\"\"\"\n\t\tecs = ElevatorControlSystem(16,16)\n\t\tecs.pickup(5, 1)\n\t\tecs.pickup(5, -1)\n\t\tecs.pickup(4, 1)\n\t\tecs.pickup(5, 1)\n\t\tecs.pickup(6, -1)\n\t\tecs.pickup(4, -1)\n\t\tself.assertEqual(ecs.status(), [(0, 0, [4,5], [], 1), \\\n\t\t\t\t\t\t\t\t\t\t(1, 0, [5], [], 1), \\\n\t\t\t\t\t\t\t\t\t\t(2, 0, [6], [], 1), \\\n\t\t\t\t\t\t\t\t\t\t(3, 0, [4], [], 1), \\\n\t\t\t\t\t\t\t\t\t\t(4, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(5, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(6, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(7, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(8, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(9, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(10, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(11, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(12, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(13, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(14, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(15, 0, [], [], 0)])\n\n\tdef test_step(self):\n\t\t\"\"\"\n\t\tMake sure the simulation actually runs and that elevators drop people off as expected.\n\t\tGiven the requests below, we check to make sure:\n\t\t\t(1) Each elevator should move 4 floors, since they are all heading up.\n\t\t\t(2) All requests below or on floor 4 should be completed.\n\t\t\t(3) Elevators with now empty queues should have direction 0.\n\t\t\"\"\"\n\t\tecs = ElevatorControlSystem(16,16)\n\t\tecs.pickup(2, 1)\n\t\tecs.pickup(5, -1)\n\t\tecs.pickup(4, 1)\n\t\tecs.pickup(2, 1)\n\t\tecs.pickup(6, -1)\n\t\tecs.pickup(4, -1)\n\t\tecs.step()\n\t\tecs.step()\n\t\tecs.step()\n\t\tecs.step()\n\t\tself.assertEqual(ecs.status(), [(0, 4, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(1, 4, [5], [], 1), \\\n\t\t\t\t\t\t\t\t\t\t(2, 4, [6], [], 1), \\\n\t\t\t\t\t\t\t\t\t\t(3, 4, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(4, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(5, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(6, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(7, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(8, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(9, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(10, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(11, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(12, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(13, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(14, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(15, 0, [], [], 0)])\n\n\tdef test_turnaround(self):\n\t\t\"\"\"\n\t\tMake sure that the system correctly prioritizes between up_queue and down_queue.\n\t\tGiven the instructions below, we expect Elevator 0 to complete all of its\n\t\t\trequests within the 6 steps. This is considered optimal given the order\n\t\t\tand timing of the requests. 
We also expect the elevator to finish on\n\t\t\tfloor 2, because it should respect the order of the requests when possible.\n\t\t\"\"\"\n\t\tecs = ElevatorControlSystem(16,16)\n\t\tecs.pickup(4,1)\n\t\tecs.step()\n\t\tecs.step()\n\t\tecs.step()\n\t\tecs.update(0,2)\n\t\tecs.step()\n\t\tecs.step()\n\t\tecs.step()\n\t\tself.assertEqual(ecs.status(), [(0, 2, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(1, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(2, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(3, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(4, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(5, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(6, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(7, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(8, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(9, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(10, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(11, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(12, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(13, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(14, 0, [], [], 0), \\\n\t\t\t\t\t\t\t\t\t\t(15, 0, [], [], 0)])\n\nif __name__ == '__main__':\n\tunittest.main()\n" }, { "alpha_fraction": 0.7876476049423218, "alphanum_fraction": 0.793823778629303, "avg_line_length": 110.0999984741211, "blob_id": "a5815d08c2f4c685cc5d1bf30239cea78fefa7c0", "content_id": "8e1a7ee38a47e0409aa522bb30ff2ff84cde602b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5505, "license_type": "no_license", "max_line_length": 803, "num_lines": 50, "path": "/README.md", "repo_name": "springle/elevator", "src_encoding": "UTF-8", "text": "# Elevator Coding Challenge\n\n**[Update 1] I added unit tests which can be run with `python3 elevator_control_system_tests.py`.**\n\n**[Update 2] I added a main function with a simulation that continuously makes random requests to the system and prints the status. \nTo try this simulation, run `python3 elevator_control_system.py`. Since there is now a main function, interactive mode is less\npretty for the instructions below. For something cleaner, just open up a shell with `python3` and import the system\nwith `from elevator_control_system import ElevatorControlSystem`.**\n\n## How to Use the Interface\n\nI chose to implement the Elevator Control System in Python. The easiest way to start using it is to clone this repository, then start an interactive Python shell: `python3 -i elevator_control_system.py`.\n\nThe main interface is the `ElevatorControlSystem` class, which has a number of useful methods. Create an instance of this class with however many floors and elevators you prefer: `ecs = ElevatorControlSystem(20, 16)` (for 20 floors and 16 elevators).\n\nRun `ecs.status()` for a machine-readable description of the system's status. Its output will be in the form `[(<elevator id>, <current floor>, <up_queue>, <down_queue>, <direction>), ...]`.\n\nRun `ecs.describe()` for a human-readable description of the system's status.\n\nUse the pickup method to submit a request to the system. For example, `ecs.pickup(10, -1)` will submit a request on floor 10 going down.\n\nAfter submitting a few requests, try using the step method to walk through the simulation. Run `ecs.step()` to move one interval through the simulation. Check the status/description between intervals to see the system working.\n\n## Goals\n\nDesign and implement an elevator control system in Python that handles arbitrary elevators and floors. The interface should:\n\n1. Allow a user to query the state of the elevators\n2. 
Allow an elevator to update its state in the system\n3. Allow a user to submit a pickup request\n4. Include a time-stepping simulation\n5. Optimize ordering of stops in elevator queue\n6. Pick the best elevator to handle each new pickup request\n\n## Design Decisions\n\nThe most interesting part of this problem was deciding how to handle queued elevator requests. To make my decision regarding which data structures and algorithms to use here, I made a few observations:\n\n1. Once an elevator is moving in a direction with a goal, it is never beneficial (to the collective interests of the persons on the elevator) to change directions until it completes that goal. Basically, if an elevator is moving up, it should complete all of its up-queued requests before turning around and handling the down-queued ones.\n2. 
I really enjoyed working on this problem, and I look forward to discussion my implementation and getting feedback.\n" }, { "alpha_fraction": 0.6107757687568665, "alphanum_fraction": 0.6219539642333984, "avg_line_length": 35.36585235595703, "blob_id": "f3c684b6b1cf1fe69d1b45a1f909e70ea3e697a2", "content_id": "1fb1829a2d6981a16f09a670c831e9580f445453", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4473, "license_type": "no_license", "max_line_length": 100, "num_lines": 123, "path": "/elevator_control_system.py", "repo_name": "springle/elevator", "src_encoding": "UTF-8", "text": "import heapq, random, time\n\nclass ElevatorControlSystem():\n\tdef __init__(self, number_of_floors, number_of_elevators):\n\t\tif number_of_elevators <= 0:\n\t\t\traise AssertionError(\"Your building must have at least one elevator.\")\n\t\tif number_of_floors <= 0:\n\t\t\traise AssertionError(\"Your building must have at least one floor.\")\n\t\tself.elevators = [Elevator(i) for i in range(number_of_elevators)]\n\t\tself.number_of_floors = number_of_floors\n\t\tself.pending_requests = []\n\n\tdef status(self):\n\t\t# returns the status of all elevators in the system (id, floor #, goal floor #)\n\t\treturn [(e.id, e.current_floor, e.up_queue, e.down_queue, e.direction) for e in self.elevators]\n\n\tdef describe(self):\n\t\tfor e in self.elevators:\n\t\t\tprint(e)\n\n\tdef update(self, elevator_id, floor_number):\n\t\t# updates the state of an elevator in the system, adding a floor to its queue\n\t\te = self.elevators[elevator_id]\n\t\te.add_to_queue(floor_number)\n\n\tdef pickup(self, floor_number, direction):\n\t\t# submits a pickup request to the system\n\t\tbest_elevator = self.elevators[0]\n\t\tbest_distance = self.number_of_floors * 2\n\t\tfor e in self.elevators:\n\t\t\tdistance = abs(e.current_floor - floor_number)\n\n\t\t\t# penalize elevator scores based on direction\n\t\t\tif (e.direction > 0 and floor_number < e.current_floor) or (e.direction > 0 and direction < 0):\n\t\t\t\thighest_stop = heapq.nlargest(1, e.up_queue)[0]\n\t\t\t\tdistance += 2 * highest_stop\n\t\t\telif (e.direction < 0 and floor_number > e.current_floor) or (e.direction < 0 and direction > 0):\n\t\t\t\tlowest_stop = heapq.nsmallest(1, e.down_queue)[0]\n\t\t\t\tdistance += 2 * lowest_stop\n\n\t\t\tif distance < best_distance:\n\t\t\t\tbest_elevator = e\n\t\t\t\tbest_distance = distance\n\t\tbest_elevator.add_to_queue(floor_number)\n\n\tdef step(self):\n\t\t# moves through one interval in the simulation\n\t\tfor e in self.elevators:\n\t\t\te.step()\n\nclass Elevator():\n\tdef __init__(self, elevator_id):\n\t\tself.id = elevator_id\n\t\tself.current_floor = 0\n\t\tself.direction = 0 # 1 for moving up, -1 for moving down, 0 for stationary\n\t\tself.up_queue = [] # heap\n\t\tself.down_queue = [] # heap\n\n\tdef step(self):\n\t\tself.current_floor += self.direction\n\t\tself.drop_off()\n\t\tself.update_direction()\n\n\tdef drop_off(self):\n\t\tif self.up_queue and self.current_floor == self.up_queue[0]:\n\t\t\theapq.heappop(self.up_queue)\n\t\t\tprint(\"Elevator \" + str(self.id) + \" stopping on floor \" + str(self.current_floor))\n\t\telif self.down_queue and self.current_floor == abs(self.down_queue[0]):\n\t\t\theapq.heappop(self.down_queue)\n\t\t\tprint(\"Elevator \" + str(self.id) + \" stopping on floor \" + str(self.current_floor))\n\n\tdef update_direction(self):\n\t\tif self.direction > 0 and not self.up_queue:\n\t\t\tself.direction = -1 if self.down_queue else 0\n\t\tif 
self.direction < 0 and not self.down_queue:\n\t\t\tself.direction = 1 if self.up_queue else 0\n\n\tdef add_to_queue(self, floor_number, direction=0):\n\t\tif floor_number == self.current_floor:\n\t\t\tprint(\"Elevator \" + str(self.id) + \" stopping on floor \" + str(floor_number))\n\t\telif floor_number > self.current_floor:\n\t\t\tif floor_number not in self.up_queue:\n\t\t\t\theapq.heappush(self.up_queue, floor_number)\n\t\t\tif not self.direction:\n\t\t\t\tself.direction = 1\n\t\telse:\n\t\t\tif floor_number not in self.down_queue:\n\t\t\t\theapq.heappush(self.down_queue, -floor_number)\n\t\t\tif not self.direction:\n\t\t\t\tself.direction = -1\n\n\tdef __str__(self):\n\t\treturn \"Elevator \" + str(self.id) \\\n + \" is on floor \" \\\n + str(self.current_floor) \\\n + \" going in direction \" \\\n + str(self.direction) \\\n + \" with up_queue \" \\\n + str(self.up_queue) \\\n + \" and down_queue \" \\\n + str(self.down_queue) \\\n + \".\"\n\nif __name__ == '__main__':\n\tprint(\"----------------------------------\")\n\tprint(\"---BEGINNING RANDOM SIMULATIONS---\")\n\tprint(\"-------PRESS CTRL+C TO STOP-------\")\n\tprint(\"----------------------------------\")\n\ttime.sleep(2)\n\tecs = ElevatorControlSystem(16,16)\n\twhile(True):\n\t\tfor i in range(16):\n\t\t\ta = random.randint(0,15)\n\t\t\tb = random.randint(0,15)\n\t\t\tecs.update(i, a)\n\t\t\tprint('Requesting elevator ' + str(i) + ' to stop on floor ' + str(a) + '.')\n\t\t\tdirection = random.choice([-1,1])\n\t\t\tecs.pickup(b, direction)\n\t\t\tprint('Requesting pickup on floor ' + str(b) + ' in direction ' + str(direction) + '.')\n\t\tfor i in range(16):\n\t\t\tecs.step()\n\t\t\tprint(ecs.status())\n\t\t\ttime.sleep(1)\n" } ]
3
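The elevator implementation in the record above keeps its down-queue as a heap of negated floor numbers, since `heapq` only provides a min-heap. The trick in isolation, for readers unfamiliar with it:

```python
# The negation trick used by Elevator.add_to_queue: heapq is a min-heap,
# so storing -floor makes the largest floor pop first.
import heapq

down_queue = []
for floor in [3, 9, 5]:
    heapq.heappush(down_queue, -floor)

while down_queue:
    print(-heapq.heappop(down_queue), end=" ")  # 9 5 3
print()
```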
empeje/mecode
https://github.com/empeje/mecode
7375a97fd6b79681915e9a65e5913fffcd831839
dbab87d61ef2ec9e9fb657774202a2941473479d
c4a47ddf26ca8f5ec036df254e98842492039862
refs/heads/master
2018-12-19T15:53:30.726567
2018-09-16T08:59:57
2018-09-16T08:59:57
126,249,133
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5253549814224243, "alphanum_fraction": 0.5321162939071655, "avg_line_length": 24.947368621826172, "blob_id": "950922ae153dd659e772df76187f59754446826b", "content_id": "f07b4e9f9d03e85f6d580a077d0b5812b670d791", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1479, "license_type": "permissive", "max_line_length": 55, "num_lines": 57, "path": "/30-days-of-code-hr-2/day-23/main.py", "repo_name": "empeje/mecode", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport sys\n\nclass Node:\n def __init__(self,data):\n self.right=self.left=None\n self.data = data\nclass Solution:\n def insert(self,root,data):\n if root==None:\n return Node(data)\n else:\n if data<=root.data:\n cur=self.insert(root.left,data)\n root.left=cur\n else:\n cur=self.insert(root.right,data)\n root.right=cur\n return root\n\n def levelOrder(self, root):\n h = self.getHeight(root)\n for i in range(1, h + 1):\n self.printGivenLevel(root, i)\n\n # Print nodes at a given level\n def printGivenLevel(self, root, level):\n if root is None:\n return\n if level == 1:\n print(root.data, end=\" \"),\n elif level > 1:\n self.printGivenLevel(root.left, level - 1)\n self.printGivenLevel(root.right, level - 1)\n\n def getHeight(self, root):\n if root is None:\n return 0\n else:\n # Compute the height of each subtree\n lheight = self.getHeight(root.left)\n rheight = self.getHeight(root.right)\n\n # Use the larger one\n if lheight > rheight:\n return lheight + 1\n else:\n return rheight + 1\n\nT=int(input())\nmyTree=Solution()\nroot=None\nfor i in range(T):\n data=int(input())\n root=myTree.insert(root,data)\nmyTree.levelOrder(root)\n" }, { "alpha_fraction": 0.7601010203361511, "alphanum_fraction": 0.7702020406723022, "avg_line_length": 35, "blob_id": "5c39af6ab0a460faac06861a29dcd0760508bebe", "content_id": "8683dc4307c3286a1fe47113441ac37322a9aa44", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 396, "license_type": "permissive", "max_line_length": 160, "num_lines": 11, "path": "/README.md", "repo_name": "empeje/mecode", "src_encoding": "UTF-8", "text": "# mappuji/mecode\n\nThis repository is my personal journey while playing with programming stuffs. Created for personal notes purpose. If you found this useful, feel free to use it.\n\n## License\n\nmecode is Copyright &copy; 2018 Abdurrachman Mappuji. 
It is free software, and may be\nredistributed under the terms specified in the [MIT-LICENSE][MIT] file.\n\n\n[MIT]: http://www.opensource.org/licenses/mit-license.php\n" }, { "alpha_fraction": 0.8214285969734192, "alphanum_fraction": 0.8214285969734192, "avg_line_length": 28, "blob_id": "56471bba331beee3f2f5a282138c584a4e6ac4da", "content_id": "7faf1e0f53ecd599b290acd908973c2b200bda80", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 28, "license_type": "permissive", "max_line_length": 28, "num_lines": 1, "path": "/30-days-of-code-hr-2/day-27/README.md", "repo_name": "empeje/mecode", "src_encoding": "UTF-8", "text": "# No significant code needed" }, { "alpha_fraction": 0.6971235275268555, "alphanum_fraction": 0.6988155841827393, "avg_line_length": 18.700000762939453, "blob_id": "fb049aa2dfd86964f56ed1b313a10b6c34567084", "content_id": "ebccced4381545b7fc2c216e3ce5f98740eda8ed", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 591, "license_type": "permissive", "max_line_length": 77, "num_lines": 30, "path": "/cracking-the-coding-interview-hr/sock-merchant/sock-merchant.rb", "repo_name": "empeje/mecode", "src_encoding": "UTF-8", "text": "#!/bin/ruby\n\nrequire 'json'\nrequire 'stringio'\nrequire 'set'\n\n# Complete the sockMerchant function below.\ndef pair_available(stock, color)\n stock_for_selected_color = stock.select { |member| member == color }.length\n stock_for_selected_color / 2\nend\n\ndef sock_merchant(n, ar)\n available_color = Set.new(ar)\n stock_each_color = available_color.map{ |color| pair_available(ar, color) }\n stock_each_color.sum\nend\n\nfptr = File.open(ENV['OUTPUT_PATH'], 'w')\n\nn = gets.to_i\n\nar = gets.rstrip.split(' ').map(&:to_i)\n\nresult = sock_merchant n, ar\n\nfptr.write result\nfptr.write \"\\n\"\n\nfptr.close\n" }, { "alpha_fraction": 0.5484400391578674, "alphanum_fraction": 0.5632184147834778, "avg_line_length": 20, "blob_id": "3b862aea3f63345b59227d60b321d04fe9c910b7", "content_id": "52f2fefaa955059b3eefe2d9a0464e619c443e96", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 609, "license_type": "permissive", "max_line_length": 63, "num_lines": 29, "path": "/30-days-of-code-hr-2/day-20/main.py", "repo_name": "empeje/mecode", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport sys\n\nn = int(input().strip())\na = list(map(int, input().strip().split(' ')))\n\n\n# Write Your Code Here\ndef swap(element_one, element_two):\n temp_var = a[element_one]\n a[element_one] = a[element_two]\n a[element_two] = temp_var\n\n\nnumber_of_swaps = 0\n\nfor i in range(len(a)):\n for j in range(len(a) - 1):\n if a[j] > a[j + 1]:\n swap(j, j + 1)\n number_of_swaps += 1\n\n if number_of_swaps == 0:\n break\n\nprint(\"Array is sorted in \" + str(number_of_swaps) + \" swaps.\")\nprint(\"First Element:\", a[0])\nprint(\"Last Element:\", a[len(a) - 1])\n" }, { "alpha_fraction": 0.5934861302375793, "alphanum_fraction": 0.6091676950454712, "avg_line_length": 24.507692337036133, "blob_id": "c0e5915a89304ad34ab4b87c59d4c9c5f6416be0", "content_id": "7be7e3e2bc9d178292b4f67b7a84474df1439963", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1658, "license_type": "permissive", "max_line_length": 90, "num_lines": 65, "path": "/10-days-of-stats/exercise-05/main.py", "repo_name": "empeje/mecode", "src_encoding": "UTF-8", 
"text": "#!/usr/bin/env python\n\ncounts = int(input())\ndata = list(map(int, input().split(\" \")))\ndata_frequencies = list(map(int, input().split(\" \")))\n\n\ndef mean(data_list):\n return sum(data_list) / len(data_list)\n\n\ndef is_odd(number):\n if number % 2 == 0:\n return False\n else:\n return True\n\n\ndef median(data_list):\n data_list.sort()\n if is_odd(len(data_list)):\n return data_list[(len(data_list) // 2)]\n else:\n medians = data_list[len(data_list) // 2] + data_list[len(data_list) // 2 - 1]\n resulted_median = medians / 2\n return resulted_median\n\n\ndef mode(data_list):\n data_list.sort()\n m = max([data_list.count(a) for a in data_list])\n return [x for x in data_list if data_list.count(x) == m][0] if m > 1 else data_list[0]\n\n\ndef q1(data_list):\n data_list.sort()\n if is_odd(len(data_list)):\n new_data_list = data_list[:(len(data_list) // 2)]\n else:\n new_data_list = data_list[:(len(data_list) // 2)]\n return median(new_data_list)\n\n\ndef q3(data_list):\n data_list.sort()\n if is_odd(len(data_list)):\n new_data_list = data_list[(len(data_list) // 2) + 1:]\n else:\n new_data_list = data_list[(len(data_list) // 2):]\n return median(new_data_list)\n\ndef expand_data(data_list, frequencies_list):\n expanded_data_list = []\n for i in range(len(data_list)):\n expanded_data_list += [data_list[i]] * frequencies_list[i]\n return expanded_data_list\n\n\nexpanded_data = expand_data(data, data_frequencies)\n\nquartile_2 = median(expanded_data)\nquartile_1 = q1(expanded_data)\nquartile_3 = q3(expanded_data)\n\nprint(format(quartile_3 - quartile_1, \"0.1f\"))\n" }, { "alpha_fraction": 0.6405228972434998, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 18.95652198791504, "blob_id": "3606ff89e6bc965e012da8d02667a9da81c96556", "content_id": "4c6ef74e55b47cd08f7a7279947f62b7f8aca8fd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 459, "license_type": "permissive", "max_line_length": 63, "num_lines": 23, "path": "/10-days-of-stats/exercise-03/main.py", "repo_name": "empeje/mecode", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport math\n\ncounts = int(input())\ndata = list(map(int, input().split(\" \")))\n\n\ndef mean(data_list):\n return sum(data_list) / len(data_list)\n\n\ndef stdev(data_list):\n average = mean(data_list)\n numerators = 0\n for i in range(len(data_list)):\n numerators += math.pow(data_list[i] - average, 2)\n\n standard_deviation = math.sqrt(numerators / len(data_list))\n return standard_deviation\n\n\nprint(round(stdev(data), 1))\n" }, { "alpha_fraction": 0.416015625, "alphanum_fraction": 0.447265625, "avg_line_length": 18, "blob_id": "ffb87f42b85a0d5b238c235278bc6cf12f042d0b", "content_id": "a2b4052121d8b6caf79eed875b0ced89f2c4dbc1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 512, "license_type": "permissive", "max_line_length": 41, "num_lines": 27, "path": "/30-days-of-code-hr-2/day-25/main.py", "repo_name": "empeje/mecode", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\ndef is_prime(n):\n if n == 2 or n == 3:\n return True\n if n < 2 or n % 2 == 0:\n return False\n if n < 9:\n return True\n if n % 3 == 0:\n return False\n r = int(n ** 0.5)\n f = 5\n while f <= r:\n if n % f == 0: return False\n if n % (f + 2) == 0: return False\n f += 6\n return True\n\n\nT = int(input())\nfor i in range(T):\n data = int(input())\n if is_prime(data):\n print(\"Prime\")\n else:\n print(\"Not prime\")" 
}, { "alpha_fraction": 0.7307692170143127, "alphanum_fraction": 0.7307692170143127, "avg_line_length": 9.399999618530273, "blob_id": "020f7c01bf63576a276e5aa7cfeb982e72c1ab79", "content_id": "b4b83b1c92766ba7cda459d9ca2952fa784462ff", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 52, "license_type": "permissive", "max_line_length": 19, "num_lines": 5, "path": "/30-days-of-code-hr-2/day-15/run.sh", "repo_name": "empeje/mecode", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\njavac main.java\n\njava Solution\n" }, { "alpha_fraction": 0.5408719182014465, "alphanum_fraction": 0.5599455237388611, "avg_line_length": 17.350000381469727, "blob_id": "597fcdc36cfb9339f023447346deae677e8f0433", "content_id": "8dfe437b8bea03292d049606bf6cd32c2a859c98", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 734, "license_type": "permissive", "max_line_length": 75, "num_lines": 40, "path": "/10-days-of-stats/exercise-01/main.py", "repo_name": "empeje/mecode", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\ncounts = int(input())\ndata = list(map(int, input().split(\" \")))\n\n\ndef mean(list):\n return sum(list) / len(list)\n\n\ndef is_odd(number):\n if number % 2 == 0:\n False\n else:\n True\n\n\ndef median(list):\n list.sort()\n if is_odd(len(list)):\n return list[(len(list) // 2) + 1]\n else:\n medians = list[len(list) // 2] + list[len(list) // 2 - 1]\n median = medians / 2\n return median\n\n\ndef mode(list):\n list.sort()\n m = max([list.count(a) for a in list])\n return [x for x in list if list.count(x) == m][0] if m > 1 else list[0]\n\n\nmean = mean(data)\nmedian = median(data)\nmode = mode(data)\n\nprint(round(mean, 1))\nprint(round(median, 1))\nprint(round(mode, 1))\n" }, { "alpha_fraction": 0.6222826242446899, "alphanum_fraction": 0.6304348111152649, "avg_line_length": 19.44444465637207, "blob_id": "4838cd671c89938a34354d4c37ce59aa371b96fd", "content_id": "6e2eff0117c73ddf64a5b5426a19ed90f1057c66", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 368, "license_type": "permissive", "max_line_length": 55, "num_lines": 18, "path": "/30-days-of-code-hr-2/day-28/main.py", "repo_name": "empeje/mecode", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport sys\nimport re\n\nlist_of_users = []\nN = int(input().strip())\nfor a0 in range(N):\n firstName, emailID = input().strip().split(' ')\n firstName, emailID = [str(firstName), str(emailID)]\n\n if emailID.find('@gmail.com') != -1:\n list_of_users.append(firstName)\n\nlist_of_users.sort()\n\nfor user in list_of_users:\n print(user)\n" }, { "alpha_fraction": 0.7822014093399048, "alphanum_fraction": 0.7868852615356445, "avg_line_length": 37.818180084228516, "blob_id": "2c0fb46e80ab4a9de4a0809450784e1c082ef238", "content_id": "c8dfef1f29809e0b5a842761311cee022480424a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 427, "license_type": "permissive", "max_line_length": 154, "num_lines": 11, "path": "/30-days-of-code-hr-2/README.md", "repo_name": "empeje/mecode", "src_encoding": "UTF-8", "text": "# About\n\nIt is a folder where I store my journey to the second half of 30 Days of Code in Hacker Rank since the first half is already done in the different folder.\n\n# Programming Language\n\nSince not every problem on HackerRank not available for Python, I choose Java as my 
secondary language.\n\n# HackerRank Profile\n\nYou can connect with me on HackerRank by visiting [hackerrank.com/mappuji](https://www.hackerrank.com/mappuji).\n" }, { "alpha_fraction": 0.599179208278656, "alphanum_fraction": 0.6306429505348206, "avg_line_length": 29.5, "blob_id": "285519e97f14c2feca725c17b596221ee43df3a7", "content_id": "ea7338438d1df1c2f3ac909f0da4090c2bb72c49", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 731, "license_type": "permissive", "max_line_length": 121, "num_lines": 24, "path": "/30-days-of-code-hr-2/day-26/main.py", "repo_name": "empeje/mecode", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nactual_date_raw = str(input()).split(\" \")\nexpected_date_raw = str(input()).split(\" \")\n\nexpected_date = {'day': int(expected_date_raw[0]), 'month': int(expected_date_raw[1]), 'year': int(expected_date_raw[2])}\nactual_date = {'day': int(actual_date_raw[0]), 'month': int(actual_date_raw[1]), 'year': int(actual_date_raw[2])}\n\nday_diff = actual_date['day'] - expected_date['day']\nmonth_diff = actual_date['month'] - expected_date['month']\nyear_diff = actual_date['year'] - expected_date['year']\n\nif year_diff >= 1:\n    print(10000)\nelif year_diff < 0:\n    print(0)\nelif month_diff >= 1:\n    fine = 500 * month_diff\n    print(fine)\nelif month_diff < 0:\n    # returned in an earlier month of the same year: no fine\n    print(0)\nelif day_diff >= 1:\n    fine = 15 * day_diff\n    print(fine)\nelse:\n    print(0)" }, { "alpha_fraction": 0.6382636427879333, "alphanum_fraction": 0.6414790749549866, "avg_line_length": 26.04347801208496, "blob_id": "a31378431b97cfc5367b479b9cc5ccc230cad913", "content_id": "a3a33e19f0aa0d5a27744db71cb288eb1651d1b1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 622, "license_type": "permissive", "max_line_length": 62, "num_lines": 23, "path": "/10-days-of-stats/exercise-02/main.py", "repo_name": "empeje/mecode", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\ncounts = int(input())\ndata = list(map(int, input().split(\" \")))\ndata_weights = list(map(int, input().split(\" \")))\n\n\ndef data_times_weights(data_list, weights):\n    if len(data_list) == len(weights):\n        numerators = 0\n        for i in range(len(data_list)):\n            numerators += data_list[i] * weights[i]\n        return numerators\n    else:\n        raise ValueError('Size of data and weights not equal')\n\n\ndef weighted_mean(data_list, weights):\n    numerators = data_times_weights(data_list, weights)\n    return numerators / sum(weights)\n\n\nprint(round(weighted_mean(data, data_weights),1))\n" }, { "alpha_fraction": 0.48230087757110596, "alphanum_fraction": 0.491150438785553, "avg_line_length": 19.636363983154297, "blob_id": "2f2c7175f0666b350249365ca4327d9e56472ab9", "content_id": "74e484271e12d87e2e346ada507993676a64a7a4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 226, "license_type": "permissive", "max_line_length": 36, "num_lines": 11, "path": "/30-days-of-code-hr-2/day-29/main.py", "repo_name": "empeje/mecode", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n# I am still a little unsure about this bit trick\nfor _ in range(int(input())):\n    n, k = map(int, input().split())\n    a = k - 1\n    b = ~a & -~a\n    if a | b > n:\n        print(a - 1)\n    else:\n        print(a)" }, { "alpha_fraction": 0.6647058725357056, "alphanum_fraction": 0.6647058725357056, "avg_line_length": 15.454545021057129, "blob_id": "64d4425a9c3eb3e9524732f022dc8411c3047e87", "content_id": "ab2a16ac1a61408b8ecd9970f950f30e41157d2b",
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 170, "license_type": "permissive", "max_line_length": 33, "num_lines": 11, "path": "/30-days-of-code-hr-2/day-16/main.py", "repo_name": "empeje/mecode", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport sys\n\nstring_value = input().strip()\n\ntry:\n int_value = int(string_value)\n print(int_value)\nexcept ValueError:\n print(\"Bad String\")\n" } ]
16
scienceopen/isrutils
https://github.com/scienceopen/isrutils
b7e7f8ec35e220d7c2d572b2d8c5d3ebab70c706
c9146e236db8f593bba89ceee913fd62677518c4
92a98fc611da3253fae65a1ce317dde377570793
refs/heads/master
2021-04-19T01:11:56.581536
2021-03-22T04:47:28
2021-03-22T04:47:28
43,934,331
0
2
null
null
null
null
null
[ { "alpha_fraction": 0.5862573385238647, "alphanum_fraction": 0.652046799659729, "avg_line_length": 19.727272033691406, "blob_id": "364203a93af982a3aadb6b50d690a2e41e209e86", "content_id": "31bf3677de3336e867e303a14319f70898a3482e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 684, "license_type": "permissive", "max_line_length": 80, "num_lines": 33, "path": "/Examples/SimpleSNR-2013-04-23.py", "repo_name": "scienceopen/isrutils", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\"\"\"\n2013-05-01\n\"\"\"\nfrom pathlib import Path\nfrom datetime import datetime\nfrom matplotlib.pyplot import show\nfrom isrutils.looper import simpleloop\nimport seaborn as sns\n\nsns.set_context(\"talk\", 1.5)\n\n# %% users param\nvlim = (22, 55)\n# zlim=(90, 400)\nzlim = (None, None)\ntlim = (datetime(2013, 5, 1), datetime(2013, 5, 1))\ntlim = (None, None)\n\n\nP = {\n \"path\": \"~/data/2013-04-23/isr\",\n \"beamid\": 64157,\n \"showacf\": False,\n \"showsamples\": True,\n}\n# %% iterate over list. Files are ID'd by file extension (See README.rst)\n\nflist = [x for x in Path(P[\"path\"]).expanduser().iterdir() if x.suffix == \".h5\"]\n\nsimpleloop(flist, tlim, zlim, vlim, P)\n\nshow()\n" } ]
1
javiercm16/Proyecto-Aseguramiento-2018
https://github.com/javiercm16/Proyecto-Aseguramiento-2018
513b92d938e40c0b5f067e14a8609ea48dec1ee4
e1f9bcd7ade1fe4987fa07fa709bbb8010e72907
d54aaa9ffcdcfea59abeabae4684b6b58d796a36
refs/heads/master
2021-11-22T05:08:13.905047
2018-09-02T05:29:31
2018-09-02T05:29:31
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.606533408164978, "alphanum_fraction": 0.6387128233909607, "avg_line_length": 36.71697998046875, "blob_id": "88186270936c7e49465be919ce9ace98e0206cb5", "content_id": "4f08608886aa8f3a2be30112b75d7f64a569b965", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2051, "license_type": "no_license", "max_line_length": 119, "num_lines": 53, "path": "/POC ACS/PruebasUnitarias/PruebasUnitarias.py", "repo_name": "javiercm16/Proyecto-Aseguramiento-2018", "src_encoding": "UTF-8", "text": "'''\r\nCreated on Aug 23, 2018\r\n\r\n@author: Javier\r\n'''\r\nimport unittest\r\nfrom Gestores import gestorCSV,gestorImagenes, gestorKeras\r\nfrom Gestores.gestorCSV import registroObjetos\r\nfrom Gestores.gestorImagenes import guardarImagen\r\nfrom Gestores.gestorKeras import cargarModelo, guardarModelo\r\n \r\nclass SimpleTestCase(unittest.TestCase):\r\n\r\n def test_existePath(self):\r\n listaDatos = [{'identificacion':'1', 'centroide':'234','area': '467'}]\r\n pathFalso = 'ProyectoAseguramiento/CSV/listaObjetos.csv'\r\n self.assertFalse(registroObjetos(listaDatos, pathFalso))\r\n \r\n\r\n def test_DatosValidosCSV(self):\r\n datosValidos = ['identificacion','centroide','area']\r\n listaDatos = [{datosValidos[0].lower():'1', datosValidos[1].lower():'234',datosValidos[2].lower(): '467'},\r\n {datosValidos[0].lower():'2', datosValidos[1].lower():'1000',datosValidos[2].lower(): '4967'},\r\n {datosValidos[0].lower():'3', datosValidos[1].lower():'678',datosValidos[2].lower(): '492'}]\r\n\r\n self.assertTrue(registroObjetos(listaDatos,'../CSV/listaObjetos.csv'))\r\n \r\n\r\n def test_imagenyDirectorioInexistente(self):#,fuente,archivo\r\n nombreArchivo= \"../static/mcf-z-stacks-03212011_a12_s1.png\"\r\n outputDirectory=\"../static/hola.jpg\"\r\n self.assertTrue(guardarImagen(nombreArchivo,outputDirectory))\r\n\r\n def test_DatosValidosModelo(self):\r\n nombre = \"modelo_test\"\r\n num_filtros = 32\r\n forma = (100,)\r\n activacion = \"relu\"\r\n optimizador = \"rmsprop\"\r\n perdida = \"binary_crossentropy\" \r\n epocas = 100 \r\n tam_batch = 32\r\n self.assertTrue(guardarModelo(nombre, num_filtros, forma, activacion, optimizador, perdida, epocas, tam_batch))\r\n \r\n\r\n def test_nombreModeloExistente(self):\r\n nombre = \"noExisto\"\r\n self.assertTrue(cargarModelo(nombre))\r\n nombre = \"modelo_test\"\r\n self.assertTrue(cargarModelo(nombre))\r\n \r\nif __name__ == \"__main__\":\r\n unittest.main() " }, { "alpha_fraction": 0.7044673562049866, "alphanum_fraction": 0.7096219658851624, "avg_line_length": 34.4375, "blob_id": "3b6614808240f9e632e111745c53fffeb229b9b7", "content_id": "a04c241db4347652803077801caf28537804e595", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1164, "license_type": "no_license", "max_line_length": 129, "num_lines": 32, "path": "/POC ACS/Gestores/gestorCSV.py", "repo_name": "javiercm16/Proyecto-Aseguramiento-2018", "src_encoding": "UTF-8", "text": "'''\r\nCreated on Aug 19, 2018\r\n\r\n@author: Javier\r\n'''\r\n\r\n## \\package Gestores\r\n# Contiene todas las funciones asociadas al manejo de archivos de extension CSV utilizando la libreria PANDAS\r\n#\r\n\r\nimport pandas\r\nfrom collections import namedtuple\r\n\r\n\r\n## Descripcion de la funcion registroObjetos\r\n#\r\n# Se encarga de agrupar los datos, que provienen en diccionarios, en tres categorias las cuales seran cargadas en un archivo CSV\r\n# \\param listaObjetos Es una lista de diccionarios y 
each dictionary contains the attributes area, centroide, and identificacion\r\n# \\param directorio The output directory where the CSV will be generated\r\n# \\return True on success, False on failure\r\ndef registroObjetos(listaObjetos, directorio):\r\n    try:\r\n        listaIdentificacion= [d[\"identificacion\"] for d in listaObjetos ]\r\n        listaCentroide= [d[\"centroide\"] for d in listaObjetos]\r\n        listaArea = [d[\"area\"] for d in listaObjetos]\r\n        diccionario = {'Identificacion':listaIdentificacion,'Centroide': listaCentroide,'Area': listaArea}\r\n        \r\n        datos = pandas.DataFrame(diccionario)\r\n        datos.to_csv(directorio)\r\n        return True\r\n    except:\r\n        return False" }, { "alpha_fraction": 0.7101449370384216, "alphanum_fraction": 0.717391312122345, "avg_line_length": 29.923076629638672, "blob_id": "2c8b95cf54b5b0778903bcd2b1151a47634e78e", "content_id": "29d89ada0cb2bba51b98a27961cf70c97b2cfadd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 828, "license_type": "no_license", "max_line_length": 102, "num_lines": 26, "path": "/POC ACS/Gestores/gestorImagenes.py", "repo_name": "javiercm16/Proyecto-Aseguramiento-2018", "src_encoding": "UTF-8", "text": "'''\r\nCreated on Aug 19, 2018\r\n\r\n@author: Javier\r\n'''\r\n\r\n## \\package Gestores\r\n# Contains all the functions associated with image handling using the numpy and PIL libraries\r\n#\r\nfrom PIL import Image\r\nimport numpy as np\r\n\r\n## Description of the guardarImagen function\r\n#\r\n# Saves an image to the specified output directory\r\n# \\param nombreArchivo The name of the image to look up in the static folder\r\n# \\param directorioArchivo The output directory together with the name of the image to save\r\n# \\return True on success, False on failure\r\ndef guardarImagen(nombreArchivo,directorioArchivo):\r\n    try:\r\n        imagenNueva= Image.fromarray(np.asarray(Image.open(nombreArchivo))) \r\n        imagenNueva.save(directorioArchivo)\r\n        return True\r\n    except Exception as e:\r\n        print(e)\r\n        return False" }, { "alpha_fraction": 0.6260986924171448, "alphanum_fraction": 0.6386071443557739, "avg_line_length": 36.441558837890625, "blob_id": "d90eff980e47334569bba2ead27c306b94fb972c", "content_id": "b58d3d66a47194f6f1ad313a3b82773c33ea2655", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2958, "license_type": "no_license", "max_line_length": 116, "num_lines": 77, "path": "/POC ACS/Gestores/gestorKeras.py", "repo_name": "javiercm16/Proyecto-Aseguramiento-2018", "src_encoding": "UTF-8", "text": "## \\package Gestores\r\n# Module containing the functions for loading and saving learning models by means of the Keras library\r\n#\r\n\r\nfrom keras.models import Sequential, Model\r\nfrom keras.layers import Input, Dense, Activation\r\nfrom keras.models import model_from_json\r\nimport numpy as np\r\nimport os\r\n\r\n## Description of the guardarModelo function\r\n#\r\n# Takes the parameters given by the user, builds a model from them, and saves that model to a .JSON file\r\n# \\param model_name Defines the name of the model, and hence of the files\r\n# \\param filter_number Number of filters used by the model\r\n# \\param shape_tuple Tuple of values defining the input shape\r\n# \\param str_activation Defines the activation of the model (relu, sigmoid, softmax, etc.)\r\n# \\param optim The optimizer used to compile the model (usually rmsprop)\r\n# \\param loss_function Defines the loss function
(binary or multi-class)\r\n# \\param num_epochs Number of training epochs\r\n# \\param b_size Sets the batch size\r\n# \\return True on success, False on failure\r\n\r\ndef guardarModelo(model_name, filter_number, shape_tuple, str_activation, optim, loss_function, num_epochs, b_size):\r\n    try:\r\n        model = Sequential([\r\n            Dense(filter_number, input_shape=shape_tuple),\r\n            Activation(str_activation)\r\n        ])\r\n        \r\n        model.compile(optimizer=optim,\r\n                      loss=loss_function,\r\n                      metrics=['accuracy'])\r\n        \r\n        # dummy training data; the label width must match the model's output units\r\n        data = np.random.random((1000, 100))\r\n        labels = np.random.randint(2, size=(1000, filter_number))\r\n        \r\n        model.fit(data, labels, epochs = num_epochs, batch_size = b_size)\r\n        \r\n        model_json = model.to_json()\r\n        str_name = model_name + \".json\"\r\n        with open(str_name, \"w\") as json_file:\r\n            json_file.write(model_json)\r\n\r\n        str_name = model_name + \".h5\"\r\n        model.save_weights(str_name)\r\n        return True\r\n    except:\r\n        return False\r\n    \r\n## Description of the cargarModelo function\r\n#\r\n# Given a file name, looks up the model's JSON together with its weights and loads them into memory\r\n# \\param name Indicates the name of the model, and hence of the files\r\n# \\return True on success, False on failure\r\n\r\ndef cargarModelo(name):\r\n    try:\r\n        model_name = name + \".json\"\r\n        json_file = open(model_name, 'r')\r\n        modelo_cargado_json = json_file.read()\r\n        json_file.close()\r\n        modelo_cargado = model_from_json(modelo_cargado_json)\r\n\r\n        model_name = name + \".h5\"\r\n        modelo_cargado.load_weights(model_name)\r\n        print(\"Model loaded\")\r\n        \r\n        modelo_cargado.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\r\n        \r\n        # dummy evaluation data; the label width must match the loaded model's output units\r\n        data = np.random.random((1000, 100))\r\n        labels = np.random.randint(2, size=(1000, modelo_cargado.output_shape[-1]))\r\n        \r\n        score = modelo_cargado.evaluate(data, labels, verbose=0)\r\n        print(\"%s: %.2f%%\" % (modelo_cargado.metrics_names[1], score[1]*100))\r\n        return True\r\n    except:\r\n        return False" }, { "alpha_fraction": 0.6842105388641357, "alphanum_fraction": 0.6847047209739685, "avg_line_length": 35.49074172973633, "blob_id": "a270ba337d32b066eab4be87d1a69e1ddfe5ddc1", "content_id": "6883d286a4738baf847988b4bd6805a6afd609a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4047, "license_type": "no_license", "max_line_length": 143, "num_lines": 108, "path": "/POC ACS/main.py", "repo_name": "javiercm16/Proyecto-Aseguramiento-2018", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request,session,redirect, url_for\r\n\r\nfrom Gestores import gestorCSV, gestorImagenes, gestorKeras\r\nfrom Modelos.ModeloObjeto import *\r\n\r\n\r\napp = Flask(__name__)\r\napp.secret_key = \"something-from-os.urandom(24)\"\r\n\r\n## Description of the setVariablesDeSesion function\r\n#\r\n# Defines each of the session variables to be used.\r\n# \\return Nothing\r\ndef setVariablesDeSesion():\r\n    session['listaObjetos'] = []\r\n    \r\[email protected]('/')\r\n\r\n## Description of the main function\r\n#\r\n# Initializes the session variables and redirects to the web page\r\n# \\return Render command for the CSV generation page\r\ndef main():\r\n    setVariablesDeSesion()\r\n    return render_template('csvPandas.html') \r\n\r\[email protected]('/cambioPantalla', methods = ['POST'])\r\n\r\n## Description of the cambioPantalla function\r\n#\r\n# Sets which HTML screen to show depending on the user's selection in the interface\r\n# \\return Render command for the pages for CSV generation, image loading and saving, and loading models in
Keras\r\ndef cambioPantalla():\r\n    valorBoton = request.form.get(\"cambiar\")\r\n    \r\n    if(valorBoton == \"imagen\"):\r\n        return render_template(\"imagen.html\")\r\n    elif(valorBoton == \"csv\"):\r\n        return render_template(\"csvPandas.html\")\r\n    elif(valorBoton == \"keras\"):\r\n        return render_template(\"modeloKeras.html\")\r\n\r\[email protected]('/registrarPersonas', methods=['POST'])\r\n\r\n## Description of the registrarPersonas function\r\n#\r\n# Receives the data entered by the user and stores it so it can be used together later\r\n# \\return Render command for the CSV generation page\r\ndef registrarPersonas():\r\n    __numero = request.form.get(\"numero\")\r\n    __centroide = request.form.get(\"centroide\")\r\n    __area = request.form.get(\"area\")\r\n    \r\n    objetos = session['listaObjetos']\r\n    objetos.append(ObjetoImagen(__numero,__centroide,__area).__dict__)\r\n    session['listaObjetos'] = objetos\r\n    \r\n    \r\n    return render_template(\"csvPandas.html\")\r\n\r\[email protected]('/generarCSV', methods = ['POST'])\r\n\r\n## Description of the generarCSV function\r\n#\r\n# Calls the function stored in the CSV handling module\r\n# \\return Render command for the CSV generation page\r\ndef generarCSV():\r\n    __directorioCSV = request.form.get(\"directorioCSV\")\r\n    gestorCSV.registroObjetos(session['listaObjetos'],__directorioCSV)\r\n\r\n    return render_template(\"csvPandas.html\")\r\n\r\[email protected]('/guardarImagen', methods = ['POST'])\r\n\r\n## Description of the guardarImagen function\r\n#\r\n# Receives the image selected by the user and stores it in the specified output directory\r\n# \\return Render command for the image loading and saving page\r\ndef guardarImagen():\r\n    nombreArchivo = request.files['file']\r\n    __directorioArchivo = request.form.get(\"directorioArchivo\")\r\n    gestorImagenes.guardarImagen(nombreArchivo,__directorioArchivo)\r\n    \r\n    return render_template(\"imagen.html\")\r\n\r\[email protected]('/cargarModelo', methods = ['POST']) \r\ndef cargarModelo():\r\n    gestorKeras.cargarModelo(request.form.get(\"name\"))\r\n    \r\n    return render_template(\"modeloKeras.html\")\r\n\r\[email protected]('/guardarModelo', methods = ['POST']) \r\ndef guardarModelo():\r\n    __model_name = request.form.get(\"model_name\")\r\n    __filter_number = request.form.get(\"filter_number\")\r\n    __shape_tuple = request.form.get(\"shape_tuple\")\r\n    __str_activation = request.form.get(\"str_activation\")\r\n    __optim = request.form.get(\"optim\")\r\n    __loss_function = request.form.get(\"loss_function\")\r\n    __num_epochs = request.form.get(\"num_epochs\")\r\n    __b_size = request.form.get(\"b_size\")\r\n    gestorKeras.guardarModelo(__model_name, __filter_number, __shape_tuple, __str_activation, __optim, __loss_function, __num_epochs, __b_size)\r\n    \r\n    return render_template(\"modeloKeras.html\")\r\n\r\nif __name__ == '__main__':\r\n\r\n    app.run(debug=True)" }, { "alpha_fraction": 0.6772486567497253, "alphanum_fraction": 0.6851851940155029, "avg_line_length": 28.079999923706055, "blob_id": "86f5596c36533d6d44147f34dc4a8d28dd436df0", "content_id": "64088dbb18f305ca38c1d3a1cb4d04b9025c82c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 756, "license_type": "no_license", "max_line_length": 136, "num_lines": 25, "path": "/POC ACS/Modelos/ModeloObjeto.py", "repo_name": "javiercm16/Proyecto-Aseguramiento-2018", "src_encoding": "UTF-8", "text":
"'''\r\nCreated on Aug 19, 2018\r\n\r\n@author: Javier\r\n'''\r\n\r\n## \\package Modelos\r\n# Contiene todo los atributos y comportamiento de la clase ObjetoImagen\r\n#\r\n#\r\n\r\n ## Descripcion de la clase Objeto Imagen.\r\n #\r\n # Contiene la informacion y atributos (Centroide y Area) de los objetos encontrados en una imagen y un identificador para cada objeto.\r\n \r\nclass ObjetoImagen:\r\n ## El constructor\r\n # \\param self\r\n # \\param identificacion Es el identificador del objeto\r\n # \\param centroide Caracteristica especial del objeto\r\n # \\param area Caracteristica especial del objeto\r\n def __init__(self,identificacion,centroide,area):\r\n self.identificacion = identificacion\r\n self.centroide = centroide\r\n self.area = area\r\n " } ]
6
excray/coding-algos
https://github.com/excray/coding-algos
10b91c5c6a5e1464caafddd8eb4233b276b37fe1
17fbcf9aa70cf17f6f30e57211b22162ecd9a7f8
b670fb2a460c7643364df530b146e7f5e79c6cc6
refs/heads/master
2020-04-05T22:45:28.567598
2017-06-03T20:36:06
2017-06-03T20:36:06
40,498,782
0
0
null
2015-08-10T18:29:24
2015-08-10T18:29:24
2015-10-09T02:02:59
Python
[ { "alpha_fraction": 0.4027617871761322, "alphanum_fraction": 0.4545454680919647, "avg_line_length": 27.064516067504883, "blob_id": "3e3500f1f4fa68c03f9633641d91cf717d4300dc", "content_id": "b1355d2543e2dad975f3982cdcd76f64eebaec95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 881, "license_type": "no_license", "max_line_length": 84, "num_lines": 31, "path": "/excrayg/leetcode/python/min.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "def find_max_min(ip):\n if len(ip) ==1:\n return int(ip[0])\n \n val1 = float(\"-inf\")\n # val2 = float(\"inf\")\n for i in range(1, len(ip), 2):\n if ip[i] == '+':\n val1 = max(val1, find_max_min(ip[:i]) + find_max_min(ip[i+1:]))\n else:\n val1 = max(val1, find_max_min(ip[:i]) - find_max_min(ip[i+1:]))\n \n return val1\n\ndef find_max_min1(ip, d, s, e):\n if e-s == 1:\n return int(ip[s])\n \n val1 = float(\"-inf\")\n # val2 = float(\"inf\")\n for i in range(1, e-s, 2):\n if ip[i] == '+': \n \tval1 = max(val1, find_max_min1(ip, d, s, i) + find_max_min1(ip, d, i+1, e))\n else:\n val1 = max(val1, find_max_min1(ip[:i]) - find_max_min1(ip[i+1:]))\n \n return val1\n\nprint(find_max_min1(\"1+3−2−5+1−6+7\"))\n\nprint(find_max_min(\"1+3−2−5+1−6+7\"))" }, { "alpha_fraction": 0.4184167981147766, "alphanum_fraction": 0.4491114616394043, "avg_line_length": 21.925926208496094, "blob_id": "2901399d1b5535355518a1b43d43fa4200a40125", "content_id": "cc8aa917fa3d737034514496219b96be3a041cc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 619, "license_type": "no_license", "max_line_length": 45, "num_lines": 27, "path": "/excrayg/leetcode/python/next_perm.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "class Solution:\n # @param num : a list of integer\n # @return : a list of integer\n def nextPermutation(self, num):\n # write your code here\n #num_str = \"\".join(num)\n k = len(num)\n if k == 0 or k==1:\n return num\n \n while k > 0:\n if int(num[k-1]) < int(num[-1]):\n break\n k-=1\n \n if k == 0:\n return sorted(num)\n \n num[k-1], num[-1] = num[-1], num[k-1]\n return num[:k]+num[k:]\n\n\ns = Solution()\nt=s.nextPermutation([1,3,2,3])\nprint(t)\ns.nextPermutation([4,3,2,1])\nprint(t)\n" }, { "alpha_fraction": 0.5480349063873291, "alphanum_fraction": 0.5502183437347412, "avg_line_length": 34.19230651855469, "blob_id": "ef3f77e6a2e5b534b7ca27252746bd44a9a12e88", "content_id": "70666111a20623922842714820510c588e8cb7e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 916, "license_type": "no_license", "max_line_length": 88, "num_lines": 26, "path": "/excrayg/leetcode/python/unique_comb.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "class Solution:\n # @param candidates, a list of integers\n # @param target, integer\n # @return a list of lists of integers\n def DFS(self, candidates, target, start, valuelist):\n length = len(candidates)\n if target == 0:\n return Solution.ret.append(list(valuelist))\n prev = None\n for i in range(start, length):\n if target < candidates[i]:\n break\n # if prev != None and prev == candidates[i]:\n # continue\n self.DFS(candidates, target - candidates[i], i, valuelist + [candidates[i]])\n prev = candidates[i]\n \n def combinationSum(self, candidates, target):\n candidates.sort()\n c = set(candidates)\n candidates = []\n for i in c:\n candidates.append(i)\n Solution.ret = []\n 
self.DFS(candidates, target, 0, [])\n return Solution.ret\n\n" }, { "alpha_fraction": 0.589216947555542, "alphanum_fraction": 0.6097561120986938, "avg_line_length": 23.935483932495117, "blob_id": "866a0d2b72a5bc89e23ec98d2f3f55204e89747c", "content_id": "cf8d331ca8719e65cd7def07b7acf4fd2edd6a3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 779, "license_type": "no_license", "max_line_length": 74, "num_lines": 31, "path": "/excrayg/leetcode/python/sorting.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\n\n#Recursive insertion sort\ndef rec_insertion(seq, i):\n\tif i > 0:\n\t\trec_insertion(seq, i-1)\n\t\tj = i-1\n\t\twhile j>0 and seq[j-1] > seq[j]:\n\t\t\t#swap(seq[j-1],seq[j])\n\t\t\tseq[j-1], seq[j] = seq[j], seq[j-1]\n\t\t\tj-=1\n\t\t#print seq\n\n\n\nimport unittest\nimport random\nclass TestSequenceFunctions(unittest.TestCase):\n\n def setUp(self):\n self.seq = range(10)\n random.shuffle(self.seq)\n\n def test_rec_insertion(self):\n # make sure the shuffled sequence does not lose any elements \n rec_insertion(self.seq, len(self.seq))\n self.assertEqual(self.seq, range(10)) \n # should raise an exception for an immutable sequence\n self.assertRaises(TypeError, rec_insertion, (1,2,3))\n \n\nif __name__ == '__main__':\n unittest.main()\n " }, { "alpha_fraction": 0.47192567586898804, "alphanum_fraction": 0.49051007628440857, "avg_line_length": 14.908804893493652, "blob_id": "b8fc318f843c21dbc20e12a702011d4fa45d443b", "content_id": "a88c72a24dd5b154d47dc3fe4784ae259b794569", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5058, "license_type": "no_license", "max_line_length": 78, "num_lines": 318, "path": "/excrayg/leetcode/cpp/epi_algos.cpp", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "#include <iostream>\nusing namespace std;\n#include <cstring>\n#include <map>\n#include <utility>\n#include <vector>\n\ntemplate<typename T>\nstruct Node{\n\tT data;\n\tNode<T>* next; \n\tNode<T>(T _a):data(_a), next(NULL){}\n\n\tstatic void printList(Node<T>* a)\n\t{\n\t\twhile(a != NULL)\n\t\t{\n\t\t\tcout<<a->data<<\"->\";\n\t\t\ta = a->next;\n\t\t}\n\t\tcout<<endl;\n\t}\n\n\tstatic void addList(Node<T>* a1, Node<T>* a2)\n\t{\n\t\tT carry=(T)0, sum;\n\t\tNode<T>* prev = NULL, *head = NULL, *rem;\n\t\tif(!a1)\n\t\t\tprintList(a2);\n\t\tif(!a2)\n\t\t\tprintList(a1);\n\t\tprintList(a1);\n\t\tprintList(a2);\n\n\t\twhile(a1 && a2)\n\t\t{\n\t\t\tsum = (a1->data+a2->data+carry);\n\t\t\tcout<<sum<<endl;\n\t\t\tcarry = sum / 10;\n\t\t\tsum %= 10;\n\t\t\tif(prev)\n\t\t\t{\n\t\t\t\tNode<T>* n = new Node<T>(sum);\n\t\t\t\tprev->next = n;\n\t\t\t\tprev = n;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tNode<T>* n = new Node<T>(sum);\n\t\t\t\tprev = n;\n\t\t\t\thead = prev;\n\t\t\t}\n\t\t\ta1 = a1->next; a2 = a2->next;\n\t\t}\n\t\t// printList(head);\n\n\t\ta1?rem=a1:rem=a2;\n\t\twhile(rem)\n\t\t{\n\t\t\tsum = (rem->data+carry);\n\t\t\tcarry = sum / 10;\n\t\t\tsum %= 10;\n\t\t\tNode<T>* n = new Node<T>(sum);\n\t\t\tprev->next = n;\n\t\t\tprev = n;\n\t\t\trem = rem->next;\n\t\t}\n\t\tif(carry)\n\t\t{\n\t\t\tNode<T>* n = new Node<T>(carry);\n\t\t\tprev->next = n;\n\t\t}\n\n\t\tprintList(head);\n\n\t}\n\n\tstatic void mergeList(Node<T>* a1, Node<T>* a2)\n\t{\n\t\t\n\t\tNode<T>* head1 = a1;\n\t\tNode<T>* head2 = a2;\n\n\t\tNode<T>* prev = NULL, *head=NULL;\n\t\twhile(head1 && head2)\n\t\t{\n\t\t\tcout<<head1->data<<\" 
\"<<head2->data<<endl;\n\t\t\tif(head1->data < head2->data)\n\t\t\t{\n\t\t\t\tif(!head)\n\t\t\t\t\thead = head1;\n\t\t\t\tif(prev)\n\t\t\t\t{\n\t\t\t\t\tprev->next = head1;\n\t\t\t\t}\n\t\t\t\tprev = head1;\n\t\t\t\thead1 = head1->next;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\n\t\t\t\tif(!head)\n\t\t\t\t\thead = head2;\n\t\t\t\tif(prev)\n\t\t\t\t{\n\t\t\t\t\tprev->next = head2;\n\t\t\t\t}\n\t\t\t\tprev = head2;\n\t\t\t\thead2 = head2->next;\n\t\t\t}\n\t\t}\n\t\tcout<<\"Merged\\n\";\n\t\tif(head1)\n\t\t\tprev->next = head1;\n\t\tif(head2)\n\t\t\tprev->next = head2;\n\n\t\tprintList(head);\n\t}\n\n\tstatic void reverseList(Node<T>* head)\n\t{\n\t\tNode<T>* curr = head, *prev = NULL;\n\t\twhile( curr != NULL )\n\t\t{\n\t\t\tNode<T>* temp = curr->next;\n\t\t\tif(prev)\n\t\t\t{\n\t\t\t\tcurr->next = prev;\n\t\t\t\tprev = curr;\n\t\t\t\tcurr = temp;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tprev = curr;\n\t\t\t\tprev->next = NULL;\n\t\t\t\tcurr = temp;\n\t\t\t}\n\t\t}\n\t\tprintList(prev);\n\t}\n\n\tstatic void findOverlappintg(Node<T>* head, Node<T>* tail)\n\t{\n\n\t}\n\n\t// static void reverseSubList(Node<T>* head, int s, int f)\n\t// {\n\t// \tNode<T>* curr = head, *prev = NULL, *list_prev = NULL, *list_curr = NULL;\n\t// \tint ctr = 1;\n\t// \twhile( curr != NULL )\n\t// \t{\n\t// \t\tif( ctr > s && ctr <= f)\n\t// \t\t{\n\t// \t\t\tif(!list_prev)\n\t// \t\t\t\tlist_prev = prev;\n\t// \t\t\tif(!list_curr)\n\t// \t\t\t\tlist_curr = curr;\n\t// \t\t\tNode<T>* temp = curr->next;\n\t// \t\t\tif(prev)\n\t// \t\t\t{\n\t// \t\t\t\tcurr->next = prev;\n\t// \t\t\t\tprev = curr;\n\t// \t\t\t\tcurr = temp;\n\t// \t\t\t}\n\t// \t\t\telse\n\t// \t\t\t{\n\t// \t\t\t\tprev = curr;\n\t// \t\t\t\tprev->next = NULL;\n\t// \t\t\t\tcurr = temp;\n\t// \t\t\t}\n\t// \t\t}\n\t// \t\telse if( ctr <= s)\n\t// \t\t{\n\t// \t\t\tlist_prev = prev;\n\t// \t\t\tprev = curr;\n\t// \t\t\tcurr = curr->next;\n\t// \t\t}\n\t// \t\telse\n\t// \t\t{\n\t// \t\t\tlist_prev->next = prev;\n\t// \t\t\tlist_curr-next = curr;\n\t// \t\t\tbreak;\n\t// \t\t}\n\t// \t\tctr++;\n\t// \t}\n\t// \tprintList(head);\n\t// }\n\n\t// static void reverseKList(Node<T>* head, int k)\n\t// {\n\n\t// }\n\n\n};\n\n\ntemplate <typename t>\nclass Queue\n{\n\tpublic:\n\t\tQueue(t max):max1(max){}\n\t\tvoid Enqueue(t data)\n\t\t{\n\t\t\t//if(data>max) max = data;\n\t\t\ta.push_back(make_pair<t,t>(data, 1));\n\t\t}\n\t\tt Dequeue()\n\t\t{\n\t\t\tif(!b.empty())\n\t\t\t{\n\t\t\t\tpair<t, t> p = b.back(); b.pop_back();\n\n\t\t\t\treturn p.first;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tt max = max1;\n\t\t\t\twhile(!a.empty())\n\t\t\t\t{\n\t\t\t\t\tt p = a.back().first; a.pop_back();\n\t\t\t\t\tif(p > max) max = p;\n\t\t\t\t\tb.push_back(make_pair<t,t>(p, max));\n\t\t\t\t}\n\t\t\t\tif(!b.empty())\n\t\t\t\t{\n\t\t\t\t\tt r = b.back().first;\n\t\t\t\t\tb.pop_back();\n\t\t\t\t\treturn r;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tcout<<\"Queue is empty\\n\";\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tt getMax()\n\t\t{\n\n\t\t\tif(!b.empty())\n\t\t\t{\n\t\t\t\tpair<t, t> p = b.back(); b.pop_back();\n\t\t\t\tb.push_back(p);\n\t\t\t\treturn p.second;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tt max = max1;\n\t\t\t\twhile(!a.empty())\n\t\t\t\t{\n\t\t\t\t\tt p = a.back().first; a.pop_back();\n\t\t\t\t\tif(p > max) max = p;\n\t\t\t\t\tb.push_back(make_pair<t,t>(p, max));\n\t\t\t\t}\n\t\t\t\tif(!b.empty())\n\t\t\t\t{\n\t\t\t\t\tpair<t, t> p = b.back(); b.pop_back();\n\t\t\t\t\tb.push_back(p);\n\t\t\t\t\treturn p.second;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tcout<<\"Max: Queue is 
empty\\n\";\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\tprivate:\n\t\tvector<pair<t,t> > a;\n\t\tvector<pair<t,t> > b;\n\t\tt max1;\n\n};\n\n\n\nint main(int argc, char const *argv[])\n{\n\t\n\t// char* s = \"bbaacd\";\n\t// replaceAndDelete(s);\n\tNode<int>* a1 = new Node<int>(9);\n\tNode<int>* a2 = new Node<int>(9);\n\tNode<int>* a3 = new Node<int>(9);\n\tNode<int>* a4 = new Node<int>(9);\n\tNode<int>* a5 = new Node<int>(9);\n\n\ta1->next = a2;\n\ta2->next = a3;\n\ta4->next = a5;\n\n\n\tNode<int>::printList(a1);\n\tNode<int>::printList(a4);\n\n\t// printList<int>(a1);\n\t//Node<int>::mergeList(a1, a4);\n\t//Node<int>::reverseList(a1);\n\n\tcout<<\"addList\\n\";\n\tNode<int>::printList(a1);\n\tNode<int>::printList(a4);\n\tNode<int>::addList(a1,a4);\n\n\n\tcout<<\"Queue\\n\";\n\tQueue<int> q(-1000);\n\tq.Enqueue(5);\n\tq.Enqueue(1);\n\tq.Enqueue(2);\n\tcout<<\"\\n\"<<q.Dequeue()<<\"\\n\";\n\tcout<<\"\\n\"<<q.getMax()<<\"\\n\";\n\tcout<<\"\\n\"<<q.Dequeue()<<\"\\n\";\n\n\treturn 0;\n}" }, { "alpha_fraction": 0.4913194477558136, "alphanum_fraction": 0.5555555820465088, "avg_line_length": 20.296297073364258, "blob_id": "bba783b886ba44a039796d51698dd5707fecb1ab", "content_id": "d2f9870921fc603fdb411d3d08652501ba79f112", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 576, "license_type": "no_license", "max_line_length": 47, "num_lines": 27, "path": "/excrayg/leetcode/python/find_longest_arr.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\ndef find_max_arr(input_arr):\n \n start_idx = 0\n max_len = float(\"-inf\")\n\n h = {}\n\n for idx, elem in enumerate(input_arr):\n if elem in h:\n last_occur_idx = h[elem]\n if last_occur_idx >= start_idx:\n start_idx = last_occur_idx + 1\n h[elem] = idx\n else:\n h[elem] = idx\n max_len = max(max_len, idx - start_idx + 1)\n \n return max_len\n\narr = [5,7,5,9,11,2,11,10,9]\nprint(find_max_arr(arr))\narr = [1,2,3,4,4,5,6,7,8]\nprint(find_max_arr(arr))\narr = [1,1,1]\nprint(find_max_arr(arr))\narr = [1,2,3,4,1,6,7,4,5,5]\nprint(find_max_arr(arr))\n" }, { "alpha_fraction": 0.389032244682312, "alphanum_fraction": 0.4003225862979889, "avg_line_length": 23.2265625, "blob_id": "2d99c14bb7904cf310da413df014699554d2647c", "content_id": "d9e2584fc745b392a76ba072474716252fe62877", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3100, "license_type": "no_license", "max_line_length": 102, "num_lines": 128, "path": "/excrayg/leetcode/cpp/pass.cpp", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "#include <cstddef>\n#include <iostream>\n\nusing namespace std;\n\n\nsize_t max(int a, int b)\n{\n return a>b?a:b;\n}\n\nsize_t abs1(int x)\n{\n if(x < 0)\n {\n return -1*x;\n }\n \n return x;\n}\n\nclass Solution {\npublic:\n\n size_t get_num_repeating_sets(const string& s)\n {\n size_t n = s.length();\n char prev = NULL, curr = NULL;\n size_t i = 0;\n size_t num_repeat = 0, num_succ = 0;\n while(i < n)\n {\n curr = s[i];\n if(prev == NULL)\n {\n prev = curr;\n num_succ += 1;\n }\n else\n {\n if(prev == curr)\n {\n num_succ += 1;\n if(num_succ == 3)\n {\n num_repeat += 1;\n prev = NULL;\n num_succ = 0;\n }\n }\n else\n {\n prev = curr;\n num_succ = 1;\n }\n }\n i+=1;\n }\n \n return num_repeat;\n }\n \n size_t get_num_unmet_conditions(const string& s)\n {\n size_t i = 0;\n size_t n = s.length();\n \n size_t l = 0, u = 0, d = 0;\n \n while(i < n)\n {\n char c = s[i];\n if(c >= 'a' && c <= 'z')\n {\n if(l == 0) l += 1;\n }\n else 
if(c >= 'A' && c <= 'Z')\n {\n if(u == 0) u += 1;\n }\n else if(c >= '0' && c <= '9')\n {\n if(d == 0) d += 1;\n }\n i+=1;\n }\n \n return max(0, 3-(l+u+d));\n }\n \n int strongPasswordChecker(string s) {\n \n size_t min_len = 6;\n size_t max_len = 20;\n \n size_t len = s.length();\n \n if(len < 6)\n {\n size_t chars_to_add_or_remove = min_len - len;\n size_t num_repeating_sets = get_num_repeating_sets(s);\n size_t num_unmet_conditions = get_num_unmet_conditions(s);\n // cout<<chars_to_add_or_remove<<\" \"<<num_repeating_sets<<\" \"<<num_unmet_conditions<<endl;\n return max(max(num_repeating_sets, num_unmet_conditions), chars_to_add_or_remove);\n }\n else if(len > 20)\n {\n size_t chars_to_add_or_remove = len - max_len;\n size_t num_repeating_sets = get_num_repeating_sets(s);\n size_t num_unmet_conditions = get_num_unmet_conditions(s);\n return max(max(num_repeating_sets, num_unmet_conditions), chars_to_add_or_remove);\n }\n else\n {\n size_t num_repeating_sets = get_num_repeating_sets(s);\n size_t num_unmet_conditions = get_num_unmet_conditions(s);\n cout<<\" \"<<num_repeating_sets<<\" \"<<num_unmet_conditions<<endl;\n return max(num_repeating_sets, num_unmet_conditions);\n }\n }\n};\n\nint main()\n{\n\n cout<<Solution().strongPasswordChecker(\"aaAA11\");\n return 0;\n}" }, { "alpha_fraction": 0.351919561624527, "alphanum_fraction": 0.35923218727111816, "avg_line_length": 22.80434799194336, "blob_id": "1b58be52204d69ee8732795d4b0bf1ed8ac9f5fd", "content_id": "5a6b33b98077933cddd3bfc74928443aa3b3fbc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1094, "license_type": "no_license", "max_line_length": 50, "num_lines": 46, "path": "/excrayg/leetcode/python/reverse_words.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "class Solution:\n # @param s, a string\n # @return a string\n def reverseWords(self, s):\n s = s.strip()\n if s == \"\":\n return s\n r = s[::-1]\n si = 0\n spaceFound = False\n r = list(r)\n n = len(r)\n ei = 0\n while ei < n:\n if r[ei] == \" \":\n if spaceFound:\n del r[ei]\n n-=1\n continue\n else:\n spaceFound = True\n self.reverseWord(r, si, ei)\n else:\n if spaceFound:\n si = ei\n spaceFound = False\n ei+=1\n \n print(r, si)\n self.reverseWord(r, si, len(r)-1) \n \n return \"\".join(r)\n \n def reverseWord(self, r, si, ei):\n if si <= ei:\n return r\n else:\n temp = r[si]\n r[si] = r[ei]\n r[ei] = temp\n print(r)\n return self.reverseWord(r, si+1, ei-1)\n\n\ns = Solution()\nprint(s.reverseWords(\"hi!\"))" }, { "alpha_fraction": 0.46042296290397644, "alphanum_fraction": 0.4658610224723816, "avg_line_length": 39.39024353027344, "blob_id": "85031cea7559e121ebd0a4b4ebaf7d70615dc422", "content_id": "ab07539d3fe52d87fccc50166fc55429430b3b4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1655, "license_type": "no_license", "max_line_length": 68, "num_lines": 41, "path": "/excrayg/leetcode/python/word_ladder_ii_reference.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "class Solution:\n # param Start, a String \n # param End, a String \n # param dict, a set of String \n # return a List of lists of String \n def findLadders (self, Start, End, dict):\n def buildpath ( path, Word):\n if len (prevMap [Word]) == 0:\n path.append (Word); currPath = path [:]\n currPath.reverse (); result.append (currPath)\n path.pop ();\n return\n path.append (word)\n for iter in prevMap [Word]:\n buildpath (path, iter)\n path.pop 
()\n \n result = []\n prevMap = {}\n length = len (Start)\n for i in dict:\n prevMap [i] = []\n candidates = [set (), set ()]; current = 0; Previous = 1\n candidates [current] .add (start)\n while True:\n current, Previous = Previous, current\n for i in candidates [Previous]: dict.remove (i)\n candidates [current] .clear ()\n for Word in candidates [Previous]:\n for i in range (length):\n part1 = Word [: i]; part2 = Word [i + 1 :]\n for J in ' abcdefghijklmnopqrstuvwxyz ' :\n if Word [i] =! J:\n nextword = J + part1 + part2\n if nextword in dict:\n prevMap [nextword] .append (word)\n candidates [current] .add (nextword)\n if len (candidates [current]) == 0: return result\n if End in candidates [current]: break\n buildpath ([], end)\n return result" }, { "alpha_fraction": 0.3031988739967346, "alphanum_fraction": 0.3226703703403473, "avg_line_length": 14.276596069335938, "blob_id": "081b8a37029f49b918ed8e201f9e1a0b32774985", "content_id": "46bad5deeee749168c3883d8cb83998b5842c904", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 719, "license_type": "no_license", "max_line_length": 32, "num_lines": 47, "path": "/excrayg/leetcode/cpp/sqrt.cpp", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\n#include <iostream>\n#include <cmath>\nusing namespace std;\n\nclass Solution {\npublic:\n /**\n * @param x: An integer\n * @return: The sqrt of x\n */\n int sqrt(long x) {\n // write your code here\n \n long m = 0;\n long s = 0;\n long e = x;\n \n while(s<=e)\n {\n m = s +(e-s)/2;\n cout<<m<<s<<e<<endl;\n if(m*m == x)\n {\n return m;\n }\n else if (m*m < x)\n {\n s = m+1;\n }\n else\n {\n e = m-1;\n }\n }\n \n return m;\n }\n};\n\n\nint main()\n{\n\n Solution s;\n s.sqrt(999999999);\n\n}\n" }, { "alpha_fraction": 0.564257025718689, "alphanum_fraction": 0.5652610659599304, "avg_line_length": 24.202531814575195, "blob_id": "215f75ea8f209a8b3e22b66940fd50ba572ac1d3", "content_id": "4387ad50884a8c8eb96e3df956156cecc0889191", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1992, "license_type": "no_license", "max_line_length": 97, "num_lines": 79, "path": "/excrayg/leetcode/python/graphs.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\nfrom collections import deque\n\ngraph = {'A': ['B', 'C'],\n 'B': ['C', 'D'],\n 'C': ['A','G'],\n 'D': ['E'],\n 'E': ['F'],\n 'F': ['C']}\n\ndef find_path(graph, start, end, path=[]):\n path = path + [start]\n if start == end:\n return path\n if start not in graph:\n return None\n for node in graph.get(start, []):\n if node not in path:\n newpath = find_path(graph, node, end, path)\n if newpath: return newpath\n return None\n\ndef findpathfromprev(start, end, path_finder):\n path = []\n while end != start:\n path = path + [end]\n end = path_finder[end]\n path = path + [start]\n path.reverse()\n return path\n\ndef find_path_dfs(graph, start, end):\n stack = []\n visited = set()\n path_finder = {}\n\n stack = stack + [start]\n while len(stack) != 0:\n \n node = stack.pop()\n visited.add(node)\n if node == end:\n return findpathfromprev(start, end, path_finder)\n else:\n #path.pop()\n for adj_node in reversed(graph.get(node, [])):\n if adj_node not in visited:\n stack.append(adj_node)\n path_finder[adj_node] = node\n\n return None\n\ndef find_path_bfs(graph, start, end):\n q = deque()\n path_finder = {}\n q.append(start)\n path_finder[start] = None\n\n while len(q) != 0:\n node = q.popleft()\n if node == end:\n return 
findpathfromprev(start, end, path_finder)\n else:\n for adj_node in graph.get(node, []):\n if adj_node not in path_finder:\n q.append(adj_node)\n path_finder[adj_node] = node\n\n return None \n\nnodes = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\"]\nfor start in nodes:\n for end in nodes:\n dfs = find_path(graph, start, end)\n dfs_iter = find_path_dfs(graph, start, end)\n print(dfs, dfs_iter)\n assert dfs == dfs_iter\n print(\"DFS: start: {} end: {} path: {}\".format(start, end, dfs))\n print(\"DFS_ITER: start: {} end: {} path: {}\".format(start, end, dfs_iter))\n print(\"BFS: start: {} end: {} path: {}\".format(start, end, find_path_bfs(graph, start, end)))\n" }, { "alpha_fraction": 0.3607696294784546, "alphanum_fraction": 0.3709246516227722, "avg_line_length": 23.272727966308594, "blob_id": "e3a00cc30a3dc6d89334dadbbc5644fc59e93929", "content_id": "233d80d8eab77d74032402ffe5004b9574969b38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1871, "license_type": "no_license", "max_line_length": 77, "num_lines": 77, "path": "/excrayg/leetcode/python/lev.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\ndef h(A, a, B, b, d, solns):\n\n if (a,b) in d:\n return d[(a,b)]\n\n if a == len(A):\n d[(a,b)] = len(B) - b\n solns[(a,b)] = 'i'\n return d[(a,b)]\n\n if b == len(B):\n d[(a,b)] = len(A) - a\n solns[(a,b)] = 'd'\n return d[(a,b)]\n\n\n if A[a] == B[b]:\n d[(a,b)] = h(A, a+1, B, b+1, d, solns)\n solns[(a,b)] = 's'\n return d[(a,b)]\n else:\n i = h(A, a, B, b+1, d, solns) + 1\n de = h(A, a+1, B, b, d, solns) + 1\n r = h(A, a+1, B, b+1, d, solns) + 1\n\n if i <= de and i <= r:\n solns[(a,b)] = 'i'\n d[(a,b)] = i\n\n if de <= i and de <= r:\n solns[(a,b)] = 'd'\n d[(a,b)] = de\n\n if r <= i and r <= de:\n solns[(a,b)] = 'r'\n d[(a,b)] = r\n\n return d[(a,b)]\n\ndef edit_distance(A, B):\n solns = {}\n di = {}\n d = h(A, 0, B, 0, di, solns)\n # print(di)\n print(\"Min distance: {}\".format(d))\n # print(solns)\n a = 0\n b = 0\n while a < len(A) and b < len(B):\n soln = solns[(a,b)]\n if soln == 's':\n print(\"Step A: {} and B: {}\".format(a,b))\n a+=1\n b+=1\n continue\n if soln == 'r':\n print(\"replace A: {} and B: {} with value: {}\".format(a,b, B[b]))\n a+=1\n b+=1\n continue\n if soln == 'i':\n print(\"Insert A: {} and B: {} with value: {}\".format(a,b, B[b]))\n b+=1\n continue\n if soln == 'd':\n print(\"Delete A: {} and B: {} with value: {}\".format(a,b, A[a]))\n a+=1\n continue\n\n if a == len(A):\n print(\"Insert {} chars from B\".format(len(B)-b))\n\n if b == len(B):\n print(\"Delete {} chars from A\".format(len(A)-a))\n\n# edit_distance('anshuman', 'antihuman')\nedit_distance('abcd', 'abc')\n\n" }, { "alpha_fraction": 0.650719940662384, "alphanum_fraction": 0.658151388168335, "avg_line_length": 19.30188751220703, "blob_id": "5f81e22990eaeba3177e676ae0085316fec3dac1", "content_id": "91b707574ec49625df3fc0530d819e52fb99c221", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2153, "license_type": "no_license", "max_line_length": 58, "num_lines": 106, "path": "/excrayg/leetcode/python/heaps.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\n\n#Elements of programming interviews\n#Heap structure\nclass Node:\n\tdef __init__(self, val, l=None, r=None, p=None):\n\t\tself.data = val\n\t\tself.left = l\n\t\tself.right = r\n\t\tself.parent = p\n\ndef find_node(root):\n\t\n#insert - insert on rightmost node and upheap.\ndef 
insert(root, elem):\n\tif root == None:\n\t\treturn root\n\troot = find_node(root)\n\tnode = Node(elem, None, None, root)\n\troot.right = node\n\tupheap(node)\n\ndef remove(root):\n\tif root == None:\n\t\treturn root\n\tif root.left == None and root.right==None:\n\t\tt = root.data\n\t\troot = None\n\t\treturn t\n\tretval = root.data\n\ttemp = root\n\troot = find_node(root)\n\ttemp.data = root.data\n\troot.parent.right = None\n\tdownheap(temp)\n\treturn retval\n\ndef downheap(root):\n\tif(root==None):\n\t\treturn root\n\tchild_val = None\n\tchild_node = None\n\tif root.left == None and root.right == None:\n\t\treturn root\n\tif(root.left == None):\n\t\tchild_val = root.right.data\n\t\tchild_node = root.right\n\telif(root.right == None):\n\t\tchild_val = root.left.data\n\t\tchild_node = root.left\n\telse:\n\t\tif(root.left.data < root.right.data ):\n\t\t\tchild_val = root.left.data\n\t\t\tchild_node = root.left\n\t\telse:\n\t\t\tchild_val = root.right.data\n\t\t\tchild_node = root.right\n\n\tif(root.data > child_val):\n\t\tchild_node.data = root.data\n\t\troot.data = child_val\n\t\tdownheap(child_node)\n\n\n# def swap(root, node):\n# \ttemp = root\n# \troot.parent = node.parent\n# \troot.left = node.left\n# \troot.right = node.right\n# \troot.data = node.data\n# \tnode.parent = temp.parent\n# \tnode.left = temp.left\n# \tnode.right = temp.right\n# \tnode.data = temp.data\n# \tif node.left != None:\n# \t\tnode.left.parent\n\ndef upheap(node):\n\tif(node == None):\n\t\treturn\n\tif(node.parent != None and node.parent.data > node.data):\n\t\tt = node.data\n\t\tx = node.parent.data\n\t\tnode.parent.data = t\n\t\tnode.data = x\n\t\t# swap(node.parent, node)\n\t\tupheap(node.parent)\n\n\ndef print_tree(root):\n\tif(root==None):\n\t\treturn\n\tprint_tree(root.left)\n\tprint(\"node: %d\" %root.data)\n\tprint_tree(root.right)\n\n\n\ndef construct_heap(seq):\n\troot = Node(seq[0])\n\tfor i in range(1, len(seq)):\n\t\tinsert(root, seq[i])\n\tprint_tree(root)\n\tfor i in range(len(seq)):\n\t\tprint(remove(root))\n\nseq = [4,9,8,17,26, 50,16,19]\nconstruct_heap((seq[::-1]))" }, { "alpha_fraction": 0.5823438167572021, "alphanum_fraction": 0.5869901776313782, "avg_line_length": 26.26760482788086, "blob_id": "063fefa98149f147912e016d0f915a859832023d", "content_id": "32275b0b18cb03096b33d859f3b0c7785017da97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1937, "license_type": "no_license", "max_line_length": 77, "num_lines": 71, "path": "/excrayg/leetcode/python/lvl.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\n\n\nclass Solution:\n\n '''\n @param root: An object of TreeNode, denote the root of the binary tree.\n This method will be invoked first, you should design your own algorithm \n to serialize a binary tree which denote by a root node to a string which\n can be easily deserialized by your own \"deserialize\" method later.\n '''\n def serialize(self, root):\n # write your code here\n \n if root == None:\n return \"#,\"\n p = str(root.val)+\",\"\n p+=self.serialize(root.left)\n p+=self.serialize(root.right)\n return p\n\n '''\n @param data: A string serialized by your serialize method.\n This method will be invoked second, the argument data is what exactly\n you serialized at method \"serialize\", that means the data is not given by\n system, it's given by your own serialize method. 
So the format of data is\n designed by yourself, and deserialize it here as you serialize it in \n \"serialize\" method.\n '''\n\n def d_h(self, nodes, i):\n if i >= len(nodes):\n return None\n\n if nodes[i] == \"#\":\n return None\n\n root = TreeNode(int(nodes[i]))\n root.left = self.d_h(nodes, 2*i+1)\n root.right = self.d_h(nodes, 2*i+2)\n return root\n\n def deserialize(self, data):\n # write your code here\n data = data.strip()\n data=data[:len(data)-1]\n nodes = data.split(\",\")\n \n head = self.d_h(nodes, 0)\n \n return head \n\n def inorder(self, root):\n if root:\n self.inorder(root.left)\n print(root.val)\n self.inorder(root.right)\n\ns = Solution()\nroot = TreeNode(1)\nleft = TreeNode(2)\nright = TreeNode(3)\nroot.left = left\nroot.right = right\nt=s.serialize(root)\nprint(t)\ns.inorder(s.deserialize(t))\n" }, { "alpha_fraction": 0.5132817029953003, "alphanum_fraction": 0.5185390114784241, "avg_line_length": 28.876033782958984, "blob_id": "1732514164b561b9c9ca184d63eaa707e513c5d5", "content_id": "a2eb692a9f2d3915edf158f802e50a23a4992600", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3614, "license_type": "no_license", "max_line_length": 250, "num_lines": 121, "path": "/excrayg/leetcode/cpp/todo/combinations_of_string.cpp", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "// #13.13 Write a function that takes as input a string s and a list L of equal length strings and returns all substrings of S which are contactenation of all the strings in L. Each string in L must appear exactly once and the ordering is immaterial.\n\n\n/**\n * You are given a string, S, and a list of words, L, that are all of the same length. \n * Find all starting indices of substring(s) in S that is a concatenation of each word in L exactly once and without any intervening characters.\n * For example, given:\n * S: \"barfoothefoobarman\"\n * L: [\"foo\", \"bar\"]\n * You should return the indices: [0,9].\n * (order does not matter).\n */\n \n \n #include <iostream>\n #include <string>\n #include <vector>\n #include <unordered_map>\n using namespace std;\n \n int search_from_idx(const string s, const int len, int curr_idx, int num_of_words_found, unordered_map<string, int>& map_of_words)\n {\n cout << \"String: \" << s << \" Curr Idx: \" << curr_idx << \" Length: \" << len << endl;\n if(num_of_words_found == map_of_words.size())\n {\n return curr_idx;\n }\n if(curr_idx+len > s.length())\n {\n return -1;\n }\n string cand = s.substr(curr_idx, len);\n cout<<\" Candidate: \" << cand << endl;\n auto it = map_of_words.find(cand);\n if(it == map_of_words.end())\n {\n return -1;\n }\n else\n {\n int num_count = it->second;\n if(num_count == 0)\n {\n num_of_words_found+=1;\n it->second = 1;\n }\n else\n {\n return -1;\n }\n }\n return search_from_idx(s, len, curr_idx+len, num_of_words_found, map_of_words);\n }\n \n vector<string> get_substrings(const string s, const vector<string>& list_of_words)\n {\n vector<string> candidates;\n int num_chars = s.length();\n int len = list_of_words[0].length();\n int num_words = list_of_words.size();\n \n for(int start_idx = 0; start_idx < num_chars-len; start_idx++)\n {\n unordered_map<string, int> map_of_words;\n for(const auto& word: list_of_words)\n {\n map_of_words[word] = 0;\n }\n // string cand = s.substring(start_idx, start_idx + len);\n int num_words_found = 0;\n int end_idx = search_from_idx(s, len, start_idx, num_words_found, map_of_words);\n if(end_idx != -1)\n {\n 
candidates.emplace_back(s.substr(start_idx, end_idx-start_idx));\n }\n }\n \n return candidates;\n }\n \n \n int main()\n {\n // while(1)\n // {\n // string s;\n // int n;\n // vector<string> list_of_words;\n // cout << \"Enter a string: \";\n // getline(cin, s);\n // if(s.empty())\n // {\n // cout << \"Exiting\\n\";\n // break;\n // }\n // cout << \"\\nEnter number of words in list: \";\n // cin >> n;\n // for(int i = 0; i < n; i++)\n // {\n // string l;\n // cin >> l;\n // list_of_words.emplace_back(l);\n // }\n // vector<string> candidates = get_substrings(s, list_of_words);\n // cout << \"\\nPrinting all candidates: \";\n // for(const auto& cand : candidates)\n // {\n // cout << cand << endl;\n // }\n // }\n \n string s = \"foobbarthebarfoo\";\n vector<string> list_of_words = {\"foo\", \"bar\"};\n \n vector<string> candidates = get_substrings(s, list_of_words);\n cout << \"\\nPrinting all candidates: \";\n for(const auto& cand : candidates)\n {\n cout << cand << endl;\n }\n }" }, { "alpha_fraction": 0.5218623280525208, "alphanum_fraction": 0.5287449359893799, "avg_line_length": 20.119657516479492, "blob_id": "8451434645d4ae17be9b6e0c38452c76f081a282", "content_id": "49c27d4060ffd91379d30499978506c44664dd4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2470, "license_type": "no_license", "max_line_length": 109, "num_lines": 117, "path": "/excrayg/leetcode/cpp/check_if_valid_bin_tree.cpp", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "// Trees/Graphs\n// Given a head to a Graph where each Node only has 2 neighbors each, determine if it is a valid binary tree.\n#include <unordered_map>\n#include <unordered_set>\n#include <vector>\n#include <stdexcept>\n#include <iostream>\n#include <string>\nusing namespace std;\n\n\nclass Graph\n{\n \n public:\n void add_node(int u);\n void add_edge(int u, int v);\n bool is_valid_binary_tree();\n private:\n unordered_map<int, vector<int>> _graph;\n};\n\nvoid Graph::add_node(int u)\n{\n auto itr = _graph.find(u);\n if(itr != _graph.end())\n {\n throw runtime_error(\"Error: Node already in graph\");\n }\n else\n {\n _graph[u] = {};\n }\n}\n\nvoid Graph::add_edge(int u, int v)\n{\n auto itr1 = _graph.find(u);\n auto itr2 = _graph.find(v);\n if(itr1 == _graph.end() || itr2 == _graph.end())\n {\n throw runtime_error(\"Error: Node not found in graph\");\n }\n else\n {\n _graph[u].emplace_back(v);\n }\n}\n\nbool Graph::is_valid_binary_tree()\n{\n int root_node = 1;\n vector<int> stack;\n unordered_set<int> visited;\n stack.emplace_back(root_node);\n while(!stack.empty())\n {\n int node = stack.back();\n cout<<\"Visiting node: \"<<node<<endl;\n stack.pop_back();\n auto iter = visited.find(node);\n if(iter != visited.end())\n {\n cout<<\"Cycle found: Invalid binary tree\"<<endl;\n return false;\n }\n visited.insert(node);\n int num_children = _graph[node].size();\n if(num_children > 2)\n {\n cout<<\"More than one children: Invalid binary tree\"<<endl;\n return false;\n }\n else\n {\n for(auto child: _graph[node])\n {\n stack.emplace_back(child);\n }\n }\n }\n \n for(auto& kv: _graph)\n {\n auto itr = visited.find(kv.first);\n if(itr == visited.end())\n {\n auto msg = \"Node \" + to_string(kv.first) + \" not visited, Invalid binary tree\";\n cout<<msg<<endl;\n return false;\n }\n }\n return true;\n}\n\nint main()\n{\n Graph graph;\n graph.add_node(1);\n graph.add_node(2);\n graph.add_node(3);\n\n graph.add_edge(1,2);\n graph.add_edge(1,3);\n graph.add_edge(1,4);\n\n 
if(graph.is_valid_binary_tree())\n    {\n        cout<<\"Graph is a valid binary tree\"<<endl;\n    }\n    else\n    {\n        cout<<\"Graph is invalid binary tree\"<<endl;\n    }\n    \n    return 0;\n}" }, { "alpha_fraction": 0.3753753900527954, "alphanum_fraction": 0.3821321427822113, "avg_line_length": 21.94827651977539, "blob_id": "b990ad0046544efd406e377d76232388c4966129", "content_id": "b7f97f295e75c5db136f92fc7182b5e0001b2e97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1332, "license_type": "no_license", "max_line_length": 83, "num_lines": 58, "path": "/excrayg/leetcode/cpp/3sum.cpp", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\n#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nclass Solution {\npublic: \n    /**\n     * @param numbers : Given an array of n integers\n     * @return : Find all unique triplets in the array which give the sum of zero.\n     */\n    vector<vector<int> > threeSum(vector<int> &nums) {\n        // write your code here\n        std::sort(nums.begin(), nums.end());\n        vector<vector<int> > res;\n        int n = nums.size();\n        for(int i = 0; i < n-2; i++)\n        {\n            int j = i+1;\n            int k = n-1;\n            while(j<k)\n            {\n                \n                vector<int> p;\n                int s = nums[i] + nums[j] + nums[k];\n                if(s==0)\n                {\n                    p.push_back(nums[i]);\n                    p.push_back(nums[j]);\n                    p.push_back(nums[k]);\n                    std::sort(p.begin(), p.end());\n                    res.push_back(p);\n                    // advance both pointers past this hit so the scan terminates\n                    j++;\n                    k--;\n                }\n                else if(s > 0)\n                {\n                    k--;\n                }\n                else\n                {\n                    j++;\n                }\n            }\n        }\n        \n        return res;\n    }\n};\n\nint main()\n{\n    Solution s;\n    vector<int> a;\n    a.push_back(-1);\n    a.push_back(1);\n    a.push_back(0);\n\n    s.threeSum(a);\n}\n" }, { "alpha_fraction": 0.5806988477706909, "alphanum_fraction": 0.6106489300727844, "avg_line_length": 20.35714340209961, "blob_id": "b0a7cf5bb291ef5fb330e82925b0a4c51dfa1be9", "content_id": "2553296666a86fd2e4bbb7348e8d21a6d8c64a77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 601, "license_type": "no_license", "max_line_length": 51, "num_lines": 28, "path": "/excrayg/leetcode/python/find_max_subarray.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\n\ndef find_max_subarray(a):\n\t#edge case, if all numbers are negative. 
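The two-pointer scan in the 3sum record above is the standard approach; a defensive variant (an illustrative Python sketch, not the repo's code) moves both pointers after each hit and skips equal neighbours, so every triplet is reported once and the inner loop always terminates:

def three_sum(nums):
    nums = sorted(nums)
    out = []
    for i in range(len(nums) - 2):
        if i and nums[i] == nums[i - 1]:  # skip duplicate anchors
            continue
        j, k = i + 1, len(nums) - 1
        while j < k:
            s = nums[i] + nums[j] + nums[k]
            if s == 0:
                out.append([nums[i], nums[j], nums[k]])
                j += 1
                k -= 1
                while j < k and nums[j] == nums[j - 1]:  # skip duplicate middles
                    j += 1
            elif s > 0:
                k -= 1
            else:
                j += 1
    return out

# three_sum([-1, 1, 0]) -> [[-1, 0, 1]]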
\n\tcurr_sum = 0 \n\tmax_sum = a[0]\n\n\tfor i in a:\n\t\tcurr_sum += i\n\t\tif curr_sum < 0:\n\t\t\tcurr_sum = 0\n\t\tmax_sum = max(max_sum, curr_sum)\n\n\treturn max_sum\n\ndef find_max_subarray1(a):\n\tmax_ending_here = a[0]\n\tmax_so_far = a[0]\n\n\tfor i in range(1, len(a)):\n\t\tprint(max_so_far, max_ending_here)\n\t\tmax_ending_here = max(max_ending_here+a[i], a[i])\n\t\t# max_ending_here += a[i]\n\t\tmax_so_far = max(max_so_far, max_ending_here)\n\n\treturn max_so_far\n\na = [-2, -1, -3, -4, -1, 2, 1, -5, 4]\nprint(find_max_subarray(a))\nprint(find_max_subarray1(a))\n\n" }, { "alpha_fraction": 0.48024314641952515, "alphanum_fraction": 0.5440729260444641, "avg_line_length": 15.399999618530273, "blob_id": "3694e67bff633630c9d6caf95f722b5710dce359", "content_id": "233059b981c2c92310507e662b355baa43d3e52a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 329, "license_type": "no_license", "max_line_length": 31, "num_lines": 20, "path": "/excrayg/leetcode/python/max_prod.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\n\ndef max_prod(a):\n\tprint(a)\n\tmx1 = a[0]\n\tmn1 = a[0]\n\tmaxAns = a[0]\n\tfor i in range(1, len(a)):\n\t\tt = a[i]\n\t\tmx = mx1\n\t\tmn = mn1 \n\t\tmx1 = max(max(mx*t, t), mn*t)\n\t\tmn1 = min(min(mx*t, t), mn*t)\n\t\tmaxAns = max(mx1, maxAns)\n\t\tprint(mx1, mn1, maxAns)\n\treturn maxAns\n\na=[2,3,-2,-4]\nprint(max_prod(a))\n\na=[2,3,-2,4]\nprint(max_prod(a))" }, { "alpha_fraction": 0.565858781337738, "alphanum_fraction": 0.6016860008239746, "avg_line_length": 19.521739959716797, "blob_id": "496372c5971c65f393fb0e64d507a8532a1db895", "content_id": "753f54e27332d7d3fad4f6efc1792d681e7ea1d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 949, "license_type": "no_license", "max_line_length": 48, "num_lines": 46, "path": "/excrayg/leetcode/python/bin_search_rotated.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\n\n# [1,5,7,9,10]\n\n# [7,9,10,1,5]\n\ndef bin_search(arr, start, end, val):\n\tif start >= end:\n\t\treturn False\n\n\tmid = start + int((end-start)/2)\n\tif arr[mid] == val:\n\t\treturn True\n\n\tif val < arr[mid]:\n\t\treturn bin_search(arr, start, mid-1, val)\n\telse:\n\t\treturn bin_search(arr, mid+1, end, val)\n\n\ndef mod_bin_search(arr, start, end, val):\n\t\n\tif start >= end:\n\t\treturn False\n\n\tmid = start + int((end-start)/2)\n\tif arr[mid] == val:\n\t\treturn True\n\n\tif arr[start] <= arr[mid]:\n\t\tif val > arr[mid]:\n\t\t\treturn mod_bin_search(arr, mid+1, end, val)\n\t\telse:\n\t\t\tif arr[start] <= val:\n\t\t\t\treturn bin_search(arr, start, mid-1, val)\n\t\t\telse:\n\t\t\t\treturn mod_bin_search(arr, mid+1, end, val)\n\telse:\n\t\tif val > arr[mid]:\n\t\t\treturn bin_search(arr, mid+1, end, val)\n\t\telse:\n\t\t\treturn mod_bin_search(arr, start, mid-1, val)\n\nv=[7,9,10,1,5]\nfor i in v:\n\tprint(mod_bin_search(v, 0, len(v), 5))\nprint(mod_bin_search(v, 0, len(v), 0))\nprint(mod_bin_search(v, 0, len(v), 15))\n\n\n\n" }, { "alpha_fraction": 0.36918604373931885, "alphanum_fraction": 0.39244186878204346, "avg_line_length": 25.423076629638672, "blob_id": "201758e150fdbf4135a6b6dd9678cd4da1bbac0e", "content_id": "f381d95205540ec205a13683774db4e5f3ad1aac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 688, "license_type": "no_license", "max_line_length": 71, "num_lines": 26, "path": 
"/excrayg/leetcode/python/partition.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "class Solution:\n # @param A : string\n # @return an integer\n def minCut(self, A):\n n = len(A)\n if n == 0:\n return 0\n table = [ [0]*(n+1) for i in range(n+1) ]\n for length in range(1, n+1):\n s = 0\n e = length\n while s+length <= n:\n if A[s:e] == A[s:e][::-1]:\n table[s][e] = 0\n else:\n table[s][e] = min(table[s][e-1], table[s+1][e]) + 1\n \n s+=1\n e+=1\n from pprint import pprint\n pprint(table) \n return table[0][n]\n\ns=Solution()\nprint(s.minCut(\"ababb\"))\nprint(s.minCut(\"AB\"))\n\n" }, { "alpha_fraction": 0.5998857021331787, "alphanum_fraction": 0.6070305705070496, "avg_line_length": 19.916168212890625, "blob_id": "0d0517411c206b523b8e07705db6197123067a6f", "content_id": "7d631f8e2f9f63485d550cb96c42677ed5c8d76e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3499, "license_type": "no_license", "max_line_length": 103, "num_lines": 167, "path": "/excrayg/leetcode/python/dfs_adm.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\nimport sys\n\nclass EdgeNode:\n\tdef __init__(self, _y, _wt=0, _next=None):\n\t\tself.y = _y\n\t\tself.weight = _wt\n\t\tself.next = _next\n\n\nclass Graph:\n\tdef __init__(self, num_vertices=0, num_edges = 0, directed = False):\n\t\tself.num_vertices = num_vertices\n\t\tself.num_edges = num_edges\n\t\tself.directed = directed\n\t\tself.edges = {}\n\t\tself.degree = {}\n\t\tself.vertices = set()\n\n\t#nv ne\n\t#e1 e2\n\tdef read(self, fname):\n\t\twith open(fname) as f:\n\t\t\tfl = f.readlines()\t\n\t\t\t\n\t\tassert(len(fl)>=2)\n\t\tl = list(map( lambda x: int(x), fl[0].split()))\n\t\tself.num_vertices = l[0]\t\t\t\n\t\tnum_edges = l[1]\n\n\t\tfor i in range(num_edges):\n\t\t\tl = list(map( lambda x: int(x), fl[i+1].split()))\n\t\t\tself.insert_edge(l[0], l[1], self.directed)\n\t\t\tself.vertices.add(l[0])\n\t\t\tself.vertices.add(l[1])\n\n\t\tfor i in self.vertices:\n\t\t\tif i not in self.edges:\n\t\t\t\tself.edges[i] = None\n\n\n\tdef insert_edge(self, x, y, directed):\n\t\te = EdgeNode(y)\n\t\tif x in self.edges:\n\t\t\tt = self.edges[x]\n\t\t\te.next = t\n\t\t\tself.edges[x] = e\n\t\telse:\n\t\t\tself.edges[x] = e\n\n\t\tif x in self.degree:\n\t\t\tself.degree[x]+=1\n\t\telse:\n\t\t\tself.degree[x] = 0\n\n\t\tif not directed:\n\t\t\tself.insert_edge(y,x,True)\n\n\t\tself.num_edges+=1\n\n\tdef print_graph(self):\n\t\tprint(\"Number of vertices: {} Number of Edges: {}\".format(self.num_vertices, self.num_edges))\n\t\tk = sorted(self.edges.keys())\n\t\tfor l in k:\n\t\t\tprint(\"{}: \".format(l))\n\t\t\te = self.edges[l]\n\t\t\twhile e:\n\t\t\t\tprint(\"{} -> {} \".format(l, e.y))\n\t\t\t\te = e.next\n\t\t\tprint()\n\n\nclass Search(object):\n\t\"\"\"docstring for Search\"\"\"\n\tdef __init__(self, arg):\n\t\tsuper(Search, self).__init__()\n\t\tself.graph = arg\n\n\t\tself.time = 0\n\t\tself.discovered = {}\n\t\tself.entry_time = {}\n\t\tself.parent = {}\n\t\tself.processed = {}\n\t\tself.exit_time = {}\n\n\t\tfor k in self.graph.vertices:\n\t\t\tself.discovered[k] = False\n\t\t\tself.entry_time[k] = None\n\t\t\tself.parent[k] = None\n\t\t\tself.processed[k] = False\n\t\t\tself.exit_time[k] = None\n\n\n\tdef process_vertex_early(self, x):\n\t\tprint(\"Process vertex early: {}\".format(x))\n\n\n\tdef edge_classification(self, x, y):\n\n\t\tif self.parent[y] == x:\n\t\t\tprint(\"Tree edge\")\n\t\t\treturn\n\n\t\tif self.discovered[y] and not 
self.processed[y]:\n\t\t\tprint(\"Back edge\")\n\t\t\treturn\n\n\t\tif self.processed[y] and (self.entry_time[y] < self.entry_time[x]):\n\t\t\tprint(\"Forward edge\")\n\t\t\treturn\n\n\t\tif self.processed[y] and (self.entry_time[y] > self.entry_time[x]):\n\t\t\tprint(\"Cross edge\")\n\t\t\treturn\n\n\n\tdef process_edge(self, x, y, s):\n\n\t\tprint(\"Processing edge: {} {} {}\".format(x, y, s))\n\t\tself.edge_classification(x,y)\n\t\tif self.parent[x] != y and self.discovered[y]:\n\t\t\tprint(\"Found cycle from {} to {} \".format(y,x))\n\n\tdef process_vertex_late(self, x):\n\t\tprint(\"Process vertex late: {}\".format(x))\n\n\tdef dfs(self, x):\n\t\tself.discovered[x] = True\n\t\tself.time+=1\n\t\tself.entry_time[x] = self.time\n\n\t\tself.process_vertex_early(x)\n\t\te = self.graph.edges[x]\t\n\t\twhile e:\n\t\t\ty = e.y\n\t\t\tif not self.discovered[y]:\n\t\t\t\tself.parent[y] = x\n\t\t\t\tself.process_edge(x,y,\"IF\")\t\n\t\t\t\tself.dfs(y)\n\t\t\telif not self.processed[y] or self.graph.directed:\n\t\t\t\tself.process_edge(x, y, \"ELSE\")\n\n\t\t\te = e.next\n\n\t\tself.process_vertex_late(x)\n\t\tself.time += 1\n\t\tself.exit_time[x] = self.time\n\t\tself.processed[x] = True\n\n\tdef dfs_stat(self):\n\t\tfor k in self.graph.edges.keys():\n\t\t\tprint(\"{}: Entry time: {}, Exit time: {} Parent: {}\".format(k, self.entry_time[k], self.exit_time[k]\n\t\t\t\t\t\t\t\t, self.parent[k]))\n\n\t\t\nd = False\nif len(sys.argv) == 3:\n\td = True\n\ng = Graph(0,0,d)\ng.read(sys.argv[1])\ng.print_graph()\n\ns = Search(g)\ns.dfs(1)\ns.dfs_stat()\n\n#num edges is wrong.\n\n\n\n\n\n" }, { "alpha_fraction": 0.5410668849945068, "alphanum_fraction": 0.5817103981971741, "avg_line_length": 18.327869415283203, "blob_id": "7eaf141cc60e071ef3c0f14c86d814f26972c705", "content_id": "2e384483f18087fab7f2329517f7becd2623fac2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1183, "license_type": "no_license", "max_line_length": 100, "num_lines": 61, "path": "/excrayg/leetcode/python/spiral_matrix.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "# 35. 
Spiral Matrix\n# Code it now: https://oj.leetcode.com/problems/spiral-matrix/ Difficulty: Medium, Frequency: Medium\n# Question:\n# Given a matrix of m ✕ n elements (m rows, n columns), return all elements of the\n# matrix in spiral order.\n# For example, given the following matrix:\n# [\n# [ 1, 2, 3 ],\n# [ 4, 5, 6 ],\n# [ 7, 8, 9 ]\n# ]\n# You should return [1,2,3,6,9,8,7,4,5].\n\n#have a function to print outer layer of matrix\n\n\ndef print_spiral(mat):\n\tm = len(mat)\n\tn = len(mat[0])\n\ti = 0\n\tj = 0\n\tl = []\n\twhile m > 1 and n > 1:\n\t\tprint_outer_layer(mat, i, j, m, n, l)\n\t\ti+=1\n\t\tj+=1\n\t\tm-=2\n\t\tn-=2\n\n\tif m == 1:\n\t\t#print ith row\n\t\tfor a in range(j, n):\n\t\t\tl.append(mat[i][a])\n\telif n == 1:\n\t\t#print jth column\n\t\tfor a in range(i, m):\n\t\t\tl.append(mat[a][j])\n\n\treturn l\n\n\ndef print_outer_layer(mat, i, j, m, n, l):\n\t#print i'th row\n\tfor a in range(j, n):\n\t\tl.append(mat[i][a])\n\n\t#print nth column\n\tfor a in range(i+1, m):\n\t\tl.append(mat[a][n-1])\n\n\t#print mth row in reverse\n\tfor a in range(n-2, j-1, -1):\n\t\tl.append(mat[i][a])\n\n\t#print jth column in reverse.\n\tfor a in range(m-2, i-1, -1):\n\t\tl.append(mat[a][j])\n\n\nmat = [[ 1, 2, 3 ],[ 4, 5, 6 ],[ 7, 8, 9 ]]\nprint(print_spiral(mat))\n\n\n" }, { "alpha_fraction": 0.3416898846626282, "alphanum_fraction": 0.34818941354751587, "avg_line_length": 21.63829803466797, "blob_id": "e25695f57f7b753a6e42f634689c488c6a5c6e65", "content_id": "1f919309eacf1f420e6cc7f30b5bf689c3084b92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1077, "license_type": "no_license", "max_line_length": 52, "num_lines": 47, "path": "/excrayg/leetcode/python/algos.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "class Solution:\n # @param A : string\n # @param B : string\n # @param C : string\n # @return an integer\n def isInterleave(self, A, B, C):\n return self.h(A, 0, B, 0, C, 0, {})\n \n def h(self, A, a, B, b, C, c, d):\n \n if (a,b) in d:\n return d[(a,b)]\n\n # print(A[a:], B[b:], C[c:])\n \n if a == len(A):\n if B[b:] == C[c:]:\n # print(\"hi\")\n return True\n else:\n return False\n \n if b == len(B):\n if A[a:] == C[c:]:\n return True\n else:\n return False\n \n with_a = False \n if A[a] == C[c]:\n with_a = self.h(A, a+1, B, b, C, c+1, d)\n \n with_b = False\n if not with_a and B[b] == C[c]:\n with_b = self.h(A, a, B, b+1, C, c+1, d)\n \n \n d[(a,b)] = with_a or with_b\n return d[(a,b)] \n \n\nA=\"eZCHXr0CgsB4O3TCDlitYI7kH38rEElI\"\nB=\"UhSQsB6CWAHE6zzphz5BIAHqSWIY24D\"\nC=\"eUZCHhXr0SQsCgsB4O3B6TCWCDlAitYIHE7k6H3z8zrphz5EEBlIIAHqSWIY24D\"\n\ns = Solution() \nprint(s.isInterleave(A,B,C)) \n \n" }, { "alpha_fraction": 0.37890625, "alphanum_fraction": 0.390625, "avg_line_length": 21.940298080444336, "blob_id": "2df73dd70c9f7fcd25617d46d8236d724e25c53d", "content_id": "a43714aa534fff9e1d669dd48ff72925e2435b2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1536, "license_type": "no_license", "max_line_length": 52, "num_lines": 67, "path": "/excrayg/leetcode/python/nqueen.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "class Solution:\n \"\"\"\n Get all distinct N-Queen solutions\n @param n: The number of queens\n @return: All distinct solutions\n \"\"\"\n def solveNQueens(self, n):\n # write your code here\n # if n < 4:\n # return []\n cols = [-100] * n\n cur_row = 0\n solns = []\n self.sh(n, cur_row, cols, solns)\n 
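The memoised interleaving check in algos.py above has a standard bottom-up counterpart; in this illustrative sketch (not the repo's code) dp[i][j] is True when c[:i+j] interleaves a[:i] and b[:j]:

def is_interleave(a, b, c):
    if len(a) + len(b) != len(c):
        return False
    dp = [[False] * (len(b) + 1) for _ in range(len(a) + 1)]
    dp[0][0] = True
    for i in range(len(a) + 1):
        for j in range(len(b) + 1):
            if i and a[i - 1] == c[i + j - 1]:
                dp[i][j] = dp[i][j] or dp[i - 1][j]
            if j and b[j - 1] == c[i + j - 1]:
                dp[i][j] = dp[i][j] or dp[i][j - 1]
    return dp[len(a)][len(b)]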
return solns\n \n def can_be_placed(self, i, cur_row, cols):\n if cur_row == 0:\n return True\n \n t = cur_row-1\n d = 1\n while t >= 0:\n print(cols[t], i, t)\n if abs(cols[t]-i) == d:\n return False\n d+=1\n t-=1\n \n t = cur_row\n # print(t)\n while t >= 0:\n if cols[t] == i:\n return False\n t-=1\n \n return True\n\n def fs(self, cols):\n n = len(cols)\n l = [\".\"] * n\n q = \"Q\"\n t = []\n for i in cols:\n l[i] = \"Q\"\n t.append(\"\".join(l))\n l[i] = \".\"\n\n return t\n \n def sh(self, n, cur_row, cols, solns):\n \n if cur_row == n:\n solns.append(self.fs(cols))\n return\n \n for i in range(n):\n if self.can_be_placed(i, cur_row, cols):\n print(cur_row, i)\n cols[cur_row] = i\n self.sh(n, cur_row+1, cols, solns)\n cols[cur_row] = -100\n \n return \n\ns = Solution()\nprint(s.solveNQueens(4))" }, { "alpha_fraction": 0.3888888955116272, "alphanum_fraction": 0.7222222089767456, "avg_line_length": 36, "blob_id": "d52cc5de72c67a8c047ca586faa05a6bf04e9978", "content_id": "7a1411ee4ffa0af0563af0db1f8530bd61d18c96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 36, "license_type": "no_license", "max_line_length": 36, "num_lines": 1, "path": "/excrayg/leetcode/cpp/todo/reverse_integer.cpp", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "// -123 becomes -321 123 becomes 321" }, { "alpha_fraction": 0.4969053864479065, "alphanum_fraction": 0.5110521912574768, "avg_line_length": 27.299999237060547, "blob_id": "98b2a3b2961ca2a5281b187040f6d403a59bf4f7", "content_id": "27d5450424c61562da65f27b899c1039b73e736d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1131, "license_type": "no_license", "max_line_length": 78, "num_lines": 40, "path": "/excrayg/leetcode/python/three_sum.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "import random\nimport pprint\n\ndef get_rand_list(max_len):\n min_int, max_int = -50, 50\n test_list = []\n for i in range(max_len):\n test_list.append(random.randint(min_int, max_int))\n \n return test_list\n \ndef get_three_sum_to_k(test_list, k):\n test_list.sort()\n num_items = len(test_list)\n for a_idx in range(num_items-3+1):\n b_idx = a_idx + 1\n c_idx = num_items-1\n while b_idx < c_idx:\n a = test_list[a_idx]\n b = test_list[b_idx]\n c = test_list[c_idx]\n if a+b+c == k:\n return [a,b,c]\n elif a+b+c < k:\n b_idx += 1\n else:\n c_idx -= 1\n \n return []\n\nnum_test_cases = 10 \nfor test_case in range(num_test_cases):\n max_len_of_list = 10\n min_randk, max_randk = 0, 0 \n test_list = get_rand_list(max_len_of_list)\n k = random.randint(min_randk, max_randk)\n three_elem = get_three_sum_to_k(test_list, k)\n print(\"Test list: {} K: {}\\nA,B,C: {}\\n\".format(test_list, k, three_elem))\n if three_elem:\n assert sum(three_elem) == k" }, { "alpha_fraction": 0.5656512379646301, "alphanum_fraction": 0.6302521228790283, "avg_line_length": 14.800000190734863, "blob_id": "aa17714e3182f68b2600dbf4f30cb6480d4f7de4", "content_id": "0dec555ea431b40d79a3f7c595b03e7681f06f77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1904, "license_type": "no_license", "max_line_length": 75, "num_lines": 120, "path": "/excrayg/leetcode/python/determine_valid_bst.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\n\n# Question:\n# Given a binary tree, determine if it is a valid Binary Search Tree (BST).\n\n#BST\n#All nodes in left of root are less 
than root val. \n#All nodes in right of root are greater than root val. \n\n#left subtree and right subtree are valid BST. \n\nclass Node(object):\n\t\"\"\"docstring for Node\"\"\"\n\tdef __init__(self, arg):\n\t\tsuper(Node, self).__init__()\n\t\tself.val = arg\n\t\tself.left = None\n\t\tself.right = None\n\nctr = 0\nctr1 = 0\nctr2 = 0\n\ndef is_subtree_less_than(root, val):\n\tglobal ctr1\n\tctr1 += 1\n\tif root == None:\n\t\treturn True\n\n\treturn root.val < val and is_subtree_less_than(root.left, val) and \\\n\t\t\tis_subtree_less_than(root.right, val)\n\ndef is_subtree_greater_than(root, val):\n\tglobal ctr2\n\tctr2 += 1\n\tif root == None:\n\t\treturn True\n\n\treturn root.val > val and is_subtree_greater_than(root.left, val) and \\\n\t\t\tis_subtree_greater_than(root.right, val)\n\ndef is_valid_bst(root):\n\tglobal ctr\n\tctr += 1\n\tif root == None:\n\t\treturn True\n\n\treturn is_subtree_less_than(root.left, root.val) and \\\n\t\t\tis_subtree_greater_than(root.right, root.val) and \\\n\t\t\tis_valid_bst(root.left) and is_valid_bst(root.right)\n\nn1 = Node(1)\nn2 = Node(2)\nn3 = Node(3)\nn4 = Node(4)\nn5 = Node(5)\nn6 = Node(6)\nn7 = Node(7)\n\n# 4\n# 2 6\n# 1 7 3 5\n\nn4.left = n2\nn4.right = n6\n\nn2.left = n1\nn2.right = n7\n\nn6.left = n3\nn6.right = n5\n\nprint(is_valid_bst(n4))\nprint(ctr,ctr1,ctr2)\nctr = 0 \nctr1 = 0\nctr2 = 0\n\n# 4\n# 2 6\n# 1 3 5 7\n\nn1 = Node(1)\nn2 = Node(2)\nn3 = Node(3)\nn4 = Node(4)\nn5 = Node(5)\nn6 = Node(6)\nn7 = Node(7)\n\nn4.left = n2\nn4.right = n6\n\nn2.left = n1\nn2.right = n3\n\nn6.left = n5\nn6.right = n7\n\nprint(is_valid_bst(n4))\nprint(ctr, ctr1, ctr2)\nctr = 0\nctr1 = 0\nctr2 = 0\n\nn1 = Node(1)\nn2 = Node(2)\nn3 = Node(3)\nn4 = Node(4)\nn5 = Node(5)\nn6 = Node(6)\nn7 = Node(7)\n\nn1.right = n2\nn2.right = n3\nn3.right = n4\nn4.right = n5\nn5.right = n6\nn6.right = n7\n\nprint(is_valid_bst(n1))\nprint(ctr, ctr1, ctr2)\n\n\n\t\t\n\n" }, { "alpha_fraction": 0.4583229124546051, "alphanum_fraction": 0.4688360393047333, "avg_line_length": 17.41474723815918, "blob_id": "58122a82818e7ab2a68d01ff0aa37d2f2a1ce86c", "content_id": "b80baf9f5c1fe1cf0f797bbb1c80b1c949106dac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3995, "license_type": "no_license", "max_line_length": 63, "num_lines": 217, "path": "/excrayg/leetcode/cpp/merge_bsts.cpp", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "// Given two BST, Merge them.\n// Convert BST to DLL. \n// Merge two DLL. \n// Convert DLL to BST.\n\n// How to convert BST to DLL. \n\n/*\n1) Do inorder traversal of BST and create a DLL. \n2) Have an inorder iterator, \n it->next(). 
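The validation in determine_valid_bst.py above re-walks whole subtrees, which its own counters show grows quadratic in the worst case; a common linear-time alternative passes value bounds down the recursion instead. This sketch is illustrative and assumes the same Node(val/left/right) shape:

def is_bst(node, lo=float("-inf"), hi=float("inf")):
    # Each node must fall strictly inside the (lo, hi) window inherited
    # from its ancestors; the window narrows on the way down.
    if node is None:
        return True
    if not (lo < node.val < hi):
        return False
    return is_bst(node.left, lo, node.val) and is_bst(node.right, node.val, hi)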
node.left = prev, prev.right = node\n*/\n\n#include <iostream>\n#include <stack>\n\nusing namespace std;\n\nstruct Node\n{\n Node(int val)\n {\n this->val = val;\n }\n int val; \n Node* left;\n Node* right;\n};\n\nclass BSTIterator\n{\n public:\n BSTIterator(Node* root)\n {\n Node* temp = root;\n while(temp)\n {\n bst_stack.push(temp);\n temp = temp->left;\n }\n }\n \n bool hasNext()\n {\n return !bst_stack.empty();\n }\n \n Node* next()\n {\n Node* top = bst_stack.top();\n bst_stack.pop();\n \n if(top->right)\n {\n Node* temp = top->right;\n while(temp)\n {\n bst_stack.push(temp);\n temp = temp->left;\n }\n }\n return top;\n }\n private:\n stack<Node*> bst_stack;\n};\n\nNode* convert_bst_to_dll(Node* root)\n{\n BSTIterator bIter(root);\n Node *prev = NULL, *head = NULL;\n \n while(bIter.hasNext())\n {\n Node* curr = bIter.next();\n if(!head)\n {\n head = curr;\n }\n curr->left = prev;\n if(prev)\n {\n prev->right = curr;\n }\n prev = curr;\n }\n \n return head;\n}\n\nNode* merge_two_dll(Node* first, Node* second)\n{\n Node* dummy = new Node(0);\n Node* head = dummy;\n while(first && second)\n {\n Node* curr = NULL;\n if(first->val < second -> val)\n {\n curr = first;\n first = first->right;\n }\n else\n {\n curr = second;\n second = second->right;\n }\n dummy->right = curr;\n curr->left = dummy;\n dummy = curr;\n curr = curr->right;\n }\n \n if(first)\n {\n dummy->right = first;\n }\n else\n {\n dummy->right = second; \n }\n \n return head->right;\n}\n\nint count_nodes_in_dll(Node* head);\nNode* convert_dll_to_bst_helper(Node** head, int n);\n\nNode* convert_dll_to_bst(Node* head)\n{\n // Count the nodes in the DLL. \n int n = count_nodes_in_dll(head);\n return convert_dll_to_bst_helper(&head, n);\n}\n\nint count_nodes_in_dll(Node* head)\n{\n int count = 0;\n while(head)\n {\n count += 1;\n head = head->right;\n }\n \n return count;\n}\n\nNode* convert_dll_to_bst_helper(Node** head, int n)\n{\n if(n == 0)\n {\n return NULL;\n }\n else\n {\n Node* left = convert_dll_to_bst_helper(head, n/2); \n Node* root = *head;\n root->left = left;\n *head = (*head)->right;\n Node* right = convert_dll_to_bst_helper(head, n-n/2-1);\n root->right = right;\n return root;\n }\n}\n\nvoid printNodes(Node* root)\n{\n while(root)\n {\n cout << root->val << \" \";\n root = root->right;\n }\n cout << endl;\n}\n\nvoid printTree(Node* root)\n{\n if(root)\n {\n printTree(root->left);\n cout << root->val << \" \";\n printTree(root->right);\n }\n}\n\nNode* merge_bst(Node* bst1, Node* bst2)\n{\n Node* dll1 = convert_bst_to_dll(bst1);\n Node* dll2 = convert_bst_to_dll(bst2);\n \n printNodes(dll1); printNodes(dll2);\n Node* merged = merge_two_dll(dll1, dll2);\n printNodes(merged);\n \n return convert_dll_to_bst(merged);\n}\n\nint main()\n{\n Node* n1 = new Node(1);\n Node* n2 = new Node(2);\n Node* n3 = new Node(3);\n Node* n4 = new Node(4);\n Node* n5 = new Node(5);\n Node* n6 = new Node(6);\n \n n2->left = n1;\n n2->right = n3;\n \n n5->left = n4;\n n5->right = n6;\n \n Node* merged = merge_bst(n2, n5);\n printTree(merged);\n \n return 0;\n}" }, { "alpha_fraction": 0.46796658635139465, "alphanum_fraction": 0.49860724806785583, "avg_line_length": 15.952381134033203, "blob_id": "62fe871b2001b35ff2b700bd7e41b70cb3e1e366", "content_id": "dc5717d6cf2f2522a9833afd89585ac0adb0d553", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 359, "license_type": "no_license", "max_line_length": 79, "num_lines": 21, "path": "/excrayg/leetcode/python/trial.py", "repo_name": 
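The C++ record above merges two BSTs via a BST -> DLL -> merge -> BST pipeline; the same result can be prototyped in Python with lazy inorder generators and heapq.merge. A hypothetical sketch (not the repo's code), assuming nodes with val/left/right:

import heapq

def inorder_gen(node):
    # Lazy inorder walk: yields a BST's values in sorted order.
    if node:
        yield from inorder_gen(node.left)
        yield node.val
        yield from inorder_gen(node.right)

def merged_values(bst1, bst2):
    # heapq.merge lazily merges two already-sorted streams.
    return list(heapq.merge(inorder_gen(bst1), inorder_gen(bst2)))

For the two three-node trees built in that file's main(), this would return [1, 2, 3, 4, 5, 6].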
"excray/coding-algos", "src_encoding": "UTF-8", "text": "\ndef min_coins(S, index, V, count):\n if S < 0:\n return float(\"inf\")\n \n if S == 0:\n return count\n\n if index >= len(V):\n return float(\"inf\")\n\n i = index\n count = min( min_coins(S, i+1, V, count), min_coins(S-V[i], i, V, count+1))\n\n return count\n\n\nS = 7\nv=[1,2,3, 7]\ncount = float(\"inf\")\n\nprint(min_coins(S, 0, v, 0))\n\n\n" }, { "alpha_fraction": 0.5792610049247742, "alphanum_fraction": 0.6150178909301758, "avg_line_length": 15.720000267028809, "blob_id": "cdcd0ab2e4eb62b6c6d47a755a8d47b785d0abaa", "content_id": "1fc493741b500a4e7e7f59e858028e9d0138c48b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 839, "license_type": "no_license", "max_line_length": 56, "num_lines": 50, "path": "/excrayg/leetcode/python/find_k_balanced.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\ndef find_k_balanced(node, k):\n \n k_node, height = find_k_balanced_helper(node, k)\n if height == -1:\n return k_node\n else:\n return None\n\ndef find_k_balanced_helper(node, k):\n \n if node == None:\n return node, 0\n\n lnode, lheight = find_k_balanced_helper(node.left, k)\n if lheight == -1:\n return lnode, lheight\n\n rnode, rheight = find_k_balanced_helper(node.right, k)\n if rheight == -1:\n return rnode, rheight\n\n diff = abs(rheight - lheight)\n if diff > k:\n return node, -1\n\n return node, lheight+rheight+1\n\n\nclass Node:\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\nn1 = Node(1)\nn2 = Node(2)\nn3 = Node(3)\nn4 = Node(4)\nn5 = Node(5)\nn6 = Node(6)\n\nn1.left = n2\nn2.right = n3\nn3.left = n4\nn4.right = n5\nn3.right = n6\n\n\nk_node = find_k_balanced(n1, 3)\nprint(k_node.val)\n\n\n" }, { "alpha_fraction": 0.4506172835826874, "alphanum_fraction": 0.4794238805770874, "avg_line_length": 16.321428298950195, "blob_id": "a215bcacbc50b684317fab76f5eaf81a51330ac0", "content_id": "4b67165d4a1fd94651ea069a9ffe916b108fd9ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 486, "license_type": "no_license", "max_line_length": 68, "num_lines": 28, "path": "/excrayg/leetcode/cpp/vec.cpp", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "#include <vector>\n#include <iostream>\nusing namespace std;\n\n\ntemplate<typename T>\nstd::ostream& operator<<(std::ostream& s, const std::vector<T>& v) {\n s.put('[');\n char comma[3] = {'\\0', ' ', '\\0'};\n for (const auto& e : v) {\n s << comma << e;\n comma[0] = ',';\n }\n return s << ']';\n}\n\nint main()\n{\n vector<int> hello(20,0);\n hello.resize(25);\n for(int i = 0; i < 20; i++)\n {\n hello[i] = 1;\n }\n\n cout<<hello<<endl;\n return 0;\n}\n\n" }, { "alpha_fraction": 0.49320149421691895, "alphanum_fraction": 0.516687273979187, "avg_line_length": 16.60869598388672, "blob_id": "e4319e6ac6c55b7b8a7f97e477c1ed779d1dc7d6", "content_id": "e57125adc5c2058bcaacad1371eaf1276965972f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 809, "license_type": "no_license", "max_line_length": 47, "num_lines": 46, "path": "/excrayg/leetcode/python/pre_in.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "class Root:\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n \n \ndef inorder(root):\n if root:\n inorder(root.left)\n print(root.val)\n inorder(root.right)\n \n return\n\n\n\ndef 
contruct_bt(inorder):\n global pre\n if len(inorder) == 0:\n return None\n\n if len(inorder) == 1:\n pre = pre[1:] \n return Root(inorder[0])\n \n root = Root(pre[0])\n print(pre, inorder)\n index = inorder.index(root.val)\n pre = pre[1:]\n root.left = contruct_bt(inorder[:index])\n root.right = contruct_bt(inorder[index+1:])\n \n return root\n \n\n# a = Root(1)\n# b = Root(2)\n# c = Root(3)\n# d = Root(4)\n\n# a.left = b\n# a.right = c\n# c.left = d\npre = [1,2,3,4]\ninorder(contruct_bt([2,1,4,3]))" }, { "alpha_fraction": 0.43893590569496155, "alphanum_fraction": 0.44619104266166687, "avg_line_length": 15.857142448425293, "blob_id": "ee0ad12927101901c2b57f7ee275198615be69f8", "content_id": "6614fe986caa09c2dccafcd82016a8f5bbac51f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 827, "license_type": "no_license", "max_line_length": 49, "num_lines": 49, "path": "/excrayg/leetcode/cpp/re.cpp", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\n#include<iostream>\n#include <vector>\nusing namespace std;\n\nclass Solution {\n public:\n /** \n *@param A: A list of integers\n *@param elem: An integer\n *@return: The new length after remove\n */\n int removeElement(vector<int> &A, int elem) {\n // write your code here\n cout<<\"remove\";\n int e = A.size()-1;\n if(e==0)\n {\n return e;\n }\n int nl = 0;\n vector<int>::iterator b = A.begin();\n cout<<\"how\"<<endl;\n while(b != b+e)\n {\n cout<<\"what\"<<endl;\n if(*b == elem)\n {\n int t = *b;\n *b = A[e];\n A[e--] = t;\n nl+=1;\n }\n b++;\n }\n\n return nl;\n }\n};\n\n\nint main()\n{\n cout<<\"Wait\";\n Solution s;\n vector<int> A;\n cout<<\"Call\";\n s.removeElement(A, 0); \n return 0;\n}\n" }, { "alpha_fraction": 0.5223246216773987, "alphanum_fraction": 0.5478383898735046, "avg_line_length": 13.852631568908691, "blob_id": "8687a52c828297186cf0f194cb9dad6f489985dd", "content_id": "6a491d584af7a470a9fff03a44be2bbd7571d10d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1411, "license_type": "no_license", "max_line_length": 78, "num_lines": 95, "path": "/excrayg/leetcode/cpp/adm_backtrack_combination.cpp", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\n#include <iostream>\nusing namespace std;\n\nbool finished = false;\n\n\nbool is_a_solution(int* a, int k, int n)\n{\n\treturn k==n;\n}\n\nvoid construct_candidates_perm(int* a, int k, int n, int* c, int* ncandidates)\n{\n\tint i;\n\tbool in_perm[1000];\n\n\tfor(i = 1; i < 1000; i++) \n\t\tin_perm[i] = false;\n\n\tfor( i = 1; i < k; i++)\n\t{\n\t\t// if(a[i]==10000) cout<<\"Whatt\";\n\t\tin_perm[a[i]] = true;\n\t\t// cout<<i<<\" \"<<a[i]<<\" \";\n\t}\n\n\t*ncandidates = 0;\n\tfor(i = 1; i<=n; i++)\n\t\tif( in_perm[i] == false )\n\t\t{\n\t\t\tc[*ncandidates] = i;\n\t\t\t*ncandidates+=1;\n\t\t}\n\tcout<<*ncandidates<<endl;\n\n}\n\nvoid construct_candidates(int* a, int k, int n, int* c, int* ncandidates)\n{\n\tc[0] = true;\n\tc[1] = false;\n\t*ncandidates = 2;\n}\n\nvoid process_solution_perm(int* a, int k, int n)\n{\n\tint i;\n\t\n\tfor(i = 1; i<=k; i++)\n\t\tcout<<\" \"<<a[i];\n\tcout<<endl;\n}\n\nvoid process_solution(int* a, int k, int n)\n{\n\tint i;\n\tcout<<\"{\";\n\tfor(i = 1; i<=k; i++)\n\t\tif(a[i] == true) cout<<\" \"<<i;\n\tcout<<\"}\"<<endl;\n}\n\n\nvoid backtrack(int *a, int k, int n)\n{\n\tint c[10000];\n\tint ncandidates;\n\tint i;\n\n\tif(is_a_solution(a,k,n))\n\t{\n\t\tprocess_solution_perm(a,k,n);\n\t}\n\telse\n\t{\n\t\tk = 
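pre_in.py above consumes a global `pre` list while repeatedly slicing the inorder list; an index-based variant avoids both. This sketch is illustrative (it reuses that file's Root class and assumes the tree values are distinct):

def build(preorder, inorder):
    idx = {v: i for i, v in enumerate(inorder)}  # value -> inorder position
    it = iter(preorder)  # preorder is consumed strictly left to right
    def rec(lo, hi):
        if lo > hi:
            return None
        root = Root(next(it))
        m = idx[root.val]
        root.left = rec(lo, m - 1)
        root.right = rec(m + 1, hi)
        return root
    return rec(0, len(inorder) - 1)

# build([1, 2, 3, 4], [2, 1, 4, 3]) reproduces the tree checked at the bottom of that file.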
k+1;\n\t\tconstruct_candidates_perm(a,k,n,c, &ncandidates);\n\t\tfor(i=0; i<ncandidates; i++)\n\t\t{\n\t\t\ta[k] = c[i];\n\t\t\t//make move\n\t\t\tbacktrack(a, k, n);\n\t\t\t//unmake move\n\t\t\tif(finished) return;\n\t\t}\n\t}\n}\n\nint main(int argc, char const *argv[])\n{\n\tint a[100];\n\tint n = 4;\n\tbacktrack(a, 0, n);\n\treturn 0;\n}" }, { "alpha_fraction": 0.6261762380599976, "alphanum_fraction": 0.652694582939148, "avg_line_length": 23.33333396911621, "blob_id": "8b5b83b38f98ad32378184817b2c72063cb799ac", "content_id": "933c92f0c701a912486887965823e84fbde523f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1181, "license_type": "no_license", "max_line_length": 106, "num_lines": 48, "path": "/excrayg/leetcode/python/swap_nodes_in_list.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "# 22. Swap Nodes in Pairs\n# Code it now: https://oj.leetcode.com/problems/swap-nodes-in-pairs/ Difficulty: Medium, Frequency: Medium\n# Question:\n# Given a linked list, swap every two adjacent nodes and return its head.\n# For example,\n# Given 1 -> 2 -> 3 -> 4, you should return the list as 2 -> 1 -> 4 -> 3.\n# Your algorithm should use only constant space. You may not modify the values in the\n# list, only nodes itself can be changed.\n# Example Questions Candidate Might Ask:\n# Q: What if the linked list has an odd number of nodes?\n# A: The last node should not be swapped.\n\n# a and b point to first and second node. \n\n# 1.next = a, a.next = b. b.next = 2\n# 1.next = b, b.next = a, a.next = 2\n\nclass Node(object):\n\t\"\"\"docstring for Node\"\"\"\n\tdef __init__(self, arg):\n\t\tsuper(Node, self).__init__()\n\t\tself.val = arg\n\t\t\n\ndef swap_nodes(a):\n\tif not a or not a.next:\n\t\treturn a\n\n\tdummy = Node(-1)\n\ta1 = a\n\tb1 = a.next\n\tprev = dummy\n\tprev.next = a1\n\tlast = b1.next\n\n\twhile a1 and b1:\n\t\tprev.next = b1\n\t\tb1.next = a1\n\t\ta1.next = last\n\t\tprev = last\n\t\tif last:\n\t\t\ta1 = last.next\n\t\t\tif a1:\n\t\t\t\tb1 = a1.next\n\t\t\t\tif b1:\n\t\t\t\t\tlast = b1.next\n\n\treturn dummy.next\n\n" }, { "alpha_fraction": 0.44923076033592224, "alphanum_fraction": 0.4692307710647583, "avg_line_length": 23.074073791503906, "blob_id": "41487dea0b2c8b8a9b875e9e1b4425b49a3c440e", "content_id": "304dc74ae2f4215af1d8eedb0985f0df79527c59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 650, "license_type": "no_license", "max_line_length": 75, "num_lines": 27, "path": "/excrayg/leetcode/python/delete_digits.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "class Solution:\n    \"\"\"\n    @param A: A positive integer which has N digits, A is a string.\n    @param k: Remove k digits.\n    @return: A string\n    \"\"\"\n    def DeleteDigits(self, A, k):\n        # write your code here\n        n = len(A)\n        if n == 0:\n            return \"\"\n        A = list(A)\n        idxs = [1]*n\n        sorted_str = sorted(enumerate(A), key=lambda x: x[1], reverse=True)\n        for i in range(k):\n            idxs[sorted_str[i][0]] = 0\n        r = \"\"\n        for i,v in enumerate(A):\n            if idxs[i] == 1:\n                r+=v\n        \n        return r\n\ns = Solution()\nA=\"178542\"\nk=4\nprint(s.DeleteDigits(A,k))\n" }, { "alpha_fraction": 0.35832470655441284, "alphanum_fraction": 0.36763185262680054, "avg_line_length": 25.86111068725586, "blob_id": "28a40bd12b0f81beb453d9c91fb7eef4ab6fa2cf", "content_id": "c70fa69b3cc3cf14253f6e62500a5900196f230c", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
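delete_digits.py above drops the k numerically largest digits, which is not always optimal (for "1090" with k=1 it keeps "100", while removing the "1" gives the smaller "90"). The usual greedy keeps a non-decreasing stack of digits; an illustrative sketch, not the repo's code:

def delete_digits(A, k):
    stack = []
    for ch in A:
        # Dropping a digit that is larger than its successor always shrinks the result.
        while k and stack and stack[-1] > ch:
            stack.pop()
            k -= 1
        stack.append(ch)
    if k:  # still digits to remove: the tail is non-decreasing, so trim it
        stack = stack[:-k]
    return "".join(stack).lstrip("0") or "0"

# delete_digits("178542", 4) -> "12", matching the test in that record.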
"language": "Python", "length_bytes": 1934, "license_type": "no_license", "max_line_length": 103, "num_lines": 72, "path": "/excrayg/leetcode/python/word_ladder_lint.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "class Solution:\n # @param start, a string\n # @param end, a string\n # @param dict, a set of string\n # @return an integer\n def ladderLength(self, start, end, dict):\n # write your code here\n if start == end:\n return 0\n \n st = []\n dict.add(end)\n st.append(start)\n l = 1\n visited = set()\n cand = self.ch(dict)\n nl = 0\n cl = 1\n while len(st) != 0:\n node = st[0]\n st = st[1:]\n\n print(node)\n \n if node == end:\n return l\n \n visited.add(node)\n neighbours = self.gn(node, dict, cand)\n print(node, neighbours)\n for n in neighbours:\n if n not in visited:\n if end == n:\n l+=1\n return l\n else:\n st.append(n)\n nl+=1\n cl-=1\n if cl == 0:\n cl = nl\n nl = 0\n l+=1\n # l+=1\n return l\n \n def gn(self, node, dict, cand):\n neigh = set()\n for i in range(1,len(node)+1):\n idx = i-1\n lts = cand[i]\n for j in lts:\n if node[idx] != j:\n t = list(node)\n t[idx] = j\n new_node = \"\".join(t)\n if new_node in dict:\n neigh.add(new_node)\n return neigh\n \n def ch(self, d):\n cand = dict() \n for w in d:\n for i,c in enumerate(w):\n idx = i+1\n if idx not in cand:\n cand[idx] = set()\n cand[idx].add(c)\n return cand\n\ns = Solution()\nprint(s.ladderLength(\"game\", \"thee\", {\"frye\",\"heat\",\"tree\",\"thee\",\"game\",\"free\",\"hell\",\"fame\",\"faye\"}))\n" }, { "alpha_fraction": 0.29881155490875244, "alphanum_fraction": 0.40025466680526733, "avg_line_length": 22.23762321472168, "blob_id": "b629c4d5ac313c6c4ca26c8841eba95b9a1b2958", "content_id": "87635c6a0e100008d9c248447b8ebe59c70ec913", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2356, "license_type": "no_license", "max_line_length": 299, "num_lines": 101, "path": "/excrayg/leetcode/python/largest_number.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "class Solution:\n #@param num: A list of non negative integers\n #@return: A string\n #23 > 20\n # 1 > 10\n # 2 > 10\n # 20 > 1\n def is_greater_than(self, a, b):\n \n _a = []\n _b = []\n a1 = a\n b1 = b\n\n if not a:\n _a.append(0)\n if not b:\n _b.append(0)\n\n while a:\n _a.append(a%10)\n a=int(a/10)\n while b:\n _b.append(b%10)\n b=int(b/10)\n\n t, u = len(_a)-1, len(_b)-1\n if t == u:\n if a1 > b1:\n return True\n return False\n \n while t>=0 and u>=0:\n if _a[t] > _b[u]:\n return True\n elif _a[t] < _b[u]:\n return False\n\n t-=1\n u-=1\n\n if t == -1:\n return True\n\n return False\n \n \n def merge(self, l, r):\n nl = len(l)\n nr = len(r)\n temp = []\n i = 0 \n j = 0\n while i < nl and j < nr:\n if self.is_greater_than(l[i], r[j]):\n temp.append(l[i])\n i+=1\n else:\n temp.append(r[j])\n j+=1\n \n if i == nl:\n temp.extend(r[j:])\n else:\n temp.extend(l[i:])\n \n return temp\n \n def merge_sort(self, num, lo, hi):\n # print(lo, hi)\n if lo >= hi:\n return [num[lo]]\n \n m = lo+int((hi-lo)/2)\n left = self.merge_sort(num, lo, m)\n right = self.merge_sort(num, m+1, hi)\n\n # print(left,right)\n \n return self.merge(left, right)\n \n def largestNumber(self, num):\n # write your code here\n\n n = len(num)\n if n == 0:\n return \"\"\n \n num = self.merge_sort(num, 0, len(num)-1)\n\n t = sum(num)\n if t == 0:\n return \"0\"\n print(num)\n return ''.join(map(str, num))\n\ns = Solution()\nl = 
[41,23,87,55,50,53,18,9,39,63,35,33,54,25,26,49,74,61,32,81,97,99,38,96,22,95,35,57,80,80,16,22,17,13,89,11,75,98,57,81,69,8,10,85,13,49,66,94,80,25,13,85,55,12,87,50,28,96,80,43,10,24,88,52,16,92,61,28,26,78,28,28,16,1,56,31,47,85,27,30,85,2,30,51,84,50,3,14,97,9,91,90,63,90,92,89,76,76,67,55]\n\nk = s.largestNumber(l)\nprint(k)\n \n" }, { "alpha_fraction": 0.41271552443504333, "alphanum_fraction": 0.44288793206214905, "avg_line_length": 24.08108139038086, "blob_id": "29bedbdef68b02f0b29ee443d5ea9002931a38ee", "content_id": "1b48625b0c950aba85148f2a8c88ce37b18ab68f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 928, "license_type": "no_license", "max_line_length": 77, "num_lines": 37, "path": "/excrayg/leetcode/python/max_num.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "class Solution: \n #@param num: A list of non negative integers\n #@return: A string\n def largestNumber(self, num):\n # write your code here\n \n n = len(num)\n if n == 0:\n return \"\"\n idxs = list(range(n)) \n m = -1\n for i in num:\n m = max(m, i)\n mult = 1\n while m>=10:\n mult *= 10\n m/=10\n \n for i in range(n):\n p_10 = 10**int(math.log(num[i], 10))\n mult1 = int(mult/p_10)\n num[i] *= mult1\n idxs[i] = mult1\n \n \n sorted_num = sorted(enumerate(num), key=lambda x: x[1], reverse=True)\n print(sorted_num)\n new_str = \"\"\n for idx, val in sorted_num:\n val /= idxs[idx]\n new_str += str(int(val))\n \n return new_str\nimport math\ns = Solution()\nt=s.largestNumber([1,23,20,4,8])\nprint(t)\n" }, { "alpha_fraction": 0.3075483441352844, "alphanum_fraction": 0.3699313700199127, "avg_line_length": 24.580644607543945, "blob_id": "1eb7208b903545debfad95ce638efec8f1dccd42", "content_id": "a3c636b4b8305a07b18b46caa236b58213253421", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1603, "license_type": "no_license", "max_line_length": 88, "num_lines": 62, "path": "/excrayg/leetcode/python/int_roman.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "class Solution:\n # @param A : integer\n # @return a strings\n def intToRoman(self, A):\n\n# I = 1\n# V = 5\n# X = 10\n# L = 50\n# C = 100\n# D = 500\n# M = 1000\n\n# Only one I, X, and C can be used as the leading numeral in part of a subtractive pair.\n# I can only be placed before V and X.\n# X can only be placed before L and C.\n# C can only be placed before D and M.\n\n D = {1:\"I\", 5:\"V\", 10:\"X\", 50:\"L\", 100:\"C\", 500:\"D\", 1000:\"M\"}\n R = {}\n for k,v in D.items():\n R[v] = k\n \n rules = [[\"I\", \"V\", \"X\"], [\"X\", \"L\", \"C\"], [\"C\", \"D\", \"M\"]]\n for rule in rules:\n s, f1, f2 = rule\n num = R[f1] - R[s]\n D[num] = s+f1\n num = R[f2] - R[s]\n D[num] = s+f2\n \n s = \"\"\n curr_mod = 10\n T = A\n while T != 0:\n rem = A % curr_mod\n A -= rem\n T = int(A/curr_mod)\n print(rem, T)\n\n curr_mod *= 10\n if rem in D:\n s=D[rem]+s\n elif rem / 1000 >= 1:\n s=D[1000]*(rem/1000)+s\n elif rem / 500 >= 1:\n s=D[500]*(rem/500)+s\n elif rem / 100 >= 1:\n s=D[100]*(rem/100)+s\n elif rem / 50 >= 1:\n s=D[50]*(rem/50)+s\n elif rem / 10 >= 1:\n s=D[10]*(rem/10)+s\n elif rem / 5 >= 1:\n s=D[5]*(rem/5)+s\n elif rem / 1 >= 1:\n s=D[1]*(rem/1)+s\n \n return s\n \ns = Solution()\nprint(s.intToRoman(41))\n \n " }, { "alpha_fraction": 0.41002073884010315, "alphanum_fraction": 0.4201008081436157, "avg_line_length": 27.327730178833008, "blob_id": "baea87dcf9f30edf7628c3152ee627dede97ef5b", "content_id": 
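The hand-rolled digit comparator in largest_number.py above can be replaced by the concatenation rule: a goes before b when a+b > b+a as strings. An illustrative sketch (not the repo's code) using functools.cmp_to_key:

from functools import cmp_to_key

def largest_number(nums):
    strs = sorted(map(str, nums),
                  key=cmp_to_key(lambda a, b: (a + b < b + a) - (a + b > b + a)))
    out = "".join(strs)
    return "0" if out[0] == "0" else out  # collapse an all-zero input

# largest_number([1, 23, 20, 4, 8]) -> "8423201", the case tested in max_num.py above.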
"f50260232a8e6a175ab516bf98511c41b147bd50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3373, "license_type": "no_license", "max_line_length": 131, "num_lines": 119, "path": "/excrayg/leetcode/python/python_algos.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\n\ndef coin_min(coins, S):\n t = [float(\"inf\")]*(S+1)\n t[0] = 0\n for i in range(1,S+1):\n if i in coins:\n t[i] = 1\n continue\n for j in range(i-1, 0, -1):\n if t[i-j]+t[j] < t[i]:\n t[i] = t[i-j]+t[j]\n\n return t[S]\n\ndef coin_min1(coins, S):\n t = [float(\"inf\")]*(S+1)\n coins_list = [None]*(S+1)\n print(coins_list) \n t[0] = 0\n coins_list[0] = []\n for i in range(1,S+1): \n for j in range(0, len(coins)):\n if coins[j]<=i and t[i-coins[j]]+1 < t[i]:\n t[i] = t[i-coins[j]]+1\n coins_list[i] = coins_list[i-coins[j]]+[coins[j]]\n\n return t[S], coins_list[S]\n# def dfs(G, node, discover_time, finish_time, Seen=None, time=0):\n# if Seen is None: Seen = set()\n# discover_time[node] = time; time+=1\n# Seen.add(node)\n# for u in G[node]:\n# if u in Seen: continue\n# time = dfs(G, u, discover_time, finish_time, Seen, time)\n# finish_time[node] = time; time += 1\n# return time\n\n# def dfs_1(G, node, Seen):\n# C = set()\n# C.add(node)\n# Seen.add(node)\n# for u in G[node]:\n# if u in Seen: continue\n# C=C|dfs_1(G, u, Seen)\n# return C\n\n# G = {'a':['c', 'b'], 'b':['d', 'i', 'e'], 'c': ['d'], 'd':['a', 'h'], 'e':['f'], 'f':['g'], 'g':['e', 'h'], 'h':['i'], 'i':['h']}\n# discover_time = dict()\n# finish_time = dict()\n# dfs(G, 'a', discover_time, finish_time)\n# #print(discover_time)\n# #print(finish_time)\n\n# def dfs_topsort(G):\n# Seen, res = set(), []\n# def recurse(node):\n# if node in Seen: return\n# Seen.add(node)\n# for neigh in G[node]:\n# #if neigh in Seen: continue\n# recurse(neigh)\n# res.append(node)\n# for u in G:\n# recurse(u)\n# res.reverse()\n# return res\n\n# res = dfs_topsort(G)\n# #print(res)\n\n# def iddfs(G, node):\n# yielded = set()\n# def recurse(G, node, depth, Seen=None):\n# if node not in yielded:\n# #yield node\n# yielded.add(node)\n# if depth == 0: return\n# if Seen is None: Seen = set()\n# Seen.add(node)\n# for neigh in G[node]:\n# if neigh in Seen: continue\n# recurse(G, neigh, depth-1, Seen)\n# #print(depth, Seen)\n# n = len(G)\n# for d in range(n):\n# if len(yielded) == n: break\n# recurse(G, node, d)\n# #print(yielded)\n# # for n in iddfs(G, 'a'):\n# # #print(n)\n# iddfs(G, 'a')\n\n\n# def transpose(G):\n# GT = {}\n# for u in G:\n# GT[u] = set()\n# for u in G:\n# for v in G[u]:\n# GT[v].add(u)\n# return GT\n\n# def SCC(G):\n# GT = transpose(G)\n# sccs, seen = [], set()\n# for u in dfs_topsort(G):\n# print(u)\n# if u in seen: continue\n# # print(u)\n# C = dfs_1(GT, u, seen)\n# #seen.update(C)\n# sccs.append(C)\n# return sccs\n\n# print(SCC(G))\n# GT = transpose(G)\n# print(dfs_topsort(GT))\ncoins = [1,3,5]\nS = 11\nprint(coin_min1(coins, S))\n" }, { "alpha_fraction": 0.5029411911964417, "alphanum_fraction": 0.520588219165802, "avg_line_length": 15.75, "blob_id": "e1b2e54f79a053b3003bce5957a77c3949054a58", "content_id": "a46406dae9d5c14e800681686d9974bb054507c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 340, "license_type": "no_license", "max_line_length": 30, "num_lines": 20, "path": "/excrayg/leetcode/python/combs-i.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\n\ndef helper(result, s, cur, i):\n\tfor j in range(i, 
len(s)):\n\t\tif j > i and s[j-1] == s[j]:\n\t\t\tcontinue\n\t\tcur.append(s[j])\n\t\tresult.append(list(cur))\n\t\thelper(result, s, cur, j+1)\n\t\tcur.pop()\n\ndef comb(s):\n\ts_s = []\n\ts_s.append([])\n\tif s == [] or s == None:\n\t\treturn s_s\n\ts.sort()\n\thelper(s_s, s, [], 0)\n\tprint(s_s)\n\ns = [1,2,2]\ncomb(s)\n\n\n\n" }, { "alpha_fraction": 0.42134830355644226, "alphanum_fraction": 0.4350811541080475, "avg_line_length": 20.0657901763916, "blob_id": "a4701aa771c894a576f351836fa6d62c061dcbfa", "content_id": "6f7c1000a91027016ccee25395a25916e05ff3fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1602, "license_type": "no_license", "max_line_length": 58, "num_lines": 76, "path": "/excrayg/leetcode/python/copy_random.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "# Definition for singly-linked list with a random pointer.\n\n# Definition of ListNode\nclass ListNode(object):\n\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\nclass Solution:\n \"\"\"\n @param head: The first node of the linked list.\n @return: nothing\n \"\"\"\n \n def reverseList(self, head):\n # write your code here\n if head == None or head.next == None:\n return head\n \n # dummy = ListNode(-1, head)\n #1-2-3-None\n # 2 - 1 - None\n prev = None\n while head:\n n = head.next\n head.next = prev\n prev = head\n head = n\n \n return prev\n \n def merge(self, head, mid):\n a = head\n b = mid\n \n while a and b:\n t1 = a\n t2 = b\n a.next = b\n b.next = t1.next\n \n a = t1.next\n b = t2.next\n \n return head\n \n def reorderList(self, head):\n # write your code here\n if head == None or head.next == None:\n return head\n \n n = 0\n t = head\n while t:\n t = t.next\n n+=1\n \n m = int(n/2)\n t = head\n prev_t = None\n i = 0\n while i < m:\n prev_t = t\n t = t.next\n i+=1\n prev_t.next = None\n midList = self.reverseList(t)\n return self.merge(head, midList)\n\nn1 = ListNode(1)\nn2 = ListNode(1, n1)\n\nhead = n2\ns = Solution()\nh = s.reorderList(head)\n\n" }, { "alpha_fraction": 0.39318886399269104, "alphanum_fraction": 0.39938080310821533, "avg_line_length": 22.925926208496094, "blob_id": "9c11062d16a08efbde8a29f2d20edce4bf26219d", "content_id": "5ee7efe23793c6e3ebcd30af12d8184a4c8efcb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 646, "license_type": "no_license", "max_line_length": 48, "num_lines": 27, "path": "/excrayg/leetcode/python/palind.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "class Solution(object):\n \n def h(self, s, start_idx, l, ll):\n if start_idx == len(s):\n ll.append(list(l))\n else:\n for idx in range(start_idx, len(s)):\n cand = s[start_idx:idx+1] \n if cand == cand[::-1]:\n l.append(cand)\n self.h(s, idx+1, l, ll)\n l.pop()\n \n def partition(self, s):\n \"\"\"\n :type s: str\n :rtype: List[List[str]]\n \"\"\"\n \n l = []\n ll = []\n self.h(s, 0, l, ll)\n return ll\n\ns = Solution()\nprint(s.partition(\"abc\"))\nprint(s.partition(\"aab\"))\n" }, { "alpha_fraction": 0.5385656356811523, "alphanum_fraction": 0.5730717182159424, "avg_line_length": 15.09890079498291, "blob_id": "4de2a496879032c6de241fa0e2c8d1b1ece51a46", "content_id": "a05c4353940f616009fac9c4ed5b9517dbe86dc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1478, "license_type": "no_license", "max_line_length": 52, "num_lines": 91, "path": 
"/excrayg/leetcode/python/epi_algos.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python\nimport sys\n\ndef cyclic_search(arr, elem):\n\tif len(arr) == 0:\n\t\treturn -1\n\tif len(arr) == 1:\n\t\treturn 0\n\tmid = int(len(arr)/2)\n\tif mid-1 < 0:\n\t\tpass\n\tif arr[mid-1] > arr[mid] and arr[mid] < arr[mid+1]:\n\t\treturn mid\n\tif arr[0] < arr[mid]:\n\t\tif arr[:-1] < arr[0]:\n\t\t\treturn cyclic_search(arr[mid+1:])\n\n\ndef sqrt(elem):\n\tif elem == 0 or elem == 1:\n\t\treturn elem\n\n\tleft = 0\n\tright = elem\n\twhile(left + 1 < right):\n\t\tmid = left + int((right-left)/2)\n\t\tprint(left,mid,right)\n\t\tsqr = mid*mid\n\t\tif sqr == elem:\n\t\t\treturn mid\n\t\telif sqr < elem:\n\t\t\tleft = mid \n\t\telse:\n\t\t\tright = mid - 1\n\n\tif right * right <= elem:\n\t\treturn right\n\treturn left\n\n# print(sqrt(10))\n# print(sqrt(401))\n# print(sqrt(2))\n# print(sqrt(16))\n\n#Strings algos\n#convert strings to int and int to strings \ndef atoi(s):\n\tif len(s) == 0:\n\t\tassert(\"Null string passed\")\n\n\tsign = 1\n\tstartIndex = 0\n\tif s[0] == \"+\":\n\t\tstartIndex = 1\n\tif s[0] == \"-\":\n\t\tsign = -1\n\t\tstartIndex = 1\n\n\tres = 0\n\tdig = 1\n\tfor c in s[startIndex:]:\n\t\tres *= dig\n\t\tascii_c = ord(c) - ord('0')\n\t\t# print(ascii_c)\n\t\tif ascii_c >= 0 and ascii_c <= 9:\n\t\t\tres += ascii_c\n\t\t\tdig = 10\n\t\telse:\n\t\t\treturn (\"Error in encoding\")\n\t\t\t# return \"\"\n\n\n\treturn res*sign\n\n\n\n# print(atoi(\"123\"))\n# print(atoi(\"1\"))\n# print(atoi(\"12a\"))\n# print(atoi(\"-11\"))\n\nclass Node(object):\n\t\"\"\"docstring for Node\"\"\"\n\tdef __init__(self, arg):\n\t\tsuper(Node, self).__init__()\n\t\tself.data = arg\n\t\tself.left = None\n\t\tself.right = None\n\n\nn = Node(5)\n\t\t\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6073102355003357, "alphanum_fraction": 0.6476101279258728, "avg_line_length": 17.06779670715332, "blob_id": "57f3c4cdc54da72ae8044679312eb4b9f8ae786b", "content_id": "a38729c73c049941db206efbd4919739ed75e2a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1067, "license_type": "no_license", "max_line_length": 90, "num_lines": 59, "path": "/excrayg/leetcode/python/is_height_balanced.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "# Question:\n# Given a binary tree, determine if it is height-balanced.\n# For this problem, a height-balanced binary tree is defined as a binary tree in which the\n# depth of the two subtrees of every node never differs by more than 1.\n\n\nclass Node(object):\n\t\"\"\"docstring for Node\"\"\"\n\tdef __init__(self, arg):\n\t\tsuper(Node, self).__init__()\n\t\tself.val = arg\n\t\tself.left = None\n\t\tself.right = None\n\n\ndef maxDepth(root):\n\tif root == None:\n\t\treturn 0\n\n\treturn max(maxDepth(root.left), maxDepth(root.right)) + 1\n\n\ndef minDepth(root):\n\tif root == None:\n\t\treturn 0\n\n\t# if root.left and root.right:\n\t# \treturn min(minDepth(root.left), minDepth(root.right)) + 1\n\t# elif root.left:\n\t# \treturn minDepth(root.left) + 1\n\t# else:\n\t# \treturn minDepth(root.right) + 1\n\n\treturn min(minDepth(root.left), minDepth(root.right)) + 1\n\n\n# 4\n# 2 6\n# 1 3 5 7\n\nn1 = Node(1)\nn2 = Node(2)\nn3 = Node(3)\nn4 = Node(4)\nn5 = Node(5)\nn6 = Node(6)\nn7 = Node(7)\n\nn4.left = n2\nn4.right = n6\n\n# n2.left = n1\n# n2.right = n3\n\nn6.left = n5\nn6.right = n7\n\nprint(maxDepth(n4))\nprint(minDepth(n4))\n\n" }, { "alpha_fraction": 
0.4832535982131958, "alphanum_fraction": 0.5071770548820496, "avg_line_length": 13.310344696044922, "blob_id": "5f61efda8e4985ae724c8333b7038c10039497aa", "content_id": "b911565275c3713652641c4f991079a516a6951d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 418, "license_type": "no_license", "max_line_length": 74, "num_lines": 29, "path": "/excrayg/leetcode/python/tow.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\nclass pegs:\n\tdef __init__(self, naam, vals):\n\t\tself.name = naam\n\t\tself.l = vals\n\n\ndef toh(s, h, t, n):\n\tif n == 0:\n\t\treturn\n\n\t\n\ttoh(pegs(s.name, s.l), t, h, n-1)\n\n\t\n\tif s.l:\n\t\tprint(\"moving disk %d from %s to %s\" %(s.l[len(s.l)-1], s.name, t.name))\n\t\tt.l.append(s.l.pop())\n\n\t\n\ttoh(pegs(h.name, h.l), s, t, n-1)\n\n\ns = pegs(\"peg1\", [3,2,1])\nh = pegs(\"peg2\", [])\nt = pegs(\"peg3\", [])\n\ntoh(s,h,t, len(s.l))\n\nprint(t.l)\n\n\n" }, { "alpha_fraction": 0.534378170967102, "alphanum_fraction": 0.582912027835846, "avg_line_length": 21.813953399658203, "blob_id": "b4ca86b5a8871809eca49d334bd3c78abec685aa", "content_id": "97369f67294b40c8d6fb8147184086015b678885", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1978, "license_type": "no_license", "max_line_length": 84, "num_lines": 86, "path": "/excrayg/leetcode/python/leetcode.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\ndef bin_search(arr, s, e, x):\n\tif s < e:\n\t\tm = s + int((e-s)/2)\n\t\tif arr[m] == x:\n\t\t\treturn m\n\n\t\tif x < arr[m]:\n\t\t\treturn bin_search(arr, s, m, x)\n\t\telse:\n\t\t\treturn bin_search(arr, m+1 , e, x )\n\n\treturn -1\n\n#[1,2,3] tgt = 5\ndef two_sum_sort(arr, tgt):\n\tprint(\"Running two_sum_sort\")\n\tfor idx, elem in enumerate(arr):\n\t\telem_to_find = tgt - elem\n\t\tl = bin_search(arr, 0, idx, elem_to_find)\n\t\tif l != -1:\n\t\t\tprint(\"Found the two elems at {} and {}\".format(idx, l))\n\t\t\t\n\t\tr = bin_search(arr, idx, len(arr), elem_to_find)\n\t\tif r != -1:\n\t\t\tprint(\"Found the two elems at {} and {}\".format(idx, r))\n\t\t\t\n\n\ntwo_sum_sort([1,2,3,4], 5)\ntwo_sum_sort([1,2,3,4,5], 9)\ntwo_sum_sort([1,2], 3)\ntwo_sum_sort([1,2,3,4], 15)\n\ndef two_sum_sort_two_ptr(arr, p1, p2, tgt):\n\tif p1 > p2:\n\t\tprint(\"Not found\")\n\t\treturn\n\n\tif arr[p1] + arr[p2] == tgt:\n\t\tprint(\"Found the two elems at {} and {}\".format(p1, p2))\n\n\tif arr[p1] + arr[p2] > tgt:\n\t\treturn two_sum_sort_two_ptr(arr, p1, p2-1, tgt)\n\telse:\n\t\treturn two_sum_sort_two_ptr(arr, p1+1, p2, tgt)\n\n\treturn\n\nprint(\"Running\")\ntwo_sum_sort_two_ptr([1,2,3,4], 0, 3, 5)\nprint(\"Running\")\ntwo_sum_sort_two_ptr([1,2,3,4,5], 0, 4, 9)\nprint(\"Running\")\ntwo_sum_sort_two_ptr([1,2], 0, 1, 3)\nprint(\"Running\")\ntwo_sum_sort_two_ptr([1,2,3,4], 0, 3, 15)\n\ndef lower(c):\n\tif ord(c) >= ord('A') and ord(c) <= ord('Z'):\n\t\treturn chr(ord('a')+(ord(c)-ord('A')))\n\n\treturn c\n\ndef is_valid_palindrome(arr, p1, p2):\n\t# print(p1,p2)\n\tif p1 >= p2:\n\t\treturn True\n\n\tleft_chr = arr[p1]\n\tright_chr = arr[p2]\n\n\tif not (ord(lower(left_chr)) >= ord('a') and ord(lower(left_chr)) <= ord('z')):\n\t\tp1+=1\n\t\treturn is_valid_palindrome(arr, p1, p2)\n\n\telif not (ord(lower(right_chr)) >= ord('a') and ord(lower(right_chr)) <= ord('z')):\n\t\tp2-=1\n\t\treturn is_valid_palindrome(arr, p1, p2)\n\n\telif lower(left_chr) == lower(right_chr):\n\t\treturn is_valid_palindrome(arr, p1+1, 
p2-1)\n\telse:\n\t\treturn False\n\ns = \"A man, a plan, a canal: Panama.\"\nprint(is_valid_palindrome(s, 0, len(s)-1))\n\n\n\n\n\n\n\n\n\n\n\n\n\t\t\n" }, { "alpha_fraction": 0.4336118996143341, "alphanum_fraction": 0.45682451128959656, "avg_line_length": 21.375, "blob_id": "2405a16b00f711b3c652f796e90fd72dc3461dda", "content_id": "660357267cf334df547279cd48737d88444bb8f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1077, "license_type": "no_license", "max_line_length": 73, "num_lines": 48, "path": "/excrayg/leetcode/python/my_book.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\n\ndef lev_dist(a, b):\n l_a = len(a)+1\n l_b = len(b)+1\n d = [[0]*l_b for t in range(l_a)]\n for i in range(l_a):\n d[i][0] = i\n for j in range(l_b):\n d[0][j] = j\n for i in range(1,l_a):\n for j in range(1,l_b):\n if a[i-1] == b[j-1]:\n d[i][j] = d[i-1][j-1]\n else:\n d[i][j] = 1 + min(d[i-1][j-1], min(d[i-1][j], d[i][j-1]))\n\n print(d[l_a-1][l_b-1])\n\n\n#lev_dist(\"abc\", \"abc\")\n#lev_dist(\"qwe\", \"we\")\n\nclass Node:\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\ndef build_bst_helper(arr, l, r):\n if l <= r:\n m = l+(r-l)/2\n n = Node(arr[m])\n n.left = build_bst_helper(arr, l, m-1)\n n.right = build_bst_helper(arr, m+1, r)\n return n\n else:\n return None\n\ndef inorder(n):\n if n != None:\n inorder(n.left)\n print(n.val)\n inorder(n.right)\n\ndef build_bst(sorted_arr):\n n = build_bst_helper(sorted_arr, 0, len(sorted_arr)-1)\n inorder(n)\n\nbuild_bst(list(range(10)))\n\n" }, { "alpha_fraction": 0.4472111463546753, "alphanum_fraction": 0.45119521021842957, "avg_line_length": 23.487804412841797, "blob_id": "a0fe9cdd1f2accf06cf0d6e2a1edf789f1e77791", "content_id": "19a754843a4371faa0352b7913086f9caed4238d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1004, "license_type": "no_license", "max_line_length": 67, "num_lines": 41, "path": "/excrayg/leetcode/python/leetcode_wildcard_matching.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "class Solution(object):\n def isMatch(self, s, p):\n \"\"\"\n :type s: str\n :type p: str\n :rtype: bool\n \"\"\"\n if s == \"\" and p == \"\":\n return True\n \n if p == \"\" or s == \"\":\n return False\n \n char_in_str, rest_char_in_str = s[0], s[1:]\n char_in_pat, rest_char_in_pat = p[0], p[1:]\n \n if char_in_str == char_in_pat or char_in_pat == '?':\n return self.isMatch(rest_char_in_str, rest_char_in_pat)\n\n if char_in_pat == '*':\n n = len(s)\n for i in range(n):\n if self.isMatch(s[i:], rest_char_in_pat):\n return True\n\n return self.isMatch(s, rest_char_in_pat)\n \n return False\n\n\ns = Solution()\nprint(s.isMatch(\"aa\",\"a\"))\n\nprint(s.isMatch(\"aa\",\"aa\"))\nprint(s.isMatch(\"aaa\",\"aa\"))\n\nprint(s.isMatch(\"aa\", \"*\"))\nprint(s.isMatch(\"aa\", \"a*\"))\n\nprint(s.isMatch(\"ab\", \"?*\"))\nprint(s.isMatch(\"aab\", \"c*a*b\"))\n" }, { "alpha_fraction": 0.355498731136322, "alphanum_fraction": 0.39300936460494995, "avg_line_length": 24.521739959716797, "blob_id": "76f8d686e76780916ae3b4f8b1dcb479402a81fe", "content_id": "d7df744cf00012b2dd6a51f63a143d1d92bc7cfe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1173, "license_type": "no_license", "max_line_length": 67, "num_lines": 46, "path": "/excrayg/leetcode/python/gas.py", "repo_name": "excray/coding-algos", "src_encoding": 
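The recursive '?'/'*' matcher in the record above re-explores suffixes and can blow up on star-heavy patterns; a standard O(len(s) * len(p)) table version, shown as an illustrative sketch rather than the repo's code:

def is_match_dp(s, p):
    # dp[i][j] is True when s[:i] matches p[:j].
    dp = [[False] * (len(p) + 1) for _ in range(len(s) + 1)]
    dp[0][0] = True
    for j in range(1, len(p) + 1):  # empty string vs a prefix of stars
        dp[0][j] = dp[0][j - 1] and p[j - 1] == "*"
    for i in range(1, len(s) + 1):
        for j in range(1, len(p) + 1):
            if p[j - 1] == "*":
                dp[i][j] = dp[i - 1][j] or dp[i][j - 1]  # '*' eats a char or nothing
            else:
                dp[i][j] = dp[i - 1][j - 1] and p[j - 1] in ("?", s[i - 1])
    return dp[len(s)][len(p)]

# is_match_dp("aab", "c*a*b") -> False, agreeing with the last test above.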
"UTF-8", "text": "class Solution:\n # @param gas, a list of integers\n # @param cost, a list of integers\n # @return an integer\n \n #COST = [4, 2, 4, 11] gas = [1, 4, 1, 20]\n # S = 2\n # \n def canCompleteCircuit(self, gas, cost):\n # write your code \n sum_rem = 0\n i = 0\n start = -1\n first = True\n notFound = False\n counter = 0\n while i != start and i < len(gas) and counter < 2*len(gas):\n # print(i, start)\n counter+=1\n if sum_rem + gas[i] < cost[i]:\n first = True\n i+=1\n if start == len(gas)-1:\n notFound = True\n break\n else:\n sum_rem += gas[i] - cost[i]\n if first:\n first = False\n start = i\n i+=1\n if i == len(gas):\n i = 0\n \n \n \n if i == len(gas) or notFound or counter==2*len(gas):\n return -1\n \n return start\n\na=[5,0,9,4,3,3,9,9,1,2]\nb=[6,7,5,9,5,8,7,1,10,5]\n\ns = Solution()\ns.canCompleteCircuit(a,b)" }, { "alpha_fraction": 0.46335211396217346, "alphanum_fraction": 0.47001537680625916, "avg_line_length": 26.72857093811035, "blob_id": "f400b9226d43f48d844a0ef39f4914fb7548168b", "content_id": "3ebaf2c2ea23f86ac68970836d65618c11b481d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1951, "license_type": "no_license", "max_line_length": 95, "num_lines": 70, "path": "/excrayg/leetcode/python/word_ladder.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "class Solution(object):\n def findLadders(self, beginWord, endWord, wordlist):\n \"\"\"\n :type beginWord: str\n :type endWord: str\n :type wordlist: Set[str]\n :rtype: List[List[int]]\n \"\"\"\n \n if beginWord == endWord:\n return []\n\n dict1 = set()\n for w in wordList:\n dict1.add(w)\n \n dict1.add(beginWord)\n dict1.add(endWord)\n wordSeq = dict()\n\n for k, v in dict1.items():\n wordSeq[k] = []\n\n self.do_bfs(beginWord, endWord, wordList, wordSeq)\n \n return wordSeq\n \n def get_neigh(self, word, wordList, visited):\n neigh = []\n for idx in range(len(word)):\n for c in range(ord('a'), ord('z')+1):\n new_word = word[0:idx]+chr(c)+word[idx+1:]\n # print(new_word)\n if (new_word in wordList) and (new_word not in visited) and (new_word != word):\n neigh.append(new_word)\n \n return neigh\n \n def do_bfs(self, beginWord, endWord, wordList, wordSeq):\n queue = []\n visited = set()\n queue.append(beginWord)\n short_seq = []\n\n \n while len(queue) != 0:\n # while seq_left:\n word = queue.pop(0)\n neighbours = self.get_neigh(word, wordList, visited)\n queue.extend(neighbours)\n short_seq.append(word)\n visited.add(word)\n\n if word == endWord:\n # print(short_seq, queue)\n min1 = min(min1, len(short_seq))\n # if len(short_seq) != min1:\n # seq_left = False\n # break\n wordSeq.append(short_seq)\n \n return\n \n\nbeginWord = \"a\"\nendWord = \"c\"\nwordList = [\"a\", \"b\", \"c\"]\ns = Solution()\nt = s.findLadders(beginWord, endWord, wordList)\nprint(t)\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.49150142073631287, "alphanum_fraction": 0.5042492747306824, "avg_line_length": 22.433332443237305, "blob_id": "b820829589f0c566f6a3bf2a691f00ac759a5902", "content_id": "8759186084da6003975ba6abc5a8d9d9a36ca95b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 706, "license_type": "no_license", "max_line_length": 58, "num_lines": 30, "path": "/excrayg/leetcode/python/unique_bst.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "class Solution:\n # @paramn n: An integer\n # @return: An integer\n def numTrees(self, n):\n # write your code here\n t = []\n 
self.numTrees_helper(0, n, n, 0, t)\n return len(t)\n \n def numTrees_helper(self, lo, hi, total, count, t):\n if lo >= hi:\n return None\n for i in range(lo, hi):\n count += 1\n self.numTrees_helper(lo, i, total, count, t)\n self.numTrees_helper(i+1, hi, total, count, t)\n if count == total:\n \tt.append(1)\n\n return None\n \ns = Solution()\nt = s.numTrees(0)\nprint(t)\nt = s.numTrees(1)\nprint(t)\nt = s.numTrees(2)\nprint(t)\nt = s.numTrees(3)\nprint(t)\n\n\n\n" }, { "alpha_fraction": 0.5226480960845947, "alphanum_fraction": 0.532171905040741, "avg_line_length": 23.872833251953125, "blob_id": "77ee6c82582074c4038f49cda3702702679a022e", "content_id": "99db48de14d67377da3a1f79dea6a3e682438fe4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4305, "license_type": "no_license", "max_line_length": 65, "num_lines": 173, "path": "/excrayg/leetcode/python/merge_bst.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\n\n#merge two BST\n\nclass Node:\n def __init__(self, val, l=None, r=None):\n self._val = val\n self.left = l\n self.right = r\n\n def __repr__(self):\n return (\"Node: {} \".format(self._val))\n\ndef print_list(node):\n print(\"Printing DLL:\")\n while node:\n print(\"{} \".format(node._val), end=\"\")\n node = node.right\n\n print()\n\ndef print_tree(node):\n print(\"Printing tree:\")\n stack = []\n done = False\n while not done:\n if node:\n stack.append(node)\n node = node.left\n else:\n if len(stack) != 0:\n node = stack.pop()\n print(\"{} \".format(node._val), end=\"\")\n node = node.right\n else:\n done = True\n\n print()\n\ndef count_nodes_in_dll(head):\n node = head\n c = 0\n while node:\n node = node.right\n c+=1\n return c\n\ndef convert_dll_to_bst(head):\n n = count_nodes_in_dll(head)\n return convert_dll_to_bst_helper([head], n)\n\ndef convert_dll_to_bst_helper(node_ptr, n):\n if n <= 0:\n return None\n else:\n left = convert_dll_to_bst_helper(node_ptr, int(n/2))\n node = node_ptr[0]\n node_ptr[0] = node.right\n node.left = left\n right = convert_dll_to_bst_helper(node_ptr, n-int(n/2)-1)\n node.right = right\n return node\n\ndef convert_bst_to_dll(root):\n head = convert_bst_to_dll_helper(root)\n head.left.right = None\n head.left = None\n return head\n\ndef convert_bst_to_dll_helper(root):\n if root is None:\n return None\n else:\n left_list = convert_bst_to_dll_helper(root.left)\n right_list = convert_bst_to_dll_helper(root.right)\n \n left_tail = None\n if left_list:\n left_tail = left_list.left\n left_tail.right = root\n root.left = left_tail\n left_tail = root\n else:\n left_list = root\n left_tail = root\n\n right_tail= None\n if right_list:\n right_tail = right_list.left\n left_tail.right = right_list\n right_list.left = left_tail\n else:\n right_tail = left_tail\n\n right_tail.right = left_list\n left_list.left = right_tail\n\n return left_list\n\ndef merge_circular_dll(left_dll, right_dll):\n if left_dll is None:\n return right_dll\n elif right_dll is None:\n return left_dll\n else:\n # print_list(left_dll)\n # print_list(right_dll)\n left_dll_head = left_dll\n left_dll_tail = left_dll_head.left\n right_dll_head = right_dll\n right_dll_tail = right_dll_head.left\n left_dll_tail.right = right_dll_head\n right_dll_head.left = left_dll_tail\n right_dll_tail.right = left_dll_head\n left_dll_head.left = right_dll_tail\n return left_dll_head\n\ndef merge_sorted_dll(left_dll, right_dll):\n if left_dll is None:\n return right_dll\n elif right_dll is None:\n return left_dll\n else:\n 
result = Node(-1)\n dummyNode = result\n\n while left_dll and right_dll:\n if left_dll._val < right_dll._val:\n result.right = left_dll\n left_dll.left = result\n result = left_dll\n left_dll = left_dll.right\n else:\n result.right = right_dll\n right_dll.left = result\n result = right_dll\n right_dll = right_dll.right\n\n if left_dll:\n result.right = left_dll\n else:\n result.right = right_dll\n\n return dummyNode.right\n\ndef merge_two_bst(root1, root2):\n node1 = convert_bst_to_dll(root1)\n print_list(node1)\n node2 = convert_bst_to_dll(root2)\n print_list(node2)\n node = merge_sorted_dll(node1, node2)\n print_list(node)\n root = convert_dll_to_bst(node)\n return root\n\n\ndef create_bst(l):\n if len(l) == 0:\n return None\n else:\n n = len(l)\n m = int(n/2)\n node = Node(l[m])\n node.left = create_bst(l[:m])\n node.right = create_bst(l[m+1:])\n return node\n\ndef main():\n root1 = create_bst([2,5,11,17,23])\n root2 = create_bst([3,7,13,19])\n root = merge_two_bst(root1, root2)\n print_tree(root)\n\n\nmain()\n" }, { "alpha_fraction": 0.5182807445526123, "alphanum_fraction": 0.5207926034927368, "avg_line_length": 26.3435115814209, "blob_id": "ee729ff15cd9e937ccb9eda818297dec1da5489d", "content_id": "37568d707af3044c6e38afd0bd521ad0d7271c87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3583, "license_type": "no_license", "max_line_length": 144, "num_lines": 131, "path": "/excrayg/leetcode/cpp/word_search.cpp", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\n#include <queue>\n#include <unordered_set>\n#include <unordered_map>\n#include <utility>\n#include <algorithm>\n#include <iostream>\nusing namespace std;\n\ntypedef vector<vector<string>> SolVector;\n\n\nvector<string> get_neigh(string node, unordered_set<string> &visited, vector<string> &dict)\n{\n \n int m = node.length();\n vector<string> neighs;\n for(int i = 0; i < m; i++)\n {\n char to_replace = node[i];\n for(char j = 'a'; j <= 'z'; j++)\n {\n if(j == to_replace)\n {\n continue;\n }\n node[i] = j;\n // cout<<node<<\" \";\n unordered_set<string>::iterator it = visited.find(node);\n vector<string>::iterator it1 = find(dict.begin(), dict.end(), node);\n if(it1 != dict.end() && it == visited.end())\n {\n neighs.emplace_back(node);\n }\n }\n node[i] = to_replace;\n }\n return neighs;\n}\n\nvector<string> construct_path(unordered_map<int, string> &path, int total_levels)\n{\n vector<string> new_path;\n for(int i = 0; i <= total_levels; i++)\n {\n new_path.emplace_back(path[i]);\n // cout<<path[i]<<\" \";\n }\n // cout<<endl;\n return new_path;\n}\n\nvoid bfs(string start, string end, vector<string> &dict, SolVector &all_paths)\n{\n queue<pair<vector<string>, string>> q;\n unordered_set<string> visited;\n // unordered_map<int, string> path;\n \n vector<string> path;\n q.emplace(make_pair(path, start));\n visited.emplace(start);\n int min_level = 1000;\n\n while(!q.empty())\n {\n pair<vector<string>, string> elem = q.front();\n q.pop();\n vector<string> cur_path = elem.first;\n string node = elem.second;\n // cout<<node<<\": \"<<level<<endl;\n // path[level] = node;\n // cout<<node<<endl;\n if(node == end)\n {\n int level = cur_path.size();\n if(min_level > level) min_level = level;\n if(level > min_level) break;\n // vector<string> new_path = construct_path(path, level);\n cur_path.emplace_back(node);\n all_paths.emplace_back(cur_path); \n visited = unordered_set<string>();\n // cout<<\"Found\"<<endl;\n }\n else\n {\n vector<string> neigh = 
get_neigh(node, visited, dict);\n cur_path.emplace_back(node);\n for(auto n : neigh)\n {\n // cout<<n<<\" \";\n q.emplace(make_pair(cur_path, n));\n // visited.emplace(n);\n }\n // cout<<endl;\n }\n }\n}\n\nvector<vector<string> > findLadders(string start, string end, vector<string> &dict) {\n // Do not write main() function.\n // Do not read input, instead use the arguments to the function.\n // Do not print the output, instead return values as specified\n // Still have a doubt. Checkout www.interviewbit.com/pages/sample_codes/ for more details\n \n \n SolVector all_ans;\n if(start == end)\n {\n vector<string> v = {start};\n all_ans.emplace_back(v);\n return all_ans;\n }\n dict.emplace_back(end);\n bfs(start, end, dict, all_ans);\n \n return all_ans;\n}\n\nint main()\n{\n vector<string> v = {\"baba\",\"abba\",\"aaba\",\"bbbb\",\"abaa\",\"abab\",\"aaab\",\"abba\",\"abba\",\"abba\",\"bbba\",\"aaab\",\"abaa\",\"baba\",\"baaa\",\"bbaa\",\"babb\"};\n SolVector ans = findLadders(\"bbaa\", \"babb\", v);\n for(auto a : ans)\n {\n for(auto p : a)\n {\n cout<<p<<\" \";\n }\n cout<<endl;\n }\n return 0;\n}\n" }, { "alpha_fraction": 0.51474928855896, "alphanum_fraction": 0.5737463235855103, "avg_line_length": 13.857142448425293, "blob_id": "823538a19082a34d6f4bcbb83ff3f6a0ed29ebda", "content_id": "95fbff385db942e093de5785df990f9f2578f212", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 678, "license_type": "no_license", "max_line_length": 33, "num_lines": 49, "path": "/excrayg/leetcode/python/find_max_path.py.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "class Node(object):\n\t\"\"\"docstring for Node\"\"\"\n\tdef __init__(self, arg):\n\t\tsuper(Node, self).__init__()\n\t\tself.val = arg\n\t\tself.left = None\n\t\tself.right = None\n\n\n\nn1 = Node(1)\nn2 = Node(2)\nn3 = Node(3)\nn4 = Node(4)\nn5 = Node(5)\nn6 = Node(6)\nn7 = Node(7)\n\n# 4\n# 2 6\n# 1 7 3 5\n\nn4.left = n2\nn4.right = n6\n\nn2.left = n1\nn2.right = n7\n\nn6.left = n3\nn6.right = n5\n\nm = float(\"-inf\")\ndef find_max_path(node, m):\n\t# global m\n\tif not node:\n\t\treturn 0\n\tl = find_max_path(node.left, m)\n\tr = find_max_path(node.right, m)\n\tm[0] = max(l+node.val+r, m[0])\n\tprint(m)\n\tret = node.val + max(l,r)\n\tif ret>0:\n\t\treturn ret\n\telse:\n\t\treturn 0\n\nt=[m]\nprint(find_max_path(n4, t))\nprint(t[0])" }, { "alpha_fraction": 0.5768725275993347, "alphanum_fraction": 0.5913271903991699, "avg_line_length": 19.026315689086914, "blob_id": "bbec3de391c45db5fb8e95ad2cb8cbba6ce45690", "content_id": "064e9ce21725bc7781c1cddd6db1d92470085399", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 761, "license_type": "no_license", "max_line_length": 49, "num_lines": 38, "path": "/excrayg/leetcode/python/inv_pair.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "def count_inv_pairs( arr ):\n\treturn count_inv_pairs_helper(arr, 0, len(arr)-1)\n\ndef count_inv_pairs_helper(arr, start, end):\n\tif start >= end:\n\t\treturn 0\n\n\tmid = start + (end-start)//2\n\n\tcount = count_inv_pairs_helper(arr, start, mid)\n\tcount += count_inv_pairs_helper(arr, mid+1, end)\n\tcount += merge_count(arr, start, mid, end)\n\n\treturn count\n\ndef merge_count(arr, start, mid, end):\n\ti, j, count, temp = start, mid+1, 0, []\n\twhile i <= mid and j <= end:\n\t\tif arr[i] > arr[j]:\n\t\t\t# arr[i..mid] all exceed arr[j], so each of them forms an inversion with it\n\t\t\tcount += mid - i + 1\n\t\t\ttemp.append(arr[j])\n\t\t\tj += 1\n\t\telse:\n\t\t\ttemp.append(arr[i])\n\t\t\ti += 1\n\n\twhile i <= mid:\n\t\ttemp.append(arr[i])\n\t\ti += 1\n\twhile j <= end:\n\t\ttemp.append(arr[j])\n\t\tj += 1\n\t# copy the merged run back so parent calls see sorted data\n\tfor k in range(start, end+1):\n\t\tarr[k] = temp[k-start]\n\n\treturn count\n\nprint(count_inv_pairs([4,3,2,1]))\n" }, { "alpha_fraction": 0.4920634925365448, "alphanum_fraction": 0.5343915224075317, "avg_line_length": 27.80769157409668, "blob_id": "5b49024dc1cd7a4f1d0f08439967f85e0c87db61", "content_id": "9c20663c545d29832d49996fd4a5d4cdc9840137", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 756, "license_type": "no_license", "max_line_length": 53, "num_lines": 26, "path": "/excrayg/leetcode/python/indent.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "def uberPool(A, B, C, X, Y):\n    initialDistance = abs(B[0]-A[0]) + abs(B[1]-A[1])\n    \n    if initialDistance == 0:\n        return -1\n    maxDist = 2*initialDistance\n    distTillNow = abs(C[0]-A[0]) + abs(C[1]-A[1])\n    distTillX = abs(C[0]-X[0]) + abs(C[1]-X[1])\n    distTillY = abs(C[0]-Y[0]) + abs(C[1]-Y[1])\n    distXToB = abs(X[0]-B[0]) + abs(X[1]-B[1])\n    distYToB = abs(Y[0]-B[0]) + abs(Y[1]-B[1])\n    \n    finalX = distTillNow + distTillX + distXToB\n    finalY = distTillNow + distTillY + distYToB\n    \n    if finalX <= maxDist and finalY <= maxDist:\n        if distTillX < distTillY:\n            return 1\n        else:\n            return 2\n    elif finalX <= maxDist:\n        return 1\n    elif finalY <= maxDist:\n        return 2\n    else:\n        return -1\n    \n" }, { "alpha_fraction": 0.39123377203941345, "alphanum_fraction": 0.4107142984867096, "avg_line_length": 23.639999389648438, "blob_id": "786ee49744ad1dd9cfdf32cdedcb9d407ea99151", "content_id": "c47c19e9c2e8ca135be3792f32ac484189d854da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 616, "license_type": "no_license", "max_line_length": 42, "num_lines": 25, "path": "/excrayg/leetcode/python/jump_game.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "class Solution:\n    # @param A, a list of integers\n    # @return a boolean\n    def canJump(self, A):\n        # write your code here\n        n = len(A)\n        if n == 0 or n == 1:\n            return True\n        canReach = [False]*n\n        canReach[-1] = True\n        i = n-2\n        while i >= 0:\n            if i + A[i] >= n:\n                canReach[i] = True\n            elif canReach[i+A[i]] == True:\n                canReach[i] = True\n            else:\n                canReach[i] = False\n            \n            i-=1\n            \n        return canReach[0]\n\ns = Solution()\nprint(s.canJump([3,2,1,0,4]))\n" }, { "alpha_fraction": 0.3766842484474182, "alphanum_fraction": 0.4188635051250458, "avg_line_length": 27.93220329284668, "blob_id": "7af35c4e718ba4a4f9660fcfb8be7f070b3ad053", "content_id": "79f4d549f0f9b1a2ca0f77e865566b42481cedb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1707, "license_type": "no_license", "max_line_length": 91, "num_lines": 59, "path": "/excrayg/leetcode/python/interleave.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "class Solution:\n    \"\"\"\n    @params s1, s2, s3: Three strings as description.\n    @return: return True if s3 is formed by the interleaving of\n             s1 and s2 or False if not.\n    @hint: you can use [[True] * m for i in range (n)] to allocate a n*m matrix.\n    \"\"\"\n    def isInterleave(self, s1, s2, s3):\n        # write your code here\n        n = len(s1)\n        m = len(s2)\n        \n        # mat = [[False] * m+1 for i in range(n+1)]\n        \n        \n        return self.ih( s1, 0, s2, 0, s3, 0)\n        \n    def ih(self, s1, i, s2, j, s3, k):\n        print(i,j,k)\n        if i == len(s1) and j == len(s2) and k == len(s3):\n            
return True\n \n if k == len(s3):\n return False\n \n if i == len(s1) and j == len(s2):\n return False\n \n if i == len(s1):\n if s2[j] == s3[k]:\n return self.ih(s1, i, s2, j+1, s3, k+1)\n else:\n return False\n \n elif j == len(s2):\n if s1[i] == s3[k]:\n return self.ih(s1, i+1, s2, j, s3, k+1)\n else:\n return False\n \n else:\n if s1[i] == s2[j] and s1[i] == s3[k]:\n return self.ih(s1, i+1, s2, j, s3, k+1) or self.ih(s1, i, s2, j+1, s3, k+1)\n elif s1[i] == s3[k]:\n return self.ih(s1, i+1, s2, j, s3, k+1)\n \n elif s2[j] == s3[k]:\n return self.ih(s1, i, s2, j+1, s3, k+1)\n \n else:\n return False\n\ns = Solution()\na = \"aabcc\"\nb = \"dbbca\"\ns_t = \"aadbbcbcac\"\ns_f = \"aadbbbaccc\"\nprint(s.isInterleave(a,b,s_t))\nprint(s.isInterleave(a,b,s_f))\n" }, { "alpha_fraction": 0.6932656764984131, "alphanum_fraction": 0.7126383781433105, "avg_line_length": 27.8799991607666, "blob_id": "9d61e513e2bf5d662fab980aacfb398bfca62846", "content_id": "0935485600f1c0e5b63b8dde472c3030ad8486c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2168, "license_type": "no_license", "max_line_length": 80, "num_lines": 75, "path": "/excrayg/leetcode/python/fun_algos.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "from random import randrange\n\n\n#Problem statement:\n#Given a set of people who are sitting on some seats, \n#and they like to sit on some other seats,\n#find a permutation such that max people are satisfied. \n#[2,2,0,5,3,5,7,4]\ndef naive_max_perm(preferred_seats, seats_in_contention):\n\tif len(seats_in_contention) <= 1:\n\t\treturn seats_in_contention\n\tseats_that_are_preferred = set(preferred_seats[i] for i in seats_in_contention)\n\tseats_not_preferred = seats_in_contention - seats_that_are_preferred\n\tif seats_not_preferred:\n\t\tseats_in_contention -= seats_not_preferred\n\t\treturn naive_max_perm(preferred_seats, seats_in_contention)\n\n\treturn seats_in_contention\n\nfrom collections import Counter\ndef max_perm(preferred_seats):\n\tn = len(preferred_seats)\n\tseats_in_contention = set(range(n))\n\tcount = Counter(preferred_seats)\n\tseats_not_preferred = [i for i in seats_in_contention if count[i]==0 ]\n\twhile seats_not_preferred:\n\t\tk = seats_not_preferred.pop()\n\t\tseats_in_contention.remove(k)\n\t\tj = preferred_seats[k]\n\t\tcount[j] -= 1\n\t\tif count[j] == 0:\n\t\t\tseats_not_preferred.append(j)\n\n\treturn seats_in_contention\n\n#\ndef naive_celebrity(celeb_graph):\n\tnum_nodes = len(celeb_graph)\n\tfor node1 in range(num_nodes):\n\t\tfor node2 in range(num_nodes):\n\t\t\tif node1 == node2:\n\t\t\t\tcontinue\n\t\t\tif celeb_graph[node1][node2] == 1: #node1 knows someone\n\t\t\t\tbreak\n\t\t\tif celeb_graph[node2][node1] == 0: #someone doesnt know node1\n\t\t\t\tbreak\n\t\telse:\n\t\t\tprint(node1)\n\n\nimport unittest\nimport random\nclass TestSequenceFunctions(unittest.TestCase):\n\n\tdef setUp(self):\n\t\tself.preferred_seats = [2,2,0,5,3,5,7,4]\n\t\tself.seats_in_contention = set(range(len(self.preferred_seats)))\n\n\tdef test_naive_max_perm(self):\n\t# make sure the shuffled sequence does not lose any elements \n\t\tperm = naive_max_perm(self.preferred_seats, self.seats_in_contention)\n\t\tself.assertEqual(perm, {0,2,5}) \n\n\tdef test_max_perm(self):\n\t\tperm = max_perm(self.preferred_seats)\n\t\tself.assertEqual(perm, {0,2,5})\n\n\tdef test_naive_celeb(self):\n\t\tn = 10\n\t\tceleb_graph = [[randrange(2) for i in range(n)] for i in 
range(n)]\n\t\tprint(celeb_graph)\n\t\tnaive_celebrity(celeb_graph)\n\nif __name__ == '__main__':\n\tunittest.main()\n\n\n" }, { "alpha_fraction": 0.4064587950706482, "alphanum_fraction": 0.4187082350254059, "avg_line_length": 23.97222137451172, "blob_id": "2405a16b00f711b3c652f796e90fd72dc3461dda", "content_id": "660357267cf334df547279cd48737d88444bb8f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1077, "license_type": "no_license", "max_line_length": 73, "num_lines": 48, "path": "/excrayg/leetcode/python/comb.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "class Solution:\n    \"\"\" \n    @param n: Given the range of numbers\n    @param k: Given the numbers of combinations\n    @return: All the combinations of k numbers out of 1..n \n    \"\"\"\n    def combine(self, n, k): \n        # write your code here \n        l_l = []\n        l = []\n        self.combine_h(n, 0, k, l, l_l)\n        return l_l\n        \n    def combine_h(self, n, index, k, l, l_l):\n        if index == n:\n            if len(l) != 0:\n                l_l.append(list(l))\n                # print(l_l)\n            return\n            \n        if k == 0:\n            if len(l) != 0:\n                l_l.append(list(l))\n                # print(l_l)\n            return\n        \n        l.append(index+1)\n        self.combine_h( n, index+1, k-1, l, l_l)\n        l.pop()\n        \n        self.combine_h( n, index+1, k, l, l_l)\n        \n        return\n\ns = Solution()\nprint(s.combine(2, 1))" }, { "alpha_fraction": 0.5867508053779602, "alphanum_fraction": 0.6214510798454285, "avg_line_length": 12.125, "blob_id": "5e4d01b20c690090fa490ed958026064234185d1", "content_id": "08f07be6fc686652240376b4fe7117d42ca1c508", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 317, "license_type": "no_license", "max_line_length": 57, "num_lines": 24, "path": "/excrayg/leetcode/cpp/graphs.cpp", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\n\n// 1->2 2->1 1->3\n// 1 -> 2,3\n// 2 -> 1\n\n//Implement DFS and BFS\n\n\ntypedef unordered_map<int, unique_ptr<Node>> Graph;\ntypedef vector<unique_ptr<Node>> Edges;\nstruct Node\n{\n    int id;\n    Edges edges;\n};\n\nvoid add_edge(unique_ptr<Node> from, unique_ptr<Node> to)\n{\n    from->edges.emplace_back(to);\n}\n\nint main()\n{\n    \n}\n" }, { "alpha_fraction": 0.5379310250282288, "alphanum_fraction": 0.5568965673446655, "avg_line_length": 27.149999618530273, "blob_id": "7ed2837e63ea37bbebe5bfdc847ea3794265de3f", "content_id": "ffa8310c6f7a00649f9b0468cf756851c90bf70c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 592, "license_type": "no_license", "max_line_length": 88, "num_lines": 20, "path": "/excrayg/leetcode/cpp/todo/swap_adj_nodes.cpp", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "// # swap nodes in pairs\n\n// # Question:\n// # Given a linked list, swap every two adjacent nodes and return its head.\n// # For example,\n// # Given 1->2->3->4, you should return the list as 2->1->4->3.\n// # Your algorithm should use only constant space. 
You may not modify the values in the\n// # list, only nodes itself can be changed.\n\n// class Node:\n// def __init__(self, val):\n// self.val = val\n// self.next = None\n\n// def swap_nodes(a):\n// dummy = Node(0)\n// prev = dummy\n// prev.next = a\n// a1 = a\n// b1 = a.next\n \n \n\n \n\n" }, { "alpha_fraction": 0.4734446108341217, "alphanum_fraction": 0.512898325920105, "avg_line_length": 18.84848403930664, "blob_id": "98868cbcc3e869b35022dcb6f7ec5a544e4fa7ac", "content_id": "dc66818cbaafa282b01bd941715a38c6a638312c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 659, "license_type": "no_license", "max_line_length": 60, "num_lines": 33, "path": "/excrayg/leetcode/python/num_ways.py", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\ndef count1(k, score_ways):\n combinations = [0]*(k+1)\n combinations[0] = 1\n for score in score_ways:\n for j in range(score, k+1):\n combinations[j] += combinations[j-score]\n return combinations[k]\n\ndef count2(k, score_ways, i, d):\n if k == 0:\n return 1\n n = len(score_ways)\n if i == n:\n return 0\n\n if k < 0:\n return 0\n\n\n if (i,k) in d:\n return d[(i,k)]\n\n d[(i,k)] = count2(k-score_ways[i], score_ways, i, d) + \\\n count2(k, score_ways, i+1, d)\n\n return d[(i,k)]\n\n\nk = 120\nscore_ways = [2,3,4,5,7]\n\nprint(count1(k, score_ways))\nprint(count2(k,score_ways,0,{}))\n\n\n\n" }, { "alpha_fraction": 0.534866452217102, "alphanum_fraction": 0.5400593280792236, "avg_line_length": 22.63157844543457, "blob_id": "bf8ce0f9dd6281ed680c79ffe384815be948375a", "content_id": "f78bf09f0d05097242a4b60f659c2099486bb467", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1352, "license_type": "no_license", "max_line_length": 91, "num_lines": 57, "path": "/excrayg/leetcode/cpp/todo/longest_substr_atmost_two_distinct_chars.cpp", "repo_name": "excray/coding-algos", "src_encoding": "UTF-8", "text": "\n\n// # Given a string S, find the length of the longest substring T that contains at most two\n// # distinct characters.\n// # For example,\n// # Given S = “eceba”,\n// # T is \"ece\" which its length is 3.\n\n#include <iostream>\n#include <unordered_set>\n#include <string>\nusing namespace std;\n\nstring print_longest(string s);\n\nint main()\n{\n // string s;\n // cout << \"Enter a string:\\n\";\n // cin >> s;\n \n cout << print_longest(\"eceba\");\n cout << print_longest(\"eceebbbbbbca\");\n cout << print_longest(\"eceba\");\n cout << print_longest(\"eceba\");\n cout << print_longest(\"eceba\");\n cout << print_longest(\"eceba\");\n cout << print_longest(\"eceba\");\n}\n\nstring print_longest(string s)\n{\n int max_len = 0;\n int max_start = 0, max_end = 0;\n \n int start_idx = 0;\n \n while(start_idx < s.length())\n {\n unordered_set<char> unique_chars;\n int tail = start_idx;\n while(unique_chars.size() <= 2 && tail < s.length())\n {\n char c = s[tail];\n unique_chars.emplace(c);\n tail++;\n }\n int cur_len = tail-start_idx-1;\n if(cur_len > max_len)\n {\n max_len = cur_len;\n max_start = start_idx;\n }\n start_idx++;\n }\n \n string longest_str = s.substr(max_start, max_len) + \"\\n\";\n return longest_str;\n}" } ]
67
azzhu/Deeps_Inference_Package
https://github.com/azzhu/Deeps_Inference_Package
9007000db012faee1bfd7570f0ea14d25992ae8c
6d00165ded88a59f21d95c4c769e28709a3a7f3d
519ec4cef9693bfa9ea511bd1b6311d1332ed09b
refs/heads/master
2023-01-14T12:45:49.380077
2020-11-25T07:34:18
2020-11-25T07:34:18
311,565,653
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6054794788360596, "alphanum_fraction": 0.6410958766937256, "avg_line_length": 21.8125, "blob_id": "59afca181fbb5fce33a0dc0009fe957eff08eb1d", "content_id": "68e34ef2ea76e1b135171deb090cb8c625f2fa26", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 465, "license_type": "permissive", "max_line_length": 63, "num_lines": 16, "path": "/build2exe.py", "repo_name": "azzhu/Deeps_Inference_Package", "src_encoding": "UTF-8", "text": "#!/GPFS/zhangli_lab_permanent/zhuqingjie/env/py3_tf2/bin/python\n'''\n@Time : 20/09/11 下午 05:02\n@Author : zhuqingjie \n@User : zhu\n@FileName: build2exe.py\n@Software: PyCharm\n'''\nimport PyInstaller.__main__\n\nPyInstaller.__main__.run([\n '--onefile', # 生成单独一个exe文件,而不是一个文件夹\n '--clean', # 清除上次运行的缓存\n '--noupx', # 不压缩,快速发布,正式发布的时候还是可以压缩\n 'main.py'\n])\n" }, { "alpha_fraction": 0.583116888999939, "alphanum_fraction": 0.6142857074737549, "avg_line_length": 19.810810089111328, "blob_id": "aed8d5284c6f61a550dba0ecadc8ebeee399794e", "content_id": "23e4bd2c2cb5e9ad0a8d1ace3aefe68f3f677d15", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 782, "license_type": "permissive", "max_line_length": 105, "num_lines": 37, "path": "/config.py", "repo_name": "azzhu/Deeps_Inference_Package", "src_encoding": "UTF-8", "text": "# param\ngpus = [\n # 0,\n # 1,\n # 2,\n 7,\n]\ngpu_nbs = len(gpus)\nepoch = 20\nbatch_per_gpu = 8\nbatch_size = batch_per_gpu * gpu_nbs\n\n# print('batch_per_gpu:{}'.format(batch_per_gpu))\n\nprjdir = '/home/zhangli_lab/zhuqingjie/DATA/prj'\n\n# data init\n# 可以用的hw值:384,448,512\nsize = hw = 256\nstep = 80\ndata_path = f'{prjdir}/tunet_onesample/data/'\n\nworkdir = f'{prjdir}/tunet_onesample/'\nlogdir = f'{prjdir}/tunet_onesample/logdir/'\n\nrestore_model = False\nrestore_path = f'{prjdir}/tunet_onesample/logdir'\n\n\ndef read_hot_config():\n lines = open('hot_config', 'r').readlines()\n return {l.split('=')[0].strip(): l.split('=')[1].strip() for l in lines if l.strip() and l[0] != '#'}\n\n\nif __name__ == '__main__':\n hot_params = read_hot_config()\n print(hot_params)\n" }, { "alpha_fraction": 0.6512276530265808, "alphanum_fraction": 0.6981026530265808, "avg_line_length": 33.480770111083984, "blob_id": "a20c6a6cfa626e9d436c4018b0dce2c254a3dec1", "content_id": "d9931e718d466c26ba6d59f32911074dd6f4cae1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1807, "license_type": "permissive", "max_line_length": 144, "num_lines": 52, "path": "/README.md", "repo_name": "azzhu/Deeps_Inference_Package", "src_encoding": "UTF-8", "text": "# Deeps Server Inference Package\nA released inference package of [deeps server](http://deeps.cibr.ac.cn/), which easily processes your data in local computer.\n![avatar](imgs/img.jpg)\n\n## Documentation\n\n### Download\nThree files are available, you can download files from [here](http://119.90.33.35:3557/sharing/wJWmfODpQ):\n \n- **Executable file**: main.exe;\n- **Config file**: config.yaml;\n- **Pre-trained model file**: model/.\n\n### Configuration\nModify the config file: ```config.yaml```.\n\n- **img_path**: The path of an image(ONLY ONE), or the folder path of lots of images;\n- **model_path**: Model path. 
One model has two files, for example:\n\n model_68900.data-00000-of-00001\n model_68900.index\n \n So parameters ```model_path``` should be set to ```{current directory}\\model_68900```\n- **sr_or_os**: Option:sr,os. If your task is \"image super-resolution\", you should select ```sr```,\nconversely, you should select ```os```. But it's important to note that parameters: ```sr_or_os``` and ```model_path``` should be corresponding.\n\n### Run\nDouble-click ```main.exe``` to run, a cmd window will pop up, and then the output text will print during the executive process.\n\n## Model Zoo\n\n* Super Resolution\n \n DeepS: [download](http://119.90.33.35:3557/sharing/IK0i5v6K0)\n \n JustUnet: [download](http://119.90.33.35:3557/sharing/p8tFZhzeD)\n\n* Optical Section\n\n Deeps: [download](http://119.90.33.35:3557/sharing/cD7odmZI5)\n\n## Useful Links\n\n💜 Deeps homepage: http://deeps.cibr.ac.cn/\n\n💜 Deeps documentation: https://github.com/azzhu/deeps/blob/master/SERVER/webserver_doc.md\n\n💜 Deeps repository: https://github.com/azzhu/deeps\n\n💜 Deeps inference package repository: https://github.com/azzhu/Deeps_Inference_Package\n\n💜 CIBR homepage: http://www.cibr.ac.cn/" }, { "alpha_fraction": 0.4685727655887604, "alphanum_fraction": 0.4825141727924347, "avg_line_length": 28.80281639099121, "blob_id": "5cd854e8a037c9eb49867468a33d4bc2b7c140c1", "content_id": "71640ed2e6e44f52afb37ae051d0d45c6a1dc544", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4480, "license_type": "permissive", "max_line_length": 98, "num_lines": 142, "path": "/main.py", "repo_name": "azzhu/Deeps_Inference_Package", "src_encoding": "UTF-8", "text": "#!/GPFS/zhangli_lab_permanent/zhuqingjie/env/py3_tf2/bin/python\n'''\n@Time : 20/09/11 下午 02:43\n@Author : zhuqingjie \n@User : zhu\n@FileName: main.py\n@Software: PyCharm\n'''\nimport traceback\n\ntry:\n import yaml, os, cv2\n from pathlib import Path\n import numpy as np\n import tensorflow as tf\n import tensorflow.contrib\n\n tf.get_logger().setLevel('ERROR')\n\n\n def kvprint(k, v, k_len=15):\n k = k + ':'\n if len(k) >= k_len:\n print(f'{k}{v}')\n else:\n print(f'{k}{\" \" * (k_len - len(k))}{v}')\n\n\n kvprint(\"Tensorflow version\", tf.__version__)\n\n # load config file\n cfg_path = 'config.yaml'\n if not Path(cfg_path).exists():\n print(f'the {cfg_path} file in the same folder is not exists!')\n os.system('pause')\n exit()\n cfg = yaml.safe_load(open('config.yaml', 'r'))\n\n # print config file\n print(f'\\n{\"+\" * 20} config {\"+\" * 20}')\n for k, v in cfg.items():\n kvprint(k, v)\n print(f'{\"+\" * 20} config {\"+\" * 20}\\n')\n\n # import module\n if cfg['sr_or_os'] == 'sr':\n from model import UNET_sr as unet\n elif cfg['sr_or_os'] == 'os':\n from model import UNET_os as unet\n else:\n print('please set a right value of param sr_or_os in config.yaml, option values: sr, os.')\n\n\n def load_img(img_dir):\n img_dir = Path(img_dir)\n if img_dir.is_dir():\n files = list(img_dir.iterdir())\n # 过滤掉上次预测产生的结果\n files = [f for f in files if not f.stem.endswith('_predict')]\n for f in files:\n img = cv2.imread(str(f))\n if img is not None:\n yield img, str(f)\n else:\n img = cv2.imread(str(img_dir))\n if img is not None:\n yield img, str(img_dir)\n\n\n def inference():\n # load img\n if not Path(cfg['img_path']).exists():\n print('please check if the path of image is exists or not.')\n return\n imgs_and_paths = load_img(cfg['img_path'])\n\n # load model and run\n model = unet(predict_flag=True)\n with 
tf.Session(graph=model.graph) as sess:\n saver = tf.train.Saver()\n try: # 这一步如果报错,很有可能是os、sr模型没有对应正确\n saver.restore(sess, cfg['model_path'])\n print(f'load model from: {cfg[\"model_path\"]}')\n except Exception:\n traceback.print_exc()\n print('''\n please check if the param \"model_path\" and \"sr_or_os\" are correct or not!\n \"model_path\" and \"sr_or_os\" must be corresponding.\n ''')\n\n is_has_imgs = False\n for img, img_path in imgs_and_paths:\n is_has_imgs = True\n print('-' * 100)\n kvprint('Input', img_path)\n # convert data\n if ((img[:, :, 0] == img[:, :, 1]) *\n (img[:, :, 0] == img[:, :, 2])).all(): # gray img\n x = img[np.newaxis, :, :, :1]\n kvprint('Channel', '1')\n else: # color img\n x = np.transpose(img, [2, 0, 1])\n x = x[:, :, :, np.newaxis]\n kvprint('Channel', '3')\n x = x.astype(np.float16) / 255\n\n res = sess.run(model.prd, feed_dict={model.x: x})\n\n # save result\n if len(res) == 1:\n res = res[0, :, :, 0]\n else:\n res = res[:, :, :, 0]\n res = np.transpose(res, [1, 2, 0])\n res = np.round(res * 255).astype(np.uint8)\n src_path = Path(img_path)\n dst_path = str(Path(src_path.parent, f'{src_path.stem}_predict.tif'))\n cv2.imwrite(dst_path, res)\n kvprint('Output', dst_path)\n\n if not is_has_imgs:\n print('No eligible images were found!')\n\n\n if __name__ == '__main__':\n inference()\n\n\nexcept Exception as e:\n print(e)\n traceback.print_exc()\n\nfinally:\n os.system('pause')\n\n'''\n更新内容:\n1,异常参数的判断;\n2,彩色图像的支持;\n3,批处理图像的支持(必须要求同一文件夹下的图像具有相同分辨率,有待改进);\n4,解决了上一条的问题,允许同一批次下有不同分辨率的图像;\n'''\n" } ]
4
4rozbior/d-c-s
https://github.com/4rozbior/d-c-s
a03ee4d2295785d9becfb891a8c04cd93bf47527
499728d7ef5320a0e7465ec7a800ce119b971f9d
7e082f3e224794c898fd6d2893cc2637b1a4ee1c
refs/heads/master
2020-05-20T16:05:45.238294
2015-07-13T22:38:21
2015-07-13T22:38:21
39,039,226
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4781250059604645, "alphanum_fraction": 0.48472222685813904, "avg_line_length": 29.01041603088379, "blob_id": "cab27ecbb5dbbb1ddec6bba80cd2f527a0f078d8", "content_id": "8c979edbd5dce9e832e694938977087f8cebfc30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2880, "license_type": "no_license", "max_line_length": 107, "num_lines": 96, "path": "/MultiDict.py", "repo_name": "4rozbior/d-c-s", "src_encoding": "UTF-8", "text": "# -*- cp1250 -*-\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nclass MultiDict(object):\n \n def __init__(self,url=\"http://pl.bab.la/slownik/angielski-polski/\"):\n self.Dict = {}\n self.cyfra = input('Podaj jezyk: \\n \\\n 1) angielski \\n \\\n 2) niemiecki \\n \\\n 3) rosyjski \\n \\\n ---> ')\n jezyk = {1:'angielski', 2:'niemiecki', 3:'rosyjski'}\n url = 'http://pl.bab.la/slownik/' + jezyk[self.cyfra] + '-polski/'\n self.url = url\n\n def wypCyfre(self):\n return self.cyfra\n\n def seturl(self,url):\n self.url = url\n \n def _get_html_page(self,url):\n \"\"\"Just to fetch url source into a string\"\"\"\n req = requests.get(url)\n return req\n\n def _chckifWord(self,obj):\n \"\"\"\"\"\"\n return type(obj) == str\n \n def _chckifList(self,obj):\n \"\"\"\"\"\"\n return type(obj) == list\n \n def addWord(self,obj):\n \"\"\"obj: string/list/tuple.\"\"\"\n if self._chckifWord(obj) and obj not in self.Dict:\n self.Dict[obj] = []\n \n elif self._chckifList(obj):\n self.Dict.update({word:[] for word in obj})\n\n def _wrapper(self,word):\n \"\"\"just to wrapp smthng\"\"\"\n url = self.url + word\n return self._get_html_page(url)\n \n def _info(self):\n \"\"\"just to create a readable information abt words\"\"\"\n Info = \"\"\n for word in self.Dict:\n temp = word + \": \\n\"\n for translation in self.Dict[word]:\n temp += \"- \" + translation + \"\\n\"\n Info += temp + \"\\n\"\n print Info\n \n def translate(self,hits=5,info=True):\n \"\"\"hits: int - how many translation of words we wanna get.\n info: bool - True/False, depends on if we wanna print a message (True) or return dict (False)\"\"\"\n for key in self.Dict:\n url = self._wrapper(key)\n soup = BeautifulSoup(url.content,\"html.parser\")\n g_data = soup.find_all(\"div\",{\"class\":\"row-fluid result-row\"})\n for item in g_data:\n temp = item.contents[3].find_all(\"a\",{\"class\":\"result-link\"})\n for inf in temp:\n if len(self.Dict[key]) == hits:\n break\n self.Dict[key].append(inf.text)\n if info == True:\n return self._info()\n \n\nif __name__ == \"__main__\":\n d = MultiDict()\n\n plik = d.wypCyfre()\n jezyk_pliku = {1:'slowka_EN.txt', 2:'slowka_DE.txt', 3:'slowka_RU.txt'}\n\n f = open(jezyk_pliku[plik], 'r')\n fd2 = f.readlines()\n f.close()\n fd=[]\n for i in fd2:\n if '\\n' in i:\n fd.append(i[:-1])\n else:\n fd.append(i)\n print fd\n \n d.addWord(fd)\n d.translate()" } ]
1
toniivan99/python
https://github.com/toniivan99/python
46ee4424ed8673d4365bb1441e3902cf403383e1
eaadd2eaeddc5cf0cbf1bbec8148c3d455e2d4fd
60531efe149c2acfdd14c9b7506045472837cd06
refs/heads/master
2023-03-20T23:32:21.016266
2021-03-09T20:38:02
2021-03-09T20:38:02
340,287,058
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6448189616203308, "alphanum_fraction": 0.6460674405097961, "avg_line_length": 31.059999465942383, "blob_id": "3f9ccf558e2d529c429002c08356b312c1ec5068", "content_id": "152244f761d12a06ac8318d4f95b9bac13de7c64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1602, "license_type": "no_license", "max_line_length": 82, "num_lines": 50, "path": "/RentCarSystem/Client.py", "repo_name": "toniivan99/python", "src_encoding": "UTF-8", "text": "from datetime import datetime, timedelta\n\nfrom RentCarSystem import RentCarSystem\n\nclass Client:\n def __init__(self, clientId, name):\n self.clientId = clientId\n self.name = name\n\n def getCarsCatalogFactory(self, onlyAvailableCars):\n if onlyAvailableCars:\n return RentCarSystem.getAvailableCars()\n else:\n return RentCarSystem.getAllCars()\n\n def rentCarsForHours(self, licensePlates, hours):\n fromDate = datetime.now()\n toDate = fromDate + timedelta(hours=hours)\n\n cars = self.getCarsCatalogFactory(False)\n carsToRent = []\n for car in cars:\n if car.licensePlate in licensePlates:\n carsToRent.append(car)\n\n return RentCarSystem.rentCars(carsToRent, fromDate, toDate, self.clientId)\n\n def rentCarsForDay(self, licensePlates):\n fromDate = datetime.now()\n toDate = fromDate + timedelta(days=1)\n\n cars = self.getCarsCatalogFactory(False)\n carsToRent = []\n for car in cars:\n if car.licensePlate in licensePlates:\n carsToRent.append(car)\n\n return RentCarSystem.rentCars(carsToRent, fromDate, toDate, self.clientId)\n\n def rentCarsForWeek(self, licensePlates):\n fromDate = datetime.now()\n toDate = fromDate + timedelta(days=7)\n\n cars = self.getCarsCatalogFactory(False)\n carsToRent = []\n for car in cars:\n if car.licensePlate in licensePlates:\n carsToRent.append(car)\n\n return RentCarSystem.rentCars(carsToRent, fromDate, toDate, self.clientId)" }, { "alpha_fraction": 0.5114976167678833, "alphanum_fraction": 0.5173938870429993, "avg_line_length": 32.93000030517578, "blob_id": "a2be6b707719611c11f012ad086eb9e052d48af2", "content_id": "505b794fcc7bbbb9f45217ab48ac2b63815475a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3392, "license_type": "no_license", "max_line_length": 127, "num_lines": 100, "path": "/RentCarSystem/RentCarSystem.py", "repo_name": "toniivan99/python", "src_encoding": "UTF-8", "text": "import json\nfrom datetime import datetime\n\nfrom Car import Car\n\nclass RentCarSystem:\n allCars = []\n\n @staticmethod\n def calculateSum(cars, fromDate, toDate):\n totalSum = 0\n for i in range(len(cars)):\n weeks = (toDate - fromDate).days // 7\n days = (toDate - fromDate).days\n hours = (toDate - fromDate).total_seconds() / 3600\n\n if weeks > 0:\n totalSum += weeks * cars[i].pricePerWeek\n days -= (weeks * 7)\n hours -= (weeks * 7) * 24\n\n if days > 0:\n totalSum += days * cars[i].pricePerDay\n hours -= days * 24\n\n if hours > 0:\n totalSum += (hours * cars[i].pricePerHour)\n\n if len(cars) > 3:\n return totalSum - (totalSum * 0.3)\n else:\n return totalSum\n\n @staticmethod\n def getAvailableCars():\n cars = RentCarSystem.getAllCars()\n availableCars = []\n for car in cars:\n if not car.isRented:\n availableCars.append(car)\n\n return availableCars\n\n @staticmethod\n def getAllCars():\n if len(RentCarSystem.allCars) > 0:\n return RentCarSystem.allCars.copy()\n else:\n file = open('cars.json')\n fileData = json.load(file)\n for data in fileData:\n brand = data['brand']\n model = 
data['model']\n consumption = data['consumption']\n licensePlate = data['licensePlate']\n pricePerHour = data['pricePerHour']\n pricePerDay = data['pricePerDay']\n pricePerWeek = data['pricePerWeek']\n isRented = data['isRented']\n if isRented:\n rentedBy = data['rentedBy']\n rentedFrom = datetime.strptime(data['rentedFrom'], '%Y-%m-%d %H:%M:%S.%f')\n rentedTo = datetime.strptime(data['rentedTo'], '%Y-%m-%d %H:%M:%S.%f')\n else:\n rentedBy = \"\"\n rentedFrom = \"\"\n rentedTo = \"\"\n car = Car(brand, model, consumption, licensePlate, pricePerHour, pricePerDay, pricePerWeek, isRented, rentedBy,\n rentedFrom, rentedTo)\n RentCarSystem.allCars.append(car)\n\n file.close()\n return RentCarSystem.allCars.copy()\n\n @staticmethod\n def rentCars(cars, fromDate, toDate, clientId):\n for car in cars:\n if car.isRented:\n if car.rentedTo >= fromDate:\n print(\"Car with license plate \" + car.licensePlate + \" is already rented\")\n return\n\n for car in cars:\n car.isRented = True\n car.rentedFrom = fromDate\n car.rentedTo = toDate\n car.rentedBy = clientId\n\n RentCarSystem.saveAllCars()\n return RentCarSystem.calculateSum(cars, fromDate, toDate)\n\n @staticmethod\n def saveAllCars():\n for car in RentCarSystem.allCars:\n if car.isRented:\n car.rentedFrom = car.rentedFrom.strftime('%Y-%m-%d %H:%M:%S.%f')\n car.rentedTo = car.rentedTo.strftime('%Y-%m-%d %H:%M:%S.%f')\n\n jsonFile = open('cars.json', \"w\")\n json.dump(RentCarSystem.allCars, jsonFile, indent=4, default=lambda x: x.__dict__)" }, { "alpha_fraction": 0.6507936716079712, "alphanum_fraction": 0.6507936716079712, "avg_line_length": 39.5, "blob_id": "256582bb533688265511b7d55f7ad8704b3aa359", "content_id": "66da10fbf9d79166c481c0008143c5fcb708679a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 567, "license_type": "no_license", "max_line_length": 124, "num_lines": 14, "path": "/RentCarSystem/Car.py", "repo_name": "toniivan99/python", "src_encoding": "UTF-8", "text": "class Car:\n def __init__(self, brand, model, consumption, licensePlate, pricePerHour, pricePerDay, pricePerWeek, isRented, rentedBy,\n rentedFrom, rentedTo):\n self.brand = brand\n self.model = model\n self.consumption = consumption\n self.licensePlate = licensePlate\n self.pricePerHour = pricePerHour\n self.pricePerDay = pricePerDay\n self.pricePerWeek = pricePerWeek\n self.isRented = isRented\n self.rentedBy = rentedBy\n self.rentedFrom = rentedFrom\n self.rentedTo = rentedTo\n" }, { "alpha_fraction": 0.5831874012947083, "alphanum_fraction": 0.5915061235427856, "avg_line_length": 35.269840240478516, "blob_id": "7895eecf47814f1e8742a999d87b147ee68db63c", "content_id": "aa58918ae95f7e687c20b3f8293bcbd689419bac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2284, "license_type": "no_license", "max_line_length": 89, "num_lines": 63, "path": "/RentCarSystem/main.py", "repo_name": "toniivan99/python", "src_encoding": "UTF-8", "text": "from random import randint\nfrom Client import Client\n\nprint(\"Please enter you name:\")\nname = input()\nclient = Client(randint(1, 10000), name)\n\nprint(\"Please choose option:\")\nprint(\"1. Get all cars\")\nprint(\"2. Get only available cars\")\nprint(\"3. Rent car(s) for hour(s)\")\nprint(\"4. Rent car(s) for day\")\nprint(\"5. Rent car(s) for week\")\nprint(\"6. 
Exit\")\noption = int(input())\n\ndef printCars(cars):\n for car in cars:\n print(\"Brand: \" + car.brand)\n print(\"Model: \" + car.model)\n print(\"Consumption: \" + str(car.consumption))\n print(\"License plate: \" + car.licensePlate)\n print(\"Price per hour: \" + str(car.pricePerHour))\n print(\"Price per day: \" + str(car.pricePerDay))\n print(\"Price per week: \" + str(car.pricePerWeek))\n print(\"Is car rented: \" + str(car.isRented))\n if car.isRented:\n print(\"Client rented the car: \" + str(car.rentedBy))\n print(\"Rented from date: \" + car.rentedFrom.strftime('%Y-%m-%d %H:%M:%S.%f'))\n print(\"Rented to date: \" + car.rentedTo.strftime('%Y-%m-%d %H:%M:%S.%f'))\n print(\"----------\")\n\nwhile option != 6:\n if option == 1:\n cars = client.getCarsCatalogFactory(False)\n printCars(cars)\n option = int(input())\n elif option == 2:\n cars = client.getCarsCatalogFactory(True)\n printCars(cars)\n option = int(input())\n elif option == 3:\n print(\"Please enter cars license plates separated by comma\")\n licensePlates = input()\n print(\"Please enter hours\")\n hours = int(input())\n sum = client.rentCarsForHours(licensePlates.split(','), hours)\n print(\"Total sum to pay: \" + str(sum))\n option = int(input())\n elif option == 4:\n print(\"Please enter cars license plates separated by comma\")\n licensePlates = input()\n sum = client.rentCarsForDay(licensePlates.split(','))\n print(\"Total sum to pay: \" + str(sum))\n option = int(input())\n elif option == 5:\n print(\"Please enter cars license plates separated by comma\")\n licensePlates = input()\n sum = client.rentCarsForWeek(licensePlates.split(','))\n print(\"Total sum to pay: \" + str(sum))\n option = int(input())\n elif option == 6:\n break" }, { "alpha_fraction": 0.7062228918075562, "alphanum_fraction": 0.713458776473999, "avg_line_length": 27.83333396911621, "blob_id": "b89b662086b653418868a426e9d4603185e111c1", "content_id": "021f9f92de4a1efe7e0b946994ee0d1d521692a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 691, "license_type": "no_license", "max_line_length": 124, "num_lines": 24, "path": "/task4/json_output.py", "repo_name": "toniivan99/python", "src_encoding": "UTF-8", "text": "import argparse\nimport re\nfrom pathlib import Path\nimport json\nimport csv\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"file_path\", type=Path)\np = parser.parse_args()\n\nfile = open(p.file_path)\nfileContent = file.readlines();\nfile.close()\n\nfileContentAsStr = \"\"\nfileContentAsStr = fileContentAsStr.join(fileContent)\n\nparagraphsDictionary = {}\nmatches = re.findall(\"## Begin (Paragraph \\w*)\\s*(\\w*) type=(\\w*) format=(\\w*)\\s*## End Paragraph \\w*\\s*\", fileContentAsStr)\nfor match in matches:\n paragraphsDictionary[match[0]] = {\"name\": match[1], \"type\": match[2], \"format\": match[3]}\n \njsonFile = open('json_output.json', \"w\")\njson.dump(paragraphsDictionary, jsonFile, indent=4)" } ]
5
NicolasDutronc/FlappyBirdRL
https://github.com/NicolasDutronc/FlappyBirdRL
39e40e60ab3ea9ba74427a2bf64b32a6a5df997f
1f00ab384c7930ed4e1f274e56aff49524bf12b9
f09ec80c3c39228af8c024b0371b908475092cbf
refs/heads/master
2022-07-08T13:13:36.290009
2019-11-05T07:56:57
2019-11-05T07:56:57
219,676,234
0
0
null
2019-11-05T06:45:48
2019-11-05T08:02:12
2022-06-21T23:19:44
Python
[ { "alpha_fraction": 0.747940719127655, "alphanum_fraction": 0.7858319878578186, "avg_line_length": 25.434782028198242, "blob_id": "65d17c2e078e49937621a410ff8cfc68b813a69c", "content_id": "878dcae2db560ad6180d5f5f4a15f0bfb38fd077", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 607, "license_type": "no_license", "max_line_length": 78, "num_lines": 23, "path": "/test.py", "repo_name": "NicolasDutronc/FlappyBirdRL", "src_encoding": "UTF-8", "text": "import gym\nimport gym_ple\n\nimport torch.optim as optim\nimport torch.nn as nn\n\nfrom agents.dqn import DQNAgent\nfrom models.cnn import CNNModel, DuelingCNNModel\nfrom environment import Environment\n\n\nlr = 0.00001\nmomentum = 0.95\nnum_episodes = 1000000000\nbatch_size = 32\n\nenv = Environment('FlappyBird-v0')\nmodel = DuelingCNNModel(env.action_space())\noptimizer = optim.RMSprop(params=model.parameters(), lr=lr, momentum=momentum)\nloss = nn.SmoothL1Loss()\nagent = DQNAgent(environment=env, model=model, optimizer=optimizer, loss=loss)\n\nagent.train(num_episodes=num_episodes, batch_size=batch_size, verbose=True)" }, { "alpha_fraction": 0.7958477735519409, "alphanum_fraction": 0.7993079423904419, "avg_line_length": 18.33333396911621, "blob_id": "9d4fcfae56d46608b5c6edb9d3f9463399aedc70", "content_id": "221e4e7a0dc0ab96fae0d942c188c92e1bdd4899", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 289, "license_type": "no_license", "max_line_length": 46, "num_lines": 15, "path": "/test_play.py", "repo_name": "NicolasDutronc/FlappyBirdRL", "src_encoding": "UTF-8", "text": "import gym\nimport gym_ple\n\nfrom agents.dqn import DQNAgent\nfrom models.cnn import DuelingCNNModel\nfrom environment import Environment\n\nimport torch\n\n\nenv = Environment('FlappyBird-v0')\nmodel = DuelingCNNModel(env.action_space())\nagent = DQNAgent(environment=env, model=model)\n\nagent.play()" }, { "alpha_fraction": 0.6001994013786316, "alphanum_fraction": 0.6121634840965271, "avg_line_length": 23.487804412841797, "blob_id": "8b88aecd0195416d37fb7b688ba77bc52bb802b0", "content_id": "4b52db0cd5d8d231cf610a811f2efb12a0bdaffc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1003, "license_type": "no_license", "max_line_length": 55, "num_lines": 41, "path": "/environment.py", "repo_name": "NicolasDutronc/FlappyBirdRL", "src_encoding": "UTF-8", "text": "import gym\nimport gym_ple\nimport cv2\nimport numpy as np\n\n\nclass Environment:\n\n def __init__(self, game, image_shape=(84, 84)):\n self.game = gym.make(game)\n self.image_shape = image_shape\n\n def reset(self):\n return self.preprocess(self.game.reset())\n\n def preprocess(self, image):\n image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n # print(image.shape)\n image = cv2.resize(image, self.image_shape)\n # print(image.shape)\n image = image/255\n image = image.reshape(self.image_shape)\n # print(image.shape)\n return image\n \n def get_screen(self):\n screen = self.game.render('rgb_array')\n screen = self.preprocess(screen)\n return screen\n\n def step(self, action):\n return self.game.step(action)\n\n def action_space(self):\n return self.game.action_space.n\n\n def random_action(self):\n return self.game.action_space.sample()\n\n def render(self):\n self.game.render()" }, { "alpha_fraction": 0.563882052898407, "alphanum_fraction": 0.6050368547439575, "avg_line_length": 38.240962982177734, "blob_id": 
"4ebcc17cc1c75d5bc1172618dff2e39ef6fc9d1d", "content_id": "e3c80203cadbb9827318d3809ebd6a7217f8261d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3256, "license_type": "no_license", "max_line_length": 92, "num_lines": 83, "path": "/models/cnn.py", "repo_name": "NicolasDutronc/FlappyBirdRL", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass CNNModel(nn.Module):\n\n def __init__(self, action_space):\n super(CNNModel, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=4, out_channels=32, kernel_size=8, stride=4) # 20\n self.bn1 = nn.BatchNorm2d(num_features=32)\n self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2) # 9\n self.bn2 = nn.BatchNorm2d(num_features=64)\n self.conv3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1) # 7\n self.bn3 = nn.BatchNorm2d(num_features=64)\n self.fc1 = nn.Linear(in_features=7*7*64, out_features=512)\n self.out = nn.Linear(in_features=512, out_features=action_space)\n # self.out = nn.Linear(in_features=256, out_features=action_space)\n # self.weights_init()\n \n def forward(self, x):\n x = F.relu(self.bn1(self.conv1(x)))\n x = F.relu(self.bn2(self.conv2(x)))\n x = F.relu(self.bn3(self.conv3(x)))\n x = x.view(-1, self.num_flat_features(x))\n x = F.relu(self.fc1(x))\n # x = F.relu(self.fc2(x))\n return self.out(x)\n \n def num_flat_features(self, x):\n size = x.size()[1:] # all dimensions except the batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n return num_features\n\n def weights_init(self):\n for p in self.parameters():\n print(p)\n nn.init.xavier_normal(p, gain=nn.init.calculate_gain('relu'))\n\n\nclass DuelingCNNModel(nn.Module):\n\n def __init__(self, action_space):\n super(DuelingCNNModel, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=4, out_channels=32, kernel_size=8, stride=4) # 20\n self.bn1 = nn.BatchNorm2d(num_features=32)\n self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2) # 9\n self.bn2 = nn.BatchNorm2d(num_features=64)\n self.conv3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1) # 7\n self.bn3 = nn.BatchNorm2d(num_features=64)\n self.fcValue = nn.Linear(in_features=7*7*64, out_features=512)\n self.fcAdvantage = nn.Linear(in_features=7*7*64, out_features=512)\n self.value = nn.Linear(in_features=512, out_features=1)\n self.advantages = nn.Linear(in_features=512, out_features=action_space)\n \n def forward(self, x):\n x = F.relu(self.bn1(self.conv1(x)))\n x = F.relu(self.bn2(self.conv2(x)))\n x = F.relu(self.bn3(self.conv3(x)))\n x = x.view(-1, self.num_flat_features(x))\n value = F.relu(self.fcValue(x))\n value = self.value(value)\n advantages = F.relu(self.fcAdvantage(x))\n advantages = self.advantages(advantages)\n out = value + (advantages - advantages.mean())\n return out\n \n def num_flat_features(self, x):\n size = x.size()[1:] # all dimensions except the batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n return num_features\n\n def weights_init(self):\n for p in self.parameters():\n print(p)\n nn.init.xavier_normal(p, gain=nn.init.calculate_gain('relu'))\n\nmodel = CNNModel(2)\nprint(model)" }, { "alpha_fraction": 0.4161073863506317, "alphanum_fraction": 0.6510066986083984, "avg_line_length": 13.899999618530273, "blob_id": "04bb03c3ce04a356c1d323b0191bf5f5e69e2f6e", "content_id": "d4d27a109013718c2733a7d5eb70a64408083716", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 149, "license_type": "no_license", "max_line_length": 23, "num_lines": 10, "path": "/requirements.txt", "repo_name": "NicolasDutronc/FlappyBirdRL", "src_encoding": "UTF-8", "text": "cloudpickle==1.2.2\nfuture==0.18.2\ngym==0.15.3\ngym-ple==0.3\nnumpy==1.17.3\nopencv-python==4.1.1.26\npyglet==1.3.2\nscipy==1.3.1\nsix==1.12.0\ntorch==1.3.0\n" }, { "alpha_fraction": 0.5534883737564087, "alphanum_fraction": 0.5665116310119629, "avg_line_length": 20.959182739257812, "blob_id": "4860d07b20029f925fd64de5ab88f4bfd35695b8", "content_id": "71984e1c2c8c88c7e4978d5d87a90720c5590cfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1075, "license_type": "no_license", "max_line_length": 86, "num_lines": 49, "path": "/experience_replay.py", "repo_name": "NicolasDutronc/FlappyBirdRL", "src_encoding": "UTF-8", "text": "import numpy as np\nimport random\nfrom collections import deque, namedtuple\n\n\nTransition = namedtuple('Transition', ['obs', 'action', 'reward', 'next_obs', 'done'])\n\nclass Experience_buffer:\n\n def __init__(self, buffer_size=10000):\n self.buffer = deque()\n self.buffer_size = buffer_size\n\n def __len__(self):\n return len(self.buffer)\n\n def __repr__(self):\n s = 'buffer\\n'\n s += 'items:\\n'\n for item in self.buffer:\n s += str(item)\n s += '\\n'\n s += 'size: ' + str(len(self.buffer))\n return s\n \n def is_full(self):\n return len(self.buffer) == self.buffer_size\n \n def add(self, *args):\n if self.is_full():\n del self.buffer[0]\n self.buffer.append(Transition(*args))\n\n def sample(self, size):\n return random.sample(self.buffer, size)\n\n\n'''\nbuffer = Experience_buffer(10)\nfor _ in range(15):\n exp = np.random.random((1, 5, 5))\n buffer.add(exp)\n print(buffer)\n print()\n\nbatch = buffer.sample(7)\nprint(batch)\nprint(batch.shape)\n#'''" }, { "alpha_fraction": 0.723192036151886, "alphanum_fraction": 0.7306733131408691, "avg_line_length": 14.423076629638672, "blob_id": "ea64ebc9be717f4807ff80fcfc876f6616453988", "content_id": "18ce713de9d142980d99fc34cc896aca3510284d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 401, "license_type": "no_license", "max_line_length": 67, "num_lines": 26, "path": "/README.md", "repo_name": "NicolasDutronc/FlappyBirdRL", "src_encoding": "UTF-8", "text": "# FlappyBirdRL\n\nReinforcement Learning applied to Flappy Bird\n\n## Installation\n\nMake a virtual environment with python 3.7 or higher.\n\n```bash\npython3 -m venv ./.venv\nsource activate .venv/bin/activate\n```\n\nInstall dependencies\n\n```bash\npip install -r requirements.txt\n```\n\n## Train\n\nRun `test.py`. 
You can of course adjust the parameters as you wish.\n\n## Watch it play\n\nRun `test_play.py` and watch.\n" }, { "alpha_fraction": 0.5402005314826965, "alphanum_fraction": 0.5491448640823364, "avg_line_length": 39.055118560791016, "blob_id": "db8ef1d374314c84339f16e2ee3117cf7174d92c", "content_id": "748111e4eda2d714edfb853e700c926d09168c2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10174, "license_type": "no_license", "max_line_length": 158, "num_lines": 254, "path": "/agents/dqn.py", "repo_name": "NicolasDutronc/FlappyBirdRL", "src_encoding": "UTF-8", "text": "import copy\nimport numpy as np\nimport math\n\nfrom collections import deque\nfrom experience_replay import Experience_buffer\n\nimport torch\nfrom torch.autograd import Variable\n\n\nclass DQNAgent:\n\n def __init__(\n self,\n environment,\n model=None,\n optimizer=None,\n loss=None, \n model_path='./model.pt', \n save_model_freq=5, \n update_target_freq=1000, \n update_model_freq=4, \n replay_size_start=5000, \n action_repeat=4,\n frame_skipping=4, \n discount_factor=0.99, \n exploration_rate_start=0.2, \n exploration_rate_end=0.01, \n exploration_decay=1e5):\n\n # objects\n self.environment = environment\n self.model = model\n self.target_model = copy.deepcopy(self.model)\n self.optimizer = optimizer\n self.loss = loss\n self.model_path = model_path\n self.state_buffer = deque(maxlen=action_repeat)\n self.replay_memory = Experience_buffer()\n\n # statistics\n self.num_updates = 0\n self.num_steps = 0\n self.last_rewards = deque(maxlen=100)\n\n # frequences\n self.save_model_freq = save_model_freq\n self.update_target_freq = update_target_freq\n self.update_model_freq = update_model_freq\n\n # other parameters\n self.replay_size_start = replay_size_start\n self.action_repeat = action_repeat\n self.frame_skipping = frame_skipping\n self.discount_factor = discount_factor\n self.current_best_reward = 0\n self.playing = False\n \n # exploration parameters\n self.exploration_rate = exploration_rate_start\n self.exploration_rate_end = exploration_rate_end\n self.exploration_rate_step = (exploration_rate_start - exploration_rate_end) / exploration_decay\n\n def select_action(self, state):\n self.num_steps += 1\n if self.playing:\n state = Variable(torch.from_numpy(state).unsqueeze(0).float(), volatile=True)\n q_values = self.model(state).data\n return np.argmax(q_values.numpy())\n else:\n if self.num_steps > self.replay_size_start and self.exploration_rate > self.exploration_rate_end:\n self.exploration_rate -= self.exploration_rate_step\n if np.random.rand() < self.exploration_rate:\n return self.environment.random_action()\n else:\n state = Variable(torch.from_numpy(state).unsqueeze(0).float(), volatile=True)\n q_values = self.model(state).data\n return np.argmax(q_values.numpy())\n \n def update(self, data):\n observations = Variable(torch.from_numpy(np.array(tuple(data[i].obs for i in range(len(data))))).float())\n actions = Variable(torch.from_numpy(np.array(tuple(data[i].action for i in range(len(data))))).long())\n rewards = Variable(torch.from_numpy(np.array(tuple(data[i].reward for i in range(len(data))))).float())\n next_obs = Variable(torch.from_numpy(np.array(tuple(data[i].next_obs for i in range(len(data))))).float())\n dones = Variable(torch.from_numpy(np.array(tuple(0. if data[i].done else 1. 
for i in range(len(data))))).float())\n \n next_max_q_values = self.target_model(next_obs)\n next_max_q_values = Variable(next_max_q_values.data)\n\n best_actions = self.model(next_obs)\n best_actions = Variable(best_actions.data)\n _, best_actions = best_actions.max(dim=1, keepdim=True)\n\n next_max_q_values = next_max_q_values.gather(1, best_actions)\n next_max_q_values = next_max_q_values * dones.unsqueeze(1)\n\n current_q_values = self.model(observations).gather(1, actions.unsqueeze(1)).squeeze()\n target_values = rewards + self.discount_factor * next_max_q_values.squeeze()\n target_values = Variable(target_values.data)\n\n loss = self.loss(current_q_values, target_values)\n self.optimizer.zero_grad()\n loss.backward()\n for param in self.model.parameters():\n param.grad.data.clamp(-1, 1)\n self.optimizer.step()\n self.num_updates += 1\n \n if self.num_updates % self.update_target_freq == 0:\n self.update_target()\n\n return loss.data[0]\n\n def save_model(self):\n print('INFO AGENT: SAVING MODEL...')\n torch.save(self.model.state_dict(), self.model_path)\n \n def load_model(self):\n self.model.load_state_dict(torch.load(self.model_path))\n\n def update_target(self):\n print('INFO TARGET: target updating... -----------------------------------------------------------------------------------')\n self.target_model.load_state_dict(self.model.state_dict())\n \n def get_recent_states(self):\n return np.array(self.state_buffer)\n\n def play(self, verbose=True):\n self.playing = True\n i = 0\n while True:\n self.load_model()\n if verbose:\n print('Episode #', i)\n i += 1\n \n done = False\n episode_reward = 0\n num_episode_steps = 0\n self.environment.reset()\n\n # get first observation\n current_obs = self.environment.get_screen()\n self.state_buffer = deque(maxlen=self.action_repeat)\n for _ in range(self.action_repeat):\n self.state_buffer.append(current_obs)\n \n while not done:\n current_obs = self.get_recent_states()\n action = self.select_action(current_obs)\n num_episode_steps += 1\n\n _, reward, done, _ = self.environment.step(action)\n self.state_buffer.append(self.environment.get_screen())\n if reward == 0:\n reward = 1\n elif reward == 1:\n reward = 5\n \n self.environment.render()\n \n # update satistics\n episode_reward += reward\n if episode_reward > self.current_best_reward:\n self.current_best_reward = episode_reward\n \n self.last_rewards.append(episode_reward)\n if verbose:\n print('Reward:', episode_reward)\n print('Current best reward:', self.current_best_reward)\n print('Mean reward over the last 100 episodes:', np.mean(self.last_rewards))\n print('Max reward over the last 100 episodes:', np.max(self.last_rewards))\n print('Min reward over the last 100 episodes:', np.min(self.last_rewards))\n print()\n \n def train(self, num_episodes=10000, batch_size=32, verbose=True):\n for i in range(num_episodes):\n if verbose:\n print('Episode #', i)\n done = False\n episode_reward = 0\n num_episode_steps = 0\n current_loss = 0\n self.environment.reset()\n\n # get first observation\n current_obs = self.environment.get_screen()\n self.state_buffer = deque(maxlen=self.action_repeat)\n for _ in range(self.action_repeat):\n self.state_buffer.append(current_obs)\n \n while not done:\n current_obs = self.get_recent_states()\n\n if self.num_steps > self.replay_size_start:\n if self.num_steps % self.frame_skipping == 0:\n action = 1\n self.num_steps += 1\n else:\n action = self.select_action(current_obs)\n # action = self.select_action(current_obs)\n else:\n action = 
self.environment.random_action()\n self.num_steps += 1\n \n num_episode_steps += 1\n\n # skip some frames\n #for _ in range(self.frame_skipping):\n # _, reward, done, _ = self.environment.step(action)\n # self.state_buffer.append(self.environment.get_screen())\n # if done:\n # break\n\n _, reward, done, _ = self.environment.step(action)\n self.state_buffer.append(self.environment.get_screen())\n if reward == 0:\n reward = 1\n elif reward == 1:\n reward = 5\n\n next_obs = self.get_recent_states()\n self.replay_memory.add(current_obs, action, reward, next_obs, done)\n\n # update satistics\n episode_reward += reward\n \n # if the buffer is filled enough, periodically update the model\n if len(self.replay_memory) > batch_size and self.num_steps % self.update_model_freq == 0 and len(self.replay_memory) > self.replay_size_start:\n if verbose:\n print('INFO: agent updating...')\n batch = self.replay_memory.sample(batch_size)\n current_loss = self.update(batch)\n \n self.last_rewards.append(episode_reward)\n\n if i % self.save_model_freq == 0 and self.num_steps > self.replay_size_start:\n self.save_model()\n \n if episode_reward > self.current_best_reward:\n self.current_best_reward = episode_reward\n \n if verbose:\n print('Reward:', episode_reward)\n print('Mean reward over the last 100 episodes:', np.mean(self.last_rewards))\n print('Max reward over the last 100 episodes:', np.max(self.last_rewards))\n print('Min reward over the last 100 episodes:', np.min(self.last_rewards))\n print('Current loss:', current_loss)\n print('Current exploration rate:', self.exploration_rate)\n print('Number of steps:', self.num_steps)\n print('Number of updates:', self.num_updates)\n print('Current best reward:', self.current_best_reward)\n print()\n" } ]
8
zhangjinhui152/Rust_c-java
https://github.com/zhangjinhui152/Rust_c-java
b1a4bcd49a8e6cc10617c75dd70a9bb86efaf7a8
3e9f86e2ff2b4b0be023394aeae24417a12384f5
244735457a12fd9549ce8c2adda253872cc0d88d
refs/heads/main
2023-04-19T12:37:08.338915
2021-05-12T10:37:35
2021-05-12T10:37:35
331,309,701
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.5308641791343689, "alphanum_fraction": 0.5617284178733826, "avg_line_length": 18.200000762939453, "blob_id": "9ee1e8af173f05bfc62230351359764dbe10ffce", "content_id": "d171559d1f50a8f9c9a7bb7dc7ef54fa1563be45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 810, "license_type": "no_license", "max_line_length": 63, "num_lines": 40, "path": "/src/cn/code/base2/ArrayList06.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "UTF-8", "text": "package cn.code.base2;\r\nimport java.util.ArrayList;\r\nimport java.util.Random;\r\npublic class ArrayList06 {\r\n\tpublic static void main(String[] args) {\r\n\t\t\r\n\t\tArrayList<Integer> unityBig= new ArrayList<Integer>();\r\n\t\tRandom r1 = new Random();\r\n\t\t\r\n\t\tint num;\r\n\t\t\r\n\t\t\r\n\t\tfor(int i = 0;i < 20;i++)\r\n\t\t{\r\n\t\t\tnum =r1.nextInt(100);\r\n\t\t\tunityBig.add(num);\r\n\t\t//\tSystem.out.print(unityBig.get(i));\r\n\t\t}\r\n\t\t\r\n\t\tArrayList<Integer> a1 = get(unityBig);\r\n\t\t\r\n\t\tfor (int j3 = 0; j3 < a1.size(); j3++) {\r\n\t\tSystem.out.print(a1.get(j3));\r\n\t\t}\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t}\r\n\t\r\n\tpublic static ArrayList<Integer> get(ArrayList<Integer> big) {\r\n\t\t ArrayList<Integer> a1 = new ArrayList<Integer>();\r\n\t\t for (int i = 0; i < big.size(); i++) {\r\n\t\t\tint num = big.get(i);\r\n\t\t\tif (num % 2 == 0) {\r\n\t\t\t\ta1.add(num);\r\n\t\t\t}\r\n\t\t}\r\n\t\treturn a1;\r\n\t}\r\n\t}\r\n\r\n" }, { "alpha_fraction": 0.5381818413734436, "alphanum_fraction": 0.5709090828895569, "avg_line_length": 18.674999237060547, "blob_id": "6d89f17101ae49cf8db48caea6fd4ef556a2c746", "content_id": "d4c4c1faec5e0f79dbfaf2ea87d82a1e6c39b468", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 843, "license_type": "no_license", "max_line_length": 63, "num_lines": 40, "path": "/src/cn/code/base2/ArrayList07.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package cn.code.base2;\r\nimport java.util.ArrayList;\r\nimport java.util.Random;\r\npublic class ArrayList07 {\r\n\tprivate static void main(String[] args) {\r\n\t\t// TODO 自动生成的方法存根\r\n\t\tArrayList<Integer> unityBig= new ArrayList<Integer>();\r\n\t\tRandom r1 = new Random();\r\n\t\t\r\n\t\tint num;\r\n\t\t\r\n\t\t\r\n\t\tfor(int i = 0;i < 20;i++)\r\n\t\t{\r\n\t\t\tnum =r1.nextInt(100);\r\n\t\t\tunityBig.add(num);\r\n//\t\t\tSystem.out.prissssnt(unityBig.get(i));\r\n\t\t}\r\n\t\t\r\n\t\tArrayList<Integer> a1 = get(unityBig);\r\n\t\t\r\n\t\tfor (int j3 = 0; j3 < a1.size(); j3++) {\r\n\t\tSystem.out.print(a1.get(j3));\r\n\t\t}\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t}\r\n\t\r\n\tpublic static ArrayList<Integer> get(ArrayList<Integer> big) {\r\n\t\t ArrayList<Integer> a1 = new ArrayList<Integer>();\r\n\t\t for (int i = 0; i < a1.size(); i++) {\r\n\t\t\tint num = a1.get(i);\r\n\t\t\tif (num % 2 == 0) {\r\n\t\t\t\ta1.add(num);\r\n\t\t\t}\r\n\t\t}\r\n\t\treturn a1;\r\n\t}\r\n}" }, { "alpha_fraction": 0.5169082283973694, "alphanum_fraction": 0.5169082283973694, "avg_line_length": 7.761904716491699, "blob_id": "4680e710485cb31d1c795ba6dc07c43eb3637881", "content_id": "fa91047419834c97c99081fe3f6f13bf3bebd23a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 207, "license_type": "no_license", "max_line_length": 24, "num_lines": 21, "path": "/ThreadToLambda/DemoForLsmbda/Person.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": 
"UTF-8", "text": "\r\npackage DemoForLsmbda;\r\n\r\npublic class Person\r\n{\r\n\tint a;\r\n\t\r\n\tpublic Person(int a)\r\n\t{\r\n\t\tthis.a = a;\r\n\t}\r\n\r\n\tpublic int getA()\r\n\t{\r\n\t\treturn a;\r\n\t}\r\n\r\n\tpublic void setA(int a)\r\n\t{\r\n\t\tthis.a = a;\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.5573770403862, "alphanum_fraction": 0.6065573692321777, "avg_line_length": 13.25, "blob_id": "4572ae5e273dfd4ac01601f47ec7cdbfa234646d", "content_id": "b95aa3708e5e617d80ec33bca6c15de9365ff2a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 134, "license_type": "no_license", "max_line_length": 40, "num_lines": 8, "path": "/src/cn/code/base2/String01.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "WINDOWS-1252", "text": "package cn.code.base2;\r\n\r\npublic class String01 {\r\npublic static void main(String[] args) {\r\n\t//´´½¨×Ö·û´®µÄ3+1\r\n\t\r\n}\r\n}\r\n" }, { "alpha_fraction": 0.4636363685131073, "alphanum_fraction": 0.49696969985961914, "avg_line_length": 13.714285850524902, "blob_id": "0909b81c2ea6056ad7e071bca74d6ffa4c118fed", "content_id": "91abd5f30c1e92147e13a3ebfa3bb9776d25f7e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 330, "license_type": "no_license", "max_line_length": 41, "num_lines": 21, "path": "/src/cn/code/base2/three.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "UTF-8", "text": "package cn.code.base2;\r\n\r\npublic class three\r\n{\r\n\tpublic static void main(String[] args)\r\n\t{\r\n\t\tint a = 10;\r\n\t\tint b = 28;\r\n\t\tint c = 30;\r\n\t\tint e = 40;\r\n\t\tint d = 0;\r\n\t\tint f = 0;\r\n\t\tint max;\r\n\r\n\t\tmax = a < b ? b : a;\r\n\t\t\r\n\t\tSystem.out.println(max);\r\n\t\tmax = d<f ?(f = b<c?b:c):(d = a<e?e:a);\r\n\t\tSystem.out.println(max);\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.5918803215026855, "alphanum_fraction": 0.632478654384613, "avg_line_length": 18.34782600402832, "blob_id": "fd4be31a84d0817db2405043999298e10166d55b", "content_id": "1ec9471e97a0814a8195c67b828820b7acf85b58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 592, "license_type": "no_license", "max_line_length": 54, "num_lines": 23, "path": "/src/cn/code/ArraysAnfMath/ArrayS01.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package cn.code.ArraysAnfMath;\r\nimport java.util.Arrays;\r\n\r\npublic class ArrayS01 {\r\n//与数组有关的数据 提供大量的静态方法 实现常用操做\r\n\tpublic static void main(String[] args) {\r\n\t\tint[] i1 = {3,21,10,23,34};\r\n\t\t\r\n\t\tArrays.sort(i1);\r\n\t\t//默认排序数字从小到大\r\n\t\t\r\n\t\tString s1 = Arrays.toString(i1);\r\n\t\tSystem.out.println(s1);//数组转换为字符串\r\n\t\t\r\n\t\tString[] a2 = {\"ddd\",\"bbb\",\"ccc\"};\r\n\t\tArrays.sort(a2);\r\n\t\tSystem.out.println(Arrays.toString(a2));//默认排序按照字母升序\r\n\t\t//自定义类型要co'mparable接口的支持\r\n\t\t\r\n\r\n\t\t\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.640625, "alphanum_fraction": 0.640625, "avg_line_length": 12.769230842590332, "blob_id": "9f19969fa86f60a6a3354e794b508e99eb1bcc15", "content_id": "59894b3197923d45e383c778c0d7051aa397fe17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 226, "license_type": "no_license", "max_line_length": 30, "num_lines": 13, "path": "/succed/cn/code/abstractcalss/Animal.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package 
cn.code.abstractcalss;\r\n\r\n//定义抽象方法\r\n\r\npublic abstract class Animal {\r\n\t\r\n\tpublic Animal() {\r\n\t\t// TODO 自动生成的构造函数存根\r\n\t\tSystem.out.println(\"dd\");\r\n\t}\r\n\tpublic abstract void Eat();\r\n\r\n}\r\n" }, { "alpha_fraction": 0.5875486135482788, "alphanum_fraction": 0.5875486135482788, "avg_line_length": 12.277777671813965, "blob_id": "ce9ddee84730339a5be54da11b83928b50f2c7e3", "content_id": "d8686887d3a052d6bd9b746d3766328f44257474", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 257, "license_type": "no_license", "max_line_length": 33, "num_lines": 18, "path": "/src/APisource/TiankPad.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "UTF-8", "text": "package APisource;\r\n\r\npublic class TiankPad\r\n{\r\n\tpublic void PowerOn()\r\n\t{\r\n\t\tSystem.out.println(\"!!!Open\");\r\n\t}\r\n\tpublic void PowerOff()\r\n\t{\r\n\t\tSystem.out.println(\"!!!Close\");\r\n\t}\r\n\tpublic void UseDevice(UsB usb)\r\n\t{\r\n\t\tusb.open();\r\n\t\tusb.close();\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.5126262903213501, "alphanum_fraction": 0.5252525210380554, "avg_line_length": 12.666666984558105, "blob_id": "c56d94dfbc0bfdc29edc51533bfde868fd7ebc60", "content_id": "b4a74352d6b02bd6209831b9b69051c9b143674b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 425, "license_type": "no_license", "max_line_length": 49, "num_lines": 27, "path": "/ThreadToLambda/ThreadDemo2/synychrized.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "UTF-8", "text": "package ThreadDemo2;\r\n\r\npublic class synychrized implements Runnable\r\n{\r\n\tint ticket = 100;\r\n\r\n\tObject obj = new Object();// 🔒对象\r\n\r\n\t@Override\r\n\tpublic void run()\r\n\t{\r\n\t\twhile (true) {\r\n\r\n\t\t\tsynchronized (obj) {\r\n\t\t\t\t// TODO 自动生成的方法存根\r\n\t\t\t\tif (this.ticket > 0) {\r\n\t\t\t\t\tSystem.out.println(\"买票---->\" + this.ticket);\r\n\t\t\t\t\tthis.ticket--;\r\n\t\t\t\t} \r\n\t\t\t\telse {\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\r\n\t\t}\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.5800711512565613, "alphanum_fraction": 0.5800711512565613, "avg_line_length": 10.772727012634277, "blob_id": "a6fb3d54ade5477545befce510e5264cfe323449", "content_id": "69d5bb1ed358c79a85112be384050c537ff95903", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 317, "license_type": "no_license", "max_line_length": 40, "num_lines": 22, "path": "/src/APisource/Mouse.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package APisource;\r\n\r\npublic class Mouse implements UsB\r\n{\r\n\r\n\t@Override\r\n\tpublic void open()\r\n\t{\r\n\t\t// TODO 自动生成的方法存根\r\n\t\tSystem.out.println(\"Mouse!!!OPen!!!\");\r\n\t\t\r\n\t}\r\n\r\n\t@Override\r\n\tpublic void close()\r\n\t{\r\n\t\t// TODO 自动生成的方法存根\r\n\t\tSystem.out.println(\"!!!CloseM\");\r\n\t\t\r\n\t}\r\n\t\r\n}\r\n" }, { "alpha_fraction": 0.5880503058433533, "alphanum_fraction": 0.6194968819618225, "avg_line_length": 15.666666984558105, "blob_id": "b247a8ad5d6e68df35d1108a85b5e6c0373a4410", "content_id": "a7dac275e7ebb497910707bf0d411bc0d43b3274", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 318, "license_type": "no_license", "max_line_length": 45, "num_lines": 18, "path": "/src/cn/code/ArraysAnfMath/demo02.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "UTF-8", "text": 
"package cn.code.ArraysAnfMath;\r\nimport java.util.Arrays;\r\n\r\n\r\npublic class demo02 {\r\npublic static void main(String[] args) {\r\n\t\r\n\tString s1 = \"hiuevyhutywuiytaoltyobVLEthil\";\r\n\tchar[] c1 = s1.toCharArray();\r\n\t\r\n\tArrays.sort(c1);\r\n\t\r\n\tfor (int i = c1.length-1; i >=0; i--) {\r\n\t\tSystem.out.print(c1[i]);\r\n\t\t\r\n\t}\r\n}\r\n}\r\n" }, { "alpha_fraction": 0.5991561412811279, "alphanum_fraction": 0.6160337328910828, "avg_line_length": 16.230770111083984, "blob_id": "04955f66d521dca535dce90fce6baf9eb184323b", "content_id": "8813ea03b678261b52bf9ac6f5742f153a048872", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 237, "license_type": "no_license", "max_line_length": 44, "num_lines": 13, "path": "/ThreadToLambda/DemoForLsmbda/MainCalc.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "UTF-8", "text": "package DemoForLsmbda;\r\n\r\npublic class MainCalc\r\n{\r\n\tpublic static void main(String[] args)\r\n\t{\r\n\t\tshow(10, 20, (a,b)->{return a+b;});\r\n\t}\r\n\tpublic static void show(int a,int b,Calc c)\r\n\t{\r\n\t\tSystem.out.println(c.CalcAdd(a, b));\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.573737382888794, "alphanum_fraction": 0.5757575631141663, "avg_line_length": 13.34375, "blob_id": "1c6141ea109f1d1a6835fee36139ce5abd56208c", "content_id": "d9375799668cc3143b3297aea77c8cee148b4562", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 551, "license_type": "no_license", "max_line_length": 36, "num_lines": 32, "path": "/src/cn/code/base2/OOP.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package cn.code.base2;\r\n\r\n\r\npublic class OOP {\r\n\tprivate String m_name;\r\n\tprivate int age;\r\n\t\r\n\t//构造方法\r\n\t//public className ()\r\n\t//{\r\n\t// }\r\n\tpublic OOP() {\r\n\t\t// TODO 自动生成的构造函数存根\r\n\t}\r\n\tpublic OOP(String name,int age) {\r\n\t\t// TODO 自动生成的构造函数存根\r\n\t\tthis.age = age;\r\n\t\tthis.m_name = name;\r\n\t}\r\n\t\t\r\n\tpublic void eat() {\r\n\t\tSystem.out.println(\"huanger\");\r\n\t}\r\n\t\r\n\tpublic void Show() {\r\n\t\tSystem.out.println(age+m_name);\r\n\t}\r\n\t\r\n\tpublic void SetName(String sname) {\r\n\t\tthis.m_name = sname;\r\n\t}\r\n}\r\n\r\n\r\n" }, { "alpha_fraction": 0.5956284403800964, "alphanum_fraction": 0.6038251519203186, "avg_line_length": 13.913043022155762, "blob_id": "1c5b98252b67c068c3d489529a4c7c022e547d1e", "content_id": "0d2b29b88bfefcc418be7f004dbdf36d4aa2bd65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 402, "license_type": "no_license", "max_line_length": 45, "num_lines": 23, "path": "/src/APisource/defaultImpl.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package APisource;\r\n\r\npublic class defaultImpl implements Default {\r\n\r\n\t@Override\r\n\tpublic void fun1() {\r\n\t\t// TODO 自动生成的方法存根\r\n\t\tSystem.out.println(\"print->\");\r\n\t\tme();\r\n\t}\r\n\r\n\t@Override\r\n\tpublic void fun2() {\r\n\t\t// TODO 自动生成的方法存根\r\n\t\tDefault.super.fun2();\r\n\t\tSystem.out.println(\"jianrern\");\r\n\t\tme();\r\n\t}\r\n\tprivate void me() {\r\n\t\tSystem.out.println(\"me\");\r\n\t}\r\n\t\r\n}\r\n" }, { "alpha_fraction": 0.5609756112098694, "alphanum_fraction": 0.5894308686256409, "avg_line_length": 14.399999618530273, "blob_id": "27aebd41092d48863baa4cd9916a22ba8dc65c0b", "content_id": "3d3732b410b173df06abd8edc775fac298dc7fdd", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 246, "license_type": "no_license", "max_line_length": 41, "num_lines": 15, "path": "/src/cn/code/base2/funs.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "UTF-8", "text": "package cn.code.base2;\r\n\r\npublic class funs {\r\n\tpublic static void main(String[] args) {\r\n\t\tint a;\r\n\t\ta= funs02(1, 2);\r\n\t\tSystem.out.println(a);\r\n\t\t\r\n\t}\r\n\tpublic static int funs02(int a,int b) {\r\n\t\tint result = a+b;\r\n\t\treturn result;\r\n\t\t\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.5453763604164124, "alphanum_fraction": 0.5587096810340881, "avg_line_length": 30.15277862548828, "blob_id": "ee47b71d405398733d6a3e9b88c2170c709c5e7f", "content_id": "5b053d65fc2013d35ae7783b99ae3d171b358438", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2395, "license_type": "no_license", "max_line_length": 145, "num_lines": 72, "path": "/src/DownloadUrlGet.py", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "UTF-8", "text": "import urllib\r\nimport requests\r\nfrom qcloud_cos import CosConfig\r\nfrom qcloud_cos import CosS3Client\r\nimport sys\r\nimport logging\r\nimport json\r\nimport time\r\nimport re\r\nfrom tencentcloud.common import credential\r\nfrom tencentcloud.common.profile.client_profile import ClientProfile\r\nfrom tencentcloud.common.profile.http_profile import HttpProfile\r\nfrom tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException\r\nfrom tencentcloud.asr.v20190614 import asr_client, models\r\n\r\nclass UrlGet():\r\n \"\"\"\r\n 设置并获取url 注意获取的是url列表\r\n \"\"\"\r\n def __init__(self,secret_id,secret_key,region):\r\n \"\"\"\r\n 设置并获取url 注意获取的是url列表\r\n \"\"\"\r\n self.__secret_id =secret_id\r\n self.__secret_key = secret_key\r\n self.__region = region\r\n self.__scheme = 'https' \r\n self.__token = None\r\n logging.basicConfig(level=logging.INFO, stream=sys.stdout)\r\n config = CosConfig(Region=self.__region, SecretId=self.__secret_id,SecretKey=self.__secret_key, Token=self.__token, Scheme=self.__scheme)\r\n # 2. 
获取客户端对象\r\n self.__client = CosS3Client(config)\r\n self.__Bucket_appid = \"1earning-1305041181\"\r\n self.__KeyList = list()\r\n self.__UrlList = list()\r\n \r\n def GetUrlDownList(self):\r\n \"\"\"\r\n 获取url-list\r\n \"\"\"\r\n self.__getObjectList()\r\n self.__getAnyUrl()\r\n return self.__UrlList\r\n def getKeyList(self):\r\n return self.__KeyList\r\n\r\n def __getObjectList(self):\r\n response = self.__client.list_objects(\r\n Bucket= self.__Bucket_appid\r\n )\r\n result = response['Contents']\r\n\r\n for i in result:\r\n self.__KeyList.append(i['Key'])\r\n \r\n def __getAnyUrl(self):\r\n count = 0\r\n for i in self.__KeyList:\r\n response = self.__client.get_presigned_download_url(\r\n Bucket=self.__Bucket_appid,\r\n Key=self.__KeyList[count],\r\n Expired=1000,\r\n Headers={\r\n 'Content-Length': '\tvideo/mp4',\r\n },\r\n Params={\r\n 'param1': 'string',\r\n 'param2': 'string'\r\n }\r\n )\r\n self.__UrlList.append(response)\r\n count+=1\r\n \r\n" }, { "alpha_fraction": 0.5175201892852783, "alphanum_fraction": 0.5444744229316711, "avg_line_length": 11.25, "blob_id": "156ff825d9803526603fe2255231aca3089752ee", "content_id": "1608580de7fe772102e23186490708cbee1fb733", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 371, "license_type": "no_license", "max_line_length": 39, "num_lines": 28, "path": "/ThreadToLambda/DemoForLsmbda/returnFormst.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "UTF-8", "text": "package DemoForLsmbda;\r\n\r\nimport java.util.Arrays;\r\n\r\npublic class returnFormst\r\n{\r\n\tpublic static void main(String[] args)\r\n\t{\r\n\r\n\t}\r\n\r\n\tpublic static void show()\r\n\t{\r\n\t\tPerson[] arr = {\r\n\t\t\tnew Person(10),\t\r\n\t\t\tnew Person(10),\t\r\n\t\t\tnew Person(10),\t\r\n\t\t};\r\n\t\t\r\n\t\t\r\n\t\tArrays.sort(arr,(Person o1,Person o2)\r\n\t\t\t\t->{return o1.getA()-o2.getA();\r\n\t\t\t\t});\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.6414141654968262, "alphanum_fraction": 0.6616161465644836, "avg_line_length": 13.230769157409668, "blob_id": "6cde5712409932ce90d66ad4f7c63cb9c9fe971c", "content_id": "3e0af8444bf6a7e3e5eac21a03a6f403e37537a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 216, "license_type": "no_license", "max_line_length": 57, "num_lines": 13, "path": "/src/APisource/SonApi.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package APisource;\r\n\r\npublic interface SonApi extends inferfaceF ,inferfaceF2 {\r\n\tpublic void funs5();\r\n\r\n\r\n\r\n\t@Override\r\n\tdefault void s2() {\r\n\t\t// TODO 自动生成的方法存根\r\n\t\tinferfaceF.super.s2();\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.6158940196037292, "alphanum_fraction": 0.6158940196037292, "avg_line_length": 8.785714149475098, "blob_id": "0fa47a6486cb0f45ec4dd1640f46c591f6e43cf3", "content_id": "c5081c84dcc4289d3ca5137ec7d721c9e31eec10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 151, "license_type": "no_license", "max_line_length": 38, "num_lines": 14, "path": "/ThreadToLambda/TheaadPond/Runer.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "UTF-8", "text": "package TheaadPond;\r\n\r\n\r\npublic class Runer implements Runnable\r\n{\r\n\r\n\t@Override\r\n\tpublic void run()\r\n\t{\r\n\t\tSystem.out.println(\"nihao\");\r\n\t\t\r\n\t}\r\n\r\n}\r\n" }, { "alpha_fraction": 0.5994962453842163, "alphanum_fraction": 
0.6020151376724243, "avg_line_length": 14.541666984558105, "blob_id": "02310f2899172eda35d59cf2c3ae3f25708cdb7c", "content_id": "379f65b8c1b88f5cb1d50b87075e2cd100270b31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 397, "license_type": "no_license", "max_line_length": 55, "num_lines": 24, "path": "/ThreadToLambda/ThreadDemo1/DEmoGetnmae.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "UTF-8", "text": "package ThreadDemo1;\r\n\r\npublic class DEmoGetnmae extends Thread\r\n{\r\n\tpublic DEmoGetnmae()\r\n\t{\r\n\t\tsuper();\r\n\t}\r\n\t@Override\r\n\tpublic void run()\r\n\t{\r\n//\t\tString s = this.getName();\r\n//\t\tSystem.out.println(s);\r\n//\t\t\r\n\t\t\r\n//\t\tThread t = Thread.currentThread();\r\n//\t\tString tName = t.getName();\r\n//\t\tSystem.out.println(tName);\r\n\t\t\r\n\t\tSystem.out.println(Thread.currentThread().getName());\r\n\t\t\r\n\t}\r\n\t\r\n}\r\n" }, { "alpha_fraction": 0.6811594367027283, "alphanum_fraction": 0.695652186870575, "avg_line_length": 11.800000190734863, "blob_id": "601af4878e44aff8484aecceb9fc215e5ee15d14", "content_id": "a8c8bd2faf6d10e00cc5e0ff50ee81206a7a3835", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 69, "license_type": "no_license", "max_line_length": 29, "num_lines": 5, "path": "/src/cn/code/polymorphic/Son2.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "UTF-8", "text": "package cn.code.polymorphic;\r\n\r\npublic class Son2 extends Fa{\r\n\t\r\n}\r\n" }, { "alpha_fraction": 0.6052631735801697, "alphanum_fraction": 0.6228070259094238, "avg_line_length": 14.285714149475098, "blob_id": "a48fb1f94443b2f9ac5167953e44de4467c430c1", "content_id": "f9554f10c165e965d131736f1238391aa73ad833", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 260, "license_type": "no_license", "max_line_length": 40, "num_lines": 14, "path": "/succed/cn/code/succeed/format.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package cn.code.succeed;\r\n\r\npublic class format {\r\npublic static void main(String[] args) {\r\n\tPersonSon p1 = new PersonSon();\r\n\tp1.Methon();\r\n\tSystem.out.println(p1.i);\r\n\t//重名等号左边是谁优先调用谁\r\n\t//向上找\r\n\t\r\n\t\r\n\tzi z1 = new zi();\r\n}\r\n}\r\n" }, { "alpha_fraction": 0.6035242080688477, "alphanum_fraction": 0.6299559473991394, "avg_line_length": 13.133333206176758, "blob_id": "bf557ad0fb3a4524d698a91e7eee880e9f000bef", "content_id": "c627dfb4a197af003df71427e5e226067733b216", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 265, "license_type": "no_license", "max_line_length": 38, "num_lines": 15, "path": "/src/APisource/Ddemo1.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package APisource;\r\n\r\npublic class Ddemo1\r\n{\r\npublic static void main(String[] args)\r\n{\r\n\tTiankPad T1 = new TiankPad();\r\n\tT1.PowerOn();\r\n\t\r\n\tUsB u1 = new Mouse();\r\n\t//向上转型 usb类型,确保传入的是usb\r\n\t\r\n\tT1.UseDevice(u1);// 自动类型转换\r\n}\r\n}\r\n" }, { "alpha_fraction": 0.698630154132843, "alphanum_fraction": 0.7123287916183472, "avg_line_length": 14.222222328186035, "blob_id": "d0266cd2b067540bf516f01cc79c1e6db472b22b", "content_id": "e35d646cf8f05817f2746055f8456b58621b42f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 192, "license_type": 
"no_license", "max_line_length": 40, "num_lines": 9, "path": "/src/APisource/Demo02.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package APisource;\r\n\r\npublic interface Demo02 {\r\npublic abstract void methon();\r\n//可以省略不写\r\n//要一个实现类 public class 实现类 implement 接口名称\r\n//必须重写\r\n\r\n}\r\n" }, { "alpha_fraction": 0.6645962595939636, "alphanum_fraction": 0.6770186424255371, "avg_line_length": 15.88888931274414, "blob_id": "bc54aa6797b260a0e4deb39820dcc55fefb50aeb", "content_id": "dfe9293247b35939c974967a569ae401be0f5e63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 179, "license_type": "no_license", "max_line_length": 40, "num_lines": 9, "path": "/src/APisource/DemoImpl.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package APisource;\r\n\r\npublic class DemoImpl implements Demo02{\r\n\t@Override\r\n\tpublic void methon() {\r\n\t\t// TODO 自动生成的方法存根\r\n\tSystem.out.println(\"HELLO\");\t\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.6276150345802307, "alphanum_fraction": 0.6276150345802307, "avg_line_length": 19.727272033691406, "blob_id": "ecc185b6fb36c53cb89b4ad6dc36b1ef83aa8025", "content_id": "e98bd7c03d19da5f54197b3cc89f0ef8e9fb2ef6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 259, "license_type": "no_license", "max_line_length": 68, "num_lines": 11, "path": "/ThreadToLambda/DemoForLsmbda/Minimalist.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package DemoForLsmbda;\r\n\r\npublic class Minimalist\r\n{\r\n\tpublic static void main(String[] args)\r\n\t{\r\n\t\tnew Thread(()->{System.out.println(\"nihao\");}).start();\r\n\t\t\r\n\t\tnew Thread(()->System.out.println(\"nihao\")).start();//省略{}和;仅限一行代码\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.5486542582511902, "alphanum_fraction": 0.5652173757553101, "avg_line_length": 14.655172348022461, "blob_id": "be9532ec6aa11d09912e8e4d943b5dfdd7f588e8", "content_id": "88b527ecb13d37146e8d28ce13d5c2c47fc1074b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 645, "license_type": "no_license", "max_line_length": 41, "num_lines": 29, "path": "/src/cn/code/polymorphic/Multi01.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package cn.code.polymorphic;\r\n\r\n\r\n\r\npublic class Multi01 {\r\n// 父类引用 指向子类对象\r\n\r\n\t// 父类名称 对象 = new 子类名称()\r\n\tpublic static void main(String[] args) {\r\n\t\tFa obj = new Son();\r\n\t\tobj.funs();\r\n\t\tSystem.out.println(obj.num);\r\n\t\tobj.Show();//方法属于谁就用谁\r\n\t\t//以上全是向上转型 也就是多态 一定是安全的 同时不能调用子类的内容\r\n\t\t\r\n\t\t//下面是向下转型\r\n\t\t//子类名称 obj = (子类名称)父类对象\r\n\t\t((Son) obj).fuunsSp();\r\n\t\tSon obj1 = (Son)obj;\r\n\t\tobj1.fuunsSp();\r\n\t\t\r\n\t\tif (obj instanceof Son2) {\r\n\t\t\tSon2 obj2 = (Son2)obj;\r\n\t\t}\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.5860655903816223, "alphanum_fraction": 0.6270492076873779, "avg_line_length": 14.266666412353516, "blob_id": "6b7cb0cfeff95eb51b199b86e5dc1f885b36647e", "content_id": "23576b439d009c8ee2e2320344e18b72f0a4844e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 264, "license_type": "no_license", "max_line_length": 40, "num_lines": 15, "path": "/src/APisource/Demo01.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package 
APisource;\r\n\r\npublic class Demo01 {\r\npublic static void main(String[] args) {\r\n\tDemoImpl D1 = new DemoImpl();\r\n\tD1.methon();\r\n\t\r\n\tdefaultImpl d1 = new defaultImpl();\r\n\td1.fun1();\r\n\td1.fun2();\r\n\t\r\n\t//通过接口名称调用方法\r\n\tDefault.funs3();\r\n}\r\n}\r\n" }, { "alpha_fraction": 0.5623529553413391, "alphanum_fraction": 0.6047058701515198, "avg_line_length": 19.25, "blob_id": "7172822e7efede3b0aa39a9940ab43291eb0519c", "content_id": "8b9b47a169c77a71df5e5c41289ad621e292d536", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 469, "license_type": "no_license", "max_line_length": 47, "num_lines": 20, "path": "/src/cn/code/base2/ArrayClass.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package cn.code.base2;\r\n//定义数储存对象\r\n\r\npublic class ArrayClass {\r\n\tpublic static void main(String[] args) {\r\n\t\t//创建数组\r\n\t\tPerson[] array = new Person[3];\r\n\t\t\r\n\t\tPerson one = new Person(\"hello1\",12);\r\n\t\tPerson two = new Person(\"hello21\",12);\r\n\t\tPerson three = new Person(\"hello321\",12);\r\n\t\t\r\n\t\tarray[0] = one;\r\n\t\tarray[0] = two;\r\n\t\tarray[0] = three;\r\n\t\t\r\n\t\tSystem.out.print(array[0].getM_nameString());\r\n\t\t//一旦创建 长度不能改变;\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.5141327381134033, "alphanum_fraction": 0.5209850072860718, "avg_line_length": 29.328859329223633, "blob_id": "75f600c293535c9e0423347d1965526536382045", "content_id": "cb739ab44a9eef2f70a8f3cfd741595db5101ea3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4736, "license_type": "no_license", "max_line_length": 94, "num_lines": 149, "path": "/src/GetSrtClass.py", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "UTF-8", "text": "\r\nimport urllib\r\nimport json\r\nimport time\r\nimport re\r\nfrom tencentcloud.common import credential\r\nfrom tencentcloud.common.profile.client_profile import ClientProfile\r\nfrom tencentcloud.common.profile.http_profile import HttpProfile\r\nfrom tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException\r\nfrom tencentcloud.asr.v20190614 import asr_client, models\r\nimport DownloadUrlGet\r\n\r\n\r\nclass GetSrt:\r\n\r\n def __init__(self,SecretId,SecretKey):\r\n \"\"\"\r\n 请先调用设置url 最后获取result\r\n \"\"\"\r\n self.__SecretId = SecretId\r\n self.__SecretKey = SecretKey\r\n self.__url = \"\"\r\n self.__rowData = None\r\n self.__ResultData = None\r\n\r\n def simpleGetResultFile(self):\r\n \"\"\"\r\n 适合单url\r\n 先设置url!!!!!!!!再运行!!!\r\n \"\"\"\r\n try:\r\n client, resp = self.__getClientAndresp()\r\n self.__rowData = self.__getREsult(resp, client)\r\n self.__handlerData()\r\n self.__writeText()\r\n except TencentCloudSDKException as err:\r\n print(err)\r\n\r\n def GetKeyResultFile(self,urlList,keyList):\r\n \"\"\"\r\n 适合多url\r\n \"\"\"\r\n for url,key in urlList,keyList:\r\n self.setUrl(url=url)\r\n client, resp = self.__getClientAndresp()\r\n self.__rowData = self.__getREsult(resp, client)\r\n self.__handlerData()\r\n self.__writeTextAndKeyName(KeyName=key)\r\n print(url+key)\r\n pass\r\n\r\n def setUrl(self,url):\r\n self.__url = url\r\n\r\n\r\n def __getClientAndresp(self):\r\n cred = credential.Credential(self.__SecretId,self.__SecretKey)\r\n httpProfile = HttpProfile()\r\n httpProfile.endpoint = \"asr.tencentcloudapi.com\"\r\n clientProfile = ClientProfile()\r\n clientProfile.httpProfile = httpProfile\r\n client = asr_client.AsrClient(cred, \"\", 
clientProfile)\r\n req = models.CreateRecTaskRequest()\r\n params = {\r\n \"EngineModelType\": \"16k_zh_video\",\r\n \"ChannelNum\": 1,\r\n \"ResTextFormat\": 0,\r\n \"SourceType\": 0,\r\n \"Url\": self.__url\r\n }\r\n req.from_json_string(json.dumps(params))\r\n resp = client.CreateRecTask(req)\r\n print(resp.to_json_string())\r\n return client,resp\r\n\r\n def __getREsult(self,resp, client):\r\n req1 = models.DescribeTaskStatusRequest()\r\n re = json.loads(json.dumps(resp.to_json_string()))\r\n fun = json.loads(re)\r\n print(fun)\r\n params1 = {\r\n \"TaskId\": fun['Data']['TaskId']\r\n }\r\n req1.from_json_string(json.dumps(params1))\r\n while(True):\r\n resp1 = client.DescribeTaskStatus(req1)\r\n result = json.loads(resp1.to_json_string())\r\n if(result['Data']['StatusStr'] == 'success'):\r\n break\r\n if(result['Data']['StatusStr'] == 'failed'):\r\n print(result)\r\n break\r\n time.sleep(5)\r\n print(result['Data']['StatusStr'])\r\n return result\r\n \r\n def __handlerData(self):\r\n result = self.__rowData['Data']['Result']\r\n txt = result.split('\\n')\r\n txt = str(txt)\r\n\r\n print(txt)\r\n txt = txt.replace(\"\\', \\'\", \"\\n\\n\")\r\n txt = re.sub(\"[,]\", \" --> \", txt)\r\n txt = txt.replace(\"[\", \"\")\r\n txt = txt.replace(\"]\", \"\")\r\n txt = \"\\n\\n\"+txt\r\n\r\n txt = self.__ChangeTxt(txt)\r\n\r\n self.__ResultData = txt\r\n \r\n\r\n def __ChangeTxt(self, txt):\r\n a = 0\r\n cou = txt.count(\"\\n\")/2\r\n b = str(a)\r\n while(True):\r\n b = str(a)\r\n txt = txt.replace(\"\\n\\n\", \"\\n\"+b+\"\\n\", 1)\r\n if(cou < 0):\r\n print(\"End\")\r\n txt = txt.replace(\" \", \"\\n\")\r\n break\r\n a += 1\r\n cou -= 1\r\n return txt\r\n\r\n def __writeText(self):\r\n Data = self.__ResultData\r\n Data = str(Data)\r\n a = time.time()\r\n a = str(int(a))\r\n with open(a+\"result.srt\", 'w', encoding='utf-8') as f:\r\n f.write(Data)\r\n \r\n def __writeTextAndKeyName(self,KeyName):\r\n Data = self.__ResultData\r\n Data = str(Data)\r\n with open(KeyName+\"result.srt\", 'w', encoding='utf-8') as f:\r\n f.write(Data)\r\n\r\nif __name__ == \"__main__\":\r\n SecretId = \"AKIDEGNH1WCy2sJeninrbasujGagV5RwLLMR\"\r\n SecretKey = \"jCTx8B5nQZT8sX1NXiVCm094kAJQOI3q\"\r\n\r\n g1 = DownloadUrlGet.UrlGet(secret_id=SecretId,secret_key=SecretKey,region=\"ap-nanjing\")\r\n\r\n g = GetSrt(SecretId=SecretId,SecretKey=SecretKey)\r\n g.GetKeyResultFile(urlList=g1.GetUrlDownList(),keyList=g1.getKeyList())\r\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.6527777910232544, "avg_line_length": 17.636363983154297, "blob_id": "ed881f3bd2ef68ca0462fbb33930efb4606fc53c", "content_id": "07f5390eeb70dac8cf66cbe3a301891806416eea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 262, "license_type": "no_license", "max_line_length": 40, "num_lines": 11, "path": "/src/cn/code/string/String02lake.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package cn.code.string;\r\n\r\npublic class String02lake {\r\npublic static void main(String[] args) {\r\n\tString s1 = \"a\";\r\n\tString s2 = \"a\";\r\n\tSystem.out.println(s1==s2);\r\n\t//直接写上“”就在字符串chang'liang池\r\n\t//对于引用类型来说是比较\r\n}\r\n}\r\n" }, { "alpha_fraction": 0.5767635107040405, "alphanum_fraction": 0.6473029255867004, "avg_line_length": 18.08333396911621, "blob_id": "50e1ab409ab5af2dc227b02664836ce458554f47", "content_id": "a341642d8b2a8b55a3763188746c423d3d6957ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", 
"length_bytes": 257, "license_type": "no_license", "max_line_length": 66, "num_lines": 12, "path": "/succed/cn/code/inClaas/INdemo01.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package cn.code.inClaas;\r\n\r\npublic class INdemo01 {\r\npublic static void main(String[] args) {\r\n\tInclass01 i1 = new Inclass01();\r\n\ti1.M1();\r\n\tInclass01.Dokidoki i2 = new Inclass01().new Dokidoki();//直接创建内部对象\r\n\ti2.beat();\r\n\t\r\n\ti2.M2();\r\n}\r\n}\r\n" }, { "alpha_fraction": 0.5285171270370483, "alphanum_fraction": 0.5760456323623657, "avg_line_length": 15.533333778381348, "blob_id": "329a305b5e5e8fee9b96aefb03d785d479106c92", "content_id": "9d9f4eab1c6c491f63cc039cd3ad61e5c7404ebb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 554, "license_type": "no_license", "max_line_length": 47, "num_lines": 30, "path": "/succed/cn/code/succeed/reabag/demo.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package cn.code.succeed.reabag;\r\n\r\nimport java.util.ArrayList;\r\n\r\npublic class demo {\r\n\tpublic static void main(String[] args) {\r\n\t\tManger M1 = new Manger(\"群主\",1000);\r\n\t\tMamber p1 = new Mamber(\"闲散人员\",0);\r\n\t\tMamber p2 = new Mamber(\"闲散人员\",0);\r\n\t\tMamber p3 = new Mamber(\"闲散人员\",0);\r\n\t\t\r\n\t\t\r\n\t\tM1.show();\r\n\t\tp1.show();\r\n\t\tp2.show();\r\n\t\t\r\n\t\t\r\n\t\tArrayList<Integer> redList = M1.send(100, 6);\r\n\t\t\r\n\t\tp1.recetive(redList);\r\n\t\tp2.recetive(redList);\r\n\t\tp3.recetive(redList);\r\n\t\t\r\n\t\tM1.show();\r\n\t\tp1.show();\r\n\t\tp2.show();\r\n\t\t\r\n\t\t\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.6181818246841431, "alphanum_fraction": 0.657575786113739, "avg_line_length": 17.41176414489746, "blob_id": "bb45448609d1eea65873cfbf0e203e615b0f9558", "content_id": "dc37b9a2e1c3d5a5f143b35f746c4b5d67646282", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 360, "license_type": "no_license", "max_line_length": 41, "num_lines": 17, "path": "/src/cn/code/ArraysAnfMath/math01.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package cn.code.ArraysAnfMath;\r\n\r\n\r\n\r\npublic class math01 {\r\n//abs 绝对值\r\n\t//ceil 向上取整\r\n\t//floor 向下取整\r\n\t//long round 四舍五入\r\n\t\r\n\tpublic static void main(String[] args) {\r\n\tSystem.out.println(Math.abs(-12));\r\n\tSystem.out.println(Math.ceil(13.3));\r\n\tSystem.out.println(Math.floor(13.3));\r\n\tSystem.out.println(Math.round(12.5));\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.519187331199646, "alphanum_fraction": 0.5530474185943604, "avg_line_length": 19.095237731933594, "blob_id": "fe52557f957990edf0962b685611247323ff55c8", "content_id": "c25867bbd78223f9f6f41c52b2083bfe296985b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 443, "license_type": "no_license", "max_line_length": 52, "num_lines": 21, "path": "/src/cn/code/base2/ArrayList03.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "UTF-8", "text": "package cn.code.base2;\r\nimport java.util.Random;\r\nimport java.util.ArrayList;\r\npublic class ArrayList03 {\r\n\t\tpublic static void main(String[] args) {\r\n\t\t\tRandom r1 = new Random();\r\n\t\t\tint ran;\r\n\t\t\tArrayList<Integer> a1 = new ArrayList<Integer>();\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tfor (int i = 0; i < 6; i++) {\r\n\t\t\t\tran = 
r1.nextInt(33)+1;\r\n\t\t\t\ta1.add(ran);\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t\tfor(int j = 0;j <a1.size();j++)\r\n\t\t\t{\r\n\t\t\t\tSystem.out.println(a1.get(j));\r\n\t\t\t}\r\n\t\t}\r\n}\r\n" }, { "alpha_fraction": 0.6480447053909302, "alphanum_fraction": 0.6480447053909302, "avg_line_length": 11.769230842590332, "blob_id": "94ab7308c523cb87dd4d3f7e7c7fee81d7b64cf7", "content_id": "eb4e282455647a02794d8c4b6fd3abbc492085eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 197, "license_type": "no_license", "max_line_length": 39, "num_lines": 13, "path": "/succed/cn/code/inClaas/skill.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package cn.code.inClaas;\r\n\r\npublic class skill implements killqueen\r\n{\r\n\r\n\t@Override\r\n\tpublic void use()\r\n\t{\r\n\t\t// TODO 自动生成的方法存根\r\n\t\tSystem.out.println(\"jianrenqusi\");\r\n\t}\r\n\t\r\n}\r\n" }, { "alpha_fraction": 0.6450304388999939, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 18.54166603088379, "blob_id": "c03ef8e7ae4058c64b56caecb1454854c643e8b2", "content_id": "64f97ec2604a0aaa4a62474318aee20fce3f93c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 493, "license_type": "no_license", "max_line_length": 51, "num_lines": 24, "path": "/src/cn/code/base2/Person.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "UTF-8", "text": "package cn.code.base2;\r\n\r\npublic class Person {\r\n\tprivate String m_nameString;\r\n\tprivate int m_age;\r\n\tpublic String getM_nameString() {\r\n\t\treturn m_nameString;\r\n\t}\r\n\tpublic void setM_nameString(String m_nameString) {\r\n\t\tthis.m_nameString = m_nameString;\r\n\t}\r\n\tpublic int getM_age() {\r\n\t\treturn m_age;\r\n\t}\r\n\tpublic void setM_age(int m_age) {\r\n\t\tthis.m_age = m_age;\r\n\t}\r\n\tpublic Person(String m_nameString, int m_age) {\r\n\t\t\r\n\t\tthis.m_nameString = m_nameString;\r\n\t\tthis.m_age = m_age;\r\n\t}\r\n\t\r\n}\r\n" }, { "alpha_fraction": 0.5555555820465088, "alphanum_fraction": 0.5675675868988037, "avg_line_length": 9.482758522033691, "blob_id": "4ab83b80e823b626dc5d5007883aa0883d4de8d4", "content_id": "54c92383d0a2d771c7cb56a90dcb0efbc5bbfa06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 405, "license_type": "no_license", "max_line_length": 40, "num_lines": 29, "path": "/src/APisource/SonIpml.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package APisource;\r\n\r\npublic class SonIpml implements SonApi {\r\n\r\n\t@Override\r\n\tpublic void fun2() {\r\n\t\t// TODO 自动生成的方法存根\r\n\t\t\r\n\t}\r\n\r\n\t@Override\r\n\tpublic void fun1() {\r\n\t\t// TODO 自动生成的方法存根\r\n\t\t\r\n\t}\r\n\r\n\t@Override\r\n\tpublic void funs5() {\r\n\t\t// TODO 自动生成的方法存根\r\n\t\t\r\n\t}\r\n\r\n\t@Override\r\n\tpublic void s1() {\r\n\t\t// TODO 自动生成的方法存根\r\n\t\t\r\n\t}\r\n\t\r\n}\r\n" }, { "alpha_fraction": 0.5589041113853455, "alphanum_fraction": 0.5780822038650513, "avg_line_length": 13.8695650100708, "blob_id": "6e2043226a4f87a1a94f2b62d05119fc408ae07f", "content_id": "3a478c44ceafbf0ce333717048aefde93e7e73c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 463, "license_type": "no_license", "max_line_length": 39, "num_lines": 23, "path": "/succed/cn/code/inClaas/Mydemo.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package 
cn.code.inClaas;\r\n\r\n//只要父类或接口实现类子需要使用一次,可以省掉定义,使用匿名内部定义\r\npublic class Mydemo\r\n{\r\n\tpublic static void main(String[] args)\r\n\t{\r\n\t\tMyInfet M1 = new MyinfetImp(); // 多态\r\n\t\tM1.Me1();\r\n\r\n\t\tMyInfet M2 = new MyInfet()\r\n\t\t{\r\n\t\t\t@Override\r\n\t\t\tpublic void Me1()\r\n\t\t\t{\r\n\t\t\t\t// TODO 自动生成的方法存根\r\n\t\t\t\tSystem.out.println(\"hello\");\r\n\t\t\t}// 匿名类部类\r\n\t\t};\r\n\r\n\t\tM2.Me1();\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.5968064069747925, "alphanum_fraction": 0.6067864298820496, "avg_line_length": 15.892857551574707, "blob_id": "f635bd6299ba5afa8fa2a8d27b49fc2a4b6ffa6e", "content_id": "9bf5fb82bd33c7968c18da8eb2e0a4c9edd4a13a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 523, "license_type": "no_license", "max_line_length": 81, "num_lines": 28, "path": "/ThreadToLambda/ThreadDemo2/DemoLock.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "UTF-8", "text": "package ThreadDemo2;\r\n\r\nimport java.util.concurrent.locks.Lock;\r\nimport java.util.concurrent.locks.ReentrantLock;\r\n\r\npublic class DemoLock implements Runnable\r\n{\r\n\tint ticket = 100;\r\n\tLock l = new ReentrantLock();\r\n\r\n\t@Override\r\n\tpublic void run()\r\n\t{\r\n\r\n\t\twhile (true) {\r\n\t\t\tl.lock();\r\n\t\t\t// TODO 自动生成的方法存根\r\n\t\t\tif (this.ticket > 0) {\r\n\t\t\t\tSystem.out.println(Thread.currentThread().getName()+\"买票---->\" + this.ticket);\r\n\t\t\t\tthis.ticket--;\r\n\t\t\t\tl.unlock();\r\n\t\t\t} else {\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.5301204919815063, "alphanum_fraction": 0.5301204919815063, "avg_line_length": 13.090909004211426, "blob_id": "202b79a2611ac67ac2bc4e2d832d5ade3541c1d4", "content_id": "f7b7b22cc2936ba3e4c7d595051e9555e399f612", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 528, "license_type": "no_license", "max_line_length": 39, "num_lines": 33, "path": "/ThreadToLambda/waitAndNOgir/eat.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package waitAndNOgir;\r\n\r\npublic class eat extends Thread\r\n{\r\n\tprivate BAO baozi;\r\n\r\n\tpublic eat(BAO baozi)\r\n\t{\r\n\r\n\t\tthis.baozi = baozi;\r\n\t}\r\n\r\n\t@Override\r\n\tpublic void run()\r\n\t{\r\n\t\twhile (true) {\r\n\t\t\tsynchronized (baozi) {\r\n\t\t\t\tif (baozi.flag == false) {\r\n\t\t\t\t\ttry {\r\n\t\t\t\t\t\tbaozi.wait();\r\n\t\t\t\t\t} catch (InterruptedException e) {\r\n\t\t\t\t\t\te.printStackTrace();\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t\t//被唤醒后的代码\r\n\t\t\t\tbaozi.flag = false;\r\n\t\t\t\tbaozi.notify();\r\n\t\t\t\tSystem.out.println(\"tmd就这????再来\");\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.5461689829826355, "alphanum_fraction": 0.5913556218147278, "avg_line_length": 17.576923370361328, "blob_id": "394639bb4a6b5b61bca69e01e05ea9c8d39872f9", "content_id": "090fe4e1769ee348e90458c927a40568c3b0b524", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 521, "license_type": "no_license", "max_line_length": 40, "num_lines": 26, "path": "/src/cn/code/string/String01.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "WINDOWS-1252", "text": "package cn.code.string;\r\n\r\npublic class String01 {\r\npublic static void main(String[] args) {\r\n\t//System.out.println(\"hello\");\r\n\t//´´½¨×Ö·û´®µÄ3+1\r\n\t//one\r\n\t//public String();\r\n\t//string(char[] 
array)\r\n\t//string(byte[] array)\r\n\tString s1 = new String();\r\n\tSystem.out.println(s1);\r\n\t\r\n\tchar[] c1 = {'a','b'};\r\n\tString s2 = new String(c1);\r\n\tSystem.out.println(s2);\r\n\t\r\n\tbyte[] b1 = {97,98,99};\r\n\tString s3 = new String(b1);\r\n\tSystem.out.println(s3);\r\n\t\r\n\tString s4 = \"hello\";\r\n\tSystem.out.println(s4);\r\n\r\n}\r\n}\r\n" }, { "alpha_fraction": 0.5988538861274719, "alphanum_fraction": 0.6389684677124023, "avg_line_length": 17.38888931274414, "blob_id": "30cbf040c8709539d809b03d7640963e643498c9", "content_id": "486027d73f201eee5923deb3ce02dda2b46fa171", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 403, "license_type": "no_license", "max_line_length": 45, "num_lines": 18, "path": "/src/cn/code/aboutstatic/Static04.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package cn.code.aboutstatic;\r\n\r\npublic class Static04 {\r\n\t\tpublic static void main(String[] args)\r\n\t\t{\r\n\t\t\tstatic03menonh s1 = new static03menonh();\r\n\t\t\ts1.m1();\r\n\t\t\tstatic03menonh.M2();//静态方法直接类名调用\r\n\t\t\t\r\n\t\t\t//静态代码块\r\n\t\t\t//一次性对静态变量赋值 优先\r\n\t\t\tStaticcodeblock s3= new Staticcodeblock();\r\n\t\t\tStaticcodeblock s4= new Staticcodeblock();\r\n\t\t\t\r\n\t\t\t\r\n\r\n}\r\n}\r\n" }, { "alpha_fraction": 0.6715927720069885, "alphanum_fraction": 0.6715927720069885, "avg_line_length": 19.75, "blob_id": "739cddf6103a973ed8d5910beb6d34a6964fc7ec", "content_id": "0d73fee7832d5f6c599c524b224bcf2f432923a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 671, "license_type": "no_license", "max_line_length": 61, "num_lines": 28, "path": "/succed/cn/code/succeed/reabag/Mamber.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package cn.code.succeed.reabag;\r\n\r\nimport java.util.ArrayList;\r\nimport java.util.Random;\r\n\r\npublic class Mamber extends User{\r\n\tpublic Mamber() {\r\n\t\t// TODO 自动生成的构造函数存根\r\n\t}\r\n\r\n\tpublic Mamber(String name, int money) {\r\n\t\tsuper(name, money);\r\n\t\t// TODO 自动生成的构造函数存根\r\n\t}@Override\r\n\tprotected Object clone() throws CloneNotSupportedException {\r\n\t\t// TODO 自动生成的方法存根\r\n\t\treturn super.clone();\r\n\t}\r\n\tpublic void recetive(ArrayList<Integer> list) {\r\n\t\t\r\n\t\tint index = new Random().nextInt(list.size());\r\n\t\tint delta = list.remove(index);\r\n\t\tint money = super.getMoney();\r\n\t\t\r\n\t\tsuper.setMoney(money+delta);\r\n\t}\r\n\t\r\n}\r\n" }, { "alpha_fraction": 0.6014492511749268, "alphanum_fraction": 0.6304348111152649, "avg_line_length": 11.800000190734863, "blob_id": "384d129c0402b6b2e79026a0bcab436a3a6c75ee", "content_id": "fa9ba934cd40279119256122a30f5a0445b741fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 138, "license_type": "no_license", "max_line_length": 30, "num_lines": 10, "path": "/src/APisource/inferfaceF2.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "UTF-8", "text": "package APisource;\r\n\r\npublic interface inferfaceF2 {\r\n\tpublic void fun2();\r\n\tpublic void s1();\r\n\tpublic default void s2()\r\n\t{\r\n\t\t\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.5483579635620117, "alphanum_fraction": 0.6248869895935059, "avg_line_length": 33.308509826660156, "blob_id": "8cbd33fe803855a4a4088fc44ce6bda4c5f378e9", "content_id": "e89cce7bfe4fde0c24fd4653e309ee6b55eecb4e", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 3319, "license_type": "no_license", "max_line_length": 827, "num_lines": 94, "path": "/one.py", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "UTF-8", "text": "import json\r\nimport time\r\nimport re\r\nfrom tencentcloud.common import credential\r\nfrom tencentcloud.common.profile.client_profile import ClientProfile\r\nfrom tencentcloud.common.profile.http_profile import HttpProfile\r\nfrom tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException\r\nfrom tencentcloud.asr.v20190614 import asr_client, models\r\n\r\n\r\nSecretId = \"AKIDEGNH1WCy2sJeninrbasujGagV5RwLLMR\"\r\nSecretKey = \"jCTx8B5nQZT8sX1NXiVCm094kAJQOI3q\"\r\nurl=\"https://1earning-1305041181.cos.ap-nanjing.myqcloud.com/%E5%B0%9A%E7%A1%85%E8%B0%B7%E5%B0%9A%E7%AD%B9%E7%BD%91Java%E9%A1%B9%E7%9B%AE%E5%AE%9E%E6%88%98%E5%BC%80%E5%8F%91%E6%95%99%E7%A8%8B(%E5%90%ABSSM%E6%A1%86%E6%9E%B6%2C%E5%BE%AE%E6%9C%8D%E5%8A%A1%E6%9E%B6%E6%9E%84%2C%E5%B0%81%E6%8D%B7%E4%B8%BB%E8%AE%B2)%20-%20109.109.%E5%B0%9A%E7%A1%85%E8%B0%B7_%E8%A7%92%E8%89%B2%E7%BB%B4%E6%8A%A4-%E5%88%86%E9%A1%B5-%E5%90%8E%E7%AB%AF-RoleMapper(Av95017741%2CP109).mp4?q-sign-algorithm=sha1&q-ak=AKIDxgajuqR8UnXUZN4taMeW1VfgiO1qPimAOUAjCY5lI6Q8b_pD1f2oLYmrAQI6BC96&q-sign-time=1620713135;1620716735&q-key-time=1620713135;1620716735&q-header-list=&q-url-param-list=&q-signature=a3f2d47ef80f6924be6c3af221a4341b78962c71&x-cos-security-token=gFRDp51A4EzgvxRNGaYqgB5iOUUljjea9771647610113c64825870e8b0e37c9fd6CsARe0zouq7LTTQQ1HXCqDnww0CcBVMqCapfs4XCC1m_4dyDFxwHVnp3pkKHstpBGm6AKaDwmOOd5BDfH2MNY91Ehm5u6QXn9drmgo8sa1UZufKps6BxE4nCJoQJ3J3TOyxYYuQrtQ0WQyrrjKSD7x70uuI2DbWVgtuHcjKog&response-content-type=application%2Foctet-stream&response-content-disposition=attachment\"\r\n\r\n\r\ndef getREsult(resp,client):\r\n req1 = models.DescribeTaskStatusRequest()\r\n re = json.loads(json.dumps(resp.to_json_string()))\r\n fun = json.loads(re)\r\n print(fun)\r\n params1 = {\r\n \"TaskId\": fun['Data']['TaskId']\r\n }\r\n\r\n\r\n req1.from_json_string(json.dumps(params1))\r\n while(True):\r\n resp1 = client.DescribeTaskStatus(req1)\r\n result = json.loads(resp1.to_json_string())\r\n if(result['Data']['StatusStr']=='success'):\r\n break\r\n if(result['Data']['StatusStr']=='failed'):\r\n print(result)\r\n break\r\n time.sleep(5)\r\n print(result['Data']['StatusStr'])\r\n return result\r\n\r\ndef GO():\r\n try: \r\n \r\n cred = credential.Credential(SecretId, SecretKey) \r\n httpProfile = HttpProfile()\r\n httpProfile.endpoint = \"asr.tencentcloudapi.com\"\r\n clientProfile = ClientProfile()\r\n clientProfile.httpProfile = httpProfile\r\n client = asr_client.AsrClient(cred, \"\", clientProfile) \r\n\r\n req = models.CreateRecTaskRequest()\r\n params = {\r\n \"EngineModelType\": \"16k_zh_video\",\r\n \"ChannelNum\": 1,\r\n \"ResTextFormat\": 0,\r\n \"SourceType\": 0,\r\n \"Url\":url\r\n }\r\n \r\n req.from_json_string(json.dumps(params))\r\n \r\n resp = client.CreateRecTask(req)\r\n print(resp.to_json_string()) \r\n return getREsult(resp,client)\r\n\r\n\r\n except TencentCloudSDKException as err: \r\n print(err) \r\n\r\ndef writeText(Data):\r\n Data = str(Data)\r\n with open('a.txt','w',encoding = 'utf-8') as f:\r\n f.write(Data)\r\n pass\r\n\r\ndef handlerData(Data):\r\n result = Data['Data']['Result']\r\n txt = result.split('\\n')\r\n txt = str(txt)\r\n \r\n print(txt)\r\n txt = txt.replace(\"\\', \\'\",\"\\r\\n\")\r\n txt = re.sub(\"[,]\", \" --> \", txt)\r\n txt = txt.replace(\"[\",\"\")\r\n txt = 
txt.replace(\"]\",\"\")\r\n writeText(txt)\r\n \r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n result = GO()\r\n print(result)\r\n handlerData(result)\r\n print(\"succeed\")\r\n" }, { "alpha_fraction": 0.49491095542907715, "alphanum_fraction": 0.5241730213165283, "avg_line_length": 19.83333396911621, "blob_id": "394d66b17fd6b0c2f1ecb6341d923cab68812b19", "content_id": "7b58a8afe7cba4bbd18ca22522a638716444119f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1698, "license_type": "no_license", "max_line_length": 57, "num_lines": 72, "path": "/src/cn/code/base2/array.java", "repo_name": "zhangjinhui152/Rust_c-java", "src_encoding": "GB18030", "text": "package cn.code.base2;\r\n\r\nimport java.util.Arrays;\r\n\r\npublic class array {\r\n\tpublic static void main(String[] args) {\r\n\t\t// 动态 指定长度\r\n\t\t// 数据类型【】 数组名称 = new 数据类型(数据长度);\r\n\t\tint[] numA = new int[10];\r\n\r\n\t\t// 静态 指定内容\r\n\t\tint[] numB = new int[] { 1, 2, 3, 4, 5, 5, 5 };\r\n\t\t// 自动推算\r\n\t\tString[] charA = new String[] { \"hello\", \"c++yes\" };\r\n\t\t// 格式省略\r\n\t\t// 省掉后面的new 数据类型\r\n\t\tint[] arrayA = { 1, 2, 3 };\r\n\t\tSystem.out.println(arrayA[1]);\r\n\t\t/*\r\n\t\t * 默认 int = 0 double = 0.0 char = '\\u0000' bool = false\r\n\t\t * \r\n\t\t */\r\n\t\tarrayA[1] = 2;\r\n\t\tint temp;\r\n\t\tint[] arrayB = arrayA;// a改变时b也改变;\r\n\t\tSystem.out.println(arrayA.length);\r\n\r\n\t\tfor (int i = 0; i < arrayA.length; i++) {\r\n\t\t\tif (arrayA[0] < arrayA[i]) {\r\n\t\t\t\ttemp = arrayA[i];\r\n\t\t\t\tarrayA[i] = arrayA[0];\r\n\t\t\t\tarrayA[0] = temp;\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\tSystem.out.println(arrayA[0]);\r\n\r\n\t\tfor (int time = 0; time < arrayA.length; time++) {\r\n\t\t\tSystem.out.println(arrayA[time]);\r\n\t\t}\r\n\r\n\t\t// 冒泡排序\r\n\t\t\r\n\t\tfor (int a = 0; a < arrayA.length; a++) {\r\n\t\t\tfor (int b = a; b < arrayA.length - 1 - a; b++) {\r\n\t\t\t\tif (arrayA[b] < arrayA[b + 1]) {\r\n\t\t\t\t\ttemp = arrayA[b];\r\n\t\t\t\t\tarrayA[b] = arrayA[b + 1];\r\n\t\t\t\t\tarrayA[b + 1] = temp;\r\n\t\t\t\t}\r\n\r\n\t\t\t}\r\n\t\t}\r\n\t\tfor (int time = 0; time < arrayA.length; time++) {\r\n\t\t\tSystem.out.println(arrayA[time]);\r\n\t\t}\r\n\r\n\t\tarrayPrint(arrayA);\r\n\t\tSystem.out.println(arrayA[2]);\r\n\t\t\r\n\t}\r\n\r\n\tpublic static void arrayPrint(int[] ar) {\r\n\t\tSystem.out.println(ar[2]);\r\n\t}\r\n\tpublic static int[] arrayreturn(int[] ar) {\r\n\t\tar[2] += 2;\r\n\t\treturn ar;\r\n\t}\r\n\tint[] ar1 = {1,2,3,4,5};\r\n\t//System.out.println(Arrays.toString(ar1));\r\n}\r\n" } ]
47
oiwah/classifier
https://github.com/oiwah/classifier
f3ac9ff58a2eb1f315bd6b14d56a0f697f764ab3
d2b2264a3a376cf48bae55799868cfbd9b5cd4d9
ea15e528c7985528cf478eea3968abfb6118a60b
refs/heads/master
2020-04-16T11:42:28.358819
2013-06-09T16:02:11
2013-06-09T16:02:11
1,670,993
8
3
null
null
null
null
null
[ { "alpha_fraction": 0.569369375705719, "alphanum_fraction": 0.5816216468811035, "avg_line_length": 24, "blob_id": "db98f5797f5cedda17788f8550ef3113b5d59307", "content_id": "d03de1358738275f009569bb1fedbcddbd85d4bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2775, "license_type": "no_license", "max_line_length": 88, "num_lines": 111, "path": "/multiclass/passive_aggressive/pa.cc", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#include \"pa.h\"\n\n#include <algorithm>\n\nnamespace classifier {\nnamespace pa {\nPA::PA(size_t mode) : mode_(mode), C_(0.001) {\n weight_matrix().swap(weight_);\n}\n\nvoid PA::SetC(double C) {\n if (C > 0.0)\n C_ = C;\n}\n\nvoid PA::Train(const datum& datum) {\n score2class scores(0);\n CalcScores(datum.fv, &scores);\n \n Update(datum, scores);\n}\n\nvoid PA::Train(const std::vector<datum>& data,\n const size_t iteration) {\n for (size_t iter = 0; iter < iteration; ++iter) {\n for (std::vector<datum>::const_iterator it = data.begin();\n it != data.end();\n ++it) {\n Train(*it);\n }\n }\n}\n\nvoid PA::Test(const feature_vector& fv,\n std::string* predict) const {\n score2class scores(0);\n CalcScores(fv, &scores);\n *predict = scores[0].second;\n}\n\nvoid PA::CalcScores(const feature_vector& fv,\n score2class* scores) const {\n scores->push_back(make_pair(non_class_score, non_class));\n\n for (weight_matrix::const_iterator it = weight_.begin();\n it != weight_.end();\n ++it) {\n double score = InnerProduct(fv, it->second);\n scores->push_back(make_pair(score, it->first));\n }\n\n sort(scores->begin(), scores->end(),\n std::greater<std::pair<double, std::string> >());\n}\n\nvoid PA::Update(const datum& datum,\n const score2class& scores) {\n std::string non_correct_predict;\n double hinge_loss = CalcLossScore(scores, datum.category, &non_correct_predict, 1.0);\n double fv_norm = CalcFvNorm(datum.fv);\n double update = 0.0;\n\n switch(mode_) {\n case 0:\n update = hinge_loss / fv_norm;\n break;\n\n case 1:\n update = std::min(hinge_loss / fv_norm, C_);\n break;\n\n case 2:\n update = hinge_loss / (fv_norm + 1 / (2.0 * C_));\n break;\n\n default:\n break;\n }\n update /= 2.0;\n\n if (update > 0.0) {\n weight_vector &correct_weight = weight_[datum.category];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (correct_weight.size() <= it->first)\n correct_weight.resize(it->first + 1, 0.0);\n correct_weight[it->first] += update * it->second;\n }\n\n if (non_correct_predict == non_class)\n return;\n\n weight_vector &wrong_weight = weight_[non_correct_predict];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (wrong_weight.size() <= it->first)\n wrong_weight.resize(it->first + 1, 0.0);\n wrong_weight[it->first] -= update * it->second;\n }\n }\n}\n\nvoid PA::GetFeatureWeight(size_t feature_id,\n std::vector<std::pair<std::string, double> >* results) const {\n ReturnFeatureWeight(feature_id, weight_, results);\n}\n\n} //namespace\n} //namespace\n" }, { "alpha_fraction": 0.6562150120735168, "alphanum_fraction": 0.6702127456665039, "avg_line_length": 23.80555534362793, "blob_id": "a04d0f1c3289c62efcba243af7f3a5cbd094a621", "content_id": "edbe463f137204da5a8db005ca7322828cb2f3fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1786, "license_type": "no_license", "max_line_length": 85, "num_lines": 72, "path": 
"/multiclass/confidence_weighted/cw.h", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#ifndef CLASSIFIER_CONFIDENCE_WEIGHTED_CW_H_\n#define CLASSIFIER_CONFIDENCE_WEIGHTED_CW_H_\n\n#include <iostream>\n#include <vector>\n\n#include \"../../utility/calc.h\"\n\n/**\n * CW (mode:0)\n * Confidence-weighted linear classification.\n * Dredze et al. ICML 2008\n * http://www.cs.jhu.edu/~mdredze/publications/icml_variance.pdf\n */\n\n/**\n * SCW-I (mode:1), SCW-II (mode:2)\n * Soft Confidence-Weighted Learning.\n * Jialei et al. ICML2012\n * http://icml.cc/2012/papers/86.pdf\n */\n\nnamespace classifier {\nnamespace cw {\ntypedef std::vector<double> covariance_vector;\ntypedef std::unordered_map<std::string, covariance_vector> covariance_matrix;\n\nclass CW {\n public:\n explicit CW(double phi = 0.0);\n ~CW() {};\n\n void Train(const datum& datum);\n void Train(const std::vector<datum>& data,\n const size_t iteration = 1);\n void Test(const feature_vector& fv,\n std::string* predict) const;\n void GetFeatureWeight(size_t feature_id,\n std::vector<std::pair<std::string, double> >* results) const;\n\n void SetC(double C) { C_ = C; }\n void ChangeMode(int mode) { mode_ = mode; }\n\n private:\n void CalcScores(const feature_vector& fv,\n score2class* scores) const;\n\n double CalcV(const datum& datum,\n const std::string& non_correct_predict);\n\n double CalcAlpha(double m, double v) const;\n double CalcAlpha0(double m, double v) const;\n double CalcAlpha1(double m, double v) const;\n double CalcAlpha2(double m, double v) const;\n\n double CalcBeta(double v, double alpha) const;\n\n void Update(const datum& datum, const score2class& scores);\n\n int mode_;\n\n weight_matrix weight_;\n covariance_matrix cov_;\n double phi_;\n\n double C_;\n};\n\n} //namespace\n} //namespace\n\n#endif //CLASSIFIER_CONFIDENCE_WEIGHTED_CW_H_\n" }, { "alpha_fraction": 0.5556029081344604, "alphanum_fraction": 0.5628461837768555, "avg_line_length": 26.611764907836914, "blob_id": "86618c8e290fd5adc1650f8294aa0fde1a664fe8", "content_id": "4962178fb475c7498112ab6a3a85d2e5aacbd07d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2347, "license_type": "no_license", "max_line_length": 88, "num_lines": 85, "path": "/utility/calc.h", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#ifndef CLASSIFIER_TOOL_CALC_H_\n#define CLASSIFIER_TOOL_CALC_H_\n\n#include \"feature.h\"\n#include \"weight.h\"\n\nnamespace classifier {\ntypedef std::vector<std::pair<double, std::string> > score2class;\ninline double InnerProduct(const feature_vector& fv,\n const weight_vector& wv) {\n double score = 0.0;\n for (feature_vector::const_iterator it = fv.begin();\n it != fv.end();\n ++it) {\n if (wv.size() <= it->first) continue;\n score += wv[it->first] * it->second;\n }\n return score;\n}\n\ninline double CalcFvNorm(const feature_vector& fv) {\n double fv_norm = 0.0;\n for (feature_vector::const_iterator it = fv.begin();\n it != fv.end();\n ++it)\n fv_norm += it->second * it->second;\n\n return fv_norm;\n}\n\ninline void ReturnFeatureWeight(size_t feature_id,\n const weight_matrix& wm,\n std::vector<std::pair<std::string, double> >* results) {\n for (weight_matrix::const_iterator it = wm.begin();\n it != wm.end();\n ++it) {\n std::string category = it->first;\n if (feature_id < it->second.size()) {\n double score = it->second.at(feature_id);\n results->push_back(make_pair(category, score));\n } else {\n results->push_back(make_pair(category, 0.0));\n }\n 
}\n}\n\ninline double CalcLossScore(const score2class& s2c,\n const std::string& correct,\n std::string* non_correct_predict,\n const double margin = 0.0) {\n bool correct_done = false;\n bool predict_done = false;\n double loss_score = margin;\n\n for (score2class::const_iterator it = s2c.begin();\n it != s2c.end();\n ++it) {\n if (it->second == correct) {\n loss_score -= it->first;\n correct_done = true;\n } else if (!predict_done) {\n *non_correct_predict = it->second;\n if (*non_correct_predict != non_class)\n loss_score += it->first;\n predict_done = true;\n }\n\n if (correct_done && predict_done)\n break;\n }\n\n return loss_score;\n}\n\ninline double CalcLossScore(const double score,\n const int correct,\n const double margin = 0.0) {\n if (correct == 1) {\n return margin - score;\n }\n return score - margin;\n}\n} //namespace\n\n#endif //CLASSIFIER_TOOL_CALC_H_\n" }, { "alpha_fraction": 0.6582959890365601, "alphanum_fraction": 0.6627802848815918, "avg_line_length": 23.77777862548828, "blob_id": "22b0fcaa0883283e193e80f94104535ce3166cbe", "content_id": "d2e4ed94d7ab2e0872a3dde2e0cfbe0575da99c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1115, "license_type": "no_license", "max_line_length": 85, "num_lines": 45, "path": "/multiclass/subgradient/averaged_hinge.h", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#ifndef CLASSIFIER_SUBGRADIENT_AVERAGED_HINGE_H_\n#define CLASSIFIER_SUBGRADIENT_AVERAGED_HINGE_H_\n\n#include <iostream>\n#include <vector>\n\n#include \"../../utility/calc.h\"\n\nnamespace classifier {\nnamespace subgradient {\nclass ASGDHinge {\n public:\n explicit ASGDHinge(double eta = 1.0);\n ~ASGDHinge() {};\n\n void Train(const datum& datum,\n bool calc_averaged = true);\n void Train(const std::vector<datum>& data,\n const size_t iteration = 1);\n void Test(const feature_vector& fv, std::string* predict) const;\n void GetFeatureWeight(size_t feature_id,\n std::vector<std::pair<std::string, double> >* results) const;\n\n private:\n void CalcScores(const feature_vector& fv,\n score2class* scores,\n size_t mode) const;\n\n void Update(const datum& datum,\n const score2class& scores);\n\n void CalcAveragedWeight();\n\n weight_matrix weight_;\n weight_matrix differential_weight_;\n weight_matrix averaged_weight_;\n\n size_t dataN_;\n double eta_;\n};\n\n} //namespace\n} //namespace\n\n#endif //CLASSIFIER_SUBGRADIENT_AVERAGED_HINGE_H_\n" }, { "alpha_fraction": 0.6673796772956848, "alphanum_fraction": 0.6727272868156433, "avg_line_length": 23.605262756347656, "blob_id": "ccefc2b4b2340891a079eaa1b3771f2375019dea", "content_id": "de29289a04871529befb2420b47050f50fb86f46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 935, "license_type": "no_license", "max_line_length": 85, "num_lines": 38, "path": "/multiclass/subgradient/hinge.h", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#ifndef CLASSIFIER_SUBGRADIENT_HINGE_H_\n#define CLASSIFIER_SUBGRADIENT_HINGE_H_\n\n#include <iostream>\n#include <vector>\n\n#include \"../../utility/calc.h\"\n\nnamespace classifier {\nnamespace subgradient {\nclass SubgradientHinge {\n public:\n explicit SubgradientHinge(double eta = 1.0);\n ~SubgradientHinge() {};\n\n void Train(const datum& datum);\n void Train(const std::vector<datum>& data,\n const size_t iteration = 1);\n void Test(const feature_vector& fv, std::string* predict) const;\n void GetFeatureWeight(size_t feature_id,\n 
std::vector<std::pair<std::string, double> >* results) const;\n\n private:\n void CalcScores(const feature_vector& fv,\n score2class* scores) const;\n\n void Update(const datum& datum,\n const score2class& scores);\n\n weight_matrix weight_;\n size_t dataN_;\n double eta_;\n};\n\n} //namespace\n} //namespace\n\n#endif //CLASSIFIER_SUBGRADIENT_HINGE_H_\n" }, { "alpha_fraction": 0.601385235786438, "alphanum_fraction": 0.6090279221534729, "avg_line_length": 30.481203079223633, "blob_id": "060c866796a15043809ba216bddeb39c118ea2da", "content_id": "9e8953f54029d94865cc214fb7cf6941803a92e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4187, "license_type": "no_license", "max_line_length": 99, "num_lines": 133, "path": "/multiclass/dual_averaging/da.cc", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#include \"da.h\"\n\n#include <cmath>\n#include <algorithm>\n\nnamespace classifier {\nnamespace dual_averaging {\nDualAveraging::DualAveraging(double gamma) : dataN_(0), gamma_(gamma) {\n weight_matrix().swap(weight_);\n weight_matrix().swap(subgradient_sum_);\n}\n\nvoid DualAveraging::Train(const datum& datum,\n bool primal) {\n CalcWeight(datum.fv);\n ++dataN_;\n\n score2class scores(0);\n CalcScores(datum.fv, &scores);\n Update(datum, scores);\n\n if (primal)\n CalcWeightAll();\n}\n\nvoid DualAveraging::Train(const std::vector<datum>& data,\n const size_t iteration) {\n for (size_t iter = 0; iter < iteration; ++iter) {\n for (std::vector<datum>::const_iterator it = data.begin();\n it != data.end();\n ++it) {\n Train(*it, false);\n }\n }\n CalcWeightAll();\n}\n\nvoid DualAveraging::Test(const feature_vector& fv,\n std::string* predict) const {\n score2class scores(0);\n CalcScores(fv, &scores);\n *predict = scores[0].second;\n}\n\nvoid DualAveraging::CalcWeight(const feature_vector& fv) {\n if (dataN_ == 0) return;\n double scalar = - sqrt(dataN_) / gamma_;\n\n for (weight_matrix::const_iterator wm_it = subgradient_sum_.begin();\n wm_it != subgradient_sum_.end();\n ++wm_it) {\n weight_vector &weight_vec = weight_[wm_it->first];\n weight_vector &subgradient_vec = subgradient_sum_[wm_it->first];\n for (feature_vector::const_iterator fv_it = fv.begin();\n fv_it != fv.end();\n ++fv_it) {\n if (subgradient_vec.size() <= fv_it->first)\n subgradient_vec.resize(fv_it->first + 1, 0.0);\n\n if (weight_vec.size() <= fv_it->first)\n weight_vec.resize(fv_it->first + 1, 0.0);\n\n weight_vec[fv_it->first] = scalar * subgradient_vec[fv_it->first];\n }\n }\n}\n\nvoid DualAveraging::CalcWeightAll() {\n double scalar = - sqrt(dataN_) / gamma_;\n for (weight_matrix::const_iterator wm_it = subgradient_sum_.begin();\n wm_it != subgradient_sum_.end();\n ++wm_it) {\n weight_vector &weight_vec = weight_[wm_it->first];\n weight_vector subgradient_vec = subgradient_sum_[wm_it->first];\n if (weight_vec.size() < subgradient_vec.size())\n weight_vec.resize(subgradient_vec.size(), 0.0);\n for (size_t feature_id = 0; feature_id < subgradient_vec.size(); ++feature_id) {\n weight_vec[feature_id] = scalar * subgradient_vec[feature_id];\n }\n }\n}\n\nvoid DualAveraging::CalcScores(const feature_vector& fv,\n score2class* scores) const {\n scores->push_back(make_pair(non_class_score, non_class));\n\n for (weight_matrix::const_iterator it = weight_.begin();\n it != weight_.end();\n ++it) {\n double score = InnerProduct(fv, it->second);\n scores->push_back(make_pair(score, it->first));\n }\n\n sort(scores->begin(), scores->end(),\n std::greater<std::pair<double, 
std::string> >());\n}\n\nvoid DualAveraging::Update(const datum& datum,\n const score2class& scores) {\n std::string non_correct_predict;\n double hinge_loss = CalcLossScore(scores, datum.category, &non_correct_predict, 1.0);\n\n if (hinge_loss > 0.0) {\n weight_vector &correct_subgradient_vec = subgradient_sum_[datum.category];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (correct_subgradient_vec.size() <= it->first)\n correct_subgradient_vec.resize(it->first + 1, 0.0);\n correct_subgradient_vec[it->first] -= it->second / 2.0;\n }\n\n if (non_correct_predict == non_class)\n return;\n\n weight_vector &wrong_subgradient_vec = subgradient_sum_[non_correct_predict];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (wrong_subgradient_vec.size() <= it->first)\n wrong_subgradient_vec.resize(it->first + 1, 0.0);\n wrong_subgradient_vec[it->first] += it->second / 2.0;\n }\n }\n}\n\nvoid DualAveraging::GetFeatureWeight(size_t feature_id,\n std::vector<std::pair<std::string, double> >* results) const {\n ReturnFeatureWeight(feature_id, weight_, results);\n}\n\n} //namespace\n} //namespace\n" }, { "alpha_fraction": 0.7714285850524902, "alphanum_fraction": 0.7714285850524902, "avg_line_length": 27, "blob_id": "2c7b91ef5d219f84df5fcfe96e93eba7f396d13c", "content_id": "7f15d362b0614dc208d18c138b03334f097949c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 280, "license_type": "no_license", "max_line_length": 69, "num_lines": 10, "path": "/utility/weight.h", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#ifndef CLASSIFIER_TOOL_WEIGHT_H_\n#define CLASSIFIER_TOOL_WEIGHT_H_\n\n#include <unordered_map>\nnamespace classifier {\ntypedef std::vector<double> weight_vector;\ntypedef std::unordered_map<std::string, weight_vector> weight_matrix;\n} //namespace\n\n#endif //CLASSIFIER_TOOL_WEIGHT_H_\n" }, { "alpha_fraction": 0.5844435095787048, "alphanum_fraction": 0.5883111357688904, "avg_line_length": 25.146066665649414, "blob_id": "177ad300390c8e49de71eabec2b6f5117488a0a0", "content_id": "7924521c76f8e63e1fb95eb73ddc68bad2ee6a71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2327, "license_type": "no_license", "max_line_length": 91, "num_lines": 89, "path": "/test/test.h", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#ifndef CLASSIFIER_TEST_TEST_H_\n#define CLASSIFIER_TEST_TEST_H_\n\n#include <iostream>\n#include <fstream>\n#include <sstream>\n\n#include \"../utility/feature.h\"\n#include \"../utility/neutral_parser.h\"\n#include \"../utility/libsvm_parser.h\"\n\nnamespace classifier {\nbool ParseFile(const std::string& file_path,\n std::vector<classifier::datum>* data,\n feature2id* f2i,\n bool libsvm=false) {\n std::vector<classifier::datum>(0).swap(*data);\n\n std::ifstream ifs(file_path);\n if (!ifs) {\n std::cerr << \"cannot open \" << file_path << std::endl;\n return false;\n }\n\n size_t lineN = 0;\n for (std::string line; getline(ifs, line); ++lineN) {\n datum datum;\n std::istringstream iss(line);\n\n std::string category = \"Not defined\";\n if (!(iss >> category)) {\n std::cerr << \"parse error: you must set category in line \" << lineN << std::endl;\n return false;\n }\n datum.category = category;\n\n if (!libsvm)\n parser::NeutralParser(&iss, f2i, &datum);\n else\n parser::LibsvmParser(&iss, &datum);\n data->push_back(datum);\n }\n\n return 
true;\n}\n\ntemplate <class T>\nvoid PrintFeatureWeights(T& classifier,\n const feature_vector& fv) {\n for (feature_vector::const_iterator it = fv.begin();\n it != fv.end();\n ++it) {\n size_t word = it->first;\n std::cout << word << std::endl;\n\n std::vector<std::pair<std::string, double> > results(0);\n classifier.GetFeatureWeight(word, &results);\n for (std::vector<std::pair<std::string, double> >::const_iterator it = results.begin();\n it != results.end();\n ++it) {\n std::cout << it->first << \"\\t\" << it->second << std::endl;\n }\n }\n}\n\ntemplate <class T>\nint Run (T& classifier,\n const char* classifier_name,\n const std::vector<classifier::datum>& train,\n const std::vector<classifier::datum>& test) {\n std::cout << classifier_name << std::endl;\n classifier.Train(train);\n\n size_t score = 0;\n for (size_t i = 0; i < test.size(); ++i) {\n std::string result;\n classifier.Test(test[i].fv, &result);\n if (test[i].category == result)\n ++score;\n }\n\n std::cout << \"accuracy : \" << score << \" / \" << test.size() << std::endl;\n std::cout << std::endl;\n return 0;\n}\n\n} //namespace\n\n#endif //CLASSIFIER_TEST_TEST_H_\n" }, { "alpha_fraction": 0.5671697854995728, "alphanum_fraction": 0.5837736129760742, "avg_line_length": 28.608938217163086, "blob_id": "cc8a750ef4188e8a1d40d751051e58ace2caa0ac", "content_id": "a49862220ab1a41369078e24150aa2dbaf1e8293", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5300, "license_type": "no_license", "max_line_length": 89, "num_lines": 179, "path": "/multiclass/confidence_weighted/cw.cc", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#include \"cw.h\"\n\n#include <cmath>\n#include <cfloat>\n#include <algorithm>\n\nnamespace classifier {\nnamespace cw {\nCW::CW(double phi) : mode_(0), phi_(phi), C_(1.0) {\n weight_matrix().swap(weight_);\n covariance_matrix().swap(cov_);\n}\n\nvoid CW::Train(const datum& datum) {\n if (mode_ != 0 && mode_ != 1 && mode_ != 2) {\n std::cout << \"mode should be set {0,1,2}.\" << std::endl;\n std::cout << \" mode 0 : Confidence-Weighted\" << std::endl;\n std::cout << \" mode 1 : Soft Confidence-Weighted1\" << std::endl;\n std::cout << \" mode 2 : Soft Confidence-Weighted2\" << std::endl;\n return;\n }\n score2class scores(0);\n CalcScores(datum.fv, &scores);\n Update(datum, scores);\n}\n\nvoid CW::Train(const std::vector<datum>& data,\n const size_t iteration) {\n for (size_t iter = 0; iter < iteration; ++iter) {\n for (std::vector<datum>::const_iterator it = data.begin();\n it != data.end();\n ++it) {\n Train(*it);\n }\n }\n}\n\nvoid CW::Test(const feature_vector& fv,\n std::string* predict) const {\n score2class scores(0);\n CalcScores(fv, &scores);\n *predict = scores[0].second;\n}\n\nvoid CW::CalcScores(const feature_vector& fv,\n score2class* scores) const {\n scores->push_back(make_pair(non_class_score, non_class));\n\n for (weight_matrix::const_iterator it = weight_.begin();\n it != weight_.end();\n ++it) {\n double score = InnerProduct(fv, it->second);\n scores->push_back(make_pair(score, it->first));\n }\n\n sort(scores->begin(), scores->end(),\n std::greater<std::pair<double, std::string> >());\n}\n\ndouble CW::CalcV(const datum& datum,\n const std::string& non_correct_predict) {\n double v = 0.0;\n covariance_vector &correct_cov = cov_[datum.category];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (correct_cov.size() <= it->first)\n correct_cov.resize(it->first + 1, 1.0);\n 
v += correct_cov[it->first] * it->second * it->second;\n }\n\n if (non_correct_predict == non_class)\n return v;\n\n covariance_vector &wrong_cov = cov_[non_correct_predict];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (wrong_cov.size() <= it->first)\n wrong_cov.resize(it->first + 1, 1.0);\n v += wrong_cov[it->first] * it->second * it->second;\n }\n\n return v;\n}\n\ndouble CW::CalcAlpha(double m, double v) const {\n if (mode_ == 0) {\n return CalcAlpha0(m, v);\n } else if (mode_ == 1) {\n return CalcAlpha1(m, v);\n } else if (mode_ == 2) {\n return CalcAlpha2(m, v);\n }\n return 0.0;\n}\n\ndouble CW::CalcAlpha0(double m, double v) const {\n double psi = 1.0 + phi_ * phi_ / 2.0;\n double phi2 = phi_ * phi_;\n double phi4 = phi2 * phi2;\n double zeta = 1.0 + phi_ * phi_;\n\n double alpha =\n (-m * psi + std::sqrt(m*m*phi4/4.0 + v*phi2*zeta)) / (v * zeta);\n return std::max(0.0, alpha);\n}\n\ndouble CW::CalcAlpha1(double m, double v) const {\n return std::min(C_, CalcAlpha0(m, v));\n}\n\ndouble CW::CalcAlpha2(double m, double v) const {\n double phi2 = phi_ * phi_;\n\n double n = v + 1.0 / (2.0 * C_);\n double gamma = phi_ * std::sqrt(phi2*m*m*v*v + 4*n*v*(n+v*phi2));\n double alpha = - (2.0 * m * n + phi2 * m * v) + gamma;\n if (alpha <= 0.0) return 0.0;\n alpha /= 2.0 * (n*n + n*v*phi2);\n return alpha;\n}\n\ndouble CW::CalcBeta(double v, double alpha) const {\n double u = (-alpha * v * phi_\n + std::sqrt(alpha * alpha * v * v * phi_ * phi_ + 4.0 * v));\n u = u * u / 4.0;\n\n double beta = alpha * phi_ / (std::sqrt(u) + v * alpha * phi_);\n return beta;\n}\n\nvoid CW::Update(const datum& datum,\n const score2class& scores) {\n std::string non_correct_predict;\n double m = - CalcLossScore(scores, datum.category, &non_correct_predict);\n double v = CalcV(datum, non_correct_predict);\n double alpha = CalcAlpha(m, v);\n double beta = CalcBeta(v, alpha);\n\n if (alpha > 0.0) {\n weight_vector &correct_weight = weight_[datum.category];\n covariance_vector &correct_cov = cov_[datum.category];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (correct_weight.size() <= it->first)\n correct_weight.resize(it->first + 1, 0.0);\n correct_weight[it->first] += alpha * correct_cov[it->first] * it->second;\n\n correct_cov[it->first] -=\n beta * it->second * it->second * correct_cov[it->first] * correct_cov[it->first];\n }\n\n if (non_correct_predict == non_class)\n return;\n\n weight_vector &wrong_weight = weight_[non_correct_predict];\n covariance_vector &wrong_cov = cov_[non_correct_predict];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (wrong_weight.size() <= it->first)\n wrong_weight.resize(it->first + 1, 0.0);\n wrong_weight[it->first] -= alpha * wrong_cov[it->first] * it->second;\n\n wrong_cov[it->first] -=\n beta * it->second * it->second * wrong_cov[it->first] * wrong_cov[it->first];\n }\n }\n}\n\nvoid CW::GetFeatureWeight(size_t feature_id,\n std::vector<std::pair<std::string, double> >* results) const {\n ReturnFeatureWeight(feature_id, weight_, results);\n}\n\n} //namespace\n} //namespace\n" }, { "alpha_fraction": 0.567415714263916, "alphanum_fraction": 0.567415714263916, "avg_line_length": 24.428571701049805, "blob_id": "ca1b49d806f65823cf01aa7d10e65d26e47c5e77", "content_id": "e198fb92c31688350f83fa7957c4784b5d4e7127", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 178, 
"license_type": "no_license", "max_line_length": 41, "num_lines": 7, "path": "/test/wscript", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\ndef build(bld):\n bld(features = 'cxx cprogram',\n source = ['test.cc'],\n target = 'classifier_test',\n use = ['multiclass_classifiers'])\n" }, { "alpha_fraction": 0.5794320702552795, "alphanum_fraction": 0.5874903798103333, "avg_line_length": 27.63736343383789, "blob_id": "be605b18e3f74c36d2db3e7e1e25cf76b21770a7", "content_id": "8abd7ffedd2ac234dfa3b7cd792eb9c3ade3cdcd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2606, "license_type": "no_license", "max_line_length": 98, "num_lines": 91, "path": "/multiclass/loglinear/loglinear_sgd.cc", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#include \"loglinear_sgd.h\"\n\n#include <cmath>\n#include <algorithm>\n\nnamespace classifier {\nnamespace loglinear {\nLogLinearSGD::LogLinearSGD(double eta) : eta_(eta) {\n weight_matrix().swap(weight_);\n}\n\nvoid LogLinearSGD::Train(const datum& datum) {\n score2class scores(0);\n CalcScores(datum.fv, &scores);\n Update(datum, scores);\n}\n\nvoid LogLinearSGD::Train(const std::vector<datum>& data,\n const size_t iteration) {\n for (size_t iter = 0; iter < iteration; ++iter) {\n for (std::vector<datum>::const_iterator it = data.begin();\n it != data.end();\n ++it) {\n Train(*it);\n }\n }\n}\n\nvoid LogLinearSGD::Test(const feature_vector& fv,\n std::string* predict) const {\n score2class scores(0);\n CalcScores(fv, &scores);\n *predict = scores[0].second;\n}\n\nvoid LogLinearSGD::CalcScores(const feature_vector& fv,\n score2class* scores) const {\n double max_score = 0.0;\n for (weight_matrix::const_iterator it = weight_.begin();\n it != weight_.end();\n ++it) {\n double score = InnerProduct(fv, it->second);\n max_score = std::max(max_score, score);\n scores->push_back(make_pair(score, it->first));\n }\n\n double sum_score = 0.0;\n for (score2class::iterator it = scores->begin();\n it != scores->end();\n ++it) {\n it->first = std::exp(it->first - max_score);\n sum_score += it->first;\n }\n\n for (score2class::iterator it = scores->begin();\n it != scores->end();\n ++it) {\n it->first /= sum_score;\n }\n\n sort(scores->begin(), scores->end(),\n std::greater<std::pair<double, std::string> >());\n}\n\nvoid LogLinearSGD::Update(const datum& datum,\n const score2class& scores) {\n weight_vector &correct_weight = weight_[datum.category];\n for (feature_vector::const_iterator fv_it = datum.fv.begin();\n fv_it != datum.fv.end();\n ++fv_it) {\n for (size_t i = 0; i < scores.size(); ++i) {\n weight_vector &weight_vec = weight_[scores[i].second];\n if (weight_vec.size() <= fv_it->first)\n weight_vec.resize(fv_it->first + 1, 0.0);\n weight_vec[fv_it->first] -= eta_ * scores[i].first * fv_it->second;\n }\n\n if (correct_weight.size() <= fv_it->first)\n correct_weight.resize(fv_it->first + 1, 0.0);\n\n correct_weight[fv_it->first] += eta_ * fv_it->second;\n }\n}\n\nvoid LogLinearSGD::GetFeatureWeight(size_t feature_id,\n std::vector<std::pair<std::string, double> >* results) const {\n ReturnFeatureWeight(feature_id, weight_, results);\n}\n\n} //namespace\n} //namespace\n" }, { "alpha_fraction": 0.670766294002533, "alphanum_fraction": 0.6754966974258423, "avg_line_length": 24.16666603088379, "blob_id": "c9146f6e2fa2b24c5f19aa8b7b9565f41c868099", "content_id": "1990540f92a7710de7907091aeb4615e8d58613f", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "C++", "length_bytes": 1057, "license_type": "no_license", "max_line_length": 85, "num_lines": 42, "path": "/multiclass/dual_averaging/da.h", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#ifndef CLASSIFIER_DUAL_AVERAGING_DA_H_\n#define CLASSIFIER_DUAL_AVERAGING_DA_H_\n\n#include <iostream>\n#include <vector>\n\n#include \"../../utility/calc.h\"\n\nnamespace classifier {\nnamespace dual_averaging {\nclass DualAveraging {\n public:\n explicit DualAveraging(double gamma = 1.0);\n ~DualAveraging() {};\n\n void Train(const datum& datum, bool primal = true);\n void Train(const std::vector<datum>& data,\n const size_t iteration = 1);\n void Test(const feature_vector& fv, std::string* predict) const;\n void GetFeatureWeight(size_t feature_id,\n std::vector<std::pair<std::string, double> >* results) const;\n\n private:\n void CalcWeight(const feature_vector& fv);\n void CalcWeightAll();\n\n void CalcScores(const feature_vector& fv,\n score2class* scores) const;\n\n void Update(const datum& datum,\n const score2class& scores);\n\n weight_matrix weight_;\n weight_matrix subgradient_sum_;\n size_t dataN_;\n double gamma_;\n};\n\n} //namespace\n} //namespace\n\n#endif //CLASSIFIER_DUAL_AVERAGING_DA_H_\n" }, { "alpha_fraction": 0.6702508926391602, "alphanum_fraction": 0.6720430254936218, "avg_line_length": 24.363636016845703, "blob_id": "0f75523ac99c9551144d4dae62566bfedc002f2e", "content_id": "6d49b8de85aa6ceb8bc05b73fdf68c41fb265605", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1116, "license_type": "no_license", "max_line_length": 85, "num_lines": 44, "path": "/multiclass/perceptron/averaged_perceptron.h", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#ifndef CLASSIFIER_PERCEPTRON_AVERAGED_PERCEPTRON_H_\n#define CLASSIFIER_PERCEPTRON_AVERAGED_PERCEPTRON_H_\n\n#include <iostream>\n#include <vector>\n\n#include \"../../utility/calc.h\"\n\nnamespace classifier {\nnamespace perceptron {\nclass AveragedPerceptron {\n public:\n AveragedPerceptron();\n ~AveragedPerceptron() {};\n\n void Train(const datum& datum,\n const bool calc_averaged = true);\n void Train(const std::vector<datum>& data,\n const size_t iteration = 1);\n void Test(const feature_vector& fv, std::string* predict) const;\n void GetFeatureWeight(size_t feature_id,\n std::vector<std::pair<std::string, double> >* results) const;\n\n private:\n void Update(const datum& datum,\n const std::string& predict);\n\n void Predict(const feature_vector& fv,\n std::string* predict,\n size_t mode = 0) const;\n\n void CalcAveragedWeight();\n\n weight_matrix weight_;\n weight_matrix differential_weight_;\n weight_matrix averaged_weight_;\n size_t dataN_;\n\n};\n\n} //namespace\n} //namespace\n\n#endif //CLASSIFIER_PERCEPTRON_AVERAGED_PERCEPTRON_H_\n" }, { "alpha_fraction": 0.7305389046669006, "alphanum_fraction": 0.7325349450111389, "avg_line_length": 24, "blob_id": "64cf5dea5841c83c8eaf07538b9fedc4dad3b9f0", "content_id": "56e9a9c21756a1d20fbf82c51ce03d69e6101031", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 501, "license_type": "no_license", "max_line_length": 66, "num_lines": 20, "path": "/utility/feature.h", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#ifndef CLASSIFIER_TOOL_FEATURE_H_\n#define CLASSIFIER_TOOL_FEATURE_H_\n\n#include <limits>\n#include <vector>\n#include <unordered_map>\n\nnamespace classifier {\ntypedef std::vector<std::pair<size_t, 
double> > feature_vector;\ntypedef std::unordered_map<std::string, size_t> feature2id;\nstruct datum {\n std::string category;\n feature_vector fv;\n};\n\nconst std::string non_class = \"None\";\nconst double non_class_score = std::numeric_limits<double>::lowest(); // lowest(), not min(): min() is the smallest positive double\n} //namespace\n\n#endif //CLASSIFIER_TOOL_FEATURE_H_\n\n" }, { "alpha_fraction": 0.5726513266563416, "alphanum_fraction": 0.5867128372192383, "avg_line_length": 28.56024169921875, "blob_id": "963205467d9ea4f975edc8d92fa4be6d30ca471d", "content_id": "ce86e99599c340afc6fea145725fff35be19acfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4907, "license_type": "no_license", "max_line_length": 89, "num_lines": 166, "path": "/multiclass/confidence_weighted/scw.cc", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#include <confidence_weighted/scw.h>\n\n#include <cmath>\n#include <cfloat>\n#include <algorithm>\n\nnamespace classifier {\nnamespace scw {\nSCW::SCW(double phi) : mode_(2), phi_(phi), C_(1.0) {\n weight_matrix().swap(weight_);\n covariance_matrix().swap(cov_);\n}\n\nvoid SCW::Train(const datum& datum) {\n score2class scores(0);\n CalcScores(datum.fv, &scores);\n Update(datum, scores);\n}\n\nvoid SCW::Train(const std::vector<datum>& data,\n const size_t iteration) {\n for (size_t iter = 0; iter < iteration; ++iter) {\n for (std::vector<datum>::const_iterator it = data.begin();\n it != data.end();\n ++it) {\n Train(*it);\n }\n }\n}\n\nvoid SCW::Test(const feature_vector& fv,\n std::string* predict) const {\n score2class scores(0);\n CalcScores(fv, &scores);\n *predict = scores[0].second;\n}\n\nvoid SCW::CalcScores(const feature_vector& fv,\n score2class* scores) const {\n scores->push_back(make_pair(non_class_score, non_class));\n\n for (weight_matrix::const_iterator it = weight_.begin();\n it != weight_.end();\n ++it) {\n double score = InnerProduct(fv, it->second);\n scores->push_back(make_pair(score, it->first));\n }\n\n sort(scores->begin(), scores->end(),\n std::greater<std::pair<double, std::string> >());\n}\n\ndouble SCW::CalcV(const datum& datum,\n const std::string& non_correct_predict) {\n double v = 0.0;\n covariance_vector &correct_cov = cov_[datum.category];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (correct_cov.size() <= it->first)\n correct_cov.resize(it->first + 1, 1.0);\n v += correct_cov[it->first] * it->second * it->second;\n }\n\n if (non_correct_predict == non_class)\n return v;\n\n covariance_vector &wrong_cov = cov_[non_correct_predict];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (wrong_cov.size() <= it->first)\n wrong_cov.resize(it->first + 1, 1.0);\n v += wrong_cov[it->first] * it->second * it->second;\n }\n\n return v;\n}\n\ndouble SCW::CalcAlpha(double m, double v) const {\n if (mode_ == 1) {\n return CalcAlpha1(m, v);\n } else if (mode_ == 2) {\n return CalcAlpha2(m, v);\n }\n return 0.0;\n}\n\ndouble SCW::CalcAlpha1(double m, double v) const {\n double psi = 1.0 + phi_ * phi_ / 2.0;\n double zeta = 1 + phi_ * phi_;\n double phi2_ = phi_*phi_;\n double phi4_ = phi2_*phi2_;\n\n double alpha =\n (-m * psi + std::sqrt(m*m*phi4_/4.0 + v*phi2_*zeta)) / (v * zeta);\n if (alpha <= 0.0) return 0.0;\n if (alpha >= C_) return C_;\n return alpha;\n}\n\ndouble SCW::CalcAlpha2(double m, double v) const {\n double n = v + 1.0 / (2.0 * C_);\n double gamma = phi_ * std::sqrt(phi_*phi_*m*m*v*v + 4*n*v*(n+v*phi_*phi_));\n double 
alpha = - (2.0 * m * n + phi_ * phi_ * m * v) + gamma;\n alpha /= ( 2.0 * (n*n + n*v*phi_*phi_) );\n if (alpha <= 0.0) return 0.0;\n return alpha;\n}\n\ndouble SCW::CalcBeta(double v, double alpha) const {\n double u = (-alpha * v * phi_\n + std::sqrt(alpha * alpha * v * v * phi_ * phi_ + 4.0 * v));\n u = u * u / 4.0;\n\n double beta = alpha * phi_ / (std::sqrt(u) + v * alpha * phi_);\n return beta;\n}\n\nvoid SCW::Update(const datum& datum,\n const score2class& scores) {\n std::string non_correct_predict;\n double m = - CalcLossScore(scores, datum.category, &non_correct_predict);\n double v = CalcV(datum, non_correct_predict);\n double alpha = CalcAlpha(m, v);\n double beta = CalcBeta(v, alpha);\n\n if (alpha > 0.0) {\n weight_vector &correct_weight = weight_[datum.category];\n covariance_vector &correct_cov = cov_[datum.category];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (correct_weight.size() <= it->first)\n correct_weight.resize(it->first + 1, 0.0);\n correct_weight[it->first] += alpha * correct_cov[it->first] * it->second;\n\n correct_cov[it->first] -=\n beta * it->second * it->second * correct_cov[it->first] * correct_cov[it->first];\n }\n\n if (non_correct_predict == non_class)\n return;\n\n weight_vector &wrong_weight = weight_[non_correct_predict];\n covariance_vector &wrong_cov = cov_[non_correct_predict];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (wrong_weight.size() <= it->first)\n wrong_weight.resize(it->first + 1, 0.0);\n wrong_weight[it->first] -= alpha * wrong_cov[it->first] * it->second;\n\n wrong_cov[it->first] -=\n beta * it->second * it->second * wrong_cov[it->first] * wrong_cov[it->first];\n }\n }\n}\n\nvoid SCW::GetFeatureWeight(size_t feature_id,\n std::vector<std::pair<std::string, double> >* results) const {\n ReturnFeatureWeight(feature_id, weight_, results);\n}\n\n} //namespace\n} //namespace\n" }, { "alpha_fraction": 0.5962034463882446, "alphanum_fraction": 0.6041715741157532, "avg_line_length": 30.145984649658203, "blob_id": "1121863242a27e2fd621302b8b6619fc0d009ec2", "content_id": "c182df869244a98d3d110281830c801a6326fd77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4267, "license_type": "no_license", "max_line_length": 95, "num_lines": 137, "path": "/multiclass/subgradient/averaged_hinge.cc", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#include \"averaged_hinge.h\"\n\n#include <cmath>\n#include <algorithm>\n\nnamespace classifier {\nnamespace subgradient {\nASGDHinge::ASGDHinge(double eta) : dataN_(0), eta_(eta) {\n weight_matrix().swap(weight_);\n weight_matrix().swap(differential_weight_);\n weight_matrix().swap(averaged_weight_);\n}\n\nvoid ASGDHinge::Train(const datum& datum,\n bool calc_averaged) {\n ++dataN_;\n score2class scores(0);\n CalcScores(datum.fv, &scores, 0);\n Update(datum, scores);\n\n if (calc_averaged)\n CalcAveragedWeight();\n}\n\nvoid ASGDHinge::Train(const std::vector<datum>& data,\n const size_t iteration) {\n for (size_t iter = 0; iter < iteration; ++iter) {\n for (std::vector<datum>::const_iterator it = data.begin();\n it != data.end();\n ++it) {\n Train(*it, false);\n }\n }\n\n CalcAveragedWeight();\n}\n\nvoid ASGDHinge::Test(const feature_vector& fv,\n std::string* predict) const {\n score2class scores(0);\n CalcScores(fv, &scores, 1);\n *predict = scores[0].second;\n}\n\nvoid ASGDHinge::CalcScores(const feature_vector& fv,\n 
score2class* scores,\n size_t mode) const {\n scores->push_back(make_pair(non_class_score, non_class));\n\n if (mode == 0) {\n for (weight_matrix::const_iterator it = weight_.begin();\n it != weight_.end();\n ++it) {\n double score = InnerProduct(fv, it->second);\n scores->push_back(make_pair(score, it->first));\n }\n } else if (mode == 1) {\n for (weight_matrix::const_iterator it = averaged_weight_.begin();\n it != averaged_weight_.end();\n ++it) {\n double score = InnerProduct(fv, it->second);\n scores->push_back(make_pair(score, it->first));\n }\n }\n\n sort(scores->begin(), scores->end(),\n std::greater<std::pair<double, std::string> >());\n}\n\nvoid ASGDHinge::Update(const datum& datum,\n const score2class& scores) {\n std::string non_correct_predict;\n double hinge_loss = CalcLossScore(scores, datum.category, &non_correct_predict, 1.0);\n\n if (hinge_loss > 0.0) {\n double step_distance = eta_ / (std::sqrt(dataN_) * 2.0);\n\n weight_vector &correct_weight = weight_[datum.category];\n weight_vector &correct_diffetial_weight = differential_weight_[datum.category];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (correct_weight.size() <= it->first)\n correct_weight.resize(it->first + 1, 1.0);\n correct_weight[it->first] += step_distance * it->second;\n\n if (correct_diffetial_weight.size() <= it->first)\n correct_diffetial_weight.resize(it->first + 1, 0.0);\n correct_diffetial_weight[it->first] += dataN_ * step_distance * it->second;\n }\n\n if (non_correct_predict == non_class)\n return;\n\n weight_vector &wrong_weight = weight_[non_correct_predict];\n weight_vector &wrong_diffetial_weight = differential_weight_[non_correct_predict];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (wrong_weight.size() <= it->first)\n wrong_weight.resize(it->first + 1, 1.0);\n wrong_weight[it->first] -= step_distance * it->second;\n\n if (wrong_diffetial_weight.size() <= it->first)\n wrong_diffetial_weight.resize(it->first + 1, 0.0);\n wrong_diffetial_weight[it->first] -= dataN_ * step_distance * it->second;\n }\n }\n}\n\nvoid ASGDHinge::CalcAveragedWeight() {\n weight_matrix ave_wm;\n\n for (weight_matrix::const_iterator wm_it = weight_.begin();\n wm_it != weight_.end();\n ++wm_it) {\n weight_vector &diff_wv = differential_weight_[wm_it->first];\n weight_vector &wv = weight_[wm_it->first];\n\n weight_vector &ave_wv = ave_wm[wm_it->first];\n ave_wv.resize(wv.size(), 0.0);\n\n for (size_t feature_id = 0; feature_id < wv.size(); ++feature_id) {\n ave_wv[feature_id] = wv[feature_id] - diff_wv[feature_id] / dataN_;\n }\n }\n\n averaged_weight_.swap(ave_wm);\n}\n\nvoid ASGDHinge::GetFeatureWeight(size_t feature_id,\n std::vector<std::pair<std::string, double> >* results) const {\n ReturnFeatureWeight(feature_id, weight_, results);\n}\n\n} //namespace\n} //namespace\n" }, { "alpha_fraction": 0.5938438177108765, "alphanum_fraction": 0.6017267107963562, "avg_line_length": 28.600000381469727, "blob_id": "6ef940b5d40d20973a5283a7a7dc5126d64822d1", "content_id": "aacaf7426f3102486f1eb6816b539e9298a7c119", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2664, "license_type": "no_license", "max_line_length": 102, "num_lines": 90, "path": "/multiclass/subgradient/hinge.cc", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#include \"hinge.h\"\n\n#include <cmath>\n#include <algorithm>\n\nnamespace classifier {\nnamespace subgradient 
{\nSubgradientHinge::SubgradientHinge(double eta) : dataN_(0), eta_(eta) {\n weight_matrix().swap(weight_);\n}\n\nvoid SubgradientHinge::Train(const datum& datum) {\n ++dataN_;\n score2class scores(0);\n CalcScores(datum.fv, &scores);\n\n Update(datum, scores);\n}\n\nvoid SubgradientHinge::Train(const std::vector<datum>& data,\n const size_t iteration) {\n for (size_t iter = 0; iter < iteration; ++iter) {\n for (std::vector<datum>::const_iterator it = data.begin();\n it != data.end();\n ++it) {\n Train(*it);\n }\n }\n}\n\nvoid SubgradientHinge::Test(const feature_vector& fv,\n std::string* predict) const {\n score2class scores(0);\n CalcScores(fv, &scores);\n *predict = scores[0].second;\n}\n\nvoid SubgradientHinge::CalcScores(const feature_vector& fv,\n score2class* scores) const {\n scores->push_back(make_pair(non_class_score, non_class));\n\n for (weight_matrix::const_iterator it = weight_.begin();\n it != weight_.end();\n ++it) {\n double score = InnerProduct(fv, it->second);\n scores->push_back(make_pair(score, it->first));\n }\n\n sort(scores->begin(), scores->end(),\n std::greater<std::pair<double, std::string> >());\n}\n\nvoid SubgradientHinge::Update(const datum& datum,\n const score2class& scores) {\n std::string non_correct_predict;\n double hinge_loss = CalcLossScore(scores, datum.category, &non_correct_predict, 1.0);\n\n if (hinge_loss > 0.0) {\n double step_distance = eta_ / (std::sqrt(dataN_) * 2.0);\n\n weight_vector &correct_weight = weight_[datum.category];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (correct_weight.size() <= it->first)\n correct_weight.resize(it->first + 1, 1.0);\n correct_weight[it->first] += step_distance * it->second;\n }\n\n if (non_correct_predict == non_class)\n return;\n\n weight_vector &wrong_weight = weight_[non_correct_predict];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (wrong_weight.size() <= it->first)\n wrong_weight.resize(it->first + 1, 1.0);\n wrong_weight[it->first] -= step_distance * it->second;\n }\n }\n}\n\nvoid SubgradientHinge::GetFeatureWeight(size_t feature_id,\n std::vector<std::pair<std::string, double> >* results) const {\n ReturnFeatureWeight(feature_id, weight_, results);\n}\n\n} //namespace\n} //namespace\n" }, { "alpha_fraction": 0.5978723168373108, "alphanum_fraction": 0.6025531888008118, "avg_line_length": 32.09859085083008, "blob_id": "982967d26b2026a653628c4cd87c8856feb563f9", "content_id": "ad534abc25a35a8fb72a93e51a6617308642e106", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2350, "license_type": "no_license", "max_line_length": 86, "num_lines": 71, "path": "/multiclass/complement_nb/complement_nb.cc", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#include \"complement_nb.h\"\n\n#include <cmath>\nnamespace classifier {\nnamespace naivebayes {\ndouble ComplementNaiveBayes::CalculateProbability(const feature_vector& fv,\n const std::string& category) const {\n double probability = 0.0;\n double smoothing_parameter = 0.0;\n if (smoothing_)\n smoothing_parameter = alpha_ - 1.0;\n\n // Class Probability\n probability -= log(\n ((document_sum_ - document_count_.at(category)) + smoothing_parameter) /\n ((double)document_sum_ + document_count_.size() * smoothing_parameter) );\n\n // Calculate Word Sum Except One Category\n double word_sum_except_a_category = 0.0;\n for (word_sum_vector::const_iterator it = 
word_sum_in_each_category_.begin();\n it != word_sum_in_each_category_.end();\n ++it) {\n if (it->first == category) continue;\n word_sum_except_a_category += it->second;\n }\n\n // Calculate Word Count Except One Category\n for (feature_vector::const_iterator fv_it = fv.begin();\n fv_it != fv.end();\n ++fv_it) {\n size_t word_id = fv_it->first;\n double word_count_except_a_category = 0.0;\n for (word_matrix::const_iterator cate_it = word_count_in_each_category_.begin();\n cate_it != word_count_in_each_category_.end();\n ++cate_it) {\n if (cate_it->first == category) continue;\n\n const word_vector &word_count_in_a_category\n = word_count_in_each_category_.at(cate_it->first);\n if (word_id < word_count_in_a_category.size())\n word_count_except_a_category += word_count_in_a_category.at(word_id);\n else\n continue;\n }\n\n // Word Probability\n if (word_count_except_a_category != 0) {\n probability -= fv_it->second * log(\n (word_count_except_a_category + smoothing_parameter)\n / ((double)word_sum_except_a_category\n + (fv.size() * smoothing_parameter)) );\n } else {\n if (!smoothing_) {\n probability = - non_class_score;\n break;\n }\n\n // Approximate the number of word summation\n probability -= fv_it->second * log(\n smoothing_parameter /\n ((double)word_sum_except_a_category\n + (fv.size() * smoothing_parameter)) );\n }\n }\n\n //std::cout << category << \" : \" << probability << std::endl;\n return probability;\n}\n\n} //namespace\n} //namespace\n" }, { "alpha_fraction": 0.6015661954879761, "alphanum_fraction": 0.6065496206283569, "avg_line_length": 29.536231994628906, "blob_id": "2a84e39bf3440c88df1f354dba3c34a5d6e09e75", "content_id": "62c4c197f298319154ea9a91570bd50633b6187b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4214, "license_type": "no_license", "max_line_length": 96, "num_lines": 138, "path": "/multiclass/naivebayes/nb.cc", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#include \"nb.h\"\n\n#include <cmath>\n\nnamespace classifier {\nnamespace naivebayes {\nNaiveBayes::NaiveBayes() : smoothing_(false), alpha_(0.0), document_sum_(0) {\n document_vector().swap(document_count_);\n word_sum_vector().swap(word_sum_in_each_category_);\n word_matrix().swap(word_count_in_each_category_);\n}\n\nvoid NaiveBayes::set_alpha(double alpha) {\n if (alpha <= 1.0) {\n std::cerr << \"you must set alpha more than 1.0\" << std::endl;\n } else {\n if (!smoothing_) smoothing_ = true;\n alpha_ = alpha;\n }\n}\n\nvoid NaiveBayes::Train(const std::vector<datum>& data) {\n for (std::vector<datum>::const_iterator it = data.begin();\n it != data.end();\n ++it) {\n ++document_sum_;\n std::string category = it->category;\n \n CountCategory(category);\n CountWord(category, it->fv);\n }\n}\n\nvoid NaiveBayes::Test(const feature_vector& fv, std::string* result) const {\n *result = non_class;\n double score = non_class_score;\n\n for (word_matrix::const_iterator it = word_count_in_each_category_.begin();\n it != word_count_in_each_category_.end();\n ++it) {\n std::string category = it->first;\n double probability = CalculateProbability(fv, category);\n\n if (*result == non_class || score < probability) {\n *result = category;\n score = probability;\n }\n }\n}\n\nvoid NaiveBayes::GetFeatureWeight(size_t feature_id,\n std::vector<std::pair<std::string, double> >* results) const {\n for (word_matrix::const_iterator it = word_count_in_each_category_.begin();\n it != word_count_in_each_category_.end();\n ++it) {\n std::string 
category = it->first;\n if (it->second.size() <= feature_id) {\n results->push_back(make_pair(category, 0.0));\n } else {\n double score = it->second.at(feature_id) / word_sum_in_each_category_.at(category);\n results->push_back(make_pair(category, score));\n }\n }\n}\n\nvoid NaiveBayes::CountCategory(const std::string& category) {\n if (document_count_.find(category) == document_count_.end()) {\n document_count_[category] = 1;\n word_sum_in_each_category_[category] = 0.0;\n word_count_in_each_category_.insert(make_pair(category, word_vector()));\n } else {\n ++document_count_[category];\n }\n}\n\nvoid NaiveBayes::CountWord(const std::string& category,\n const feature_vector& fv) {\n word_vector &word_count = word_count_in_each_category_[category];\n double &word_sum = word_sum_in_each_category_[category];\n for (feature_vector::const_iterator it = fv.begin();\n it != fv.end();\n ++it) {\n size_t word_id = it->first;\n double count = it->second;\n\n if (word_count.size() <= word_id)\n word_count.resize(word_id+1, 0.0);\n\n word_count[word_id] += count;\n word_sum += count;\n }\n}\n\ndouble NaiveBayes::CalculateProbability(const feature_vector& fv,\n const std::string& category) const {\n double probability = 0.0;\n double smoothing_parameter = 0.0;\n if (smoothing_)\n smoothing_parameter = alpha_ - 1.0;\n\n // Class Probability\n probability += log(\n (document_count_.at(category) + smoothing_parameter) /\n ((double)document_sum_ + document_count_.size() * smoothing_parameter) );\n\n // Word Probability\n for (feature_vector::const_iterator it = fv.begin();\n it != fv.end();\n ++it) {\n size_t word_id = it->first;\n const word_vector &word_count_in_a_category\n = word_count_in_each_category_.at(category);\n if (word_id < word_count_in_a_category.size()) {\n probability += log(\n (word_count_in_a_category.at(word_id) + smoothing_parameter)\n / (word_sum_in_each_category_.at(category) + (fv.size() * smoothing_parameter)) )\n * it->second;\n } else {\n if (!smoothing_) {\n probability = non_class_score;\n break;\n }\n\n // Approximate the number of word summation\n probability += log(\n smoothing_parameter /\n (word_sum_in_each_category_.at(category)\n + (fv.size() * smoothing_parameter)) )\n * it->second;\n }\n }\n\n //std::cout << category << \" : \" << probability << std::endl;\n return probability;\n}\n\n} //namespace\n} //namespace\n" }, { "alpha_fraction": 0.6945244669914246, "alphanum_fraction": 0.6945244669914246, "avg_line_length": 26.760000228881836, "blob_id": "1f3985fee4281789af1422a4956b30a1d57a569a", "content_id": "b58753ac0b2aaf29e27997488bf960ae4394b249", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1388, "license_type": "no_license", "max_line_length": 85, "num_lines": 50, "path": "/multiclass/naivebayes/nb.h", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#ifndef CLASSIFIER_NAIVEBAYES_NB_H_\n#define CLASSIFIER_NAIVEBAYES_NB_H_\n\n#include <iostream>\n#include <vector>\n#include <unordered_map>\n\n#include \"../../utility/feature.h\"\n\nnamespace classifier {\nnamespace naivebayes {\ntypedef std::unordered_map<std::string, size_t> document_vector;\ntypedef std::vector<double> word_vector;\ntypedef std::unordered_map<std::string, word_vector> word_matrix;\ntypedef std::unordered_map<std::string, double> word_sum_vector;\n\nclass NaiveBayes {\n public:\n NaiveBayes();\n virtual ~NaiveBayes() {};\n\n void set_alpha(double alpha);\n \n void Train(const std::vector<datum>& data);\n void Test(const 
feature_vector& fv, std::string* result) const;\n void GetFeatureWeight(size_t feature_id,\n std::vector<std::pair<std::string, double> >* results) const;\n protected:\n bool smoothing_;\n double alpha_; //smoothing parameter\n\n size_t document_sum_;\n document_vector document_count_;\n\n word_sum_vector word_sum_in_each_category_;\n word_matrix word_count_in_each_category_;\n\n private:\n void CountCategory(const std::string& category);\n void CountWord(const std::string& category,\n const feature_vector& fv);\n\n virtual double CalculateProbability(const feature_vector& fv,\n const std::string& category) const;\n};\n\n} //namespace\n} //namespace\n\n#endif //CLASSIFIER_NAIVEBAYES_NB_H_\n" }, { "alpha_fraction": 0.7202505469322205, "alphanum_fraction": 0.7202505469322205, "avg_line_length": 20.772727966308594, "blob_id": "a66e9dd1810e1b1acde1ab6c45ccc92bf9d39ccb", "content_id": "faa9130d0ec2e5ccfadfad6da69a822a90374762", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 479, "license_type": "no_license", "max_line_length": 65, "num_lines": 22, "path": "/multiclass/complement_nb/complement_nb.h", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#ifndef CLASSIFIER_COMPLEMENTNB_COMPLEMENTNB_H_\n#define CLASSIFIER_COMPLEMENTNB_COMPLEMENTNB_H_\n\n#include \"../naivebayes/nb.h\"\n\nnamespace classifier {\nnamespace naivebayes {\n\nclass ComplementNaiveBayes : public NaiveBayes {\n public:\n ~ComplementNaiveBayes() {};\n\n private:\n double CalculateProbability(const feature_vector& fv,\n const std::string& category) const;\n\n};\n\n} //namespace\n} //namespace\n\n#endif //CLASSIFIER_COMPLEMENTNB_COMPLEMENTNB_H_\n" }, { "alpha_fraction": 0.5824781060218811, "alphanum_fraction": 0.5954943895339966, "avg_line_length": 29.496183395385742, "blob_id": "b05b2db4e1fa0c4019c1ccbd150a1cc2e8ced681", "content_id": "e1abbdddc673fcd04f127d5c94014b51cec09c18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3995, "license_type": "no_license", "max_line_length": 93, "num_lines": 131, "path": "/multiclass/arow/arow.cc", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#include \"arow.h\"\n\n#include <cmath>\n#include <cfloat>\n#include <algorithm>\n\nnamespace classifier {\nnamespace arow {\nAROW::AROW(double phi) : phi_(phi) {\n weight_matrix().swap(weight_);\n covariance_matrix().swap(cov_);\n}\n\nvoid AROW::Train(const datum& datum) {\n score2class scores(0);\n CalcScores(datum.fv, &scores);\n Update(datum, scores);\n}\n\nvoid AROW::Train(const std::vector<datum>& data,\n const size_t iteration) {\n for (size_t iter = 0; iter < iteration; ++iter) {\n for (std::vector<datum>::const_iterator it = data.begin();\n it != data.end();\n ++it) {\n Train(*it);\n }\n }\n}\n\nvoid AROW::Test(const feature_vector& fv,\n std::string* predict) const {\n score2class scores(0);\n CalcScores(fv, &scores);\n *predict = scores[0].second;\n}\n\nvoid AROW::CalcScores(const feature_vector& fv,\n score2class* scores) const {\n scores->push_back(make_pair(non_class_score, non_class));\n\n for (weight_matrix::const_iterator it = weight_.begin();\n it != weight_.end();\n ++it) {\n double score = InnerProduct(fv, it->second);\n scores->push_back(make_pair(score, it->first));\n }\n\n sort(scores->begin(), scores->end(),\n std::greater<std::pair<double, std::string> >());\n}\n\ndouble AROW::CalcV(const datum& datum,\n const std::string& non_correct_predict) {\n double v = 0.0;\n 
covariance_vector &correct_cov = cov_[datum.category];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (correct_cov.size() <= it->first)\n correct_cov.resize(it->first + 1, 1.0);\n v += correct_cov[it->first] * it->second * it->second;\n }\n\n if (non_correct_predict == non_class)\n return v;\n\n covariance_vector &wrong_cov = cov_[non_correct_predict];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (wrong_cov.size() <= it->first)\n wrong_cov.resize(it->first + 1, 1.0);\n v += wrong_cov[it->first] * it->second * it->second;\n }\n\n return v;\n}\n\nvoid AROW::Update(const datum& datum,\n const score2class& scores) {\n std::string non_correct_predict;\n double loss = CalcLossScore(scores, datum.category, &non_correct_predict, 1.0);\n\n if (loss > 0.0) {\n double v = CalcV(datum, non_correct_predict);\n double beta = 1.0 / (v + 1.0 / (2.0 * phi_));\n double alpha = loss * beta;\n\n weight_vector &correct_weight = weight_[datum.category];\n covariance_vector &correct_cov = cov_[datum.category];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (correct_weight.size() <= it->first)\n correct_weight.resize(it->first + 1, 0.0);\n correct_weight[it->first] += alpha * correct_cov[it->first] * it->second;\n\n double tmp = correct_cov[it->first] * correct_cov[it->first] * it->second * it->second;\n correct_cov[it->first] -= beta * tmp;\n if (correct_cov[it->first] < 1.0E-100)\n correct_cov[it->first] = 1.0E-100;\n }\n\n if (non_correct_predict == non_class)\n return;\n\n weight_vector &wrong_weight = weight_[non_correct_predict];\n covariance_vector &wrong_cov = cov_[non_correct_predict];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (wrong_weight.size() <= it->first)\n wrong_weight.resize(it->first + 1, 0.0);\n wrong_weight[it->first] -= alpha * wrong_cov[it->first] * it->second;\n\n double tmp = wrong_cov[it->first] * wrong_cov[it->first] * it->second * it->second;\n wrong_cov[it->first] -= beta * tmp;\n if (wrong_cov[it->first] < 1.0E-100)\n wrong_cov[it->first] = 1.0E-100;\n }\n }\n}\n\nvoid AROW::GetFeatureWeight(size_t feature_id,\n std::vector<std::pair<std::string, double> >* results) const {\n ReturnFeatureWeight(feature_id, weight_, results);\n}\n\n} //namespace\n} //namespace\n" }, { "alpha_fraction": 0.6400862336158752, "alphanum_fraction": 0.6443965435028076, "avg_line_length": 21.634145736694336, "blob_id": "7db96cc513a05c830a8377ed97955982b2519222", "content_id": "7515e2cd18417e7ecc68aa90d83061d43ad344fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 928, "license_type": "no_license", "max_line_length": 85, "num_lines": 41, "path": "/multiclass/passive_aggressive/pa.h", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#ifndef CLASSIFIER_PASSIVE_AGGRESSIVE_PA_H_\n#define CLASSIFIER_PASSIVE_AGGRESSIVE_PA_H_\n\n#include <iostream>\n#include <vector>\n\n#include \"../../utility/calc.h\"\n\nnamespace classifier {\nnamespace pa {\nclass PA {\n public:\n explicit PA(size_t mode = 0);\n ~PA() {};\n\n void SetC(double C);\n\n void Train(const datum& datum);\n void Train(const std::vector<datum>& data,\n const size_t iteration = 1);\n void Test(const feature_vector& fv,\n std::string* predict) const;\n void GetFeatureWeight(size_t feature_id,\n std::vector<std::pair<std::string, double> >* results) const;\n\n 
private:\n void CalcScores(const feature_vector& fv,\n score2class* scores) const;\n\n void Update(const datum& datum,\n const score2class& scores);\n\n weight_matrix weight_;\n size_t mode_;\n double C_;\n};\n\n} //namespace\n} //namespace\n\n#endif //CLASSIFIER_PASSIVE_AGGRESSIVE_PA_H_\n" }, { "alpha_fraction": 0.6005706191062927, "alphanum_fraction": 0.6148359775543213, "avg_line_length": 23.172412872314453, "blob_id": "c4215bd7b80680e46526d43f0a9248b21fa52a45", "content_id": "8fda70bb88c9cdeff29b015d2f7573ebb666a798", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 701, "license_type": "no_license", "max_line_length": 54, "num_lines": 29, "path": "/utility/neutral_parser.h", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#ifndef CLASSIFIERS_UTILITY_NEUTRALPARSER_H_\n#define CLASSIFIERS_UTILITY_NEUTRALPARSER_H_\n\n#include <sstream>\n\n#include \"feature.h\"\n\nnamespace classifier {\nnamespace parser {\nvoid NeutralParser(std::istringstream* iss,\n feature2id* f2i,\n classifier::datum* datum) {\n std::string word = \"\";\n while (*iss >> word) {\n size_t word_id = 0;\n if (f2i->find(word) == f2i->end()) {\n word_id = f2i->size();\n f2i->insert(std::make_pair(word, word_id));\n } else {\n word_id = f2i->at(word);\n }\n\n datum->fv.push_back(std::make_pair(word_id, 1.0));\n }\n}\n} //namespace parser\n} //namespace classifier\n\n#endif //CLASSIFIERS_UTILITY_NEUTRALPARSER_H_\n" }, { "alpha_fraction": 0.4250706732273102, "alphanum_fraction": 0.4250706732273102, "avg_line_length": 41.439998626708984, "blob_id": "f70906ff3b65464fd0ee83d430b5e525ab4349b4", "content_id": "bdcc3fd092dde049a47337496b86f0d2da4beb8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1061, "license_type": "no_license", "max_line_length": 62, "num_lines": 25, "path": "/multiclass/wscript", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\ndef build(bld):\n bld.install_files('${PREFIX}/include/classifier',\n bld.path.ant_glob('**/*.h'),\n cwd=bld.path.find_dir('multiclass'),\n relative_trick=True)\n\n bld.objects(source = ['naivebayes/nb.cc',\n 'complement_nb/complement_nb.cc',\n 'perceptron/perceptron.cc',\n 'perceptron/averaged_perceptron.cc',\n 'passive_aggressive/pa.cc',\n 'confidence_weighted/cw.cc',\n 'arow/arow.cc',\n 'subgradient/hinge.cc',\n 'subgradient/averaged_hinge.cc',\n 'fobos/fobos.cc',\n 'fobos/cumulative_fobos.cc',\n 'dual_averaging/da.cc',\n 'loglinear/loglinear_sgd.cc',\n ],\n\n target = 'multiclass_classifiers',\n name = 'multiclass_classifiers')\n" }, { "alpha_fraction": 0.5898478627204895, "alphanum_fraction": 0.6127429008483887, "avg_line_length": 30.76555061340332, "blob_id": "9132e7e79b647d5d5d522b1707f2e4407f08fca7", "content_id": "238dbd883ffca73c0c52704777914fde883b567b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6639, "license_type": "no_license", "max_line_length": 102, "num_lines": 209, "path": "/test/test.cc", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#include \"test.h\"\n\n#include \"../multiclass/naivebayes/nb.h\"\n#include \"../multiclass/complement_nb/complement_nb.h\"\n\n#include \"../multiclass/perceptron/perceptron.h\"\n#include \"../multiclass/perceptron/averaged_perceptron.h\"\n#include \"../multiclass/passive_aggressive/pa.h\"\n#include \"../multiclass/confidence_weighted/cw.h\"\n#include 
\"../multiclass/arow/arow.h\"\n\n#include \"../multiclass/subgradient/hinge.h\"\n#include \"../multiclass/subgradient/averaged_hinge.h\"\n#include \"../multiclass/fobos/fobos.h\"\n#include \"../multiclass/fobos/cumulative_fobos.h\"\n#include \"../multiclass/dual_averaging/da.h\"\n\n#include \"../multiclass/loglinear/loglinear_sgd.h\"\n\n#include \"../utility/cmdline.h\"\n\nnamespace {\nstd::string algo_name = \"select algorithm(All/NB/CNB/P/AP/PA/PA1/PA2/CW/SGD/ASGD/FOBOS/CFOBOS/DA/LL)\";\nenum algo_num {\n All = 0,\n None = 9,\n\n NaiveBayes = 11,\n ComplementNaiveBayes = 12,\n Perceptron = 21,\n AveragedPerceptron = 22,\n PassiveAggressive = 31,\n PassiveAggressiveI = 32,\n PassiveAggressiveII = 33,\n ConfidenceWeighted = 41,\n\n SubgradientHinge = 101,\n AveragedSubgradientHinge = 102,\n FOBOS = 111,\n CumulativeFOBOS = 112,\n DualAveraging = 121,\n\n LogLinearSGD = 201,\n};\n\nalgo_num SelectAlgo(const std::string& algo_identifier) {\n if (algo_identifier == \"All\") return All;\n else if (algo_identifier == \"NB\") return NaiveBayes;\n else if (algo_identifier == \"CNB\") return ComplementNaiveBayes;\n else if (algo_identifier == \"P\") return Perceptron;\n else if (algo_identifier == \"AP\") return AveragedPerceptron;\n else if (algo_identifier == \"PA\") return PassiveAggressive;\n else if (algo_identifier == \"PAI\") return PassiveAggressiveI;\n else if (algo_identifier == \"PAII\") return PassiveAggressiveII;\n else if (algo_identifier == \"SGD\") return SubgradientHinge;\n else if (algo_identifier == \"ASGD\") return AveragedSubgradientHinge;\n else if (algo_identifier == \"FOBOS\") return FOBOS;\n else if (algo_identifier == \"CFOBOS\") return CumulativeFOBOS;\n else if (algo_identifier == \"DA\") return DualAveraging;\n else if (algo_identifier == \"LL\") return LogLinearSGD;\n else return None;\n}\n} //namespace\n\nint main(int argc, char** argv) {\n cmdline::parser parser;\n parser.add<std::string>(\"train\", 't', \"train file path\", true);\n parser.add<std::string>(\"classify\", 'c', \"classify file path\", true);\n parser.add<std::string>(\"algorithm\", 'a', algo_name, true, \"All\");\n parser.add<double>(\"alpha\", '\\0', \"alpha (default : 1.5) [NB/CNB]\", false, 1.5);\n parser.add<double>(\"C\", '\\0', \"C (default : 1.0) [PA-I|PA-II]\", false, 1.0);\n\n parser.parse_check(argc, argv);\n\n classifier::feature2id f2i(0);\n std::vector<classifier::datum> train;\n if (!ParseFile(parser.get<std::string>(\"train\"), &train, &f2i, true))\n return -1;\n\n std::vector<classifier::datum> test;\n if (!ParseFile(parser.get<std::string>(\"classify\"), &test, &f2i, true))\n return -1;\n\n classifier::naivebayes::NaiveBayes nb;\n nb.set_alpha(1.5);\n if (classifier::Run(nb, \"NaiveBayes\", train, test) == -1) {\n std::cerr << \"error occurring!\" << std::endl;\n }\n\n classifier::naivebayes::ComplementNaiveBayes c_nb;\n c_nb.set_alpha(1.5);\n if (classifier::Run(c_nb, \"ComplementNaiveBayes\", train, test) == -1) {\n std::cerr << \"error occurring!\" << std::endl;\n }\n\n classifier::perceptron::Perceptron perc;\n if (classifier::Run(perc, \"Perceptron\", train, test) == -1) {\n std::cerr << \"error occurring!\" << std::endl;\n }\n\n classifier::perceptron::AveragedPerceptron av_perc;\n if (classifier::Run(av_perc, \"AveragedPerceptron\", train, test) == -1) {\n std::cerr << \"error occurring!\" << std::endl;\n }\n\n classifier::pa::PA pa;\n if (classifier::Run(pa, \"PassiveAggressive\", train, test) == -1) {\n std::cerr << \"error occurring!\" << std::endl;\n }\n\n 
classifier::pa::PA pa_one(1);\n pa_one.SetC(1.0);\n if (classifier::Run(pa_one, \"PassiveAggressive-I\", train, test) == -1) {\n std::cerr << \"error occurring!\" << std::endl;\n }\n\n classifier::pa::PA pa_two(2);\n pa_two.SetC(1.0);\n if (classifier::Run(pa_two, \"PassiveAggressive-II\", train, test) == -1) {\n std::cerr << \"error occurring!\" << std::endl;\n }\n\n {\n double eta = 100.0;\n for (int i = 0; i < 5; ++i) {\n std::cout << \"eta : \" << eta << std::endl;\n classifier::cw::CW cw(eta);\n if (classifier::Run(cw, \"ConfidenceWeighted\", train, test) == -1) {\n std::cerr << \"error occurring!\" << std::endl;\n }\n eta *= 0.1;\n }\n }\n\n {\n double eta = 100.0;\n for (int i = 0; i < 5; ++i) {\n double C = 100.0;\n for (int j = 0; j < 10; ++j) {\n std::cout << \"eta : \" << eta << std::endl;\n std::cout << \"C : \" << C << std::endl;\n classifier::cw::CW scw1(eta);\n scw1.ChangeMode(1);\n scw1.SetC(C);\n if (classifier::Run(scw1, \"SoftConfidenceWeighted-I\", train, test) == -1) {\n std::cerr << \"error occurring!\" << std::endl;\n }\n C *= 0.5;\n }\n eta *= 0.1;\n }\n }\n\n {\n double eta = 100.0;\n for (int i = 0; i < 5; ++i) {\n double C = 1.0;\n for (int j = 0; j < 10; ++j) {\n std::cout << \"eta : \" << eta << std::endl;\n std::cout << \"C : \" << C << std::endl;\n classifier::cw::CW scw2(eta);\n scw2.ChangeMode(2);\n scw2.SetC(C);\n if (classifier::Run(scw2, \"SoftConfidenceWeighted-II\", train, test) == -1) {\n std::cerr << \"error occurring!\" << std::endl;\n }\n C *= 0.5;\n }\n eta *= 0.1;\n }\n }\n\n classifier::arow::AROW arow(0.01);\n if (classifier::Run(arow, \"AROW\", train, test) == -1) {\n std::cerr << \"AROW failed.\" << std::endl;\n }\n\n classifier::subgradient::SubgradientHinge sgh;\n if (classifier::Run(sgh, \"SubgradientHinge\", train, test) == -1) {\n std::cerr << \"error occurring!\" << std::endl;\n }\n\n classifier::subgradient::ASGDHinge asgdh;\n if (classifier::Run(asgdh, \"ASGDHinge\", train, test) == -1) {\n std::cerr << \"error occurring!\" << std::endl;\n }\n\n classifier::fobos::FOBOS fobos(1.0, 0.001);\n if (classifier::Run(fobos, \"FOBOS\", train, test) == -1) {\n std::cerr << \"error occurring!\" << std::endl;\n }\n\n classifier::fobos::CumulativeFOBOS cfobos(1.0, 0.001);\n if (classifier::Run(cfobos, \"CumulativeFOBOS\", train, test) == -1) {\n std::cerr << \"error occurring!\" << std::endl;\n }\n\n classifier::dual_averaging::DualAveraging da(0.001);\n if (classifier::Run(da, \"DualAveraging\", train, test) == -1) {\n std::cerr << \"error occurring!\" << std::endl;\n }\n\n classifier::loglinear::LogLinearSGD llsgd;\n if (classifier::Run(llsgd, \"LogLinearSGD\", train, test) == -1) {\n std::cerr << \"error occurring!\" << std::endl;\n }\n\n return 0;\n}\n" }, { "alpha_fraction": 0.5957656502723694, "alphanum_fraction": 0.6033973693847656, "avg_line_length": 30.007633209228516, "blob_id": "6b8cfe41dd485f6855830e14cf235a14a68dd8c8", "content_id": "7f7ba0bc905260c8479de5e75b58d94131472ee4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4062, "license_type": "no_license", "max_line_length": 104, "num_lines": 131, "path": "/multiclass/perceptron/averaged_perceptron.cc", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#include \"averaged_perceptron.h\"\n\n#include <algorithm>\n\nnamespace classifier {\nnamespace perceptron {\nAveragedPerceptron::AveragedPerceptron() : dataN_(0) {\n weight_matrix().swap(weight_);\n weight_matrix().swap(differential_weight_);\n 
weight_matrix().swap(averaged_weight_);\n}\n\nvoid AveragedPerceptron::Train(const datum& datum,\n const bool calc_averaged) {\n ++dataN_;\n std::string predict;\n Predict(datum.fv, &predict);\n Update(datum, predict);\n\n if (calc_averaged)\n CalcAveragedWeight();\n}\n\nvoid AveragedPerceptron::Train(const std::vector<datum>& data,\n const size_t iteration) {\n for (size_t iter = 0; iter < iteration; ++iter) {\n for (std::vector<datum>::const_iterator it = data.begin();\n it != data.end();\n ++it) {\n Train(*it, false);\n }\n }\n CalcAveragedWeight();\n}\n\nvoid AveragedPerceptron::Test(const feature_vector& fv,\n std::string* predict) const {\n Predict(fv, predict, 1);\n}\n\nvoid AveragedPerceptron::Predict(const feature_vector& fv,\n std::string* predict,\n size_t mode) const {\n score2class scores(0);\n scores.push_back(make_pair(non_class_score, non_class));\n\n if (mode == 0) {\n for (weight_matrix::const_iterator it = weight_.begin();\n it != weight_.end();\n ++it) {\n double score = InnerProduct(fv, it->second);\n scores.push_back(make_pair(score, it->first));\n }\n } else if (mode == 1) {\n for (weight_matrix::const_iterator it = averaged_weight_.begin();\n it != averaged_weight_.end();\n ++it) {\n double score = InnerProduct(fv, it->second);\n scores.push_back(make_pair(score, it->first));\n }\n }\n\n sort(scores.begin(), scores.end(),\n std::greater<std::pair<double, std::string> >());\n *predict = scores[0].second;\n}\n\nvoid AveragedPerceptron::Update(const datum& datum,\n const std::string& predict) {\n if (datum.category == predict)\n return;\n\n weight_vector &correct_weight = weight_[datum.category];\n weight_vector &correct_differential_weight = differential_weight_[datum.category];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (correct_weight.size() <= it->first)\n correct_weight.resize(it->first + 1, 0.0);\n correct_weight[it->first] += it->second / 2.0;\n\n if (correct_differential_weight.size() <= it->first)\n correct_differential_weight.resize(it->first + 1, 0.0);\n correct_differential_weight[it->first] += dataN_ * it->second / 2.0;\n }\n\n if (predict == non_class)\n return;\n\n weight_vector &wrong_weight = weight_[predict];\n weight_vector &wrong_differential_weight = differential_weight_[predict];\n for (feature_vector::const_iterator it = datum.fv.begin();\n it != datum.fv.end();\n ++it) {\n if (wrong_weight.size() <= it->first)\n wrong_weight.resize(it->first + 1, 0.0);\n wrong_weight[it->first] -= it->second / 2.0;\n\n if (wrong_differential_weight.size() <= it->first)\n wrong_differential_weight.resize(it->first + 1, 0.0);\n wrong_differential_weight[it->first] -= dataN_ * it->second / 2.0;\n }\n}\n\nvoid AveragedPerceptron::CalcAveragedWeight() {\n weight_matrix ave_wm;\n\n for (weight_matrix::const_iterator wm_it = weight_.begin();\n wm_it != weight_.end();\n ++wm_it) {\n weight_vector diff_wv = differential_weight_[wm_it->first];\n weight_vector wv = wm_it->second;\n\n weight_vector &ave_wv = ave_wm[wm_it->first];\n ave_wv.resize(wv.size(), 0.0);\n\n for (size_t feature_id = 0; feature_id < wv.size(); ++feature_id) {\n ave_wv[feature_id] = wv[feature_id] - diff_wv[feature_id] / dataN_;\n }\n }\n\n averaged_weight_.swap(ave_wm);\n}\n\nvoid AveragedPerceptron::GetFeatureWeight(size_t feature_id,\n std::vector<std::pair<std::string, double> >* results) const {\n ReturnFeatureWeight(feature_id, averaged_weight_, results);\n}\n\n} //namespace\n} //namespace\n" }, { "alpha_fraction": 0.6766467094421387, "alphanum_fraction":
0.6846307516098022, "avg_line_length": 22.85714340209961, "blob_id": "9bbcc7ebd08863487fbb0b39fbfa340eb90e6c32", "content_id": "e36aabfab8fda84ea7d67ca630b50b9af8f9852b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 501, "license_type": "no_license", "max_line_length": 51, "num_lines": 21, "path": "/utility/libsvm_parser.h", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#ifndef CLASSIFIER_UTILITY_LIBSVMPARSER_H_\n#define CLASSIFIER_UTILITY_LIBSVMPARSER_H_\n\n#include <sstream>\n\n#include \"feature.h\"\n\nnamespace classifier {\nnamespace parser {\nvoid LibsvmParser(std::istringstream* iss,\n classifier::datum* datum) {\n size_t id = 0;\n char comma = 0;\n double value = 0.0;\n while (*iss >> id >> comma >> value)\n datum->fv.push_back(std::make_pair(id, value));\n}\n} //namespace parser\n} //namespace classifier\n\n#endif //CLASSIFIER_UTILITY_LIBSVMPARSER_H_\n" }, { "alpha_fraction": 0.40136054158210754, "alphanum_fraction": 0.4108843505382538, "avg_line_length": 25.25, "blob_id": "8e8975c23f6034069d077c339bd7f4b31ac2db1f", "content_id": "e70a167ce8ec770f3080557e6c96e5fc2481c7e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 735, "license_type": "no_license", "max_line_length": 62, "num_lines": 28, "path": "/wscript", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nAPPNAME = 'classifier'\nVERSION = '0.5.0'\n\ntop = '.'\nout = 'build'\n\ndef options(opt):\n opt.load('compiler_cxx')\n\ndef configure(conf):\n conf.load('compiler_cxx')\n conf.env.append_unique('CXXFLAGS',\n ['-std=c++0x',\n '-O2',\n '-W',\n '-Wall'])\n conf.env.append_unique('LINKFLAGS', ['-std=c++0x',\n '-O2',\n '-W',\n '-Wall'])\n conf.env.HPREFIX = conf.env.PREFIX + '/include/classifier'\n\ndef build(bld):\n# bld.recurse('binary')\n bld.recurse('multiclass')\n bld.recurse('test')\n" }, { "alpha_fraction": 0.6675324440002441, "alphanum_fraction": 0.6727272868156433, "avg_line_length": 20.79245376586914, "blob_id": "832e5a1ed435e90d44ff53c32de31537972b5f30", "content_id": "29a18b4c72c7fd9589b7d67522a58fa0d79c5739", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1155, "license_type": "no_license", "max_line_length": 72, "num_lines": 53, "path": "/README.md", "repo_name": "oiwah/classifier", "src_encoding": "UTF-8", "text": "Classification Library -- Several Online Learning Algorithms\n============================================================\n\nEnvironmental Requirements\n==========================\n\n* gcc version *> 4.4.0* (Using std::unordered_map)\n\nAbout\n=====\n\nA library of online learning algorithms implemented in C++\n\n\nNaiveBayes\n----------\n\n* NaiveBayes (multinomial + smoothing)\n* Complement NaiveBayes\n\nOnline Algorithms\n-----------------\n\n* Perceptron\n* Averaged Perceptron\n* Passive-Aggressive (PA, PA-I, PA-II)\n* Confidence-Weighted (Single Constraint)\n* Soft Confidence-Weighted (Single Constraint)\n* Adaptive Regularization of Weight Vectors (Single Constraint, Diagonal)\n\nOptimization\n------------\n\n* LogLinearModel (SGD)\n* Subgradient Method (Hinge-Loss)\n* Averaged Subgradient Method (Hinge-Loss)\n* Forward Backward Splitting (Hinge-Loss + L1-regularization)\n* FOBOS with Cumulative Penalty (Hinge-Loss + L1-regularization)\n* Primal-Dual Averaging (Hinge-Loss + L2-regularization[proximal])\n\nTODO\n----\n\nAuthor\n------\n\n**Hidekazu Oiwa** <[email
protected]>\n\nThe University of Tokyo,\n\nGraduate School of Mathematical Informatics,\n\n[HomePage](http://www.r.dl.itc.u-tokyo.ac.jp/~oiwa/)\n" } ]
30
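The classifier repository above implements Passive-Aggressive updates in C++ (`pa.h`, exercised in `test.cc`). As a quick illustration of the rule those files apply, here is a minimal sketch of the classic binary PA-I step in Python; the function name and data layout are invented for this sketch, and the repository's multiclass variant updates a correct-class and a wrong-class weight vector instead.

```python
# Minimal binary PA-I update, written as a language-neutral sketch of the
# step the C++ pa.h/pa.cc pair applies per class. All names here
# (pa1_update, w, x, y, C) are illustrative, not part of the repo's API.

def pa1_update(w, x, y, C=1.0):
    """One PA-I step: w maps feature id -> weight, x maps feature id ->
    value, y is +1 or -1, and C caps the step size."""
    margin = y * sum(w.get(f, 0.0) * v for f, v in x.items())
    loss = max(0.0, 1.0 - margin)          # hinge loss
    if loss > 0.0:
        sq_norm = sum(v * v for v in x.values())
        tau = min(C, loss / sq_norm)       # the clip is what makes it PA-I
        for f, v in x.items():
            w[f] = w.get(f, 0.0) + tau * y * v
    return w

w = {}
for x, y in [({0: 1.0, 1: 2.0}, +1), ({0: 2.0, 1: 0.5}, -1)]:
    w = pa1_update(w, x, y)
print(w)
```

The `min(C, ...)` clip corresponds to the `SetC` knob on the C++ class; plain PA omits it and PA-II instead adds `1/(2C)` to the denominator.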
CarlosML27/PyDutching
https://github.com/CarlosML27/PyDutching
2b54c994cc8f298f7295431a10b3129c8bc4952d
2f5e0a24073da1112df499224df20255b7915024
a28572008230740a2efbf476ffecdf4a8d1a81be
refs/heads/master
2021-01-21T07:08:29.479053
2017-02-27T15:41:42
2017-02-27T15:41:42
83,325,973
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6282431483268738, "alphanum_fraction": 0.6567828059196472, "avg_line_length": 32.15189743041992, "blob_id": "9b4cdde2f6298b142fb8a9f5466dc90ecd430492", "content_id": "6b28ea6cd059324aa24714a3afb4673e8b598be9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2698, "license_type": "permissive", "max_line_length": 138, "num_lines": 79, "path": "/PyDutching.py", "repo_name": "CarlosML27/PyDutching", "src_encoding": "UTF-8", "text": "'''\r\n PyDutching v0.1\r\n\r\n Copyright 2017 Carlos Morente Lozano (@CarlosML27)\r\n\r\n Licensed under the Apache License, Version 2.0 (the \"License\");\r\n you may not use this file except in compliance with the License.\r\n You may obtain a copy of the License at\r\n\r\n http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n Unless required by applicable law or agreed to in writing, software\r\n distributed under the License is distributed on an \"AS IS\" BASIS,\r\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n See the License for the specific language governing permissions and\r\n limitations under the License.\r\n'''\r\n\r\n\r\ntry:\r\n linea=float(raw_input('Introduce la linea del dutching (0.75, 1.0, 1.25, ...)\\n'))\r\nexcept ValueError:\r\n print \"No es un numero correcto\"\r\n\r\ndecimallinea = linea % 1\r\nif decimallinea == 0:\r\n lineainf = linea - 0.5\r\n lineasup = linea + 0.5\r\nelif decimallinea == 0.25:\r\n lineainf = linea - 0.75\r\n lineasup = linea + 0.25\r\nelif decimallinea == 0.75:\r\n lineainf = linea - 0.25\r\n lineasup = linea + 0.75\r\nelse:\r\n raise NameError('La linea del dutching no es correcta')\r\n \r\ntry:\r\n cuotainf=float(raw_input('Introduce la cuota del over {}\\n'.format(lineainf)))\r\nexcept ValueError:\r\n print \"No es un numero correcto\"\r\n\r\ntry:\r\n cuotasup=float(raw_input('Introduce la cuota del over {}\\n'.format(lineasup)))\r\nexcept ValueError:\r\n print \"No es un numero correcto\"\r\n\r\ntry:\r\n stake=float(raw_input('Introduce la cantidad de dinero que quieres apostar\\n'))\r\nexcept ValueError:\r\n print \"No es un numero correcto\"\r\n \r\n\r\nif decimallinea == 0.0:\r\n stakeinf = 0.0\r\n while (stakeinf * cuotainf) < stake:\r\n stakeinf += 0.01\r\n stakesup = stake - stakeinf\r\nelif decimallinea == 0.25:\r\n stakeinf = 0.0\r\n while (stakeinf * cuotainf) < (stake/2):\r\n stakeinf += 0.01\r\n stakesup = stake - stakeinf\r\nelif decimallinea == 0.75:\r\n stakeinf = stake/2\r\n stakesup = stake/2\r\n diferencia = abs(((stakeinf*cuotainf)-stake) - (stakesup*cuotasup))\r\n nuevostakeinf = stakeinf + 0.01\r\n nuevostakesup = stakesup - 0.01\r\n nuevadiferencia = abs(((nuevostakeinf*cuotainf)-stake) - (nuevostakesup*cuotasup))\r\n while(nuevadiferencia < diferencia):\r\n stakeinf = nuevostakeinf\r\n stakesup = nuevostakesup\r\n diferencia = nuevadiferencia\r\n nuevostakeinf = stakeinf + 0.01\r\n nuevostakesup = stakesup - 0.01\r\n nuevadiferencia = abs(((nuevostakeinf*cuotainf)-stake) - (nuevostakesup*cuotasup))\r\nprint '{} al over {} goles (cuota {}) y {} al over {} goles (cuota {})'.format(stakeinf, lineainf, cuotainf, stakesup, lineasup, cuotasup)\r\nraw_input()\r\n" }, { "alpha_fraction": 0.8390804529190063, "alphanum_fraction": 0.8390804529190063, "avg_line_length": 42.5, "blob_id": "df99712f0120216b05fbbc4d8201572b28694ea7", "content_id": "e6136967c385d53ec71e18b438eb6d15b6af7a43", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": 
"Markdown", "length_bytes": 87, "license_type": "permissive", "max_line_length": 73, "num_lines": 2, "path": "/README.md", "repo_name": "CarlosML27/PyDutching", "src_encoding": "UTF-8", "text": "# PyDutching\nA program that calculates dutching stakes for sports betting using Python\n" } ]
2
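PyDutching searches for the stake split in 0.01-unit increments. For a plain two-outcome dutch the split has a closed form, sketched below; all names are illustrative, and the script's extra handling of .25/.75 quarter-lines (where half the stake can be refunded on a push) is deliberately left out of the sketch.

```python
# Closed-form dutching over mutually exclusive outcomes with decimal
# odds: stake each outcome in proportion to its implied probability
# 1/odds, so the payout is identical whichever outcome lands.

def dutch(total_stake, odds):
    implied = [1.0 / o for o in odds]
    book = sum(implied)                  # below 1.0 means guaranteed profit
    stakes = [total_stake * p / book for p in implied]
    payout = total_stake / book          # the same for every outcome
    return stakes, payout

stakes, payout = dutch(100.0, [2.10, 1.95])
print(stakes)   # roughly [48.15, 51.85]
print(payout)   # roughly 101.11, returned whichever side wins
```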
weaverba137/comparator
https://github.com/weaverba137/comparator
88b9a63be7fdd768107e751a549814938c7f0468
40d6422ba70dd99511e5f4c94efe69f739a0d220
e3498f591ad7bf64ea331ea6a3d84a29d7db01b1
refs/heads/main
2023-09-03T02:53:55.681199
2023-08-17T17:53:50
2023-08-17T17:53:50
171,907,485
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5883838534355164, "alphanum_fraction": 0.6073232293128967, "avg_line_length": 19.30769157409668, "blob_id": "c2e4aa88a03c52bb37bc55517e9e9ed968e92d38", "content_id": "577cb5e8baa0463881ce1d797286abe9087271bd", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 792, "license_type": "permissive", "max_line_length": 77, "num_lines": 39, "path": "/doc/index.rst", "repo_name": "weaverba137/comparator", "src_encoding": "UTF-8", "text": ".. comparator documentation master file, created by\n sphinx-quickstart on Tue Feb 3 15:49:03 2015.\n You can adapt this file completely to your liking, but it should at least\n contain the root `toctree` directive.\n\n======================================\nWelcome to comparator's documentation!\n======================================\n\nIntroduction\n++++++++++++\n\ncomparator is a Python_ package. It is currently being developed on GitHub_.\n\n.. _Python: http://python.org\n.. _GitHub: https://github.com/weaverba137/comparator\n\nRequirements\n++++++++++++\n\ncomparator needs SQLAlchemy_.\n\n.. _SQLAlchemy: https://www.sqlalchemy.org/\n\nContents\n++++++++\n\n.. toctree::\n :maxdepth: 1\n\n api\n changes\n\nIndices and tables\n++++++++++++++++++\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n" }, { "alpha_fraction": 0.6184971332550049, "alphanum_fraction": 0.6300578117370605, "avg_line_length": 20.625, "blob_id": "f667dfa39dc1804d59b57e831abc6cf404585522", "content_id": "8443c8a4097df8ffb9bbe21c20b71f6638f7cf8c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 173, "license_type": "permissive", "max_line_length": 63, "num_lines": 8, "path": "/comparator/test/__init__.py", "repo_name": "weaverba137/comparator", "src_encoding": "UTF-8", "text": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# -*- coding: utf-8 -*-\n\"\"\"\ncomparator.test\n===============\n\nUsed to initialize the unit test framework.\n\"\"\"\n" }, { "alpha_fraction": 0.6419752836227417, "alphanum_fraction": 0.6707819104194641, "avg_line_length": 19.25, "blob_id": "934137c3fb55c48b4c3056af6b4722c8e85d5f76", "content_id": "c70a8fab91a50b2adfe3b90d23f81c796d8caf17", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 243, "license_type": "permissive", "max_line_length": 68, "num_lines": 12, "path": "/comparator/__init__.py", "repo_name": "weaverba137/comparator", "src_encoding": "UTF-8", "text": "# Licensed under a 3-clause BSD style license - see LICENSE.rst.\n# -*- coding: utf-8 -*-\n\"\"\"\ncomparator\n==========\n\nObtain filesystem metadata necessary for comparing the same data set\nat different locations.\n\"\"\"\n\n\n__version__ = '0.2.0.dev17'\n" }, { "alpha_fraction": 0.5678496956825256, "alphanum_fraction": 0.6012526154518127, "avg_line_length": 27.176469802856445, "blob_id": "0c2a875d810a477500be15ff92528f181b2a9391", "content_id": "14faa4064173d9b6f11bd2c2f6dd16d164224ede", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 479, "license_type": "permissive", "max_line_length": 90, "num_lines": 17, "path": "/comparator/test/test_top_level.py", "repo_name": "weaverba137/comparator", "src_encoding": "UTF-8", "text": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# -*- coding: utf-8 
-*-\n\"\"\"\ncomparator.test.test_top_level\n==============================\n\nTest top-level comparator functions.\n\"\"\"\nimport re\nfrom .. import __version__ as theVersion\n\n\ndef test_version_string():\n \"\"\"Ensure the version conforms to PEP386/PEP440.\n \"\"\"\n versionre = re.compile(r'([0-9]+!)?([0-9]+)(\\.[0-9]+)*((a|b|rc|\\.post|\\.dev)[0-9]+)?')\n assert versionre.match(theVersion) is not None\n" }, { "alpha_fraction": 0.6263157725334167, "alphanum_fraction": 0.6368421316146851, "avg_line_length": 22.75, "blob_id": "d8e0a52e1f645951fd5d9f06a1a5d313ec835646", "content_id": "7dd42d0a05b393e91158fafee22b159f65f683d1", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 190, "license_type": "permissive", "max_line_length": 64, "num_lines": 8, "path": "/comparator/checksum.py", "repo_name": "weaverba137/comparator", "src_encoding": "UTF-8", "text": "# Licensed under a 3-clause BSD style license - see LICENSE.rst.\n# -*- coding: utf-8 -*-\n\"\"\"\ncomparator.checksum\n===================\n\nObtain checksum files from an authoritative source.\n\"\"\"\n" }, { "alpha_fraction": 0.6650246381759644, "alphanum_fraction": 0.6798029541969299, "avg_line_length": 21.55555534362793, "blob_id": "d0008bc6df33e0f8ff643a46205fee380522371c", "content_id": "c0293e0857682e713868685e40404970ecc3151b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 203, "license_type": "permissive", "max_line_length": 65, "num_lines": 9, "path": "/README.rst", "repo_name": "weaverba137/comparator", "src_encoding": "UTF-8", "text": "==========\ncomparator\n==========\n\n.. image:: comparator.png\n :target: https://github.com/weaverba137/comparator\n :alt: Comparator\n\nCompare large data sets on geographically separated file systems.\n" }, { "alpha_fraction": 0.6187050342559814, "alphanum_fraction": 0.6187050342559814, "avg_line_length": 14.44444465637207, "blob_id": "dd931cd223ab463024f2eade8ee7b752816a94b5", "content_id": "08c961529f08f53fefd9d200be2443355d6f007b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 278, "license_type": "permissive", "max_line_length": 37, "num_lines": 18, "path": "/doc/api.rst", "repo_name": "weaverba137/comparator", "src_encoding": "UTF-8", "text": "==============\ncomparator API\n==============\n\n.. automodule:: comparator\n :members:\n\n.. automodule:: comparator.checksum\n :members:\n\n.. automodule:: comparator.db\n :members:\n\n.. automodule:: comparator.find\n :members:\n\n.. 
automodule:: comparator.initialize\n :members:\n" }, { "alpha_fraction": 0.6927223801612854, "alphanum_fraction": 0.6954177618026733, "avg_line_length": 24.295454025268555, "blob_id": "6bf67e25a6f61f5327a229f64bc38f4ce9cce303", "content_id": "e5e869b4bfcf9de117c76b36e02803e1b59323ce", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1113, "license_type": "permissive", "max_line_length": 76, "num_lines": 44, "path": "/setup.py", "repo_name": "weaverba137/comparator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n# NOTE: The configuration for the package, including the name, version, and\n# other information are set in the setup.cfg file.\n\nimport sys\nfrom setuptools import setup\n\n# First provide helpful messages if contributors try and run legacy commands\n# for tests or docs.\n\nTEST_HELP = \"\"\"\nNote: running tests is no longer done using 'python setup.py test'. Instead\nyou will need to run:\n\n pytest\n\nIf you don't already have pytest installed, you can install it with:\n\n pip install pytest\n\"\"\"\n\nDOCS_HELP = \"\"\"\nNote: building the documentation is no longer done using\n'python setup.py {0}'. Instead you will need to run:\n\n sphinx-build -W --keep-going -b html doc doc/_build/html\n\nIf you don't already have Sphinx installed, you can install it with:\n\n pip install Sphinx\n\"\"\"\n\nmessage = {'test': TEST_HELP,\n 'build_docs': DOCS_HELP.format('build_docs'),\n 'build_sphinx': DOCS_HELP.format('build_sphinx'), }\n\nfor m in message:\n if m in sys.argv:\n print(message[m])\n sys.exit(1)\n\nsetup()\n" }, { "alpha_fraction": 0.5781019330024719, "alphanum_fraction": 0.5827178955078125, "avg_line_length": 33.278480529785156, "blob_id": "d45752ecef5c3d5490029f955f27de85eb511c27", "content_id": "f9a381ab6e0fa4bd3ca3439710d745d96136b251", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5416, "license_type": "permissive", "max_line_length": 98, "num_lines": 158, "path": "/comparator/db.py", "repo_name": "weaverba137/comparator", "src_encoding": "UTF-8", "text": "# Licensed under a 3-clause BSD style license - see LICENSE.rst.\n# -*- coding: utf-8 -*-\n\"\"\"\ncomparator.db\n=============\n\nContains SQLAlchemy classes.\n\"\"\"\nimport os\nfrom sqlalchemy import (ForeignKey, Column, Integer, String, Float,\n DateTime, Boolean)\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.orm import (scoped_session, sessionmaker, relationship,\n backref, reconstructor)\nfrom sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound\nfrom sqlalchemy.orm.collections import attribute_mapped_collection\nfrom sqlalchemy.types import TypeDecorator\n\n\nBase = declarative_base()\nengine = None\nSession = scoped_session(sessionmaker())\n\n\n_missing = object() # sentinel object for missing values\n\n\nclass cached_hybrid_property(hybrid_property):\n def __get__(self, instance, owner):\n if instance is None:\n # getting the property for the class\n return self.expr(owner)\n else:\n # getting the property for an instance\n name = self.fget.__name__\n value = instance.__dict__.get(name, _missing)\n if value is _missing:\n value = self.fget(instance)\n instance.__dict__[name] = value\n return value\n\n\nclass FileSystem(Base):\n \"\"\"Representation of a filesystem.\n \"\"\"\n __tablename__ = 
'filesystem'\n\n id = Column(Integer, primary_key=True)\n name = Column(String, nullable=False, unique=True)\n\n def __repr__(self):\n return (\"<FileSystem(id={0.id:d}, name='{0.name}')>\").format(self)\n\n\nclass Directory(Base):\n \"\"\"Representation of a directory.\n\n Notes\n -----\n See https://docs.sqlalchemy.org/en/latest/_modules/examples/adjacency_list/adjacency_list.html\n \"\"\"\n __tablename__ = 'directory'\n\n id = Column(Integer, primary_key=True)\n filesystem_id = Column(Integer, ForeignKey('filesystem.id'), nullable=False)\n parent_id = Column(Integer, ForeignKey(id), nullable=False, index=True)\n nfiles = Column(Integer, nullable=False, default=0)\n name = Column(String, nullable=False)\n\n filesystem = relationship('FileSystem', back_populates='directories')\n\n children = relationship(\"Directory\",\n cascade=\"all, delete-orphan\", # cascade deletions\n # many to one + adjacency list - remote_side is\n # required to reference the 'remote' column\n # in the join condition.\n backref=backref(\"parent\", remote_side=id),\n # children will be represented as a dictionary\n # on the \"name\" attribute.\n collection_class=attribute_mapped_collection(\"name\"))\n\n def __repr__(self):\n return (\"<Directory(id={0.id:d}, \" +\n \"filesystem_id={0.filesystem_id:d}, \" +\n \"parent_id={0.parent_id:d}, \" +\n \"nfiles={0.nfiles:d}, \" +\n \"name='{0.name}')>\").format(self)\n\n @cached_hybrid_property\n def fullpath(self):\n \"\"\"Full system directory path.\n \"\"\"\n if not self.name:\n return self.filesystem.name\n fp = [self.name]\n parent = self.parent\n while parent.name:\n fp.insert(0, parent.name)\n parent = parent.parent\n fp.insert(0, self.filesystem.name)\n return os.path.join(*fp)\n\n\nFileSystem.directories = relationship('Directory', back_populates='filesystem')\n\n\nclass File(Base):\n \"\"\"Representation of an ordinary file or a symlink.\n \"\"\"\n __tablename__ = 'file'\n\n id = Column(Integer, primary_key=True)\n directory_id = Column(Integer, ForeignKey('directory.id'), nullable=False)\n # mode = Column(String(10), nullable=False)\n # uid = Column(Integer, ForeignKey('users.uid'), nullable=False)\n # gid = Column(Integer, ForeignKey('groups.gid'), nullable=False)\n size = Column(Integer, nullable=False)\n # mtime = Column(AwareDateTime(timezone=True), nullable=False)\n mtime = Column(Integer, nullable=False)\n name = Column(String, nullable=False)\n link = Column(Boolean, nullable=False, default=False)\n destination = Column(String, nullable=False, default='')\n\n directory = relationship('Directory', back_populates='files')\n\n def __repr__(self):\n return (\"<File(id={0.id:d}, \" +\n \"directory_id={0.directory_id:d}, \" +\n # \"mode='{0.mode}', \" +\n # \"uid={0.uid:d}, \" +\n # \"gid={0.gid:d}, \" +\n \"size={0.size:d}, \" +\n # \"mtime='{0.mtime}', \" +\n \"mtime={0.mtime:d}, \" +\n \"name='{0.name}', \" +\n \"link={0.link}, \" +\n \"destination='{0.destination}')>\").format(self)\n\n @property\n def path(self):\n \"\"\"Full system path to the file.\n \"\"\"\n return os.path.join(self.directory.fullpath, self.name)\n\n @property\n def realpath(self):\n \"\"\"Full system path to the target of a symlink, if the file is a\n symlink.\n \"\"\"\n if self.link:\n return os.path.realpath(self.path)\n else:\n return self.path\n\n\nDirectory.files = relationship('File', order_by=File.name,\n back_populates='directory')\n" }, { "alpha_fraction": 0.5625806450843811, "alphanum_fraction": 0.565376341342926, "avg_line_length": 33.19117736816406, "blob_id":
"f1f2fd58922e29dbaa60a0b8c71182a39b98a65d", "content_id": "d1d03bdc116b7bde55dd5a6b444d28dfb0edfe0d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4650, "license_type": "permissive", "max_line_length": 83, "num_lines": 136, "path": "/comparator/find.py", "repo_name": "weaverba137/comparator", "src_encoding": "UTF-8", "text": "# Licensed under a 3-clause BSD style license - see LICENSE.rst.\n# -*- coding: utf-8 -*-\n\"\"\"\ncomparator.find\n===============\n\nUtilities for scanning a filesystem.\n\"\"\"\nimport os\nfrom .db import Session, Directory, File\n\n\ndef walk(top):\n \"\"\"Simplified directory tree generator.\n\n Adapted from :func:`os.walk`, the yield is similar, but symbolic\n links are *always* treated as files, even if they point to directories,\n and never followed.\n\n For each directory in the directory tree rooted at `top` (including `top`\n itself, but excluding '.' and '..'), yields a 3-tuple::\n\n dirpath, dirnames, filenames\n\n ``dirpath`` is a string, the path to the directory. ``dirnames`` is a\n list of :class:`os.DirEntry` objects for subdirectories in dirpath\n (excluding '.' and '..'). ``filenames`` is a list of :class:`os.DirEntry`\n objects for the non-directory files in ``dirpath``.\n \"\"\"\n dirs = []\n nondirs = []\n\n # We may not have read permission for top, in which case we can't\n # get a list of the files the directory contains. os.walk\n # always suppressed the exception then, rather than blow up for a\n # minor reason when (say) a thousand readable directories are still\n # left to visit. That logic is copied here.\n try:\n scandir_it = os.scandir(top)\n except OSError as error:\n return\n\n with scandir_it:\n while True:\n try:\n try:\n entry = next(scandir_it)\n except StopIteration:\n break\n except OSError as error:\n return\n\n try:\n is_dir = entry.is_dir(follow_symlinks=False)\n except OSError:\n # If is_dir() raises an OSError, consider that the entry is not\n # a directory, same behaviour than os.path.isdir().\n is_dir = False\n\n if is_dir:\n dirs.append(entry)\n else:\n nondirs.append(entry)\n\n yield top, dirs, nondirs\n\n # Recurse into sub-directories\n for d in dirs:\n new_path = os.path.join(top, d.name)\n # Issue #23605: os.path.islink() is used instead of caching\n # entry.is_symlink() result during the loop on os.scandir() because\n # the caller can replace the directory entry during the \"yield\"\n # above.\n if not os.path.islink(new_path):\n yield from walk(new_path)\n\n\ndef directories(fs, directory_id=1):\n \"\"\"Find all physical directories on filesystem `fs`.\n\n Parameters\n ----------\n fs : :class:`FileSystem`\n The filesystem to scan.\n directory_id : :class:`int`, optional\n The id number of the directory corresponding to the root of `fs`.\n\n Returns\n -------\n :class:`int`\n The id of the last directory found. 
If scanning multiple filesystems,\n add one (1) to this number to set the `directory_id` for top of the\n next filesystem.\n \"\"\"\n parents = {fs.name: directory_id}\n Session.add(Directory(id=directory_id, filesystem_id=fs.id,\n parent_id=parents[fs.name], name=''))\n Session.commit()\n for dirpath, dirnames, filenames in walk(fs.name):\n p = Session.query(Directory).filter(Directory.id == parents[dirpath]).one()\n p.nfiles = len(filenames)\n for d in dirnames:\n directory_id += 1\n parents[os.path.join(dirpath, d.name)] = directory_id\n Session.add(Directory(id=directory_id, filesystem_id=fs.id,\n parent_id=parents[dirpath], name=d.name))\n Session.commit()\n return directory_id\n\n\ndef files(directory):\n \"\"\"Find files in `directory`; identify symlinks.\n\n Parameters\n ----------\n directory : :class:`Directory`\n Directory to scan with :func:`os.scandir()`.\n \"\"\"\n p = directory.fullpath\n with os.scandir(p) as it:\n for entry in it:\n if not entry.is_dir(follow_symlinks=False):\n if entry.is_symlink():\n d = os.readlink(os.path.join(p, entry.name))\n f = File(directory_id=directory.id,\n size=0, mtime=0,\n name=entry.name,\n link=True, destination=d)\n else:\n st = entry.stat(follow_symlinks=False)\n f = File(directory_id=directory.id,\n size=st.st_size,\n mtime=int(st.st_mtime),\n name=entry.name)\n Session.add(f)\n Session.commit()\n" }, { "alpha_fraction": 0.5587866306304932, "alphanum_fraction": 0.560669481754303, "avg_line_length": 36.34375, "blob_id": "980459c5d2d6cd9aa001df3e2f7c0ed4fbe07dbc", "content_id": "bdc14ffcca78c10dac3793cb0155df4ee4a4e7cc", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4780, "license_type": "permissive", "max_line_length": 130, "num_lines": 128, "path": "/comparator/initialize.py", "repo_name": "weaverba137/comparator", "src_encoding": "UTF-8", "text": "# Licensed under a 3-clause BSD style license - see LICENSE.rst.\n# -*- coding: utf-8 -*-\n\"\"\"\ncomparator.initialize\n=====================\n\nObtain filesystem metadata necessary for comparing the same data set\nat different locations.\n\"\"\"\nimport os\nfrom sqlalchemy import create_engine, func\nfrom sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound\nfrom .db import engine, Session, Base, FileSystem, Directory, File\nfrom .find import directories, files\n\n\ndef _options():\n \"\"\"Parse the command-line options.\n\n Returns\n -------\n The parsed options.\n \"\"\"\n from sys import argv\n from argparse import ArgumentParser\n xct = os.path.basename(argv[0])\n desc = \"Obtain filesystem metadata necessary for comparing the same data set at different locations.\"\n prsr = ArgumentParser(description=desc, prog=xct)\n prsr.add_argument('-f', '--filesystem', action='append',\n dest='filesystem', metavar=\"DIR\",\n help='FileSystem(s) to examine.')\n prsr.add_argument('-F', '--skip-files', action='store_true',\n dest='skip_files', help='Skip the file search stage.')\n # prsr.add_argument('-l', '--log-dir', dest='logging', metavar='DIR',\n # default=os.path.join(os.environ['HOME'], 'Documents', 'Logs'),\n # help='Log files in DIR (default %(default)s).')\n # prsr.add_argument('-R', '--root', dest='root', metavar='DIR',\n # default='/global/project/projectdirs',\n # help='Path containing metadata directory (default %(default)s).')\n # prsr.add_argument('-s', '--sql', dest='sql', action='store_true',\n # help='Output SQL statements instead of loading database.')\n prsr.add_argument('-o', '--overwrite', 
action='store_true',\n dest='overwrite', help='Overwrite any existing database.')\n prsr.add_argument('-v', '--verbose', action='store_true', dest='verbose',\n help='Log extra debugging information.')\n prsr.add_argument('release', metavar='RELEASE',\n help='Release to examine, e.g. \"dr15\".')\n prsr.add_argument('database', metavar='DB',\n help='Path to database file.')\n return prsr.parse_args()\n\n\ndef main():\n \"\"\"Entry point for command-line scripts.\n\n Returns\n -------\n :class:`int`\n An integer suitable for passing to :func:`sys.exit`.\n \"\"\"\n #\n # Arguments\n #\n options = _options()\n #\n # Initialize database.\n #\n if options.overwrite and os.path.exists(options.database):\n os.remove(options.database)\n engine = create_engine('sqlite:///'+options.database, echo=options.verbose)\n Session.remove()\n Session.configure(bind=engine, autocommit=False,\n autoflush=True, expire_on_commit=True)\n Base.metadata.create_all(engine)\n #\n # Add filesystems.\n #\n try:\n q = Session.query(FileSystem).one()\n except NoResultFound:\n Session.add_all([FileSystem(name=os.path.join(root, options.release))\n for root in options.filesystem])\n Session.commit()\n #\n # Scan Directories.\n #\n last_id = 0\n for fs in Session.query(FileSystem).all():\n if os.path.exists(fs.name):\n try:\n q = Session.query(Directory).filter(Directory.filesystem_id == fs.id).one()\n except NoResultFound:\n last_id = directories(fs, last_id+1)\n except MultipleResultsFound:\n last_id = Session.query(func.max(Directory.id)).scalar()\n else:\n #\n # Apparently there was exactly one directory.\n # This is not as weird as it sounds, because the release\n # directory in the filesystem may be present but empty.\n #\n last_id = q.id\n #\n # Scan files.\n #\n if not options.skip_files:\n for fs in Session.query(FileSystem).all():\n if os.path.exists(fs.name):\n try:\n q = Session.query(File).join(Directory).filter(Directory.filesystem_id == fs.id).one()\n except NoResultFound:\n for d in Session.query(Directory).filter(Directory.filesystem_id == fs.id).filter(Directory.nfiles > 0).all():\n files(d)\n except MultipleResultsFound:\n #\n # Already scanned.\n #\n pass\n else:\n #\n # Apparently there was exactly one file. OK, fine.\n #\n pass\n #\n # Exit gracefully.\n #\n Session.close()\n return 0\n" }, { "alpha_fraction": 0.5155038833618164, "alphanum_fraction": 0.569767415523529, "avg_line_length": 18.846153259277344, "blob_id": "96a69c982d2ba72eb6138a96d4ebdbbbd20faaae", "content_id": "f8f7fc4afe4ac7e17f09c546c4a95ee9d572485f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 258, "license_type": "permissive", "max_line_length": 80, "num_lines": 13, "path": "/doc/changes.rst", "repo_name": "weaverba137/comparator", "src_encoding": "UTF-8", "text": "=============\nRelease Notes\n=============\n\n0.2.0 (unreleased)\n------------------\n\n* Planned: develop a method to sync checksum files from an authoritative source.\n\n0.1.0 (2023-08-17)\n------------------\n\n* Initial release to establish package infrastructure.\n" } ]
12
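Once comparator's initialize entry point has populated a SQLite file, the schema in `db.py` can be read back directly with the same SQLAlchemy session machinery. A minimal sketch, assuming the package is importable and that `comparator.db` is a database file the entry point produced; both that path and the aggregation below are illustrative, not part of the package:

```python
# Hypothetical read-side companion to initialize.py: open a database it
# produced and total file sizes per filesystem.
from sqlalchemy import create_engine, func
from comparator.db import Session, FileSystem, Directory, File

engine = create_engine('sqlite:///comparator.db')
Session.configure(bind=engine)

totals = (Session.query(FileSystem.name, func.sum(File.size))
          .join(Directory, Directory.filesystem_id == FileSystem.id)
          .join(File, File.directory_id == Directory.id)
          .group_by(FileSystem.name)
          .all())
for name, size in totals:
    print(name, size)
Session.remove()
```

Comparing these per-filesystem totals (and, one level down, per-file sizes and mtimes) is the basic check the package's name suggests.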
patrickdijusto/nitehawk
https://github.com/patrickdijusto/nitehawk
f6917862b5a64ccde49513687276a6d4faf3223e
350701ea1c239a5e8b85402e350dc4dcb5cf08bd
7bb4cf2b9a08dcb42fcfd5950dd20c97f7c43f5e
refs/heads/master
2020-03-26T15:41:48.323761
2018-12-08T15:36:43
2018-12-08T15:36:43
145,059,038
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7166947722434998, "alphanum_fraction": 0.7273749113082886, "avg_line_length": 24.41428565979004, "blob_id": "0ef16f7bdd613eacdde2afe0629ff1f346ef983d", "content_id": "a50f789d78230431b34db9e5065b8b1a9d8a0bb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1779, "license_type": "no_license", "max_line_length": 133, "num_lines": 70, "path": "/nitehawk.py", "repo_name": "patrickdijusto/nitehawk", "src_encoding": "UTF-8", "text": "import twitter\nfrom settings import *\nimport re\n\nimport time\n\ntry:\n # For Python 3.0 and later\n from urllib.request import urlopen\nexcept ImportError:\n # Fall back to Python 2's urllib2\n from urllib2 import urlopen\n\n\n\n\n\n\n\nfile = open(\"change.txt\",\"r\")\nchange = file.read()\nfile.close\nprint(change)\n\nhtml_content = urlopen('https://nitehawkcinema.com/prospectpark/').read().decode('utf-8')\n\nmatches = re.findall('Nitehawk Cinema Prospect Park is set to open in 2018', html_content);\n\nprint(matches)\n\n\nif (len(matches) > 0) or (change == \"1\"): \n print('No Change')\nelse:\n\t## Run entire twitter infrasctucture\n\tprint(\"Change has come\")\n\tprint('establish the twitter object')\n\t# see \"Authentication\" section below for tokens and keys\n\tapi = twitter.Api(consumer_key=CONSUMER_KEY,\n\t\tconsumer_secret=CONSUMER_SECRET,\n\t\taccess_token_key=OAUTH_TOKEN,\n\t\taccess_token_secret=OAUTH_SECRET,\n\t)\n\n\tprint('twitter object established')\n\tmessage = \"The Nitehawk Prospect website has changed. But I am only a bot, and I don't know if this means that the theater is open.\"\n\tdx = api.PostDirectMessage(text=message,screen_name=\"@patrickdijusto\")\n\tprint(message)\n\tprint(dx)\n\ttime.sleep(5)\n\tdx = api.PostDirectMessage(text=message,screen_name=\"@ejgertz\")\n\tprint(dx)\n\ttime.sleep(5)\n\tdx = api.PostDirectMessage(text=message,screen_name=\"@proggrrl\")\n\tprint(dx)\n\ttime.sleep(5)\n\tdx = api.PostDirectMessage(text=message,screen_name=\"@tritia\")\n\tprint(dx)\n\ttime.sleep(5)\n\tdx = api.PostDirectMessage(text=message,screen_name=\"@harrislynn\")\n\tprint(dx)\n\ttime.sleep(5)\n\tdx = api.PostDirectMessage(text=message,screen_name=\"@saldenaro\")\n\tprint(dx)\n\ttime.sleep(5)\n\tdx = api.PostDirectMessage(text=message,screen_name=\"@nyisblue\")\n\tprint(dx)\n\tfile=open(\"change.txt\",\"w\")\n\tfile.write(\"1\")\n\tfile.close()\n" }, { "alpha_fraction": 0.7863636612892151, "alphanum_fraction": 0.7863636612892151, "avg_line_length": 53.75, "blob_id": "fc53fb15f0ba92484c4637177ea73b4957e6537a", "content_id": "c3e6e2c57a2c288fcd3fcd4cc48af5511fab2839", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 220, "license_type": "no_license", "max_line_length": 116, "num_lines": 4, "path": "/README.md", "repo_name": "patrickdijusto/nitehawk", "src_encoding": "UTF-8", "text": "# nitehawk\nA Bot that checks to see if the Nitehawk Prospect website https://nitehawkcinema.com/prospectpark/ has been updated.\n\nEdited to add: sets a binary flag if the website has changed, so the bot no longer tweets\n\n" } ]
2
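The nitehawk bot keys on a single hard-coded phrase, so any other edit to the page goes unnoticed. A hash-based variant of the same check, as a sketch: the state-file name is invented, and the notification side (the direct messages above) is reduced to a print.

```python
# Fingerprint the whole page and compare against the fingerprint saved
# on the previous run; any change at all trips the check.
import hashlib
try:
    from urllib.request import urlopen
except ImportError:
    from urllib2 import urlopen

URL = 'https://nitehawkcinema.com/prospectpark/'
STATE = 'page.sha256'   # hypothetical state file, analogous to change.txt

digest = hashlib.sha256(urlopen(URL).read()).hexdigest()
try:
    with open(STATE) as f:
        previous = f.read().strip()
except IOError:
    previous = ''

if digest == previous:
    print('No Change')
else:
    print('Change has come')   # send notifications here, as the bot does
    with open(STATE, 'w') as f:
        f.write(digest)
```

The tradeoff of hashing everything is more false alarms (ads, timestamps), which is presumably why the original matches one fixed sentence instead.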
slprasanna2003/python-programming
https://github.com/slprasanna2003/python-programming
7fb1802932e963dae6ca86ff299e4bc07fa16374
0e3c8131202e2e3aab9990ddbac21eade8d3c2a0
682efbccf3fcc19ed5868016344e5655982f2383
refs/heads/main
2023-05-01T04:41:13.649456
2021-05-21T07:50:34
2021-05-21T07:50:34
368,425,018
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.529411792755127, "alphanum_fraction": 0.5686274766921997, "avg_line_length": 33, "blob_id": "2fd5117e4eb41c5d8a79133850984cc7a9cbf249", "content_id": "237c8befaffdcbb110ad161ef0d24a969828aea0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 102, "license_type": "no_license", "max_line_length": 70, "num_lines": 3, "path": "/range.py", "repo_name": "slprasanna2003/python-programming", "src_encoding": "UTF-8", "text": "A=range(1,10,2)\nprint(list(A)) #range datatype depend on the list \nprint(type(A))\n" }, { "alpha_fraction": 0.5542168617248535, "alphanum_fraction": 0.5963855385780334, "avg_line_length": 54.33333206176758, "blob_id": "ea76cf77bb6ad32781a83170c90eb8756fbcfa3e", "content_id": "1a675742ca297eb6d5dd8c3437221d5ba5db28df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 166, "license_type": "no_license", "max_line_length": 92, "num_lines": 3, "path": "/tuple.py", "repo_name": "slprasanna2003/python-programming", "src_encoding": "UTF-8", "text": "details_student=(101,'sai','1st year',98.5)\nprint(details_student) #tuple datatype ------ use paranthesis\nprint(type(details_student))\n" }, { "alpha_fraction": 0.44915252923965454, "alphanum_fraction": 0.5762711763381958, "avg_line_length": 38.33333206176758, "blob_id": "25b2a65cb60986893ff2594ae9a84b5a7d2dd2c8", "content_id": "b3a0e75bb7b00b326202713e2c5c2a1d0aa762fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 118, "license_type": "no_license", "max_line_length": 72, "num_lines": 3, "path": "/set.py", "repo_name": "slprasanna2003/python-programming", "src_encoding": "UTF-8", "text": "CSEA={501,502,503,504,505}\nprint(CSEA) #set datatype ------ use flower bracket\nprint(type(CSEA))\n" }, { "alpha_fraction": 0.5454545617103577, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 50.33333206176758, "blob_id": "3d0ca692b7b5c0b02f3254c405072f5fcf9cb0eb", "content_id": "417e483ecdaa97eeb7142848d10014754f497d04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 154, "license_type": "no_license", "max_line_length": 86, "num_lines": 3, "path": "/list.py", "repo_name": "slprasanna2003/python-programming", "src_encoding": "UTF-8", "text": "student_details=[201,202,203,204,205]\nprint(student_details) #list datatype ------- use square bracket\nprint(type(student_details))\n" }, { "alpha_fraction": 0.5199999809265137, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 11.5, "blob_id": "6e98197e133163b462ea0f1b024ceddd063cd194", "content_id": "7b0238a92260da7a6935304e6ab413a27a1e90ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50, "license_type": "no_license", "max_line_length": 30, "num_lines": 4, "path": "/if.py", "repo_name": "slprasanna2003/python-programming", "src_encoding": "UTF-8", "text": "A=10\nB=20\nif(B>A):\n print(\"B is greater than A\")\n" } ]
5
huntmk/SchoolWork
https://github.com/huntmk/SchoolWork
454651f19ed18a5081917b5c8af017d86d303413
8cdf87b3be4c815cd3471f6292eba598038422dc
847851d522c80a2211ab69a05435f93ef0446b5b
refs/heads/master
2021-12-11T19:22:38.888640
2021-12-07T23:16:53
2021-12-07T23:16:53
218,656,800
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6240409016609192, "alphanum_fraction": 0.6240409016609192, "avg_line_length": 14.291666984558105, "blob_id": "458b51fd7b6db50aac4299a5367795de2b5bca95", "content_id": "45a342a2fa06034a05b75fcae64e51d3fab9dc6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 391, "license_type": "no_license", "max_line_length": 42, "num_lines": 24, "path": "/Year 2/Assignment #6/BubbleSort.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// \r\n//Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor \r\n// received any help on this assignment\r\n//.\r\n//\r\n// huntmk\r\n#ifndef BUBBLESORT_H\r\n#define BUBBLESORT_H\r\n#include \"Sort.h\"\r\nclass BubbleSort: public Sort\r\n{\r\n\tpublic:\r\n\t\t// constructor\r\n\t\tBubbleSort();\r\n\t\t//destructor\r\n\t\t~BubbleSort();\r\n\t\t//virtual sort method\r\n\t\tvoid sort(int *, int);\r\n\t\t \r\n};\r\n\r\n#endif//BUBBLESORT_H\r\n" }, { "alpha_fraction": 0.6806219816207886, "alphanum_fraction": 0.6806219816207886, "avg_line_length": 23.33333396911621, "blob_id": "f906b105d15e8cab0a0c8174876f730736326088", "content_id": "aa79506de364d577a4a7ffa488cd17cb265c22dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 836, "license_type": "no_license", "max_line_length": 62, "num_lines": 33, "path": "/Year 3/Assignment 4/Eval_Expr_Tree.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n\r\n#ifndef _EVAL_EXPR_TREE\r\n#define _EVAL_EXPR_TREE\r\n\r\n#include \"Expr_Node_Visitor.h\"\r\n\r\nclass Eval_Expr_Tree : public Expr_Node_Visitor \r\n{\r\n\tpublic:\r\n\t\tEval_Expr_Tree(void);\r\n\t\tvirtual ~Eval_Expr_Tree(void);\r\n\t\t\r\n\t\t//methods for visiting nodes\r\n\t\tvirtual void Visit_Add_Node (Add_Expr_Node & node);\r\n\t\tvirtual void Visit_Subtract_Node(Subtract_Expr_Node & node);\r\n\t\tvirtual void Visit_Number_Node(Num_Expr_Node & node);\r\n\t\tvirtual void Visit_Multiply_Node(Multiply_Expr_Node & node);\r\n\t\tvirtual void Visit_Division_Node(Division_Expr_Node & node);\r\n\t\tvirtual void Visit_Modulus_Node(Modulus_Expr_Node & node);\r\n\t\t\r\n\t\tint result (void) const;\r\n\t\t\r\n\tprivate:\r\n\t\tint result_;\r\n\t\t//other state for calculating result\t\t\r\n};\r\n#endif\r\n" }, { "alpha_fraction": 0.6141374707221985, "alphanum_fraction": 0.6175962090492249, "avg_line_length": 27.207317352294922, "blob_id": "0f6805500b03cbc3d1841f64e439110e6b64fc45", "content_id": "9d2cef6c77b76ccc4af2fd2d8031c4ad3296ebe8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4626, "license_type": "no_license", "max_line_length": 107, "num_lines": 164, "path": "/Master Year 1/Computer Graphics/HW4/renderer/scene/Vertex.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\n\n*/\n\npackage renderer.scene;\n\n/**\n A {@code Vertex} object has four doubles which represent the\n homogeneous coordinates of a point in 3-dimensional space.\n The fourth, homogeneous, coordinate will usually be 1, but in\n some stages of the graphics rendering pipeline it can be some\n other (non-zero) number.\n<p>\n When a {@code Vertex} object is created in a client program,\n before the {@code Vertex} object moves down the graphics rendering\n pipeline, the coordinates in the {@code Vertex} will be in some\n 
model's local coordinate system.\n<p>\n As a {@code Vertex} object moves down the graphics rendering\n pipeline, the coordinates in the {@code Vertex} will be transformed\n from one coordinate system to another.\n*/\npublic class Vertex\n{\n public double x, y, z, w; // a vertex in homogenous coordinates\n\n /**\n Construct a default {@code Vertex}.\n */\n public Vertex()\n {\n set(0.0, 0.0, 0.0, 1.0);\n }\n\n\n /**\n Construct a new {@code Vertex} (with homogeneous coordinates)\n using the given {@code x}, {@code y}, and {@code z} coordinates.\n\n @param x x-coordinate of the new {@code Vertex}\n @param y y-coordinate of the new {@code Vertex}\n @param z z-coordinate of the new {@code Vertex}\n */\n public Vertex(double x, double y, double z)\n {\n set(x, y, z, 1.0);\n }\n\n\n /**\n Construct a new {@code Vertex} with the given homogeneous coordinates.\n\n @param x x-coordinate of the new {@code Vertex}\n @param y y-coordinate of the new {@code Vertex}\n @param z z-coordinate of the new {@code Vertex}\n @param w w-coordinate of the new {@code Vertex}\n */\n public Vertex(double x, double y, double z, double w)\n {\n set(x, y, z, w);\n }\n\n\n /**\n Construct a new {@code Vertex} that is a copy of another {@code Vertex}.\n\n @param v {@code Vertex} to make a copy of\n */\n public Vertex(Vertex v) // a \"copy constructor\"\n {\n set(v.x, v.y, v.z, v.w);\n }\n\n\n /**\n Copy the coordinates of {@code Vertex} {@code v} to\n this {@code Vertex}.\n\n @param v {@code Vertex} whose coordinates are copied to this {@code Vertex}\n */\n public void set(Vertex v)\n {\n set(v.x, v.y, v.z, v.w);\n }\n\n\n /**\n Set the {@code x}, {@code y}, and {@code z} coordinates\n of this {@code Vertex}.\n\n @param x new x-coordinate for this {@code Vertex}\n @param y new y-coordinate for this {@code Vertex}\n @param z new z-coordinate for this {@code Vertex}\n */\n public void set(double x, double y, double z)\n {\n set(x, y, z, 1);\n }\n\n\n /**\n Set the homogeneous coordinates of this {@code Vertex}.\n\n @param x new x-coordinate for this {@code Vertex}\n @param y new y-coordinate for this {@code Vertex}\n @param z new z-coordinate for this {@code Vertex}\n @param w new w-coordinate for this {@code Vertex}\n */\n public void set(double x, double y, double z, double w)\n {\n this.x = x;\n this.y = y;\n this.z = z;\n this.w = w;\n }\n\n\n /**\n For debugging.\n\n @return {@link String} representation of this {@code Vertex} object\n */\n @Override\n public String toString()\n {\n final int precision = 5; // the default precision for the format string\n return toString(precision);\n }\n\n\n /**\n For debugging.\n <p>\n Allow the precision of the formatted output to be specified.\n\n @param precision precision value for the format string\n @return {@link String} representation of this {@code Vertex} object\n */\n public String toString(final int precision)\n {\n final int iWidth = 3; // default width of integer part of the format string\n return toString(precision, iWidth);\n }\n\n\n /**\n For debugging.\n <p>\n Allow the precision and width of the formatted output to be specified.\n By width, we mean the width of the integer part of each number.\n\n @param precision precision value for the format string\n @param iWidth width of the integer part of the format string\n @return {@link String} representation of this {@code Vertex} object\n */\n public String toString(final int precision, final int iWidth)\n {\n // Here is one way to get programmable precision and width.\n final int p = precision; // the precision for 
the following format string\n final int t = p + iWidth + 2; // the width for the following format string\n final String format = \"(x,y,z,w)=(% \"+t+\".\"+p+\"f % \"+t+\".\"+p+\"f % \"+t+\".\"+p+\"f % \"+t+\".\"+p+\"f)\\n\";\n return String.format(format, x, y, z, w);\n }\n}\n" }, { "alpha_fraction": 0.47607362270355225, "alphanum_fraction": 0.4969325065612793, "avg_line_length": 21.285715103149414, "blob_id": "a5dd04a386297b6b6691e2ce88563054e4e0c216", "content_id": "1c17161d7426b6e65ad2fa0987bd6468146db3e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 815, "license_type": "no_license", "max_line_length": 57, "num_lines": 35, "path": "/Master Year 2/Operating Systems/HW3/hw3/filters/reverse.c", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n This program reads lines from standard input and\r\n writes their reverse to standard output.\r\n\r\n When using Windows, if standard input is the console\r\n keyboard, use ^z (Control-z) to denote the end of file\r\n (and you must use ^z at the beginning of a line!).\r\n*/\r\n#include <stdio.h>\r\n\r\nint main()\r\n{\r\n char oneLine [1000];\r\n int i, j;\r\n\r\n while ( fgets(oneLine, 1000, stdin) != NULL )\r\n {\r\n // find the end of line\r\n for (i = 0; i < 1000; i++)\r\n {\r\n if ( oneLine[i] == '\\0' || oneLine[i] == '\\n' )\r\n {\r\n break;\r\n }\r\n }\r\n // write the reversed line\r\n for (j = i-1; j >= 0; j--)\r\n {\r\n printf(\"%c\", oneLine[j]);\r\n }\r\n printf(\"\\n\");\r\n fflush(stdout); // try commenting this out\r\n }\r\n return 0;\r\n}\r\n" }, { "alpha_fraction": 0.6698459386825562, "alphanum_fraction": 0.6727806329727173, "avg_line_length": 32.875, "blob_id": "134d7f2800ed55750ac8fc5f6ab98e9c6f4bd63d", "content_id": "1c89f427e2a3da4208f4b5deecfa58a9223f5ece", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1363, "license_type": "no_license", "max_line_length": 155, "num_lines": 40, "path": "/Year 4/csci487Group4Project-makingGraphs/Agents/Vehicles.py", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#Vehicles.py\n#Created 4/5/20 by Jasper Heist\n#Contains Vehicles Parent Class and subclassed types of vehicle definitions\n\nfrom Utilities.Definitions import AverageCarLength, AverageSemiLength\nfrom Utilities.UIDGenerator import NumberGenerator\n\nclass Vehicle(object):\n \"\"\"class that all vehicles inherit from\"\"\"\n def __init__(self, length=AverageCarLength):\n object.__init__(self)\n #length of the car. 
Idea is to track the space the car takes up on the road, since roads have a limited amount of space cars can be bumper to bumper\n self.__length = length\n #UID for this vehicle on the road to track them\n self.__uid = NumberGenerator().vehicle_uid()\n\n def __get_length(self):\n \"\"\"returns length of this vehicle\"\"\"\n return self.__length\n \n def __get_id(self):\n \"\"\"return unique ID for this vehicle\"\"\"\n return self.__uid\n\n length = property(fget=__get_length)\n uid = property(fget=__get_id)\n\n\nclass Car(Vehicle):\n \"\"\"class representing an average car\"\"\"\n\n def __init__(self, length=AverageCarLength):\n Vehicle.__init__(Vehicle, length)\n\n\nclass Semi(Vehicle):\n \"\"\"class representing a semi truck (probably won't use this, but mostly for abstraction practice with python)\"\"\"\n\n def __init__(self, length=AverageSemiLength):\n Vehicle.__init__(Vehicle, length)\n " }, { "alpha_fraction": 0.3527420461177826, "alphanum_fraction": 0.3657413721084595, "avg_line_length": 33.676326751708984, "blob_id": "52fda02c72d37eb9ac42c4b6bd8d835245036a1a", "content_id": "e5fd397e2271e81d1295573e14f37a342fcac547", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 7385, "license_type": "no_license", "max_line_length": 82, "num_lines": 207, "path": "/Master Year 1/Programming Languages and Compilers/HW4/hw4/Language_7a_Examples.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n This program parses and evaluates strings from Language_7a.\r\n*/\r\n\r\npublic class Language_7a_Examples\r\n{\r\n public static void main(String[] args)\r\n {\r\n // IMPORTANT: Set this to 0 or 1 depending on whether you need\r\n // to see all of the interpreter's debugging information.\r\n Evaluate_7a.DEBUG = 1;\r\n\r\n String[] programs =\r\n {\r\n \"(prog (var a (array 1 2 3 4 5 6 false true))\" +\r\n \"(var b (array dim 10 1 2 3 4 5 false a))\" +\r\n \"(print a)\" +\r\n \"(print b)\" +\r\n \"(print (index b 4))\" +\r\n \"(print (index b 5))\" +\r\n \"(print (index b 6))\" +\r\n \"(print (sizeOf b))\" +\r\n \"(var c (array dim 10))\" +\r\n \"(set (index c 5) (index a 2))\" +\r\n \"c)\",\r\n\r\n\r\n \"(prog (var n 5)\" +\r\n \"(var a (array dim n))\" +\r\n \"(print (sizeOf a))\" +\r\n \"(print a)\" +\r\n \"(var i 0)\" +\r\n \"(while (< i (sizeOf a))\" +\r\n \"(begin\" +\r\n \"(set (index a i) (- i))\" +\r\n \"(print (index a i))\" +\r\n \"(set i (+ i 1))))\" +\r\n \"a)\",\r\n\r\n\r\n \"(prog\" +\r\n \"(fun g (lambda x (+ 10 x)))\" +\r\n \"(fun printArray (lambda a\" +\r\n \"(for (var i 0) (< i (sizeOf a)) (++ i)\" +\r\n \"(print (index a i)))))\" +\r\n \"(fun arrayMap (lambda f a\" + // f is a function parameter\r\n \"(begin\" +\r\n \"(for (var i 0) (< i (sizeOf a)) (+++ i)\" +\r\n \"(set (index a i) (apply f (index a i))))\" +\r\n \"a)))\" +\r\n \"(var a (array 2 3 4 5))\" +\r\n \"(print a)\" +\r\n \"(apply arrayMap g a)\" +\r\n \"(print a)\" +\r\n \"(apply printArray (apply arrayMap g a))\" +\r\n \"a)\",\r\n\r\n\r\n \"(prog\" +\r\n \"(fun fill (lambda a\" +\r\n \"(begin\" +\r\n \"(var i 0)\" +\r\n \"(while (< i (sizeOf a))\" +\r\n \"(begin\" +\r\n \"(set (index a i) (rand 0 100))\" +\r\n \"(set i (+ i 1))))\" +\r\n \"a)))\" + // this is the return value for fill()\r\n \"(fun max (lambda a\" +\r\n \"(begin\" +\r\n \"(var m (index a 0))\" +\r\n \"(var i 1)\" +\r\n \"(while (< i (sizeOf a))\" +\r\n \"(begin\" +\r\n \"(if (< m (index a i))\" +\r\n \"(set m (index a i))\" +\r\n \"0) \" +\r\n \"(set i (+ i 1))))\" +\r\n 
\"m)))\" + // this is the return value for max()\r\n \"(var SIZE 20)\" +\r\n \"(apply max (print (apply fill (array dim SIZE)))))\",\r\n\r\n\r\n \"(prog\" +\r\n \"(fun fill (lambda a\" +\r\n \"(for (var i 0) (< i (sizeOf a)) (++ i)\" +\r\n \"(set (index a i) (rand 0 100)))))\" +\r\n \"(fun extreme (lambda a comp\" + // a \"generic\" max/min function\r\n \"(begin\" +\r\n \"(var m (index a 0))\" +\r\n \"(for (var i 1) (< i (sizeOf a)) (++ i)\" +\r\n \"(if (apply comp (index a i) m)\" +\r\n \"(set m (index a i))\" +\r\n \"0))\" +\r\n \"m)))\" + // return value for extreme()\r\n \"(fun comp1 (lambda a b (< a b)))\" +\r\n \"(fun comp2 (lambda a b (> a b)))\" +\r\n \"(var SIZE 20)\" +\r\n \"(var a (array dim SIZE))\" +\r\n \"(apply fill a)\" +\r\n \"(print (apply extreme a comp1))\" + // find the min\r\n \"(print (apply extreme a comp2))\" + // find the max\r\n \"a)\",\r\n\r\n\r\n \"(prog\" +\r\n \"(fun fill (lambda a\" +\r\n \"(begin\" +\r\n \"(for (var i 0) (< i (sizeOf a)) (++ i)\" +\r\n \"(set (index a i) (rand 0 100)))\" +\r\n \"a)))\" + // return value for fill()\r\n \"(fun sort (lambda a comp\" + // a \"generic\" bubble sort function\r\n \"(begin\" +\r\n \"(var done false)\" +\r\n \"(while (! done)\" +\r\n \"(begin\" +\r\n \"(set done true)\" +\r\n \"(for (var i 0) (< i (- (sizeOf a) 1)) (++ i)\" +\r\n \"(if (apply comp (index a (+ i 1)) (index a i))\" +\r\n \"(begin\" +\r\n \"(set done false)\" +\r\n \"(var temp (index a i))\" +\r\n \"(set (index a i) (index a (+ i 1)))\" +\r\n \"(set (index a (+ i 1)) temp))\" +\r\n \"0))))\" +\r\n \"a ) ) ) \" + // return value for sort()\r\n \"(fun comp1 (lambda a b (< a b)))\" +\r\n \"(fun comp2 (lambda a b (> a b)))\" +\r\n \"(var SIZE 10)\" +\r\n \"(var a (array dim SIZE))\" +\r\n \"(print (apply fill a))\" +\r\n \"(print (apply sort a comp1))\" + // sort ascending\r\n \"(print (apply sort a comp2))\" + // sort descending\r\n \"a)\"\r\n };\r\n\r\n\r\n int i = 0;\r\n for (i = 0; i < programs.length; i++)\r\n {\r\n System.out.println(i + \" =========================================\");\r\n\r\n // Build and evaluate the AST that represents the expression.\r\n try\r\n {\r\n Tree ast = ParseTree.buildTree( programs[i] );\r\n\r\n // Print the AST as an S-expression\r\n //System.out.println( ast + \"\\n\" );\r\n\r\n // Pretty-print the abstract syntax tree.\r\n System.out.println( PrettyPrinter.prettyPrint( ast ) + \"\\n\" );\r\n\r\n // Print the infix version of the expression.\r\n System.out.println( AST2infix_7a.ast2infix( ast ) + \"\\n\" );\r\n\r\n // Evaluate the expression (interpret the AST).\r\n try\r\n {\r\n Value value = Evaluate_7a.eval( ast );\r\n\r\n System.out.println(\"result = \" + value + \"\\n\" );\r\n }\r\n catch (EvalException e)\r\n {\r\n System.out.println(e);\r\n //e.printStackTrace();\r\n }\r\n\r\n\r\n // Create dot and png files from the AST.\r\n if (Evaluate_7a.DEBUG > 0)\r\n try\r\n {\r\n // Create the (empty) dot file.\r\n String baseName = String.format(\"Language_7.%02d\", i);\r\n java.io.PrintWriter out = new java.io.PrintWriter(\r\n new java.io.File(baseName + \".dot\") );\r\n // Put dot commands into the dot file\r\n out.println( Tree2dot.tree2dot(ast) + \"\\n\" );\r\n out.close();\r\n // Create a command line for running dot.exe.\r\n String[] cmd = {\"C:\\\\graphviz-2.38\\\\release\\\\bin\\\\dot.exe\",\r\n \"-Tpng\",\r\n baseName + \".dot\",\r\n \"-o\",\r\n baseName + \".png\"};\r\n // Execute the command line.\r\n java.lang.Runtime.getRuntime().exec(cmd);\r\n }\r\n catch (Exception e)\r\n {\r\n System.out.println( e 
);\r\n }\r\n }\r\n catch (TokenizeException e)\r\n {\r\n System.out.println(e);\r\n //e.printStackTrace();\r\n }\r\n catch (ParseException e)\r\n {\r\n System.out.println(e);\r\n //e.printStackTrace();\r\n }\r\n }\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.6533665657043457, "alphanum_fraction": 0.6533665657043457, "avg_line_length": 16.227272033691406, "blob_id": "becf1edd310bd3ad8fc556d2d6ffb3c9a0d28946", "content_id": "97619d9795574485cd4e3cb299d2455591f3c504", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 401, "license_type": "no_license", "max_line_length": 42, "num_lines": 22, "path": "/Year 2/Assignment #6/InsertionSort.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// \r\n//Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor \r\n// received any help on this assignment\r\n//.\r\n//\r\n// huntmk\r\n#ifndef INSERTIONSORT_H\r\n#define INSERTIONSORT_H\r\n#include \"Sort.h\"\r\nclass InsertionSort: public Sort\r\n{\r\n\tpublic:\r\n\t\t//constructor\r\n\t\tInsertionSort();\r\n\t\t//destructor\r\n\t\t~InsertionSort();\r\n\t\t//virtual sort method\r\n\t\tvoid sort(int *, int);\r\n};\r\n#endif//INSERTIONSORT_H\r\n" }, { "alpha_fraction": 0.5519982576370239, "alphanum_fraction": 0.5642458200454712, "avg_line_length": 27.45569610595703, "blob_id": "2f4d1c4ee7e218a3e44ff9ee6085606ed23c3917", "content_id": "10f3e8b1516040d65dcfbc70a160e84c338fcac8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4654, "license_type": "no_license", "max_line_length": 64, "num_lines": 158, "path": "/Master Year 1/Computer Graphics/HW4/renderer/models/Axes3D.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.models;\r\nimport renderer.scene.*;\r\n\r\nimport java.awt.Color;\r\n\r\n/**\r\n Create a positive x, y, and z axis in 3-dimensional space.\r\n*/\r\npublic class Axes3D extends Model\r\n{\r\n /**\r\n Create a positive x, y, and z axis\r\n with one unit length for each axis.\r\n The default {@link Color} is black.\r\n */\r\n public Axes3D( )\r\n {\r\n this(1.0, 1.0, 1.0);\r\n }\r\n\r\n\r\n /**\r\n Create a positive x, y, and z axis\r\n with the given length for each axis.\r\n The default {@link Color} is black.\r\n\r\n @param xMax length of the x-axis\r\n @param yMax length of the y-axis\r\n @param zMax length of the z-axis\r\n */\r\n public Axes3D(double xMax, double yMax, double zMax)\r\n {\r\n this(xMax, yMax, zMax, Color.black);\r\n }\r\n\r\n\r\n /**\r\n Create a positive x, y, and z axis\r\n with the given length for each axis.\r\n Use the given {@link Color} for all three axes.\r\n\r\n @param xMax length of the x-axis\r\n @param yMax length of the y-axis\r\n @param zMax length of the z-axis\r\n @param c color for all three axes\r\n */\r\n public Axes3D(double xMax, double yMax, double zMax, Color c)\r\n {\r\n this(xMax, yMax, zMax, c, c, c);\r\n }\r\n\r\n\r\n /**\r\n Create a positive x, y, and z axis\r\n with the given length for each axis.\r\n Use the given {@link Color} for each axis.\r\n\r\n @param xMax length of the x-axis\r\n @param yMax length of the y-axis\r\n @param zMax length of the z-axis\r\n @param cX color for the x-axis\r\n @param cY color for the y-axis\r\n @param cZ color for the z-axis\r\n */\r\n public Axes3D(double xMax, double yMax, double zMax,\r\n Color cX, Color cY, Color cZ)\r\n {\r\n this(0.0, xMax, 0.0, yMax, 0.0, zMax, cX, cY, cZ);\r\n }\r\n\r\n\r\n /**\r\n Create an x, y, 
and z axis with the\r\n given endpoints for each axis.\r\n The default {@link Color} is black.\r\n\r\n @param xMin left endpoint of the x-axis\r\n @param xMax right endpoint of the x-axis\r\n @param yMin bottom endpoint of the y-axis\r\n @param yMax top endpoint of the y-axis\r\n @param zMin back endpoint of the z-axis\r\n @param zMax front endpoint of the z-axis\r\n */\r\n public Axes3D(double xMin, double xMax,\r\n double yMin, double yMax,\r\n double zMin, double zMax)\r\n {\r\n this(xMin, xMax, yMin, yMax, zMin, zMax, Color.black);\r\n }\r\n\r\n\r\n /**\r\n Create an x, y, and z axis with the\r\n given endpoints for each axis.\r\n Use the given {@link Color} for all three axes.\r\n\r\n @param xMin left endpoint of the x-axis\r\n @param xMax right endpoint of the x-axis\r\n @param yMin bottom endpoint of the y-axis\r\n @param yMax top endpoint of the y-axis\r\n @param zMin back endpoint of the z-axis\r\n @param zMax front endpoint of the z-axis\r\n @param c color for all three axes\r\n */\r\n public Axes3D(double xMin, double xMax,\r\n double yMin, double yMax,\r\n double zMin, double zMax,\r\n Color c)\r\n {\r\n this(xMin, xMax, yMin, yMax, zMin, zMax, c, c, c);\r\n }\r\n\r\n\r\n /**\r\n Create an x, y, and z axis with the\r\n given endpoints for each axis.\r\n Use the given {@link Color} for each axis.\r\n\r\n @param xMin left endpoint of the x-axis\r\n @param xMax right endpoint of the x-axis\r\n @param yMin bottom endpoint of the y-axis\r\n @param yMax top endpoint of the y-axis\r\n @param zMin back endpoint of the z-axis\r\n @param zMax front endpoint of the z-axis\r\n @param cX color for the x-axis\r\n @param cY color for the y-axis\r\n @param cZ color for the z-axis\r\n */\r\n public Axes3D(double xMin, double xMax,\r\n double yMin, double yMax,\r\n double zMin, double zMax,\r\n Color cX, Color cY, Color cZ)\r\n {\r\n super(\"Axes 3D\");\r\n\r\n Vertex x0 = new Vertex(xMin, 0, 0);\r\n Vertex x1 = new Vertex(xMax, 0, 0);\r\n Vertex y0 = new Vertex( 0, yMin, 0);\r\n Vertex y1 = new Vertex( 0, yMax, 0);\r\n Vertex z0 = new Vertex( 0, 0, zMin);\r\n Vertex z1 = new Vertex( 0, 0, zMax);\r\n\r\n // Add the vertices to the model.\r\n addVertex(x0, x1, y0, y1, z0, z1);\r\n\r\n // Add the colors to the model.\r\n addColor(cX, cY, cZ);\r\n\r\n // Create 3 line segments.\r\n addLineSegment(new LineSegment(0, 1, 0)); // use color cX\r\n addLineSegment(new LineSegment(2, 3, 1)); // use color cY\r\n addLineSegment(new LineSegment(4, 5, 2)); // use color cZ\r\n }\r\n}//Axes3D\r\n" }, { "alpha_fraction": 0.5110823512077332, "alphanum_fraction": 0.5191716551780701, "avg_line_length": 34.14619827270508, "blob_id": "d8a4c6ced93254b295f4043662493f39dac19a29", "content_id": "1d8a1b60911ca4931c54e9cf7523eee3777c7877", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 6181, "license_type": "no_license", "max_line_length": 81, "num_lines": 171, "path": "/Master Year 1/Computer Graphics/HW4/renderer/models/ObjSimpleModel.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.models;\r\nimport renderer.scene.*;\r\n\r\nimport java.util.Scanner;\r\nimport java.io.File;\r\nimport java.io.FileInputStream;\r\nimport java.io.IOException;\r\nimport java.io.FileNotFoundException;\r\nimport java.util.regex.*;\r\nimport java.util.ArrayList;\r\n\r\n/**\r\n<p>\r\n A simple demonstration of loading and drawing a basic OBJ file.\r\n<p>\r\n A basic OBJ file is a text file that contains three kinds of lines:\r\n 
lines that begin with the character {@code 'v'}, lines that begin\r\n with the character {@code 'f'}, and lines that begin with the\r\n character {@code '#'}.\r\n<p>\r\n A line in an OBJ file that begins with {@code '#'} is a comment line\r\n and can be ignored.\r\n<p>\r\n A line in an OBJ file that begins with {@code 'v'} is a line that\r\n describes a vertex in 3-dimensional space. The {@code 'v'} will always\r\n be followed on the line by three doubles, the {@code x}, {@code y},\r\n and {@code z} coordinates of the vertex.\r\n<p>\r\n A line in an OBJ file that begins with {@code 'f'} is a line that\r\n describes a \"face\". The {@code 'f'} will be followed on the line by\r\n a sequence of positive integers. The integers are the indices of the\r\n vertices that make up the face. The \"index\" of a vertex is the order\r\n in which the vertex was listed in the OBJ file. So a line like this\r\n<pre>{@code\r\n f 2 4 1\r\n}</pre>\r\n would represent a triangle made up of the 2nd vertex read from the file,\r\n the 4th vertex read from the file, and the 1st vertex read from the file.\r\n And a line like this\r\n<pre>{@code\r\n f 2 4 3 5\r\n}</pre>\r\n would represent a quadrilateral made up of the 2nd vertex read from the file,\r\n the 4th vertex read from the file, the 3rd vertex read from the file, and\r\n the 5th vertex read from the file.\r\n<p>\r\n See <a href=\"https://en.wikipedia.org/wiki/Wavefront_.obj_file\" target=\"_top\">\r\n https://en.wikipedia.org/wiki/Wavefront_.obj_file</a>\r\n*/\r\npublic class ObjSimpleModel extends Model\r\n{\r\n /**\r\n Create a wireframe model from the contents of an OBJ file.\r\n\r\n @param objFile {@link File} object for the OBJ data file\r\n */\r\n public ObjSimpleModel(File objFile)\r\n {\r\n super(\"OBJ Model\");\r\n\r\n // Open the OBJ file.\r\n String objName = null;\r\n FileInputStream fis = null;\r\n try\r\n {\r\n objName = objFile.getCanonicalPath();\r\n fis = new FileInputStream( objFile );\r\n }\r\n catch (FileNotFoundException e)\r\n {\r\n e.printStackTrace(System.err);\r\n System.err.printf(\"ERROR! Could not find OBJ file: %s\\n\", objName);\r\n System.exit(-1);\r\n }\r\n catch (IOException e)\r\n {\r\n e.printStackTrace(System.err);\r\n System.err.printf(\"ERROR! 
Could not open OBJ file: %s\\n\", objName);\r\n System.exit(-1);\r\n }\r\n\r\n this.name = objName;\r\n\r\n // Get the geometry from the OBJ file.\r\n try\r\n {\r\n // Pattern for parsing lines that start with \"f\"\r\n Pattern p = Pattern.compile(\"^(\\\\d*)[/]?(\\\\d*)[/]?(\\\\d*)\");\r\n\r\n Scanner scanner = new Scanner(fis);\r\n while ( scanner.hasNext() )\r\n {\r\n String token = scanner.next();\r\n if ( token.startsWith(\"#\")\r\n || token.startsWith(\"vt\")\r\n || token.startsWith(\"vn\")\r\n || token.startsWith(\"s\")\r\n || token.startsWith(\"g\")\r\n || token.startsWith(\"o\")\r\n || token.startsWith(\"usemtl\")\r\n || token.startsWith(\"mtllib\") )\r\n {\r\n scanner.nextLine(); // skip over these lines\r\n }\r\n else if ( token.startsWith(\"v\") )\r\n {\r\n double x = scanner.nextDouble();\r\n double y = scanner.nextDouble();\r\n double z = scanner.nextDouble();\r\n Vertex v = new Vertex(x, y, z);\r\n this.addVertex( v );\r\n }// parse vertex\r\n else if ( token.startsWith(\"f\") )\r\n {\r\n // tokenize the rest of the line\r\n String restOfLine = scanner.nextLine();\r\n Scanner scanner2 = new Scanner( restOfLine );\r\n // parse three vertices and make two line segments\r\n int[] v = new int[3];\r\n for (int i = 0; i < 3; i++)\r\n {\r\n // parse a \"v/vt/vn\" group\r\n String faceGroup = scanner2.next();\r\n Matcher m = p.matcher( faceGroup );\r\n if ( m.find() )\r\n {\r\n v[i] = Integer.parseInt( m.group(1) );\r\n String vt = m.group(2); // don't need\r\n String vn = m.group(3); // don't need\r\n }\r\n else\r\n System.err.println(\"Error: bad face: \" + faceGroup);\r\n }\r\n addLineSegment(new LineSegment( v[0]-1, v[1]-1 ));\r\n addLineSegment(new LineSegment( v[1]-1, v[2]-1 ));\r\n\r\n // parse another vertex (if there is one) and make a line segment\r\n while (scanner2.hasNext())\r\n {\r\n v[1] = v[2];\r\n String faceGroup = scanner2.next();\r\n Matcher m = p.matcher( faceGroup );\r\n if ( m.find() )\r\n {\r\n v[2] = Integer.parseInt( m.group(1) );\r\n String vt = m.group(2); // don't need\r\n String vn = m.group(3); // don't need\r\n }\r\n else\r\n System.err.println(\"Error: bad face: \" + faceGroup);\r\n\r\n addLineSegment(new LineSegment( v[1]-1, v[2]-1 ));\r\n }\r\n // close the line loop around this face\r\n addLineSegment(new LineSegment( v[2]-1, v[0]-1 ));\r\n }// parse face\r\n }// parse one line\r\n fis.close();\r\n }\r\n catch (Exception e)\r\n {\r\n e.printStackTrace(System.err);\r\n System.err.printf(\"ERROR! 
Could not read OBJ file: %s\\n\", objName);\r\n System.exit(-1);\r\n }\r\n }\r\n}//ObjSimpleModel\r\n" }, { "alpha_fraction": 0.652996838092804, "alphanum_fraction": 0.652996838092804, "avg_line_length": 14.789473533630371, "blob_id": "208395dfb83d10c34829ab2bd1f6378d41a8642d", "content_id": "ba5bc8d09a801b5a6bc7a7537ddf87c28eb866de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 317, "license_type": "no_license", "max_line_length": 59, "num_lines": 19, "path": "/Year 3/Assignment 4/Expr_Node.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n#include \"Expr_Node.h\"\r\nExpr_Node::Expr_Node(void):\r\nright_leaf(nullptr),\r\nleft_leaf(nullptr)\r\n{\r\n\t//initialize leaves to nullptr\r\n}\r\n\r\nExpr_Node::~Expr_Node(void)\r\n{\r\n\tdelete right_leaf;\r\n\tdelete left_leaf;\r\n}" }, { "alpha_fraction": 0.523809552192688, "alphanum_fraction": 0.523809552192688, "avg_line_length": 13.923076629638672, "blob_id": "afb6e1863ef4bc6f0e070bb3128098e379fece69", "content_id": "d04511d1d3bfa7c0c3d57e90c55691f4e048ffcb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 630, "license_type": "no_license", "max_line_length": 45, "num_lines": 39, "path": "/Year 2/Project 1/LinkedList.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#include \"LinkedList.h\"\r\n#include \"Tour.h\"\r\n#include <iostream>\r\nusing namespace std;\r\n\r\n\t//adds node to the linked list\r\n\tvoid LinkedList:: addNode(int n,int m)\r\n\t{\r\n\t\tnode *temp = new node;\r\n\t\ttemp->data = (n,m);\r\n\t\t\r\n\t\ttemp->next = NULL;\r\n\r\n\t\tif (head == NULL)\r\n\t\t{\r\n\t\t\thead = temp;\r\n\t\t\ttail = temp;\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\ttail->next = temp;\r\n\t\t\ttail = tail->next;\r\n\t\t}\r\n\t}\r\n\t\r\n\t\r\n\t//traverses through list and prints the data\r\n\tvoid LinkedList:: displayList()\r\n\t{ \r\n\t\t\t\r\n\t\tnode * temp = new node;\r\n\t\ttemp = head;\r\n\t\twhile (temp != NULL)\r\n\t\t{\r\n\t\t\tcout << temp->data + \" \" << endl;\r\n\t\t\t\r\n\t\t\ttemp = temp->next;\r\n\t\t}\r\n\t}\r\n\t\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.835616409778595, "alphanum_fraction": 0.835616409778595, "avg_line_length": 72, "blob_id": "6e054b51e2d3b78a4cd434fdd839a6ecb4147358", "content_id": "7522a647168b8b83f9ac54c737596502704111b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 73, "license_type": "no_license", "max_line_length": 72, "num_lines": 1, "path": "/Master Year 2/Operating Systems/README.txt", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "Instructions for each assignment is within the folders with source code.\n" }, { "alpha_fraction": 0.5025248527526855, "alphanum_fraction": 0.5349405407905579, "avg_line_length": 33.69186019897461, "blob_id": "b5c2c85ea8496be0f19ada9d08e8fbefd88c72c0", "content_id": "12fe4bcde91526e5fa462ec17461ef9e9e085f16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 6139, "license_type": "no_license", "max_line_length": 78, "num_lines": 172, "path": "/Master Year 1/Computer Graphics/HW4/renderer/models/PyramidFrustum.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.models;\r\nimport 
renderer.scene.*;\r\n\r\n/**\r\n Create a wireframe model of a frustum of a right square pyramid\r\n with its base in the xz-plane.\r\n<p>\r\n See <a href=\"https://en.wikipedia.org/wiki/Frustum\" target=\"_top\">\r\n https://en.wikipedia.org/wiki/Frustum</a>\r\n\r\n @see Pyramid\r\n*/\r\npublic class PyramidFrustum extends Model\r\n{\r\n /**\r\n Create a frustum of a right square pyramid with its base in the\r\n xz-plane, a base side length of 2, top side length of 1, and height 1/2.\r\n */\r\n public PyramidFrustum( )\r\n {\r\n this(2.0, 1.0, 0.5, 7, 4);\r\n }\r\n\r\n\r\n /**\r\n Create a frustum of a right square pyramid with its base in the\r\n xz-plane, a base side length of {@code s1}, top side length of\r\n {@code s2}, and height {@code h}.\r\n <p>\r\n This model works with either {@code s1 > s2} or {@code s1 < s2}.\r\n In other words, the frustum can have its \"apex\" either above or\r\n below the xz-plane.\r\n\r\n @param s1 side length of the base of the frustum\r\n @param s2 side length of the top of the frustum\r\n @param h height of the frustum\r\n */\r\n public PyramidFrustum(double s1, double s2, double h)\r\n {\r\n super();\r\n\r\n // Create the pyramid's geometry.\r\n Vertex v0 = new Vertex(-s1/2, 0, -s1/2); // base\r\n Vertex v1 = new Vertex(-s1/2, 0, s1/2);\r\n Vertex v2 = new Vertex( s1/2, 0, s1/2);\r\n Vertex v3 = new Vertex( s1/2, 0, -s1/2);\r\n Vertex v4 = new Vertex(-s2/2, h, -s2/2); // top\r\n Vertex v5 = new Vertex(-s2/2, h, s2/2);\r\n Vertex v6 = new Vertex( s2/2, h, s2/2);\r\n Vertex v7 = new Vertex( s2/2, h, -s2/2);\r\n addVertex(v0, v1, v2, v3, v4, v5, v6, v7);\r\n\r\n // Create 6 faces.\r\n addLineSegment(new LineSegment(0, 1)); // base\r\n addLineSegment(new LineSegment(1, 2));\r\n addLineSegment(new LineSegment(2, 3));\r\n addLineSegment(new LineSegment(3, 0));\r\n addLineSegment(new LineSegment(0, 4)); // 4 sides\r\n addLineSegment(new LineSegment(1, 5));\r\n addLineSegment(new LineSegment(2, 6));\r\n addLineSegment(new LineSegment(3, 7));\r\n addLineSegment(new LineSegment(4, 5)); // top\r\n addLineSegment(new LineSegment(5, 6));\r\n addLineSegment(new LineSegment(6, 7));\r\n addLineSegment(new LineSegment(7, 4));\r\n }\r\n\r\n\r\n /**\r\n Create a frustum of a right square pyramid with its base in the\r\n xz-plane, a base side length of {@code s}, top of the frustum at\r\n height {@code h}, and with the pyramid's apex at on the y-axis at\r\n height {@code a}.\r\n\r\n @param n number of lines of latitude\r\n @param k number of lines of longitude\r\n @param s side length of the base of the frustum\r\n @param h height of the frustum\r\n @param a height of the apex of the pyramid\r\n */\r\n public PyramidFrustum(int n, int k, double s, double h, double a)\r\n {\r\n this(s, (1 - h/a)*s, h, n, k);\r\n }\r\n\r\n\r\n /**\r\n Create a frustum of a right square pyramid with its base in the\r\n xz-plane, a base side length of {@code s1}, top side length of\r\n {@code s2}, and height {@code h}.\r\n <p>\r\n This model works with either {@code s1 > s2} or {@code s1 < s2}.\r\n In other words, the frustum can have its \"apex\" either above or\r\n below the xz-plane.\r\n\r\n @param s1 side length of the base of the frustum\r\n @param s2 side length of the top of the frustum\r\n @param h height of the frustum\r\n @param n number of lines of latitude\r\n @param k number of lines of longitude\r\n */\r\n public PyramidFrustum(double s1, double s2, double h, int n, int k)\r\n {\r\n super(\"Pyramid Frustum\");\r\n\r\n if (n < 0) n = 0;\r\n if (k < 1) k = 1;\r\n\r\n // Create 
the frustum's geometry.\r\n int index = 0;\r\n\r\n // Create all the lines of longitude from the top, down to the base,\r\n // across the base, then back up to the top, and across the top.\r\n s1 = s1/2;\r\n s2 = s2/2;\r\n double delta1 = (2 * s1) / k;\r\n double delta2 = (2 * s2) / k;\r\n // lines of \"longitude\" perpendicular to the x-axis\r\n for (int j = 0; j <= k; ++j)\r\n {\r\n double d1 = j * delta1;\r\n double d2 = j * delta2;\r\n addVertex(new Vertex(-s2+d2, h, -s2),\r\n new Vertex(-s1+d1, 0, -s1),\r\n new Vertex(-s1+d1, 0, s1),\r\n new Vertex(-s2+d2, h, s2));\r\n addLineSegment(new LineSegment(index+0, index+1),\r\n new LineSegment(index+1, index+2),\r\n new LineSegment(index+2, index+3),\r\n new LineSegment(index+3, index+0));\r\n index += 4;\r\n }\r\n // lines of \"longitude\" perpendicular to the z-axis\r\n for (int j = 0; j <= k; ++j)\r\n {\r\n double d1 = j * delta1;\r\n double d2 = j * delta2;\r\n addVertex(new Vertex( s2, h, -s2+d2),\r\n new Vertex( s1, 0, -s1+d1),\r\n new Vertex(-s1, 0, -s1+d1),\r\n new Vertex(-s2, h, -s2+d2));\r\n addLineSegment(new LineSegment(index+0, index+1),\r\n new LineSegment(index+1, index+2),\r\n new LineSegment(index+2, index+3),\r\n new LineSegment(index+3, index+0));\r\n index += 4;\r\n }\r\n // Create all the lines of \"latitude\" around the pyramid, starting\r\n // from the base and working up to the top.\r\n double deltaH = h / (n + 1);\r\n double deltaS = (s1 - s2) / (n + 1);\r\n double s = s1;\r\n for (int i = 0; i <= n; ++i)\r\n {\r\n h = i * deltaH;\r\n addVertex(new Vertex( s, h, s),\r\n new Vertex( s, h, -s),\r\n new Vertex(-s, h, -s),\r\n new Vertex(-s, h, s));\r\n addLineSegment(new LineSegment(index+0, index+1),\r\n new LineSegment(index+1, index+2),\r\n new LineSegment(index+2, index+3),\r\n new LineSegment(index+3, index+0));\r\n s -= deltaS;\r\n index += 4;\r\n }\r\n }\r\n}//PyramidFrustum\r\n" }, { "alpha_fraction": 0.6333333253860474, "alphanum_fraction": 0.6380952596664429, "avg_line_length": 14.307692527770996, "blob_id": "96e4be189e5e1076b9fb7767c5eda60caa9f482f", "content_id": "5aac7d2ecdcfffbef704cd19a232d74901dbfe5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 210, "license_type": "no_license", "max_line_length": 38, "num_lines": 13, "path": "/Year 2/Project 3/newSort.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "//Sort.h\r\n#ifndef NEWSORT_H\r\n#define NEWSORT_H\r\nclass newSort\r\n{\r\n\tpublic: \r\n\t\t//pure virtual sort method\r\n\t\tvirtual void sort(int*, int,int)= 0;\r\n\t\t//destructor\r\n\t\tvirtual ~newSort(){}\r\n};\r\n\r\n#endif//NEWSORT_H" }, { "alpha_fraction": 0.624923586845398, "alphanum_fraction": 0.6282420754432678, "avg_line_length": 43.18146896362305, "blob_id": "aaf95778f300eb0a532dd001f0f784f838f3d7d8", "content_id": "c9a98a347408f8e6283e691376a563f6d76510c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11483, "license_type": "no_license", "max_line_length": 178, "num_lines": 259, "path": "/Year 4/csci487Group4Project-makingGraphs/CityStructure/Intersection.py", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#Intersection.py\n#Created 4/5/20 by Jasper Heist\n#Intersection class definition\n\nfrom Utilities.Definitions import IntersectionType, Orientation, Direction\nfrom CityStructure.Connection import Connection\nfrom Utilities.UIDGenerator import NumberGenerator\n\n#class that represents an intersection. 
handles passing cars through the intersection(not implemented)-Essentially a node in our City/Graph\nclass Intersection(object):\n def __init__(self, intersection_type):\n object.__init__(self)\n self.__intersection_type = intersection_type\n self.__uid = NumberGenerator().intersection_uid()\n\n #lane definitions\n self.__connections={\n #southbound lane entering intersection on top\n Direction.SouthboundIn:None,\n #northbound lane leaving intersection on top \n Direction.NorthboundOut:None, \n #westbound lane entering intersection on right\n Direction.WestboundIn:None, \n #eastbound lane leaving intersection on right\n Direction.EastboundOut:None, \n #northbound lane entering intersection on bottom\n Direction.NorthboundIn:None,\n #southbound lane leaving intersection on bottom\n Direction.SouthboundOut:None,\n #eastbound lane entering intersection on left\n Direction.EastboundIn:None,\n #westbound lane leaving intersection on left\n Direction.WestboundOut:None\n }\n\n #coordinates relative to main intersection in city(first one added), used for mapping\n self.__coordinates_in_city = [None, None]\n\n def print_info(self):\n \"\"\"prints textual information about this intersection (prints all connection info this intersection has)\"\"\"\n print(\" Intersection \" + str(self.__uid) + \"(\" + str(self.__intersection_type) + \")\")\n print(\" --------------------\")\n for connection in self.__connections:\n if(self.__connections[connection] == None):\n continue\n else:\n print(\" \" + str(connection))\n self.__connections[connection].print_info()\n\n print()\n\n def print_visual(self):\n \"\"\"gathers visual representation of intersection and return array of strings\"\"\"\n intersection_visuals = list()\n #build ascii art\n intersection_visuals.append(\" | | ↑ ↑ \")\n intersection_visuals.append(\" | | | | \")\n intersection_visuals.append(\" |{}| |{}| \".format(self.get_uid_for_connection(Direction.SouthboundIn), self.get_uid_for_connection(Direction.NorthboundOut)))\n intersection_visuals.append(\" ↓ ↓ | | \")\n intersection_visuals.append(\"←{}- ←{}-\".format(self.get_uid_for_connection(Direction.WestboundOut), self.get_uid_for_connection(Direction.WestboundIn)))\n intersection_visuals.append(\"←{}- ←{}-\".format(self.get_uid_for_connection(Direction.WestboundOut), self.get_uid_for_connection(Direction.WestboundIn)))\n intersection_visuals.append(\" ({})[{}] \".format(self.get_uid_string(), self.get_coordinates_string_for_intersection()))\n intersection_visuals.append(\"-{}→ -{}→\".format(self.get_uid_for_connection(Direction.EastboundIn), self.get_uid_for_connection(Direction.EastboundOut)))\n intersection_visuals.append(\"-{}→ -{}→\".format(self.get_uid_for_connection(Direction.EastboundIn), self.get_uid_for_connection(Direction.EastboundOut)))\n intersection_visuals.append(\" | | ↑ ↑ \")\n intersection_visuals.append(\" |{}| |{}| \".format(self.get_uid_for_connection(Direction.SouthboundOut), self.get_uid_for_connection(Direction.NorthboundIn)))\n intersection_visuals.append(\" | | | | \")\n intersection_visuals.append(\" ↓ ↓ | | \")\n\n return intersection_visuals\n\n def get_coordinates_string_for_intersection(self):\n x = \"\"\n y = \"\"\n\n if self.coordinates[0] >= 0:\n x += \" \"\n if self.coordinates[1] >= 0:\n y += \" \"\n \n x += str(self.coordinates[0])\n y += str(self.coordinates[1])\n\n return \"{},{}\".format(x, y)\n\n def get_uid_string(self):\n if self.uid > 9:\n return str(self.uid)\n else:\n return \" {}\".format(self.uid)\n\n def get_uid_for_connection(self, 
direction:Direction):\n \"\"\"return formatted string from the get_formatting_for_direction(int, direction) instance method after doing a NULL check\"\"\"\n if(self.__connections[direction] == None):\n if(direction == Direction.NorthboundIn) or (direction == Direction.NorthboundOut) or (direction == Direction.SouthboundIn) or (direction == Direction.SouthboundOut):\n return(\"XX\")\n else:\n return(\"-XX-\")\n else:\n connection:Connection = self.__connections[direction]\n uid = connection.uid\n return self.get_formatting_for_direction(uid, direction)\n\n\n\n def get_formatting_for_direction(self, uid:int, direction:Direction):\n \"\"\"return proper formatting for uid based on if this is a vertical or horizontal road\"\"\"\n if(direction == Direction.NorthboundIn) or (direction == Direction.NorthboundOut) or (direction == Direction.SouthboundIn) or (direction == Direction.SouthboundOut):\n #check size of uid for formatting\n if(uid < 10):\n return(\" \" + str(uid))\n else:\n return str(uid)\n else:\n if(uid < 10):\n return(\"-{}--\".format(uid))\n elif(uid < 100):\n return(\"-{}-\".format(uid))\n else:\n return()\n\n\n def link_connections(self, connection_coming_in:Connection, connection_coming_out:Connection, orientation:Orientation):\n \"\"\"takes two connections (one coming in and one coming out) and links them to this intersection at the supplied orientation\n\n Parameters\n ----------\n connection_coming_in:Connection\n connection that will feed into this intersection\n \n connection_coming_out:Connection\n connection that will be feeding cars out of this intersection\n\n orientation:Orientation Enum type\n where to attach these connections in relation to the intersection\n\n \"\"\"\n\n #determine correct connection to make\n if(orientation == Orientation.Top):\n self.__connections[Direction.NorthboundOut] = connection_coming_out\n self.__connections[Direction.SouthboundIn]= connection_coming_in\n elif(orientation == Orientation.Right):\n self.__connections[Direction.EastboundOut] = connection_coming_out\n self.__connections[Direction.WestboundIn]= connection_coming_in\n elif(orientation == Orientation.Bottom):\n self.__connections[Direction.SouthboundOut] = connection_coming_out\n self.__connections[Direction.NorthboundIn]= connection_coming_in\n elif(orientation == Orientation.Left):\n self.__connections[Direction.WestboundOut] = connection_coming_out\n self.__connections[Direction.EastboundIn]= connection_coming_in\n else:\n pass\n\n connection_coming_in.output_intersection = self.uid\n connection_coming_out.output_intersection = self.uid\n\n def link_intersection(self, other_intersection, orientation, skipCoordinateAssignment = False):\n \"\"\"links this intersection with the supplied intersection.\n \n Parameters\n ----------\n other_intersection: Intersection\n intersection to connect to this intersection\n\n orientation: Orientation Enum Type\n where this connection should be in relation to the intersection this is called on\n\n in_connection: Connection\n connection that feeds in to this intersection and out of the other one (will create a default one if not supplied)\n\n out_connection: Connection\n connection that feeds out of this intersection and into the other one (will create a default one if not supplied)\n \"\"\"\n\n\n in_connection = Connection()\n out_connection = Connection()\n\n #link intersections with connections\n self.link_connections(in_connection, out_connection, orientation)\n #link other intersection to other end\n 
other_intersection.link_connections(out_connection, in_connection, Orientation.other_side(orientation))\n if skipCoordinateAssignment:\n return\n\n\n if(orientation == Orientation.Top):\n other_intersection.coordinates = [self.coordinates[0], self.coordinates[1]-1]\n elif(orientation == Orientation.Right):\n other_intersection.coordinates = [self.coordinates[0]+1, self.coordinates[1]]\n elif(orientation == Orientation.Bottom):\n other_intersection.coordinates = [self.coordinates[0], self.coordinates[1]+1]\n if(orientation == Orientation.Left):\n other_intersection.coordinates = [self.coordinates[0]-1, self.coordinates[1]]\n\n def get_adjacent_intersection(self, relation:Orientation):\n connection_to_follow:Connection = None\n if relation == Orientation.Top:\n connection_to_follow = self.__connections[Direction.NorthboundOut]\n elif relation == Orientation.Right:\n connection_to_follow = self.__connections[Direction.EastboundOut]\n elif relation == Orientation.Bottom:\n connection_to_follow = self.__connections[Direction.SouthboundOut]\n elif relation == relation.Left:\n connection_to_follow = self.__connections[Direction.WestboundOut]\n\n if connection_to_follow == None:\n return None\n else:\n return connection_to_follow.output_intersection \n\n\n #returns enum value of the type of intersection this is\n def __get_intersection_type(self):\n return self.__intersection_type\n\n def __get_uid(self):\n \"\"\"gets uid for intersection\"\"\"\n return self.__uid\n\n def __set_coordinates(self, coordinates):\n self.__coordinates_in_city[0] = coordinates[0]\n self.x = coordinates[0]\n self.__coordinates_in_city[1] = coordinates[1]\n self.y = coordinates[1]\n\n def __get_coordinates(self):\n if(self.__coordinates_in_city[0] == None or self.__coordinates_in_city[1] == None):\n return None\n else:\n return self.__coordinates_in_city\n\n #Property Definitions\n\n intersectionType = property(fget=__get_intersection_type)\n\n #uid\n uid = property(fget=__get_uid)\n\n coordinates = property(fset=__set_coordinates, fget=__get_coordinates)\n\nclass TrafficLight(Intersection):\n \"\"\"class representing a four way traffic light\"\"\"\n\n def __init__(self):\n Intersection.__init__(self, IntersectionType.TrafficLight)\n\nclass FourWayStop(Intersection):\n \"\"\"class that represents a two way stop\"\"\"\n\n def __init__(self):\n Intersection.__init__(self, IntersectionType.FourWayStop)\n \nclass TwoWayStop(Intersection):\n \"\"\"class that represents a road that\"\"\"\n\n def __init__(self):\n Intersection.__init__(self, IntersectionType.TwoWayStop)\n " }, { "alpha_fraction": 0.5553157329559326, "alphanum_fraction": 0.5650296807289124, "avg_line_length": 21.417720794677734, "blob_id": "2a4ebaacd7cc9fb2c26cb480fd7c6a500e51f2e4", "content_id": "45c769ea6692f7105731d612ce33d3ca1eec323a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1853, "license_type": "no_license", "max_line_length": 109, "num_lines": 79, "path": "/Year 2/Assignment #3/Driver.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// \r\n//Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor \r\n// received any help on this assignment\r\n//.\r\n//\r\n// huntmk\r\n\r\n#include <iostream>\r\n#include \"Player.h\"\r\n\r\n\r\nint main()\r\n{\t\r\n\t\r\n\tstd::string decision;\r\n\t//declare array of objects for the team\r\n\tPlayer * person = new Player[11];\r\n\t\r\n\t// Print menu and ask for input using string decision\r\n\tstd::cout << \"Welcome to our 
CSCI 240 Roster!\" << std::endl;\r\n\tperson->printMenu();\r\n\tstd::cin >> decision;\r\n\t//i represents index\r\n\tint i = 0;\r\n\t//num represents how many times a Player has been called\r\n\tint num = 0;\r\n\t\r\n\r\n\t\t\t//while decision doesn't equal 3\t\r\n\t\t\twhile (decision != \"3\"){\r\n\t\t\t\t// if decision equals 1\r\n\t\t\t\tif (decision == \"1\") {\r\n\t\t\t\t\t//check if array of objects is full\r\n\t\t\t\t\tif(num >=11){\r\n\t\t\t\t\t\tstd::cout << \"Sorry you can only have 11 players on the team, Enter another option: \" << std::endl;\r\n\t\t\t\t\t\tstd::cin >> decision;\r\n\t\t\t\t\t}\r\n\t\t\t\t\telse{\r\n\t\t\t\t// add new player to the index\r\n\t\t\t\t\tperson[i].addPlayer();\r\n\t\t\t\t\t//increment i and num to represent the index and num times a player has been added\r\n\t\t\t\t\ti++;\r\n\t\t\t\t\tnum++;\r\n\t\t\t\t\tstd::cout << std::endl;\r\n\t\t\t\t\t//ask for another input\r\n\t\t\t\t\tperson[i].printMenu();\r\n\t\t\t\t\tstd::cin >> decision;\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\r\n\t\t\t\t// if decision == 2\r\n\t\t\t\telse if (decision == \"2\") {\r\n\t\t\t\t\t// for every array spot, print players created by comparing to number of times a player has been created\r\n\t\t\t\t\tfor (i = 0; i < num; i++) {\r\n\t\t\t\t\t\t//call function to show player\r\n\t\t\t\t\t\tperson[i].showPlayer();\r\n\t\t\t\t\t}\r\n\t\t\t\t\tstd::cout << std::endl;\r\n\t\t\t\t\t//ask for another input\r\n\t\t\t\t\tperson[i].printMenu();\r\n\t\t\t\t\tstd::cin >> decision;\r\n\r\n\t\t\t\t}\r\n\r\n\t\t\t// else ask for correct number\r\n\t\t\telse {\r\n\t\t\t\tstd::cout << \"Wrong input, enter correct option\";\r\n\t\t\t\tstd::cin >> decision;\r\n\t\t\t\t}\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t}\r\n\t\t\t//destructor\r\n\t\t\tdelete [] person;\r\n\t\t\t\r\n\t\t\t\r\n}\r\n\t\r\n" }, { "alpha_fraction": 0.5720930099487305, "alphanum_fraction": 0.5860465168952942, "avg_line_length": 16.485713958740234, "blob_id": "9dca84b45be33a46f6737c072d387cfabc0b8ba2", "content_id": "e15787c2a38020d34bf3b24c40d9c5dc2e45fcf6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 645, "license_type": "no_license", "max_line_length": 47, "num_lines": 35, "path": "/Year 2/Project 3/InsertionSort.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "//InsertionSort.cpp\r\n#include <iostream>\r\n#include \"InsertionSort.h\"\r\n\r\n\r\n//default constructor\r\nInsertionSort::InsertionSort(){\r\n\t\r\n}\r\n\r\nInsertionSort::~InsertionSort(){\r\n}\r\n\r\n//sort method for Insertion sorting\r\nvoid InsertionSort::sort(int array[], int size)\r\n{\r\n\t//sort the array from i to i-1.\r\n\tint i,sortedSet, k;\r\n\tfor( i = 1; i < size; i++)\r\n\t{\r\n\t\tsortedSet = array[i];\r\n\t\tk = i-1;\r\n\t\t//while array[k] is less than sortedSet, \r\n\t\twhile(k>=0 && array[k] < sortedSet)\r\n\t\t{\r\n\t\t\t// shift array[k] one position to the right, \r\n\t\t\tarray[k +1] = array[k];\r\n\t\t\tk = k -1;\r\n\t\t}\r\n\t\t\r\n\t\t//add that [k+1] to the sorted set\r\n\t\tarray[k +1 ] = sortedSet;\r\n\t\t\r\n\t}\r\n}" }, { "alpha_fraction": 0.7551867365837097, "alphanum_fraction": 0.7842323780059814, "avg_line_length": 29.125, "blob_id": "4f337e6dbc1bcd6068abcbc21e634885cd5b0978", "content_id": "0dbe88f95a80ced521d313f7cdfdcc22be8a0048", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 241, "license_type": "no_license", "max_line_length": 55, "num_lines": 8, "path": "/Year 3/README.txt", "repo_name": "huntmk/SchoolWork", 
"src_encoding": "UTF-8", "text": "Course work from assignments/projects from third year.\nCS_363 (Software Design Patterns):\n array-source (Assignment 1)\n compotion-source (Assignment 2)\n Assignment 3\n Assignment 4\n\nInstructions for assignment are in each folder as pdfs.\n" }, { "alpha_fraction": 0.610124945640564, "alphanum_fraction": 0.6364234089851379, "avg_line_length": 27.823530197143555, "blob_id": "f2d6481b2be9ef3ca4481c5e6b538288d1de5950", "content_id": "2b5c93708e2d2e6301a681b5621e2b2e4049653c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1521, "license_type": "no_license", "max_line_length": 127, "num_lines": 51, "path": "/Master Year 1/Algorithms/knapsack.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#include <algorithm> //for max function\r\n#include <iostream> //output\r\n\r\n//knapsack function\r\n// limit - weight capacity\r\n// value - holds value of items\r\n// weight - holds weight of items\r\n// n - number of items\r\nint knapsack(int limit, int value[], int weight[], int n)\r\n{\r\n\t//base case: if limit or weight == 0, then return 0\r\n\tif (limit == 0 || n == 0)\r\n\t\treturn 0;\r\n\r\n\t//if the weight of the nth item is more than capacity ,\r\n\t//then knapsack function will be called again with one less item\r\n\r\n\t//Note: because of the indexing, to the nth item, we use n-1\r\n\t//Ex. n = 3, indexes of weight[0,1,2] , so the 3rd item is in index 2.\r\n\t//Hence n - 1\r\n\r\n\tif (weight[n - 1] > limit)\r\n\t{\r\n\t\treturn knapsack(limit, value, weight, n - 1);\r\n\t}\r\n\t//we need to get the max value to put in knapsack\r\n\t//so from the above condition, if weight[n - 1] is not greater than limit\r\n\t//then include the nth item include case: value[n-1] + knapsack(limit - weight[n-1],value,weight,n-1)\r\n\t//this adds the nth value to the knapsack call for the next nth item\r\n\r\n\t//the other case is when not including the nth item: so this knapsack call just goes to the next nth item\r\n\telse\r\n\t{\r\n\t\t//return the max of:\r\n\t\treturn std::max(value[n - 1] + knapsack(limit - weight[n - 1], value, weight, n - 1), knapsack(limit, value, weight, n - 1));\r\n\r\n\t}\r\n\r\n\r\n\r\n}\r\nint main()\r\n{\r\n\t//declare arrays \r\n\tint n = 5;\r\n\tint limit = 10;\r\n\tint values[] = { 40,35,20,10,5};\r\n\tint weight[] = { 1,2,3,4,5 };\r\n\tstd::cout << knapsack(limit, values, weight, n);\r\n\r\n}\r\n" }, { "alpha_fraction": 0.6164383292198181, "alphanum_fraction": 0.6203522682189941, "avg_line_length": 15.689655303955078, "blob_id": "ee1f83528721463a4f6969167b0623bdb54ae61b", "content_id": "a272dedcfc2a973626da7e74e72c43ff4167aaa4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 511, "license_type": "no_license", "max_line_length": 59, "num_lines": 29, "path": "/Year 3/Assignment 4/Expr_Node.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n#ifndef _EXPR_NODE\r\n#define _EXPR_NODE\r\n\r\nclass Expr_Node_Visitor;\r\n\r\n//Command Class\r\nclass Expr_Node {\r\n\tpublic:\r\n\t\r\n\t\tExpr_Node(void);\r\n\t\tvirtual ~Expr_Node(void);\r\n\t\t\r\n\t\tvirtual int eval(void) = 0;\r\n\t\t\r\n\t\t//Used to traverse the tree\r\n\t\tvirtual void accept (Expr_Node_Visitor & v) = 0;\r\n\t\t\r\n\t\t//every node has a right and left leaf\r\n\t\tExpr_Node * 
right_leaf;\r\n\t\tExpr_Node * left_leaf;\r\n\t\t\t\r\n};\r\n#endif" }, { "alpha_fraction": 0.6200000047683716, "alphanum_fraction": 0.6200000047683716, "avg_line_length": 13.473684310913086, "blob_id": "a8b7ddd0ffaa05778e0d14401616196c3b345b37", "content_id": "2a0a36bf1829d8631e1cf67d4dcf3a520f1a4b56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 300, "license_type": "no_license", "max_line_length": 59, "num_lines": 19, "path": "/Year 3/Assignment 4/Expr_Tree.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "//Expr_Tree class\r\n\r\n// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n#include \"Expr_Tree.h\"\r\nExpr_Tree::Expr_Tree(void):\r\nleft_(nullptr),\r\nright_(nullptr)\r\n{\r\n\t\r\n}\r\nExpr_Tree::~Expr_Tree(void)\r\n{\r\n\tdelete left_;\r\n\tdelete right_;\r\n}\r\n\t\t\r\n\t\t" }, { "alpha_fraction": 0.3903619050979614, "alphanum_fraction": 0.6783433556556702, "avg_line_length": 47.28971862792969, "blob_id": "073c1fc08b77eff980c6bb90e14a905c364dde94", "content_id": "467ca36d1a1fa6a657acbe75a92905ffadc6b5c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5167, "license_type": "no_license", "max_line_length": 935, "num_lines": 107, "path": "/Year 4/QRcodeSubmission/QRCodeGenerator/errorCorrection.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "//Andrew Chittick\n//Error Correction takes data codewords\n//and produces error correction codewords\n\n#include \"errorCorrection.h\"\n\nvoid errorCorrection(std::string * codewords, int numCodeWords, int* errorCorrectionWords, int numErrorWords){\n //define functions\n //void codeToDecimal(std::string *, int, int *);\n //void getErrorWords(int*, int, int*, int);\n //run functions produces errorCorrectionWords\n int * decimalCodewords = new int[numCodeWords];\n codeToDecimal(codewords, numCodeWords, decimalCodewords);\n getErrorWords(decimalCodewords, numCodeWords, errorCorrectionWords, numErrorWords);\n delete [] decimalCodewords;\n return;\n}\n\n//Converts an 8 char string to a decimal size times; writes to decimalCodewords\nvoid codeToDecimal(std::string * code, int size, int * decimalCodewords){\n int count, decimal;\n for (int i=0; i<size; i++){\n count = 7;\n decimal = 0;\n for(std::string::iterator it=code[i].begin(); it!=code[i].end(); ++it){\n if (*it == '1'){\n decimal = decimal + pow(2,count);\n }\n count--;\n }\n decimalCodewords[i] = decimal;\n }\n return;\n}\n\n//Generates errorCorrectionWords array\nvoid getErrorWords(int * decimalCodewords, int numCodeWords, int * errorCorrectionWords, int numErrorWords){\n //initialize functions\n //int convertToAlpha(int);\n //int convertToInteger(int);\n //initialize variables\n int generatorOne[11] = {0, 251, 67, 46, 61, 118, 70, 64, 94, 32, 45};\n int generatorTwo[17] = {0, 120, 104, 107, 109, 102, 161, 76, 3, 91, 191, 147, 169, 182, 194, 225, 120};\n int generatorThree[27] = {0, 173, 125, 158, 2, 103, 182, 118, 17, 145, 201, 111, 28, 165, 53, 161, 21, 245, 142, 13, 102, 48, 227, 153, 145, 218, 70};\n int generatorSize = numErrorWords+1;\n int * generator;\n if (generatorSize == 11){\n generator = generatorOne;\n }\n else if (generatorSize == 17){\n generator = generatorTwo;\n }\n else{\n generator = generatorThree;\n }\n int steps = numCodeWords;\n int temp = 0;\n int alph = 0;\n //generate error correction words\n for (int index=0; index<numCodeWords; 
index++){\n //convert lead term to alpha\n alph = convertToAlpha(decimalCodewords[0]);\n //1a\n //multiply generator by lead term of result from prev step a\n for (int i=0; i<generatorSize; i++){\n temp = generator[i] + alph;\n //maintain Galois Field\n if (temp > 255){\n temp = temp % 255;\n }\n //1b XOR with result from prev step b\n temp = convertToInteger(temp);\n decimalCodewords[i] = temp ^ decimalCodewords[i];\n }\n //XOR overrun (result bigger than generator polynomial)\n for (int i = generatorSize; i<steps; i++){\n //step 1b: XOR\n decimalCodewords[i] = 0 ^ decimalCodewords[i];\n }\n //prepare the result\n for (int i = 0; i < steps; i++){\n //discard lead term\n decimalCodewords[i]= decimalCodewords[i+1];\n }\n //steps shrinks until it is same size as generator\n if (steps >= generatorSize){\n steps--;\n }\n else{\n decimalCodewords[numErrorWords] = 0;\n }\n }\n //save result to errorCorrectionWords\n for (int i=0; i<numErrorWords; i++){\n errorCorrectionWords[i] = decimalCodewords[i];\n }\n return;\n}\nint intIndex[] = {0,1,25,2,50,26,198,3,223,51,238,27,104,199,75,4,100,224,14,52,141,239,129,28,193,105,248,200,8,76,113,5,138,101,47,225,36,15,33,53,147,142,218,240,18,130,69,29,181,194,125,106,39,249,185,201,154,9,120,77,228,114,166,6,191,139,98,102,221,48,253,226,152,37,179,16,145,34,136,54,208,148,206,143,150,219,189,241,210,19,92,131,56,70,64,30,66,182,163,195,72,126,110,107,58,40,84,250,133,186,61,202,94,155,159,10,21,121,43,78,212,229,172,115,243,167,87,7,112,192,247,140,128,99,13,103,74,222,237,49,197,254,24,227,165,153,119,38,184,180,224,17,68,146,217,35,32,137,46,55,63,209,91,149,188,207,205,144,135,151,178,220,252,190,97,242,86,211,171,20,42,93,158,132,60,57,83,71,109,65,162,31,45,67,216,183,123,164,118,196,23,73,236,127,12,111,246,108,161,59,82,41,157,85,170,251,96,134,177,187,204,62,90,203,89,95,176,156,169,160,81,11,245,22,235,122,117,44,215,79,174,213,233,230,231,173,232,116,214,244,234,168,80,88,175};\nint alphaIndex[] = {1,2,4,8,16,32,64,128,29,58,116,232,205,135,19,38,76,152,45,90,180,117,234,201,143,3,6,12,24,48,96,192,157,39,78,156,37,74,148,53,106,212,181,119,238,193,159,35,70,140,5,10,20,40,80,160,93,186,105,210,185,111,222,161,95,190,97,194,153,47,94,188,101,202,137,15,30,60,120,240,253,231,211,187,107,214,177,127,254,225,223,163,91,182,113,226,217,175,67,134,17,34,68,136,13,26,52,104,208,189,103,206,129,31,62,124,248,237,199,147,59,118,236,197,151,51,102,204,133,23,46,92,184,109,218,169,79,158,33,66,132,21,42,84,168,77,154,41,82,164,85,170,73,146,57,114,228,213,183,115,230,209,191,99,198,145,63,126,252,229,215,179,123,246,241,255,227,219,171,75,150,49,98,196,149,55,110,220,165,87,174,65,130,25,50,100,200,141,7,14,28,56,112,224,221,167,83,166,81,162,89,178,121,242,249,239,195,155,43,86,172,69,138,9,18,36,72,144,61,122,244,245,247,243,251,235,203,139,11,22,44,88,176,125,250,233,207,131,27,54,108,216,173,71,142,1};\n\nint convertToAlpha(int index){\n return intIndex[index-1];\n}\nint convertToInteger(int index){\n return alphaIndex[index];\n}\n" }, { "alpha_fraction": 0.6485788226127625, "alphanum_fraction": 0.6511628031730652, "avg_line_length": 14.125, "blob_id": "b7ce44ca24269eaf120c26ab3cb82b83ab789683", "content_id": "999017aec258ca7307942490e0a8897c862d7e46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 387, "license_type": "no_license", "max_line_length": 59, "num_lines": 24, "path": "/Year 3/Assignment3/Parenthesis_Command.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": 
"// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n#include \"Parenthesis_Command.h\"\r\n\r\nParenthesis_Command::Parenthesis_Command(Stack <int> & s):\r\ns_(s)\r\n{\r\n\t//constructor\r\n}\r\n\r\nvoid Parenthesis_Command::execute(void)\r\n{\r\n\t//remove it from stack\r\n\ts_.pop();\r\n\t\r\n}\r\n\r\nint Parenthesis_Command::prec (void) const\r\n{\r\n\treturn 1;\r\n}\r\n" }, { "alpha_fraction": 0.6555773019790649, "alphanum_fraction": 0.6634050607681274, "avg_line_length": 16.77777862548828, "blob_id": "cdfab1e5ae8c2ee6dcab16f609e75bbefe565a7b", "content_id": "028aaae19b8ed5d9c52660227df5964fb8258280", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 511, "license_type": "no_license", "max_line_length": 59, "num_lines": 27, "path": "/Year 3/Assignment 4/Subtract_Expr_Node.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n#include \"Subtract_Expr_Node.h\"\r\n\r\nSubtract_Expr_Node::Subtract_Expr_Node(void)\r\n{\r\n\t//constructor\r\n}\r\nSubtract_Expr_Node::~Subtract_Expr_Node(void)\r\n{\r\n\t//destructor\r\n}\r\n\t\t\r\nint Subtract_Expr_Node::calculate(int num1, int num2)\r\n{\r\n\t//return subtraction of two numbers\r\n\treturn num1 - num2;\r\n}\r\n\r\nvoid Subtract_Expr_Node::accept (Expr_Node_Visitor & v)\r\n{\r\n\tv.Visit_Subtract_Node (*this);\r\n}\r\n\r\n\r\n" }, { "alpha_fraction": 0.6532438397407532, "alphanum_fraction": 0.6657717823982239, "avg_line_length": 17.162601470947266, "blob_id": "6d0afcac636fac4c201fcbc9bd7785e5ba62bee6", "content_id": "e7632c873b20426e3366d793c8ac5e9d0eeab74a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2235, "license_type": "no_license", "max_line_length": 143, "num_lines": 123, "path": "/Year 3/composition-source/Stack.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// $Id: Stack.cpp 827 2011-02-07 14:20:53Z hillj $\n\n// Honor Pledge:\n//\n// I pledge that I have neither given nor received any help\n// on this assignment.\n\n//\n// Stack\n//\n\ntemplate <typename T>\nStack <T>::Stack (void):\ntop_element(0)\n{\n\t//default constructor\n\t//other members intialized in base class\n\t\n\t//set members\n\ttop_element = -1;\n\t\t\n}\n\n//\n// Stack\n//\ntemplate <typename T>\nStack <T>::Stack (const Stack & stack):\ns(stack.s),\ntop_element(0)\n{\n\t//copy constructor\n\t//set members of this object to stack's members\n\ttop_element = stack.top_element;\n\t\t\n}\n\n//\n// ~Stack\n//\ntemplate <typename T>\nStack <T>::~Stack (void)\n{\n\t//destructor called in base class\n}\n\n//\n// push\n//\ntemplate <typename T>\nvoid Stack <T>::push (T element)\n{\n\t//if top is equal to max size then it will overflow(so add 10 extra spaces to stack), \n\t//else increment top and put on that new spot\n\tif (top_element == s.max_size())\n\t{\n\t\t//Resize\n\t\ts.resize(s.max_size() + 10);\n\t\t\n\t}\n\t\n\t//increment top element to point to next spot on stack. then put element at top_element spot\n\ttop_element++;\n\ts.set(top_element,element);\n\t\t\n}\n\n//\n// pop\n//\ntemplate <typename T>\nvoid Stack <T>::pop (void)\n{\n\t//throw empty exception if nothing is on stack\n\tif (top_element == -1)\n\t{\n\t\tthrow empty_exception(\"Stack is empty. 
\");\n\t\t\n\t}\n\t\n\t//else decrement top_element to remove it\n\telse\n\t{\n\t\ttop_element--;\n\t\t\n\t}\n}\n\n//\n// operator =\n//\ntemplate <typename T>\nconst Stack <T> & Stack <T>::operator = (const Stack & rhs)\n{\n\t//assign rhs top element to current object's top element\n\ttop_element = rhs.top_element;\n\t\n\t//this will call operator overload function with these two objects\n\ts = rhs.s;\n}\n\n//\n// clear\n//\ntemplate <typename T>\nvoid Stack <T>::clear (void)\n{\n // COMMENT Just reset the variables instead popping each element, which\n // is expensive. Also, your stack will fail if T cannot be assigned the\n // value 0.\n\n //RESPONSE: I called the destructor to delete the old heap memory then create a new array object and assign it to the previous array object. \n \n\t//call destructor to delete any heap memory, then create new stack and assign to the previous stack\n\ts.~Array<T>();\n\tArray<T> newStack;\n\t\n\ts = newStack;\n\t\n\t//top element is set to -1\n\ttop_element = -1;\n\t\t\n}\n\n" }, { "alpha_fraction": 0.39795252680778503, "alphanum_fraction": 0.4216946065425873, "avg_line_length": 32.780303955078125, "blob_id": "e09a1218f7a7345aae4ccdfa2e3696924247989f", "content_id": "687bbd040290e46a80c0b47adc02de4c73c4a391", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4600, "license_type": "no_license", "max_line_length": 95, "num_lines": 132, "path": "/Master Year 1/Computer Graphics/HW4/renderer/models/Cube2.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "WINDOWS-1252", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.models;\r\nimport renderer.scene.*;\r\n\r\n/**\r\n Create a wireframe model of a cube with its center\r\n at the origin, having edge length 2, and with its\r\n corners at {@code (±1, ±1, ±1)}.\r\n<p>\r\n This version of the cube model has each face of\r\n the cube cut up by an n by m grid of lines.\r\n<p>\r\n Here is a picture showing how the cube's eight corners\r\n are labeled.\r\n<pre>{@code\r\n v[4]\r\n +-----------------+ v[5]\r\n /| /|\r\n / | / |\r\n / | / |\r\n / | / |\r\n v[7] +-----------------+ v[6] |\r\n | | | | y\r\n | | | | |\r\n | | | | |\r\n | v[0] +---------|-------+ v[1] |\r\n | / | / |\r\n | / | / +----. x\r\n | / | / /\r\n |/ |/ /\r\n +-----------------+ /\r\n v[3] v[2] z\r\n}</pre>\r\n\r\n @see Cube\r\n @see Cube3\r\n @see Cube4\r\n*/\r\npublic class Cube2 extends Cube\r\n{\r\n /**\r\n Create a cube with its center at the origin, having edge\r\n length 2, with its corners at {@code (±1, ±1, ±1)}. 
It also has\r\n two perpendicular grid lines going across the middle\r\n of each of the cube's faces.\r\n */\r\n public Cube2( )\r\n {\r\n this(1, 1, 1);\r\n }\r\n\r\n\r\n /**\r\n Create a cube with its center at the origin, having edge\r\n length 2, with its corners at {@code (±1, ±1, ±1)}, and\r\n with each of the cube's faces containing the given number\r\n of grid lines parallel to the x, y, and z directions.\r\n\r\n @param xGrid number of grid lines perpendicular to the x-axis\r\n @param yGrid number of grid lines perpendicular to the y-axis\r\n @param zGrid number of grid lines perpendicular to the z-axis\r\n */\r\n public Cube2(int xGrid, int yGrid, int zGrid)\r\n {\r\n super(); // create the basic cube with 8 vertices and 12 edges\r\n this.name = \"Cube2\";\r\n\r\n int index = 8;\r\n\r\n if (xGrid < 0) xGrid = 0;\r\n if (yGrid < 0) yGrid = 0;\r\n if (zGrid < 0) zGrid = 0;\r\n\r\n double xStep = 2.0 / (1 + xGrid);\r\n double yStep = 2.0 / (1 + yGrid);\r\n double zStep = 2.0 / (1 + zGrid);\r\n\r\n // Grid lines perpendicular to the x-axis.\r\n double x = -1.0;\r\n for (int i = 0; i < xGrid; ++i)\r\n {\r\n x += xStep;\r\n // Start at the top, front edge, go down the front face, and around the cube.\r\n addVertex(new Vertex(x, 1, 1),\r\n new Vertex(x, -1, 1),\r\n new Vertex(x, -1, -1),\r\n new Vertex(x, 1, -1));\r\n addLineSegment(new LineSegment(index+0, index+1),\r\n new LineSegment(index+1, index+2),\r\n new LineSegment(index+2, index+3),\r\n new LineSegment(index+3, index+0));\r\n index += 4;\r\n }\r\n\r\n // Grid lines perpendicular to the y-axis.\r\n double y = -1.0;\r\n for (int i = 0; i < yGrid; ++i)\r\n {\r\n y += yStep;\r\n // Start at the front, right edge, go left across the front face, and around the cube.\r\n addVertex(new Vertex( 1, y, 1),\r\n new Vertex(-1, y, 1),\r\n new Vertex(-1, y, -1),\r\n new Vertex( 1, y, -1));\r\n addLineSegment(new LineSegment(index+0, index+1),\r\n new LineSegment(index+1, index+2),\r\n new LineSegment(index+2, index+3),\r\n new LineSegment(index+3, index+0));\r\n index += 4;\r\n }\r\n\r\n // Grid lines perpendicular to the z-axis.\r\n double z = -1.0;\r\n for (int i = 0; i < zGrid; ++i)\r\n {\r\n z += zStep;\r\n // Start at the top, right edge, go left across the top face, and around the cube.\r\n addVertex(new Vertex( 1, 1, z),\r\n new Vertex(-1, 1, z),\r\n new Vertex(-1, -1, z),\r\n new Vertex( 1, -1, z));\r\n addLineSegment(new LineSegment(index+0, index+1),\r\n new LineSegment(index+1, index+2),\r\n new LineSegment(index+2, index+3),\r\n new LineSegment(index+3, index+0));\r\n index += 4;\r\n }\r\n }\r\n}//Cube2\r\n" }, {
"alpha_fraction": 0.48709186911582947, "alphanum_fraction": 0.5280941724777222, "avg_line_length": 34.58333206176758, "blob_id": "3f73ee723b7cb455fceba925c9809f94a05ea6ba", "content_id": "f2435ed862767f3c5a41097717f04eda5d3a2317", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2640, "license_type": "no_license", "max_line_length": 80, "num_lines": 72, "path": "/Master Year 1/Computer Graphics/HW2/renderer/models/Octahedron.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "WINDOWS-1252", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.models;\r\nimport renderer.scene.*;\r\n\r\n/**\r\n Create a wireframe model of a regular octahedron\r\n with its center at the origin, having side length\r\n {@code sqrt(2) = 1.4142}, with its center plane given\r\n by the four vertices {@code (±1, 0, ±1)}, 
and with\r\n the top and bottom vertices being {@code (0, ±1, 0)}.\r\n<p>\r\n See <a href=\"https://en.wikipedia.org/wiki/Octahedron\" target=\"_top\">\r\n https://en.wikipedia.org/wiki/Octahedron</a>\r\n\r\n @see Tetrahedron\r\n @see Cube\r\n @see Icosahedron\r\n @see Dodecahedron\r\n*/\r\npublic class Octahedron extends Model\r\n{\r\n /**\r\n Create a regular octahedron with its center at the\r\n origin, having side length {@code sqrt(2) = 1.4142},\r\n with its center plane given by the four vertices\r\n {@code (±1, 0, ±1)}. and with the top and bottom\r\n vertices being {@code (0, ±1, 0)}.\r\n */\r\n public Octahedron()\r\n {\r\n super(\"Octahedron\");\r\n\r\n // Create the octahedron's geometry.\r\n // It has 6 vertices and 12 edges.\r\n addVertex(new Vertex( 1, 0, 0), // 4 vertices around the center plane\r\n new Vertex( 0, 0, -1),\r\n new Vertex(-1, 0, 0),\r\n new Vertex( 0, 0, 1),\r\n new Vertex( 0, 1, 0), // vertex at the top\r\n new Vertex( 0, -1, 0)); // vertex at the bottom\r\n/*\r\n // These vertices create an Octahedron with side length 1.\r\n final double sqrt3 = Math.sqrt(3.0);\r\n final double sqrt2 = Math.sqrt(2.0);\r\n addVertex(new Vertex( 0.5, 0, 0.5), // 4 vertices around the center plane\r\n new Vertex(-0.5, 0, 0.5),\r\n new Vertex(-0.5, 0, -0.5),\r\n new Vertex( 0.5, 0, -0.5),\r\n new Vertex( 0, 1/sqrt2, 0), // vertex at the top\r\n new Vertex( 0, -1/sqrt2, 0)); // vertex at the bottom\r\n*/\r\n // Create 12 line segments.\r\n // four line segments around the center plane\r\n addLineSegment(new LineSegment(0, 1),\r\n new LineSegment(1, 2),\r\n new LineSegment(2, 3),\r\n new LineSegment(3, 0));\r\n // edges going to the top vertex\r\n addLineSegment(new LineSegment(0, 4),\r\n new LineSegment(1, 4),\r\n new LineSegment(2, 4),\r\n new LineSegment(3, 4));\r\n // edges going to the bottom vertex\r\n addLineSegment(new LineSegment(0, 5),\r\n new LineSegment(1, 5),\r\n new LineSegment(2, 5),\r\n new LineSegment(3, 5));\r\n }\r\n}//Octahedron\r\n" }, { "alpha_fraction": 0.54205721616745, "alphanum_fraction": 0.562160849571228, "avg_line_length": 37.940887451171875, "blob_id": "8f63ca93af3e742baa6eb37957b71c7700e609bb", "content_id": "3c4dbaee4cbf8107d101dcdc79c69f44e35a021c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 8108, "license_type": "no_license", "max_line_length": 79, "num_lines": 203, "path": "/Master Year 1/Computer Graphics/HW4/renderer/models/TorusSector.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.models;\r\nimport renderer.scene.*;\r\n\r\n/**\r\n Create a wireframe model of a partial torus.\r\n<p>\r\n See <a href=\"https://en.wikipedia.org/wiki/Torus\" target=\"_top\">\r\n https://en.wikipedia.org/wiki/Torus</a>\r\n<p>\r\n This partial torus is the surface of revolution generated by revolving\r\n a sector of the circle in the xy-plane with radius {@code r2} and center\r\n {@code (r1,0,0)} part way around the y-axis. 
We are assuming that\r\n {@code r1 > r2}.\r\n<p>\r\n The whole torus is the surface of revolution generated by revolving\r\n the whole circle in the xy-plane with radius {@code r2} and center\r\n {@code (r1,0,0)} all the way around the y-axis.\r\n<p>\r\n Here are parametric equations for the circle in the xy-plane with\r\n radius {@code r2} and center {@code (r1,0,0)} and parameterized\r\n starting from the top, with parameter {@code 0 <= phi <= 2*PI}.\r\n <pre>{@code\r\n x(phi) = r1 + r2 * sin(phi)\r\n y(phi) = r2 * cos(phi)\r\n z(phi) = 0\r\n }</pre>\r\n Here is the 3D rotation matrix that rotates around the y-axis\r\n by {@code theta} radians with {@code 0 <= theta <= 2*PI}.\r\n <pre>{@code\r\n [ cos(theta) 0 sin(theta)]\r\n [ 0 1 0 ]\r\n [-sin(theta) 0 cos(theta)]\r\n }</pre>\r\n If we multiply the rotation matrix with the circle parameterization,\r\n we get a parameterization of the torus.\r\n <pre>{@code\r\n [ cos(theta) 0 sin(theta)] [r1 + r2 * sin(phi)]\r\n [ 0 1 0 ] * [ r2 * cos(phi)]\r\n [-sin(theta) 0 cos(theta)] [ 0 ]\r\n\r\n = ( r1*cos(theta) + r2*cos(theta)*sin(phi).\r\n r2*cos(phi),\r\n -r1*sin(theta) - r2*sin(theta)*sin(phi) )\r\n\r\n = ( (r1 + r2*sin(phi)) * cos(theta),\r\n r2*cos(phi),\r\n -(r1 + r2*sin(phi)) * sin(theta) )\r\n }</pre>\r\n See\r\n <a href=\"http://en.wikipedia.org/wiki/Torus#Geometry\" target=\"_top\">\r\n http://en.wikipedia.org/wiki/Torus#Geometry</a>\r\n\r\n @see Torus\r\n*/\r\npublic class TorusSector extends Model\r\n{\r\n /**\r\n Create a partial torus with half the circle of revolution with radius 3/4\r\n and a cross section that is half the circle of longitude with radius 1/4.\r\n */\r\n public TorusSector( )\r\n {\r\n this(0.75, 0.25, Math.PI/2, 3*Math.PI/2, Math.PI, 2*Math.PI, 6, 8);\r\n }\r\n\r\n\r\n /**\r\n Create a partial torus with a partial circle of revolution\r\n with radius {@code r1} and a cross section circle (circle of\r\n longitude) with radius {@code r2}.\r\n <p>\r\n If {@code theta1 > 0} or {@code theta2 < 2*PI}, then the (partial)\r\n circle of revolution is the circular sector from angle {@code theta1}\r\n to angle {@code theta2}. 
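For example, the no-argument\r\n constructor above passes {@code theta1 = Math.PI/2} and\r\n {@code theta2 = 3*Math.PI/2}, which restricts the model to points with\r\n {@code x <= 0}, since {@code cos(theta) <= 0} on that range and\r\n {@code r1 + r2*sin(phi) > 0} when {@code r1 > r2}. 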
In other words, the (partial) circles of\r\n latitude in the model extend from angle {@code theta1} to angle\r\n {@code theta2}.\r\n <p>\r\n The last two parameters determine the number of circles of longitude\r\n and the number of (partial) circles of latitude in the model.\r\n <p>\r\n If there are {@code k} circles of longitude, then each (partial)\r\n circle of latitude will have {@code k-1} line segments.\r\n If there are {@code n} (partial) circles of latitude, then each\r\n circle of longitude will have {@code n} line segments.\r\n <p>\r\n There must be at least four circles of longitude and at least\r\n three circles of latitude.\r\n\r\n @param r1 radius of the circle of revolution\r\n @param r2 radius of the cross section circle (circle of longitude)\r\n @param theta1 beginning longitude angle for the circle of revolution\r\n @param theta2 ending longitude angle for the circle of revolution\r\n @param n number of circles of latitude\r\n @param k number of circles of longitude\r\n */\r\n public TorusSector(double r1, double r2,\r\n double theta1, double theta2,\r\n int n, int k)\r\n {\r\n this(r1, r2, theta1, theta2, 0, 2*Math.PI, n+1, k);\r\n }\r\n\r\n\r\n /**\r\n Create a partial torus with a partial circle of revolution with\r\n radius {@code r1} and a partial cross section circle with radius\r\n {@code r2}.\r\n <p>\r\n If {@code phi1 > 0} or {@code phi2 < 2*PI}, then the (partial) cross\r\n section circle is the circular sector from angle {@code phi1} to angle\r\n {@code phi2}. In other words, the (partial) circles of logitude in the\r\n model extend from angle {@code phi1} to angle {@code phi2}.\r\n <p>\r\n If {@code theta1 > 0} or {@code theta2 < 2*PI}, then the (partial) circle\r\n of revolution is the circular sector from angle {@code theta1} to angle\r\n {@code theta2}. 
In other words, the (partial) circles of latitude in\r\n the model extend from angle {@code theta1} to angle {@code theta2}.\r\n <p>\r\n The last two parameters determine the number of (partial) circles of\r\n longitude and the number of (partial) circles of latitude in the model.\r\n <p>\r\n If there are {@code k} circles of longitude, then each (partial)\r\n circle of latitude will have {@code k-1} line segments.\r\n If there are {@code n} (partial) circles of latitude, then each\r\n circle of longitude will have {@code n-1} line segments.\r\n <p>\r\n There must be at least four circles of longitude and at least\r\n four circles of latitude.\r\n\r\n @param r1 radius of the circle of revolution\r\n @param r2 radius of the cross section circle (circle of longitude)\r\n @param theta1 beginning longitude angle for the circle of revolution\r\n @param theta2 ending longitude angle for the circle of revolution\r\n @param phi1 beginning latitude angle for the cross section circle\r\n @param phi2 ending latitude angle for the cross section circle\r\n @param n number of circles of latitude\r\n @param k number of circles of longitude\r\n */\r\n public TorusSector(double r1, double r2,\r\n double theta1, double theta2,\r\n double phi1, double phi2,\r\n int n, int k)\r\n {\r\n super(\"Torus Sector\");\r\n\r\n if (n < 4) n = 4;\r\n if (k < 4) k = 4;\r\n\r\n // Create the torus's geometry.\r\n\r\n double deltaPhi = (phi2 - phi1) / (n - 1);\r\n double deltaTheta = (theta2 - theta1) / (k - 1);\r\n\r\n // An array of vertices to be used to create line segments.\r\n Vertex[][] v = new Vertex[n][k];\r\n\r\n // Create all the vertices.\r\n for (int j = 0; j < k; ++j) // choose a rotation around the y-axis\r\n {\r\n double c1 = Math.cos(theta1 + j * deltaTheta);\r\n double s1 = Math.sin(theta1 + j * deltaTheta);\r\n for (int i = 0; i < n; ++i) // go around a cross section circle\r\n {\r\n double c2 = Math.cos(phi1 + i * deltaPhi);\r\n double s2 = Math.sin(phi1 + i * deltaPhi);\r\n v[i][j] = new Vertex( (r1 + r2*s2) * c1,\r\n r2*c2,\r\n -(r1 + r2*s2) * s1 );\r\n }\r\n }\r\n\r\n // Add all of the vertices to this model.\r\n for (int i = 0; i < n; ++i)\r\n {\r\n for (int j = 0; j < k; ++j)\r\n {\r\n addVertex( v[i][j] );\r\n }\r\n }\r\n\r\n // Create the vertical (partial) cross-section circles.\r\n for (int j = 0; j < k; ++j) // choose a rotation around the y-axis\r\n {\r\n for (int i = 0; i < n - 1; ++i) // go around a cross section circle\r\n { // v[i][j] v[i+1][j]\r\n addLineSegment(new LineSegment( (i * k) + j, ((i+1) * k) + j ));\r\n }\r\n }\r\n\r\n // Create all the horizontal (partial) circles around the torus.\r\n for (int i = 0; i < n; ++i) //choose a rotation around the cross section\r\n {\r\n for (int j = 0; j < k - 1; ++j) // go around a horizontal circle\r\n { // v[i][j] v[i][j+1]\r\n addLineSegment(new LineSegment( (i * k) + j, (i * k) + (j+1) ));\r\n }\r\n }\r\n }\r\n}//TorusSector\r\n" }, { "alpha_fraction": 0.4880809783935547, "alphanum_fraction": 0.5244373679161072, "avg_line_length": 28.035999298095703, "blob_id": "e54ec00d701832281984fbfa0f66f2d305cca973", "content_id": "8d742d493310ac8e311c76d9a940d177ec746bf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 7509, "license_type": "no_license", "max_line_length": 94, "num_lines": 250, "path": "/Master Year 1/Computer Graphics/HW2/Hw2_v2.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n Course: CS 45500 / CS 51580\r\n Name: Marcellus Hunt\r\n Email: 
[email protected]\r\n Assignment: 2\r\n*/\r\n\r\nimport renderer.scene.*;\r\nimport renderer.pipeline.*;\r\nimport renderer.framebuffer.*;\r\n\r\nimport java.awt.Color;\r\nimport java.util.LinkedList;\r\n\r\n/**\r\n\r\n*/\r\npublic class Hw2_v2\r\n{\r\n final static int WIDTH = 800;\r\n final static int HEIGHT = 800;\r\n final static int LENGTH = 30;\r\n\r\n public static void main(String[] args)\r\n {\r\n // Turn on clipping in the rasterizer.\r\n Rasterize_Clip.doClipping = true;\r\n\r\n Scene scene = new Scene();\r\n\r\n scene.addModel( new P(), new N(), new W() );\r\n\r\n // Push the models away from where the camera is.\r\n for (Model m : scene.modelList)\r\n {\r\n for (int i = 0; i < m.vertexList.size(); ++i)\r\n {\r\n final Vertex v = m.vertexList.get(i);\r\n m.vertexList.set(i, new Vertex(v.x,\r\n v.y,\r\n v.z - 2));\r\n }\r\n }\r\n\r\n // Give each model an initial location.\r\n moveModel(scene.getModel(0), -1.6, 0); // P\r\n moveModel(scene.getModel(1), -0.5, 0); // N\r\n moveModel(scene.getModel(2), 0.6, 0); // W\r\n\r\n // Create an empty List of FrameBuffers.\r\n final LinkedList<FrameBuffer> fbList = new LinkedList<>();\r\n\r\n // Initialize the list of FrameBuffers and create the initial segment of frames.\r\n for (int i = 0; i < LENGTH; ++i)\r\n {\r\n // create a new FrameBuffer\r\n final FrameBuffer fb = new FrameBuffer(WIDTH,HEIGHT);\r\n\r\n // render the current Scene into the new FrameBuffer\r\n Pipeline.render(scene, fb.vp);\r\n\r\n // add the new FrameBuffer at the front to the list\r\n fbList.addFirst(fb);\r\n\r\n // save a post processed frame\r\n postProcess(fbList).dumpFB2File(String.format(\"PPM_Hw2_v2_Frame%03d.ppm\", i));\r\n // update the Scene\r\n updateScene(scene, i);\r\n }\r\n\r\n \r\n // Now cycle through the list of FrameBuffers, keeping its length constant.\r\n for (int i = LENGTH; i <= 450; ++i)\r\n {\r\n // remove the oldest FrameBuffer from the tail of the list\r\n FrameBuffer old = fbList.removeLast();\r\n \r\n //clear framebuffer\r\n old.clearFB();\r\n\r\n // render the current Scene into the recycled FrameBuffer\r\n Pipeline.render(scene, old.vp);\r\n \r\n // add the recycled FrameBuffer at the front of the list\r\n fbList.addFirst(old);\r\n\r\n // save a post processed frame\r\n postProcess(fbList).dumpFB2File(String.format(\"PPM_Hw2_v2_Frame%03d.ppm\", i));\r\n\r\n // update the Scene\r\n updateScene(scene, i);\r\n }\r\n\r\n // Empty out the list and create the last segment of frames.\r\n for (int i = 451; i < 450 + LENGTH; ++i)\r\n {\r\n // remove the oldest FrameBuffer from the list\r\n fbList.removeLast();\r\n\r\n // save a post processed frame\r\n postProcess(fbList).dumpFB2File(String.format(\"PPM_Hw2_v2_Frame%03d.ppm\", i));\r\n }\r\n }\r\n\r\n\r\n private static void updateScene(Scene scene, int frameNumber)\r\n {\r\n //There are six movements that need to be performed. There are 450 frames to account for\r\n //The movemnts that move 1 unit takes up 37 frames each rounding to 150. \r\n //Three units down-right takes up 225 frames. 
Two units left takes up 75 frames\r\n\r\n \r\n //one units up\r\n if (frameNumber < 38)\r\n {\r\n moveModels(scene, 0, (1.0/37.5));\r\n }\r\n \r\n //three units down-right\r\n else if (frameNumber >= 37 && frameNumber < 262)\r\n {\r\n moveModels(scene,(3.0/225.0),-1.0*(3.0/225.0));\r\n } \r\n\r\n //one unit left\r\n else if (frameNumber >= 262 && frameNumber < 300)\r\n {\r\n moveModels(scene, -1.0*(1.0/37.5), 0);\r\n }\r\n \r\n \r\n //one unit up-left\r\n else if(frameNumber>= 300 && frameNumber <338)\r\n {\r\n moveModels(scene, -1.0*(1.0/37.5), 1.0/37.5);\r\n }\r\n\r\n //two units left\r\n else if(frameNumber >=338 && frameNumber < 412)\r\n {\r\n moveModels(scene, -1.0*(2.0/75), 0);\r\n }\r\n\r\n //one unit up-right\r\n else if(frameNumber >= 412 && frameNumber < 450)\r\n {\r\n moveModels(scene, 1.0/37.5, 1.0/37.5);\r\n }\r\n }\r\n\r\n\r\n private static void moveModels(Scene scene, double deltaX, double deltaY)\r\n {\r\n //Move all models in the scene by calling moveModel functions\r\n for (Model x: scene.modelList)\r\n {\r\n moveModel(x, deltaX, deltaY);\r\n }\r\n }\r\n\r\n\r\n private static void moveModel(Model model, double deltaX, double deltaY)\r\n {\r\n //Move Model by adding delta x and y to the model's current vertex\r\n for (int i = 0; i < model.vertexList.size(); i++)\r\n {\r\n final Vertex v = model.vertexList.get(i);\r\n model.vertexList.set(i,new Vertex(v.x+deltaX, v.y+deltaY,v.z));\r\n }\r\n \r\n }\r\n\r\n\r\n /**\r\n Use the List of FrameBuffer objects to compute, and return,\r\n a new FrameBuffer object.\r\n */\r\n private static FrameBuffer postProcess(final LinkedList<FrameBuffer> fbList)\r\n {\r\n //counter will be used to see how far I am in the frame buffer list\r\n int counter =1;\r\n Color dim = new Color(1.0f,1.0f,1.0f);\r\n Color white = new Color(255,255,255);\r\n final FrameBuffer resultFB = new FrameBuffer(WIDTH, HEIGHT);\r\n\r\n // Iterate through the list of source framebuffers and copy\r\n // every white pixel from a source framebuffer into resultFB,\r\n for(FrameBuffer B: fbList)\r\n {\r\n //if statements that dims the color every time a FrameBuffer is iterated\r\n if (counter <3)\r\n {\r\n dim = new Color(1.0f,1.0f,1.0f);\r\n }\r\n else if (counter >=3 && counter <6)\r\n {\r\n dim = new Color(0.9f,0.9f,0.9f);\r\n }\r\n else if(counter>=6 && counter <9)\r\n {\r\n dim = new Color(0.8f,0.8f,0.8f);\r\n }\r\n else if(counter>=9 && counter <12)\r\n {\r\n dim = new Color(0.7f,0.7f,0.7f);\r\n }\r\n else if(counter>=12 && counter <15)\r\n {\r\n dim = new Color(0.6f,0.6f,0.6f);\r\n }\r\n else if(counter>=15 && counter <18)\r\n {\r\n dim = new Color(0.5f,0.5f,0.5f);\r\n }\r\n else if(counter>=18 && counter <21)\r\n {\r\n dim = new Color(0.4f,0.4f,0.4f);\r\n }\r\n else if(counter>=21 && counter <24)\r\n {\r\n dim = new Color(0.3f,0.3f,0.3f);\r\n }\r\n else if(counter>=24 && counter <27)\r\n {\r\n dim = new Color(0.2f,0.2f,0.2f);\r\n }\r\n else if(counter>=27 && counter <29)\r\n {\r\n dim = new Color(0.1f,0.1f,0.1f);\r\n }\r\n //used double nested for-loop to loop through framebuffer\r\n for (int i = 0; i < WIDTH; i++)\r\n {\r\n for (int j=0; j < HEIGHT; j++)\r\n {\r\n //if pixel is white then set it\r\n if (B.getPixelFB(i, j).equals(white))\r\n resultFB.setPixelFB(i, j, dim);\r\n }\r\n }\r\n counter++;\r\n }\r\n // but reduce the brightness of the white pixel by an amount\r\n // proportional to how \"old\" the source frame is. 
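(the if/else ladder above approximates this: roughly every 3 frames of age, the grey level drops by another 0.1.)\r\n      // 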
Don't let any\r\n // \"older\" pixel overwrite a \"newer\" pixel already in resultFB.\r\n\r\n return resultFB;\r\n }\r\n\r\n}\r\n" }, { "alpha_fraction": 0.6489741206169128, "alphanum_fraction": 0.6538804769515991, "avg_line_length": 34.587303161621094, "blob_id": "a06fd688daa1e08abc5f8c71b4244c3eb401e3b4", "content_id": "3b2452be11327790a4f36a720e0bcc8ccc8fc73a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2242, "license_type": "no_license", "max_line_length": 126, "num_lines": 63, "path": "/Year 4/csci487Group4Project-makingGraphs/CityStructure/Connection.py", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#Connection.py\n#Created 4/5/20 by Jasper Heist\n#contains definition of a connection\n\nfrom Agents.Vehicles import Car, Semi\nfrom Utilities.UIDGenerator import NumberGenerator\nfrom Utilities.Definitions import AverageCarLength\n\n\nclass Connection(object):\n \"\"\"class representing a ONE-DIRECTIONAL segment of a road that connects two intersections together\"\"\"\n\n def __init__(self, name=\"unamed\", length=AverageCarLength*10, lanes=2, speed_limit=30):\n \"\"\"initializer (set default length to 10 car lengths)\"\"\"\n object.__init__(self)\n self.__name = name\n #length of road-the idea here is to get a max amount of people that can be on a road by making sure the length of all \n #the cars on the road does not exceed the length of this segement of road itself\n self.__length = length\n #all vehicles on this section of road\n self.__vehicles = list()\n #number of lanes in this connection\n self.__lanes = lanes\n #unique identifier for this connection\n self.__uid = NumberGenerator().connection_uid()\n #max speed cars can go\n self.__speed_limit = speed_limit\n #intersection the cars come from as they enter this connection\n self.__input_intersection = None\n #intersection this connection feed into\n self.__output_intersection = None\n\n def __print_info(self):\n print(\" Connection \" + str(self.uid))\n\n def __get_name(self):\n \"\"\"gets name of road\"\"\"\n return self.__name\n\n def __get_uid(self):\n \"\"\"return unique ID for Connection\"\"\"\n return self.__uid\n\n def __get_input_inter(self):\n return self.__input_intersection\n \n def __set_input_inter(self, intersection:int):\n self.__input_intersection = intersection\n\n def __get_output_inter(self):\n return self.__output_intersection\n \n def __set_output_inter(self, intersection:int):\n self.__output_intersection = intersection\n\n #name of road\n name = property(fget = __get_name)\n \n uid = property(fget = __get_uid)\n\n input_intersection = property(fget=__get_input_inter, fset=__set_input_inter)\n\n output_intersection = property(fget=__get_output_inter, fset=__set_output_inter)\n" }, { "alpha_fraction": 0.56977778673172, "alphanum_fraction": 0.574222207069397, "avg_line_length": 30.60869598388672, "blob_id": "7dd204ff7e197ad14e322287542f8eab8f9b0029", "content_id": "79aa7cbdeaa7d55bea37bfb785b3237e7dcafda6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2250, "license_type": "no_license", "max_line_length": 76, "num_lines": 69, "path": "/Year 3/Assignment 4/Array_Base.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "//Array_Base class\r\n#include <cstring> // for size_t definition\r\n\r\n\r\ntemplate <typename T>\r\nclass Array_Base\r\n{\t\r\npublic:\r\n\r\n\ttypedef T type;\r\n\r\n//virtual methods\r\n/**\r\n * Get the character at the specified 
index. If the \\a index is not within\r\n * the range of the array, then std::out_of_range exception is thrown.\r\n *\r\n * @param[in] index Zero-based location\r\n * @return Character at \\index\r\n * @exception std::out_of_range Invalid index value\r\n */ \r\n virtual T get (size_t index) const = 0;\r\n\r\n /** \r\n * Set the character at the specified \\a index. If the \\a index is not\r\n * within range of the array, then std::out_of_range exception is \r\n * thrown.\r\n *\r\n * @param[in] index Zero-based location\r\n * @param[in] value New value for character\r\n * @exception std::out_of_range Invalid \\a index value\r\n */\r\n virtual void set (size_t index, T value) = 0;\r\n\r\n /**\r\n * Locate the specified character in the array. The index of the first\r\n * occurrence of the character is returned. If the character is not\r\n * found in the array, then -1 is returned.\r\n *\r\n * @param[in] ch Character to search for\r\n * @return Index value of the character\r\n * @retval -1 Character not found\r\n */\r\n virtual int find (T element) const = 0;\r\n\r\n /**\r\n * @overload\r\n *\r\n * This version allows you to specify the start index of the search. If\r\n * the start index is not within the range of the array, then the\r\n * std::out_of_range exception is thrown.\r\n *\r\n * @param[in] ch Character to search for\r\n * @param[in] start Index to begin search\r\n * @return Index value of first occurrence\r\n * @retval -1 Character not found\r\n * @exception std::out_of_range Invalid index\r\n */\r\n virtual int find (T element, size_t start) const = 0;\r\n \r\n //virtual fill methods\r\n virtual void fill (T element) = 0;\r\n \r\n //return current size of array\r\n virtual size_t size (void) const = 0;\r\n \r\n //return max size of array\r\n virtual size_t max_size (void) const = 0;\r\n \r\n};\r\n" }, { "alpha_fraction": 0.4865359365940094, "alphanum_fraction": 0.4901633858680725, "avg_line_length": 26.705440521240234, "blob_id": "3aa05f17513c39b439251be7b53874f00ee3c00c", "content_id": "bb118d3addbbc73f4630a93d6861ecfc97cea852", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 30600, "license_type": "no_license", "max_line_length": 87, "num_lines": 1066, "path": "/Master Year 1/Programming Languages and Compilers/HW3/hw3/Evaluate_6a.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/**\r\n This program, as distributed, interprets Language_6.\r\n You are to modify this program so it interprets the\r\n following modified version of Language_6.\r\n\r\n Prog ::= Exp\r\n | '(' 'prog' Exp+ ')'\r\n\r\n Exp ::= For\r\n | Repeat\r\n | PostInc\r\n | PreInc\r\n | PostDec\r\n | PreDec\r\n | Spaceship\r\n | If\r\n | While\r\n | Set\r\n | Var\r\n | Begin\r\n | Print\r\n | AExp\r\n | BExp\r\n | INTEGER\r\n | BOOLEAN\r\n | VARIABLE\r\n\r\n For ::= '(' 'for' Exp Exp Exp Exp ')'\r\n\r\n Repeat ::= '(' 'repeat' Exp Exp ')'\r\n\r\n PostInc ::= '(' '++' VARIABLE ')'\r\n PostDec ::= '(' '--' VARIABLE ')'\r\n PreInc ::= '(' '+++' VARIABLE ')'\r\n PreDec ::= '(' '---' VARIABLE ')'\r\n\r\n Spaceship ::= '(' '<=>' Exp Exp ')'\r\n\r\n If ::= '(' 'if' Exp Exp Exp ')'\r\n\r\n While ::= '(' 'while' Exp Exp ')'\r\n\r\n Set ::= '(' 'set' VARIABLE Exp ')'\r\n\r\n Var ::= '(' 'var' VARIABLE Exp ')'\r\n\r\n Begin ::= '(' 'begin' Exp+ ')'\r\n\r\n Print ::= '(' 'print' Exp ')'\r\n\r\n BExp ::= '(' '||' Exp Exp+ ')'\r\n | '(' '&&' Exp Exp+ ')'\r\n | '(' '!' 
Exp ')'\r\n | '(' EqOp Exp Exp ')'\r\n | '(' RelOp Exp Exp ')'\r\n\r\n EqOp ::= '==' | '!='\r\n RelOp ::= '<' | '>' | '<=' | '>='\r\n\r\n AExp ::= '(' '+' Exp Exp* ')'\r\n | '(' '-' Exp Exp? ')'\r\n | '(' '*' Exp Exp+ ')'\r\n | '(' '/' Exp Exp ')'\r\n | '(' '%' Exp Exp ')'\r\n | '(' '^' Exp Exp ')'\r\n\r\n INTEGER ::= [-|+][0-9]+\r\n BOOLEAN ::= 'true' | 'false'\r\n VARIABLE ::= [a-zA-Z][a-zA-Z0-9]*\r\n*/\r\n\r\npublic class Evaluate_6a\r\n{\r\n public static int DEBUG = 1;\r\n\r\n\r\n /**\r\n The methods evaluateProg(), evaluateExp(), evaluateAexp(), evaluateBexp(),\r\n and evaluateRexp() are essentially a post-order traversal of the abstract\r\n syntax tree.\r\n */\r\n public static Value eval(Tree tree) throws EvalException\r\n {\r\n // Instantiate a global environment object.\r\n final Environment env = new Environment();\r\n\r\n return evaluateProg( tree, env );\r\n }//eval()\r\n\r\n\r\n // Evaluate a prog\r\n public static Value evaluateProg(Tree tree, Environment env) throws EvalException\r\n {\r\n final Value result; // a blank final\r\n\r\n // Check whick kind of Prog we have.\r\n if ( ! tree.getElement().equals(\"prog\") )\r\n {\r\n // Evaluate the single expression.\r\n result = evaluateExp( tree, env );\r\n }\r\n else\r\n {\r\n // Evaluate each Exp in the Prog.\r\n // Any Var expressions will have the side effect\r\n // of putting a variable in the environment.\r\n // Any Set expressions will have the side effect\r\n // of changing a value in the environment.\r\n // Any Print expressions will have the side effect\r\n // of printing an output.\r\n // Any other expressions would be pointless!\r\n for (int i = 0; i < tree.degree()-1; i++)\r\n {\r\n evaluateExp( tree.getSubTree(i), env );\r\n }\r\n\r\n // Evaluate the last expression and use its\r\n // value as the value of the prog expression.\r\n result = evaluateExp( tree.getSubTree(tree.degree()-1), env );\r\n }\r\n\r\n return result;\r\n }//evaluateProg()\r\n\r\n\r\n // Evaluate an expression\r\n public static Value evaluateExp(Tree tree, Environment env) throws EvalException\r\n {\r\n final Value result; // a blank final\r\n\r\n final String node = tree.getElement();\r\n\r\n if ( node.equals(\"if\") )\r\n {\r\n result = evaluateIf( tree, env );\r\n }\r\n else if ( node.equals(\"while\") )\r\n {\r\n result = evaluateWhile( tree, env );\r\n }\r\n else if ( node.equals(\"set\") )\r\n {\r\n result = evaluateSet( tree, env );\r\n }\r\n else if ( node.equals(\"var\") )\r\n {\r\n result = evaluateVar( tree, env );\r\n }\r\n else if ( node.equals(\"begin\") )\r\n {\r\n result = evaluateBegin( tree, env );\r\n }\r\n else if ( node.equals(\"print\") )\r\n {\r\n result = evaluatePrint(tree, env);\r\n }\r\n else if ( node.equals(\"&&\")\r\n || node.equals(\"||\")\r\n || node.equals(\"!\") )\r\n {\r\n result = evaluateBexp(tree, env); // boolean expression\r\n }\r\n else if ( node.equals(\"==\")\r\n || node.equals(\"!=\") )\r\n {\r\n result = evaluateEqexp(tree, env); // equality operator\r\n }\r\n else if ( node.equals(\"<\")\r\n || node.equals(\">\")\r\n || node.equals(\"<=\")\r\n || node.equals(\">=\") )\r\n {\r\n result = evaluateRelexp(tree, env); // relational operator\r\n }\r\n else if ( node.equals(\"+\")\r\n || node.equals(\"-\")\r\n || node.equals(\"*\")\r\n || node.equals(\"/\")\r\n || node.equals(\"%\")\r\n || node.equals(\"^\") )\r\n {\r\n result = evaluateAexp(tree, env); // arithmetic expression\r\n }\r\n else if (node.equals(\"<=>\") )\r\n {\r\n result = spaceship(tree, env);\r\n }\r\n else if (node.equals(\"++\")\r\n 
||node.equals(\"+++\") )\r\n {\r\n result = increment(tree,env);\r\n }\r\n\r\n else if (node.equals(\"--\")\r\n || node.equals(\"---\"))\r\n {\r\n result = decrement(tree,env);\r\n }\r\n else if (node.equals(\"for\"))\r\n {\r\n result = For(tree, env);\r\n }\r\n else if (node.equals(\"repeat\"))\r\n {\r\n result = Repeat(tree, env);\r\n }\r\n else if ( tree.degree() == 0 )\r\n {\r\n if ( node.equals(\"true\") || node.equals(\"false\") )\r\n {\r\n result = new Value( node.equals(\"true\") );\r\n }\r\n else if ( node.matches(\"^[-|+]*[0-9][0-9]*\") )\r\n {\r\n result = new Value( Integer.parseInt( node ) );\r\n }\r\n else if ( env.defined(node) ) // a variable\r\n {\r\n result = env.lookUp( node );\r\n }\r\n else // runtime check\r\n {\r\n throw new EvalException(\"undefined variable: \" + node);\r\n }\r\n }\r\n else\r\n {\r\n throw new EvalException(\"invalid expression: \" + tree);\r\n }\r\n\r\n return result;\r\n }//evaluateExp()\r\n\r\n //for expression\r\n private static Value For(Tree tree, Environment env) throws EvalException\r\n {\r\n\r\n //runtime check\r\n //check if it has 3 children nodes before it; else throw error\r\n if(tree.degree()!=4 )\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n\r\n // Create a new Environment object chained to (or \"nested in\")\r\n // the previous (\"outer\") environment object.\r\n final Environment newEnv = new Environment(env);\r\n\r\n //evaluate first expression once, which is intitalization\r\n evaluateExp( tree.getSubTree(0), newEnv );\r\n\r\n //evaluate second expression which is the bool condition\r\n Value bool = evaluateExp(tree.getSubTree(1), newEnv);\r\n\r\n // do a runtime type check\r\n if ( ! bool.tag.equals(Value.BOOL_TAG) )\r\n {\r\n throw new EvalException(\"illegal boolean expression: \" + tree);\r\n }\r\n \r\n //while bool is true,\r\n while(bool.valueB)\r\n {\r\n \r\n //evaluate fourth expression (body)\r\n evaluateExp(tree.getSubTree(3), newEnv);\r\n\r\n //next evaluate third expression (update)\r\n evaluateExp(tree.getSubTree(2), newEnv);\r\n\r\n //re-evaluate bool condition\r\n bool = evaluateExp(tree.getSubTree(1), newEnv);\r\n\r\n //runtime check\r\n if ( ! bool.tag.equals(Value.BOOL_TAG) )\r\n {\r\n throw new EvalException(\"illegal boolean expression: \" + tree);\r\n }\r\n\r\n }\r\n\r\n return new Value(false);\r\n }\r\n\r\n //repeat statement\r\n private static Value Repeat(Tree tree, Environment env) throws EvalException\r\n {\r\n if ( 2 != tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"incorrect repeat expression: \" + tree);\r\n }\r\n\r\n //evaluate body\r\n // evaluate the body of the loop (for its side effects)\r\n Value body = evaluateExp( tree.getSubTree(0), env );\r\n\r\n //evaluate condition\r\n Value condition = evaluateExp(tree.getSubTree(0),env);\r\n\r\n //run time check\r\n if ( ! condition.tag.equals(Value.BOOL_TAG) )\r\n {\r\n throw new EvalException(\"illegal boolean expression: \" + tree);\r\n }\r\n\r\n while(condition.valueB)\r\n {\r\n //evaluate body again\r\n body = evaluateExp(tree.getSubTree(0), env);\r\n\r\n //re-evaluate condition\r\n condition = evaluateExp(tree.getSubTree(1), env);\r\n\r\n //runtime check\r\n if ( ! 
condition.tag.equals(Value.BOOL_TAG) )\r\n {\r\n throw new EvalException(\"illegal boolean expression: \" + tree);\r\n }\r\n \r\n }\r\n \r\n return body;\r\n\r\n }\r\n\r\n private static Value spaceship(Tree tree, Environment env) throws EvalException\r\n {\r\n\r\n //runtime check\r\n if (tree.degree() != 2)\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n\r\n int result = 0;\r\n\r\n //get elements being comapred\r\n Value valueL = evaluateExp( tree.getSubTree(0), env );\r\n Value valueR = evaluateExp( tree.getSubTree(1), env );\r\n\r\n //runtime check \r\n if (valueL.tag.equals(Value.BOOL_TAG) && valueR.tag.equals(Value.INT_TAG))\r\n {\r\n throw new EvalException(\"wrong type of arguments: \" + tree);\r\n }\r\n\r\n //runtime check \r\n if (valueL.tag.equals(Value.INT_TAG) && valueR.tag.equals(Value.BOOL_TAG))\r\n {\r\n throw new EvalException(\"wrong type of arguments: \" + tree);\r\n }\r\n \r\n // boolean tags\r\n if (valueL.tag.equals(Value.BOOL_TAG))\r\n {\r\n boolean resL = valueL.valueB;\r\n boolean resR = valueR.valueB;\r\n\r\n if(resL == false && resR == true)\r\n {\r\n result = -1;\r\n }\r\n\r\n else if(resL == true && resR == false)\r\n {\r\n result = 1;\r\n }\r\n\r\n else \r\n {\r\n result = 0;\r\n }\r\n\r\n }\r\n \r\n // int tags\r\n else if ( valueL.tag.equals(Value.INT_TAG) )\r\n {\r\n int resultL = valueL.valueI;\r\n int resultR = valueR.valueI;\r\n\r\n if (resultL < resultR)\r\n result = -1;\r\n\r\n else if (resultL > resultR)\r\n result = 1;\r\n\r\n else\r\n result = 0;\r\n }\r\n \r\n return new Value (result);\r\n \r\n }\r\n\r\n //pre/post increment function\r\n private static Value increment(Tree tree, Environment env) throws EvalException\r\n {\r\n \r\n String node = tree.getElement();\r\n\r\n //get value being incremented\r\n Value temp = evaluateExp( tree.getSubTree(0), env );\r\n \r\n if (tree.degree() > 1 ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n\r\n String strTemp = tree.getSubTree(0).getElement();\r\n\r\n if(!env.defined(strTemp) || strTemp.matches(\"^[-|+]*[0-9][0-9]*\"))\r\n {\r\n throw new EvalException(\"cannot increment a literal: \" + tree);\r\n }\r\n\r\n int result = 0;\r\n boolean boolRes = false;\r\n //post increment \r\n if (node.equals(\"++\"))\r\n {\r\n //boolean increment\r\n if(temp.tag.equals(Value.BOOL_TAG))\r\n {\r\n \r\n //if temp boolean is true\r\n if (temp.valueB == true)\r\n {\r\n boolRes = true;\r\n\r\n return new Value(boolRes);\r\n }\r\n\r\n else \r\n {\r\n //boolRes hold temp value\r\n boolRes = temp.valueB;\r\n\r\n //change value of temp\r\n temp.valueB = !boolRes;\r\n\r\n //update temp\r\n env.update(node,temp);\r\n\r\n //change boolRes back to true because it's increment\r\n boolRes = true;\r\n \r\n return new Value(!boolRes);\r\n\r\n }\r\n \r\n }\r\n\r\n //int increment\r\n else\r\n {\r\n //access value\r\n //then increment\r\n result= temp.valueI++;\r\n env.update(node, temp);\r\n }\r\n //return new Value(boolRes);\r\n }\r\n //pre increment\r\n else if (node.equals(\"+++\"))\r\n {\r\n //boolean increment\r\n if(temp.tag.equals(Value.BOOL_TAG))\r\n {\r\n boolRes = true;\r\n temp.valueB = boolRes;\r\n env.update(node,temp);\r\n return new Value(boolRes);\r\n\r\n }\r\n\r\n //int increment\r\n else \r\n {\r\n //access value\r\n //then increment\r\n result= ++temp.valueI;\r\n env.update(node, temp);\r\n }\r\n \r\n }\r\n return new Value(result);\r\n }\r\n\r\n //pre/post decrement function\r\n private static Value decrement(Tree tree, 
Environment env) throws EvalException\r\n {\r\n String node = tree.getElement();\r\n\r\n //get value being incremented\r\n Value temp = evaluateExp( tree.getSubTree(0), env );\r\n\r\n \r\n if (tree.degree() > 1 ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n\r\n String strTemp = tree.getSubTree(0).getElement();\r\n\r\n if(!env.defined(strTemp) || strTemp.matches(\"^[-|+]*[0-9][0-9]*\"))\r\n {\r\n throw new EvalException(\"cannot decrement a literal: \" + tree);\r\n }\r\n\r\n int result = 0;\r\n boolean boolRes = false;\r\n\r\n //post decrement\r\n if (node.equals(\"--\"))\r\n {\r\n //boolean decrement\r\n if(temp.tag.equals(Value.BOOL_TAG))\r\n {\r\n \r\n //if temp boolean is true\r\n if (temp.valueB == false)\r\n {\r\n boolRes = false;\r\n\r\n return new Value(boolRes);\r\n }\r\n\r\n else \r\n {\r\n //boolRes hold temp value\r\n boolRes = temp.valueB;\r\n\r\n //change value of temp\r\n temp.valueB = !boolRes;\r\n\r\n //update temp\r\n env.update(node,temp);\r\n\r\n //change boolRes back to false because it's decrement\r\n boolRes = false;\r\n \r\n return new Value(!boolRes);\r\n\r\n }\r\n }\r\n else\r\n {\r\n //access value\r\n //then decrement\r\n result= temp.valueI--;\r\n env.update(node, temp);\r\n }\r\n \r\n }\r\n //pre decrement\r\n else if (node.equals(\"---\"))\r\n {\r\n //boolean decrement\r\n if(temp.tag.equals(Value.BOOL_TAG))\r\n {\r\n boolRes = false;\r\n temp.valueB = boolRes;\r\n env.update(node,temp);\r\n return new Value(boolRes);\r\n\r\n }\r\n else\r\n {\r\n //access value\r\n //then decrement\r\n result= --temp.valueI;\r\n env.update(node, temp);\r\n }\r\n\r\n }\r\n return new Value(result);\r\n\r\n }\r\n\r\n\r\n // Evaluate an if-expression\r\n private static Value evaluateIf(Tree tree, Environment env) throws EvalException\r\n {\r\n if ( 3 != tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"incorrect conditional expression: \" + tree);\r\n }\r\n\r\n final Value result; // a blank final\r\n\r\n final Value conditionalExp = evaluateExp( tree.getSubTree(0), env );\r\n // do a runtime type check\r\n if ( ! conditionalExp.tag.equals(Value.BOOL_TAG) )\r\n {\r\n throw new EvalException(\"illegal boolean expression: \" + tree);\r\n }\r\n\r\n if ( conditionalExp.valueB )\r\n {\r\n result = evaluateExp( tree.getSubTree(1), env );\r\n }\r\n else\r\n {\r\n result = evaluateExp( tree.getSubTree(2), env );\r\n }\r\n\r\n return result;\r\n }//evaluateIf()\r\n\r\n\r\n // Evaluate a while-loop expression\r\n private static Value evaluateWhile(Tree tree, Environment env) throws EvalException\r\n {\r\n if ( 2 != tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"incorrect while expression: \" + tree);\r\n }\r\n\r\n // evaluate the boolean condition\r\n Value conditionalExp = evaluateExp( tree.getSubTree(0), env );\r\n // do a runtime type check\r\n if ( ! conditionalExp.tag.equals(Value.BOOL_TAG) )\r\n {\r\n throw new EvalException(\"illegal boolean expression: \" + tree);\r\n }\r\n\r\n while ( conditionalExp.valueB )\r\n {\r\n // evaluate the body of the loop (for its side effects)\r\n evaluateExp( tree.getSubTree(1), env );\r\n // re-evaluate the boolean condition\r\n conditionalExp = evaluateExp( tree.getSubTree(0), env );\r\n // do a runtime type check\r\n if ( ! 
conditionalExp.tag.equals(Value.BOOL_TAG) )\r\n {\r\n throw new EvalException(\"illegal boolean expression: \" + tree);\r\n }\r\n }\r\n\r\n // always return false for a while-loop expression\r\n return new Value( false );\r\n }//evaluateWhile()\r\n\r\n\r\n // Evaluate a set expression\r\n private static Value evaluateSet(Tree tree, Environment env) throws EvalException\r\n {\r\n if ( 2 != tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree + \"\\n\");\r\n }\r\n\r\n // get the variable\r\n final String variable = tree.getSubTree(0).getElement();\r\n\r\n // check that we have a proper variable\r\n if ( ! variable.matches(\"^[a-zA-Z][a-zA-Z0-9]*\") ) // runtime check\r\n {\r\n throw new EvalException(\"improper variable name: \" + variable);\r\n }\r\n\r\n // check if this variable has already been declared\r\n if ( ! env.defined(variable) )\r\n {\r\n throw new EvalException(\"undefined variable: \" + variable);\r\n }\r\n\r\n // get, and then evaluate, the expression\r\n final Tree expr = tree.getSubTree(1);\r\n final Value result = evaluateExp( expr, env );\r\n // update this variable in the environment\r\n env.update(variable, result);\r\n\r\n if (DEBUG > 0) System.out.println( env + \"\\n\" ); // for debugging purposes\r\n\r\n return result;\r\n }//evaluateSet()\r\n\r\n\r\n // Evaluate a var expression\r\n private static Value evaluateVar(Tree tree, Environment env) throws EvalException\r\n {\r\n if ( 2 != tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree + \"\\n\");\r\n }\r\n\r\n // get the variable\r\n final String variable = tree.getSubTree(0).getElement();\r\n\r\n // check that we have a proper variable\r\n if ( ! variable.matches(\"^[a-zA-Z][a-zA-Z0-9]*\") ) // runtime check\r\n {\r\n throw new EvalException(\"improper variable name: \" + variable);\r\n }\r\n\r\n // check if this variable has already been declared\r\n // in the local environment\r\n if ( env.definedLocal(variable) )\r\n {\r\n throw new EvalException(\"variable already declared: \" + variable + \"\\n\");\r\n }\r\n\r\n // get, and then evaluate, the expression\r\n final Tree expr = tree.getSubTree(1);\r\n final Value result = evaluateExp( expr, env );\r\n\r\n // declare the new, local, variable\r\n env.add(variable, result);\r\n\r\n if (DEBUG > 0) System.out.println( env + \"\\n\" ); // for debugging purposes\r\n\r\n return result;\r\n }//evaluateVar()\r\n\r\n\r\n // Evaluate a begin expression\r\n private static Value evaluateBegin(Tree tree, Environment env) throws EvalException\r\n {\r\n // Create a new Environment object chained to (or \"nested in\")\r\n // the previous (\"outer\") environment object.\r\n final Environment newEnv = new Environment(env);\r\n\r\n // Evaluate each sub expression in the begin\r\n // expression (using the new environment chain).\r\n // The return value of each expression is\r\n // discarded, so any expression without a\r\n // side-effect is worthless.\r\n for (int i = 0; i < tree.degree()-1; i++)\r\n {\r\n evaluateExp( tree.getSubTree(i), newEnv );\r\n }\r\n\r\n // Evaluate the last expression and use its\r\n // value as the value of the begin expression.\r\n final Value result = evaluateExp( tree.getSubTree(tree.degree()-1), newEnv );\r\n\r\n // When this method returns, the local Environment\r\n // object that we created at the beginning of this method\r\n // becomes a garbage object (and will be garbage colected).\r\n // This is like \"popping\" a stack frame off of the call 
stack\r\n // in Java, C , or C++.\r\n\r\n return result;\r\n }//evaluateBegin()\r\n\r\n\r\n // Evaluate a print expression\r\n private static Value evaluatePrint(Tree tree, Environment env) throws EvalException\r\n {\r\n if ( 1 != tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree + \"\\n\");\r\n }\r\n\r\n final Value result = evaluateExp( tree.getSubTree(0), env );\r\n\r\n if (DEBUG > 0)\r\n {\r\n System.out.println( result );\r\n }\r\n else\r\n {\r\n System.out.println( result.toSimpleString() );\r\n }\r\n\r\n return result;\r\n }//evaluatePrint()\r\n\r\n\r\n // Evaluate a boolean expression\r\n private static Value evaluateBexp(Tree tree, Environment env) throws EvalException\r\n {\r\n boolean result = false;\r\n\r\n final String node = tree.getElement();\r\n\r\n Value value = evaluateExp( tree.getSubTree(0), env );\r\n if ( ! value.tag.equals(Value.BOOL_TAG) ) // runtime check\r\n {\r\n throw new EvalException(\"not a boolean expression: \"\r\n + tree.getSubTree(0));\r\n }\r\n result = value.valueB;\r\n\r\n if ( node.equals(\"&&\") )\r\n {\r\n if ( 2 > tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n\r\n for (int i = 1; i < tree.degree(); i++)\r\n {\r\n if (result)\r\n {\r\n value = evaluateExp( tree.getSubTree(i), env );\r\n if ( ! value.tag.equals(Value.BOOL_TAG) ) // runtime check\r\n {\r\n throw new EvalException(\"not a boolean expression: \"\r\n + tree.getSubTree(i));\r\n }\r\n result = result && value.valueB;\r\n }\r\n else // short circuit the evaluation of '&&'\r\n {\r\n result = false;\r\n break;\r\n }\r\n }\r\n }\r\n else if ( node.equals(\"||\") )\r\n {\r\n if ( 2 > tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n\r\n for (int i = 1; i < tree.degree(); i++)\r\n {\r\n if (! result)\r\n {\r\n value = evaluateExp( tree.getSubTree(i), env );\r\n if ( ! value.tag.equals(Value.BOOL_TAG) ) // runtime check\r\n {\r\n throw new EvalException(\"not a boolean expression: \"\r\n + tree.getSubTree(i));\r\n }\r\n result = result || value.valueB;\r\n }\r\n else // short circuit the evaluation of '||'\r\n {\r\n result = true;\r\n break;\r\n }\r\n }\r\n }\r\n else if ( node.equals(\"!\") )\r\n {\r\n if ( 1 != tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n result = ! result;\r\n }\r\n\r\n return new Value( result );\r\n }//evaluateBexp()\r\n\r\n\r\n // Evaluate an equality expression (which is a kind of boolean expression)\r\n private static Value evaluateEqexp(Tree tree, Environment env) throws EvalException\r\n {\r\n if ( 2 != tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n\r\n final boolean result; // a blank final\r\n\r\n final String op = tree.getElement();\r\n\r\n final Value valueL = evaluateExp( tree.getSubTree(0), env );\r\n final Value valueR = evaluateExp( tree.getSubTree(1), env );\r\n\r\n if ( op.equals(\"==\") )\r\n {\r\n if ( ! 
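// ---- editor's aside: illustrative sketch, not part of the original record ----
// evaluateBexp above short-circuits: once '&&' has seen false (or '||' has seen
// true), the remaining operands are never evaluated, so their side effects are
// skipped. Neither program below prints anything before its result; the
// buildTree/eval names are assumptions, as in the earlier sketches.
class ShortCircuitSketch
{
    static void demo() throws TokenizeException, ParseException, EvalException
    {
        System.out.println(Evaluate.eval(ParseTree.buildTree("(&& false (print 1))"))); // false
        System.out.println(Evaluate.eval(ParseTree.buildTree("(|| true (print 1))")));  // true
    }
}
// ---- end editor's aside ----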
valueL.tag.equals(valueR.tag) )\r\n {\r\n result = false;\r\n }\r\n else if ( valueL.tag.equals(Value.INT_TAG) )\r\n {\r\n final int resultL = valueL.valueI;\r\n final int resultR = valueR.valueI;\r\n result = resultL == resultR;\r\n }\r\n else // boolean data type\r\n {\r\n final boolean resultL = valueL.valueB;\r\n final boolean resultR = valueR.valueB;\r\n result = resultL == resultR;\r\n }\r\n }\r\n else // the '!=' operator\r\n {\r\n if ( ! valueL.tag.equals(valueR.tag) )\r\n {\r\n result = true;\r\n }\r\n else if ( valueL.tag.equals(Value.INT_TAG) )\r\n {\r\n final int resultL = valueL.valueI;\r\n final int resultR = valueR.valueI;\r\n result = resultL != resultR;\r\n }\r\n else // boolean data type\r\n {\r\n final boolean resultL = valueL.valueB;\r\n final boolean resultR = valueR.valueB;\r\n result = resultL != resultR;\r\n }\r\n }\r\n\r\n return new Value( result );\r\n }//evaluateEqexp()\r\n\r\n\r\n // Evaluate a relational expression (which is a kind of boolean expression)\r\n private static Value evaluateRelexp(Tree tree, Environment env) throws EvalException\r\n {\r\n if ( 2 != tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n\r\n final boolean result; // a blank final\r\n\r\n final String op = tree.getElement();\r\n\r\n final Value valueL = evaluateExp( tree.getSubTree(0), env );\r\n if ( ! valueL.tag.equals(Value.INT_TAG) ) // runtime check\r\n {\r\n throw new EvalException(\"not a integer expression: \"\r\n + tree.getSubTree(0));\r\n }\r\n\r\n final Value valueR = evaluateExp( tree.getSubTree(1), env );\r\n if ( ! valueR.tag.equals(Value.INT_TAG) ) // runtime check\r\n {\r\n throw new EvalException(\"not a integer expression: \"\r\n + tree.getSubTree(1));\r\n }\r\n\r\n final int resultL = valueL.valueI;\r\n final int resultR = valueR.valueI;\r\n\r\n if ( op.equals(\"<\") )\r\n {\r\n result = resultL < resultR;\r\n }\r\n else if ( op.equals(\">\") )\r\n {\r\n result = resultL > resultR;\r\n }\r\n else if ( op.equals(\"<=\") )\r\n {\r\n result = resultL <= resultR;\r\n }\r\n else // if ( op.equals(\">=\") )\r\n {\r\n result = resultL >= resultR;\r\n }\r\n\r\n return new Value( result );\r\n }//evaluateRelexp()\r\n\r\n\r\n // Evaluate an arithmetic expression\r\n private static Value evaluateAexp(Tree tree, Environment env) throws EvalException\r\n {\r\n int result = 0;\r\n\r\n final String node = tree.getElement();\r\n\r\n final Value valueL = evaluateExp( tree.getSubTree(0), env );\r\n if ( ! valueL.tag.equals(Value.INT_TAG) ) // runtime check\r\n {\r\n throw new EvalException(\"not a integer expression: \"\r\n + tree.getSubTree(0));\r\n }\r\n final int resultL = valueL.valueI;\r\n int resultR = 0;\r\n\r\n Value valueR = null;\r\n if ( tree.degree() >= 2 )\r\n {\r\n valueR = evaluateExp( tree.getSubTree(1), env );\r\n if ( ! valueR.tag.equals(Value.INT_TAG) ) // runtime check\r\n {\r\n throw new EvalException(\"not a integer expression: \"\r\n + tree.getSubTree(1));\r\n }\r\n resultR = valueR.valueI;\r\n }\r\n\r\n if ( node.equals(\"+\") )\r\n {\r\n if ( tree.degree() == 1 )\r\n result = resultL;\r\n else\r\n {\r\n result = resultL + resultR;\r\n\r\n for (int i = 2; i < tree.degree(); i++)\r\n {\r\n Value temp = evaluateExp( tree.getSubTree(i), env );\r\n if ( ! 
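// ---- editor's aside: illustrative sketch, not part of the original record ----
// Arity conventions encoded in evaluateAexp: '+' and '*' fold over any number of
// operands, '-' with a single operand is unary negation, and '^' is integer
// exponentiation via (int)Math.pow, not bitwise xor. Names assumed as before.
class ArithmeticAritySketch
{
    static void demo() throws TokenizeException, ParseException, EvalException
    {
        System.out.println(Evaluate.eval(ParseTree.buildTree("(+ 1 2 3)"))); // 6
        System.out.println(Evaluate.eval(ParseTree.buildTree("(- 5)")));     // -5
        System.out.println(Evaluate.eval(ParseTree.buildTree("(^ 2 10)"))); // 1024
    }
}
// ---- end editor's aside ----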
temp.tag.equals(Value.INT_TAG) ) // runtime check\r\n {\r\n throw new EvalException(\"not a integer expression: \"\r\n + tree.getSubTree(i));\r\n }\r\n result += temp.valueI;\r\n }\r\n }\r\n }\r\n else if ( node.equals(\"-\") )\r\n {\r\n if ( 2 < tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n if ( tree.degree() == 1 )\r\n result = -resultL;\r\n else\r\n result = resultL - resultR;\r\n }\r\n else if ( node.equals(\"*\") )\r\n {\r\n if ( 1 == tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n\r\n result = resultL * resultR;\r\n\r\n for (int i = 2; i < tree.degree(); i++)\r\n {\r\n Value temp = evaluateExp( tree.getSubTree(i), env );\r\n if ( ! temp.tag.equals(Value.INT_TAG) ) // runtime check\r\n {\r\n throw new EvalException(\"not a integer expression: \"\r\n + tree.getSubTree(i));\r\n }\r\n result *= temp.valueI;\r\n }\r\n }\r\n else if ( node.equals(\"/\") )\r\n {\r\n if ( 2 != tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n result = resultL / resultR;\r\n }\r\n else if ( node.equals(\"%\") )\r\n {\r\n if ( 2 != tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n result = resultL % resultR;\r\n }\r\n else if ( node.equals(\"^\") )\r\n {\r\n if ( 2 != tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n result = (int)Math.pow(resultL, resultR);\r\n }\r\n\r\n return new Value( result );\r\n }//evaluateAexp()\r\n}\r\n" }, { "alpha_fraction": 0.6678507924079895, "alphanum_fraction": 0.6678507924079895, "avg_line_length": 17.482759475708008, "blob_id": "6681ac3482ed5c218dd331b473566bd0ee88d8fe", "content_id": "23ef566bdec6166b5c0e63ff3b7097e459faccca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 563, "license_type": "no_license", "max_line_length": 59, "num_lines": 29, "path": "/Year 3/Assignment3/Multiply_Command.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n#include \"Binary_Op_Command.h\"\r\n\r\n#ifndef _MUTLIPLY_COMMAND_H\r\n#define _MULTIPLY_COMMAND_H\r\n\r\n//Multiplication Class\r\nclass Multiply_Command : public Binary_Op_Command{\r\n\tpublic:\r\n\t\r\n\t\tMultiply_Command(Stack <int> &s);\r\n\t\t\r\n\t\t~Multiply_Command(void);\r\n\t\t\r\n\t\t//evaluate multiplication between the integers\r\n\t\tvirtual int evaluate (int, int) const;\r\n\t\t\r\n\t\t//returns precedence\r\n\t\tvirtual int prec (void) const;\r\n\t\t\r\n\tprivate:\r\n\t\tint precedence;\r\n\t\t\r\n};\r\n\t\t\r\n#endif" }, { "alpha_fraction": 0.7439862489700317, "alphanum_fraction": 0.780927836894989, "avg_line_length": 53.52381134033203, "blob_id": "6a44f50d45d7e4393d3d25ea2e8489484028570b", "content_id": "f563d5df1d2178fa1a90afa590fd42640245043b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1164, "license_type": "no_license", "max_line_length": 241, "num_lines": 21, "path": "/Master Year 1/Object Oriented Design/README.txt", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "This program demonstrates the use of multithreads and using them to calculate prime numbers from 1 to 10,000\r\n\r\nI created three different 
types of thread classes based on the how many threads are used:\r\nOne thread\r\n100 threads\r\n1,000 threads\r\n\r\nI created a basic function to also calculate the prime numbers called primeCalc(). This is inside all three thread classes' run functions.\r\n\r\nIn the main function is where the threads are ran.\r\nFor the single thread I just created the object and called it's run method.\r\n\r\nFor the other two. I created a loop for creating and starting each thread and the loop only ran for as many threads I needed to complete the program\r\nSo for the 100 threads, a for loop cycled through 100 times. And for the 1,000 threads it did this 1,000 times. The code currently runs the 1,000 thread version but you can comment and un-comment code in the main function to test the others.\r\n\r\nRecorded times:\r\nOne thread: It calcualted the numbers almost instantaneously.\r\n100 threads: This completed in about 2.3 seconds\r\n1,000 threads: This took about 19.6 seconds to compile\r\n\r\nIt seems as more threads are used the more time the job takes." }, { "alpha_fraction": 0.601214587688446, "alphanum_fraction": 0.6194332242012024, "avg_line_length": 21.5238094329834, "blob_id": "af7d9b47fa3b34f166fdd67382496d286d71e747", "content_id": "98e8e5d8833d7cc35712eed1680a6ec94da44b2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 494, "license_type": "no_license", "max_line_length": 57, "num_lines": 21, "path": "/Master Year 2/Operating Systems/HW3/hw3/filters/echo.c", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n This program reads lines from standard input and\r\n echos them to standard output.\r\n\r\n When using Windows, if standard input is the console\r\n keyboard, use ^z (Control-z) to denote the end of file\r\n (and you must use ^z at the beginning of a line!).\r\n*/\r\n#include <stdio.h>\r\n\r\nint main()\r\n{\r\n char oneLine [1000];\r\n\r\n while ( fgets(oneLine, 1000, stdin) != NULL )\r\n {\r\n fputs(oneLine, stdout);\r\n fflush(stdout); // try commenting this out\r\n }\r\n return 0;\r\n}\r\n" }, { "alpha_fraction": 0.45762407779693604, "alphanum_fraction": 0.4616556167602539, "avg_line_length": 25.56049346923828, "blob_id": "6e08cde0c92a7e136c952f07d6367e6eac2c5777", "content_id": "ce6b8e45ff61684f438a83f9f57d9b51d8b2040e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 22324, "license_type": "no_license", "max_line_length": 87, "num_lines": 810, "path": "/Master Year 1/Programming Languages and Compilers/HW2/hw2/Evaluate_3a.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "import javax.lang.model.util.ElementScanner6;\r\n\r\n/**\r\n This program, as distributed, interprets Language_3.\r\n You are to modify this program so it interprets the\r\n following modified version of Language_3.\r\n\r\n Prog ::= Exp\r\n | '(' 'prog' Exp+ Exp ')'\r\n\r\n Exp ::= PostInc\r\n | PreInc\r\n | PostDec\r\n | PreDec\r\n | Var\r\n | Print\r\n | AExp\r\n | BExp\r\n | INTEGER\r\n | BOOLEAN\r\n | VARIABLE\r\n\r\n PostInc ::= '(' '++' VARIABLE ')'\r\n PostDec ::= '(' '--' VARIABLE ')'\r\n PreInc ::= '(' '+++' VARIABLE ')'\r\n PreDec ::= '(' '---' VARIABLE ')'\r\n\r\n Spaceship ::= '(' '<=>' Exp Exp ')'\r\n\r\n Var ::= '(' 'var' VARIABLE Exp ')'\r\n\r\n Print ::= '(' 'print' Exp ')'\r\n\r\n AExp ::= '(' '+' Exp Exp* ')'\r\n | '(' '-' Exp Exp? 
')'\r\n | '(' '*' Exp Exp+ ')'\r\n | '(' '/' Exp Exp ')'\r\n | '(' '%' Exp Exp ')'\r\n | '(' '^' Exp Exp ')'\r\n\r\n BExp ::= '(' '||' Exp Exp+ ')'\r\n | '(' '&&' Exp Exp+ ')'\r\n | '(' '!' Exp ')'\r\n | '(' EqOp Exp Exp ')'\r\n | '(' RelOp Exp Exp ')'\r\n\r\n EqOp ::= '==' | '!='\r\n RelOp ::= '<' | '>' | '<=' | '>='\r\n\r\n INTEGER ::= [-|+][0-9]+\r\n BOOLEAN ::= 'true' | 'false'\r\n VARIABLE ::= [a-zA-Z][a-zA-Z0-9]*\r\n*/\r\n\r\n/*\r\n Course: CS 51530\r\n Name: Marcellus Hunt\r\n Email: [email protected]\r\n Assignment: 2\r\n*/\r\n\r\n\r\npublic class Evaluate_3a\r\n{\r\n public static int DEBUG = 1;\r\n\r\n /**\r\n The methods evaluateProg(), evaluateExp(), evaluateAexp(), evaluateBexp(),\r\n and evaluateRexp() are essentially a post-order traversal of the abstract\r\n syntax tree.\r\n */\r\n public static Value eval(Tree tree) throws EvalException\r\n {\r\n Environment env = new Environment(); // global environment data structure\r\n\r\n return evaluateProg( tree, env );\r\n }//eval()\r\n\r\n\r\n // Evaluate a prog\r\n public static Value evaluateProg(Tree tree, Environment env) throws EvalException\r\n {\r\n Value result = null;\r\n\r\n // Instantiate the global environment object.\r\n env = new Environment();\r\n\r\n // Check whick kind of Prog we have.\r\n if ( ! tree.getElement().equals(\"prog\") )\r\n {\r\n // Evaluate the single expression.\r\n result = evaluateExp( tree, env );\r\n }\r\n else\r\n {\r\n // Evaluate each Exp in the Prog.\r\n // Any Var expressions will have the side effect\r\n // of putting a variable in the environment.\r\n // Any Print expressions will have the side effect\r\n // of printing an output.\r\n // Any other expressions would be pointless!\r\n for (int i = 0; i < tree.degree()-1; i++)\r\n {\r\n evaluateExp( tree.getSubTree(i), env );\r\n }\r\n\r\n // Evaluate the last expression and use its\r\n // value as the value of the prog expression.\r\n result = evaluateExp( tree.getSubTree(tree.degree()-1), env );\r\n }\r\n\r\n return result;\r\n }//evaluateProg()\r\n\r\n\r\n // Evaluate an expression\r\n public static Value evaluateExp(Tree tree, Environment env) throws EvalException\r\n {\r\n Value result = null;\r\n\r\n String node = tree.getElement();\r\n\r\n if ( node.equals(\"var\") )\r\n {\r\n result = evaluateVar( tree, env );\r\n }\r\n else if ( node.equals(\"print\") )\r\n {\r\n result = evaluatePrint( tree, env );\r\n }\r\n else if ( node.equals(\"&&\")\r\n || node.equals(\"||\")\r\n || node.equals(\"!\") )\r\n {\r\n result = evaluateBexp(tree, env); // boolean expression\r\n }\r\n else if ( node.equals(\"==\")\r\n || node.equals(\"!=\") )\r\n {\r\n result = evaluateEqexp(tree, env); // equality operator\r\n }\r\n else if ( node.equals(\"<\")\r\n || node.equals(\">\")\r\n || node.equals(\"<=\")\r\n || node.equals(\">=\") )\r\n {\r\n result = evaluateRelexp(tree, env); // relational operator\r\n }\r\n else if ( node.equals(\"+\")\r\n || node.equals(\"-\")\r\n || node.equals(\"*\")\r\n || node.equals(\"/\")\r\n || node.equals(\"%\")\r\n || node.equals(\"^\") )\r\n {\r\n result = evaluateAexp(tree, env); // arithmetic expression\r\n }\r\n else if (node.equals(\"<=>\") )\r\n {\r\n result = spaceship(tree, env);\r\n }\r\n else if (node.equals(\"++\")\r\n ||node.equals(\"+++\") )\r\n {\r\n result = increment(tree,env);\r\n }\r\n\r\n else if (node.equals(\"--\")\r\n || node.equals(\"---\"))\r\n {\r\n result = decrement(tree,env);\r\n }\r\n else if ( tree.degree() == 0 )\r\n {\r\n if ( node.equals(\"true\") || node.equals(\"false\") )\r\n {\r\n result = 
new Value( node.equals(\"true\") );\r\n }\r\n else if ( node.matches(\"^[-|+]*[0-9][0-9]*\") )\r\n {\r\n result = new Value( Integer.parseInt( node ) );\r\n }\r\n else if ( env.defined(node) ) // a variable\r\n {\r\n result = env.lookUp( node );\r\n }\r\n else // runtime check\r\n {\r\n throw new EvalException(\"undefined variable: \" + node);\r\n }\r\n }\r\n else\r\n {\r\n throw new EvalException(\"invalid expression: \" + tree);\r\n }\r\n\r\n return result;\r\n }//evaluateExp()\r\n\r\n //spaceship function\r\n private static Value spaceship(Tree tree, Environment env) throws EvalException\r\n {\r\n\r\n //runtime check\r\n if (tree.degree() != 2)\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n\r\n int result = 0;\r\n\r\n //get elements being comapred\r\n Value valueL = evaluateExp( tree.getSubTree(0), env );\r\n Value valueR = evaluateExp( tree.getSubTree(1), env );\r\n\r\n //runtime check \r\n if (valueL.tag.equals(Value.BOOL_TAG) && valueR.tag.equals(Value.INT_TAG))\r\n {\r\n throw new EvalException(\"wrong type of arguments: \" + tree);\r\n }\r\n\r\n //runtime check \r\n if (valueL.tag.equals(Value.INT_TAG) && valueR.tag.equals(Value.BOOL_TAG))\r\n {\r\n throw new EvalException(\"wrong type of arguments: \" + tree);\r\n }\r\n \r\n // boolean tags\r\n if (valueL.tag.equals(Value.BOOL_TAG))\r\n {\r\n boolean resL = valueL.valueB;\r\n boolean resR = valueR.valueB;\r\n\r\n if(resL == false && resR == true)\r\n {\r\n result = -1;\r\n }\r\n\r\n else if(resL == true && resR == false)\r\n {\r\n result = 1;\r\n }\r\n\r\n else \r\n {\r\n result = 0;\r\n }\r\n\r\n }\r\n \r\n // int tags\r\n else if ( valueL.tag.equals(Value.INT_TAG) )\r\n {\r\n int resultL = valueL.valueI;\r\n int resultR = valueR.valueI;\r\n\r\n if (resultL < resultR)\r\n result = -1;\r\n\r\n else if (resultL > resultR)\r\n result = 1;\r\n\r\n else\r\n result = 0;\r\n }\r\n \r\n return new Value (result);\r\n \r\n }\r\n\r\n //pre/post increment function\r\n private static Value increment(Tree tree, Environment env) throws EvalException\r\n {\r\n \r\n String node = tree.getElement();\r\n\r\n //get value being incremented\r\n Value temp = evaluateExp( tree.getSubTree(0), env );\r\n \r\n if (tree.degree() > 1 ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n\r\n String strTemp = tree.getSubTree(0).getElement();\r\n\r\n if(!env.defined(strTemp) || strTemp.matches(\"^[-|+]*[0-9][0-9]*\"))\r\n {\r\n throw new EvalException(\"cannot increment a literal: \" + tree);\r\n }\r\n\r\n int result = 0;\r\n boolean boolRes = false;\r\n //post increment \r\n if (node.equals(\"++\"))\r\n {\r\n //boolean increment\r\n if(temp.tag.equals(Value.BOOL_TAG))\r\n {\r\n //use to test post increment\r\n int a = 0;\r\n \r\n //if temp boolean is true\r\n if (temp.valueB == true)\r\n {\r\n boolRes = true;\r\n\r\n return new Value(boolRes);\r\n }\r\n\r\n else \r\n {\r\n //boolRes hold temp value\r\n boolRes = temp.valueB;\r\n\r\n //change value of temp\r\n temp.valueB = !boolRes;\r\n\r\n //update temp\r\n env.update(node,temp);\r\n\r\n //change boolRes back to true because it's increment\r\n boolRes = true;\r\n \r\n return new Value(!boolRes);\r\n\r\n }\r\n \r\n }\r\n\r\n //int increment\r\n else\r\n {\r\n //access value\r\n //then increment\r\n result= temp.valueI++;\r\n env.update(node, temp);\r\n }\r\n //return new Value(boolRes);\r\n }\r\n //pre increment\r\n else if (node.equals(\"+++\"))\r\n {\r\n //boolean increment\r\n if(temp.tag.equals(Value.BOOL_TAG))\r\n 
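// ---- editor's aside: illustrative sketch, not part of the original record ----
// The spaceship evaluator above collapses a comparison to -1, 0, or 1, ordering
// booleans as false < true and rejecting mixed int/boolean operands (think
// Integer.compare / Boolean.compare clamped to {-1, 0, 1}). Evaluate_3a.eval is
// this file's real entry point; ParseTree.buildTree is assumed from the drivers.
class SpaceshipSketch
{
    static void demo() throws Exception
    {
        System.out.println(Evaluate_3a.eval(ParseTree.buildTree("(<=> 1 2)")));        // -1
        System.out.println(Evaluate_3a.eval(ParseTree.buildTree("(<=> 2 2)")));        // 0
        System.out.println(Evaluate_3a.eval(ParseTree.buildTree("(<=> true false)"))); // 1
    }
}
// ---- end editor's aside ----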
{\r\n boolRes = true;\r\n temp.valueB = boolRes;\r\n env.update(node,temp);\r\n return new Value(boolRes);\r\n\r\n }\r\n\r\n //int increment\r\n else \r\n {\r\n //access value\r\n //then increment\r\n result= ++temp.valueI;\r\n env.update(node, temp);\r\n }\r\n \r\n }\r\n return new Value(result);\r\n }\r\n\r\n //pre/post decrement function\r\n private static Value decrement(Tree tree, Environment env) throws EvalException\r\n {\r\n String node = tree.getElement();\r\n\r\n //get value being incremented\r\n Value temp = evaluateExp( tree.getSubTree(0), env );\r\n\r\n \r\n if (tree.degree() > 1 ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n\r\n String strTemp = tree.getSubTree(0).getElement();\r\n\r\n if(!env.defined(strTemp) || strTemp.matches(\"^[-|+]*[0-9][0-9]*\"))\r\n {\r\n throw new EvalException(\"cannot decrement a literal: \" + tree);\r\n }\r\n\r\n int result = 0;\r\n boolean boolRes = false;\r\n\r\n //post decrement\r\n if (node.equals(\"--\"))\r\n {\r\n //boolean decrement\r\n if(temp.tag.equals(Value.BOOL_TAG))\r\n {\r\n //use to test post increment\r\n int a = 0;\r\n \r\n //if temp boolean is true\r\n if (temp.valueB == false)\r\n {\r\n boolRes = false;\r\n\r\n return new Value(boolRes);\r\n }\r\n\r\n else \r\n {\r\n //boolRes hold temp value\r\n boolRes = temp.valueB;\r\n\r\n //change value of temp\r\n temp.valueB = !boolRes;\r\n\r\n //update temp\r\n env.update(node,temp);\r\n\r\n //change boolRes back to false because it's decrement\r\n boolRes = false;\r\n \r\n return new Value(!boolRes);\r\n\r\n }\r\n }\r\n else\r\n {\r\n //access value\r\n //then decrement\r\n result= temp.valueI--;\r\n env.update(node, temp);\r\n }\r\n \r\n }\r\n //pre decrement\r\n else if (node.equals(\"---\"))\r\n {\r\n //boolean decrement\r\n if(temp.tag.equals(Value.BOOL_TAG))\r\n {\r\n boolRes = false;\r\n temp.valueB = boolRes;\r\n env.update(node,temp);\r\n return new Value(boolRes);\r\n\r\n }\r\n else\r\n {\r\n //access value\r\n //then decrement\r\n result= --temp.valueI;\r\n env.update(node, temp);\r\n }\r\n\r\n }\r\n return new Value(result);\r\n\r\n }\r\n\r\n\r\n // Evaluate a var expression\r\n private static Value evaluateVar(Tree tree, Environment env) throws EvalException\r\n {\r\n if ( 2 != tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n\r\n Value result = null;\r\n\r\n // get the variable\r\n String variable = tree.getSubTree(0).getElement();\r\n\r\n // get, and then evaluate, the expression\r\n Tree expr = tree.getSubTree(1);\r\n result = evaluateExp( expr, env );\r\n\r\n // check if this variable has already been declared\r\n if (! 
env.defined(variable))\r\n {\r\n env.add(variable, result);\r\n }\r\n else // this variable is already in the environment\r\n {\r\n env.update(variable, result);\r\n }\r\n\r\n if (DEBUG > 0) System.out.println( env + \"\\n\"); // for debugging purposes\r\n\r\n return result;\r\n }//evaluateVar()\r\n\r\n\r\n // Evaluate a print expression\r\n private static Value evaluatePrint(Tree tree, Environment env) throws EvalException\r\n {\r\n if ( 1 != tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n\r\n Value result = evaluateExp( tree.getSubTree(0), env );\r\n\r\n if (Evaluate_3a.DEBUG > 0)\r\n {\r\n System.out.println( result );\r\n }\r\n else\r\n {\r\n System.out.println( result.toSimpleString() );\r\n }\r\n\r\n return result;\r\n }//evaluatePrint()\r\n\r\n\r\n // Evaluate a boolean expression\r\n private static Value evaluateBexp(Tree tree, Environment env) throws EvalException\r\n {\r\n boolean result = false;\r\n\r\n String node = tree.getElement();\r\n\r\n Value value = evaluateExp( tree.getSubTree(0), env );\r\n if ( ! value.tag.equals(Value.BOOL_TAG) ) // runtime check\r\n {\r\n throw new EvalException(\"not a boolean expression: \"\r\n + tree.getSubTree(0));\r\n }\r\n result = value.valueB;\r\n\r\n if ( node.equals(\"&&\") )\r\n {\r\n if ( 2 > tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n\r\n for (int i = 1; i < tree.degree(); i++)\r\n {\r\n if (result)\r\n {\r\n value = evaluateExp( tree.getSubTree(i), env );\r\n if ( ! value.tag.equals(Value.BOOL_TAG) ) // runtime check\r\n {\r\n throw new EvalException(\"not a boolean expression: \"\r\n + tree.getSubTree(i));\r\n }\r\n result = result && value.valueB;\r\n }\r\n else // short circuit the evaluation of '&&'\r\n {\r\n result = false;\r\n break;\r\n }\r\n }\r\n }\r\n else if ( node.equals(\"||\") )\r\n {\r\n if ( 2 > tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n\r\n for (int i = 1; i < tree.degree(); i++)\r\n {\r\n if (! result)\r\n {\r\n value = evaluateExp( tree.getSubTree(i), env );\r\n if ( ! value.tag.equals(Value.BOOL_TAG) ) // runtime check\r\n {\r\n throw new EvalException(\"not a boolean expression: \"\r\n + tree.getSubTree(i));\r\n }\r\n result = result || value.valueB;\r\n }\r\n else // short circuit the evaluation of '||'\r\n {\r\n result = true;\r\n break;\r\n }\r\n }\r\n }\r\n else if ( node.equals(\"!\") )\r\n {\r\n if ( 1 != tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n result = ! result;\r\n }\r\n\r\n return new Value( result );\r\n }//evaluateBexp()\r\n\r\n\r\n // Evaluate an equality expression (which is a kind of boolean expression)\r\n private static Value evaluateEqexp(Tree tree, Environment env) throws EvalException\r\n {\r\n if ( 2 != tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n\r\n boolean result = false;\r\n\r\n String op = tree.getElement();\r\n\r\n Value valueL = evaluateExp( tree.getSubTree(0), env );\r\n Value valueR = evaluateExp( tree.getSubTree(1), env );\r\n\r\n if ( op.equals(\"==\") )\r\n {\r\n if ( ! 
valueL.tag.equals(valueR.tag) )\r\n {\r\n result = false;\r\n }\r\n else if ( valueL.tag.equals(Value.INT_TAG) )\r\n {\r\n int resultL = valueL.valueI;\r\n int resultR = valueR.valueI;\r\n result = resultL == resultR;\r\n }\r\n else // boolean data type\r\n {\r\n boolean resultL = valueL.valueB;\r\n boolean resultR = valueR.valueB;\r\n result = resultL == resultR;\r\n }\r\n }\r\n else // the '!=' operator\r\n {\r\n if ( ! valueL.tag.equals(valueR.tag) )\r\n {\r\n result = true;\r\n }\r\n else if ( valueL.tag.equals(Value.INT_TAG) )\r\n {\r\n int resultL = valueL.valueI;\r\n int resultR = valueR.valueI;\r\n result = resultL != resultR;\r\n }\r\n else // boolean data type\r\n {\r\n boolean resultL = valueL.valueB;\r\n boolean resultR = valueR.valueB;\r\n result = resultL != resultR;\r\n }\r\n }\r\n\r\n return new Value( result );\r\n }//evaluateEqexp()\r\n\r\n\r\n // Evaluate a relational expression (which is a kind of boolean expression)\r\n private static Value evaluateRelexp(Tree tree, Environment env) throws EvalException\r\n {\r\n if ( 2 != tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n\r\n boolean result = false;\r\n\r\n String op = tree.getElement();\r\n\r\n Value valueL = evaluateExp( tree.getSubTree(0), env );\r\n if ( ! valueL.tag.equals(Value.INT_TAG) ) // runtime check\r\n {\r\n throw new EvalException(\"not a integer expression: \"\r\n + tree.getSubTree(0));\r\n }\r\n\r\n Value valueR = evaluateExp( tree.getSubTree(1), env );\r\n if ( ! valueR.tag.equals(Value.INT_TAG) ) // runtime check\r\n {\r\n throw new EvalException(\"not a integer expression: \"\r\n + tree.getSubTree(1));\r\n }\r\n\r\n int resultL = valueL.valueI;\r\n int resultR = valueR.valueI;\r\n\r\n if ( op.equals(\"<\") )\r\n {\r\n result = resultL < resultR;\r\n }\r\n else if ( op.equals(\">\") )\r\n {\r\n result = resultL > resultR;\r\n }\r\n else if ( op.equals(\"<=\") )\r\n {\r\n result = resultL <= resultR;\r\n }\r\n else if ( op.equals(\">=\") )\r\n {\r\n result = resultL >= resultR;\r\n }\r\n return new Value( result );\r\n }//evaluateRelexp()\r\n\r\n\r\n // Evaluate an arithmetic expression\r\n private static Value evaluateAexp(Tree tree, Environment env) throws EvalException\r\n {\r\n int result = 0;\r\n\r\n String node = tree.getElement();\r\n\r\n Value valueL = evaluateExp( tree.getSubTree(0), env );\r\n if ( ! valueL.tag.equals(Value.INT_TAG) ) // runtime check\r\n {\r\n throw new EvalException(\"not a integer expression: \"\r\n + tree.getSubTree(0));\r\n }\r\n int resultL = valueL.valueI;\r\n int resultR = 0;\r\n\r\n Value valueR = null;\r\n if ( tree.degree() >= 2 )\r\n {\r\n valueR = evaluateExp( tree.getSubTree(1), env );\r\n if ( ! valueR.tag.equals(Value.INT_TAG) ) // runtime check\r\n {\r\n throw new EvalException(\"not a integer expression: \"\r\n + tree.getSubTree(1));\r\n }\r\n resultR = valueR.valueI;\r\n }\r\n\r\n if ( node.equals(\"+\") )\r\n {\r\n if ( tree.degree() == 1 )\r\n result = resultL;\r\n else\r\n {\r\n result = resultL + resultR;\r\n\r\n for (int i = 2; i < tree.degree(); i++)\r\n {\r\n Value temp = evaluateExp( tree.getSubTree(i), env );\r\n if ( ! 
temp.tag.equals(Value.INT_TAG) ) // runtime check\r\n {\r\n throw new EvalException(\"not a integer expression: \"\r\n + tree.getSubTree(i));\r\n }\r\n result += temp.valueI;\r\n }\r\n }\r\n }\r\n else if ( node.equals(\"-\") )\r\n {\r\n if ( 2 < tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n if ( tree.degree() == 1 )\r\n result = -resultL;\r\n else\r\n result = resultL - resultR;\r\n }\r\n else if ( node.equals(\"*\") )\r\n {\r\n if ( 1 == tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n\r\n result = resultL * resultR;\r\n\r\n for (int i = 2; i < tree.degree(); i++)\r\n {\r\n Value temp = evaluateExp( tree.getSubTree(i), env );\r\n if ( ! temp.tag.equals(Value.INT_TAG) ) // runtime check\r\n {\r\n throw new EvalException(\"not a integer expression: \"\r\n + tree.getSubTree(i));\r\n }\r\n result *= temp.valueI;\r\n }\r\n }\r\n else if ( node.equals(\"/\") )\r\n {\r\n if ( 2 != tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n result = resultL / resultR;\r\n }\r\n else if ( node.equals(\"%\") )\r\n {\r\n if ( 2 != tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n result = resultL % resultR;\r\n }\r\n else if ( node.equals(\"^\") )\r\n {\r\n if ( 2 != tree.degree() ) // runtime check\r\n {\r\n throw new EvalException(\"wrong number of arguments: \" + tree);\r\n }\r\n result = (int)Math.pow(resultL, resultR);\r\n }\r\n\r\n return new Value( result );\r\n }//evaluateAexp()\r\n}\r\n" }, { "alpha_fraction": 0.41244634985923767, "alphanum_fraction": 0.43519312143325806, "avg_line_length": 26.414634704589844, "blob_id": "6a00460eca8213bdd67ea92c29a5ab61f1638d79", "content_id": "5fcbe3f5e519e6c0395e95ad9ca75c396751daf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2330, "license_type": "no_license", "max_line_length": 78, "num_lines": 82, "path": "/Master Year 1/Programming Languages and Compilers/HW4/hw4/Language_7a_Runtime_Errors.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n This program parses and evaluates strings from Language_7a.\r\n*/\r\n\r\npublic class Language_7a_Runtime_Errors\r\n{\r\n public static void main(String[] args)\r\n {\r\n // IMPORTANT: Set this to 0 or 1 depending on whether you need\r\n // to see all of the interpreter's debugging information.\r\n Evaluate_7a.DEBUG = 0;\r\n\r\n String[] programs = // these are all one-line programs\r\n {\r\n \"(array dim)\",\r\n \"(array dim 0)\",\r\n \"(array dim -1)\",\r\n \"(begin (var x false) (array dim x))\",\r\n \"(array dim 1 2 3)\",\r\n \"(index (array dim 5) 5)\",\r\n \"(index (array dim 5) -1)\",\r\n \"(index (array dim 5) 1 2)\",\r\n \"(index (array dim 5))\",\r\n \"(index 10 1)\",\r\n \"(set (index (array dim 5) 5) 0)\",\r\n \"(set (index (array dim 2) 0) (index (array dim 5) 5))\",\r\n \"(set 0 1)\",\r\n \"(set x 1)\",\r\n \"(sizeOf 0)\",\r\n \"(sizeOf x)\",\r\n \"(sizeOf (array dim 3) 4)\",\r\n \"(begin (var x false) (sizeOf x))\",\r\n \"(sizeOf (array dim 5) 0)\",\r\n \"(rand 1)\",\r\n \"(rand 1 2 3)\",\r\n \"(rand 10 1)\",\r\n \"(rand 1 1)\",\r\n \"(rand false 1)\",\r\n \"(rand 0 true)\",\r\n \"(begin (var x false) (rand x 10))\"\r\n };\r\n\r\n\r\n int i = 0;\r\n for (i = 0; i < programs.length; i++)\r\n {\r\n System.out.println(i + \" 
=========================================\");\r\n\r\n // Build and evaluate the AST that represents the expression.\r\n try\r\n {\r\n Tree ast = ParseTree.buildTree( programs[i] );\r\n\r\n // Print the AST as an S-expression\r\n System.out.println( ast + \"\\n\" );\r\n\r\n // Evaluate the expression (interpret the AST).\r\n try\r\n {\r\n Value value = Evaluate_7a.eval( ast );\r\n\r\n System.out.println(\"result = \" + value + \"\\n\" );\r\n }\r\n catch (EvalException e)\r\n {\r\n System.out.println(e);\r\n //e.printStackTrace();\r\n }\r\n }\r\n catch (TokenizeException e)\r\n {\r\n System.out.println(e);\r\n //e.printStackTrace();\r\n }\r\n catch (ParseException e)\r\n {\r\n System.out.println(e);\r\n //e.printStackTrace();\r\n }\r\n }\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.3115900158882141, "alphanum_fraction": 0.3571180999279022, "avg_line_length": 24.341142654418945, "blob_id": "2258dd092247fdf3e900b8a85bb83aa4204c6eaf", "content_id": "3351950e1d7d07a81df27db0e3d51cfc452e0840", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 17308, "license_type": "no_license", "max_line_length": 110, "num_lines": 683, "path": "/Year 4/QRcodeSubmission/QRCodeGenerator/masking.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <cmath>\n#include <vector>\n#include <QImage>\n#include <map>\n\nint scorePattern(int **c, int width)\n{\n int score = 0;\n // score method 1\n // 5 squares in a row of the same color gets 3 points, every color of that in a row gets 1\n // vertical and horizontal\n int counter = 0;\n int currentColor = 2;\n for (int i = 0; i < width; i++)\n {\n for (int j = 0; j < width; j++)\n {\n if (currentColor == c[i][j])\n {\n counter += 1;\n }\n else\n {\n if (counter >= 5)\n {\n score += 3;\n counter -= 5;\n score += counter;\n }\n currentColor = c[i][j];\n counter = 1;\n }\n if (j == width - 1 && counter >= 5)\n {\n score += 3;\n counter -= 5;\n score += counter;\n }\n }\n currentColor = 2;\n counter = 0;\n }\n \n for (int i = 0; i < width; i++)\n {\n for (int j = 0; j < width; j++)\n {\n if (currentColor == c[j][i])\n {\n counter += 1;\n }\n else\n {\n if (counter >= 5)\n {\n score += 3;\n counter -= 5;\n score += counter;\n }\n currentColor = c[j][i];\n counter = 1;\n }\n if (j == width - 1 && counter >= 5)\n {\n score += 3;\n counter -= 5;\n score += counter;\n counter = 0;\n }\n }\n currentColor = 2;\n counter = 0;\n }\n \n // score method 2\n // 3 points for every 2x2 square of the same color\n for (int i = 0; i < width - 1; i++)\n {\n for (int j = 0; j < width - 1; j++)\n {\n currentColor = c[i][j];\n if (currentColor == c[i + 1][j] && currentColor == c[i][j + 1] && currentColor == c[i + 1][j + 1])\n {\n score += 3;\n }\n }\n }\n \n \n \n // score method 3\n // look for either 10111010000 or 00001011101, if found add 40 to score\n int row1[11] = {1,0,1,1,1,0,1,0,0,0,0};\n int row2[11] = {0,0,0,0,1,0,1,1,1,0,1};\n for (int i = 0; i < width; i++)\n {\n bool hasRow1 = true;\n bool hasRow2 = true;\n for (int j = 0; j < width - 11; j++)\n {\n for (int k = 0; k < 11; k++)\n {\n if (c[i][j + k] != row1[k])\n {\n hasRow1 = false;\n }\n if (c[i][j + k] != row2[k])\n {\n hasRow2 = false;\n }\n }\n if (hasRow1)\n score += 40;\n if (hasRow2)\n score += 40;\n hasRow1 = true;\n hasRow2 = true;\n }\n }\n \n for (int i = 0; i < width - 11; i++)\n {\n bool hasRow1 = true;\n bool hasRow2 = true;\n for (int j = 0; j < width; j++)\n {\n for (int k = 0; k < 11; k++)\n {\n if (c[j][i + k] != 
row1[k])\n {\n hasRow1 = false;\n }\n if (c[j][i + k] != row2[k])\n {\n hasRow2 = false;\n }\n }\n if (hasRow1)\n score += 40;\n if (hasRow2)\n score += 40;\n hasRow1 = true;\n hasRow2 = true;\n }\n }\n \n // score method 4\n // get total number of cells, and total of dark cells\n // get percent dark\n // find closest multiples of five \n // sub both from 50, take abs values, and divide them by 5\n // take the smaller number and multiply it by 10 and add to the score\n int totalCells = width * width;\n int totalDark = 0;\n for (int i = 0; i < width; i++)\n {\n for (int j = 0; j < width; j++)\n {\n if (c[i][j] == 1)\n {\n totalDark += 1;\n }\n }\n }\n \n int percentDark = (double(totalDark) / double(totalCells)) * 100;\n \n int bottomFactor = percentDark / 5;\n int upperFactor = bottomFactor + 5;\n \n bottomFactor = std::abs(bottomFactor - 50) / 5;\n upperFactor = std::abs(upperFactor - 50) / 5;\n \n if (bottomFactor > upperFactor)\n score += upperFactor * 10;\n else\n score += bottomFactor * 10;\n \n return score;\n}\n\nint getLength(int a)\n{\n return log2(a);\n}\n\nstd::vector<int> getBinary(int a, int len)\n{\n std::vector<int> binary;\n for (int i = len; i >= 0; i--)\n {\n int temp = a >> i;\n if (temp & 1)\n {\n binary.push_back(1);\n }\n else\n {\n binary.push_back(0);\n }\n }\n \n return binary;\n}\n\nvoid genMaskingPatterns(int **c, int version, std::string color)\n{\n int width;\n \n if (version == 1)\n {\n width = 21;\n }\n else if (version == 2)\n {\n width = 25;\n }\n\n int lowestScore = 999999;\n int **bestMask;\n int mask;\n \n bool flipBit = false;\n \n // pattern 1\n int **pat1 = c;\n for (int i = 0; i < width; i++)\n {\n for (int j = 0; j < width; j++)\n {\n if ((i > 7 || j > 7) && (i > 7 || j < width - 8) && (i < width - 8 || j > 7))\n {\n if (version == 2 && (i < 16 || i > 20 || j < 16 || j > 20))\n {\n flipBit = true;\n }\n else if (version == 1)\n {\n flipBit = true;\n }\n }\n if (flipBit && ((i + j) % 2) == 0)\n {\n if (pat1[i][j] == 0)\n pat1[i][j] = 1;\n else\n pat1[i][j] = 0;\n }\n flipBit = false;\n }\n }\n int score = scorePattern(pat1, width);\n if (score < lowestScore)\n {\n lowestScore = score;\n bestMask = pat1;\n mask = 0;\n }\n \n // pattern 2\n int **pat2 = c;\n for (int i = 0; i < width; i++)\n {\n for (int j = 0; j < width; j++)\n {\n if ((i > 7 || j > 7) && (i > 7 || j < width - 8) && (i < width - 8 || j > 7))\n {\n if (version == 2 && (i < 16 || i > 20 || j < 16 || j > 20))\n {\n flipBit = true;\n }\n else if (version == 1)\n {\n flipBit = true;\n }\n }\n if (flipBit && (i) % 2 == 0)\n {\n if (pat2[i][j] == 0)\n pat2[i][j] = 1;\n else\n pat2[i][j] = 0;\n }\n flipBit = false;\n }\n }\n score = scorePattern(pat2, width);\n if (score < lowestScore)\n {\n lowestScore = score;\n bestMask = pat2;\n mask = 1;\n }\n \n // pattern 3\n int **pat3 = c;\n for (int i = 0; i < width; i++)\n {\n for (int j = 0; j < width; j++)\n {\n if ((i > 7 || j > 7) && (i > 7 || j < width - 8) && (i < width - 8 || j > 7))\n {\n if (version == 2 && (i < 16 || i > 20 || j < 16 || j > 20))\n {\n flipBit = true;\n }\n else if (version == 1)\n {\n flipBit = true;\n }\n }\n if (flipBit && (j) % 3 == 0)\n {\n if (pat3[i][j] == 0)\n pat3[i][j] = 1;\n else\n pat3[i][j] = 0;\n }\n flipBit = false;\n }\n }\n score = scorePattern(pat3, width);\n if (score < lowestScore)\n {\n lowestScore = score;\n bestMask = pat3;\n mask = 2;\n }\n \n // pattern 4\n int **pat4 = c;\n for (int i = 0; i < width; i++)\n {\n for (int j = 0; j < width; j++)\n {\n if ((i > 7 || j > 7) && (i > 7 || j < width - 8) 
&& (i < width - 8 || j > 7))\n {\n if (version == 2 && (i < 16 || i > 20 || j < 16 || j > 20))\n {\n flipBit = true;\n }\n else if (version == 1)\n {\n flipBit = true;\n }\n }\n if (flipBit && ((i + j) % 3) == 0)\n {\n if (pat4[i][j] == 0)\n pat4[i][j] = 1;\n else\n pat4[i][j] = 0;\n }\n flipBit = false;\n }\n }\n score = scorePattern(pat4, width);\n if (score < lowestScore)\n {\n lowestScore = score;\n bestMask = pat4;\n mask = 3;\n }\n \n // pattern 5\n int **pat5 = c;\n for (int i = 0; i < width; i++)\n {\n for (int j = 0; j < width; j++)\n {\n if ((i > 7 || j > 7) && (i > 7 || j < width - 8) && (i < width - 8 || j > 7))\n {\n if (version == 2 && (i < 16 || i > 20 || j < 16 || j > 20))\n {\n flipBit = true;\n }\n else if (version == 1)\n {\n flipBit = true;\n }\n }\n if (flipBit && (int(i / 2) + int(j / 3) % 2) == 0)\n {\n if (pat5[i][j] == 0)\n pat5[i][j] = 1;\n else\n pat5[i][j] = 0;\n }\n flipBit = false;\n }\n }\n score = scorePattern(pat5, width);\n if (score < lowestScore)\n {\n lowestScore = score;\n bestMask = pat5;\n mask = 4;\n }\n \n // pattern 6\n int **pat6 = c;\n for (int i = 0; i < width; i++)\n {\n for (int j = 0; j < width; j++)\n {\n if ((i > 7 || j > 7) && (i > 7 || j < width - 8) && (i < width - 8 || j > 7))\n {\n if (version == 2 && (i < 16 || i > 20 || j < 16 || j > 20))\n {\n flipBit = true;\n }\n else if (version == 1)\n {\n flipBit = true;\n }\n }\n if (flipBit && (((i * j) % 2) + ((i * j) % 3)) == 0)\n {\n if (pat6[i][j] == 0)\n pat6[i][j] = 1;\n else\n pat6[i][j] = 0;\n }\n flipBit = false;\n }\n }\n score = scorePattern(pat6, width);\n if (score < lowestScore)\n {\n lowestScore = score;\n bestMask = pat6;\n mask = 5;\n }\n \n // pattern 7\n int **pat7 = c;\n for (int i = 0; i < width; i++)\n {\n for (int j = 0; j < width; j++)\n {\n if ((i > 7 || j > 7) && (i > 7 || j < width - 8) && (i < width - 8 || j > 7))\n {\n if (version == 2 && (i < 16 || i > 20 || j < 16 || j > 20))\n {\n flipBit = true;\n }\n else if (version == 1)\n {\n flipBit = true;\n }\n }\n if (flipBit && (((i * j) % 2) + ((i * j) % 3) % 2) == 0)\n {\n if (pat7[i][j] == 0)\n pat7[i][j] = 1;\n else\n pat7[i][j] = 0;\n }\n flipBit = false;\n }\n }\n score = scorePattern(pat7, width);\n if (score < lowestScore)\n {\n lowestScore = score;\n bestMask = pat7;\n mask = 6;\n }\n \n // pattern 8\n int **pat8 = c;\n for (int i = 0; i < width; i++)\n {\n for (int j = 0; j < width; j++)\n {\n if ((i > 7 || j > 7) && (i > 7 || j < width - 8) && (i < width - 8 || j > 7))\n {\n if (version == 2 && (i < 16 || i > 20 || j < 16 || j > 20))\n {\n flipBit = true;\n }\n else if (version == 1)\n {\n flipBit = true;\n }\n }\n if (flipBit && (((i + j) % 2) + ((i * j) % 3) % 2) == 0)\n {\n if (pat7[i][j] == 0)\n pat7[i][j] = 1;\n else\n pat7[i][j] = 0;\n }\n flipBit = false;\n }\n }\n score = scorePattern(pat8, width);\n if (score < lowestScore)\n {\n lowestScore = score;\n bestMask = pat8;\n mask = 7;\n }\n \n\n int formatLength = 5;\n int formatNum = mask;\n int originalGen = 1335;\n int genLength = 11;\n int origFormat[15];\n\n if (formatNum == 0)\n {\n int bin[15] = {1,0,1,0,1,0,0,0,0,0,1,0,0,1,0};\n for (int i = 0; i < 15; i++)\n {\n origFormat[i] = bin[i];\n }\n }\n else if (formatNum == 1)\n {\n int bin[15] = {1,0,1,0,0,0,1,0,0,1,0,0,1,0,1};\n for (int i = 0; i < 15; i++)\n {\n origFormat[i] = bin[i];\n }\n }\n else if (formatNum == 2)\n {\n int bin[15] = {1,0,1,1,1,1,0,0,1,1,1,1,1,0,0};\n for (int i = 0; i < 15; i++)\n {\n origFormat[i] = bin[i];\n }\n }\n else if (formatNum == 3)\n {\n int bin[15] = 
{1,0,1,1,0,1,1,0,1,0,0,1,0,1,1};\n for (int i = 0; i < 15; i++)\n {\n origFormat[i] = bin[i];\n }\n }\n else if (formatNum == 4)\n {\n int bin[15] = {1,0,0,0,1,0,1,1,1,1,1,1,0,0,1};\n for (int i = 0; i < 15; i++)\n {\n origFormat[i] = bin[i];\n }\n }\n else if (formatNum == 5)\n {\n int bin[15] = {1,0,0,0,0,0,0,1,1,0,0,1,1,1,0};\n for (int i = 0; i < 15; i++)\n {\n origFormat[i] = bin[i];\n }\n }\n else if (formatNum == 6)\n {\n int bin[15] = {1,0,0,1,1,1,1,1,0,0,1,0,1,1,1};\n for (int i = 0; i < 15; i++)\n {\n origFormat[i] = bin[i];\n }\n }\n else if (formatNum == 7)\n {\n int bin[15] = {1,0,0,1,0,1,0,1,0,1,0,0,0,0,0};\n for (int i = 0; i < 15; i++)\n {\n origFormat[i] = bin[i];\n }\n }\n\n\n // place dark module in bestMask\n bestMask[width - 8][8] = 1;\n \n // place timing patterns\n int len;\n if (version == 1)\n {\n len = 5;\n }\n else\n {\n len = 9;\n }\n // place pattern in the vertical and horizontal positions\n int start = 1;\n for (int i = 0; i < len; i++)\n {\n bestMask[6][8 + i] = start;\n bestMask[8 + i][6] = start;\n if (start == 1)\n start = 0;\n else\n start = 1;\n }\n \n // place format/version ECC\n bestMask[8][0] = origFormat[0];\n bestMask[8][1] = origFormat[1];\n bestMask[8][2] = origFormat[2];\n bestMask[8][3] = origFormat[3];\n bestMask[8][4] = origFormat[4];\n bestMask[8][5] = origFormat[5];\n bestMask[8][7] = origFormat[6];\n bestMask[8][8] = origFormat[7];\n bestMask[7][8] = origFormat[8];\n bestMask[5][8] = origFormat[9];\n bestMask[4][8] = origFormat[10];\n bestMask[3][8] = origFormat[11];\n bestMask[2][8] = origFormat[12];\n bestMask[1][8] = origFormat[13];\n bestMask[0][8] = origFormat[14];\n \n bestMask[width - 1][8] = origFormat[0];\n bestMask[width - 2][8] = origFormat[1];\n bestMask[width - 3][8] = origFormat[2];\n bestMask[width - 4][8] = origFormat[3];\n bestMask[width - 5][8] = origFormat[4];\n bestMask[width - 6][8] = origFormat[5];\n bestMask[width - 7][8] = origFormat[6];\n \n bestMask[8][width - 8] = origFormat[7];\n bestMask[8][width - 7] = origFormat[8];\n bestMask[8][width - 6] = origFormat[9];\n bestMask[8][width - 5] = origFormat[10];\n bestMask[8][width - 4] = origFormat[11];\n bestMask[8][width - 3] = origFormat[12];\n bestMask[8][width - 2] = origFormat[13];\n bestMask[8][width - 1] = origFormat[14];\n \n // add the quiet zone\n width += 8;\n int finalCode[width][width];\n for (int i = 0; i < width; i++)\n {\n for (int j = 0; j < width; j++)\n {\n if (i < 4 || j < 4 || i > width - 5 || j > width - 5)\n {\n finalCode[i][j] = 0;\n }\n else\n {\n finalCode[i][j] = bestMask[i - 4][j - 4];\n }\n }\n }\n \n QImage img(QSize(width, width), QImage::Format_RGB32);\n\n std::map<std::string, QRgb> colors;\n\n colors.insert(std::make_pair(\"Black\", qRgb(0, 0, 0)));\n colors.insert(std::make_pair(\"Red\", qRgb(255, 0, 0)));\n colors.insert(std::make_pair(\"Purple\", qRgb(255, 0, 255)));\n colors.insert(std::make_pair(\"Green\", qRgb(0, 100, 0)));\n colors.insert(std::make_pair(\"Blue\", qRgb(0, 0, 255)));\n colors.insert(std::make_pair(\"Orange\", qRgb(255, 140, 0)));\n\n for (int i = 0; i < width; i++)\n {\n for (int j = 0; j < width; j++)\n {\n if (finalCode[i][j] == 1)\n {\n img.setPixel(j, i, colors.at(color));\n }\n else\n {\n img.setPixel(j, i, qRgb(255, 255, 255));\n }\n }\n }\n\n img.save(\"QR.png\");\n}\n" }, { "alpha_fraction": 0.5817937254905701, "alphanum_fraction": 0.5824655890464783, "avg_line_length": 28.223350524902344, "blob_id": "a28a58c2f61e883e3c72e026ecfce4d8d050126c", "content_id": "8953837e93ef8c3fe96fb3db280110a47b14f4c1", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 5954, "license_type": "no_license", "max_line_length": 93, "num_lines": 197, "path": "/Master Year 1/Computer Graphics/HW4/renderer/scene/Model.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.scene;\r\n\r\nimport java.util.List;\r\nimport java.util.ArrayList;\r\nimport java.awt.Color;\r\n\r\n/**\r\n A {@code Model} data structure represents a distinct geometric object\r\n in a {@link Scene}. A {@code Model} data structure is mainly a {@link List}\r\n of {@link Vertex} objects, a list of {@link LineSegment} objects, and a\r\n list of {@link Color} objects. Each {@link LineSegment} object contains\r\n four integers that are the indices of two {@link Vertex} objects from the\r\n {@code Model}'s vertex list and two {@link Color} objects from the\r\n {@code Model}'s color list. The two {@link Vertex} objects contain the\r\n coordinates, in the model's local coordinate system, for each of the line\r\n segment's two endpoints. The two {@link Color} objects contain the rgb\r\n values for each of the line segment's two endpoints.\r\n<p>\r\n A {@code Model} represent the geometric object as a \"wire-frame\" of line\r\n segments, that is, the geometric object is drawn as a collection of \"edges\".\r\n This is a fairly simplistic way of doing 3D graphics and we will\r\n improve this in later renderers.\r\n<p>\r\n See\r\n<br> <a href=\"http://en.wikipedia.org/wiki/Wire-frame_model\" target=\"_top\">\r\n http://en.wikipedia.org/wiki/Wire-frame_model</a>\r\n<br>or\r\n<br> <a href=\"https://www.google.com/search?q=graphics+wireframe&tbm=isch\" target=\"_top\">\r\n https://www.google.com/search?q=graphics+wireframe&tbm=isch</a>\r\n*/\r\npublic class Model\r\n{\r\n public List<Vertex> vertexList = new ArrayList<>();\r\n public List<LineSegment> lineSegmentList = new ArrayList<>();\r\n public List<Color> colorList = new ArrayList<>();\r\n\r\n public String name;\r\n public boolean visible;\r\n public boolean debug;\r\n\r\n\r\n /**\r\n Construct an empty {@code Model} object.\r\n */\r\n public Model()\r\n {\r\n this.name = \"\";\r\n this.visible = true;\r\n this.debug = false;\r\n }\r\n\r\n\r\n /**\r\n Construct an empty {@code Model} object with the given name.\r\n\r\n @param name a {link String} that is a name for this {@code Model}\r\n */\r\n public Model(final String name)\r\n {\r\n this();\r\n this.name = name;\r\n }\r\n\r\n\r\n /**\r\n A \"copy constructor\". 
This constructor should make a deep copy\r\n of the given {@code Model}'s {@link Vertex} list,\r\n {@link LineSegment} list, and {@link Color} list.\r\n\r\n @param model {@code Model} to make a copy of\r\n */\r\n public Model(final Model model) // a \"copy constructor\"\r\n {\r\n super();\r\n\r\n this.name = model.name;\r\n this.visible = model.visible;\r\n this.debug = model.debug;\r\n for (Vertex v : model.vertexList)\r\n {\r\n this.vertexList.add(new Vertex(v)); // deep copy of each Vertex\r\n }\r\n for (LineSegment ls : model.lineSegmentList)\r\n {\r\n this.lineSegmentList.add(new LineSegment(ls)); // deep copy of each LineSgement\r\n }\r\n for (Color c : model.colorList)\r\n {\r\n this.colorList.add(c); // Color objects are immutable\r\n }\r\n }\r\n\r\n\r\n /**\r\n Add a {@link Vertex} (or vertices) to this {@code Model}'s\r\n {@link List} of vertices.\r\n\r\n @param vArray array of {@link Vertex} objects to add to this {@code Model}\r\n */\r\n public void addVertex(final Vertex... vArray)\r\n {\r\n for (final Vertex v : vArray)\r\n {\r\n vertexList.add(new Vertex(v)); // NOTE: deep copy!\r\n }\r\n }\r\n\r\n\r\n /**\r\n Get a {@link LineSegment} from this {@code Model}'s\r\n {@link List} of line segments.\r\n\r\n @param index integer index of a {@link LineSegment} from this {@code Model}\r\n @return the {@link LineSegment} object at the given index\r\n */\r\n public LineSegment getLineSegment(final int index)\r\n {\r\n return lineSegmentList.get(index);\r\n }\r\n\r\n\r\n /**\r\n Add a {@link LineSegment} (or LineSegments) to this {@code Model}'s\r\n {@link List} of line segments.\r\n <p>\r\n NOTE: This method does not add any vertices to the {@code Model}'s\r\n {@link Vertex} list. This method assumes that the appropriate vertices\r\n have been added to the {@code Model}'s {@link Vertex} list.\r\n\r\n @param lsArray array of {@link LineSegment} objects to add to this {@code Model}\r\n */\r\n public void addLineSegment(final LineSegment... lsArray)\r\n {\r\n for (final LineSegment ls : lsArray)\r\n {\r\n lineSegmentList.add(ls);\r\n }\r\n }\r\n\r\n\r\n /**\r\n Add a {@link Color} (or colors) to this {@code Model}'s\r\n {@link List} of colors.\r\n\r\n @param cArray array of {@link Color} objects to add to this {@code Model}\r\n */\r\n public void addColor(final Color... 
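// ---- editor's aside: illustrative sketch, not part of the original record ----
// Why the copy constructor above deep-copies Vertex and LineSegment objects but
// merely shares Color references: java.awt.Color is immutable, while vertices are
// mutable state that two models must not share. Assuming the renderer's Vertex
// has an (x, y, z) constructor and public coordinate fields, independence looks
// like this:
class ModelCopySketch
{
    static void demo()
    {
        Model a = new Model("a");
        a.addVertex(new Vertex(0.0, 0.0, 0.0)); // assumed Vertex constructor
        Model b = new Model(a);                 // deep copy
        b.vertexList.get(0).x = 9.0;            // assumed public field
        // a.vertexList.get(0).x is still 0.0 -- no shared vertex storage.
    }
}
// ---- end editor's aside ----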
cArray)\r\n {\r\n for (final Color c : cArray)\r\n {\r\n this.colorList.add(c);\r\n }\r\n }\r\n\r\n\r\n /**\r\n For debugging.\r\n\r\n @return {@link String} representation of this {@code Model} object\r\n */\r\n @Override\r\n public String toString()\r\n {\r\n String result = \"\";\r\n result += \"Model: \" + name + \"\\n\";\r\n result += \"This Model's visibility is: \" + visible + \"\\n\";\r\n result += \"Model has \" + vertexList.size() + \" vertices.\\n\";\r\n result += \"Model has \" + colorList.size() + \" colors.\\n\";\r\n result += \"Model has \" + lineSegmentList.size() + \" line segments.\\n\";\r\n int i = 0;\r\n for (Vertex v : this.vertexList)\r\n {\r\n result += i + \": \" + v.toString();\r\n ++i;\r\n }\r\n //result = \"Printing out this Model's \" + colortList.size() + \" colors:\\n\";\r\n i = 0;\r\n for (Color c : this.colorList)\r\n {\r\n result += i + \": \" + c.toString() + \"\\n\";\r\n ++i;\r\n }\r\n //result = \"Printing out this Model's \" + lineSegmentList.size() + \" Line segments:\\n\";\r\n i = 0;\r\n for (LineSegment ls : this.lineSegmentList)\r\n {\r\n result += i + \": \" + ls.toString();\r\n ++i;\r\n }\r\n //result += \"Done printing out Model\\n\";\r\n return result;\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.47706422209739685, "alphanum_fraction": 0.5086914300918579, "avg_line_length": 21.280899047851562, "blob_id": "9ea96c506455f3c34b216b52f0f85e542a496982", "content_id": "2e5d43cde1a36ca66edf5a90dd61233b2951a8e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4142, "license_type": "no_license", "max_line_length": 101, "num_lines": 178, "path": "/Master Year 2/Operating Systems/HW3/hw3/hw3.c", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#include <unistd.h>\r\n#include <stdio.h>\r\n#include <stdlib.h>\r\n#include <sys/wait.h>\r\n\r\n#define STDIN_PIPE 0 /* pipe index for the fd to read from */\r\n#define STDOUT_PIPE 1 /* pipe index for the fd to write to */\r\n/*\r\nThe parent process creates both pipe objects, the first, second, and third\r\n child processes.\r\n \r\n*/\r\n\r\nint main(int argc, char **argv)\r\n{\r\n\t//must be exactly 3 command line arguments\r\n\tif(argc == 4)\r\n\t{\r\n\t\t\r\n\t\tint pipe_one[2]; /* pipe_one */\r\n\t\tint pipe_two[2]; /* pipe_two */\r\n\t\tint fork_rv1;\r\n\t\t\r\n\t\t/* Step 1: create a pipe_one */\r\n\t\tif ( pipe(pipe_one) == -1 )\r\n\t\t{\r\n\t\t perror(\"pipe\");\r\n\t\t}\r\n\t\tprintf(\"pid=%d pipe one! It is file descriptors: { %d %d }\\n\", getpid(), pipe_one[0], pipe_one[1]);\r\n\t\t\r\n\t\t/* Step 1: create pipe_two */\r\n\t\tif ( pipe(pipe_two) == -1 )\r\n\t\t{\r\n\t\t perror(\"pipe\");\r\n\t\t}\r\n\t\tprintf(\"pid=%d pipe two! 
It is file descriptors: { %d %d }\\n\", getpid(), pipe_two[0], pipe_two[1]);\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\tfork_rv1 = fork(); /* Step 2: create process_one */\r\n\t\tif ( fork_rv1 == -1 ) /* check for error */\r\n\t\t{\r\n\t\t perror(\"fork\");\r\n\t\t}\r\n\t\telse if ( fork_rv1 > 0 ) /* parent */\r\n\t\t{\r\n\t\t\t//second prcoess\r\n\t\t\tint fork_rv2;\r\n\r\n\t\t\tprintf(\"pid=%d is the parent of child 1 with pid=%d\\n\", getpid(), fork_rv1);\r\n\t\t\tfork_rv2 = fork(); /* Step 3: create process_2 */\r\n\t\t\tif ( fork_rv2 == -1 ) /* check for error */\r\n\t\t\t{\r\n\t\t\t perror(\"fork\");\r\n\t\t\t}\r\n\t\t\telse if ( fork_rv2 > 0 ) /* parent */\r\n\t\t\t{\r\n\t\t\t\t//third prcoess\r\n\t\t\t\tint fork_rv3;\r\n\r\n\t\t\t\tprintf(\"pid=%d is the parent of child 2 with pid=%d\\n\", getpid(), fork_rv2);\r\n\t\t\t\tfork_rv3 = fork(); /* Step 3: create another new process */\r\n\t\t\t\tif ( fork_rv3 == -1 ) /* check for error */\r\n\t\t\t\t{\r\n\t\t\t\t\tperror(\"fork\");\r\n\t\t\t\t}\r\n\t\t\t\telse if(fork_rv3 > 0) /* parent */\r\n\t\t\t\t{\r\n\t\t\t\t\tint wait_rv;\r\n\r\n\t\t\t\t\tprintf(\"pid=%d is the parent of child 3 with pid=%d\\n\", getpid(), fork_rv3);\r\n\t\t\t\t\t/* Step 4: parent calls close() on its pipe descriptors 3 & 4, and 5 & 6 */\r\n\t\t\t\t\tclose(3);\r\n\t\t\t\t\tclose(4);\r\n\t\t\t\t\tclose(5);\r\n\t\t\t\t\tclose(6);\r\n\t\t\t\t\t\r\n\t\t\t\t\t\r\n\t\t\t\t\twait_rv = wait(NULL);\r\n\t\t\t\t\tprintf(\"pid=%d is done waiting for %d. Wait returned: %d\\n\", getpid(), fork_rv1, wait_rv);\r\n\t\t\t\t\twait_rv = wait(NULL);\r\n\t\t\t\t\tprintf(\"pid=%d is done waiting for %d. Wait returned: %d\\n\", getpid(), fork_rv2, wait_rv);\r\n\t\t\t\t\twait_rv = wait(NULL);\r\n\t\t\t\t\t\r\n\t\t\t\t\tprintf(\"pid=%d is done waiting for %d. Wait returned: %d\\n\", getpid(), fork_rv3, wait_rv);\r\n\t\t\t\t\tprintf(\"The pipelines is done with its work.\\n\");\r\n\r\n\t\t\t\t}\r\n\t\t\t\telse /* child 3 */\r\n\t\t\t\t{\r\n\t\t\t\t\t//close 3 & 4 since only want second pipe\r\n\t\t\t\t\tclose(3);\r\n\t\t\t\t\tclose(4);\r\n\t\t\t\t\t\r\n\t\t\t\t\tchar *arglist[2];\r\n\r\n\t\t\t\t\tprintf(\"pid=%d is child 3\\n\", getpid());\r\n\t\t\t\t\t/* child 3 calls close(6), close(0), dup(5), close(5) */\r\n\t\t\t\t\tclose(6);\r\n\t\t\t\t\tclose(0);\r\n\t\t\t\t\tdup(5);\r\n\t\t\t\t\tclose(5);\r\n\r\n\t\t\t\t\t//get arguments\r\n\t\t\t\t\targlist[0] = argv[1];\r\n\t\t\t\t\targlist[1] = 0;\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\r\n\t\t\t\t\texecvp( argv[1], arglist );\r\n\t\t\t\t\tperror(\"execvp prog1\");\r\n\t\t\t\t\texit(1);\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t}\r\n\t\t\t }\r\n\t\t\t else /* child 2 */\r\n\t\t\t {\r\n\t\t\t\tchar *arglist[2];\r\n\r\n\t\t\t\tprintf(\"pid=%d is child 2\\n\", getpid());\r\n\t\t\t\t\r\n\t\t\t\t//handles input from first pipe\r\n\t\t\t\t/* child 2 calls close(4), close(0), dup(3), close(3) */\r\n\t\t\t\tclose(4);\r\n\t\t\t\tclose(0);\r\n\t\t\t\tdup(3);\r\n\t\t\t\tclose(3);\r\n\t\t\t\t\r\n\t\t\t\t//handles output to second pipe\r\n\t\t\t\t/* child 2 calls close(5), close(1), dup(6), close(6) */\r\n\t\t\t\tclose(5);\r\n\t\t\t\tclose(1);\r\n\t\t\t\tdup(6);\r\n\t\t\t\tclose(6);\r\n\r\n\t\t\t\t//get arguments\r\n\t\t\t\targlist[0] = argv[2];\r\n\t\t\t\targlist[1] = 0 ;\r\n\t\t\t\texecvp( argv[2] , arglist );\r\n\t\t\t\tperror(\"execvp prog2\");\r\n\t\t\t\texit(1);\r\n\t\t\t\t\r\n\t\t\t\t\r\n\t\t\t }\r\n\t\t}\r\n\t\telse /* child 1 */\r\n\t\t{\r\n\t\t\t//close 5 & 6 since only want first pipe\r\n\t\t\tclose(5);\r\n\t\t\tclose(6);\r\n\t\t\tchar *arglist[2];\r\n\r\n\t\t\tprintf(\"pid=%d 
is child 1\\n\", getpid());\r\n\t\t\t\r\n\t\t\t/* Step 5: child 1 calls close(3), close(1), dup(4), close(4) */\r\n\t\t\tclose(3);\r\n\t\t\tclose(1);\r\n\t\t\tdup(4);\r\n\t\t\tclose(4);\r\n\t\t\t\r\n\t\t\t//get arguments\r\n\t\t\targlist[0] = argv[3];\r\n\t\t\targlist[1] = 0;\r\n\t\t\texecvp( argv[3] , arglist );\r\n\t\t\tperror(\"execvp prog3\");\r\n\t\t\texit(1);\r\n\t\t\t\r\n\t\t}\r\n\t\t\r\n\t\r\n\t}\r\n\telse\r\n\t{\r\n\t\t//output error\r\n\t\tprintf(\"must pass 3 command line arguments\\n\");\r\n\t\t\r\n\t}\r\n\r\n\treturn 0;\r\n}" }, { "alpha_fraction": 0.6285714507102966, "alphanum_fraction": 0.6387755274772644, "avg_line_length": 14.333333015441895, "blob_id": "a6afd7316ccdb87af25b3218d056f0fe777a37ed", "content_id": "116c8b5e4d451ed1aad231f6eebe377a137c2527", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 490, "license_type": "no_license", "max_line_length": 59, "num_lines": 30, "path": "/Year 3/Assignment3/Add_Command.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n#include \"Add_Command.h\"\r\n\r\nAdd_Command::Add_Command (Stack <int> & s):\r\nBinary_Op_Command (s),\r\nprecedence(2)\r\n{\r\n\t//constructor\r\n}\r\n\r\nAdd_Command::~Add_Command (void)\r\n{\r\n\t//destructor\t\r\n}\r\n\r\n\r\nint Add_Command::evaluate (int n1, int n2) const\r\n{\t\r\n\t\r\n\t//return the addition of the two integers\r\n\treturn n1 + n2;\r\n}\r\n\r\nint Add_Command::prec (void) const\r\n{\r\n\treturn precedence;\r\n}\r\n" }, { "alpha_fraction": 0.5212355256080627, "alphanum_fraction": 0.5227799415588379, "avg_line_length": 20.13675308227539, "blob_id": "67e9fda5ca7d5947b5bf1647b4040a71912f9947", "content_id": "3baa4e28016f3988760af44708d7625cac77a605", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2590, "license_type": "no_license", "max_line_length": 68, "num_lines": 117, "path": "/Master Year 1/Programming Languages and Compilers/HW2/hw2/Environment.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/**\r\n An Environment object holds <variable,value> pairs.\r\n*/\r\nimport java.util.ArrayList;\r\n\r\npublic class Environment\r\n{\r\n   private ArrayList<String> variables;\r\n   private ArrayList<Value> values;\r\n\r\n   // Constructors\r\n   public Environment()\r\n   {\r\n      variables = new ArrayList<String>();\r\n      values = new ArrayList<Value>();\r\n   }\r\n\r\n   public Environment(Environment env)\r\n   {\r\n      // copy the <variable,value> pairs from env\r\n      variables = new ArrayList<String>(env.variables);\r\n      values = new ArrayList<Value>(env.values);\r\n   }\r\n\r\n   /**\r\n      Add a <variable, value> pair to this environment object.\r\n   */\r\n   public void add(String variable, Value value)\r\n   {\r\n      variables.add(variable);\r\n      values.add(value);\r\n   }\r\n\r\n   /**\r\n      Look up variable in the environment and\r\n      return its associated value.\r\n\r\n      Returns null if variable is not found.\r\n   */\r\n   public Value lookUp(String variable)\r\n   {\r\n      int i;\r\n      for (i = 0; i < variables.size(); i++)\r\n         if ( variable.equals(variables.get(i)) )\r\n            break;\r\n\r\n      if ( i < variables.size() )\r\n         return values.get(i);\r\n      else\r\n      {\r\n         return null; // variable cannot be found\r\n      }\r\n   }\r\n\r\n   /**\r\n      Look up variable in the environment.\r\n      Return true if the variable is in it,\r\n      otherwise return false.\r\n   */\r\n   public boolean defined(String variable)\r\n   {\r\n      int i;\r\n      for (i = 0; i < variables.size(); 
i++)\r\n         if ( variable.equals(variables.get(i)) )\r\n            break;\r\n\r\n      if ( i < variables.size() )\r\n         return true;\r\n      else\r\n      {\r\n         return false;\r\n      }\r\n   }\r\n\r\n\r\n   /**\r\n      Update the value associated with variable in the environment.\r\n      Return true if the update is successful,\r\n      return false if variable is not found.\r\n   */\r\n   public boolean update(String variable, Value value)\r\n   {\r\n      int i;\r\n      for (i = 0; i < variables.size(); i++)\r\n         if ( variable.equals(variables.get(i)) )\r\n            break;\r\n\r\n      if ( i < variables.size() )\r\n      {\r\n         values.set(i, value);\r\n         return true;\r\n      }\r\n      else\r\n      {\r\n         return false;\r\n      }\r\n   }\r\n\r\n\r\n   /**\r\n      Convert the contents of the environment into a string.\r\n      This is mainly for debugging purposes.\r\n   */\r\n   public String toString()\r\n   {\r\n      String result = \"\";\r\n\r\n      result += \"[Global Environment\";\r\n\r\n      for (int i = 0; i < variables.size(); i++)\r\n      {\r\n         result += \"\\n   \" + variables.get(i) + \" = \" + values.get(i);\r\n      }\r\n      result += \"]\";\r\n\r\n      return result;\r\n   }\r\n}\r\n" }, { "alpha_fraction": 0.6660482287406921, "alphanum_fraction": 0.6697587966918945, "avg_line_length": 18.80769157409668, "blob_id": "8b7aeaf4cf2630258d3fcec8ac0b8c4b2d718a37", "content_id": "ef914f3821d6019b973daa84df8cbda15df2735f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 539, "license_type": "no_license", "max_line_length": 59, "num_lines": 26, "path": "/Year 3/Assignment 4/Modulus_Expr_Node.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n#ifndef _MODULUS_EXPR_NODE\r\n#define _MODULUS_EXPR_NODE\r\n\r\n#include \"Binary_Expr_Node.h\"\r\n#include \"Expr_Node_Visitor.h\"\r\n\r\nclass Modulus_Expr_Node : public Binary_Expr_Node\r\n{\r\n\tpublic:\r\n\t\tModulus_Expr_Node(void);\r\n\t\tvirtual ~Modulus_Expr_Node(void);\r\n\t\t\r\n\t\t//does modulus operation on the operands\r\n\t\tvirtual int calculate(int num1, int num2);\r\n\t\t\r\n\t\t//visits the node\r\n\t\tvirtual void accept (Expr_Node_Visitor & v);\r\n\t\r\n};\r\n#endif" }, { "alpha_fraction": 0.4821428656578064, "alphanum_fraction": 0.4972098171710968, "avg_line_length": 30, "blob_id": "9e43f2a56e9abdc1ce6d3bbc4101ae6d640bcc0b", "content_id": "7ec240b729bc15deaf2c199b50bf2931af7a262e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3584, "license_type": "no_license", "max_line_length": 83, "num_lines": 112, "path": "/Master Year 1/Computer Graphics/HW4/renderer/models/RingSector.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.models;\r\nimport renderer.scene.*;\r\n\r\n/**\r\n   Create a wireframe model of a sector of a ring (an annulus)\r\n   in the xy-plane centered at the origin.\r\n<p>\r\n   See <a href=\"https://en.wikipedia.org/wiki/Annulus_(mathematics)\" target=\"_top\">\r\n                https://en.wikipedia.org/wiki/Annulus_(mathematics)</a>\r\n<p>\r\n   See <a href=\"https://en.wikipedia.org/wiki/Circular_sector\" target=\"_top\">\r\n                https://en.wikipedia.org/wiki/Circular_sector</a>\r\n\r\n   @see Ring\r\n*/\r\npublic class RingSector extends Model\r\n{\r\n   /**\r\n      Create half a ring (annulus) in the xy-plane\r\n      with outer radius 1, inner radius 0.33, with 7\r\n      spokes coming out of the center, and with 5\r\n      concentric circles.\r\n   */\r\n   public RingSector( )\r\n   {\r\n      
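// delegate to the general constructor below: half ring (angles 0 to PI), outer radius 1.0, inner radius 0.33, 5 concentric circles, 7 spokes\r\n      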
this(1.0, 0.33, 0, Math.PI, 5, 7);\r\n   }\r\n\r\n\r\n   /**\r\n      Create a sector of a ring (annulus) in the xy-plane\r\n      with outer radius {@code r1}, inner radius {@code r2},\r\n      with {@code k} spokes coming out of the center, and\r\n      with {@code n} concentric circles.\r\n      <p>\r\n      If there are {@code k} spokes, then each (partial) circle\r\n      around the center will have {@code k-1} line segments.\r\n      If there are {@code n} concentric circles around the center,\r\n      then each spoke will have {@code n-1} line segments.\r\n      <p>\r\n      There must be at least four spokes and at least two concentric circles.\r\n\r\n      @param r1      outer radius of the ring\r\n      @param r2      inner radius of the ring\r\n      @param theta1  beginning angle of the sector\r\n      @param theta2  ending angle of the sector\r\n      @param n       number of concentric circles\r\n      @param k       number of spokes in the ring\r\n   */\r\n   public RingSector(double r1, double r2,\r\n                     double theta1, double theta2,\r\n                     int n, int k)\r\n   {\r\n      super(\"Ring Sector\");\r\n\r\n      if (n < 2) n = 2;\r\n      if (k < 4) k = 4;\r\n\r\n      // Create the ring's geometry.\r\n\r\n      double deltaR = (r1 - r2) / (n - 1);\r\n      double deltaTheta = (theta2 - theta1) / (k - 1);\r\n\r\n      // An array of vertices to be used to create line segments.\r\n      Vertex[][] v = new Vertex[n][k];\r\n\r\n      // Create all the vertices.\r\n      for (int j = 0; j < k; ++j) // choose a spoke (an angle)\r\n      {\r\n         double c = Math.cos(theta1 + j * deltaTheta);\r\n         double s = Math.sin(theta1 + j * deltaTheta);\r\n         for (int i = 0; i < n; ++i) // move along the spoke\r\n         {\r\n            double ri = r2 + i * deltaR;\r\n            v[i][j] = new Vertex(ri * c,\r\n                                 ri * s,\r\n                                 0);\r\n         }\r\n      }\r\n\r\n      // Add all of the vertices to this model.\r\n      for (int i = 0; i < n; ++i)\r\n      {\r\n         for (int j = 0; j < k; ++j)\r\n         {\r\n            addVertex( v[i][j] );\r\n         }\r\n      }\r\n\r\n      // Create line segments around each concentric ring.\r\n      for (int i = 0; i < n; ++i) // choose a ring\r\n      {\r\n         for (int j = 0; j < k - 1; ++j)\r\n         {  //                              v[i][j]       v[i][j+1]\r\n            addLineSegment(new LineSegment( (i * k) + j,  (i * k) + (j+1) ));\r\n         }\r\n      }\r\n\r\n      // Create the spokes, connecting the inner circle to the outer circle.\r\n      for (int j = 0; j < k; ++j) // choose a spoke\r\n      {\r\n         for (int i = 0; i < n - 1; ++i)\r\n         {  //                              v[i][j]      v[i+1][j]\r\n            addLineSegment(new LineSegment( (i * k) + j, ((i+1) * k) + j ));\r\n         }\r\n      }\r\n   }\r\n}//RingSector\r\n" }, { "alpha_fraction": 0.5742574334144592, "alphanum_fraction": 0.6257425546646118, "avg_line_length": 21.954545974731445, "blob_id": "c748b9815a1e1fc549045bf3fc2975e00a222910", "content_id": "795128557175566fb93a11a5a69e81c3e50f1405", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 505, "license_type": "no_license", "max_line_length": 49, "num_lines": 22, "path": "/Master Year 2/Operating Systems/HW3/hw3/filters/Makefile", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#\n# Type \n#    make\n# to compile all the programs in this folder.\n#\nCC     = gcc\nCFLAGS = -g -std=c99 -pedantic -Wall -Wextra\nRM     = rm -fv\n\n# See Section 5.4.3 of \"GNU Make Manual\"\n# 08 July 2002, GNU make Version 3.80.\nall: $(patsubst %.c,%,$(wildcard *.c))\n\n# Use a simple suffix rule to compile any c file.\n# See Section 11.7 of \"GNU Make Manual\"\n# 08 July 2002, GNU make Version 3.80.\n.c:\n\t$(CC) $(CFLAGS) -o $@ $<\n\n# delete only the files that we made\nclean:\n\t$(RM) $(patsubst %.c,%,$(wildcard *.c))\n" }, { "alpha_fraction": 0.6701754331588745, "alphanum_fraction": 0.6701754331588745, 
"avg_line_length": 19.11111068725586, "blob_id": "4260be791fd734c4282b74abadde292c71dfda03", "content_id": "9ee6c2024117b4c73c2c9cc04b7b8253356c766e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 570, "license_type": "no_license", "max_line_length": 59, "num_lines": 27, "path": "/Year 3/Assignment3/Division_Command.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n#include \"Binary_Op_Command.h\"\r\n\r\n#ifndef _DIVISION_COMMAND_H\r\n#define _DIVISION_COMMAND_H\r\n//Division Class\r\nclass Division_Command : public Binary_Op_Command{\r\n\tpublic:\r\n\t\tDivision_Command (Stack <int> &s);\r\n\t\t\r\n\t\t~Division_Command(void);\r\n\t\t\r\n\t\t//evaluate division between the integers\r\n\t\tvirtual int evaluate (int, int) const;\r\n\t\t\r\n\t\t//returns precedence of division operator\r\n\t\tvirtual int prec (void) const;\r\n\t\t\r\n\tprivate:\r\n\t\tint precedence;\r\n\t\t\r\n};\r\n\t\t\r\n#endif\r\n" }, { "alpha_fraction": 0.5222615003585815, "alphanum_fraction": 0.5882887244224548, "avg_line_length": 18.010101318359375, "blob_id": "2f57034c3652dc93c210b58a77a12ead144a9083", "content_id": "9bf7f116f09ab4ffe5d1d25a76c7cf2f8cc1c77b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9905, "license_type": "no_license", "max_line_length": 75, "num_lines": 495, "path": "/Year 2/Project 3/Driver.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "//Driver File\r\n#include \"Sort.h\"\r\n#include \"newSort.h\"\r\n#include \"InsertionSort.h\"\r\n#include \"QuickSort.h\"\r\n#include \"HeapSort.h\"\r\n#include \"MergeSort.h\"\r\n#include <stdlib.h>\r\n#include <iostream>\r\n\r\n\r\nint randomFunct()\r\n{\r\n\tint randNum = rand() %20000 + 1;\r\n\treturn randNum;\r\n}\r\n\r\nvoid printArray(int a[], int size)\r\n{\r\n\tint t;\r\n\tfor (t = 0; t < size;t++)\r\n\t\tstd::cout << a[t] << \", \";\r\n\tstd::cout << std::endl;\r\n}\r\n\r\nvoid hundred()\r\n{\r\n\tSort * insert = new InsertionSort;\r\n\tnewSort * quick = new QuickSort;\r\n\tSort * heap = new HeapSort;\r\n\tnewSort * merge = new MergeSort;\r\n\t\r\n\tint *array = new int[100];\r\n\t\r\n\t//store numbers from random funct in array\r\n\tfor (int i = 0; i < 100; i++)\r\n\t\tarray[i] = randomFunct();\r\n\t//use temp array for sorting\r\n\tint temp[100];\r\n\t\r\n\tfor (int b = 0; b < 100; b++)\r\n\t\ttemp[b] = array[b];\r\n\t//do unsorted\r\n\tinsert->sort(temp,100);\r\n\t//printArray(temp,100);\r\n\t\r\n\t//do sorted\r\n\tinsert->sort(temp,100);\r\n\t//printArray(temp,100);\r\n\t\r\n\t//QUICK SORT\r\n\t//do unsorted\r\n\tfor (int b = 0; b < 100; b++)\r\n\t\ttemp[b] = array[b];\r\n\tquick ->sort(temp,99,0);\r\n\t//printArray(temp,100);\r\n\t\r\n\t//do sorted\r\n\tquick->sort(temp,99,0);\r\n\t\r\n\t//HEAP SORT\r\n\t//do unsorted\r\n\tfor (int b = 0; b < 100; b++)\r\n\t\ttemp[b] = array[b];\r\n\theap -> sort(temp,100);\r\n\t//printArray(temp,100);\r\n\t\t\r\n\t//do sorted\r\n\theap ->sort(temp,100);\r\n\t\r\n\t//MERGESORT\r\n\t//do unsorted\r\n\tfor (int b = 0; b < 100; b++)\r\n\t\ttemp[b] = array[b];\r\n\tmerge ->sort(temp,0,99);\r\n\t//printArray(temp,100);\r\n\t//do sorted\r\n\tmerge ->sort(temp,0,99);\r\n\t\r\n\tstd::cout << \"Array size of 100 sorted\" << std::endl;\r\n\t\r\n\tdelete insert;\r\n\tdelete quick;\r\n\tdelete heap;\r\n\tdelete merge;\r\n\tdelete [] 
array;\r\n\t\r\n}\r\n\r\nvoid fiveHundred()\r\n{\r\n\tSort * insert = new InsertionSort;\r\n\tnewSort * quick = new QuickSort;\r\n\tSort * heap = new HeapSort;\r\n\tnewSort * merge = new MergeSort;\r\n\t\r\n\tint *array = new int[500];\r\n\t\r\n\t//store numbers from random funct in array\r\n\tfor (int i = 0; i < 500; i++)\r\n\t\tarray[i] = randomFunct();\r\n\t//use temp array for sorting\r\n\tint temp[500];\r\n\t\r\n\tfor (int b = 0; b < 500; b++)\r\n\t\ttemp[b] = array[b];\r\n\t//do unsorted\r\n\tinsert->sort(temp,500);\r\n\t//printArray(temp,500);\r\n\t\r\n\t//do sorted\r\n\tinsert->sort(temp,500);\r\n\t//printArray(temp,500);\r\n\t\r\n\t//QUICK SORT\r\n\t//do unsorted\r\n\tfor (int b = 0; b < 500; b++)\r\n\t\ttemp[b] = array[b];\r\n\tquick ->sort(temp,499,0);\r\n\t//printArray(temp,500);\r\n\t\r\n\t//do sorted\r\n\tquick->sort(temp,499,0);\r\n\t\r\n\t//HEAP SORT\r\n\t//do unsorted\r\n\tfor (int b = 0; b < 500; b++)\r\n\t\ttemp[b] = array[b];\r\n\theap -> sort(temp,500);\r\n\t//printArray(temp,500);\r\n\t//do sorted\r\n\theap ->sort(temp,500);\r\n\t\r\n\t//MERGESORT\r\n\t//do unsorted\r\n\tfor (int b = 0; b < 500; b++)\r\n\t\ttemp[b] = array[b];\r\n\tmerge ->sort(temp,0,499);\r\n\t//printArray(temp,500);\r\n\t//do sorted\r\n\tmerge ->sort(temp,0,499);\r\n\t\r\n\tstd::cout << \"Array size of 500 sorted\" << std::endl;\r\n\tdelete insert;\r\n\tdelete quick;\r\n\tdelete heap;\r\n\tdelete merge;\r\n\tdelete [] array;\r\n}\r\n\r\nvoid thousand()\r\n{\r\n\tSort * insert = new InsertionSort;\r\n\tnewSort * quick = new QuickSort;\r\n\tSort * heap = new HeapSort;\r\n\tnewSort * merge = new MergeSort;\r\n\t\r\n\tint* array = new int[1000];\r\n\t\r\n\t//store numbers from random funct in array\r\n\tfor (int i = 0; i < 1000; i++)\r\n\t\tarray[i] = randomFunct();\r\n\t//use temp array for sorting\r\n\tint temp[1000];\r\n\t\r\n\tfor (int b = 0; b < 1000; b++)\r\n\t\ttemp[b] = array[b];\r\n\t//do unsorted\r\n\tinsert->sort(temp,1000);\r\n\t//printArray(temp,1000);\r\n\t\r\n\t//do sorted\r\n\tinsert->sort(temp,1000);\r\n\t//printArray(temp,1000);\r\n\t\r\n\t\r\n\t//QUICK SORT\r\n\t//do unsorted\r\n\tfor (int b = 0; b < 1000; b++)\r\n\t\ttemp[b] = array[b];\r\n\tquick ->sort(temp,999,0);\r\n\t//printArray(temp,1000);\r\n\t\r\n\t//do sorted\r\n\tquick->sort(temp,999,0);\r\n\t\r\n\t//HEAP SORT\r\n\t//do unsorted\r\n\tfor (int b = 0; b < 1000; b++)\r\n\t\ttemp[b] = array[b];\r\n\theap -> sort(temp,1000);\r\n\t//printArray(temp,1000);\r\n\t//do sorted\r\n\theap ->sort(temp,1000);\r\n\t\r\n\t//MERGESORT\r\n\t//do unsorted\r\n\tfor (int b = 0; b < 1000; b++)\r\n\t\ttemp[b] = array[b];\r\n\tmerge ->sort(temp,0,999);\r\n\t//printArray(temp,1000);\r\n\t//do sorted\r\n\tmerge ->sort(temp,0,999);\r\n\t\r\n\t\r\n\tstd::cout << \"Array size of 1,000 sorted\" << std::endl;\r\n\tdelete insert;\r\n\tdelete quick;\r\n\tdelete heap;\r\n\tdelete merge;\r\n\tdelete [] array;\r\n}\r\n\r\nvoid twoThousand()\r\n{\r\n\tSort * insert = new InsertionSort;\r\n\tnewSort * quick = new QuickSort;\r\n\tSort * heap = new HeapSort;\r\n\tnewSort * merge = new MergeSort;\r\n\t\r\n\tint* array = new int[2000];\r\n\t\r\n\t//store numbers from random funct in array\r\n\tfor (int i = 0; i < 2000; i++)\r\n\t\tarray[i] = randomFunct();\r\n\t//use temp array for sorting\r\n\tint* temp = new int[2000];\r\n\t\r\n\tfor (int b = 0; b < 2000; b++)\r\n\t\ttemp[b] = array[b];\r\n\t//do unsorted\r\n\tinsert->sort(temp,2000);\r\n\t\r\n\t//printArray(temp,2000);\r\n\t//do 
sorted\r\n\t\r\n\tinsert->sort(temp,2000);\r\n\t//printArray(temp,2000);\r\n\t\r\n\t\r\n\t//QUICK SORT\r\n\t//do unsorted\r\n\tfor (int b = 0; b < 2000; b++)\r\n\t\ttemp[b] = array[b];\r\n\tquick ->sort(temp,1999,0);\r\n\t//printArray(temp,2000);\r\n\t\r\n\t//do sorted\r\n\tquick->sort(temp,1999,0);\r\n\t\r\n\t//HEAP SORT\r\n\t//do unsorted\r\n\tfor (int b = 0; b < 2000; b++)\r\n\t\ttemp[b] = array[b];\r\n\theap -> sort(temp,2000);\r\n\t//printArray(temp,2000);\r\n\t//do sorted\r\n\theap ->sort(temp,2000);\r\n\t\r\n\t//MERGESORT\r\n\t//do unsorted\r\n\tfor (int b = 0; b < 2000; b++)\r\n\t\ttemp[b] = array[b];\r\n\tmerge ->sort(temp,0,1999);\r\n\t//printArray(temp,2000);\r\n\t//do sorted\r\n\tmerge ->sort(temp,0,1999);\r\n\t\r\n\tstd::cout << \"Array size of 2,000 sorted\" << std::endl;\r\n\t\r\n\tdelete insert;\r\n\tdelete quick;\r\n\tdelete heap;\r\n\tdelete merge;\r\n\tdelete [] array;\r\n\tdelete [] temp;\r\n}\r\n\r\nvoid fiveThousand()\r\n{\r\n\tSort * insert = new InsertionSort;\r\n\tnewSort * quick = new QuickSort;\r\n\tSort * heap = new HeapSort;\r\n\tnewSort * merge = new MergeSort;\r\n\t\r\n\tint* array = new int[5000];\r\n\t\r\n\t//store numbers from random funct in array\r\n\tfor (int i = 0; i < 5000; i++)\r\n\t\tarray[i] = randomFunct();\r\n\t//use temp array for sorting\r\n\tint* temp = new int[5000];\r\n\t\r\n\tfor (int b = 0; b < 5000; b++)\r\n\t\ttemp[b] = array[b];\r\n\t//do unsorted\r\n\tinsert->sort(temp,5000);\r\n\t//printArray(temp,5000);\r\n\t\r\n\t//do sorted\r\n\tinsert->sort(temp,5000);\r\n\t//printArray(temp,5000);\r\n\t\r\n\t//QUICK SORT\r\n\t//do unsorted\r\n\tfor (int b = 0; b < 5000; b++)\r\n\t\ttemp[b] = array[b];\r\n\tquick ->sort(temp,4999,0);\r\n\t//printArray(temp,5000);\r\n\t\r\n\t//do sorted\r\n\tquick->sort(temp,4999,0);\r\n\t\r\n\t//HEAP SORT\r\n\t//do unsorted\r\n\tfor (int b = 0; b < 5000; b++)\r\n\t\ttemp[b] = array[b];\r\n\theap -> sort(temp,5000);\r\n\t//printArray(temp,5000);\r\n\t//do sorted\r\n\theap ->sort(temp,5000);\r\n\t\r\n\t//MERGESORT\r\n\t//do unsorted\r\n\tfor (int b = 0; b < 5000; b++)\r\n\t\ttemp[b] = array[b];\r\n\tmerge ->sort(temp,0,4999);\r\n\t//printArray(temp,5000);\r\n\t//do sorted\r\n\tmerge ->sort(temp,0,4999);\r\n\t\r\n\tstd::cout << \"Array size of 5,000 sorted\" << std::endl;\r\n\tdelete insert;\r\n\tdelete quick;\r\n\tdelete heap;\r\n\tdelete merge;\r\n\tdelete [] array;\r\n\tdelete [] temp;\r\n\t\r\n}\r\nvoid eightThousand()\r\n{\r\n\tSort * insert = new InsertionSort;\r\n\tnewSort * quick = new QuickSort;\r\n\tSort * heap = new HeapSort;\r\n\tnewSort * merge = new MergeSort;\r\n\t\r\n\tint *array= new int[8000];\r\n\t\r\n\t//store numbers from random funct in array\r\n\tfor (int i = 0; i < 8000; i++)\r\n\t\tarray[i] = randomFunct();\r\n\t//use temp array for sorting\r\n\tint *temp = new int[8000];\r\n\t\r\n\tfor (int b = 0; b < 8000; b++)\r\n\t\ttemp[b] = array[b];\r\n\t//do unsorted\r\n\tinsert->sort(temp,8000);\r\n\t//printArray(temp,8000);\r\n\t\r\n\t//do sorted\r\n\tinsert->sort(temp,8000);\r\n\t//printArray(temp,8000);\r\n\t\r\n\t//QUICK SORT\r\n\t//do unsorted\r\n\tfor (int b = 0; b < 8000; b++)\r\n\t\ttemp[b] = array[b];\r\n\tquick ->sort(temp,7999,0);\r\n\t//printArray(temp,8000);\r\n\t\r\n\t//do sorted\r\n\tquick->sort(temp,7999,0);\r\n\t\r\n\t//HEAP SORT\r\n\t//do unsorted\r\n\tfor (int b = 0; b < 8000; b++)\r\n\t\ttemp[b] = array[b];\r\n\theap -> sort(temp,8000);\r\n\t//printArray(temp,8000);\r\n\t//do sorted\r\n\theap ->sort(temp,8000);\r\n\t\r\n\t//MERGESORT\r\n\t//do unsorted\r\n\tfor (int b = 0; b < 
8000; b++)\r\n\t\ttemp[b] = array[b];\r\n\tmerge ->sort(temp,0,7999);\r\n\t//printArray(temp,8000);\r\n\t//do sorted\r\n\tmerge ->sort(temp,0,7999);\r\n\t\r\n\t\r\n\tstd::cout << \"Array size of 8,000 sorted\" << std::endl;\r\n\tdelete insert;\r\n\tdelete quick;\r\n\tdelete heap;\r\n\tdelete merge;\r\n\tdelete [] array;\r\n\tdelete [] temp;\r\n}\r\n\r\nvoid tenThousand()\r\n{\r\n\tSort * insert = new InsertionSort;\r\n\tnewSort * quick = new QuickSort;\r\n\tSort * heap = new HeapSort;\r\n\tnewSort * merge = new MergeSort;\r\n\t\r\n\tint* array= new int[10000];\r\n\t\r\n\t//store numbers from random funct in array\r\n\tfor (int i = 0; i < 10000; i++)\r\n\t\tarray[i] = randomFunct();\r\n\t//use temp array for sorting\r\n\tint *temp = new int[10000];\r\n\t\r\n\tfor (int b = 0; b < 10000; b++)\r\n\t\ttemp[b] = array[b];\r\n\t//do unsorted\r\n\tinsert->sort(temp,10000);\r\n\t//printArray(temp,10000);\r\n\t\r\n\t//do sorted\r\n\tinsert->sort(temp,10000);\r\n\t//printArray(temp,10000);\r\n\t\r\n\t//QUICK SORT\r\n\t//do unsorted\r\n\tfor (int b = 0; b < 10000; b++)\r\n\t\ttemp[b] = array[b];\r\n\tquick ->sort(temp,9999,0);\r\n\t//printArray(temp,10000);\r\n\t\r\n\t//do sorted\r\n\tquick->sort(temp,9999,0);\r\n\t\r\n\t//HEAP SORT\r\n\t//do unsorted\r\n\tfor (int b = 0; b < 10000; b++)\r\n\t\ttemp[b] = array[b];\r\n\theap -> sort(temp,10000);\r\n\t//printArray(temp,10000);\r\n\t//do sorted\r\n\theap ->sort(temp,10000);\r\n\t\r\n\t//MERGESORT\r\n\t//do unsorted\r\n\tfor (int b = 0; b < 10000; b++)\r\n\t\ttemp[b] = array[b];\r\n\tmerge ->sort(temp,0,9999);\r\n\t//printArray(temp,10000);\r\n\t//do sorted\r\n\tmerge ->sort(temp,0,9999);\r\n\t\r\n\tstd::cout << \"Array size of 10,000 sorted\" << std::endl;\r\n\tdelete insert;\r\n\tdelete quick;\r\n\tdelete heap;\r\n\tdelete merge;\r\n\tdelete [] array;\r\n\tdelete [] temp;\r\n}\r\n\r\n//implement a random number generator function from numbers 1 - 20,000.\r\n//and store in array to be sorted\r\n\r\n\r\n//execute sorting algorithms in input sizes: 100, 500, 1000, 2000, 5000, 8000, & 10000\r\n//create a function for each input size\r\n\t//inside the function create a dynamic array to do the sorting algorithms\r\n\t\r\n\r\n//main function\r\nint main()\r\n{\r\n\t\r\n\thundred();\r\n\t\r\n\tfiveHundred();\r\n\t\r\n\t\r\n\tthousand();\r\n\t\r\n\t\t//array size too large\r\n\t\t\t//declare temp as dynamic\r\n\ttwoThousand();\r\n\t\r\n\tfiveThousand();\r\n\teightThousand();\r\n\ttenThousand();\r\n\t\r\n\t\r\n}\r\n" }, { "alpha_fraction": 0.317220538854599, "alphanum_fraction": 0.40407854318618774, "avg_line_length": 26.17021369934082, "blob_id": "840a42544b5530a3e0ec65673e177a9a3cc99c0b", "content_id": "79aa8226dca98349b2579abbcabd727459abb7e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1324, "license_type": "no_license", "max_line_length": 46, "num_lines": 47, "path": "/Master Year 1/Computer Graphics/HW2/P.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\nimport renderer.scene.*;\r\n\r\n/**\r\n   A two-dimensional model of the letter P.\r\n*/\r\npublic class P extends Model\r\n{\r\n   /**\r\n      The letter P.\r\n   */\r\n   public P()\r\n   {\r\n      super(\"P\");\r\n\r\n      addVertex(new Vertex(0.00, 0.00, 0.0),\r\n                new Vertex(0.00, 1.00, 0.0),\r\n                new Vertex(0.75, 1.00, 0.0),\r\n                new Vertex(1.00, 0.8,  0.0),\r\n                new Vertex(1.00, 0.6,  0.0),\r\n                new Vertex(0.75, 0.4,  0.0),\r\n                new Vertex(0.25, 0.4,  0.0),\r\n                new Vertex(0.25, 0.0,  0.0));\r\n\r\n      addVertex(new Vertex(0.25, 0.8, 
0.0),\r\n new Vertex(0.75, 0.8, 0.0),\r\n new Vertex(0.75, 0.6, 0.0),\r\n new Vertex(0.25, 0.6, 0.0));\r\n\r\n addLineSegment(new LineSegment(0, 1),\r\n new LineSegment(1, 2),\r\n new LineSegment(2, 3),\r\n new LineSegment(3, 4),\r\n new LineSegment(4, 5),\r\n new LineSegment(5, 6),\r\n new LineSegment(6, 7),\r\n new LineSegment(7, 0));\r\n\r\n addLineSegment(new LineSegment( 8, 9),\r\n new LineSegment( 9, 10),\r\n new LineSegment(10, 11),\r\n new LineSegment(11, 8));\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.6734693646430969, "alphanum_fraction": 0.6734693646430969, "avg_line_length": 14.333333015441895, "blob_id": "16db2f3127e204bf8918c0a5e1aa863993d30229", "content_id": "4a71a832c9b361a97fa811ec6c3eb96bf7827be9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 98, "license_type": "no_license", "max_line_length": 27, "num_lines": 6, "path": "/Year 4/QRcodeSubmission/QRCodeGenerator/createPNG.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#ifndef CREATEPNG_H\r\n#define CREATEPNG_H\r\n\r\nvoid makeImage(int**, int);\r\n\r\n#endif // CREATEPNG_H\r\n" }, { "alpha_fraction": 0.4991482198238373, "alphanum_fraction": 0.5234242081642151, "avg_line_length": 35.269840240478516, "blob_id": "2c61ccc555bc01b72452165987fbccd8be056335", "content_id": "1e6912c7bbcd64259fc6ed886e8ce1deff95cfe2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 7044, "license_type": "no_license", "max_line_length": 86, "num_lines": 189, "path": "/Master Year 1/Computer Graphics/HW3/renderer/models/TriangularPyramid.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.models;\r\nimport renderer.scene.*;\r\n\r\n/**\r\n Create a wireframe model of a tetrahedron as a\r\n triangular pyramid with an equilateral triangle\r\n base (centered at the origin in the xz-plane)\r\n whose three vertices are connected to a 4th vertex\r\n on the positive y-axis.\r\n\r\n @see Tetrahedron\r\n*/\r\npublic class TriangularPyramid extends Model\r\n{\r\n /**\r\n Create a regular tetrahedron having side length\r\n {@code sqrt(3)/sqrt(2)}, with one face in the\r\n xz-plane with its center at the origin, and the\r\n 4th vertex on the positive y-axis at height 1.\r\n */\r\n public TriangularPyramid( )\r\n {\r\n this(Math.sqrt(3)/Math.sqrt(2)); // makes the height = 1\r\n //or\r\n //this(Math.sqrt(3)); // make the height = sqrt(2) > 1\r\n }\r\n\r\n\r\n /**\r\n Create a regular tetrahedron having side length {@code s},\r\n with one face in the xz-plane with its center at the origin,\r\n and with the 4th vertex on the positive y-axis at\r\n height {@code s*sqrt(2)/sqrt(3)}.\r\n\r\n @param s the length of the regular tetrahedron's sides\r\n */\r\n public TriangularPyramid(final double s)\r\n {\r\n this(s/Math.sqrt(3), s*Math.sqrt(2)/Math.sqrt(3));\r\n }\r\n\r\n\r\n /**\r\n Create a tetrahedron with one face being an equilateral triangle\r\n inscribed in a circle of radius {@code r} centered at the origin\r\n of the xz-plane and with the 4th vertex on the y-axis at height\r\n {@code h}.\r\n <p>\r\n If {@code h = r * sqrt(2)}, then the tetrahedron is a regular tetrahedron.\r\n with side length {@code s = r * sqrt(3)}.\r\n <p>\r\n Another way to state this is, if an equilateral triangle is inscribed\r\n in a circle of radius {@code r}, then the edge length of the triangle\r\n is {@code r*sqrt(3)} and the height of the regular tetrahedron made\r\n from the triangle is 
{@code r*sqrt(2)}.\r\n\r\n @param r radius of circle in xz-plane that the equilateral base is inscribed in\r\n @param h coordinate on the y-axis of the apex\r\n */\r\n public TriangularPyramid(final double r, final double h)\r\n {\r\n super(\"Triangular Pyramid\");\r\n\r\n // Create the tetrahedron's geometry.\r\n final double sqrt3 = Math.sqrt(3.0);\r\n addVertex(new Vertex( r, 0, 0), // three vertices around the bottom face\r\n new Vertex(-r/2, 0, r*0.5*sqrt3),\r\n new Vertex(-r/2, 0, -r*0.5*sqrt3),\r\n new Vertex( 0, h, 0)); // vertex at the top\r\n\r\n // Create 6 line segments for 3 faces.\r\n addLineSegment(new LineSegment(0, 1), // bottom face\r\n new LineSegment(1, 2),\r\n new LineSegment(2, 0),\r\n new LineSegment(0, 3), // edge 1\r\n new LineSegment(1, 3), // edge 2\r\n new LineSegment(2, 3)); // edge 3\r\n }\r\n\r\n\r\n /**\r\n Create a tetrahedron with one face being an equilateral triangle\r\n inscribed in a circle of radius {@code r} centered at the origin\r\n of the xz-plane and with the 4th vertex on the y-axis at height\r\n {@code h}.\r\n <p>\r\n If {@code h = r * sqrt(2)}, then the tetrahedron is a regular tetrahedron.\r\n with side length {@code s = r * sqrt(3)}.\r\n <p>\r\n Another way to state this is, if an equilateral triangle is inscribed\r\n in a circle of radius {@code r}, then the edge length of the triangle\r\n is {@code r*sqrt(3)} and the height of the regular tetrahedron made\r\n from the triangle is {@code r*sqrt(2)}.\r\n\r\n @param r radius of circle in xz-plane that the equilateral base is inscribed in\r\n @param h coordinate on the y-axis of the apex\r\n @param n number of lines of latitude around the body of the pyramid\r\n @param k number of triangles in the triangle fan at the top of each side\r\n */\r\n public TriangularPyramid(final double r, final double h,\r\n int n, int k)\r\n {\r\n super();\r\n\r\n if (n < 1) n = 1;\r\n if (k < 1) k = 1;\r\n\r\n // Create the pyramid's geometry.\r\n final Vertex apex = new Vertex(0, h, 0);\r\n addVertex(apex);\r\n final Vertex centerVertex = new Vertex(0, 0, 0);\r\n addVertex(centerVertex);\r\n final int apexIndex = 0;\r\n final int centerIndex = 1;\r\n int index = 2;\r\n\r\n // Create all the lines of \"longitude\" from the apex, down\r\n // to the base, and then to the center of the base.\r\n final double sqrt3 = Math.sqrt(3.0);\r\n // three vertices around the bottom face\r\n final Vertex v0 = new Vertex( r, 0, 0);\r\n final Vertex v1 = new Vertex(-r/2, 0, r*0.5*sqrt3);\r\n final Vertex v2 = new Vertex(-r/2, 0, -r*0.5*sqrt3);\r\n for (int j = 0; j < k; ++j)\r\n {\r\n double t = j * (1.0 / k);\r\n // use linear interpolation (lerp)\r\n addVertex( new Vertex(\r\n // (1-t)*v0 + t*v1\r\n (1-t)*v0.x + t*v1.x,\r\n (1-t)*v0.y + t*v1.y,\r\n (1-t)*v0.z + t*v1.z ));\r\n addVertex( new Vertex(\r\n // (1-t)*v1 + t*v2\r\n (1-t)*v1.x + t*v2.x,\r\n (1-t)*v1.y + t*v2.y,\r\n (1-t)*v1.z + t*v2.z ));\r\n addVertex( new Vertex(\r\n // (1-t)*v2 + t*v0\r\n (1-t)*v2.x + t*v0.x,\r\n (1-t)*v2.y + t*v0.y,\r\n (1-t)*v2.z + t*v0.z ));\r\n\r\n // first side\r\n addLineSegment(new LineSegment(apexIndex, index+0),\r\n new LineSegment(index+0, centerIndex));\r\n // second side\r\n addLineSegment(new LineSegment(apexIndex, index+1),\r\n new LineSegment(index+1, centerIndex));\r\n // third side\r\n addLineSegment(new LineSegment(apexIndex, index+2),\r\n new LineSegment(index+2, centerIndex));\r\n\r\n index += 3;\r\n }\r\n // Create all the lines of \"latitude\" around the pyramid, starting\r\n // from the base and working upwards.\r\n for (int 
i = 0; i < n; ++i)\r\n      {\r\n         double t = i * (1.0 / n);\r\n         // use linear interpolation (lerp)\r\n         addVertex( new Vertex(\r\n         // (1-t)*v0 + t*apex\r\n         (1-t)*v0.x + t*apex.x,\r\n         (1-t)*v0.y + t*apex.y,\r\n         (1-t)*v0.z + t*apex.z ));\r\n         addVertex( new Vertex(\r\n         // (1-t)*v1 + t*apex\r\n         (1-t)*v1.x + t*apex.x,\r\n         (1-t)*v1.y + t*apex.y,\r\n         (1-t)*v1.z + t*apex.z ));\r\n         addVertex( new Vertex(\r\n         // (1-t)*v2 + t*apex\r\n         (1-t)*v2.x + t*apex.x,\r\n         (1-t)*v2.y + t*apex.y,\r\n         (1-t)*v2.z + t*apex.z ));\r\n\r\n         addLineSegment(new LineSegment(index+0, index+1),\r\n                        new LineSegment(index+1, index+2),\r\n                        new LineSegment(index+2, index+0));\r\n\r\n         index += 3;\r\n      }\r\n   }\r\n}//TriangularPyramid\r\n" }, { "alpha_fraction": 0.5199496150016785, "alphanum_fraction": 0.527929425239563, "avg_line_length": 25.05681800842285, "blob_id": "84331b764c365782b03d505874c3939ee584772e", "content_id": "2081d807057e3176f57ede58030d99b130c46b65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2381, "license_type": "no_license", "max_line_length": 90, "num_lines": 88, "path": "/Master Year 1/Computer Graphics/HW3/renderer/models/PanelYZ.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.models;\r\nimport renderer.scene.*;\r\n\r\n/**\r\n   Create a flat wireframe checkerboard panel in the yz-plane.\r\n*/\r\npublic class PanelYZ extends Model\r\n{\r\n   /**\r\n      Create a flat checkerboard panel in the yz-plane that runs\r\n      from -1 to 1 in the y-direction and -1 to 1 in the z-direction.\r\n   */\r\n   public PanelYZ( )\r\n   {\r\n      this(-1, 1, -1, 1);\r\n   }\r\n\r\n\r\n   /**\r\n      Create a flat checkerboard panel in the yz-plane with the given dimensions.\r\n\r\n      @param yMin  location of bottom edge\r\n      @param yMax  location of top edge\r\n      @param zMin  location of back edge\r\n      @param zMax  location of front edge\r\n   */\r\n   public PanelYZ(final int yMin, final int yMax,\r\n                  final int zMin, final int zMax)\r\n   {\r\n      this(yMin, yMax, zMin, zMax, 0.0);\r\n   }\r\n\r\n\r\n   /**\r\n      Create a flat checkerboard panel parallel to the yz-plane with the given dimensions.\r\n\r\n      @param yMin  location of bottom edge\r\n      @param yMax  location of top edge\r\n      @param zMin  location of back edge\r\n      @param zMax  location of front edge\r\n      @param x     x-coordinate of the plane that holds the panel\r\n   */\r\n   public PanelYZ(final int yMin, final int yMax,\r\n                  final int zMin, final int zMax,\r\n                  final double x)\r\n   {\r\n      super(\"PanelYZ\");\r\n\r\n      // Create the checkerboard panel's geometry.\r\n\r\n      // An array of indexes to be used to create line segments.\r\n      final int[][] index = new int[(yMax-yMin)+1][(zMax-zMin)+1];\r\n\r\n      // Create the checkerboard of vertices.\r\n      int i = 0;\r\n      for (int y = yMin; y <= yMax; ++y)\r\n      {\r\n         for (int z = zMin; z <= zMax; ++z)\r\n         {\r\n            addVertex(new Vertex(x, y, z));\r\n            index[y-yMin][z-zMin] = i;\r\n            ++i;\r\n         }\r\n      }\r\n\r\n      // Create the line segments that run in the z-direction.\r\n      for (int y = 0; y <= yMax - yMin; ++y)\r\n      {\r\n         for (int z = 0; z < zMax - zMin; ++z)\r\n         {\r\n            addLineSegment(new LineSegment(index[y][z], index[y][z+1]));\r\n         }\r\n      }\r\n\r\n      // Create the line segments that run in the y-direction.\r\n      for (int z = 0; z <= zMax - zMin; ++z)\r\n      {\r\n         for (int y = 0; y < yMax - yMin; ++y)\r\n         {\r\n            addLineSegment(new LineSegment(index[y][z], index[y+1][z]));\r\n         }\r\n      }\r\n   }\r\n}//PanelYZ\r\n" }, { "alpha_fraction": 0.654275119304657, "alphanum_fraction": 0.6617100238800049, "avg_line_length": 
16.066667556762695, "blob_id": "9642670773a8a00acc9ad9341e43a945a20efd6a", "content_id": "0d4425f3838b28981e1afa206829e7cde7b0a4b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 269, "license_type": "no_license", "max_line_length": 35, "num_lines": 15, "path": "/Year 3/Assignment3/Expr_Command.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#include \"Stack.h\"\r\n\r\n#ifndef _EXPR_COMMAND\r\n#define _EXPR_COMMAND\r\n\r\n//Command Class\r\nclass Expr_Command {\r\n\tpublic:\r\n\t\t//interface used to execute \r\n\t\tvirtual void execute(void) = 0;\r\n\t\t\r\n\t\t//return precedence of value\r\n\t\tvirtual int prec(void) const = 0;\r\n};\r\n#endif" }, { "alpha_fraction": 0.6464174389839172, "alphanum_fraction": 0.6479750871658325, "avg_line_length": 16.941177368164062, "blob_id": "86d2579e929403a270848662a0717788f2d172a1", "content_id": "af2fd347941ef9e4045d9a19a686253786a85ea6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 642, "license_type": "no_license", "max_line_length": 60, "num_lines": 34, "path": "/Year 3/Assignment 4/Binary_Expr_Node.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n\r\n\r\n\r\n#ifndef _BINARY_EXPR_NODE\r\n#define _BINARY_EXPR_NODE\r\n\r\n#include \"Expr_Node.h\"\r\n\r\nclass Binary_Expr_Node : public Expr_Node\r\n{\r\n\tpublic:\r\n\t\tBinary_Expr_Node(void);\r\n\t\tvirtual ~Binary_Expr_Node(void);\r\n\t\t\r\n\t\tvirtual void accept (Expr_Node_Visitor & v);\r\n\t\t\r\n\t\t//template method\r\n\t\tvirtual int eval(void);\r\n\t\t\r\n\t\t//this method does the operation depending on the operator\r\n\t\tvirtual int calculate(int,int) = 0;\r\n\t\t\r\n\tprotected:\r\n\t\t//the binary has a right and left operation\r\n\t\tExpr_Node * right_;\r\n\t\tExpr_Node * left_;\r\n};\r\n#endif" }, { "alpha_fraction": 0.6590909361839294, "alphanum_fraction": 0.6590909361839294, "avg_line_length": 17.66666603088379, "blob_id": "2a60336a8abd02636af079bc816b28c796908d8a", "content_id": "02d14a58bd8b974dab55244aefa5bfd29ea45034", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 352, "license_type": "no_license", "max_line_length": 43, "num_lines": 18, "path": "/Year 2/Project 3/MergeSort.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "//MergeSort.h\r\n\r\n#ifndef MERGESORT_H\r\n#define MERGESORT_H\r\n#include \"newSort.h\"\r\nclass MergeSort: public newSort\r\n{\r\n\tpublic:\r\n\t\t//constructor\r\n\t\tMergeSort();\r\n\t\t//destructor\r\n\t\t~MergeSort();\r\n\t\t//create function to merge two subarrays\r\n\t\tvoid merge(int *,int, int, int);\r\n\t\t//virtual sort method\r\n\t\tvoid sort(int *, int,int);\r\n};\r\n#endif//MERGESORT_H" }, { "alpha_fraction": 0.6479490399360657, "alphanum_fraction": 0.6515332460403442, "avg_line_length": 20.83478355407715, "blob_id": "19520c38b4d353ce3b6c936e584bdf77a0d5d1d2", "content_id": "1443764180af5b0329105741cb9d14ee1c771e2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2511, "license_type": "no_license", "max_line_length": 114, "num_lines": 115, "path": "/Year 3/composition-source/Fixed_Array.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// -*- C++ -*-\n\n// Honor Pledge:\n//\n// I pledge that I have neither 
given nor received any help\n// on this assignment.\n\n#ifndef _CS507_FIXED_ARRAY_H_\n#define _CS507_FIXED_ARRAY_H_\n\n#include \"Array.h\"\n\n/**\n * @class Fixed_Array\n *\n * Implementation of a fixed size array, i.e., one that is not\n * resizeable. It is derived from Array so it can inherit \n * the Array class's methods.\n */\ntemplate <typename T, size_t N>\nclass Fixed_Array : public Array_Base <T>\n{\npublic:\n  /// Default constructor.\n  Fixed_Array (void);\n\n\t/**\n\t * Copy constructor.\n\t *\n\t * @param[in]      arr         Source array\n\t */\n  Fixed_Array (const Fixed_Array <T, N> & arr);\n\t\n  // COMMENT This method should not be here since it was part of the original\n  // bad design.\n  \n  //RESPONSE: Deleted method \n\n  /**\n   * Initializing constructor. Fills the contents of the \n   * array with the specified \\a fill value.\n   *\n   * @param[in]       fill        The fill value.\n   */\n  Fixed_Array (T fill);\n\n  /// Destructor.\n  ~Fixed_Array (void);\n\n  /**\n   * Assignment operator\n   *\n   * @param[in]       rhs         Right-hand side of operator.\n   */\n  const Fixed_Array & operator = (const Fixed_Array <T, N> & rhs);\n  \n  // COMMENT This method should not be here since it was part of the original\n  // bad design.\n  \n  //RESPONSE: Deleted method\n  \n  \n  //COMMENT This method is a violation of LSP.\n\n  //RESPONSE: Deleted method\n  \n  //redefine get function\n  virtual T get (size_t index) const;\n  \n  //redefine set function\n  virtual void set (size_t index, T value);\n  \n  //redefine find functions\n  virtual int find (T element) const;\n  \n  virtual int find (T element, size_t start) const;\n  \n  //redefine fill method\n  virtual void fill (T element);\n  \n  //return current size of array\n  virtual size_t size (void) const;\n  \n  //return max size of array\n  virtual size_t max_size (void) const;\n  \n  // COMMENT This method is a violation of LSP.\n  \n  //RESPONSE: Deleted method\n  \n  \n  // COMMENT This should not be here since you are inheriting from the\n  // base array class.\n  \n  //RESPONSE: I did not have these members in the array base class before. 
The array base has pure virtual methods.\n  \n  private:\n\t/// Pointer to the actual data.\n\tT * data_;\n\n\t/// Current size of the array.\n\tsize_t cur_size_;\n\n\t/// Maximum size of the array.\n\tsize_t max_size_;\n\n};\n\n// include the inline files\n#include \"Fixed_Array.inl\"\n\n// include the source file since template class\n#include \"Fixed_Array.cpp\"\n\n#endif   // !defined _CS507_FIXED_ARRAY_H_\n" }, { "alpha_fraction": 0.633074939250946, "alphanum_fraction": 0.633074939250946, "avg_line_length": 14.913043022155762, "blob_id": "8417f5e258a9b304e25981f9075f7485da315d7c", "content_id": "d8aae3a71dba37ba77a00e36e651d1dace0611b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 387, "license_type": "no_license", "max_line_length": 59, "num_lines": 23, "path": "/Year 3/Assignment 4/Unary_Expr_Node.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n#ifndef _UNARY_EXPR_NODE\r\n#define _UNARY_EXPR_NODE\r\n\r\n#include \"Expr_Node.h\"\r\nclass Unary_Expr_Node : public Expr_Node\r\n{\r\n\tpublic:\r\n\t\tUnary_Expr_Node(void);\r\n\t\tvirtual ~Unary_Expr_Node(void);\r\n\t\t\r\n\t\tvirtual int eval(void);\r\n\t\t\r\n\tprotected:\r\n\t\tExpr_Node * child_;\r\n\t\t\r\n};\r\n#endif" }, { "alpha_fraction": 0.5004069805145264, "alphanum_fraction": 0.5368335247039795, "avg_line_length": 36.227272033691406, "blob_id": "179e9a5590c4923b74fcecf242418de78f717863", "content_id": "141f5a6c0ad6403927c2821a4437101519ea0fc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4914, "license_type": "no_license", "max_line_length": 85, "num_lines": 132, "path": "/Master Year 1/Computer Graphics/HW2/renderer/models/Triangle.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.models;\r\nimport renderer.scene.*;\r\n\r\n/**\r\n   Create a wireframe model of a barycentrically subdivided\r\n   equilateral triangle.\r\n\r\n   See <a href=\"https://en.wikipedia.org/wiki/Barycentric_subdivision\" target=\"_top\">\r\n                https://en.wikipedia.org/wiki/Barycentric_subdivision</a>\r\n*/\r\npublic class Triangle extends Model\r\n{\r\n   /**\r\n      Create a barycentrically subdivided equilateral triangle\r\n      in the xy-plane with corners on the unit circle.\r\n      <p>\r\n      The value of {@code n} should be less than 8.\r\n\r\n      @param n  number of barycentric subdivisions of the triangle\r\n   */\r\n   public Triangle(final int n)\r\n   {\r\n      this(0, n);\r\n   }\r\n\r\n\r\n   /**\r\n      Create a barycentrically subdivided equilateral triangle\r\n      in the xy-plane with corners on the unit circle and\r\n      rotated by angle {@code theta} degrees.\r\n      <p>\r\n      The value of {@code n} should be less than 8.\r\n\r\n      @param theta  rotation (in degrees) of the equilateral triangle\r\n      @param n      number of barycentric subdivisions of this triangle\r\n   */\r\n   public Triangle(final double theta, final int n)\r\n   {\r\n      final double theta1 = theta * Math.PI/180.0;\r\n      final double theta2 = 2.0 * Math.PI / 3.0;\r\n      addVertex(new Vertex(Math.cos(theta1),\r\n                           Math.sin(theta1),\r\n                           0.0));\r\n      addVertex(new Vertex(Math.cos(theta1 + theta2),\r\n                           Math.sin(theta1 + theta2),\r\n                           0.0));\r\n      addVertex(new Vertex(Math.cos(theta1 + 2*theta2),\r\n                           Math.sin(theta1 + 2*theta2),\r\n                           0.0));\r\n      addLineSegment(new LineSegment(0, 1));\r\n      addLineSegment(new LineSegment(1, 2));\r\n      
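// the third edge closes the starting equilateral triangle; barycentric() below then subdivides it recursively\r\n      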
addLineSegment(new LineSegment(2, 0));\r\n if (n > 0)\r\n barycentric(0, 1, 2, n);\r\n }\r\n\r\n\r\n /**\r\n Recursively use barycentric subdivision to put into this\r\n {@link Model} vertices and line segments that subdivide\r\n the triangle whose vertices are indexed by {@code vIndex0},\r\n {@code vIndex1} and {@code vIndex2}.\r\n <p>\r\n The value of {@code n} should be less than 8.\r\n\r\n @param vIndex0 index of a {link Vertex} of a triangle\r\n @param vIndex1 index of a {link Vertex} of a triangle\r\n @param vIndex2 index of a {link Vertex} of a triangle\r\n @param n number of barycentric subdivisions of this triangle\r\n */\r\n public void barycentric(final int vIndex0,\r\n final int vIndex1,\r\n final int vIndex2,\r\n final int n)\r\n {\r\n final Vertex v0 = vertexList.get(vIndex0);\r\n final Vertex v1 = vertexList.get(vIndex1);\r\n final Vertex v2 = vertexList.get(vIndex2);\r\n final int index = vertexList.size();\r\n\r\n if (n > 0)\r\n {\r\n // Barycentric subdivision.\r\n // https://en.wikipedia.org/wiki/Barycentric_subdivision\r\n\r\n // Add four vertices to the model.\r\n addVertex(new Vertex(\r\n // (1/3)*v0 + (1/3)*v1 + (1/3)*v2\r\n (v0.x + v1.x + v2.x)/3.0,\r\n (v0.y + v1.y + v2.y)/3.0,\r\n (v0.z + v1.z + v2.z)/3.0));\r\n addVertex(new Vertex(\r\n // (1/2)*v0 + (1/2)*v1\r\n (v0.x + v1.x)/2.0,\r\n (v0.y + v1.y)/2.0,\r\n (v0.z + v1.z)/2.0));\r\n addVertex(new Vertex(\r\n // (1/2)*v1 + (1/2)*v2\r\n (v1.x + v2.x)/2.0,\r\n (v1.y + v2.y)/2.0,\r\n (v1.z + v2.z)/2.0));\r\n addVertex(new Vertex(\r\n // (1/2)*v2 + (1/2)*v0\r\n (v2.x + v0.x)/2.0,\r\n (v2.y + v0.y)/2.0,\r\n (v2.z + v0.z)/2.0));\r\n // Give a name to the index of each of the four new vertices.\r\n final int vIndexCenter = index;\r\n final int vIndex01 = index + 1;\r\n final int vIndex12 = index + 2;\r\n final int vIndex20 = index + 3;\r\n // 6 new line segments\r\n addLineSegment(new LineSegment(vIndex0, vIndexCenter));\r\n addLineSegment(new LineSegment(vIndex1, vIndexCenter));\r\n addLineSegment(new LineSegment(vIndex2, vIndexCenter));\r\n addLineSegment(new LineSegment(vIndex01, vIndexCenter));\r\n addLineSegment(new LineSegment(vIndex12, vIndexCenter));\r\n addLineSegment(new LineSegment(vIndex20, vIndexCenter));\r\n\r\n barycentric(vIndex0, vIndex01, vIndexCenter, n-1);\r\n barycentric(vIndex0, vIndex20, vIndexCenter, n-1);\r\n barycentric(vIndex1, vIndex01, vIndexCenter, n-1);\r\n barycentric(vIndex1, vIndex12, vIndexCenter, n-1);\r\n barycentric(vIndex2, vIndex12, vIndexCenter, n-1);\r\n barycentric(vIndex2, vIndex20, vIndexCenter, n-1);\r\n }\r\n }\r\n}//Triangle\r\n" }, { "alpha_fraction": 0.4537999927997589, "alphanum_fraction": 0.5095999836921692, "avg_line_length": 36.75968933105469, "blob_id": "ba59b58692c11f3db0b64f58ff67fc2f0274934e", "content_id": "2c510e03b7f43fc48ad29b76a9d14b5068752d17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 5004, "license_type": "no_license", "max_line_length": 92, "num_lines": 129, "path": "/Master Year 1/Computer Graphics/HW4/renderer/models/Icosahedron.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "WINDOWS-1252", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.models;\r\nimport renderer.scene.*;\r\n\r\n/**\r\n Create a wireframe model of a regular icosahedron\r\n with its center at the origin, having edge length\r\n <pre>{@code\r\n 4/(1+sqrt(5)) = 1.2361,\r\n }</pre>\r\n and with its vertices on a sphere of radius\r\n <pre>{@code\r\n 4/(1+sqrt(5)) * sin(2Pi/5) = 1.1756.\r\n }</pre>\r\n<p>\r\n See <a 
href=\"https://en.wikipedia.org/wiki/Regular_icosahedron\" target=\"_top\">\r\n https://en.wikipedia.org/wiki/Regular_icosahedron</a>\r\n\r\n @see Tetrahedron\r\n @see Cube\r\n @see Octahedron\r\n @see Dodecahedron\r\n*/\r\npublic class Icosahedron extends Model\r\n{\r\n /**\r\n Create a regular icosahedron with its center at\r\n the origin, having edge length\r\n <pre>{@code\r\n 4/(1+sqrt(5)) = 1.2361,\r\n }</pre>\r\n and with its vertices on a sphere of radius\r\n <pre>{@code\r\n 4/(1+sqrt(5)) * sin(2Pi/5) = 1.1756.\r\n }</pre>\r\n */\r\n public Icosahedron()\r\n {\r\n super(\"Icosahedron\");\r\n\r\n // Create the icosahedron's geometry.\r\n // It has 12 vertices and 30 edges.\r\n double t = (1 + Math.sqrt(5))/2; // golden ratio\r\n double r = 1/t;\r\n //https://en.wikipedia.org/wiki/Regular_icosahedron#Cartesian_coordinates\r\n // All cyclic permutations of (0, ±r, ±1).\r\n Vertex v00 = new Vertex(-r, 1, 0);\r\n Vertex v01 = new Vertex( r, 1, 0);\r\n Vertex v02 = new Vertex(-r, -1, 0);\r\n Vertex v03 = new Vertex( r, -1, 0);\r\n Vertex v04 = new Vertex( 0, -r, 1);\r\n Vertex v05 = new Vertex( 0, r, 1);\r\n Vertex v06 = new Vertex( 0, -r, -1);\r\n Vertex v07 = new Vertex( 0, r, -1);\r\n Vertex v08 = new Vertex( 1, 0, -r);\r\n Vertex v09 = new Vertex( 1, 0, r);\r\n Vertex v10 = new Vertex(-1, 0, -r);\r\n Vertex v11 = new Vertex(-1, 0, r);\r\n/*\r\n // These vertices create a icosahedron with edge length 2,\r\n // and vertices on a sphere of radius\r\n // sqrt(10+2sqrt(5))/2 = 2sin(2Pi/5) = 1.90211.\r\n //https://en.wikipedia.org/wiki/Regular_icosahedron#Cartesian_coordinates\r\n // and also\r\n //https://github.com/mrdoob/three.js/blob/master/src/geometries/IcosahedronGeometry.js\r\n // All cyclic permutations of (0, ±1, ±t).\r\n Vertex v00 = new Vertex(-1, t, 0);\r\n Vertex v01 = new Vertex( 1, t, 0);\r\n Vertex v02 = new Vertex(-1, -t, 0);\r\n Vertex v03 = new Vertex( 1, -t, 0);\r\n Vertex v04 = new Vertex( 0, -1, t);\r\n Vertex v05 = new Vertex( 0, 1, t);\r\n Vertex v06 = new Vertex( 0, -1, -t);\r\n Vertex v07 = new Vertex( 0, 1, -t);\r\n Vertex v08 = new Vertex( t, 0, -1);\r\n Vertex v09 = new Vertex( t, 0, 1);\r\n Vertex v10 = new Vertex(-t, 0, -1);\r\n Vertex v11 = new Vertex(-t, 0, 1);\r\n*/\r\n // Add the icosahedron's vertices to the model.\r\n addVertex(v00, v01, v02, v03, v04, v05, v06, v07, v08, v09, v10, v11);\r\n\r\n // Create 30 line segments.\r\n // To figure out the edges, look at the orthogonal projection in the z-direction.\r\n // https://en.wikipedia.org/wiki/Regular_icosahedron#Orthogonal_projections\r\n\r\n // The edge from v00 to v01 is the top horizontal edge.\r\n // The edge from v02 to v03 is the bottom horizontal edge.\r\n // The edge from v04 to v05 is the front vertical edge.\r\n // The edge from v06 to v07 is the back vertical edge.\r\n // The edge from v08 to v09 is the right horizontal edge.\r\n // The edge from v10 to v11 is the left horizontal edge.\r\n\r\n // Working, more or less, from the top down.\r\n addLineSegment(new LineSegment( 0, 1),\r\n new LineSegment( 0, 5),\r\n new LineSegment( 0, 7),\r\n new LineSegment( 0, 11),\r\n new LineSegment( 0, 10),\r\n new LineSegment( 1, 5),\r\n new LineSegment( 1, 7),\r\n new LineSegment( 1, 9),\r\n new LineSegment( 1, 8),\r\n new LineSegment( 5, 11),\r\n new LineSegment( 5, 9),\r\n new LineSegment( 5, 4),\r\n new LineSegment( 7, 10),\r\n new LineSegment( 7, 8),\r\n new LineSegment( 7, 6),\r\n new LineSegment(11, 10),\r\n new LineSegment(11, 4),\r\n new LineSegment(11, 2),\r\n new LineSegment( 9, 8),\r\n new 
LineSegment( 9, 4),\r\n                     new LineSegment( 9, 3),\r\n                     new LineSegment(10, 6),\r\n                     new LineSegment(10, 2),\r\n                     new LineSegment( 8, 6),\r\n                     new LineSegment( 8, 3),\r\n                     new LineSegment( 4, 2),\r\n                     new LineSegment( 4, 3),\r\n                     new LineSegment( 6, 2),\r\n                     new LineSegment( 6, 3),\r\n                     new LineSegment( 2, 3));\r\n   }\r\n}//Icosahedron\r\n" }, { "alpha_fraction": 0.5087014436721802, "alphanum_fraction": 0.5194109678268433, "avg_line_length": 13.395833015441895, "blob_id": "80deb17d0a04854acdd939a987289ee85f786037", "content_id": "4365adbc5cb6d92737317834b281f8d2abe9af22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 747, "license_type": "no_license", "max_line_length": 64, "num_lines": 48, "path": "/Year 2/Assignment #6/BubbleSort.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// \r\n//Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor \r\n// received any help on this assignment\r\n//.\r\n//\r\n// huntmk\r\n//BubbleSort.cpp\r\n#include \"BubbleSort.h\"\r\n#include <iostream>\r\n\r\n//default constructor\r\nBubbleSort::BubbleSort()\r\n{\r\n\t\r\n}\r\nBubbleSort::~BubbleSort(){\r\n}\r\n//sort method for BubbleSort\r\nvoid BubbleSort::sort(int array[], int size)\r\n{\r\n\tint f;\r\n\tint s;\r\n\tint temp;\r\n\t\r\n\tfor (f = 0; f < size - 1; f++)\r\n\t{\r\n\t\t//std::cout << f << std::endl;\r\n\t\t\r\n\t\tfor (s = 0; s < size - f - 1; s++)\r\n\t\t{\r\n\t\t\t//if array[s] is less than array[s+1], swap their places (sorts in descending order)\r\n\t\t\tif (array[s] < array[s + 1])\r\n\t\t\t{\r\n\t\t\t\ttemp = array[s];\r\n\t\t\t\tarray[s] = array[s+1];\r\n\t\t\t\tarray[s+1] = temp;\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\r\n\t\t}\r\n\t\t\r\n\t\t\r\n\t}\r\n\t\r\n}\r\n\t\t\t\t\r\n\r\n" }, { "alpha_fraction": 0.6491332650184631, "alphanum_fraction": 0.6569037437438965, "avg_line_length": 23, "blob_id": "e7a5a3a5f7f6a7fc6b51f46b383e773f3b0c6c47", "content_id": "fd1ec841ff7ca6fcdca5d6abd0a7e6c79f959c1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1673, "license_type": "no_license", "max_line_length": 140, "num_lines": 67, "path": "/Year 2/Assignment #4/Student.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// \r\n//Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor \r\n// received any help on this assignment\r\n//.\r\n//\r\n// huntmk\r\n\r\nimport java.util.Scanner;\r\nimport java.io.*;\r\npublic class Student\r\n{\r\n\t// declare private attributes\r\n\tprivate String first;\r\n\tprivate String last;\r\n\tprotected Address home;\r\n\tprivate String line1;\r\n\tprivate String line2;\r\n\tprivate String newCity;\r\n\tprivate String newState;\r\n\tprivate String zipCode;\r\n\tprivate String id;\r\n\tprivate String gpa;\r\n\t// fix addresses\r\n\t//default Constructor\r\n\tpublic Student(){\r\n\t\t\r\n\t}\r\n\t\r\n\t//overloaded constructor\r\n\tpublic Student(String first, String last,String line1,String line2, String newCity, String newState, String zipCode, String id, String gpa)\r\n\t{\r\n\t\t//initialize fields\r\n\t\tthis.first = first;\r\n\t\tthis.last = last;\r\n\t\tthis.line1 = line1;\r\n\t\tthis.line2 = line2;\r\n\t\tthis.newCity = newCity;\r\n\t\tthis.newState = newState;\r\n\t\tthis.zipCode = zipCode;\r\n\t\tthis.id = id;\r\n\t\tthis.gpa = gpa;\r\n\t\t//create new Address using fields passed to the student constructor\r\n\t\thome = new Address(this.line1,this.line2,this.newCity, this.newState, 
this.zipCode);\r\n\t\t\r\n\t}\r\n\t\r\n\t//format method\r\n\tpublic String format()\r\n\t{\r\n\t\t// return the Student in this format\r\n\t\treturn (\"ID: \" + id + \"\\t\" + \"Name: \" + first + \" \" + last + \"\\t\" + \"Address: \" + home.getAddress() + \"\\t\" + \"GPA: \" + gpa);\r\n\t\t\r\n\t}\r\n\tpublic void printMenu()\r\n\t{\r\n\t\t//shows user what to enter for options\r\n\t\tSystem.out.println(\"1. Load Students (From File)\" );\r\n\t\tSystem.out.println(\"2. Print Stack\") ;\r\n\t\tSystem.out.println(\"3. Exit Program\");\r\n\t\tSystem.out.println();\r\n\t\tSystem.out.println( \"Enter your selection: \");\r\n\t}\r\n\t\r\n}\r\n\t//method for the menu?" }, { "alpha_fraction": 0.5490981936454773, "alphanum_fraction": 0.5611222386360168, "avg_line_length": 15.438596725463867, "blob_id": "5238c8c160a0013e722155abb7f3623ca20907c2", "content_id": "cb26f2aba958502cf32964ee27b7d0b555ba5153", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 998, "license_type": "no_license", "max_line_length": 54, "num_lines": 57, "path": "/Year 2/Project 3/HeapSort.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "//HeapSort.cpp\r\n#include <iostream>\r\n#include \"HeapSort.h\"\r\n\r\n\r\n//default constructor\r\nHeapSort::HeapSort(){\r\n\t\r\n}\r\n\r\nHeapSort::~HeapSort(){\r\n}\r\nvoid HeapSort::rearrange(int array[], int size, int f)\r\n{\r\n\tint large = f;\r\n\tint left = 2*f + 1;\r\n\tint right = 2*f + 2;\r\n\t\r\n\t//if left is larger than root\r\n\tif (left < size && array[left] > array[large])\r\n\t{\r\n\t\tlarge = left;\r\n\t}\r\n\t// if right is larger\r\n\tif (right < size && array[right] > array[large])\r\n\t{\r\n\t\tlarge = right;\r\n\t}\r\n\t//if large is not the root\r\n\tif (large != f)\r\n\t{\r\n\t\tint temp = array[f];\r\n\t\tarray[f] = array[large];\r\n\t\tarray[large] = temp;\r\n\t\t\r\n\t\trearrange(array,size,large);\r\n\t}\r\n}\r\n//sort method for heap sorting\r\nvoid HeapSort::sort(int array[], int size)\r\n{\r\n\t//build heap\r\n\tfor (int a = size / 2 -1; a>= 0; a--)\r\n\t{\r\n\t\trearrange(array,size,a);\r\n\t}\r\n\t\r\n\t//traverse through heap\r\n\tfor (int a = size -1; a>=0; a--)\r\n\t{\r\n\t\tint temp = array[a];\r\n\t\tarray[a] = array[0];\r\n\t\tarray[0] = temp;\r\n\t\t\r\n\t\trearrange(array,a, 0);\r\n\t}\r\n}\r\n\r\n\r\n" }, { "alpha_fraction": 0.5780116319656372, "alphanum_fraction": 0.578374445438385, "avg_line_length": 18.264705657958984, "blob_id": "0c85983c34536fa8fe7a3dc2b83a83c2f94d674f", "content_id": "eebcfe8339a7805464b0fff3295c8d8f16784245", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2756, "license_type": "no_license", "max_line_length": 86, "num_lines": 136, "path": "/Year 3/Assignment 4/Calculator.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//Calculator Class\r\n\r\n//source file\r\n\r\n#include \"Calculator.h\"\r\n\r\n\r\nCalculator::Calculator(Expr_Tree_Builder & builder, Eval_Expr_Tree & visitor):\r\nbuild_(builder),\r\nvisitor_(visitor)\r\n{\r\n\t//constructor\r\n}\r\n\r\nCalculator::~Calculator()\r\n{\r\n\t//destructor\r\n}\r\n\r\nvoid Calculator::postfix_eval()\r\n{\r\n\t//Visit nodes\r\n\tExpr_Node * expr_tree = build_.get_expression();\r\n\t\r\n\texpr_tree->accept (visitor_);\r\n\tstd::cout << \"Visit. 
\" << std::endl;\r\n\tint result = visitor_.result();\r\n\t\t\r\n\t//STDOUT\r\n\tstd::cout << result << std::endl;\r\n\t\t\r\n}\r\n\r\n// COMMENT The name of this method is misleading since you are building\r\n// the expression, and not convert it from infix to postfix.\r\n\r\n//RESPONSE: Changed the name of the method from infix_to_postfix to 'build_expression'\r\n\r\n\r\nvoid Calculator::build_expression(const std::string & infix)\r\n{\r\n\t//create stream parser\r\n\tstd::istringstream input(infix);\r\n\t\r\n\t//current token\r\n\tstd::string token;\r\n\t\r\n\t//start expression\r\n\tbuild_.start_expression();\r\n\t\r\n\t//while the input is being read, \r\n\twhile (!input.eof())\r\n\t{\r\n\t\t\t\t\r\n\t\t//read each token from input\r\n\t\tinput >> token;\r\n\t\t\r\n\t\t\t///if the token is a add operator \r\n\t\t\tif (token == \"+\")\r\n\t\t\t{\r\n\t\t\t\t//build add operator\r\n\t\t\t\tthis->build_.build_add_operator();\r\n\t\t\t\tstd::cout << \"Build Add. \" << std::endl;\r\n\t\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t///if the token is a subtraction operator \r\n\t\t\telse if (token == \"-\")\r\n\t\t\t{\r\n\t\t\t\t//build subtract operator\r\n\t\t\t\tthis->build_.build_subtract_operator();\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t///if the token is a multiplication operator \r\n\t\t\telse if (token == \"*\")\r\n\t\t\t{\r\n\t\t\t\t//build multiply operator\r\n\t\t\t\tthis->build_.build_multiply_operator();\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t///if the token is a division operator \r\n\t\t\telse if (token == \"/\")\r\n\t\t\t{\r\n\t\t\t\t//build division operator\r\n\t\t\t\tthis->build_.build_division_operator();\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t///if the token is a modulus operator \r\n\t\t\telse if (token == \"%\")\r\n\t\t\t{\r\n\t\t\t\t//build modulus operator\r\n\t\t\t\tthis->build_.build_modulus_operator();\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t//else if the token is a left parenthesis\r\n\t\t\telse if (token == \"(\")\r\n\t\t\t{\r\n\t\t\t\t//call method to handle parentheses\r\n\t\t\t\tbuild_.build_left_parenthesis();\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t//else if token is right parenthesis\r\n\t\t\telse if (token == \")\")\r\n\t\t\t{\r\n\t\t\t\t//call method to handle parentheses\r\n\t\t\t\tbuild_.build_right_parenthesis();\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t//if its a digit\r\n\t\t\telse if (std::isdigit(token[0]))\r\n\t\t\t{\r\n\t\t\t\t//build num operator\r\n\t\t\t\tint num = std::stoi(token);\r\n\t\t\t\tstd::cout << \"Build Num. \" << std::endl;\r\n\t\t\t\tthis->build_.build_number(num);\r\n\t\t\t\t\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t\t//else throw exception\r\n\t\t\telse \r\n\t\t\t{\r\n\t\t\t\tthrow(\"Invalid input. 
\");\r\n\t\t\t}\r\n\t\r\n\t}\r\n\t\t\r\n}\r\n" }, { "alpha_fraction": 0.7019608020782471, "alphanum_fraction": 0.7019608020782471, "avg_line_length": 24.60416603088379, "blob_id": "390596dae5f94040f53b6e474dbab4b1155ae90b", "content_id": "1fc4cac28528350f97ad7ab4036c8ee99371790b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1275, "license_type": "no_license", "max_line_length": 65, "num_lines": 48, "path": "/Year 3/Assignment3/Stack_Expr_Command_Factory.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor receieved any help\r\n// on this assignment.\r\n\r\n#include \"Expr_Command_Factory.h\"\r\n\r\n#ifndef _STACK_EXPR_COMMAND_FACTORY_\r\n#define _STACK_EXPR_COMMAND_FACTORY_\r\nclass Stack_Expr_Command_Factory : public Expr_Command_Factory\r\n{\r\n\tpublic:\r\n\t\t\r\n\t\t//default constructor\r\n\t\tStack_Expr_Command_Factory(Stack <int> stack);\r\n\t\t\r\n\t\t//destructor\r\n\t\t~Stack_Expr_Command_Factory(void);\r\n\t\t\r\n\t\t//create function that returns answer from expression\r\n\t\tvirtual int answer(void);\r\n\t\t\r\n\t\t//return number command object\t\r\n\t\tvirtual Num_Command * create_num_command (int num);\r\n\t\t\r\n\t\t//return add command object\r\n\t\tvirtual Add_Command * create_add_command (void);\r\n\t\t\r\n\t\t//return subtract command object\r\n\t\tvirtual Subtract_Command * create_subtract_command (void);\r\n\t\t\r\n\t\t//return multiplication command object\r\n\t\tvirtual Multiply_Command * create_multiply_command (void);\r\n\t\t\r\n\t\t//return division command object\r\n\t\tvirtual Division_Command * create_division_command (void);\r\n\t\t\r\n\t\t//return modulus command object\r\n\t\tvirtual Modulus_Command * create_modulus_command (void);\r\n\t\t\r\n\t\t//return parenthesis command object\r\n\t\tvirtual Parenthesis_Command * create_parenthesis_command(void);\r\n\t\t\r\n\tprivate:\r\n\t\tStack <int> & stack_;\r\n};\r\n\r\n#endif" }, { "alpha_fraction": 0.6575875282287598, "alphanum_fraction": 0.6653696298599243, "avg_line_length": 16.88888931274414, "blob_id": "cbccdc5c470d971ee9ac35cd18705d39ac3032c6", "content_id": "d5e6191007c2635ded20c84b376c1085d5f2dd33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 514, "license_type": "no_license", "max_line_length": 59, "num_lines": 27, "path": "/Year 3/Assignment 4/Multiply_Expr_Node.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n#include \"Multiply_Expr_Node.h\"\r\n\r\nMultiply_Expr_Node::Multiply_Expr_Node(void)\r\n{\r\n\t//constructor\r\n}\r\nMultiply_Expr_Node::~Multiply_Expr_Node(void)\r\n{\r\n\t//destructor\r\n}\r\n\t\t\r\nint Multiply_Expr_Node::calculate(int num1, int num2)\r\n{\r\n\t//return multiplication of two numbers\r\n\treturn num1 * num2;\r\n}\r\n\r\nvoid Multiply_Expr_Node::accept (Expr_Node_Visitor & v)\r\n{\r\n\tv.Visit_Multiply_Node (*this);\r\n}\r\n\r\n\r\n" }, { "alpha_fraction": 0.6913644671440125, "alphanum_fraction": 0.6913644671440125, "avg_line_length": 30.760683059692383, "blob_id": "8fdc12549a5ce7670daf45be7520afc490812248", "content_id": "ba752bc18a8b6b92afd2159baa378c5123ab4268", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3833, "license_type": "no_license", "max_line_length": 101, 
"num_lines": 117, "path": "/Year 3/Assignment 4/Eval_Expr_Tree.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n\r\n#include \"Eval_Expr_Tree.h\"\r\n#include \"Add_Expr_Node.h\"\r\n#include \"Subtract_Expr_Node.h\"\r\n#include \"Num_Expr_Node.h\"\r\n#include \"Multiply_Expr_Node.h\"\r\n#include \"Division_Expr_Node.h\"\r\n#include \"Modulus_Expr_Node.h\"\r\n\r\nEval_Expr_Tree::Eval_Expr_Tree(void)\r\n{\r\n\t//default constructor\r\n}\r\nEval_Expr_Tree::~Eval_Expr_Tree(void)\r\n{\r\n\t//destructor\r\n}\r\n\r\nvoid Eval_Expr_Tree::Visit_Add_Node (Add_Expr_Node & node)\r\n{\r\n\t//visit left node, visit right node, then do addition\r\n\t//visit two other nodes\r\n\t\r\n\tnode.left_leaf->accept (*this);\r\n\tnode.right_leaf->accept(*this);\r\n\t\t\r\n}\r\nvoid Eval_Expr_Tree::Visit_Subtract_Node(Subtract_Expr_Node & node)\r\n{\r\n // COMMENT: You are not using the visitor pattern correctly.\r\n // Instead, you have mixed the visitor with the composite version\r\n // of evaluate. You are to visit the left and right node via the\r\n // accept method to correctly implement the visitor pattern.\r\n // (e.g., left->accept (*this))\r\n \r\n//RESPONSE: Instead of calling the eval method, I call the accept method that then visits the node\r\n \r\n\t//visit left node, visit right node, then do subtraction\r\n\tnode.left_leaf->accept (*this);\r\n\tnode.right_leaf->accept(*this);\r\n\t\t\r\n}\r\nvoid Eval_Expr_Tree::Visit_Number_Node(Num_Expr_Node & node)\r\n{\r\n // COMMENT: You are not using the visitor pattern correctly.\r\n // Instead, you have mixed the visitor with the composite version\r\n // of evaluate. You are to visit the left and right node via the\r\n // accept method to correctly implement the visitor pattern.\r\n // (e.g., left->accept (*this))\r\n \r\n //RESPONSE: Instead of calling the eval method, I call the accept method that then visits the node\r\n\t//return number\r\n\t\r\n\t//node.left_leaf->accept (*this);\r\n\t//node.right_leaf->accept(*this);\r\n\t\r\n}\r\nvoid Eval_Expr_Tree::Visit_Multiply_Node(Multiply_Expr_Node & node)\r\n{\r\n // COMMENT: You are not using the visitor pattern correctly.\r\n // Instead, you have mixed the visitor with the composite version\r\n // of evaluate. You are to visit the left and right node via the\r\n // accept method to correctly implement the visitor pattern.\r\n // (e.g., left->accept (*this))\r\n \r\n //RESPONSE: Instead of calling the eval method, I call the accept method that then visits the nodes\r\n \r\n \r\n \r\n\t//visit left node, visit right node, then do multiplication\r\n\tnode.left_leaf->accept (*this);\r\n\tnode.right_leaf->accept(*this);\r\n\t\r\n}\r\nvoid Eval_Expr_Tree::Visit_Division_Node(Division_Expr_Node & node)\r\n{\r\n // COMMENT: You are not using the visitor pattern correctly.\r\n // Instead, you have mixed the visitor with the composite version\r\n // of evaluate. 
You are to visit the left and right node via the\r\n // accept method to correctly implement the visitor pattern.\r\n // (e.g., left->accept (*this))\r\n \r\n //RESPONSE: Instead of calling the eval method, I call the accept method that then visits the node\r\n \r\n\t//visit left node, visit right node, then do multiplication\r\n\tnode.left_leaf->accept (*this);\r\n\tnode.right_leaf->accept(*this);\r\n\t\r\n}\r\nvoid Eval_Expr_Tree::Visit_Modulus_Node(Modulus_Expr_Node & node)\r\n{\r\n // COMMENT: You are not using the visitor pattern correctly.\r\n // Instead, you have mixed the visitor with the composite version\r\n // of evaluate. You are to visit the left and right node via the\r\n // accept method to correctly implement the visitor pattern.\r\n // (e.g., left->accept (*this))\r\n \r\n //RESPONSE: Instead of calling the eval method, I call the accept method that then visits the node\r\n \r\n\t//visit left node, visit right node, then do modulus\r\n\tnode.left_leaf->accept (*this);\r\n\tnode.right_leaf->accept(*this);\r\n\t\r\n}\r\n\r\nint Eval_Expr_Tree::result (void) const\r\n{\r\n\t//returns result of evaluation\r\n\treturn this->result_;\r\n\t\r\n}\r\n" }, { "alpha_fraction": 0.811475396156311, "alphanum_fraction": 0.811475396156311, "avg_line_length": 19.33333396911621, "blob_id": "11cd55024baae847548776ee30510442b69fbaaa", "content_id": "27e82a77045d015ad4175ab54e2493a419227896", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 122, "license_type": "no_license", "max_line_length": 68, "num_lines": 6, "path": "/Year 4/QRcodeSubmission/README.md", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "# QRcodeGenerator\ncreates QR codes\n\nNeed QT to run.\n\nIn folder QRCodeGenerator open file QRCodeGenerator.pro in QT editor\n" }, { "alpha_fraction": 0.5182101726531982, "alphanum_fraction": 0.5202913880348206, "avg_line_length": 21.439023971557617, "blob_id": "c692c4d496c5c1c7511898110a73cff27ab14294", "content_id": "e7d5f305aa0694c13a1eec00b63f9c77e756707e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 961, "license_type": "no_license", "max_line_length": 83, "num_lines": 41, "path": "/Master Year 1/Programming Languages and Compilers/HW5/hw5/PrettyPrinter.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/**\r\n The prettyPrint() method takes a Tree and\r\n converts it into a well formatted string.\r\n*/\r\n\r\npublic class PrettyPrinter\r\n{\r\n public static String prettyPrint(Tree tree)\r\n {\r\n return prettyPrint(tree, \"\");\r\n }\r\n\r\n /**\r\n This prettyPrint() method is essentially\r\n a pre-order traversal of the tree.\r\n */\r\n public static String prettyPrint(Tree tree, String indentation)\r\n {\r\n String result = indentation;\r\n\r\n if ( 0 == tree.degree() )\r\n {\r\n result += tree.getElement();\r\n }\r\n else\r\n {\r\n // \"process\" the root node\r\n result += \"(\" + tree.getElement();\r\n\r\n // recursively traverse all the sub trees\r\n for (int i = 0; i < tree.degree(); i++)\r\n {\r\n result += \"\\n\" + prettyPrint( tree.getSubTree(i), indentation + \" \" );\r\n }\r\n\r\n result += \"\\n\" + indentation + \")\";\r\n }\r\n\r\n return result;\r\n }//prettyPrint()\r\n}\r\n" }, { "alpha_fraction": 0.6618182063102722, "alphanum_fraction": 0.6618182063102722, "avg_line_length": 17.034482955932617, "blob_id": "ce7e6a96531f240f8ebbf55bb3097340ead10d8f", "content_id": 
"25438d755b253671989a2b215ce98f414eb3dd3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 550, "license_type": "no_license", "max_line_length": 59, "num_lines": 29, "path": "/Year 3/Assignment3/Subtract_Command.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n#include \"Binary_Op_Command.h\"\r\n\r\n#ifndef _SUBTRACT_COMMAND_H\r\n#define _SUBTRACT_COMMAND_H\r\n\r\nclass Subtract_Command : public Binary_Op_Command{\r\n\tpublic:\r\n\t\r\n\t\tSubtract_Command(Stack <int> &s);\r\n\t\t\r\n\t\t~Subtract_Command(void);\r\n\t\t\r\n\t\t//does subtraction on the two ints\r\n\t\tvirtual int evaluate (int, int) const;\r\n\t\t\r\n\t\t//returns precedence of subtraction\r\n\t\tvirtual int prec(void) const;\r\n\t\r\n\tprivate:\r\n\t\tint precedence;\r\n\t\t\r\n\t\t\r\n};\r\n\t\t\r\n#endif" }, { "alpha_fraction": 0.5954052209854126, "alphanum_fraction": 0.6011486649513245, "avg_line_length": 19.754966735839844, "blob_id": "69a58d342682dd7626fecd2735fcd3d2bb77347e", "content_id": "794f8a33df8e28bd4edd463c3f94fcf7a0c7f7fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3134, "license_type": "no_license", "max_line_length": 142, "num_lines": 151, "path": "/Master Year 2/Operating Systems/HW1/hw1/hw1.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\n\tCourse: CS 51520\n\tName: Marcellus Hunt\n\tEmail: [email protected]\n\tAssignment: 1\n\n*/\n#include <stdio.h>\n#include <stdlib.h>\n\nstruct StructA{\n int x;\n double y;\n char z[5];\n};\n\nstruct StructB{\n char x[5];\n int y;\n double z;\n};\n\nstruct StructC{\n double x;\n char y[5];\n int z;\n};\n\n\nstruct StructB\nconvertAtoB(struct StructA structA){\n \n struct StructB b;\n \n for (int i =0; i< 5; i++)\n {\n //char array allocation \n b.x[i] = structA.z[i];\n }\n \n //int allocation\n b.y = structA.x;\n \n //double allocation\n b.z = structA.y;\n\n //return copy\n return b;\n}\n\n\n// NOTE: The return structure should be allocated by convertAtoC().\nstruct StructC *\nconvertAtoC(struct StructA structA){\n\n static struct StructC c;\n \n for (int i =0; i< 5; i++)\n {\n //char array allocation\n c.y[i] = structA.z[i];\n }\n //int allocation\n c.z = structA.x;\n \n //double allocation\n c.x = structA.y;\n\n //return address because return type is a pointer\n return &c;\n}\n\n\nvoid\nconvertBtoC(const struct StructB * structB,\n struct StructC * structC){\n \n //(both parameters are pointers so '->' is used to point to field\n // structB is a const struct so it cannot be altered and is strictly an input\n \n //double allocation \n structC->x = structB->z;\n \n //char array allocation\n for (int i =0; i< 5; i++)\n {\n structC->y[i] = structB->x[i]; \n }\n \n //int allocation\n structC->z = structB->y;\n \n}\n\n\n// NOTE: The return structure should be allocated by convertCtoA().\nvoid\nconvertCtoA(struct StructC structC,\n struct StructA ** structA){ \n \n //structA is pointer to pointer, so this must be dereferenced and allocated by the callee (this function)\n *structA = (struct StructA *)malloc(sizeof(struct StructA));\n \n //double allocation\n (*structA)->y = structC.x;\n \n //char array allocation\n for (int i =0; i< 5; i++)\n {\n (*structA)->z[i] = structC.y[i]; \n }\n \n //int allocation\n (*structA)->x = structC.z;\n \n}\n\n\n// NOTE: The return array should be allocated by 
bundleIntoC().\nvoid\nbundleIntoC(const struct StructA * arrayA, int n,\n const struct StructB * arrayB, int m,\n struct StructC ** arrayC_p){\n \n //int n = number of elements in arrayA\n //int m = number of elements in arrayB\n \n //structC is pointer to pointer, so this must be dereferenced and allocated by the callee (this function)\n \n // (*arrayC_p) = pointer to StructC\n *arrayC_p = (struct StructC *)malloc((n+m)*sizeof(struct StructC));\n \n \n //Convert StructA to StructC and allocate to pointer\n for (int x = 0; x < n ; x++)\n {\n //dereference pointer to StructC to hold StructC value at array position , also dereference pointer returned by convertAtoC to get value\n \n *(*arrayC_p+x) = *(convertAtoC(arrayA[x]));\n }\n \n //Convert StructB to StructC and allocate to pointer\n //Loop through remainder of arrayC_p contents\n for (int y = n; y < n+m; y++)\n { \n //call function convertBtoC\n convertBtoC(&(arrayB[y-n]), (*arrayC_p+y));\n \n }\n \n}\n" }, { "alpha_fraction": 0.6791378259658813, "alphanum_fraction": 0.6940723657608032, "avg_line_length": 39.092594146728516, "blob_id": "35f26aa4c69f085b9b0fd8d33ed5241f45335036", "content_id": "ed9c19c9524fc88772def694d3b293ba6e70cf07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 6495, "license_type": "no_license", "max_line_length": 764, "num_lines": 162, "path": "/Master Year 1/Programming Languages and Compilers/HW1/hw1.html", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "<!doctype html>\n\n<html lang=\"en\">\n\n<head>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n <title>CS 51530 - Homework 1</title>\n <meta name=\"description\" content=\"CS 51530 Homework 1\">\n <meta name=\"author\" content=\"Roger L. Kraft\">\n <link rel=\"stylesheet\" href=\"http://math.pnw.edu/~rlkraft/roger-v2.css\">\n <!--[if lt IE 9]>\n <script src=\"http://html5shiv.googlecode.com/svn/trunk/html5.js\"></script>\n <![endif]-->\n</head>\n\n<body>\n<!--\n<header>\n <a href=\"http://math.pnw.edu/~rlkraft/\" title=\"Roger L. Kraft\">Roger L. Kraft</a>\n</header>\n -->\n<nav>\n <ul class=\"nav-list\" role=\"navigation\">\n <li><a href=\"../cs51530.html\">CS 51530</a></li>\n <li><a href=\"../syllabus.html\">Syllabus</a></li>\n <li><a href=\"../class.html\">Lectures and Readings</a></li>\n <li><a href=\"../homework.html\">Homework Assignments</a></li>\n </ul>\n <ul class=\"nav-list\" role=\"navigation\">\n <li><a href=\"http://math.pnw.edu/~rlkraft/roger.html\">Roger Kraft</a></li>\n <li><a href=\"https://pnw.edu/computer-science/\">Computer Science Department</a></li>\n <li><a href=\"https://pnw.edu/engineering/\">School of Engineering</a></li>\n <li><a href=\"https://pnw.edu/college-of-engineering-sciences/\">College of Engineering & Sciences</a></li>\n <li><a href=\"https://pnw.edu/\">Purdue University Northwest</a></li>\n </ul>\n</nav>\n\n<section id=\"Content\">\n<h1>\nProgramming Assignment 1<br>\nCS 51530<br>\nProgramming Languages, Interpreters and Compilers<br>\nSpring, 2021\n</h1>\n\n<p>\nThis assignment makes use of the files contained in this <a href=\"hw1.zip\">zip file</a>.\nThis assignment is due Monday, January 25.\n</p>\n\n<p>\nThis assignment is an application of binary tree traversals. 
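A quick aside on the allocation contract in hw1.h above: convertCtoA() and bundleIntoC() allocate their output with malloc, so the caller passes the address of a pointer and later frees it. A minimal caller-side sketch in C, using only names from hw1.h (the field values are taken from hw1.c's test data, and hw1.h is assumed to be included):
/* Hypothetical caller of convertCtoA(); the callee mallocs *a. */
struct StructC c = {62.63, "dogs", 61};
struct StructA *a = NULL;
convertCtoA(c, &a);    /* callee allocates and fills *a */
printf("%d\n", a->x);  /* a->x == 61 after the conversion */
free(a);               /* caller releases the callee's allocation */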
The main goal of this assignment is for you to write Java methods that \"prettyprint\" a binary tree into a string.\n</p>\n\n<p>\nIn the zip file there are four incomplete files, <code>PrettyPrinter1.java</code>, <code>PrettyPrinter2.java</code>, <code>PrettyPrinter3.java</code> and <code>Hw1.java</code>. Your assignment is to complete the four incomplete files. In addition, the zip file includes three files, <code>BTree.java</code>, <code>Traverse.java</code>, and <code>PrettyPrinter0.java</code>, that are written for you and should not be modified.\n</p>\n\n<p>\nThe term \"prettyprint\" usually means finding a way to represent a tree as a multi-line string. For example, this binary tree,\n<br>\n<img src=\"bTree1.png\" alt=\"a binary tree\" style=\"width:300px;\" border=\"0\">\n<br>\ncan be represented by the following string,\n<pre>\n (a (b d e) (c () (f (g () h) ())))\n</pre>\n</p>\n<p>\nwhich is not very easy to read. We can improve the readability of this string representation if we use multiple lines along with indentation.\n<pre>\n(a\n (b\n d\n e\n )\n (c\n ()\n (f\n (g\n ()\n h\n )\n ()\n )\n )\n)\n</pre>\n</p>\n<p>\nNotice that the root of a tree is printed right after an opening parenthesis, the two sub trees are printed below the root and slightly indented from it, and the closing parenthesis is printed on its own line just below its matching opening parenthesis. Also, the empty tree is treated as a special case and is represented by \"()\".\n</p>\n\n<p>\nThis binary tree can also be represented by the following, more compact, multi-line string. In this string, small sub trees that have depth 1 are \"inlined\".\n<pre>\n(a\n (b d e)\n (c\n ()\n (f\n (g () h)\n ()\n )\n )\n)\n</pre>\n</p>\n\n<p>\nThis representation can be made even more compact. In this string, the root of every left child is \"inlined\" with its parent's root.\n<pre>\n(a (b d e)\n (c ()\n (f (g () h)\n ()\n )\n )\n)\n</pre>\n</p>\n\n<p>\nIn the file <code>PrettyPrinter1.java</code>, complete the method <code>prettyPrinter()</code> so that it implements the first kind of prettyprinting described above. In the file <code>PrettyPrinter2.java</code>, complete the method <code>prettyPrinter()</code> so that it implements the second kind of prettyprinting described above. And in the file <code>PrettyPrinter3.java</code>, complete the method <code>prettyPrinter()</code> so that it implements the third kind of prettyprinting described above.\n</p>\n\n<p>\nIn the zip file there are image files for five binary trees, the tree above and the four trees below.\n<br>\n<img src=\"bTree2.png\" alt=\"a binary tree\" style=\"width:300px;\" border=\"0\">\n<img src=\"bTree3.png\" alt=\"a binary tree\" style=\"width:300px;\" border=\"0\">\n<img src=\"bTree4.png\" alt=\"a binary tree\" style=\"width:300px;\" border=\"0\">\n<img src=\"bTree5.png\" alt=\"a binary tree\" style=\"height:400px;\" border=\"0\">\n<br>\nIn the file <code>Hw1.java</code>, complete the Java declarations that instantiate binary trees that represent each of the binary trees in these image files (the first one is done for you). Then compile and run your program. The output of your program should look exactly like the contents of the file <code>output.txt</code> contained in the zip file.\n</p>\n\n<p>\nThe three prettyprinting methods are, for the most part, a variation on a preorder traversal of the binary tree. First you prettyprint the root, then you (recursively) prettyprint the left sub tree, then (recursively) prettyprint the right sub tree. 
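For instance, the first prettyprinter can be sketched as one short recursive method. (This is only a sketch: the <code>BTree</code> accessors <code>isEmpty()</code>, <code>root()</code>, <code>left()</code>, and <code>right()</code> are assumed names, not necessarily the API of the <code>BTree.java</code> in the zip file.)
<pre>
   static String pp(BTree t, String indent)
   {
      if ( t.isEmpty() )                               // empty tree
         return indent + "()";
      if ( t.left().isEmpty() && t.right().isEmpty() ) // single node
         return indent + t.root();
      return indent + "(" + t.root() + "\n"            // root, then the two
           + pp( t.left(),  indent + "   " ) + "\n"    // sub trees, then the
           + pp( t.right(), indent + "   " ) + "\n"    // closing parenthesis
           + indent + ")";                             // under the opening one
   }
</pre>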
For the first prettyprinter, you need to think about three cases, the empty tree, a tree of just a single node, and a tree with more than one node. For the second prettyprinter, you need to consider four cases, an empty tree, a tree of a single node, a tree of depth one, and a tree of depth greater than one. For the third prettyprinter, you have the same four cases, but the root of the left child is always in-line with its parent's root (and be sure to keep opening and closing parentheses vertically aligned).\n</p>\n\n<p>\n<a href=\"http://cs.pnw.edu/~rlkraft/cs51530/handin.html\">Turn in</a> a zip file called <code>CS51530Hw1Surname.zip</code> (where <code>Surname</code> is your last name) containing your versions of <code>PrettyPrinter1.java</code>, <code>PrettyPrinter2.java</code>, <code>PrettyPrinter3.java</code> and <code>Hw1.java</code> Be sure to put your name and email address in <b>every</b> file you turn in.\n</p>\n\n<p>\nThis assignment is due Monday, January 25.\n</p>\n\n</section>\n\n<footer>\n  <script language=\"JavaScript\" type=\"text/javascript\">\n    document.write(\"<font size=\\\"-2\\\"><i>Last modified on \" + document.lastModified + \".<\\/i><\\/font>\");\n  </script>\n  <br>\n  <a href=\"mailto:[email protected]\"><font size=\"-2\"><i>compliments and criticisms</i></font></a>\n</footer>\n\n</body>\n</html>\n" }, { "alpha_fraction": 0.7191709876060486, "alphanum_fraction": 0.7191709876060486, "avg_line_length": 23.090909957885742, "blob_id": "9a2740bef4dff4e23379efc7c1d34723af0c17d1", "content_id": "64fbe36421596c895ad8657100c1648b604c6c54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1930, "license_type": "no_license", "max_line_length": 82, "num_lines": 77, "path": "/Year 3/Assignment3/Stack_Expr_Command_Factory.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n#include \"Stack_Expr_Command_Factory.h\"\r\n\r\nStack_Expr_Command_Factory::Stack_Expr_Command_Factory(Stack <int> stack):\r\nstack_(stack)\r\n{\r\n\t//default constructor\r\n}\r\n\r\n\r\nStack_Expr_Command_Factory:: ~Stack_Expr_Command_Factory(void)\r\n{\r\n\t//destructor\r\n}\r\n\r\nint Stack_Expr_Command_Factory:: answer (void)\r\n{\r\n\treturn stack_.top();\r\n}\r\n\r\nNum_Command * Stack_Expr_Command_Factory:: create_num_command (int num)\r\n{\r\n\t//return number command object\r\n\tNum_Command * Num_Object = new Num_Command(stack_,num);\r\n\treturn Num_Object;\r\n\t\r\n}\r\n\r\nAdd_Command * Stack_Expr_Command_Factory:: create_add_command (void)\r\n{\r\n\t//return add command object\r\n\tAdd_Command * Add_Object = new Add_Command(stack_);\r\n\treturn Add_Object;\r\n\t\r\n}\r\n\r\nSubtract_Command * Stack_Expr_Command_Factory::create_subtract_command (void)\r\n{\r\n\t//return subtract command object\r\n\tSubtract_Command * Subtract_Object = new Subtract_Command(stack_);\r\n\treturn Subtract_Object;\r\n\t\r\n}\r\n\r\nMultiply_Command * Stack_Expr_Command_Factory:: create_multiply_command (void)\r\n{\r\n\t//return multiply command object\r\n\tMultiply_Command * Multiply_Object = new Multiply_Command(stack_);\r\n\treturn Multiply_Object;\r\n\t\r\n}\r\n\r\nDivision_Command * Stack_Expr_Command_Factory::create_division_command (void)\r\n{\r\n\t//return division command object\r\n\tDivision_Command * Division_Object = new Division_Command(stack_);\r\n\treturn Division_Object;\r\n}\r\n\r\nModulus_Command * 
Stack_Expr_Command_Factory::create_modulus_command (void)\r\n{\r\n\t//return modulus command object\r\n\tModulus_Command * Modulus_Object = new Modulus_Command(stack_);\r\n\treturn Modulus_Object;\r\n\t\r\n}\r\n\r\nParenthesis_Command * Stack_Expr_Command_Factory::create_parenthesis_command(void)\r\n{\r\n\t//return parenthesis command object\r\n\tParenthesis_Command * Parenthesis_Object = new Parenthesis_Command(stack_);\r\n\treturn Parenthesis_Object;\r\n}" }, { "alpha_fraction": 0.5932802557945251, "alphanum_fraction": 0.598585307598114, "avg_line_length": 21.12244987487793, "blob_id": "4892c81085c262067ab934e220b6053574386b9a", "content_id": "372bbd223e0b174b7dbc8601ad237d1341431e52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1131, "license_type": "no_license", "max_line_length": 60, "num_lines": 49, "path": "/Year 2/Assignment #3/Player.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// \r\n//Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor \r\n// received any help on this assignment\r\n//.\r\n//\r\n// huntmk\r\n\r\n#include \"Player.h\"\r\n\r\n\r\nPlayer::Player() {\r\n\r\n}\r\n//showPlayer function\r\nvoid Player::showPlayer()\r\n{\r\n\t//prints player in this format\r\n\tstd::cout << this->jerseyNum;\r\n\tstd::cout << (\")\" + this->fName + \" \" + this->lName + \" \");\r\n\tstd::cout << std::endl;\r\n\r\n}\r\n//create addPlayer function\r\nvoid Player::addPlayer()\r\n{\r\n\t\t// ask user to enter attributes for Player being created\r\n\t\tstd::cout << \"Please enter a first name: \";\r\n\t\tstd::cin >> this->fName;\r\n\t\tstd::cout << std::endl;\r\n\t\tstd::cout << \"Please enter a last name: \";\r\n\t\tstd::cin >> this->lName;\r\n\t\tstd::cout << std::endl;\r\n\t\tstd::cout << \"Please enter a number (1-99)\";\r\n\t\tstd::cin >>this->jerseyNum;\r\n\t}\r\n\r\n\r\n//create printMenu function\r\nvoid Player::printMenu()\r\n{\r\n\t//show user what options they have in this program\r\n\tstd::cout << \"1) Add New Player\" << std::endl;\r\n\tstd::cout << \"2) View Player(s)\" << std::endl;\r\n\tstd::cout << \"3) End Program\" << std::endl;\r\n\tstd::cout << std::endl;\r\n\tstd::cout << \"Please enter your selection: \";\r\n}" }, { "alpha_fraction": 0.6570512652397156, "alphanum_fraction": 0.6602563858032227, "avg_line_length": 18.19354820251465, "blob_id": "be9ff54dce639c05132589d1bd56ef80236b814e", "content_id": "55423e0f9a346305054043795d6d12134c5e159e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 624, "license_type": "no_license", "max_line_length": 59, "num_lines": 31, "path": "/Year 3/Assignment3/Binary_Op_Command.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n#include \"Expr_Command.h\"\r\n#ifndef _BINARY_OP_COMMAND_H\r\n#define _BINARY_OP_COMMAND_H\r\n\r\nclass Binary_Op_Command : public Expr_Command {\r\n\r\n\r\npublic:\r\n\tBinary_Op_Command(Stack <int> & s);\r\n\t\r\n\t~Binary_Op_Command(void);\r\n\t\r\n\t//algorithm used to operate on the numbers using the stack\r\n\tvirtual void execute (void);\r\n\t\r\n\t//evaluation of ints based on type of operator\r\n\tvirtual int evaluate (int, int ) const = 0;\r\n\t\r\n\t//precedence object\r\n\tvirtual int prec(void)const = 0;\r\n\t\r\nprivate:\r\n\tStack <int> & s_;\r\n\t\r\n};\r\n\r\n#endif" }, { "alpha_fraction": 0.4795607030391693, 
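One plausible definition of the execute() method that Binary_Op_Command.h above only declares: pop the two operands, apply the subclass's evaluate(), and push the result. A C++ sketch follows; the repo's Stack exposes top() (see answer() in the factory), but pop() and push() are assumed names here, and this is not necessarily the repository's actual Binary_Op_Command.cpp.
// Hypothetical sketch of the declared execute() algorithm.
void Binary_Op_Command::execute (void)
{
	int rhs = s_.top ();   // right operand was pushed last
	s_.pop ();
	int lhs = s_.top ();
	s_.pop ();
	s_.push (this->evaluate (lhs, rhs));   // result goes back on the stack
}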
"alphanum_fraction": 0.5302013158798218, "avg_line_length": 31.780000686645508, "blob_id": "b172156f2ebedafa976cd541e08388f8b52bb109", "content_id": "785942de3362954de6e687022f51c0ac6738e3aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1639, "license_type": "no_license", "max_line_length": 86, "num_lines": 50, "path": "/Master Year 2/Operating Systems/HW1/hw1/hw1.c", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\n Do NOT modify this file.\n*/\n#include \"hw1.h\"\n\nint main()\n{\n struct StructA structA = {41, 42.43, \"cats\"};\n struct StructB structB = {\"pear\", 51, 52.53};\n struct StructC structC = {62.63, \"dogs\", 61};\n\n printf(\"structA = {%d, %f, %s}\\n\", structA.x, structA.y, structA.z);\n printf(\"structB = {%s, %d, %f}\\n\", structB.x, structB.y, structB.z);\n printf(\"structC = {%f, %s, %d}\\n\", structC.x, structC.y, structC.z);\n\n struct StructB structA2B = convertAtoB(structA);\n struct StructC *structA2C = convertAtoC(structA);\n struct StructC structB2C;\n convertBtoC(&structB, &structB2C);\n struct StructA *structC2A;\n convertCtoA(structC, &structC2A);\n\n printf(\"structA2B = {%s, %d, %f}\\n\", structA2B.x, structA2B.y, structA2B.z);\n printf(\"structA2C -> {%f, %s, %d}\\n\", structA2C->x, structA2C->y, structA2C->z);\n printf(\"structB2C = {%f, %s, %d}\\n\", structB2C.x, structB2C.y, structB2C.z);\n printf(\"structC2A -> {%d, %f, %s}\\n\", structC2A->x, structC2A->y, structC2A->z);\n\n free(structA2C);\n free(structC2A);\n\n struct StructA arrayA[] = {{1, 11.12, \"abcd\"},\n {2, 12.13, \"efgh\"},\n {3, 13.14, \"ijkl\"},\n {4, 14.15, \"mnop\"}};\n struct StructB arrayB[] = {{\"qrst\", 5, 15.16},\n {\"uvwx\", 6, 16.17},\n {\"yz@#\", 7, 17.18}};\n\n struct StructC * arrayC;\n bundleIntoC(arrayA, 4, arrayB, 3, &arrayC);\n\n for (int i = 0; i < 7; ++i)\n {\n printf(\"arrayC[%d] = {%f, %s, %d}\\n\", i, arrayC[i].x, arrayC[i].y, arrayC[i].z);\n }\n\n free(arrayC);\n\n return 0;\n}\n" }, { "alpha_fraction": 0.624078631401062, "alphanum_fraction": 0.624078631401062, "avg_line_length": 14.359999656677246, "blob_id": "a3dbd3550960b89ecd9cc577b648940015a2f97b", "content_id": "2903b69dbe7d9ee7266553289dbba7a192e971e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 407, "license_type": "no_license", "max_line_length": 59, "num_lines": 25, "path": "/Year 3/Assignment 4/Unary_Expr_Node.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n\r\n#include \"Unary_Expr_Node.h\"\r\nUnary_Expr_Node::Unary_Expr_Node(void)\r\n{\r\n\t//default constructor\r\n}\r\nUnary_Expr_Node::~Unary_Expr_Node(void)\r\n {\r\n\t//destructor\r\n }\r\n\t\t\r\nUnary_Expr_Node::int eval(void)\r\n{\r\n\t//unary has only one child\r\n\tif(this->child_)\r\n\t{\r\n\t\treturn this->child_->eval();\r\n\t}\r\n}" }, { "alpha_fraction": 0.5514242649078369, "alphanum_fraction": 0.5637761354446411, "avg_line_length": 21.963747024536133, "blob_id": "1da3dd85396e108d8932e45bb672c3049874eadb", "content_id": "340854ddc57f032ae2ae85d55e99e5f9b412b614", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7934, "license_type": "no_license", "max_line_length": 224, "num_lines": 331, "path": "/Year 2/Project 1/Tour.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n* 
Tour.cpp\r\n*\r\n* Created on: Feb 6, 2018\r\n* Author: Cellus\r\n*/\r\n#include <ctime>\r\n#include <stdlib.h>\r\n#include <string>\r\n#include<iostream>\r\n#include \"Tour.h\"\r\n#include \"Stack.h\"\r\n#include \"LinkedList.h\"\r\n\r\n\r\nusing namespace std;\r\n\r\n\r\n//define DIMENSION to use for \"8 by 8\" board\r\n#define DIMENSION 8\r\n\r\n/*Board*/ int board[DIMENSION*DIMENSION];\r\n\r\n//created stack and linked list objects to use their methods to store the necessary information\r\nLinkedList list;\r\nStack stack;\r\n//variables\r\n\r\n//moveNum is the number that represents the order in which the knight moved; x and y run 0..7 for the 8 by 8 array\r\nint moveNum = 0;\r\n//int knight[DIMENSION][DIMENSION];\r\nstring decision;\r\n\r\n//create constant int arrays to use for knight moves\r\nstatic int moveX[8] = { 1,1,2,2,-1,-1,-2,-2 };\r\nstatic int moveY[8] = { 2,-2,1,-1,2,-2,1,-1 };\r\n\r\n\r\n//function keeps knight within the board\r\nbool Tour:: limit( int x, int y)\r\n{\r\n\treturn ((x >= 0 && y >= 0) && (x < DIMENSION && y < DIMENSION));\r\n}\r\n\r\n// check if square is empty and hasn't been moved to yet\r\nbool Tour::isEmpty(int knight[], int x, int y)\r\n{\r\n\treturn (limit(x, y)) && (knight[y*DIMENSION + x] < 0);\r\n}\r\n\r\n//returns squares(knight not moved to yet)next to coordinates\r\nint Tour::emptyNextTo(int knight[], int x, int y)\r\n {\r\n\tint numOf = 0;\r\n\tfor (int i = 0; i < 8; i++)\r\n\t\t\tif (isEmpty(knight,(x + moveX[i]), (y + moveY[i])))\r\n\t\t\tnumOf++;\r\n\r\n\treturn numOf;\r\n\r\n}\r\n\r\n//FUNCTIONS TO IMPLEMENT\r\n//...\r\n//Warnsdorff nextMove() function for the algorithm\r\nbool Tour::nextMove(int w[], int *x, int *y)\r\n{\r\n\tint min_deg_idx = -1;\r\n\tint counter;\r\n\tint min_deg = (DIMENSION + 1);\r\n\tint newX;\r\n\tint newY;\r\n\r\n\t//Try all squares adjacent to the knight's position from the spot.\r\n\tint begin = rand() % DIMENSION;\r\n\tfor (int count = 0; count < DIMENSION; ++count)\r\n\t{\r\n\t\tint i = (begin + count) % DIMENSION;\r\n\t\tnewX = *x + moveX[i];\r\n\t\tnewY = *y + moveY[i];\r\n\t\tif ((isEmpty(w,newX, newY)) &&\r\n\t\t\t(counter = emptyNextTo(w, newX, newY)) < min_deg)\r\n\t\t{\r\n\t\t\tmin_deg_idx = i;\r\n\t\t\tmin_deg = counter;\r\n\t\t}\r\n\t}\r\n\r\n\t//if we can't find a next spot\r\n\tif (min_deg_idx == -1)\r\n\t\treturn false;\r\n\r\n\t// Store coordinates of the next spot on board\r\n\tnewX = *x + moveX[min_deg_idx];\r\n\tnewY = *y + moveY[min_deg_idx];\r\n\r\n\t//Have the board array hold that position, then push the board to the stack as an element \r\n\tboard[DIMENSION*DIMENSION] = w[newY*DIMENSION + newX];\r\n\tstack.push(board);\r\n\tw[newY*DIMENSION + newX] = moveNum;\t\t\t\t\t\t\t\t\t // mark next spot in order\r\n\r\n\t// Update next point\r\n\t*x = newX;\r\n\t*y = newY;\r\n\tmoveNum++;\r\n\treturn true;\r\n}\r\n\r\n\r\n \r\n\r\n\r\n\r\n//Function that does the Warnsdorff algorithm for the first 32 moves\r\nbool Tour::warnsdoffFunct(int knight[], int x , int y)\r\n{\r\n\t//use the Warnsdorff algorithm function to keep picking next spots to move to\r\n\tfor (int b = 0; b < (DIMENSION*DIMENSION)/2; b++)\r\n\t{\r\n\t\tif (nextMove(knight, &x, &y) == 0)\r\n\t\t{\r\n\t\t\treturn false;\r\n\t\t}\r\n\t\t\r\n\t}\r\n\t//all moves succeeded\r\n\treturn true;\r\n}\r\n\r\n\r\n//Function that runs the whole tour along with other associated operations\r\nbool Tour:: tourFunct()\r\n{\r\n\t\r\n\t\r\n\tint knight[DIMENSION*DIMENSION];//int knight[DIMENSION*DIMENSION];\r\n\tfor (int i = 0; i < DIMENSION; i++)\r\n\t\tfor (int j = 0; j < DIMENSION; 
j++)\r\n\t\t\tknight[i*DIMENSION + j] = -1;\r\n\r\n\r\n\t//initial x and initial y positions\r\n\tint initialX = 0;\r\n\tint initialY = 0;\r\n\r\n\t//random initial position to begin with\r\n\t//int randomX = rand() % DIMENSION;\r\n\t//int randomY = rand() % DIMENSION;\r\n\r\n\t//positions x and y will hold random positions\r\n\tint x;//= randomX;\r\n\tint y;//= randomY;\r\n\t//IMPORTANT\r\n\t//Ask the user for starting positions\r\n\t//These will be stored in the link lists\r\n\t//Use each of the stored coordinates for multiple runs\r\n\t//of the program\r\n\r\n\r\n\t\r\n\tdecision = \"Y\";\r\n\t//ask user for starting positions\r\n\t\r\n\twhile (decision == \"Y\")\r\n\t{\r\n\t\t//ask for row again\r\n\t\tcout << \"Enter row: \";\r\n\t\tcin >> initialX;\r\n\t\tif (initialX >= 0 && initialX < DIMENSION)\r\n\t\t{\r\n\t\t\tx = initialX;\r\n\t\t}\r\n\t\t//if x is not a legal number for the board, ask user to enter it again\r\n\t\telse {\r\n\t\t\tcout << \"Please use a number from 0 to 7. Enter row again: \";\r\n\t\t\tcin >> initialX;\r\n\t\t}\r\n\r\n\t\tcout << \"Okay choose the number for the column: \";\r\n\t\tcin >> initialY;\r\n\t\t//Ask for Column\r\n\t\tif (initialY >= 0 && initialY < DIMENSION)\r\n\t\t{\r\n\t\t\ty = initialY;\r\n\t\t}\r\n\t\t//if y is not a legal number for board, ask user to enter it again\r\n\t\telse {\r\n\r\n\t\t\tcout << \"Please use a number from 0 to 7. Enter again: \";\r\n\t\t\tcin >> initialY;\r\n\t\t}\r\n\r\n\t\t//store these in nodes\r\n\r\n\t\tlist.addNode(x,y);\r\n\r\n\t\tcout << \"Would you like to add another node to the linked list? 'Y' for yes, 'N' for no: \";\r\n\t\tcin >> decision;\r\n\t}\r\n\t//display the node\r\n\tlist.displayList();\r\n\t//delete node to pop off from list and use for the tour\r\n\t\r\n\tknight[y*DIMENSION + x] = moveNum;\r\n\t\r\n\tmoveNum++;\r\n\r\n\t\r\n\t\t//call warnsdoffFunct to execute first 32 moves\r\n\t\twarnsdoffFunct(knight, x, y);\r\n\r\n\t\t\r\n\r\n\t\t//while state is greater than 32, do exhaustive search(backtracking) for next spot on board \r\n\t\t\r\n\t\twhile (moveNum >= 32 && moveNum < 64)\r\n\t\t{\r\n\t\t\r\n\t\t\tif (isEmpty(knight, x - 2, y + 1))\r\n\t\t\t{\r\n\t\t\t\t//put the state of the coordinates on the board\r\n\t\t\t\tx -= 2;\r\n\t\t\t\ty += 1;\r\n\t\t\t\tboard[DIMENSION*DIMENSION] = knight[y*DIMENSION + x];\r\n\t\t\t\t//push knight to stack\r\n\t\t\t\tstack.push(board);\r\n\t\t\t\tknight[y*DIMENSION + x]= moveNum;\r\n\t\t\t\tmoveNum++;\r\n\t\t\t}\r\n\t\t\telse\r\n\t\t\t\tif (isEmpty(knight, x - 1, y + 2))\r\n\t\t\t\t{\r\n\t\t\t\t\tx -= 1;\r\n\t\t\t\t\ty += 2;\r\n\t\t\t\t\tboard[DIMENSION*DIMENSION] = knight[y*DIMENSION + x];\r\n\t\t\t\t\t//push knight to stack\r\n\t\t\t\t\tstack.push(board);\r\n\t\t\t\t\tknight[y*DIMENSION + x]= moveNum;\r\n\t\t\t\t\tmoveNum++;\r\n\r\n\r\n\t\t\t\t}\r\n\t\t\t\telse\r\n\t\t\t\t\tif (isEmpty(knight, x + 1, y + 2))\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tx += 1;\r\n\t\t\t\t\t\ty += 2;\r\n\t\t\t\t\t\tboard[DIMENSION*DIMENSION] = knight[y*DIMENSION + x];\r\n\t\t\t\t\t//push knight to stack\r\n\t\t\t\t\tstack.push(board);\r\n\t\t\t\t\tknight[y*DIMENSION + x]= moveNum;\r\n\t\t\t\t\tmoveNum++;\r\n\t\t\t\t\t}\r\n\t\t\t\t\telse\r\n\t\t\t\t\t\tif (isEmpty(knight,x + 2, y + 1))\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\tx += 2;\r\n\t\t\t\t\t\t\ty += 1;\r\n\t\t\t\t\t\t\tboard[DIMENSION*DIMENSION] = knight[y*DIMENSION + x];\r\n\t\t\t\t\t//push knight to stack\r\n\t\t\t\t\tstack.push(board);\r\n\t\t\t\t\tknight[y*DIMENSION + x]= 
moveNum;\r\n\t\t\t\t\tmoveNum++;\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\telse\r\n\t\t\t\t\t\t\tif (isEmpty(knight, x + 2, y - 1))\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\tx += 2;\r\n\t\t\t\t\t\t\t\ty -= 1;\r\n\t\t\t\t\t\t\t\tboard[DIMENSION*DIMENSION] = knight[y*DIMENSION + x];\r\n\t\t\t\t\t//push knight to stack\r\n\t\t\t\t\tstack.push(board);\r\n\t\t\t\t\tknight[y*DIMENSION + x]= moveNum;\r\n\t\t\t\t\tmoveNum++;\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\telse\r\n\t\t\t\t\t\t\t\tif (isEmpty(knight, x + 1, y - 2))\r\n\t\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\t\tx += 1;\r\n\t\t\t\t\t\t\t\t\ty -= 2;\r\n\t\t\t\t\t\t\t\t\tboard[DIMENSION*DIMENSION] = knight[y*DIMENSION + x];\r\n\t\t\t\t\t\t\t\t//push knight to stack\r\n\t\t\t\t\t\t\t\tstack.push(board);\r\n\t\t\t\t\t\t\t\tknight[y*DIMENSION + x]= moveNum;\r\n\t\t\t\t\t\t\t\tmoveNum++;\r\n\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\telse\r\n\t\t\t\t\t\t\t\t\tif (isEmpty(knight, x - 1, y - 2))\r\n\t\t\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\t\t\tx -= 1;\r\n\t\t\t\t\t\t\t\t\t\ty -= 2;\r\n\t\t\t\t\t\t\t\t\t\tboard[DIMENSION*DIMENSION] = knight[y*DIMENSION + x];\r\n\t\t\t\t\t\t\t\t\t//push knight to stack\r\n\t\t\t\t\t\t\t\t\tstack.push(board);\r\n\t\t\t\t\t\t\t\t\tknight[y*DIMENSION + x]= moveNum;\r\n\t\t\t\t\t\t\t\t\tmoveNum++;\r\n\t\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\t\telse\r\n\t\t\t\t\t\t\t\t\t\tif (isEmpty(knight, x - 2, y - 1))\r\n\t\t\t\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\t\t\t\tx -= 2;\r\n\t\t\t\t\t\t\t\t\t\t\ty -= 1;\r\n\t\t\t\t\t\t\t\t\t\t\tboard[DIMENSION*DIMENSION] = knight[y*DIMENSION + x];\r\n\t\t\t\t\t\t\t\t\t\t\t//push knight to stack\r\n\t\t\t\t\t\t\t\t\t\t\tstack.push(board);\r\n\t\t\t\t\t\t\t\t\t\t\tknight[y*DIMENSION + x]= moveNum;\r\n\t\t\t\t\t\t\t\t\t\t\tmoveNum++;\r\n\t\t\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\t\t\telse\r\n\t\t\t\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\t\t\t\t//back track\r\n\t\t\t\t\t\t\t\t\t\t\t//Couldn't figure out initial back track: but process would be popping the previous state of the board (the whole array) from stack element and storing it as current board state, then you would use different move\r\n\t\t\t\t\t\t\t\t\t\t\t//knight[DIMENSION*DIMENSION]= stack.pop();\r\n\t\t\t\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\t\t\t\tmoveNum--;\r\n\t\t\t\t\t\t\t\t\t\t}\r\n\r\n\r\n\r\n\r\n\t\t}\r\n\t\t\r\n\t\tfor (int i = 0; i < (DIMENSION); i++)\r\n\t\t\tfor (int j = 0; j < DIMENSION; j++)\r\n\t\t\t\tcout << knight[i*DIMENSION + j] << \" \";\r\n\t\t\t\t\r\n\r\n\r\n\t\treturn true;\r\n\t\t\r\n\t\t\r\n}\r\n\r\n" }, { "alpha_fraction": 0.5394617319107056, "alphanum_fraction": 0.5506501197814941, "avg_line_length": 29.798076629638672, "blob_id": "ca4752adae8451648d03d66445121f5ef82ddcc7", "content_id": "ad070f601fc6ddbdf372d7a1d9af6cf1a58ef645", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3307, "license_type": "no_license", "max_line_length": 90, "num_lines": 104, "path": "/Master Year 1/Computer Graphics/HW2/renderer/models/ParametricCurve.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.models;\r\nimport renderer.scene.*;\r\n\r\nimport java.util.function.DoubleFunction;\r\nimport java.util.function.ToDoubleFunction; // could use this instead\r\nimport java.util.function.DoubleUnaryOperator; // could use this instead\r\n//https://docs.oracle.com/javase/8/docs/api/java/util/function/package-summary.html\r\n\r\n/**\r\n   Create a wireframe model of a parametric curve in space.\r\n<p>\r\n   See <a href=\"https://en.wikipedia.org/wiki/Parametric_equation\"
target=\"_top\">\r\n https://en.wikipedia.org/wiki/Parametric_equation</a>\r\n\r\n @see ParametricSurface\r\n*/\r\npublic class ParametricCurve extends Model\r\n{\r\n /**\r\n Create a trefoil knot as a parametric curve in space.\r\n <p>\r\n See <a href=\"https://en.wikipedia.org/wiki/Trefoil_knot#Descriptions\" target=\"_top\">\r\n https://en.wikipedia.org/wiki/Trefoil_knot#Descriptions</a>\r\n */\r\n public ParametricCurve()\r\n {\r\n this(t -> 0.5*Math.sin(t) + Math.sin(2*t),\r\n t -> 0.5*Math.cos(t) - Math.cos(2*t),\r\n t -> -0.5*Math.sin(3*t),\r\n 0, 2*Math.PI, 60);\r\n }\r\n\r\n\r\n /**\r\n Create a parametric curve in the xy-plane,\r\n <pre>{@code\r\n x = x(t)\r\n y = y(t)\r\n }</pre>\r\n with the parameter {@code t} having the given parameter\r\n range and the given number of line segments.\r\n\r\n @param x component function in the x-direction\r\n @param y component function in the y-direction\r\n @param t1 beginning value of parameter range\r\n @param t2 ending value of parameter range\r\n @param n number of line segments in the curve\r\n */\r\n public ParametricCurve(final DoubleFunction<Double> x,\r\n final DoubleFunction<Double> y,\r\n final double t1, final double t2,\r\n final int n)\r\n {\r\n this(x, y, t->0.0, t1, t2, n);\r\n }\r\n\r\n\r\n /**\r\n Create a parametric curve in space,\r\n <pre>{@code\r\n x = x(t)\r\n y = y(t)\r\n z = z(t)\r\n }</pre>\r\n with the parameter {@code t} having the given parameter\r\n range and the given number of line segments.\r\n\r\n @param x component function in the x-direction\r\n @param y component function in the y-direction\r\n @param z component function in the z-direction\r\n @param t1 beginning value of parameter range\r\n @param t2 ending value of parameter range\r\n @param n number of line segments in the curve\r\n */\r\n public ParametricCurve(final DoubleFunction<Double> x,\r\n final DoubleFunction<Double> y,\r\n final DoubleFunction<Double> z,\r\n final double t1, final double t2,\r\n int n)\r\n {\r\n super(\"Parametric Curve\");\r\n\r\n if (n < 1) n = 1;\r\n\r\n // Create the curve's geometry.\r\n final double deltaT = (t2 - t1) / n;\r\n\r\n for (int i = 0; i < n + 1; ++i)\r\n {\r\n addVertex( new Vertex( x.apply(t1 + i * deltaT),\r\n y.apply(t1 + i * deltaT),\r\n z.apply(t1 + i * deltaT) ) );\r\n }\r\n\r\n for (int i = 0; i < n; ++i)\r\n {\r\n addLineSegment(new LineSegment(i, i+1));\r\n }\r\n }\r\n}//ParametricCurve\r\n" }, { "alpha_fraction": 0.46536508202552795, "alphanum_fraction": 0.4771329164505005, "avg_line_length": 28.15322494506836, "blob_id": "ee5cbcc6601f340b9f7e33680e6404614a1da415", "content_id": "73ada7c1d3d0936c9bf01b966dcf3f745a5ff709", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3739, "license_type": "no_license", "max_line_length": 80, "num_lines": 124, "path": "/Master Year 1/Computer Graphics/HW3/renderer/models/Disk.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.models;\r\nimport renderer.scene.*;\r\n\r\n/**\r\n Create a wireframe model of a disk\r\n in the xy-plane centered at the origin.\r\n<p>\r\n See <a href=\"https://en.wikipedia.org/wiki/Disk_(mathematics)\" target=\"_top\">\r\n https://en.wikipedia.org/wiki/Disk_(mathematics)</a>\r\n\r\n @see DiskSector\r\n*/\r\npublic class Disk extends Model\r\n{\r\n /**\r\n Create a disk in the xy-plane with radius 1,\r\n with 12 spokes coming out of the center, and\r\n with 6 concentric circles around the disk.\r\n */\r\n public Disk( )\r\n 
{\r\n this(1, 6, 12);\r\n }\r\n\r\n\r\n /**\r\n Create a disk in the xy-plane with radius\r\n {@code r}, with 12 spokes coming out of the\r\n center, and with 6 concentric circles around\r\n the disk.\r\n\r\n @param r radius of the disk\r\n */\r\n public Disk(final double r)\r\n {\r\n this(r, 6, 12);\r\n }\r\n\r\n\r\n /**\r\n Create a disk in the xy-plane with radius\r\n {@code r}, with {@code k} spokes coming out\r\n of the center, and with {@code n} concentric\r\n circles around the disk.\r\n <p>\r\n If there are {@code k} spokes, then each circle around the\r\n center will have {@code k} line segments.\r\n If there are {@code n} concentric circles around the\r\n center, then each spoke will have {@code n} line segments.\r\n <p>\r\n There must be at least three spokes and at least\r\n one concentric circle.\r\n\r\n @param r radius of the disk\r\n @param n number of concentric circles\r\n @param k number of spokes in the disk\r\n */\r\n public Disk(final double r, int n, int k)\r\n {\r\n super(\"Disk\");\r\n\r\n if (n < 1) n = 1;\r\n if (k < 3) k = 3;\r\n\r\n // Create the disk's geometry.\r\n\r\n final double deltaR = r / n;\r\n final double deltaTheta = 2 * Math.PI / k;\r\n\r\n // An array of vertices to be used to create line segments.\r\n final Vertex[][] v = new Vertex[n][k];\r\n\r\n // Create all the vertices.\r\n for (int j = 0; j < k; ++j) // choose a spoke (an angle)\r\n {\r\n final double c = Math.cos(j * deltaTheta);\r\n final double s = Math.sin(j * deltaTheta);\r\n for (int i = 0; i < n; ++i) // move along the spoke\r\n {\r\n final double ri = (i + 1) * deltaR;\r\n v[i][j] = new Vertex( ri * c,\r\n ri * s,\r\n 0 );\r\n }\r\n }\r\n final Vertex center = new Vertex(0,0,0);\r\n\r\n // Add all of the vertices to this model.\r\n for (int i = 0; i < n; ++i)\r\n {\r\n for (int j = 0; j < k; ++j)\r\n {\r\n addVertex( v[i][j] );\r\n }\r\n }\r\n addVertex( center );\r\n final int centerIndex = n * k;\r\n\r\n // Create the spokes connecting the center to the outer circle.\r\n for (int j = 0; j < k; ++j) // choose a spoke\r\n { // v[0][j]\r\n addLineSegment(new LineSegment( centerIndex, (0 * k) + j ));\r\n for (int i = 0; i < n - 1; ++i)\r\n { // v[i][j] v[i+1][j]\r\n addLineSegment(new LineSegment( (i * k) + j, ((i+1) * k) + j ));\r\n }\r\n }\r\n\r\n // Create the line segments around each concentric circle.\r\n for (int i = 0; i < n; ++i) // choose a circle\r\n {\r\n for (int j = 0; j < k - 1; ++j)\r\n { // v[i][j] v[i][j+1]\r\n addLineSegment(new LineSegment( (i * k) + j, (i * k) + (j + 1) ));\r\n }\r\n // close the circle\r\n addLineSegment(new LineSegment( (i * k) + (k-1), (i * k) + 0 ));\r\n } // v[i][k-1] v[i][0]\r\n }\r\n}//Disk\r\n" }, { "alpha_fraction": 0.4316546618938446, "alphanum_fraction": 0.43309351801872253, "avg_line_length": 20.419355392456055, "blob_id": "084da62de49eface747a763eac120b2ad3babeaf", "content_id": "e681841dcd20a5101873d4fda735cff513a743aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 695, "license_type": "no_license", "max_line_length": 58, "num_lines": 31, "path": "/Master Year 2/Operating Systems/HW3/hw3/filters/remove_vowels.c", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n This program reads lines from standard input, removes\r\n all the vowels, and writes the rest to standard output.\r\n\r\n When using Windows, if standard input is the console\r\n keyboard, use ^z (Control-z) to denote the end of file\r\n (and you must use ^z at the beginning of a 
line!).\r\n*/\r\n#include <stdio.h>\r\n\r\nint main()\r\n{\r\n char c;\r\n while ( (c = getchar()) != EOF )\r\n {\r\n if ( c != 'a'\r\n && c != 'e'\r\n && c != 'i'\r\n && c != 'o'\r\n && c != 'u'\r\n && c != 'A'\r\n && c != 'E'\r\n && c != 'I'\r\n && c != 'O'\r\n && c != 'U' )\r\n {\r\n printf(\"%c\", c); // echo the non-vowels\r\n }\r\n }\r\n return 0;\r\n}\r\n" }, { "alpha_fraction": 0.5599037408828735, "alphanum_fraction": 0.5708099603652954, "avg_line_length": 34.893489837646484, "blob_id": "d986e718449fe4255ed574500771a02de3e66bdd", "content_id": "530d4d5a4aaa0a65ee87837b2129bc7cad318acb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 6235, "license_type": "no_license", "max_line_length": 77, "num_lines": 169, "path": "/Master Year 1/Computer Graphics/HW4/renderer/models/ConeSector.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.models;\r\nimport renderer.scene.*;\r\n\r\n/**\r\n Create a wireframe model of a partial right circular cone with its\r\n base parallel to the xz-plane and its apex on the positive y-axis.\r\n<p>\r\n By a partial cone we mean a cone over a circular sector of the\r\n cone's base and also cutting off the top part of the cone (the\r\n part between the apex and a circle of latitude) leaving a frustum\r\n of the (partial) cone.\r\n\r\n @see Cone\r\n @see ConeFrustum\r\n*/\r\npublic class ConeSector extends Model\r\n{\r\n /**\r\n Create half of a right circular cone with its base in the xz-plane,\r\n a base radius of 1, height 1, and apex on the positive y-axis.\r\n */\r\n public ConeSector( )\r\n {\r\n this(1, 1, Math.PI/2, 3*Math.PI/2, 15, 8);\r\n }\r\n\r\n\r\n /**\r\n Create a part of the cone with its base in the xz-plane,\r\n a base radius of {@code r}, height {@code h}, and apex\r\n on the y-axis.\r\n <p>\r\n If {@code theta1 > 0} or {@code theta2 < 2pi},then the partial\r\n cone is a cone over the circular sector from angle {@code theta1}\r\n to angle {@code theta2}. In other words, the (partial) circles of\r\n latitude in the model extend from angle {@code theta1} to angle\r\n {@code theta2}.\r\n <p>\r\n The last two parameters determine the number of lines of longitude\r\n and the number of (partial) circles of latitude in the model.\r\n <p>\r\n If there are {@code n} circles of latitude in the model (including\r\n the bottom edge), then each line of longitude will have {@code n}\r\n line segments. 
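   For example, with {@code n = 3} circles of latitude and {@code k = 6}
   lines of longitude, each line of longitude is drawn with 3 line
   segments (the numbers here are only an illustration of the rule just
   stated, not values from the original javadoc).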
If there are {@code k} lines of longitude, then each\r\n   (partial) circle of latitude will have {@code k-1} line segments.\r\n   <p>\r\n   There must be at least four lines of longitude and at least\r\n   one circle of latitude.\r\n\r\n   @param r       radius of the base in the xz-plane\r\n   @param h       height of the apex on the y-axis\r\n   @param theta1  beginning longitude angle of the sector\r\n   @param theta2  ending longitude angle of the sector\r\n   @param n       number of circles of latitude around the cone\r\n   @param k       number of lines of longitude\r\n   */\r\n   public ConeSector(double r,\r\n                     double h,\r\n                     double theta1, double theta2,\r\n                     int n, int k)\r\n   {\r\n      this(r, h, h, theta1, theta2, n+1, k);\r\n   }\r\n\r\n\r\n   /**\r\n      Create a part of the cone with its base in the xz-plane,\r\n      a base radius of {@code r}, height {@code h}, and apex\r\n      on the y-axis.\r\n      <p>\r\n      If {@code 0 < t < h}, then the partial cone is a frustum\r\n      with its base in the xz-plane and the top of the frustum at\r\n      {@code y = t}.\r\n      <p>\r\n      If {@code theta1 > 0} or {@code theta2 < 2pi},then the partial\r\n      cone is a cone over the circular sector from angle {@code theta1}\r\n      to angle {@code theta2}. In other words, the (partial) circles of\r\n      latitude in the model extend from angle {@code theta1} to angle\r\n      {@code theta2}.\r\n      <p>\r\n      The last two parameters determine the number of lines of longitude\r\n      (not counting one edge of any removed sector) and the number of\r\n      (partial) circles of latitude (not counting the top edge of the\r\n      frustum) in the model.\r\n      <p>\r\n      If there are {@code n} circles of latitude in the model (including\r\n      the bottom edge but not the top edge of the frustum), then each\r\n      line of longitude will have {@code n+1} line segments. If there are\r\n      {@code k} lines of longitude (not counting one edge of any removed\r\n      sector), then each (partial) circle of latitude will have {@code k}\r\n      line segments.\r\n      <p>\r\n      There must be at least four lines of longitude and at least\r\n      two circles of latitude.\r\n\r\n      @param r       radius of the base in the xz-plane\r\n      @param h       height of the apex on the y-axis\r\n      @param t       top of the frustum of the cone\r\n      @param theta1  beginning longitude angle of the sector\r\n      @param theta2  ending longitude angle of the sector\r\n      @param n       number of circles of latitude around the cone\r\n      @param k       number of lines of longitude\r\n   */\r\n   public ConeSector(double r,\r\n                     double h,\r\n                     double t,\r\n                     double theta1, double theta2,\r\n                     int n, int k)\r\n   {\r\n      super(\"Cone Sector\");\r\n\r\n      if (n < 2) n = 2;\r\n      if (k < 4) k = 4;\r\n      if (t > h) t = h;\r\n\r\n      // Create the cone's geometry.\r\n\r\n      double deltaH = h / (n - 1);\r\n      double deltaTheta = (theta2 - theta1) / (k - 1);\r\n\r\n      // An array of indexes to be used to create line segments.\r\n      int[][] indexes = new int[n][k];\r\n\r\n      // Create all the vertices.\r\n      int index = 0;\r\n      for (int j = 0; j < k; ++j) // choose an angle of longitude\r\n      {\r\n         double c = Math.cos(theta1 + j * deltaTheta);\r\n         double s = Math.sin(theta1 + j * deltaTheta);\r\n         for (int i = 0; i < n; ++i) // choose a circle of latitude\r\n         {\r\n            double slantRadius = r * (1 - i * deltaH / h);\r\n            addVertex( new Vertex(slantRadius * c,\r\n                                  i * deltaH,\r\n                                  slantRadius * s) );\r\n            indexes[i][j] = index++;\r\n         }\r\n      }\r\n      addVertex( new Vertex(0, h, 0) ); // apex\r\n      int apexIndex = index++;\r\n      addVertex( new Vertex(0, 0, 0) ); // bottom center\r\n      int bottomCenterIndex = index++;\r\n\r\n      // Create the horizontal (partial) circles of latitude around the 
cone.\r\n for (int i = 0; i < n; ++i)\r\n {\r\n for (int j = 0; j < k - 1; ++j)\r\n {\r\n addLineSegment(new LineSegment(indexes[i][j], indexes[i][j+1]));\r\n }\r\n }\r\n\r\n // Create the slanted lines of longitude from the base to the\r\n // top circle of latitude, and the triangle fan in the base.\r\n for (int j = 0; j < k; ++j)\r\n {\r\n addLineSegment(new LineSegment(bottomCenterIndex, indexes[0][j]));\r\n\r\n for (int i = 0; i < n - 1; ++i)\r\n {\r\n addLineSegment(new LineSegment(indexes[i][j], indexes[i+1][j]));\r\n }\r\n }\r\n }\r\n}//ConeSector\r\n" }, { "alpha_fraction": 0.6388616561889648, "alphanum_fraction": 0.6395158767700195, "avg_line_length": 28.878787994384766, "blob_id": "fc558b66468263dbd6a8d6195e9839434b85dd6d", "content_id": "d3336876e1979d279be5cdff4e584c0df0d378c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3057, "license_type": "no_license", "max_line_length": 103, "num_lines": 99, "path": "/Master Year 1/Computer Graphics/HW4/renderer/scene/Position.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.scene;\r\n\r\n\r\n/**\r\n A {@code Position} data structure represents a geometric object in a\r\n distinct position (both location and orientation) in three-dimensional\r\n space as part of a {@link Scene}.\r\n<p>\r\n A {@code Position} object holds references to a {@link Model} object\r\n and a {@link Matrix} object. The {@link Model} represents the geometric\r\n object in the {@link Scene}. The {@link Matrix} determines the model's\r\n location and orientation in the {@link Camera}'s view coordinate system.\r\n The {@code Position}'s matrix helps us solve the problem of placing\r\n and moving a model in a scene.\r\n<p>\r\n When the renderer renders this {@code Position}'s {@link Model} into\r\n a {@link renderer.framebuffer.FrameBuffer}, the first stage of the\r\n rendering pipeline, {@link renderer.pipeline.Model2View}, multiplies\r\n every {@link Vertex} in the {@link Model}'s vertex list by this\r\n {@code Position}'s {@link Matrix}, which converts the coordinates in\r\n each {@link Vertex} from the model's own local coordinate system to\r\n the {@link Camera}'s view coordinate system (which is \"shared\" by all\r\n the other models in the scene). 
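As a rough sketch (the {@code translate} factory used here is assumed for\r\n illustration and is not defined in this file), a model might be placed with\r\n<pre>{@code\r\n Position p = new Position(new Cube());\r\n p.matrix = Matrix.translate(0, 0, -3); // hypothetical translation helper\r\n}</pre>\r\n 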
This matrix multiplication has the effect\r\n of \"placing\" the model in view space at an appropriate location (using\r\n the translation part of the matrix) and in the appropriate orientation\r\n (using the rotation part of the matrix).\r\n*/\r\npublic class Position\r\n{\r\n public Model model;\r\n public Matrix matrix;\r\n\r\n /**\r\n Construct a default {@code Position} with the identity {@link Matrix}\r\n and no {@link Model} object.\r\n */\r\n public Position()\r\n {\r\n this.model = null;\r\n this.matrix = Matrix.identity(); // identity matrix\r\n }\r\n\r\n\r\n /**\r\n Construct a {@code Position} with the identity {@link Matrix}\r\n and the given {@link Model} object.\r\n\r\n @param model {@link Model} object to place at this {@code Position}\r\n */\r\n public Position(final Model model)\r\n {\r\n this.model = model;\r\n this.matrix = Matrix.identity(); // identity matrix\r\n }\r\n\r\n\r\n /**\r\n Set this {@code Position}'s {@link Model} object.\r\n\r\n @param model {@link Model} object to place at this {@code Position}\r\n */\r\n public void setModel(final Model model)\r\n {\r\n this.model = model;\r\n }\r\n\r\n\r\n /**\r\n Reset this {@code Position}'s {@link Matrix} to the identity matrix.\r\n\r\n @return a reference to this {@code Position}'s {@link Matrix} to facilitate chaining method calls\r\n */\r\n public Matrix matrix2Identity()\r\n {\r\n this.matrix = Matrix.identity();\r\n return this.matrix;\r\n }\r\n\r\n\r\n /**\r\n For debugging.\r\n\r\n @return {@link String} representation of this {@code Position} object\r\n */\r\n @Override\r\n public String toString()\r\n {\r\n String result = \"\";\r\n result += \"This Position's Matrix is\\n\";\r\n result += matrix;\r\n result += \"This Position's Model is\\n\";\r\n result += (null == model) ? \"null\\n\" : model;\r\n return result;\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.34007808566093445, "alphanum_fraction": 0.3691870868206024, "avg_line_length": 33.212501525878906, "blob_id": "5864d935169941e0e5ca5e8467fb680aa9633a11", "content_id": "6a858e34ca727dbac3c94fbe840985afcff5e77e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2823, "license_type": "no_license", "max_line_length": 81, "num_lines": 80, "path": "/Master Year 1/Computer Graphics/HW4/renderer/models/Cube.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "WINDOWS-1252", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.models;\r\nimport renderer.scene.*;\r\n\r\n/**\r\n Create a wireframe model of a cube with its center\r\n at the origin, having edge length 2, and with its\r\n vertices at {@code (±1, ±1, ±1)}.\r\n<p>\r\n Here is a picture showing how the cube's eight vertices\r\n are labeled.\r\n<pre>{@code\r\n v[4]\r\n +-----------------+ v[5]\r\n /| /|\r\n / | / |\r\n / | / |\r\n / | / |\r\n v[7] +-----------------+ v[6] |\r\n | | | | y\r\n | | | | |\r\n | | | | |\r\n | v[0] +---------|-------+ v[1] |\r\n | / | / |\r\n | / | / +----. 
x\r\n | / | / /\r\n |/ |/ /\r\n +-----------------+ /\r\n v[3] v[2] z\r\n}</pre>\r\n See <a href=\"http://en.wikipedia.org/wiki/Cube\" target=\"_top\">\r\n http://en.wikipedia.org/wiki/Cube</a>\r\n\r\n @see Tetrahedron\r\n @see Octahedron\r\n @see Icosahedron\r\n @see Dodecahedron\r\n*/\r\npublic class Cube extends Model\r\n{\r\n /**\r\n Create a cube with its center at the origin, having edge\r\n length 2, and with its vertices at {@code (±1, ±1, ±1)}.\r\n */\r\n public Cube( )\r\n {\r\n super(\"Cube\");\r\n\r\n // Create the cube's geometry.\r\n Vertex v0 = new Vertex(-1, -1, -1); // four vertices around the bottom face\r\n Vertex v1 = new Vertex( 1, -1, -1);\r\n Vertex v2 = new Vertex( 1, -1, 1);\r\n Vertex v3 = new Vertex(-1, -1, 1);\r\n Vertex v4 = new Vertex(-1, 1, -1); // four vertices around the top face\r\n Vertex v5 = new Vertex( 1, 1, -1);\r\n Vertex v6 = new Vertex( 1, 1, 1);\r\n Vertex v7 = new Vertex(-1, 1, 1);\r\n\r\n // Add the cube's vertices to the model.\r\n addVertex(v0, v1, v2, v3);\r\n addVertex(v4, v5, v6, v7);\r\n\r\n // Create 12 line segments.\r\n addLineSegment(new LineSegment(0, 1), // bottom face\r\n new LineSegment(1, 2),\r\n new LineSegment(2, 3),\r\n new LineSegment(3, 0),\r\n new LineSegment(4, 5), // top face\r\n new LineSegment(5, 6),\r\n new LineSegment(6, 7),\r\n new LineSegment(7, 4),\r\n new LineSegment(0, 4), // back face\r\n new LineSegment(1, 5),\r\n new LineSegment(2, 6), // front face\r\n new LineSegment(3, 7));\r\n }\r\n}//Cube\r\n" }, { "alpha_fraction": 0.6651270389556885, "alphanum_fraction": 0.6651270389556885, "avg_line_length": 19.625, "blob_id": "8c46ab57a9a2b383182297baf8b0ff1324cb83df", "content_id": "02b6be96da64a7e890222ae17e607949402db9fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 866, "license_type": "no_license", "max_line_length": 103, "num_lines": 40, "path": "/Year 3/Assignment 4/Binary_Expr_Node.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": " // Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n\r\n#include \"Binary_Expr_Node.h\"\r\nBinary_Expr_Node::Binary_Expr_Node(void):\r\nright_(nullptr),\r\nleft_(nullptr)\r\n{\r\n\t//default constructor\r\n\tthis->right_leaf = right_;\r\n\tthis->left_leaf = left_;\r\n}\r\nBinary_Expr_Node::~Binary_Expr_Node(void)\r\n{\r\n // COMMENT You are not deleting the child node here. So, you\r\n // have a memory leak. \r\n \r\n //RESPONSE: Delete the left and right children nodes\r\n \r\n\t//destructor: free the child nodes that eval() actually uses\r\n\tdelete right_leaf;\r\n\tdelete left_leaf;\r\n}\r\n\r\nvoid Binary_Expr_Node::accept(Expr_Node_Visitor & v)\r\n{\r\n\t\r\n}\r\n\t\t\r\nint Binary_Expr_Node::eval(void)\r\n{\r\n\t//result holds the calculation of left and right evaluation and returns the operation on those numbers\r\n\tint result = this->calculate(left_leaf->eval(),right_leaf->eval());\r\n\t\r\n\treturn result;\r\n}\r\n" }, { "alpha_fraction": 0.6816608905792236, "alphanum_fraction": 0.6816608905792236, "avg_line_length": 17.266666412353516, "blob_id": "6c30fc97203ea734061ddf16e3ecb14e35f9895e", "content_id": "3830ae0b3c95d7d6efd486a452c775c3cde4a47d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 289, "license_type": "no_license", "max_line_length": 32, "num_lines": 15, "path": "/Year 2/Project 3/InsertionSort.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "//InsertionSort.h\r\n#ifndef INSERTIONSORT_H\r\n#define INSERTIONSORT_H\r\n#include \"Sort.h\"\r\nclass InsertionSort: public Sort\r\n{\r\n\tpublic:\r\n\t\t//constructor\r\n\t\tInsertionSort();\r\n\t\t//destructor\r\n\t\t~InsertionSort();\r\n\t\t//virtual sort method\r\n\t\tvoid sort(int *, int);\r\n};\r\n#endif//INSERTIONSORT_H\r\n" }, { "alpha_fraction": 0.602150559425354, "alphanum_fraction": 0.602150559425354, "avg_line_length": 9.625, "blob_id": "80e2d6b815e7f2c8b9f2d2532099b4ac189e8e3b", "content_id": "1c7706a0ec73b493039f672c9702586a1cf19bb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 93, "license_type": "no_license", "max_line_length": 44, "num_lines": 8, "path": "/Master Year 1/Computer Graphics/HW3/renderer/models/package-info.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\n/**\r\n A library of predefined wireframe models.\r\n*/\r\npackage renderer.models;\r\n" }, { "alpha_fraction": 0.6647287011146545, "alphanum_fraction": 0.6744186282157898, "avg_line_length": 15.793103218078613, "blob_id": "fcc9da094b8f5ea381fe20e2b7598", "content_id": "5a00fbdea4571d31385b6436b7aff57886d836fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 516, "license_type": "no_license", "max_line_length": 59, "num_lines": 29, "path": "/Year 3/Assignment3/Subtract_Command.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n#include \"Subtract_Command.h\"\r\n\r\nSubtract_Command:: Subtract_Command(Stack <int> &s):\r\nBinary_Op_Command(s),\r\nprecedence(2)\r\n{\r\n\t//constructor\r\n}\r\n\r\nSubtract_Command::~Subtract_Command(void)\r\n{\r\n\t//destructor\r\n}\r\n\r\nint Subtract_Command::evaluate (int n1, int n2) const\r\n{\r\n\t//return result of subtracting integers\r\n\treturn n1 - n2;\r\n}\r\n\r\nint Subtract_Command::prec (void) const\r\n{\r\n\treturn precedence;\r\n\t\r\n}\r\n" }, { "alpha_fraction": 0.529891312122345, "alphanum_fraction": 0.54347825050354, "avg_line_length": 14.82608699798584, "blob_id": "dccf912f6815eed788e16324d88c6cae7e68a64c", "content_id": "91bdb1ee7881eb8a4f3d56804bec527435de1770", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 368, "license_type": "no_license", "max_line_length": 39, "num_lines": 23, 
"path": "/Year 2/Project 1/Tour.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n* Tour.h\r\n*\r\n* Created on: Feb 7, 2018\r\n* Author: Cellus\r\n*/\r\n\r\n#ifndef TOUR_H_\r\n#define TOUR_H_\r\n\r\nclass Tour {\r\npublic:\r\n\t\t\r\n\tbool limit(int, int);\r\n\tbool isEmpty( int [], int, int);\r\n\tint emptyNextTo(int[], int, int);\r\n\tbool nextMove(int [], int * , int *);\r\n\tbool warnsdoffFunct(int [], int, int);\r\n\t\r\n\tbool tourFunct();\r\n};\r\n\r\n#endif /* TOUR_H_ */\r\n\r\n\r\n" }, { "alpha_fraction": 0.6578947305679321, "alphanum_fraction": 0.6578947305679321, "avg_line_length": 17.125, "blob_id": "83ad1de4b5eb2808376076a354bd2eaf9163b265", "content_id": "64a5532fceb05c3b963d3aca720f879ad06b4611", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 304, "license_type": "no_license", "max_line_length": 33, "num_lines": 16, "path": "/Year 2/Project 3/QuickSort.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "//QuickSort.h\r\n#ifndef QUICKSORT_H\r\n#define QUICKSORT_H\r\n#include \"newSort.h\"\r\nclass QuickSort: public newSort\r\n{\r\n\tpublic:\r\n\t\t//constructor\r\n\t\tQuickSort();\r\n\t\t//destructor\r\n\t\t~QuickSort();\r\n\t\t//virtual sort method\r\n\t\tint breakFunct(int*, int, int);\r\n\t\tvoid sort(int *, int,int);\r\n};\r\n#endif//QUICKSORT_H" }, { "alpha_fraction": 0.6146789193153381, "alphanum_fraction": 0.6238532066345215, "avg_line_length": 11.625, "blob_id": "36d183386e34a9b92f95676d74fc0ada0963d7e6", "content_id": "5c8da3f9095d18be113475c92e9908fcb6a6d53a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 109, "license_type": "no_license", "max_line_length": 61, "num_lines": 8, "path": "/Master Year 1/Computer Graphics/HW2/renderer/scene/package-info.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\n/**\r\n Data structures for describing a 3D scene to the renderer.\r\n*/\r\npackage renderer.scene;\r\n" }, { "alpha_fraction": 0.6653465628623962, "alphanum_fraction": 0.6752475500106812, "avg_line_length": 16.035715103149414, "blob_id": "572dc8c2f7793d1b6d283b70503c1183378662da", "content_id": "21d6251f9bf01cecc381771d31aa405c0a69f3a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 505, "license_type": "no_license", "max_line_length": 59, "num_lines": 28, "path": "/Year 3/Assignment3/Modulus_Command.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n#include \"Modulus_Command.h\"\r\n\r\nModulus_Command::Modulus_Command(Stack <int> &s):\r\nBinary_Op_Command(s),\r\nprecedence(3)\r\n{\r\n\t//constructor\r\n}\r\n\r\nModulus_Command::~Modulus_Command(void)\r\n{\r\n\t//destructor\r\n}\r\n\r\nint Modulus_Command::evaluate (int n1, int n2) const\r\n{\r\n\t//return result of multiplying integers\r\n\treturn n1 % n2;\r\n}\r\n\r\nint Modulus_Command::prec (void) const\r\n{\r\n\treturn precedence;\r\n}\r\n" }, { "alpha_fraction": 0.5752009153366089, "alphanum_fraction": 0.5832376480102539, "avg_line_length": 16.1875, "blob_id": "91fec02492f2f262bf80ab81afcbc245508a8dbf", "content_id": "961692f263cb8bcf69a578fea680ad67a5c4eeb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 871, "license_type": "no_license", "max_line_length": 
80, "num_lines": 48, "path": "/Year 2/Project 3/QuickSort.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#include <iostream>\r\n#include \"QuickSort.h\"\r\n\r\n//QuickSort.cpp\r\n//default constructor\r\nQuickSort::QuickSort(){\r\n\t\r\n}\r\nQuickSort::~QuickSort(){\r\n}\r\n\r\n//sort method for Insertion sorting\r\n//pass array for it to be sorted, low is the first index, high is the last index\r\nint QuickSort::breakFunct(int array[], int high, int low)\r\n{\r\n\tint part = array[high];\r\n\tint x = low-1;\r\n\t\r\n\tfor (int y = low; y <= high-1;y++)\r\n\t{\r\n\t\t\r\n\t\tif (array[y] <= part)\r\n\t\t{\r\n\t\t\tx++;\r\n\t\t\tint temp = array[x];\r\n\t\t\tarray[x] = array[y];\r\n\t\t\tarray[y] = temp;\r\n\t\t}\r\n\t}\r\n\tint temp = array[x+1];\r\n\tarray[x+1] = array[high];\r\n\tarray[high] = temp;\r\n\t\r\n\treturn (x+1);\r\n\t\r\n}\r\n\r\nvoid QuickSort::sort(int array[], int high, int low)\r\n{\r\n\tif (low < high)\r\n\t{\r\n\t\tint z = breakFunct(array,high, low);\r\n\t\t\r\n\t\t//sort elements before & after breakFunction \r\n\t\tsort(array,z-1, low);\r\n\t\tsort(array,high,z +1);\r\n\t}\r\n}" }, { "alpha_fraction": 0.6041666865348816, "alphanum_fraction": 0.6625000238418579, "avg_line_length": 20.363636016845703, "blob_id": "7d0d56066d1674888f6b253435363b8182f6a4d0", "content_id": "6a5dbcf060c91b525932185623643b182ea4d595", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 240, "license_type": "no_license", "max_line_length": 53, "num_lines": 11, "path": "/Year 2/README.txt", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "Course work from assignments/projects in second year.\nCS_240 (Computing II):\n Assignment #2\n Assignment #3\n Assignment #4\n Assignment #5\n Assignment #6\nCS_362 (Data Structures):\n Project 1\n Project 2\n Project 2\n \n" }, { "alpha_fraction": 0.6167502999305725, "alphanum_fraction": 0.6257668733596802, "avg_line_length": 45.3039665222168, "blob_id": "17337fba87503ffe07fb296622e7181a01852c77", "content_id": "7e6883a68c26cc81954854ba91b46b344d3215c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10758, "license_type": "no_license", "max_line_length": 698, "num_lines": 227, "path": "/Year 1/End of Semester Project/Project.py", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "\r\n \r\n#Define Object Player\r\nclass Player:\r\n #create intializer\r\n def __init__(self,name=None, totalPoints=0,fgMade=0,fgAttempt=0,totalAssists=0, offenseReb=0, defReb=0, totalSteals=0,totalBlocks=0,freeMade=0,freeAttempt=0,totalTurnovers=0,gamesPlayed=0):\r\n\r\n self.name = name\r\n self.totalPoints = totalPoints\r\n self.fgMade = fgMade\r\n self.fgAttempt = fgAttempt\r\n self.totalAssists = totalAssists\r\n self.offenseReb = offenseReb\r\n self.defReb = defReb\r\n self.totalSteals = totalSteals\r\n self.totalBlocks = totalBlocks\r\n self.freeMade = freeMade\r\n self.freeAttempt = freeAttempt\r\n self.totalTurnovers = totalTurnovers\r\n self.gamesPlayed = gamesPlayed\r\n \r\n #add while loop for error when entering stat not 0 or greater\r\n #add while for fg and freethrows to check if less than 0\r\n def setName(self,playerName):\r\n self.name = str(playerName) \r\n def setTotalPoints(self,points):\r\n self.totalPoints = int(points)\r\n \r\n #points = input(\"Enter total points: \")\r\n while int(points)<0:\r\n print(\"Total points must be 0 or greater than. 
\")\r\n points = input(\"Enter points again: \")\r\n self.totalPoints = int(points)\r\n \r\n \r\n #if int(points) >= 0:\r\n #self.totalPoints = int(points)\r\n #else:\r\n #print(\"Total points must be 0 or greater than. \")\r\n #points = input(\"Enter total points: \")\r\n def setFg(self,fgMade,fgAttempt):\r\n self.fgAttempt = int(fgAttempt)\r\n self.fgMade = int(fgMade)\r\n while int(fgMade) > int(fgAttempt):\r\n print(\"Field goals made must be more or the same as attempts.\")\r\n fgMade = input(\"Enter field goals made: \")\r\n fgAttempt = input(\"Enter field goals attempted: \")\r\n #self.fgAttempt = int(fgAttempt)\r\n #self.fgMade = int(fgMade)\r\n while int(fgMade)<0 or int(fgAttempt)<0:\r\n print(\"Error, re-enter field goals made and attempted\")\r\n fgMade = input(\"Enter field goals made: \")\r\n fgAttempt = input(\"Enter field goals attempted: \")\r\n self.fgAttempt = int(fgAttempt)\r\n self.fgMade = int(fgMade)\r\n\r\n \r\n def setTotalAssists(self,assists):\r\n while int(assists) < 0:\r\n print(\"Total assits must be 0 or greater \")\r\n assists = input(\"Enter total assists: \")\r\n self.totalAssists = int(assists)\r\n def setOffenseReb(self,offRebounds):\r\n while int(offRebounds) < 0:\r\n print(\"Offensive rebounds must be 0 or greater \")\r\n offRebounds = input(\"Enter offensive rebounds: \")\r\n self.offenseReb = int(offRebounds)\r\n def setDefReb(self,defenseReb):\r\n while int(defenseReb) < 0:\r\n print(\"Defense rebouunds must be 0 or greater \")\r\n defenseReb = input(\"Enter defensive rebounds: \")\r\n self.defReb = int(defenseReb)\r\n def setTotalSteals(self,steals):\r\n while int(steals) < 0:\r\n print(\"Steals must be 0 or greater \")\r\n steals = input(\"Enter steals: \")\r\n self.totalSteals = int(steals)\r\n def setTotalBlocks(self,blocks):\r\n while int(blocks) < 0:\r\n print(\"Blocks must be 0 or greater \")\r\n blocks = input(\"Enter blocks: \")\r\n self.totalBlocks = int(blocks)\r\n def setFreeThrows(self,freeMade,freeAttempt):\r\n self.freeAttempt = int(freeAttempt)\r\n self.freeMade = int(freeMade)\r\n while int(freeMade) > int(freeAttempt):\r\n print(\"Free throws made must be less than or the same as attempt\")\r\n freeMade = input(\"Enter free throws made: \")\r\n freeAttempt = input(\"Enter free throws attempted: \")\r\n #self.fgAttempt = int(fgAttempt)\r\n #self.fgMade = int(fgMade)\r\n while int(freeMade)<0 or int(freeAttempt)<0:\r\n print(\"Error. 
re-enter free throws made and attempted: \")\r\n freeMade = input(\"Enter free throws made: \")\r\n freeAttempt = input(\"Enter free throws attempted: \")\r\n self.freeMade = int(freeMade)\r\n self.freeAttempt = int(freeAttempt)\r\n \r\n def setTotalTurnovers(self,turnovers):\r\n while int(turnovers) < 0:\r\n print(\"Turnovers must be 0 or greater\")\r\n turnovers = input(\"Enter turnovers: \")\r\n self.totalTurnovers = int(turnovers)\r\n def setGamesPlayed(self,games):\r\n while int(games) <= 0:\r\n print(\"Games played must be greater than 0 \")\r\n games = input(\"Enter games played: \")\r\n self.gamesPlayed = int(games)\r\n return self.gamesPlayed\r\n def getPointsPer(self):\r\n pointsPer = (self.totalPoints/self.gamesPlayed)\r\n #print(pointsPer)\r\n return \"{0:.1f}\".format(pointsPer)\r\n def getFieldPercent(self):\r\n fieldPercent = (self.fgMade/self.fgAttempt) *100\r\n \r\n return \"{0:.1f}\".format(fieldPercent)\r\n def getFieldMadePer(self):\r\n fgMadePer = self.fgMade/self.gamesPlayed\r\n return \"{0:.1f}\".format(fgMadePer)\r\n def getFieldAttemptPer(self):\r\n fgAttemptPer = self.fgAttempt/self.gamesPlayed\r\n return \"{0:.1f}\".format(fgAttemptPer)\r\n def getAssistPer(self):\r\n assistPer = self.totalAssists/self.gamesPlayed\r\n return \"{0:.1f}\".format(assistPer)\r\n def getOffRebPer(self):\r\n offRebPer = self.offenseReb/self.gamesPlayed\r\n return \"{0:.1f}\".format(offRebPer)\r\n def getDefRebPer(self):\r\n defRebPer = self.defReb/self.gamesPlayed\r\n return \"{0:.1f}\".format(defRebPer)\r\n def getStealsPer(self):\r\n stealsPer = self.totalSteals/self.gamesPlayed\r\n return \"{0:.1f}\".format(stealsPer)\r\n def getBlocksPer(self):\r\n blocksPer = self.totalBlocks/self.gamesPlayed\r\n return \"{0:.1f}\".format(blocksPer)\r\n def getTurnoversPer(self):\r\n turnoversPer = self.totalTurnovers/self.gamesPlayed\r\n return \"{0:.1f}\".format(turnoversPer)\r\n def getFreePercent(self):\r\n freePercent = (self.freeMade/self.freeAttempt)*100\r\n return \"{0:.1f}\".format(freePercent)\r\n def getFreeMadePer(self):\r\n freeMadePer = self.freeMade/self.gamesPlayed\r\n return \"{0:.1f}\".format(freeMadePer)\r\n def getFreeAttemptPer(self):\r\n freeAttemptPer = self.freeAttempt/self.gamesPlayed\r\n return \"{0:.1f}\".format(freeAttemptPer)\r\n def getStats(self):\r\n string = \"\\n{}'s Stats \\nPoints Per Game: {} \\nField Goal Percentage: {} \\nField Goals Made Per Game: {} \\nField Goals Attempted Per Game: {} \\nAssists Per Game: {} \\nOffensive Rebounds Per Game: {} \\nDefensive Rebounds Per Game: {} \\nSteals Per Game: {} \\nBlocks Per Game: {} \\nTurnovers Per Game: {} \\nFree Throw Percentage: {} \\nFree Throws Made: {} \\nFree Throws Attempted: {} \".format(self.name,self.getPointsPer(),self.getFieldPercent(),self.getFieldMadePer(),self.getFieldAttemptPer(),self.getAssistPer(),self.getOffRebPer(),self.getDefRebPer(),self.getStealsPer(),self.getBlocksPer(),self.getTurnoversPer(),self.getFreePercent(),self.getFreeMadePer(),self.getFreeAttemptPer(), \"\\n\")\r\n return string\r\n #Save stats to be able to review them\r\n def saveStats(self):\r\n with open('Stats.txt', 'a')as f:\r\n f.write(self.getStats())\r\n \r\n \r\n \r\ndef main():\r\n print(\"Hello! This is the basketball stat machine program. The program asks for the name of the player, the totals of each stat and the number of games played. The program will calculate the averages and percentages of the players individual stats. These stats will be saved in a file in order to review and look at. 
It will also ask if you want to add in a second player's stats for comparison. Let's start with the first player's stats. \\n\")\r\n \r\n player1 = Player()\r\n name = input(\"Player name: \")\r\n player1.setName(name)\r\n totalPoints = input(\"Enter total points: \")\r\n player1.setTotalPoints(totalPoints)\r\n fgMade = input(\"Enter field goals made: \")\r\n fgAttempt = input(\"Enter field goals attempted: \")\r\n player1.setFg(fgMade,fgAttempt)\r\n totalAssists = input(\"Enter total assists: \")\r\n player1.setTotalAssists(totalAssists)\r\n offenseReb = input(\"Enter offensive rebounds: \")\r\n player1.setOffenseReb(offenseReb)\r\n defReb = input(\"Enter defensive rebounds: \")\r\n player1.setDefReb(defReb)\r\n totalSteals = input(\"Enter total steals: \")\r\n player1.setTotalSteals(totalSteals)\r\n totalBlocks = input(\"Enter total blocks: \")\r\n player1.setTotalBlocks(totalBlocks)\r\n freeMade = input(\"Enter free throws made: \")\r\n freeAttempt = input(\"Enter free throws attempted: \")\r\n player1.setFreeThrows(freeMade,freeAttempt)\r\n totalTurnovers = input(\"Enter total turnovers: \")\r\n player1.setTotalTurnovers(totalTurnovers)\r\n gamesPlayed = input(\"Enter games played: \")\r\n player1.setGamesPlayed(gamesPlayed)\r\n print(player1.getStats())\r\n player1.saveStats()\r\n response = input(\"Would you like to enter another player's stats for comparison: (yes or no)\\n\")\r\n if response == (\"yes\"):\r\n print(\"Player 2 Stats: \")\r\n player2 = Player()\r\n name = input(\"Player name: \")\r\n \r\n player2.setName(name)\r\n totalPoints = input(\"Enter total points: \")\r\n player2.setTotalPoints(totalPoints)\r\n fgMade = input(\"Enter field goals made: \")\r\n fgAttempt = input(\"Enter field goals attempted: \")\r\n player2.setFg(fgMade,fgAttempt)\r\n totalAssists = input(\"Enter total assists: \")\r\n player2.setTotalAssists(totalAssists)\r\n offenseReb = input(\"Enter offensive rebounds: \")\r\n player2.setOffenseReb(offenseReb)\r\n defReb = input(\"Enter defensive rebounds: \")\r\n player2.setDefReb(defReb)\r\n totalSteals = input(\"Enter total steals: \")\r\n player2.setTotalSteals(totalSteals)\r\n totalBlocks = input(\"Enter total blocks: \")\r\n player2.setTotalBlocks(totalBlocks)\r\n freeMade = input(\"Enter free throws made: \")\r\n freeAttempt = input(\"Enter free throws attempted: \")\r\n player2.setFreeThrows(freeMade,freeAttempt)\r\n totalTurnovers = input(\"Enter total turnovers: \")\r\n player2.setTotalTurnovers(totalTurnovers)\r\n gamesPlayed = input(\"Enter games played: \")\r\n player2.setGamesPlayed(gamesPlayed)\r\n print(player2.getStats())\r\n player2.saveStats()\r\n \r\n elif response == (\"no\"):\r\n print(\"Thanks for using the comparison stat machine\")\r\n else:\r\n print(\"Invalid response. 
\")\r\n response = input(\"(yes) or (no)\")\r\nif __name__== \"__main__\":\r\n main()\r\n \r\n" }, { "alpha_fraction": 0.7223974466323853, "alphanum_fraction": 0.7223974466323853, "avg_line_length": 21.64285659790039, "blob_id": "c7282037792a327282f0c9acc5760f43ce4815dc", "content_id": "5b99a65ca14c699b6b0cf9d4531ecfca98845ceb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 317, "license_type": "no_license", "max_line_length": 53, "num_lines": 14, "path": "/Year 4/QRcodeSubmission/QRCodeGenerator/errorCorrection.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#ifndef _ERRORCORRECTION_H_\n#define _ERRORCORRECTION_H_\n\n#include <iostream>\n#include <string>\n#include <math.h>\n\nvoid errorCorrection(std::string *, int, int *, int);\nvoid codeToDecimal(std::string *, int, int *);\nvoid getErrorWords(int*, int, int*, int);\nint convertToAlpha(int);\nint convertToInteger(int);\n\n#endif\n" }, { "alpha_fraction": 0.6789838075637817, "alphanum_fraction": 0.6859122514724731, "avg_line_length": 22.799999237060547, "blob_id": "6176e925f0d9d9c912fb2c2bc759509dae3e8917", "content_id": "e4306e3094bfa7d6734220d2019a3b80b1f3d49b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 866, "license_type": "no_license", "max_line_length": 67, "num_lines": 35, "path": "/Year 3/Assignment 4/Expr_Node_Visitor.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n\r\n#ifndef _EXPR_NODE_VISITOR\r\n#define _EXPR_NODE_VISITOR\r\n\r\n#include \"Expr_Node.h\"\r\n\r\nclass Add_Expr_Node;\r\nclass Subtract_Expr_Node;\r\nclass Num_Expr_Node;\r\nclass Multiply_Expr_Node;\r\nclass Modulus_Expr_Node;\r\nclass Division_Expr_Node;\r\n\r\nclass Expr_Node_Visitor \r\n{\r\n\tpublic:\r\n\t\tvirtual ~Expr_Node_Visitor(void);\r\n\t\t\r\n\t\t//methods for visiting nodes\r\n\t\tvirtual void Visit_Add_Node (Add_Expr_Node & node) = 0;\r\n\t\tvirtual void Visit_Subtract_Node(Subtract_Expr_Node & node)= 0;\r\n\t\tvirtual void Visit_Number_Node(Num_Expr_Node & node) = 0;\r\n\t\tvirtual void Visit_Multiply_Node(Multiply_Expr_Node & node) = 0;\r\n\t\tvirtual void Visit_Modulus_Node(Modulus_Expr_Node & node) = 0;\r\n\t\tvirtual void Visit_Division_Node (Division_Expr_Node & node) = 0;\r\n\t\t\r\n\t\t\r\n};\r\n#endif" }, { "alpha_fraction": 0.747474730014801, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 31.66666603088379, "blob_id": "07ea33444cc37a64ce32e2b9d072e44df613484e", "content_id": "12187bc7d1f6a07583b9182bcb46c12caf1a4668", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 99, "license_type": "no_license", "max_line_length": 49, "num_lines": 3, "path": "/Year 1/README.txt", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "Course work assignments/projects from first year.\nCS_230 (Computing I):\n End of Semester Project \n" }, { "alpha_fraction": 0.6746708154678345, "alphanum_fraction": 0.6754453778266907, "avg_line_length": 21.053571701049805, "blob_id": "cf3fc65b3198869ccd038aa70d31ee4ed4821205", "content_id": "4f02ae299f5d6f5e07ef5984a58bcaeacffb7c38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1291, "license_type": "no_license", "max_line_length": 116, "num_lines": 56, "path": "/Year 3/Assignment3/driver.cpp", 
"repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// COMMENT: Instead of using C functions to implement parts of the\r\n// calculator. It would be better suited to use a Wrapper Facade.\r\n\r\n//RESPONSE: Made a calculator class that now has the infix_to_postfix method and also an evaluation function.\r\n\r\n#include \"Calculator.h\"\r\n\r\n// COMMENT This is an incomplete assignment.\r\n//RESPONSE: My Program now correctly takes in the infix method and converts to postfix. Then evaluates it correctly.\r\n\r\nint main (int argc, char * argv [])\r\n{\r\n\r\nCalculator calc;\r\n\r\nStack <int> result;\r\nStack_Expr_Command_Factory factory (result);\r\n\r\n\r\nstd::string infix;\r\n//get input from STDIN\r\nstd::cout << \"Input expression. \" << std::endl;\r\nstd::getline (std::cin,infix);\r\n\r\n//unless infix says QUIT, then continue running\r\nwhile (infix != \"QUIT\")\r\n{\r\n\tStack <int> result;\r\n\tStack_Expr_Command_Factory factory (result);\r\n\tArray <Expr_Command *> postfix;\r\n\t\r\n\t//send infix to infix_to_postfix method\r\n\t//calls functions to perform conversion and evaluate (catch and exceptions thrown)\r\n\t\r\n\t\r\n\ttry\r\n\t{\r\n\t\tcalc.infix_to_postfix (infix, factory,postfix,result);\r\n\t\r\n\t} \r\n\tcatch (const char* msg)\r\n\t{\r\n\t\tstd::cerr << msg << std::endl;\r\n\t\t\r\n\t}\r\n\t\t\r\n\t\r\n\tstd::cout << \"output another expression. or QUIT. \";\r\n\tstd::getline (std::cin,infix);\r\n\t\r\n\r\n}\r\n\t\r\n\r\n return 0;\r\n}\r\n" }, { "alpha_fraction": 0.5893719792366028, "alphanum_fraction": 0.6159420013427734, "avg_line_length": 15.5600004196167, "blob_id": "2223573ef967ef0e8939d4d43ee8824fd352b4d6", "content_id": "aec261cffcd6e388b18d008d229448e00c9ea4f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 414, "license_type": "no_license", "max_line_length": 96, "num_lines": 25, "path": "/Year 4/QRcodeSubmission/QRCodeGenerator/finalMessage.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <iostream>\n#include <string>\n#include \"messagefun.h\"\n\nstd::string makeFinalMessage(std::string* codewords, std::string* data, int numCode, int numErr)\n{\n\tstd::string finalStr = \"\";\n\t\n\tfor(int i = 0; i < numCode; i++)\n\t{\n\t\tfinalStr += data[i];\n\t}\n\tfor(int i = 0; i < numErr; i++)\n\t{\n\t\tfinalStr += codewords[i];\n\t}\n\tif(numCode == 28)\n\t{\n\t\tfinalStr += \"0000000\";\n\t}\n\treturn finalStr;\n\t\n\n}\n" }, { "alpha_fraction": 0.6644737124443054, "alphanum_fraction": 0.6644737124443054, "avg_line_length": 17, "blob_id": "129b267b9b150707e3a437b550754301abc7de3d", "content_id": "b7ddd035a1e3121e443dce9452eb8bcefea1e397", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 152, "license_type": "no_license", "max_line_length": 69, "num_lines": 8, "path": "/Year 4/QRcodeSubmission/QRCodeGenerator/messagefun.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#ifndef _MESSAGEFUN_H_\r\n#define _MESSAGEFUN_H_\r\n\r\n#include <string>\r\n\r\nstd::string makeFinalMessage(std::string *, std::string *, int, int);\r\n\r\n#endif\r\n" }, { "alpha_fraction": 0.6151685118675232, "alphanum_fraction": 0.6221910119056702, "avg_line_length": 28.64583396911621, "blob_id": "6ad69cbe5bdf521bf1770384ff616d6f461364df", "content_id": "66b6b89c49729defc39aaf50108ad8007dac4aad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 1424, "license_type": "no_license", "max_line_length": 108, "num_lines": 48, "path": "/Year 4/csci487Group4Project-makingGraphs/Utilities/UIDGenerator.py", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#UIDGenerator.py\n#Created 4/5/20 by Jasper Heist\n#Contains NumberGenerator Singleton class\n\nfrom threading import Lock\n\nlock = Lock()\n\n#singleton number generator class that generates our UIDs for all intersections, connections, vehicles, etc.\nclass NumberGenerator(object):\n _instance = None\n\n def __new__(self):\n lock.acquire()\n if not self._instance:\n self._instance = super(NumberGenerator, self).__new__(self)\n self.__next_vehicle_uid = 0\n self.__next_connection_uid = 0\n self.__next_intersection_uid = 0\n lock.release()\n return self._instance\n\n #gets next vehicle UID\n def vehicle_uid(self):\n lock.acquire()\n uidToReturn=self.__next_vehicle_uid\n # print(\"VehicleUID: \", uidToReturn)\n self.__next_vehicle_uid += 1\n lock.release()\n return uidToReturn\n\n #gets next connection UID\n def connection_uid(self):\n lock.acquire()\n uidToReturn=self.__next_connection_uid\n # print(\"ConnectionUID: \", uidToReturn)\n self.__next_connection_uid += 1\n lock.release()\n return uidToReturn\n\n #gets next intersection UID\n def intersection_uid(self):\n lock.acquire()\n uidToReturn=self.__next_intersection_uid\n # print(\"IntersectionUID: \", uidToReturn)\n self.__next_intersection_uid += 1\n lock.release()\n return uidToReturn\n\n" }, { "alpha_fraction": 0.6676190495491028, "alphanum_fraction": 0.677142858505249, "avg_line_length": 23.658536911010742, "blob_id": "58c9d0ae454ddf9ae5ff6b0bedbe1ed4c54fec4e", "content_id": "7f8092bd252cc9a81ee329db470820ebd1f4f8f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1050, "license_type": "no_license", "max_line_length": 59, "num_lines": 41, "path": "/Year 3/Assignment 4/Expr_Builder.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n\r\n#ifndef _EXPR_BUILDER\r\n#define _EXPR_BUILDER\r\n\r\n#include \"Num_Expr_Node.h\"\r\n#include \"Add_Expr_Node.h\"\r\n#include \"Subtract_Expr_Node.h\"\r\n#include \"Multiply_Expr_Node.h\"\r\n#include \"Division_Expr_Node.h\"\r\n#include \"Modulus_Expr_Node.h\"\r\n#include \"Expr_Node.h\"\r\n \r\nclass Expr_Builder\r\n{\r\n\tpublic:\r\n\t\tvirtual ~Expr_Builder(void);\r\n\t\t\r\n\t\t//starts a new expression\r\n\t\tvirtual void start_expression (void) = 0;\r\n\t\t\r\n\t\t//methods for building types of nodes\r\n\t\tvirtual void build_number (int n) = 0;\r\n\t\tvirtual void build_add_operator (void) = 0;\r\n\t\tvirtual void build_subtract_operator(void) = 0;\r\n\t\tvirtual void build_multiply_operator(void) = 0;\r\n\t\tvirtual void build_division_operator(void) = 0;\r\n\t\tvirtual void build_modulus_operator(void) = 0;\r\n\t\tvirtual void build_left_parenthesis(void) = 0;\r\n\t\tvirtual void build_right_parenthesis(void) = 0;\r\n\t\t\t\t\r\n\t\t//get current expression\r\n\t\tvirtual Expr_Node * get_expression (void) = 0;\r\n\t\t\r\n};\r\n#endif" }, { "alpha_fraction": 0.5812389254570007, "alphanum_fraction": 0.5879645943641663, "avg_line_length": 20.332015991210938, "blob_id": "575c2cecfe868c18a500f3de6ca5fd4ede3d5260", "content_id": "2dd9631eb90843b61ae599470a28604b61f8b466", "detected_licenses": [ "CC0-1.0", "MIT" ], "is_generated": false, "is_vendor": false, 
"language": "JavaScript", "length_bytes": 5650, "license_type": "permissive", "max_line_length": 171, "num_lines": 253, "path": "/Year 4/Final Release/scrape.js", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/**************************************\r\n TITLE: jQuery\t\tscrape.js\t<----- Update the file name\r\n AUTHOR: Marcellus Hunt\t\t\t<----- *Your* name should be here\r\n CREATE DATE: 5 December 2019\r\n PURPOSE: To use practice use of functions\t\t<----- Also update the purpose of the file\r\n LAST MODIFIED ON: 5 December 2019\r\n LAST MODIFIED BY: Marcellus Hunt\r\n MODIFICATION HISTORY:\r\n 5 December: \r\n 5 December: Add comments \r\n***************************************/\r\n\r\n// The $ is the jQuery object\r\n// \"document\" is the document object\r\n// ready is a method of the jQuery object\r\n// function creates an anonymous function to contain the code that should run\r\n// In English, when the DOM has finished loading, execute the code in the function.\r\n// See pages 312-313 of the text for details.\r\n\r\n$(document).ready(function(){\r\n\t\r\n//$( \"#accordion\" ).accordion();\r\n\r\n\r\n\r\nvar availableTags = [\r\n\t\"John\",\r\n\t\"Gerald\",\r\n\t\"Robin\",\r\n\t\"Bruce\",\r\n\t\"Reginald\",\r\n\t\"Kent\",\r\n\t\"Ruth\",\r\n\t\"Beth\"\t\r\n];\r\n$( \"#autocomplete\" ).autocomplete({\r\n\tsource: availableTags\r\n});\r\n\r\n\r\n\r\n$( \"#button\" ).button();\r\n$( \"#button-icon\" ).button({\r\n\ticon: \"ui-icon-gear\",\r\n\tshowLabel: false\r\n});\r\n\r\n\r\n\r\n$( \"#radioset\" ).buttonset();\r\n\r\n\r\n\r\n$( \"#controlgroup\" ).controlgroup();\r\n\r\n\r\n\r\n$( \"#tabs\" ).tabs();\r\n\r\n\r\n\r\n$( \"#dialog\" ).dialog({\r\n\tautoOpen: false,\r\n\twidth: 400,\r\n\tbuttons: [\r\n\t\t{\r\n\t\t\ttext: \"Ok\",\r\n\t\t\tclick: function() {\r\n\t\t\t\t$( this ).dialog( \"close\" );\r\n\t\t\t}\r\n\t\t},\r\n\t\t{\r\n\t\t\ttext: \"Cancel\",\r\n\t\t\tclick: function() {\r\n\t\t\t\t$( this ).dialog( \"close\" );\r\n\t\t\t}\r\n\t\t}\r\n\t]\r\n});\r\n\r\n// Link to open the dialog\r\n$( \"#dialog-link\" ).click(function( event ) {\r\n\t$( \"#dialog\" ).dialog( \"open\" );\r\n\tevent.preventDefault();\r\n});\r\n\r\n\r\n\r\n$( \"#datepicker\" ).datepicker({\r\n\tinline: true\r\n});\r\n\r\n\r\n\r\n$( \"#spinner\" ).spinner();\r\n\r\n\r\n\r\n$( \"#menu\" ).menu();\r\n\r\n\r\n\r\n$( \"#tooltip\" ).tooltip();\r\n\r\n\r\n\r\n$( \"#selectmenu\" ).selectmenu();\r\n\r\n\r\n// Hover states on the static widgets\r\n$( \"#dialog-link, #icons li\" ).hover(\r\n\tfunction() {\r\n\t\t$( this ).addClass( \"ui-state-hover\" );\r\n\t},\r\n\tfunction() {\r\n\t\t$( this ).removeClass( \"ui-state-hover\" );\r\n\t}\r\n);\r\n//end of ui library\r\n\r\n\r\n$.validator.setDefaults({\r\n\t/*\r\nName: submit handler\r\n\r\nPURPOSE:\r\n\tScrapes data from the input and outputs it.\r\n\r\nPARAMETERS: \r\n\tnoy any\r\n\t\r\nRETURN VALUE: \r\n\tnot any\r\n*/\r\n\tsubmitHandler: function() {\r\n\t\t\r\n\t\talert(\"Invoking Submit\");\r\n\t\t//first name value\r\n\t\tvar firstName = new String($('#firstName').val());\r\n\t\t\r\n\t\t//middle name value\r\n\t\tvar midName = new String($('#autocomplete').val());\r\n\t\t\r\n\t\t//last name value\r\n\t\tvar lastName = new String($('#lastName').val());\r\n\t\t\r\n\t\t//phone value\r\n\t\tvar phone = new String($('#telephone').val());\r\n\t\t\r\n\t\t//email value\r\n\t\tvar email = new String($('#emailAutocomplete').val());\r\n\t\t\r\n\t\t//password value\r\n\t\tvar password = new String($('#password').val());\r\n\r\n\t\t//value of checked 
radiobox\r\n\t\tvar strRadioBox = $('input[name=\"radio\"]:checked').val();\r\n\r\n\t\t//scrape check boxes\r\n\t\tvar strCheckedBoxes = \" \";\r\n\t\t\r\n\t\t//loops through the checkbox for checked items\r\n\t\t$('input[name=\"sport\"]:checked').each(function() \r\n\t\t{\r\n\t\t\tstrCheckedBoxes += $(this).val() + \" \"; \r\n\t\t\r\n\t\t});\r\n\t\t\r\n\t\t//value of datepicker\r\n\t\tvar birthday = new String($('#datepicker').val());\r\n\r\n\t\t//value of spinner\r\n\t\tvar rate = new String ($('#spinner').val());\r\n\r\n\t\t// strContent echoes all the input values\r\n\t\tvar strContent = new String (\"My name is \" + firstName + \" \" + midName + \"<br>\" + lastName + \". <br>\" +\r\n\t\t\"My phone number is \" + phone + \". <br>\" + \"My birthday is \" + birthday + \". <br>\" + \"My email is \" + email + \". <br>\" + \"My password is \" +\r\n\t\tpassword + \". <br>\" + \"The type of account I have is a \" + strRadioBox + \". <br>\" + \"My favorite sports are \" + strCheckedBoxes + \". <br>\" + \"Customer Rating is \" + rate\r\n\t\t);\r\n\r\n\t\t//strContent = \"Hello hi\";\r\n\t\t$(\"#output\").html(\"Your account has been registered! Thanks for being a part of Shield Athletics <br>\" + strContent);\r\n\t\t\r\n\t}//end submit handler\r\n\t\r\n});//end validator\r\n\r\n//\r\n$(\"#formId\").validate({\r\n\t\r\n\trules: {\r\n\tfirstName:{ //first name is required and must be at least 2 characters\r\n\t\trequired:true,\r\n\t\tminlength:2\r\n\t},\r\n\tlastName:{ //last name is required and must be at least 2 characters\r\n\t\trequired:true,\r\n\t\tminlength:2\r\n\t},\r\n\tphone:{ // phone is required and must be at most 10 digits\r\n\t\trequired: true, \r\n\t\tdigits: true, \r\n\t\tmaxlength: 10\r\n\t},\r\n\temail: { //email is required and must be an email\r\n\t\trequired:true,\r\n\t\temail:true\r\n\t},\r\n\tpassword:{ // password is required and must be at least 8 characters\r\n\t\trequired:true,\r\n\t\tminlength:8\r\n\t},\r\n\tconfirm_password: { // confirm_password is required and needs to be the same as password entered above\r\n\t\trequired: true,\r\n\t\tminlength: 8,\r\n\t\tequalTo: \"#password\"\r\n\t}\r\n\t\r\n\t}, //end rules\r\n\tmessages: { // displays messages for the given rules above.\r\n\t\tfirstName:{\r\n\t\t\trequired: \"Please enter your first name\",\r\n\t\t\tminlength: \"Must contain at least 2 characters. \"\r\n\t\t},\r\n\t\tlastName:{\r\n\t\t\trequired: \"Please enter your last name\",\r\n\t\t\tminlength: \"Must contain at least 2 characters. \"\r\n\t\t},\r\n\t\tphone: {\r\n\t\t\trequired: \"Must provide phone number. \",\r\n\t\t\tdigits: \"Must contain only digits. \",\r\n\t\t\tmaxlength: \"Can't have more than 10 digits. \"\r\n\t\t},\r\n\t\temail: {\r\n\t\t\trequired: \"Please enter an email address. \",\r\n\t\t\temail: \"Enter valid email. \"\r\n\t\t},\r\n\t\tpassword:{\r\n\t\t\trequired: \"Need to provide a password. \",\r\n\t\t\tminlength: \"Password not long enough, must be 8 characters. \"\r\n\t\t},\r\n\t\tconfirm_password: {\r\n\t\t\trequired: \"Need to retype password. \",\r\n\t\t\tminlength: \"Must be 8 characters long. \",\r\n\t\t\tequalTo: \"Password must be the same as the one above. 
\"\r\n\t\t}\r\n\t}\r\n\t\r\n}); //end .validate\r\n\r\n\r\n\t\r\n}); // end of $(document).ready()\r\n" }, { "alpha_fraction": 0.6199009418487549, "alphanum_fraction": 0.6412402987480164, "avg_line_length": 32.79282760620117, "blob_id": "4e7f7e1b21685ce4b84048f1954acaf2cfbd1da7", "content_id": "095d3099c45cd5b4027f1fecf62a8a3971dc4c22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 8482, "license_type": "no_license", "max_line_length": 106, "num_lines": 251, "path": "/Year 4/QRcodeSubmission/QRCodeGenerator/mainwindow.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#include \"mainwindow.h\"\n#include \"ui_mainwindow.h\"\n#include <string>\n#include <bitset>\n#include <map>\n#include <iostream>\n#include \"qrpage.h\"\n#include \"masking.h\"\n#include \"createPNG.h\"\n#include <algorithm>\n\nint level;\n\nMainWindow::MainWindow(QWidget *parent)\n : QMainWindow(parent)\n , ui(new Ui::MainWindow)\n{\n ui->setupUi(this);\n}\n\nMainWindow::~MainWindow() {\n delete ui;\n}\n\nint MainWindow::chooseVersion(std::string input) {\n if (input.length() <= 20) {\n level =1;\n return 1;\n }\n else if (input.length() <= 38) {\n level =2;\n return 2;\n }\n return 0;\n}\n\n// Takes decimal string and converts to a binary string of <binarySize> length\nstd::string MainWindow::decimalToBinary(std::string input, int binarySize) {\n int number = std::stoi(input);\n std::string binaryNumber;\n for (int i = binarySize; i >= 0; i--) {\n int temp = number >> i;\n if (temp & 1) {\n binaryNumber.append(\"1\");\n }\n else {\n binaryNumber.append(\"0\");\n }\n }\n return binaryNumber;\n}\n\n// Gets the character count code for the binary output\nstd::string MainWindow::characterCount(std::string input, int versionNumber) {\n int stringLength = input.length();\n std::string finalCount;\n finalCount = std::bitset<9>(stringLength).to_string();\n return finalCount;\n}\n\nstd::string * MainWindow::dataEncoding() {\n std::map<char, int> alphaNumericMap;\n alphaNumericMap.insert(std::make_pair('0', 0));\n alphaNumericMap.insert(std::make_pair('1', 1));\n alphaNumericMap.insert(std::make_pair('2', 2));\n alphaNumericMap.insert(std::make_pair('3', 3));\n alphaNumericMap.insert(std::make_pair('4', 4));\n alphaNumericMap.insert(std::make_pair('5', 5));\n alphaNumericMap.insert(std::make_pair('6', 6));\n alphaNumericMap.insert(std::make_pair('7', 7));\n alphaNumericMap.insert(std::make_pair('8', 8));\n alphaNumericMap.insert(std::make_pair('9', 9));\n alphaNumericMap.insert(std::make_pair('A', 10));\n alphaNumericMap.insert(std::make_pair('B', 11));\n alphaNumericMap.insert(std::make_pair('C', 12));\n alphaNumericMap.insert(std::make_pair('D', 13));\n alphaNumericMap.insert(std::make_pair('E', 14));\n alphaNumericMap.insert(std::make_pair('F', 15));\n alphaNumericMap.insert(std::make_pair('G', 16));\n alphaNumericMap.insert(std::make_pair('H', 17));\n alphaNumericMap.insert(std::make_pair('I', 18));\n alphaNumericMap.insert(std::make_pair('J', 19));\n alphaNumericMap.insert(std::make_pair('K', 20));\n alphaNumericMap.insert(std::make_pair('L', 21));\n alphaNumericMap.insert(std::make_pair('M', 22));\n alphaNumericMap.insert(std::make_pair('N', 23));\n alphaNumericMap.insert(std::make_pair('O', 24));\n alphaNumericMap.insert(std::make_pair('P', 25));\n alphaNumericMap.insert(std::make_pair('Q', 26));\n alphaNumericMap.insert(std::make_pair('R', 27));\n alphaNumericMap.insert(std::make_pair('S', 28));\n alphaNumericMap.insert(std::make_pair('T', 
29));\n alphaNumericMap.insert(std::make_pair('U', 30));\n alphaNumericMap.insert(std::make_pair('V', 31));\n alphaNumericMap.insert(std::make_pair('W', 32));\n alphaNumericMap.insert(std::make_pair('X', 33));\n alphaNumericMap.insert(std::make_pair('Y', 34));\n alphaNumericMap.insert(std::make_pair('Z', 35));\n alphaNumericMap.insert(std::make_pair(' ', 36));\n alphaNumericMap.insert(std::make_pair('$', 37));\n alphaNumericMap.insert(std::make_pair('%', 38));\n alphaNumericMap.insert(std::make_pair('*', 39));\n alphaNumericMap.insert(std::make_pair('+', 40));\n alphaNumericMap.insert(std::make_pair('-', 41));\n alphaNumericMap.insert(std::make_pair('.', 42));\n alphaNumericMap.insert(std::make_pair('/', 43));\n alphaNumericMap.insert(std::make_pair(':', 44));\n\n // Mode Indicator: alphanumeric\n std::string codeWords = \"0010\";\n\n // Converting QTCreator qstring to c++ std::string\n std::string input = ui->userInput->text().toStdString();\n std::transform(input.begin(), input.end(), input.begin(), ::toupper);\n\n // Get version number from the string length\n int versionNumber = chooseVersion(input);\n std::string characterCountIndicator = characterCount(input, versionNumber);\n codeWords = codeWords.append(characterCountIndicator);\n\n // If string is odd number of characters, save last character for later\n char backCharacter;\n bool oddNumberStr = false;\n if (input.length() % 2 != 0) {\n backCharacter = input.back();\n input.pop_back();\n oddNumberStr = true;\n }\n\n // Encoding the input by taking two characters at a time and applying algorithm\n for (int i = 0; i < input.length(); i++) {\n std::map<char, int>::iterator it = alphaNumericMap.find(input.at(i));\n std::map<char, int>::iterator it2 = alphaNumericMap.find(input.at(i+1));\n int encodedDecimal = it->second * 45 + it2->second;\n std::string stringEncodedDecimal = std::to_string(encodedDecimal);\n std::string codeWordSection = decimalToBinary(stringEncodedDecimal, 10);\n codeWords = codeWords.append(codeWordSection);\n i++;\n }\n\n // Take odd numbered character and convert it to 6 digit binary\n if (oddNumberStr) {\n std::map<char, int>::iterator it = alphaNumericMap.find(backCharacter);\n int encodedDecimal = it->second;\n std::string stringEncodedDecimal = std::to_string(encodedDecimal);\n std::string codeWordSection = decimalToBinary(stringEncodedDecimal, 5);\n codeWords = codeWords.append(codeWordSection);\n }\n\n // Find number of code words based on version number * 8\n int numberOfCodewords;\n int numberOfBits;\n if (versionNumber == 1) {\n numberOfCodewords = 16;\n numberOfBits = numberOfCodewords * 8;\n }\n else if (versionNumber == 2) {\n numberOfCodewords = 28;\n numberOfBits = numberOfCodewords * 8;\n }\n\n // If bit string is shorter than number of required bits, add terminator 0's (up to four)\n int terminatorZeros = 0;\n while (terminatorZeros < 4) {\n if (codeWords.length() < numberOfBits) {\n codeWords = codeWords.append(\"0\");\n terminatorZeros++;\n }\n else {\n terminatorZeros = 4;\n }\n }\n\n // If number of bits in the string is not a multiple of 8, pad 0's till it is\n while (codeWords.length() % 8 != 0) {\n codeWords = codeWords.append(\"0\");\n }\n\n // If the string is still not long enough\n\n // Pad byte one (236)\n // Pad byte two (17)\n std::string padBytes[2] = {\"11101100\", \"00010001\"};\n int requiredPadBytes = (numberOfBits - codeWords.length()) / 8;\n for (int i = 0; i < requiredPadBytes; i++) {\n codeWords = codeWords.append(padBytes[i % 2]);\n }\n\n // Split codeWords string into an 
array every 8 bits\n std::string * codeWordsFinal = new std::string[numberOfCodewords];\n int byteCounter = 0;\n\n for (int i = 0; i < numberOfBits; i++) {\n if (i % 8 == 0 && i != 0) {\n byteCounter++;\n }\n std::string temp = codeWordsFinal[byteCounter];\n temp += codeWords.at(i);\n codeWordsFinal[byteCounter] = temp;\n }\n\n return codeWordsFinal;\n\n\n}\n\nstd::string MainWindow::getColor() {\n QString color = ui->comboBox->currentText();\n std::string selectedColor = color.toStdString();\n return selectedColor;\n}\n\nvoid MainWindow::on_submitButton_clicked() {\n int numCodeWords, numErrorWords;\n\n std::string * codewords;\n codewords = dataEncoding();\n\n if (level == 1){\n numCodeWords = 16;\n numErrorWords = 10;\n }\n else{\n numCodeWords = 28;\n numErrorWords = 16;\n }\n\n //to run error correction\n int * errorCorrectionWords = new int[numErrorWords];\n errorCorrection(codewords, numCodeWords, errorCorrectionWords, numErrorWords);\n //convert errorCorrection words to binary\n std::string * binaryErrorWords = new std::string[numErrorWords];\n for (int i=0; i < numErrorWords; i++){\n binaryErrorWords[i] = decimalToBinary(std::to_string(errorCorrectionWords[i]), 7);\n }\n\n\n std::string finalMessage = makeFinalMessage(binaryErrorWords, codewords, numCodeWords, numErrorWords);\n\n int ** layout = makeMatrix(finalMessage, level);\n\n\n\n genMaskingPatterns(layout, level, getColor());\n\n delete [] errorCorrectionWords;\n delete [] binaryErrorWords;\n\n QRPage *uiTwo = new QRPage(this);\n uiTwo->show();\n}\n" }, { "alpha_fraction": 0.5988483428955078, "alphanum_fraction": 0.6180422306060791, "avg_line_length": 20.65217399597168, "blob_id": "1e72e2bc8d7350f9b4e672bbb73fc32dcdff74bf", "content_id": "b184ff920e0b02e48494bb4d26239a577e04a735", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 521, "license_type": "no_license", "max_line_length": 61, "num_lines": 23, "path": "/Master Year 2/Operating Systems/HW3/hw3/filters/count_lines.c", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n This program counts the number of lines in its standard\r\n input and writes the resulting int to its standard output.\r\n\r\n When using Windows, if standard input is the console\r\n keyboard, use ^z (Control-z) to denote the end of file\r\n (and you must use ^z at the beginning of a line!).\r\n*/\r\n#include <stdio.h>\r\n\r\nint main()\r\n{\r\n int lineCount = 0;\r\n char oneLine [1000];\r\n\r\n while ( fgets(oneLine, 1000, stdin) != NULL )\r\n {\r\n lineCount++;\r\n }\r\n printf(\"%d\", lineCount);\r\n\r\n return 0;\r\n}\r\n" }, { "alpha_fraction": 0.5383692979812622, "alphanum_fraction": 0.5467625856399536, "avg_line_length": 24.90322494506836, "blob_id": "e85d6234b03c524630b586ae0d26811a8ab2125b", "content_id": "564b2bd9c8ece0b6ede5525d90d42053caaf6e78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1668, "license_type": "no_license", "max_line_length": 85, "num_lines": 62, "path": "/Master Year 1/Programming Languages and Compilers/HW5/hw5/Tree2dot.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/**\r\n This program converts a Tree data structure into\r\n a DOT description of the tree. 
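For example, given a root labeled \"+\" with two leaf children \"1\" and \"2\",\r\n the methods below would emit roughly:\r\n<pre>\r\ngraph {\r\nnode0 [label=\"+\"];\r\nnode1 [label=\"1\"];\r\nnode2 [label=\"2\"];\r\nnode0 -- node1;\r\nnode0 -- node2;\r\n}\r\n</pre>\r\n 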
The DOT description\r\n can then be processed by the dot.exe command to\r\n produce a graphical image of the tree data structure.\r\n\r\n Create a png file (from a dot file) with the following\r\n command line.\r\n > dot.exe -Tpng -O tree.dot\r\n\r\n See\r\n http://www.graphviz.org/Documentation.php\r\n*/\r\n\r\npublic class Tree2dot\r\n{\r\n private static int nodeCount;\r\n\r\n public static String tree2dot(Tree tree)\r\n {\r\n String result = \"graph {\\n\";\r\n result += \"node0 [label=\\\"\" + tree.getElement() + \"\\\"];\\n\";\r\n nodeCount = 0;\r\n result += tree2dot(tree, nodeCount);\r\n result += \"}\\n\";\r\n return result;\r\n }\r\n\r\n\r\n /**\r\n This tree2dot() method is essentially a\r\n preorder traversal of the tree.\r\n */\r\n public static String tree2dot(Tree tree, int nodeNumber)\r\n {\r\n String result = \"\";\r\n\r\n // create new nodes\r\n for (int i = 0; i < tree.degree(); i++)\r\n {\r\n result += \"node\" + (nodeCount+(i+1)) + \" \";\r\n result += \"[label=\\\"\" + tree.getSubTree(i).getElement() + \"\\\"];\\n\";\r\n }\r\n\r\n // create the edges\r\n for (int i = 0; i < tree.degree(); i++)\r\n {\r\n result += \"node\" + nodeNumber + \" -- \" + \"node\" + (nodeCount+(i+1)) + \";\\n\";\r\n }\r\n\r\n nodeNumber = nodeCount;\r\n nodeCount += tree.degree(); // count the nodes that we just created\r\n\r\n // convert each sub tree into a dot description\r\n for (int i = 0; i < tree.degree(); i++)\r\n {\r\n result += tree2dot(tree.getSubTree(i), nodeNumber+(i+1));\r\n }\r\n\r\n return result;\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.6878306865692139, "alphanum_fraction": 0.6878306865692139, "avg_line_length": 11.733333587646484, "blob_id": "67f4ca7c4b691919a45379d4dceba5049c71f3ea", "content_id": "08977ea5a9c464dbd911b4f03480a220397e382d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 189, "license_type": "no_license", "max_line_length": 25, "num_lines": 15, "path": "/Year 2/Project 2/CrossReference.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "//CrossReference.h\r\n\r\n#ifndef CROSSREFERENCE_H_\r\n#define CROSSREFERENCE_H_\r\n\r\nclass CrossReference\r\n{\r\npublic:\r\n\r\n\tvoid printTable();\r\n\tvoid parseWords();\r\n\r\n\r\n};\r\n#endif //CROSSREFERENCE_H" }, { "alpha_fraction": 0.5832697153091431, "alphanum_fraction": 0.5902915596961975, "avg_line_length": 15.542929649353027, "blob_id": "11d76fa69ef7d6970d2d3b647a1691fdb918e8a8", "content_id": "85af68ee13b020592a25e84536a3d3c64696dcbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6551, "license_type": "no_license", "max_line_length": 103, "num_lines": 396, "path": "/Year 3/Assignment3/Array.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// $Id: Array.cpp 827 2011-02-07 14:20:53Z hillj $\n\n// Honor Pledge:\n//\n// I pledge that I have neither given nor received any help\n// on this assignment.\n\n#include \"Array.h\"\n#include <stdexcept> // for std::out_of_range exception\n//\n// Array\n//\ntemplate <typename T>\nArray <T>::Array (void):\ndata_(nullptr),\ncur_size_(0),\nmax_size_(10)\n{\n\t//default constructor (default size to 10 to start off with)\n\tthis->data_ = new T [10];\n\t\n}\n\n//\n// Array (size_t)\n//\ntemplate <typename T>\nArray <T>::Array (size_t length): \ndata_(nullptr), \ncur_size_(0), \nmax_size_(0)\n{\n\t\n\t//initialize constructor\n\tthis->max_size_ = length;\n\t\n\t//create new T array\n\tthis->data_ = new T 
[length];\t\n}\n\n//\n// Array (size_t, char)\n//\ntemplate <typename T>\nArray <T>::Array (size_t length, T fill): \ndata_(nullptr), \ncur_size_(0), \nmax_size_(0)\n{\n\t\n\t//initialize constructor\n\tthis->max_size_ = length;\n\tthis->cur_size_ = length;\n\t\n\t//create new T array\n\tthis->data_ = new T [length];\n\n\t//for every index in array\n\t//this index in the array holds fill\t\nfor(int i = 0; i < this->max_size_; i++)\n\t{\n\t\tthis->data_[i] = fill;\n\t\t\n\t}\t\n\t\n}\n\n//\n// Array (const Array &)\n//\ntemplate <typename T>\nArray <T>::Array (const Array & array): \ndata_(nullptr), \ncur_size_(0), \nmax_size_(0)\n{\n\t\n\t//copy constructor\n\tthis->max_size_ = array.max_size_;\n\tthis->cur_size_ = array.cur_size_;\n\t\n\t//Allocate the array\n\tthis->data_ = new T[this->max_size_];\n\t\n\t//for every index in array\n\tfor(int i = 0; i < this->max_size_; i++)\n\t{\n\t\t//newVal holds the element at the next index\n\t\tT newVal = array.data_[i];\n\t\tthis->data_[i] = newVal;\n\t\t\t\t\n\t}\n\t\n}\n\n//\n// ~Array\n//\ntemplate <typename T>\nArray <T>::~Array (void)\n{\n\tdelete [] data_;\n}\n\n//\n// operator =\n//\ntemplate <typename T>\nconst Array <T> & Array <T>::operator = (const Array & rhs)\n{\n\t\n\tif (&rhs == this)\n\t{\n\t\treturn *this;\n\t}\n\t\n\t\t//free the old array before allocating a new one (avoids a memory leak)\n\t\tdelete [] this->data_;\n\t\tthis->data_ = new T [rhs.max_size_];\n\t\t\n\t\t//'this' max size and cur size hold the sizes of object 'rhs'\n\t\tthis->max_size_ = rhs.max_size_;\n\t\tthis->cur_size_ = rhs.cur_size_;\n\t\t\n\t\t//for every index in array\n\t\tfor (int x = 0; x < this->max_size_; x++)\n\t\t{\n\t\t\t//thisVal holds the element at this index in the 'rhs' array object\n\t\t\tT thisVal = rhs.data_[x];\n\t\t\n\t\t\t//this holds the element from the object on the right hand side\n\t\t\tthis->data_[x] = thisVal;\n\t\t\t\t\n\t\t}\t\n\t\treturn *this;\n\t\n}\n\n//\n// operator []\n//\ntemplate <typename T>\nT & Array <T>::operator [] (size_t index)\n{\n\t\n\t//for every index in array\n\tfor (int i = 0; i < this->max_size_; i++)\n\t{\n\t\t//if index given is in array\n\t\tif(i==index)\n\t\t{\n\t\t\t//return element at index being pointed to\n\t\t\treturn this->data_[i];\n\t\t\t\n\t\t}\n\t\t\t\t\t\t\t\t\t\t\t\t\n\t}\n\tthrow std::out_of_range(\"Index not in array\");\n}\n\n//\n// operator [] \n//\ntemplate <typename T>\nconst T & Array <T>::operator [] (size_t index) const\n{\n\tfor (int i = 0; i < this->max_size_; i++)\n\t\t{\n\t\t\t//if index is in array\n\t\t\tif(i==index)\n\t\t\t{\n\t\t\t\t//return the value at index being pointed to.\n\t\t\t\treturn this->data_[i];\n\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t}\n\t\t\t\t\t\t\n\t\t}\n\t\t//if not found, throw out of range exception\n\t\tthrow std::out_of_range(\"Index not in array\");\n}\n\n//\n// get\n//\ntemplate <typename T>\nT Array <T>::get (size_t index) const\n{\n\t//for every index in array\n\t\tfor (int i = 0; i < this->max_size_; i++)\n\t\t{\n\t\t\t//if index is in array\n\t\t\tif(i==index)\n\t\t\t{\n\t\t\t\t//return the element at index being pointed to.\n\t\t\t\treturn this->data_[i];\n\t\t\t\t\n\t\t\t}\n\t\t\t\t\t\n\t\t}\n\t\t// out of range exception\n\t\tthrow std::out_of_range(\"Index not in array in get method\");\n}\n\n//\n// set\n//\ntemplate <typename T>\nvoid Array <T>::set (size_t index, T value)\n{\n\t//if index is not valid then throw exception (index is unsigned, so only the upper bound needs checking).\n\tif (index >= this->max_size_)\n\t{\n\t\tthrow std::out_of_range(\"Index not in array in set method\");\n\t\n\t}\n\t//for every index in array\n\tfor (int i = 0; i < this->max_size_; i++)\n\t{\t\t\n\t\t//if index is in array\n\t\tif (i == index)\n\t\t{\n\t\t\t//set element at 
index\n\t\t\tthis->data_[i] = value;\n\t\t\t//increment value of cur_size_\n\t\t\tthis->cur_size_++;\n\t\t\n\t\t}\n\t\t\n\t}\n\t\n}\n\n//\n// resize\n//\ntemplate <typename T>\nvoid Array <T>::resize (size_t new_size)\n{\n\t\n\t//if new_size is greater than cur_size_\n\tif (new_size > this->max_size_)\n\t{\n\t\t//then the array is made larger and the new elements are not initialized to anything.\n\t\tthis->max_size_ = new_size;\n\t\t\t\t\t\n\t}\n\t//if they are the same, nothing changes\n\t\n\t//if less than new size, throw exception\n\tif (new_size < this->max_size_)\n\t{\n\t\tthrow \"Resize can only make larger. Use shrink method. \";\n\t}\n\t\t\t\n}\n\n//\n// find (char)\n//\ntemplate <typename T>\nint Array <T>::find (T element) const\n{\n\t\n\t//for every index in array\n\tfor(int x = 0; x < this->max_size_;x++)\n\t{\n\t\t//if the contents of the index is equal to value, then return that index , if not in array, return -1\n\t\tif (this->data_[x] == element)\n\t\t{\n\t\t\treturn x;\n\t\t\t\n\t\t}\n\t\t\t\t\n\t}\n\t\n\treturn -1;\n\t\n}\n\n//\n// find (char, size_t) \n//\ntemplate <typename T>\nint Array <T>::find (T val, size_t start) const\n{\n\t//if start is out of index throw exception\n\tif (start >= this->max_size_ || start < 0)\n\t{\n\t\tthrow std::out_of_range(\"Start index is out of bounds. \");\n\t\t\n\t}\n\t\n\t//for indexes from start to end of array\n\tfor(int x = start; x < this->max_size_; x++)\n\t{\n\t\t//if value is at that index then return it\n\t\tif (this->data_[x] == val)\n\t\t{\n\t\t\treturn x;\n\t\t\t\n\t\t}\n\t\t\t\t\n\t}\n\t\n\treturn -1;\n}\n\n//\n// operator ==\n//\ntemplate <typename T>\nbool Array <T>::operator == (const Array & rhs) const\n{\n\t\n\t//Test the array for equality.\n\t\n\t//check for if objects are the same\n\tif (&rhs != this)\n\t{\n\t\treturn false;\n\t}\n\t\n\t//if contents are the same\n\tfor (int i = 0; i < this->max_size_;i++)\n\t{\n\t\tif (this->data_[i] != rhs.data_[i])\n\t\t{\n\t\t\t\n\t\t\treturn false;\n\t\t\t\n\t\t}\n\t}\n\t//if conditionals return false then the arrays are equal\n\treturn true;\n\t\n}\n\n//\n// operator !=\n//\ntemplate <typename T>\nbool Array <T>::operator != (const Array & rhs) const\n{\n\t\n\t//Test the array for inequality.\n\t\t\n\t//check for if the sizes are the same\n\tif (&rhs == this)\n\t{\n\t\treturn false;\n\t}\n\t\n\t//if contents are the same\n\tfor(int i = 0; i < this->max_size_; i++)\n\t{\n\t\tif (this->data_[i] == rhs.data_[i])\n\t\t{\n\t\t\t//return false\n\t\t\treturn false;\n\t\t\t\n\t\t}\n\t}\n\t\n\treturn true;\n\t\n}\n\n//\n// fill\n//\ntemplate <typename T>\nvoid Array <T>::fill (T value)\n{\n\n\t//Fill the contents of the array.\n\t\n\t//for every index in array\n\tfor(int x = 0; x < this->max_size_; x++)\n\t{\n\t\t//the index holds the value \n\t\tthis->data_[x] = value;\n\t\t\n\t}\n\t\n\tthis->cur_size_ = this->max_size_;\n}\n\ntemplate <typename T>\nvoid Array<T>::shrink()\n{\n\t//Shrink the array to reclaim unused space.\n\t\t\n\tif(this->cur_size_ < this->max_size_)\n\t{\n\t\tthis->max_size_ = this->cur_size_;\n\t}\n\t//if its the same then do nothing\n\t\n}\n" }, { "alpha_fraction": 0.618635356426239, "alphanum_fraction": 0.6236473321914673, "avg_line_length": 45.44973373413086, "blob_id": "1991aea71e21c71213e62a0aaaf9c95f31558df7", "content_id": "3237b3db42b9d6e456137a8c6a80865952f607cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8779, "license_type": "no_license", "max_line_length": 223, "num_lines": 189, "path": 
"/Year 4/csci487Group4Project-makingGraphs/CityStructure/City.py", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#City.py\n#Created 4/5/20 by Jasper Heist\n#File containing class definition of a city.\n\nfrom CityStructure.Intersection import Intersection, TrafficLight\nfrom Utilities.Definitions import Orientation, Colors, print_c, print_e, temp_format_clear\nfrom typing import Union, List\n\nclass City(object):\n \"\"\"City that contains a graph of intersections and connections\"\"\"\n\n def __init__(self, name):\n object.__init__(self)\n self.__name = name\n #single intersection in the graph so the city has reference to the whole thing. this is a list that must be inserted at thier uid. this is so we can look them up by uid (which we can store as an int in a connection)\n self.__intersections = dict()\n #intersection that we know we have. first one added to out town, since we cannot rely on the 0th intersection always being a part of our city\n self.__main_intersection = None\n\n self.__mapping_of_intersections = [[None, None, None], [None, None, None],[None, None, None]]\n\n def assign_connection_default(self, intersection, orientation_to_other_intersection:Orientation, other_intersection:Intersection = None):\n \"\"\"takes two takes two intersections and links them\n\n Parameters\n ----------\n intersection:Intersection\n main intersection we are binding to\n \n orientation_to_other_intersection:Orientation\n relation of main intersection to other intersection \n (if the main intersection is above the other intersection, this will be Orientation.Top)\n\n other_intersection:Intersection\n secondary intersection that we are linking with the main one\n\n \"\"\"\n #if int was passed in, get intersection\n if isinstance(intersection, int):\n intersection = self.__get_intersection_by_id(intersection)\n if(intersection == None):\n return\n #if int was passed in, get intersection\n if isinstance(other_intersection, int):\n other_intersection = self.__get_intersection_by_id(other_intersection)\n if(other_intersection == None):\n return\n\n #do not attach to intersection that already has connection here\n if not intersection.get_adjacent_intersection(orientation_to_other_intersection) == None:\n #if there is an intersection that already exists, just link the connections\n if intersection.get_adjacent_intersection(orientation_to_other_intersection) == self.__get_intersection_by_coordinates(intersection, orientation_to_other_intersection):\n intersection.link_intersection(other_intersection, orientation_to_other_intersection, True)\n other_intersection.link_intersection(intersection, orientation_to_other_intersection.other_side(), True)\n else:\n print_e(\"Intersection Already attached to {} side of intersection {}\".format(orientation_to_other_intersection.desc(), intersection.uid))\n return\n\n #if other intersection has not been supplied, default to a traffic light\n if other_intersection == None:\n other_intersection = TrafficLight()\n \n \n \n #first intersection assignment\n if(self.__main_intersection == None):\n self.__main_intersection = intersection\n\n if(intersection.coordinates == None):\n if(self.__main_intersection == intersection):\n intersection.coordinates = [0,0]\n else:\n print_e(\"{}Invalid. 
First Intersection argument must already exist in city OR be the first intersection (main intersection) you are adding to the city{}\".format(Colors.RED, Colors.RESET))\n return\n\n #add each of these to list of intersectionss\n if (not self.__intersections.__contains__(intersection)):\n self.__intersections[intersection.uid] = intersection\n if (not self.__intersections.__contains__(other_intersection)):\n self.__intersections[other_intersection.uid] = other_intersection\n\n current_coordinates = intersection.coordinates\n new_coordinates = [current_coordinates[0], current_coordinates[1]]\n\n #change coordinates based on orientation\n if (orientation_to_other_intersection == Orientation.Top):\n new_coordinates[0] -= 1\n elif (orientation_to_other_intersection == Orientation.Right):\n new_coordinates[1] += 1\n elif (orientation_to_other_intersection == Orientation.Bottom):\n new_coordinates[0] += 1\n elif (orientation_to_other_intersection == Orientation.Left):\n new_coordinates[1] -= 1\n\n intersection.link_intersection(other_intersection, orientation_to_other_intersection)\n\n return other_intersection.uid\n\n def __get_intersection_by_id(self, id):\n intersection = self.__intersections.get(id, None)\n if intersection == None:\n print_e(\"No intersection exists with supplied ID\")\n return intersection\n\n def __get_intersection_by_coordinates(self, intersection, orientation):\n new_coordinates = [intersection.coordinates[0], intersection.coordinates[1]]\n\n #change coordinates based on orientation\n if (orientation == Orientation.Top):\n new_coordinates[0] -= 1\n elif (orientation == Orientation.Right):\n new_coordinates[1] += 1\n elif (orientation == Orientation.Bottom):\n new_coordinates[0] += 1\n elif (orientation == Orientation.Left):\n new_coordinates[1] -= 1\n\n x = new_coordinates[0]\n y = new_coordinates[1]\n\n for intersection in self.__intersections.values():\n if (intersection.coordinates[0] == x) and (intersection.coordinates[1] == y):\n return intersection\n return None\n\n def print_info(self):\n print(\"City: \" + self.__name)\n print(\"+++++++++++++++++++++++++++++++\")\n for intersection in self.__intersections:\n intersection.print_info()\n\n def print_city_map(self):\n \"Assumes added in the order they are in main. 
No smart way of figuring out location based on coordinates yet, but this is on the way there.\"\n print_c(\"City: {}\".format(self.__name), Colors.WHITE)\n print_c(\"+++++++++++++++++++++++++++++++\", Colors.WHITE)\n \n westmost_index = 0\n rightmost_index = 0\n x_location_to_intersections = dict()\n for intersection in self.__intersections.values():\n x_coord = intersection.coordinates[0]\n y_coord = intersection.coordinates[1]\n if not x_location_to_intersections.__contains__(y_coord):\n x_location_to_intersections[y_coord] = list()\n\n x_location_to_intersections[y_coord].append(intersection)\n #check that y coordinate is not further left (need to know for printing)\n if x_coord < westmost_index:\n westmost_index = x_coord\n #gather bounds or right hand side\n if x_coord > rightmost_index:\n rightmost_index = x_coord\n\n #go through each row (sort the rows first)\n for row in sorted(x_location_to_intersections.keys()):\n sorted_row = sorted(x_location_to_intersections[row], key=lambda x: x.coordinates[0], reverse=False)\n self.__print_row_of_intersections(sorted_row, westmost_index)\n\n def __print_row_of_intersections(self, row_of_intersections, westmost_column):\n \"\"\"given a list of intersections to print, this prints the intersections next to each other in a row\"\"\"\n images = dict()\n last_index = len(row_of_intersections)\n #gather all ascii art of intersection\n for square in row_of_intersections:\n if isinstance(square, Intersection):\n images[square] = square.print_visual()\n \n #iterate and print them all together\n i = 0\n while i < 13:\n next_collumn = westmost_column\n line = \"\"\n collumn_index = 0\n while collumn_index < last_index:\n square = row_of_intersections[collumn_index]\n if square.coordinates[0] > next_collumn:\n line += self.__empty_square_line()\n else:\n line += images[square][i]\n collumn_index += 1\n\n next_collumn += 1\n print_c(line, Colors.WHITE)\n i += 1\n\n def __empty_square_line(self):\n \"\"\"returns an empty line for printing off empty spaces in the city\"\"\"\n #clear all formatting for empty field\n return temp_format_clear(\" \", Colors.WHITE)\n" }, { "alpha_fraction": 0.6215164065361023, "alphanum_fraction": 0.6221392154693604, "avg_line_length": 32.532257080078125, "blob_id": "a4e455c03d236ab70903ef77e5449bbd749f9a50", "content_id": "6bf6af6e89ff4cdbad5bc6e34aecff8a2dda573d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 6423, "license_type": "no_license", "max_line_length": 98, "num_lines": 186, "path": "/Master Year 1/Computer Graphics/HW2/renderer/scene/Model.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.scene;\r\n\r\nimport java.util.List;\r\nimport java.util.ArrayList;\r\n\r\n/**\r\n A {@code Model} object represents a distinct geometric object in a\r\n {@link Scene}. A {@code Model} data structure is mainly a {@link List}\r\n of {@link Vertex} objects and another {@link List} of {@link LineSegment}\r\n objects.\r\n<p>\r\n The {@link Vertex} objects represents points from the geometric object\r\n that we are modeling. In the real world, a geometric object has an infinite\r\n number of points. In 3D graphics, we \"approximate\" a geometric object by\r\n listing just enough points to adequately describe the object. For example,\r\n in the real world, a rectangle contains an infinite number of points, but\r\n it can be adequately modeled by just its four corner points. (Think about\r\n a circle. 
How many points does it take to adequately model a circle? Look\r\n at the {@link renderer.models.Circle} model.)\r\n<p>\r\n Each {@link LineSegment} object contains two integers that are the indices\r\n of two {@link Vertex} objects from the {@code Model}'s vertex list. Each\r\n {@link Vertex} object contains the xyz-coordinates, in the camera coordinate\r\n system, for one of the line segment's two endpoints.\r\n<p>\r\n We use the {@link renderer.scene.LineSegment} objects to \"fill in\" some of\r\n the space between the model's vertices. For example, while a rectangle can\r\n be approximated by its four corner points, those same four points could also\r\n represent just two parallel line segments. By using four line segments that\r\n connect around the four points, we get a good representation of a rectangle.\r\n<p>\r\n If we modeled a circle using just points, we would probably need to draw\r\n hundreds of points. But if we connect every two adjacent points with a\r\n short line segment, we can get a good model of a circle with just a few\r\n dozen points.\r\n<p>\r\n Our {@code Model}'s represent geometric objects as a \"wire-frame\" of line\r\n segments, that is, a geometric object is drawn as a collection of \"edges\".\r\n This is a fairly simplistic way of doing 3D graphics and we will improve\r\n this in later renderers.\r\n<p>\r\n See\r\n<br> <a href=\"http://en.wikipedia.org/wiki/Wire-frame_model\" target=\"_top\">\r\n http://en.wikipedia.org/wiki/Wire-frame_model</a>\r\n<br>or\r\n<br> <a href=\"https://www.google.com/search?q=computer+graphics+wireframe&tbm=isch\" target=\"_top\">\r\n https://www.google.com/search?q=computer+graphics+wireframe&tbm=isch</a>\r\n*/\r\npublic class Model\r\n{\r\n public final List<Vertex> vertexList;\r\n public final List<LineSegment> lineSegmentList;\r\n\r\n public String name;\r\n public boolean visible;\r\n public boolean debug;\r\n\r\n /**\r\n Construct an empty {@code Model} object.\r\n */\r\n public Model()\r\n {\r\n vertexList = new ArrayList<>();\r\n lineSegmentList = new ArrayList<>();\r\n name = \"\";\r\n visible = true;\r\n debug = false;\r\n }\r\n\r\n\r\n /**\r\n Construct an empty {@code Model} object with the given name.\r\n\r\n @param name a {link String} that is a name for this {@code Model}\r\n */\r\n public Model(final String name)\r\n {\r\n this();\r\n this.name = name;\r\n }\r\n\r\n\r\n /**\r\n Construct a {@code Model} object with all the given data.\r\n\r\n @param vertexList a {@link Vertex} {link List} for this {@code Model}\r\n @param lineSegmentList a {@link LineSegment} {link List} for this {@code Model}\r\n @param name a {link String} that is a name for this {@code Model}\r\n @param visible a {@code boolean} that determines this {@code Model}'s visibility\r\n @param debug a {@code boolean} that turns debugging off/on for this {@code Model}\r\n */\r\n public Model(final List<Vertex> vertexList,\r\n final List<LineSegment> lineSegmentList,\r\n final String name,\r\n final boolean visible,\r\n final boolean debug)\r\n {\r\n this.vertexList = vertexList;\r\n this.lineSegmentList = lineSegmentList;\r\n this.name = name;\r\n this.visible = visible;\r\n this.debug = debug;\r\n }\r\n\r\n\r\n /**\r\n Add a {@link Vertex} (or vertices) to this {@code Model}'s\r\n {@link List} of vertices.\r\n\r\n @param vArray array of {@link Vertex} objects to add to this {@code Model}\r\n */\r\n public final void addVertex(final Vertex... 
vArray)\r\n   {\r\n      for (final Vertex v : vArray)\r\n      {\r\n         vertexList.add( v );\r\n      }\r\n   }\r\n\r\n\r\n   /**\r\n      Get a {@link LineSegment} from this {@code Model}'s\r\n      {@link List} of line segments.\r\n\r\n      @param index integer index of a {@link LineSegment} from this {@code Model}\r\n      @return the {@link LineSegment} object at the given index\r\n   */\r\n   public final LineSegment getLineSegment(final int index)\r\n   {\r\n      return lineSegmentList.get(index);\r\n   }\r\n\r\n\r\n   /**\r\n      Add a {@link LineSegment} (or LineSegments) to this {@code Model}'s\r\n      {@link List} of line segments.\r\n      <p>\r\n      NOTE: This method does not add any vertices to the {@code Model}'s\r\n      {@link Vertex} list. This method assumes that the appropriate vertices\r\n      have been added to the {@code Model}'s {@link Vertex} list.\r\n\r\n      @param lsArray array of {@link LineSegment} objects to add to this {@code Model}\r\n   */\r\n   public final void addLineSegment(final LineSegment... lsArray)\r\n   {\r\n      for (final LineSegment ls : lsArray)\r\n      {\r\n         lineSegmentList.add(ls);\r\n      }\r\n   }\r\n\r\n\r\n   /**\r\n      For debugging.\r\n\r\n      @return {@link String} representation of this {@code Model} object\r\n   */\r\n   @Override\r\n   public String toString()\r\n   {\r\n      String result = \"\";\r\n      result += \"Model: \" + name + \"\\n\";\r\n      result += \"This Model's visibility is: \" + visible + \"\\n\";\r\n      result += \"Model has \" + vertexList.size() + \" vertices.\\n\";\r\n      result += \"Model has \" + lineSegmentList.size() + \" line segments.\\n\";\r\n      int i = 0;\r\n      for (final Vertex v : this.vertexList)\r\n      {\r\n         result += i + \": \" + v.toString();\r\n         ++i;\r\n      }\r\n      //result = \"Printing out this Model's \" + lineSegmentList.size() + \" Line segments:\\n\";\r\n      i = 0;\r\n      for (final LineSegment ls : this.lineSegmentList)\r\n      {\r\n         result += i + \": \" + ls.toString();\r\n         ++i;\r\n      }\r\n      //result += \"Done printing out Model\\n\";\r\n      return result;\r\n   }\r\n}\r\n" }, { "alpha_fraction": 0.49322032928466797, "alphanum_fraction": 0.5076271295547485, "avg_line_length": 12.414633750915527, "blob_id": "ce37763b7c87c16df5835a0bb02e85057b71deb3", "content_id": "f0b53d551fb5d64aa01d17a0623a3fe0a575e9e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1180, "license_type": "no_license", "max_line_length": 64, "num_lines": 82, "path": "/Year 2/Project 3/MergeSort.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "//MergeSort.cpp\r\n\r\n#include <iostream>\r\n#include \"MergeSort.h\"\r\n\r\n\r\n//default constructor\r\nMergeSort::MergeSort(){\r\n\t\r\n}\r\n\r\nMergeSort::~MergeSort(){\r\n}\r\n\r\nvoid MergeSort::merge(int array[], int left, int mid, int right)\r\n{\r\n\tint x;\r\n\tint y; \r\n\tint p;\r\n\t\r\n\tint a1 = mid - left +1;\r\n\tint a2 = right - mid;\r\n\tint Left[a1];\r\n\tint Right[a2];\r\n\t\r\n\t//copy to temporary arrays L[] and R[]\r\n\tfor(x = 0; x < a1; x++)\r\n\t\tLeft[x] = array[left + x];\r\n\tfor(y = 0; y < a2; y++)\r\n\t\tRight[y] = array[mid + 1 + y];\r\n\t\r\n\t// merge temporary arrays back into main array\r\n\tx = 0;\r\n\ty = 0;\r\n\tp = left;\r\n\t\r\n\twhile (x < a1 && y < a2)\r\n\t{\r\n\t\tif (Left[x] <= Right[y])\r\n\t\t{\r\n\t\t\tarray[p] = Left[x];\r\n\t\t\tx++;\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tarray[p] = Right[y];\r\n\t\t\ty++;\r\n\t\t}\r\n\t\tp++;\r\n\t}\r\n\t\r\n\twhile (x < a1)\r\n\t{\r\n\t\tarray[p] = Left[x];\r\n\t\tx++;\r\n\t\tp++;\r\n\t}\r\n\t\r\n\twhile(y < a2)\r\n\t{\r\n\t\tarray[p] = 
Right[y];\r\n\t\ty++;\r\n\t\tp++;\r\n\t}\r\n}\r\n\t\r\n\r\n//sort method for merge sorting\r\nvoid MergeSort::sort(int array[], int left, int right)\r\n{\r\n\tif (left < right)\r\n\t{\r\n\t\tint z = left+(right-left)/2;\r\n\t\t\r\n\t\tsort(array,left,z);\r\n\t\tsort(array,z+1,right);\r\n\t\t\r\n\t\t//call function to merge subarrays\r\n\t\tmerge(array,left,z,right);\r\n\t\t\r\n\t}\r\n}" }, { "alpha_fraction": 0.5928126573562622, "alphanum_fraction": 0.6044142842292786, "avg_line_length": 37.27777862548828, "blob_id": "702860d4d95272bcacb15d861beca1afab9ff153", "content_id": "b512f99509444653520e0f66aa289a392c638cbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 7068, "license_type": "no_license", "max_line_length": 123, "num_lines": 180, "path": "/Year 2/Assignment #5/Driver.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// \r\n//Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor \r\n// received any help on this assignment\r\n//.\r\n//\r\n// huntmk\r\nimport java.io.*;\r\nimport java.util.Scanner;\r\npublic class Driver\r\n{\r\n\tpublic static void main (String[] args)\r\n\t{\r\n\t\t//arrays for each type of Employee\r\n\t\tStaffPharmacist managers [] = new StaffPharmacist[1];\r\n\t\tStaffTechnician seniors [] = new StaffTechnician[1];\r\n\t\tEmployee staffPhar [] = new Employee[1];\r\n\t\tEmployee staffTech [] = new Employee[1];\r\n\t\t\r\n\t\t//StaffPharmacist pharmacists \r\n\t\tSystem.out.println(\"1. Load Employees (From File)\" );\r\n\t\tSystem.out.println(\"2. 
Exit Program\");\r\n\t\tSystem.out.println();\r\n\t\tSystem.out.println( \"Enter your selection: \");\r\n\t\tScanner input = new Scanner(System.in);\r\n\t\tString decision = input.nextLine();\r\n\t\twhile(!decision.equals(\"2\")){\r\n\t\t\t//if decision equals 1, load players\r\n\t\t\tif(decision.equals(\"1\")){\r\n\t\t\t\t//load employees\r\n\t\t\t\ttry{\r\n\t\t\t\t\t// create buffered reader to read each line of students.txt\r\n\t\t\t\t\tBufferedReader buffer = new BufferedReader(new FileReader(\"employees.txt\"));\r\n\t\t\t\t\r\n\t\t\t\t\t//read second line in file\r\n\t\t\t\t\tbuffer.readLine();\r\n\t\t\t\t\tString line = buffer.readLine();\r\n\t\t\t\t\twhile (line != null)\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\t//break line into parts\r\n\t\t\t\t\t\tString[] part = line.split(\",\");\r\n\t\t\t\t\t\t//change employeeId in part[1] to int\r\n\t\t\t\t\t\tint employeeId = Integer.parseInt(part[1]);\r\n\t\t\t\t\t\t//If the first part of line(role id) equals its specified role, assign it to the type that corresponds with its role\r\n\t\t\t\t\t\tif (part[0].equals(\"1\")){\r\n\t\t\t\t\t\t\tStaffPharmacist employee1 = new PharmacyManager(employeeId,part[2],part[3]);\r\n\t\t\t\t\t\t\tmanagers[0] = employee1;\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\telse if (part[0].equals(\"2\")){\r\n\t\t\t\t\t\t\tEmployee employee2 = new StaffPharmacist(employeeId,part[2],part[3]);\r\n\t\t\t\t\t\t\tstaffPhar[0] = employee2;\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\telse if (part[0].equals(\"3\")){\r\n\t\t\t\t\t\t\tEmployee employee3 = new StaffTechnician(employeeId,part[2],part[3]);\r\n\t\t\t\t\t\t\tstaffTech[0] = employee3;\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\telse if (part[0].equals(\"4\")){\r\n\t\t\t\t\t\t\tStaffTechnician employee4 = new SeniorTechnician(employeeId,part[2],part[3]);\r\n\t\t\t\t\t\t\tseniors[0] = employee4;\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tline = buffer.readLine();\r\n\t\t\t\t\t}\r\n\t\t\t\t\t//close the buffer to stop reading the file\r\n\t\t\t\t\tbuffer.close();\r\n\t\t\t\t\tSystem.out.println(\"File Successfully Loaded! \");\r\n\t\t\t\t\tSystem.out.println();\r\n\t\t\t\t}\r\n\t\t\t\t//if file is not found, print IO Exception\r\n\t\t\t\tcatch(IOException e) {\r\n\t\t\t\t\tSystem.out.print(\"File was not found. \");\r\n\t\t\t\t}\r\n\t\t\t\t//Sub menu\r\n\t\t\t\tSystem.out.println(\"1. Print Employee Information \");\r\n\t\t\t\tSystem.out.println(\"2. Enter Hours Worked \");\r\n\t\t\t\tSystem.out.println(\"3. Calculate Paychecks \");\r\n\t\t\t\tSystem.out.println();\r\n\t\t\t\tSystem.out.println(\"4. 
Exit Program \");\r\n\t\t\t\tSystem.out.println(\"Enter Your Selection: \");\r\n\t\t\t\t//Ask for new input for sub menu\r\n\t\t\t\tScanner newInput = new Scanner(System.in);\r\n\t\t\t\tString subDecision = newInput.nextLine();\r\n\t\t\t\t//Ask for hours\r\n\t\t\t\tScanner hoursInput = new Scanner(System.in);\r\n\t\t\t\tint hours = 0;\r\n\t\t\t\t\r\n\t\t\t\t// while subDecision doesn't equal 4\r\n\t\t\t\twhile(!subDecision.equals(\"4\"))\r\n\t\t\t\t{\r\n\t\t\t\t\tif (subDecision.equals(\"1\")){\r\n\t\t\t\t\t\t// loop through arrays and print out the employees in each array\r\n\t\t\t\t\t\tfor (int i = 0; i < managers.length; i++) {\r\n\t\t\t\t\t\t\tSystem.out.println(managers[i].format());\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tfor (int a = 0; a <staffPhar.length; a++) {\r\n\t\t\t\t\t\t\tSystem.out.println(staffPhar[a].format());\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tfor (int b = 0; b < staffTech.length; b++) {\r\n\t\t\t\t\t\t\tSystem.out.println(staffTech[b].format());\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tfor (int c = 0; c < seniors.length; c++) {\r\n\t\t\t\t\t\t\tSystem.out.println(seniors[c].format());\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\t//Sub Menu\r\n\t\t\t\t\t\tSystem.out.println(\"1. Print Employee Information \");\r\n\t\t\t\t\t\tSystem.out.println(\"2. Enter Hours Worked \");\r\n\t\t\t\t\t\tSystem.out.println(\"3. Calculate Paychecks \");\r\n\t\t\t\t\t\tSystem.out.println();\r\n\t\t\t\t\t\tSystem.out.println(\"4. Exit Program \");\r\n\t\t\t\t\t\tSystem.out.println(\"Enter Your Selection: \");\r\n\t\t\t\t\t\t//ask for new input\r\n\t\t\t\t\t\tsubDecision = newInput.nextLine();\r\n\t\t\t\t\t}\r\n\t\t\t\t\t//ask user to enter hours worked for employees\r\n\t\t\t\t\telse if (subDecision.equals(\"2\")){\r\n\t\t\t\t\t\tSystem.out.println(\"Enter Hours for Employees\");\r\n\t\t\t\t\t\thours = hoursInput.nextInt();\r\n\t\t\t\t\t\tSystem.out.println(\"Hours Entered! \");\r\n\t\t\t\t\t\t//new input\r\n\t\t\t\t\t\tSystem.out.println(\"1. Print Employee Information \");\r\n\t\t\t\t\t\tSystem.out.println(\"2. Enter Hours Worked \");\r\n\t\t\t\t\t\tSystem.out.println(\"3. Calculate Paychecks \");\r\n\t\t\t\t\t\tSystem.out.println();\r\n\t\t\t\t\t\tSystem.out.println(\"4. Exit Program \");\r\n\t\t\t\t\t\tSystem.out.println(\"Enter Your Selection: \");\r\n\t\t\t\t\t\tsubDecision = newInput.nextLine();\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t}\r\n\t\t\t\t\t//calculate the employees paycheck based on how much they worked\r\n\t\t\t\t\telse if (subDecision.equals(\"3\")){\r\n\t\t\t\t\t\t//if hours is not a valid number, print error statement and ask to user to put a new decision in\r\n\t\t\t\t\t\tif (hours <= 0){\r\n\t\t\t\t\t\t\tSystem.out.println(\"Hours needs to be entered and must be greater than 0. \");\r\n\t\t\t\t\t\t\tSystem.out.println(\"1. Print Employee Information \");\r\n\t\t\t\t\t\t\tSystem.out.println(\"2. Enter Hours Worked \");\r\n\t\t\t\t\t\t\tSystem.out.println(\"3. Calculate Paychecks \");\r\n\t\t\t\t\t\t\tSystem.out.println();\r\n\t\t\t\t\t\t\tSystem.out.println(\"4. 
Exit Program \");\r\n\t\t\t\t\t\t\tSystem.out.println(\"Enter Your Selection: \");\r\n\t\t\t\t\t\t\tsubDecision = newInput.nextLine();\r\n\t\t\t\t\t\t//if hours is valid, print out employees paychecks\r\n\t\t\t\t\t\t}else{\r\n\t\t\t\t\t\t\t//loop through arrays to print out all of the employees paychecks\r\n\t\t\t\t\t\t\tfor(int x = 0; x < managers.length; x++){\r\n\t\t\t\t\t\t\t\tSystem.out.println(\"ID: \" + managers[x].getId() + \"\\t\" + \"Check Amount: \" + managers[x].getHourlyRate() * hours);\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\tfor(int y = 0; y < seniors.length; y++){\r\n\t\t\t\t\t\t\t\tSystem.out.println(\"ID: \" + seniors[y].getId() + \"\\t\" + \"Check Amount: \" + seniors[y].getHourlyRate() * hours);\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\tfor(int z = 0; z < staffPhar.length; z++){\r\n\t\t\t\t\t\t\t\tSystem.out.println(\"ID: \" + staffPhar[z].getId() + \"\\t\" + \"Check Amount: \" + staffPhar[z].getHourlyRate() * hours);\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\tfor(int a = 0; a < staffTech.length; a++){\r\n\t\t\t\t\t\t\t\tSystem.out.println(\"ID: \" + staffTech[a].getId() + \"\\t\" + \"Check Amount: \" + staffTech[a].getHourlyRate() * hours);\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\tSystem.out.println(\"1. Print Employee Information \");\r\n\t\t\t\t\t\t\tSystem.out.println(\"2. Enter Hours Worked \");\r\n\t\t\t\t\t\t\tSystem.out.println(\"3. Calculate Paychecks \");\r\n\t\t\t\t\t\t\tSystem.out.println();\r\n\t\t\t\t\t\t\tSystem.out.println(\"4. Exit Program \");\r\n\t\t\t\t\t\t\tSystem.out.println(\"Enter Your Selection: \");\r\n\t\t\t\t\t\t\tsubDecision = newInput.nextLine();\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\t// if input is not 1,2,3, or 4, print error statement and ask for new input\r\n\t\t\t\t\t}else{\r\n\t\t\t\t\tSystem.out.println(\"Wrong input, enter correct option: \");\r\n\t\t\t\t\tsubDecision = newInput.nextLine();\t\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t\t//When sub menu breaks loop, break main loop to end program\r\n\t\t\t\tdecision = \"2\";\r\n\t\t\t//if decision is not 1 or 2, print error statement and ask for new decision\t\r\n\t\t\t}else{\r\n\t\t\t\t\tSystem.out.println(\"Wrong input, enter correct option: \");\r\n\t\t\t\t\tdecision = input.nextLine();\r\n\t\t\t}\r\n\t\t}\t\r\n\t}\r\n}" }, { "alpha_fraction": 0.6019108295440674, "alphanum_fraction": 0.6050955653190613, "avg_line_length": 13.699999809265137, "blob_id": "4304de5f8c874c9b8afcb8f825c849096b82b070", "content_id": "f53e81a3c007a795965b760a737658c9526f73d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 314, "license_type": "no_license", "max_line_length": 42, "num_lines": 20, "path": "/Year 2/Assignment #6/Sort.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// \r\n//Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor \r\n// received any help on this assignment\r\n//.\r\n//\r\n// huntmk\r\n#ifndef SORT_H\r\n#define SORT_H\r\nclass Sort\r\n{\r\n\tpublic: \r\n\t\t//pure virtual sort method\r\n\t\tvirtual void sort(int*, int)= 0;\r\n\t\t//destructor\r\n\t\tvirtual ~Sort(){}\r\n};\r\n\r\n#endif//SORT_H\r\n" }, { "alpha_fraction": 0.6779661178588867, "alphanum_fraction": 0.6859946250915527, "avg_line_length": 24.714284896850586, "blob_id": "c2607f57ed99944d73de57d9d7ca8cdfbd206bc8", "content_id": "093ae02b221a74bd42dccdca05d06ea5408a69b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1121, "license_type": "no_license", "max_line_length": 70, "num_lines": 42, 
"path": "/Year 3/Assignment3/Expr_Command_Factory.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor receieved any help\r\n// on this assignment.\r\n\r\n#include \"Add_Command.h\"\r\n#include \"Subtract_Command.h\"\r\n#include \"Multiply_Command.h\"\r\n#include \"Division_Command.h\"\r\n#include \"Modulus_Command.h\"\r\n#include \"Num_Command.h\"\r\n#include \"Parenthesis_Command.h\"\r\n\r\n#ifndef _EXPR_COMMAND_FACOTRY_H\r\n#define _EXPR_COMMAND_FACOTRY_H\r\n\r\nclass Expr_Command_Factory\r\n{\r\n\tpublic:\r\n\t\r\n\t\tvirtual ~Expr_Command_Factory (void) = 0;\r\n\t\t\r\n\t\t//returns top of stack which holds the result of the operation\r\n\t\tvirtual int answer(void) = 0;\r\n\t\t\r\n\t\t//create the commands needed to perform operations\r\n\t\tvirtual Num_Command * create_num_command (int num) = 0;\r\n\t\t\r\n\t\tvirtual Add_Command * create_add_command (void) = 0;\r\n\t\t\r\n\t\tvirtual Subtract_Command * create_subtract_command (void) = 0;\r\n\t\t\r\n\t\tvirtual Multiply_Command * create_multiply_command (void) = 0;\r\n\t\t\r\n\t\tvirtual Division_Command * create_division_command (void) = 0;\r\n\t\t\r\n\t\tvirtual Modulus_Command * create_modulus_command (void) = 0;\r\n\t\t\r\n\t\tvirtual Parenthesis_Command * create_parenthesis_command (void) = 0;\r\n\t\t\r\n};\r\n#endif " }, { "alpha_fraction": 0.5720587968826294, "alphanum_fraction": 0.595588207244873, "avg_line_length": 13.166666984558105, "blob_id": "1c9f2e16a9f9f91bbe48beb378505b38c9bb4693", "content_id": "5464568f96627500fd6efb9b38b6c11733d32d80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 680, "license_type": "no_license", "max_line_length": 72, "num_lines": 48, "path": "/Year 3/composition-source/driver.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#include \"Array.h\"\n#include \"Fixed_Array.h\"\n#include \"Stack.h\"\n#include \"Queue.h\"\n\n\nint main (int argc, char * argv [])\n{\n/******REMEMBER DELETE STAMENETS IN SOURCE FILE USED FOR TESTING*******/\n\n//ARRAY TESTING\n\tArray <int> array(10,5);\n\tarray.size();\n\tarray.max_size();\n\t//array.find(1);\n\t//array[3];\n\tarray.resize(20);\n\t//array.get(2);\n\tarray.set(19,3);\n \t\n \n/*\n//FIXED_ARRAY TESTING\n\tFixed_Array <int,10> newArray;\n\tnewArray.size();\n\tnewArray.max_size();\n\t\n\t\n\t\t\n//STACK TESTING\n\t\n\tStack <int> s; \n\ts.push(1);\n\tStack <int> z (s);\n\ts.size();\n\ts.pop();\n\t\n//Queue Testing\n\t\n\tQueue<int> q;\n\tq.enqueue(2);\t\n\tQueue<int> m(q);\n\tq.size();\n\tq.dequeue();\n\tq.clear();\n */\n return 0;\n}\n" }, { "alpha_fraction": 0.6224066615104675, "alphanum_fraction": 0.6244813203811646, "avg_line_length": 14, "blob_id": "a1c2f54ce2c2243ba8aecb48754a0d14a7c01716", "content_id": "eba5442405fe414ba8c99e30407a04777d9c1e2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 482, "license_type": "no_license", "max_line_length": 59, "num_lines": 30, "path": "/Year 3/Assignment 4/Num_Expr_Node.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n\r\n#include \"Num_Expr_Node.h\"\r\nNum_Expr_Node::Num_Expr_Node(int n):\r\nnum(0)\r\n{\r\n\t//constructor\r\n\tnum = n;\r\n}\r\nNum_Expr_Node::~Num_Expr_Node(void)\r\n{\r\n\t//constructor\r\n}\r\n\r\nint 
Num_Expr_Node::eval(void)\r\n{\r\n\t//return the number passed in the parameter\r\n\treturn this->num;\r\n\t\r\n}\r\n\r\nvoid Num_Expr_Node::accept (Expr_Node_Visitor & v)\r\n{\r\n\tv.Visit_Number_Node (*this);\r\n}\r\n\r\n" }, { "alpha_fraction": 0.7227272987365723, "alphanum_fraction": 0.7227272987365723, "avg_line_length": 17.33333396911621, "blob_id": "550df58b4560ab1c73ffb691899e29c18611d851", "content_id": "d6ece4060652f0172776c742aa3f69a4bfda1a30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 220, "license_type": "no_license", "max_line_length": 49, "num_lines": 12, "path": "/Year 4/QRcodeSubmission/QRCodeGenerator/masking.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#ifndef MASKING_H\n#define MASKING_H\n\n#include <vector>\n#include <string>\n\nint scorePattern(int**, int);\nint getLength(int);\nstd::vector<int> getBinary(int, int);\nvoid genMaskingPatterns(int**, int, std::string);\n\n#endif\n" }, { "alpha_fraction": 0.6734397411346436, "alphanum_fraction": 0.6734397411346436, "avg_line_length": 19.59375, "blob_id": "978d74cc0a7add1e4b067c8d6edd5717373c3095", "content_id": "6749b0cf50ed268dd6f06b6dfa83f8a4119092b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 689, "license_type": "no_license", "max_line_length": 139, "num_lines": 32, "path": "/Year 3/Assignment3/Calculator.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//Calculator Class\r\n\r\n//header\r\n#include \"Stack_Expr_Command_Factory.h\"\r\n#include \"Expr_Command.h\"\r\n#include <cstring>\r\n#include <iostream>\r\n#include <sstream>\r\n\r\n#ifndef _CALCULATOR_H\r\n#define _CALCULATOR_H\r\n\r\nclass Calculator \r\n{\r\n\tpublic:\r\n\t\tCalculator (void);\r\n\t\t~Calculator (void);\r\n\t\r\n\t//converts postfix to infix.\r\n\t\tvoid infix_to_postfix(const std::string & infix, Expr_Command_Factory & factory, Array <Expr_Command *> & postfix, Stack <int> & result);\r\n\t\r\n\t//evaluate postfix\r\n\t\tvoid postfix_eval(Array <Expr_Command *> & postfix,Expr_Command_Factory & fact);\r\n\t\r\n};\r\n\r\n#endif" }, { "alpha_fraction": 0.6148282289505005, "alphanum_fraction": 0.623869776725769, "avg_line_length": 17, "blob_id": "f8a309eb554bcc903a66d243bb1d147b4dbc1838", "content_id": "ff99191c60e8e4945851d121b1657ff186fce645", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 553, "license_type": "no_license", "max_line_length": 59, "num_lines": 29, "path": "/Year 3/Assignment3/Binary_Op_Command.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n#include \"Binary_Op_Command.h\"\r\n\r\nBinary_Op_Command::Binary_Op_Command (Stack <int > & s):\r\ns_(s)\r\n{\r\n\t//constructor\r\n}\r\n\r\nBinary_Op_Command::~Binary_Op_Command(void)\r\n{\r\n\t//destructor\r\n}\r\n\r\nvoid Binary_Op_Command::execute (void)\r\n{\r\n\t//pop 2 values from stack\r\n\tint n2 = s_.top();\r\n\ts_.pop();\r\n\tint n1 = s_.top();\r\n\t\r\n\t//evaluate the result based on opeartor and push to stack\r\n\tint result = this->evaluate (n1,n2);\r\n\ts_.push (result);\r\n\t\r\n}\r\n\r\n" }, { "alpha_fraction": 0.5559907555580139, "alphanum_fraction": 0.5596774220466614, "avg_line_length": 
24.62576675415039, "blob_id": "7db3917b747739e4092e43f9fe47640e0788b35a", "content_id": "4dd162239b13a73a6881a59e385da16da48176b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4340, "license_type": "no_license", "max_line_length": 80, "num_lines": 163, "path": "/Master Year 1/Computer Graphics/HW4/renderer/scene/ModelShading.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.scene;\r\n\r\nimport java.util.List;\r\nimport java.util.ArrayList;\r\nimport java.awt.Color;\r\nimport java.util.Random;\r\n\r\n/**\r\n This is a library of static methods that\r\n add color shading to a {@link Model}.\r\n*/\r\npublic class ModelShading\r\n{\r\n /**\r\n Set each {@link Color} in the {@link Model}'s color list\r\n to the same {@link Color}.\r\n\r\n @param model {@link Model} whose color list is being manipulated\r\n @param c {@link Color} for all of this model's {@link Vertex} objects\r\n */\r\n public static void setColor(final Model model, final Color c)\r\n {\r\n if (model.colorList.isEmpty())\r\n {\r\n for (int i = 0; i < model.vertexList.size(); ++i)\r\n {\r\n model.colorList.add(c);\r\n }\r\n }\r\n else\r\n {\r\n for (int i = 0; i < model.colorList.size(); ++i)\r\n {\r\n model.colorList.set(i, c);\r\n }\r\n }\r\n }\r\n\r\n\r\n /**\r\n Set each {@link Color} in the {@link Model}'s color list\r\n to the same random {@link Color}.\r\n\r\n @param model {@link Model} whose color list is being manipulated\r\n */\r\n public static void setRandomColor(final Model model)\r\n {\r\n setColor(model, randomColor());\r\n }\r\n\r\n\r\n /**\r\n Set each {@link Color} in the {@link Model}'s color list\r\n to a different random {@link Color}.\r\n\r\n @param model {@link Model} whose color list is being manipulated\r\n */\r\n public static void setRandomColors(final Model model)\r\n {\r\n if (model.colorList.isEmpty())\r\n {\r\n setRandomVertexColors(model);\r\n }\r\n else\r\n {\r\n for (int i = 0; i < model.colorList.size(); ++i)\r\n {\r\n model.colorList.set(i, randomColor());\r\n }\r\n }\r\n }\r\n\r\n\r\n /**\r\n Set each {@link Vertex} in the {@link Model}\r\n to a different random {@link Color}.\r\n <p>\r\n NOTE: This will destroy whatever \"color structure\"\r\n the model might possess.\r\n\r\n @param model {@link Model} whose color list is being manipulated\r\n */\r\n public static void setRandomVertexColors(final Model model)\r\n {\r\n model.colorList = new ArrayList<Color>();\r\n for (int i = 0; i < model.vertexList.size(); ++i)\r\n {\r\n model.colorList.add( randomColor() );\r\n }\r\n for (final LineSegment ls : model.lineSegmentList)\r\n {\r\n ls.cIndex[0] = ls.vIndex[0];\r\n ls.cIndex[1] = ls.vIndex[1];\r\n }\r\n }\r\n\r\n\r\n /**\r\n Set each {@link LineSegment} in the {@link Model}\r\n to a different (uniform) random {@link Color}.\r\n <p>\r\n NOTE: This will destroy whatever \"color structure\"\r\n the model might possess.\r\n\r\n @param model {@link Model} whose color list is being manipulated\r\n */\r\n public static void setRandomLineSegmentColors(final Model model)\r\n {\r\n model.colorList = new ArrayList<>();\r\n int index = 0;\r\n for (final LineSegment ls : model.lineSegmentList)\r\n {\r\n model.colorList.add( randomColor() );\r\n ls.cIndex[0] = index;\r\n ls.cIndex[1] = index;\r\n ++index;\r\n }\r\n }\r\n\r\n\r\n /**\r\n Set each {@link LineSegment} in the {@link Model}\r\n to a different random {@link Color} at each endpoint.\r\n <p>\r\n NOTE: This will destroy whatever 
\"color structure\"\r\n the model might possess.\r\n\r\n @param model {@link Model} whose color list is being manipulated\r\n */\r\n public static void setRainbowLineSegmentColors(final Model model)\r\n {\r\n model.colorList = new ArrayList<>();\r\n int index = 0;\r\n for (final LineSegment ls : model.lineSegmentList)\r\n {\r\n model.colorList.add( randomColor() );\r\n model.colorList.add( randomColor() );\r\n ls.cIndex[0] = index;\r\n ls.cIndex[1] = index + 1;\r\n index += 2;\r\n }\r\n }\r\n\r\n\r\n /**\r\n Create a {@link Color} object with randomly generated {@code r},\r\n {@code g}, and {@code b} values.\r\n\r\n @return a reference to a randomly generated {@link Color} object\r\n */\r\n public static Color randomColor()\r\n {\r\n final Random generator = new Random();\r\n final float r = generator.nextFloat();\r\n final float g = generator.nextFloat();\r\n final float b = generator.nextFloat();\r\n return new Color(r, g, b);\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.5698729753494263, "alphanum_fraction": 0.601814866065979, "avg_line_length": 24.747663497924805, "blob_id": "310ce4c88f32b62560f3504f160bd42894dbb031", "content_id": "5abed12b8a66df784ab180933458aebad362a079", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2755, "license_type": "no_license", "max_line_length": 124, "num_lines": 107, "path": "/Year 4/csci487Group4Project-makingGraphs/Utilities/Definitions.py", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#Definitions.py\n#Created 4/5/20 by Jasper Heist\n#Contains constants and other definitions needed for our program.\n\nfrom enum import Enum, IntEnum\nimport sys\n\n#average length of car used as default (https://mechanicbase.com/cars/average-car-length/)\nAverageCarLength = 14.5\n#average length of semi varies, we will call it 80 (https://www.quora.com/What-is-the-length-of-a-normal-semi-truck?share=1)\nAverageSemiLength = 80\n\nclass Colors(Enum):\n '''enumeration that can supply color info for output'''\n\n RESET = 0\n GREEN = 1\n YELLOW = 2\n PURPLE = 3\n CYAN = 4\n RED = 5\n WHITE = 6\n\n def color(self):\n if self == Colors.GREEN:\n return \"\\033[1;32m\"\n elif self == Colors.YELLOW:\n return \"\\033[1;33m\"\n elif self == Colors.PURPLE:\n return \"\\033[1;35m\"\n elif self == Colors.CYAN:\n return \"\\033[1;36m\"\n elif self == Colors.RED:\n return \"\\033[1;37;41m\"\n elif self == Colors.WHITE:\n return \"\\033[0;30;47m\"\n else:\n return \"\\033[0m\"\n\ndef print_c(string:str, c:Colors):\n \"\"\"prints supplied string in specified color\"\"\"\n if sys.platform == \"win32\":\n print(string)\n return\n print(\"{}{}{}\".format(c.color(), string, c.RESET.color()))\n\ndef print_e(string:str):\n \"\"\"prints specified string in red Error format\"\"\"\n print_c(string, Colors.RED)\n\ndef temp_format_clear(string, c:Colors):\n if sys.platform == \"win32\":\n return string\n else:\n return \"{}{}{}\".format(Colors.RESET.color(), string, c.color())\n\n\n\n#enumeration for types of intersection\nclass IntersectionType(Enum):\n TwoWayStop=1\n FourWayStop=2\n TrafficLight=3\n\n#defines all possible directions for a road\nclass Direction(Enum):\n NorthboundOut = 0\n NorthboundIn = 1\n EastboundOut = 2\n EastboundIn = 3\n SouthboundOut = 4\n SouthboundIn = 5\n WestboundOut = 6\n WestboundIn = 7\n \nclass Orientation(IntEnum):\n Top = 0\n Right = 1\n Bottom = 2\n Left = 3\n\n\n def other_side(self):\n \"\"\"performs math to get the opposite side of an intersection. 
\n It is useful when going across an intersection (straight) and connecting two intersections\n \"\"\"\n return (self+2)%4\n \n\n def to_the_right(self):\n \"\"\"return enum as if turning to the right\"\"\"\n return (self-1)%4\n\n\n def to_the_left(self):\n \"\"\"return enum as if turning left\"\"\"\n return (self+1)%4\n\n def desc(self):\n if self == Orientation.Top:\n return \"TOP\"\n elif self == Orientation.Right:\n return \"RIGHT\"\n elif self == Orientation.Bottom:\n return \"BOTTOM\"\n elif self == Orientation.Left:\n return \"LEFT\"\n" }, { "alpha_fraction": 0.6104294657707214, "alphanum_fraction": 0.6196318864822388, "avg_line_length": 16.22222137451172, "blob_id": "ecf32d17a853e879dccaafd792b9b57504ae3113", "content_id": "95468b3a06ee6d7f7e4c6b8a023c56aa989b1976", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 326, "license_type": "no_license", "max_line_length": 36, "num_lines": 18, "path": "/Year 2/Project 1/Stack.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#ifndef STACK_H_\r\n#define STACK_H_\r\n\r\nclass Stack {\r\n\tint topElement;\r\npublic:\r\n\t//size of stack\r\n\tint stack[64];\r\n\r\n\tStack() { topElement = -1; }\r\n\t//push method(add to the stack)\r\n\tbool push(int p[]);\r\n\t//pop method(remove from the stack)\r\n\tint pop();\r\n\t//check if the stack is empty\r\n\tbool isEmpty();\r\n}; \r\n#endif //STACK_H" }, { "alpha_fraction": 0.5403566360473633, "alphanum_fraction": 0.553261399269104, "avg_line_length": 31.55905532836914, "blob_id": "6de2e4a3e20bdde3c4ad6a0c53b10610722ab7c1", "content_id": "54ac87a0c3ac5dad65a87c357d7ebb1c251af6d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4262, "license_type": "no_license", "max_line_length": 87, "num_lines": 127, "path": "/Master Year 1/Computer Graphics/HW3/renderer/models/ConeFrustum.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.models;\r\nimport renderer.scene.*;\r\n\r\n/**\r\n Create a wireframe model of a frustum of a right circular cone\r\n with its base in the xz-plane.\r\n<p>\r\n See <a href=\"https://en.wikipedia.org/wiki/Frustum\" target=\"_top\">\r\n https://en.wikipedia.org/wiki/Frustum</a>\r\n\r\n @see Cone\r\n @see ConeSector\r\n*/\r\npublic class ConeFrustum extends Model\r\n{\r\n /**\r\n Create a frustum of a right circular cone with its base in the\r\n xz-plane, a base radius of 1, top radius of 1/2, and height 1/2.\r\n */\r\n public ConeFrustum( )\r\n {\r\n this(1.0, 0.5, 0.5, 7, 16);\r\n }\r\n\r\n\r\n /**\r\n Create a frustum of a right circular cone with its base in the\r\n xz-plane, a base radius of {@code r}, top of the frustum at\r\n height {@code h}, and with the cone's apex on the y-axis at\r\n height {@code a}.\r\n <p>\r\n There must be at least three lines of longitude and at least\r\n two circles of latitude.\r\n\r\n @param n number of circles of latitude\r\n @param k number of lines of longitude\r\n @param r radius of the base in the xz-plane\r\n @param h height of the frustum\r\n @param a height of the apex of the cone\r\n */\r\n public ConeFrustum(final int n, final int k,\r\n final double r, final double h, final double a)\r\n {\r\n this(r, (1 - h/a)*r, h, n, k);\r\n }\r\n\r\n\r\n /**\r\n Create a frustum of a right circular cone with its base in the\r\n xz-plane, a base radius of {@code r1}, top radius of {@code r2},\r\n and height {@code h}.\r\n <p>\r\n This model works with either 
{@code r1 > r2} or {@code r1 < r2}.\r\n In other words, the frustum can have its \"apex\" either above or\r\n below the xz-plane.\r\n <p>\r\n There must be at least three lines of longitude and at least\r\n two circles of latitude.\r\n\r\n @param r1 radius of the base of the frustum\r\n @param h height of the frustum\r\n @param r2 radius of the top of the frustum\r\n @param n number of circles of latitude\r\n @param k number of lines of longitude\r\n */\r\n public ConeFrustum(final double r1, final double h, final double r2,\r\n int n, int k)\r\n {\r\n super(\"Cone Frustum\");\r\n\r\n if (n < 2) n = 2;\r\n if (k < 3) k = 3;\r\n\r\n // Create the frustum's geometry.\r\n\r\n final double deltaTheta = (2 * Math.PI) / k;\r\n\r\n // An array of indexes to be used to create line segments.\r\n final int[][] indexes = new int[n][k];\r\n\r\n // Create all the vertices.\r\n int index = 0;\r\n for (int j = 0; j < k; ++j) // choose an angle of longitude\r\n {\r\n double c = Math.cos(j * deltaTheta);\r\n double s = Math.sin(j * deltaTheta);\r\n for (int i = 0; i < n; ++i) // choose a circle of latitude\r\n {\r\n double slantRadius = (i/(double)(n-1)) * r1 + ((n-1-i)/(double)(n-1)) * r2;\r\n addVertex( new Vertex(slantRadius * c,\r\n h - (i*h)/(n-1),\r\n slantRadius * s) );\r\n indexes[i][j] = index++;\r\n }\r\n }\r\n addVertex( new Vertex(0, h, 0) ); // top center\r\n final int topCenterIndex = index++;\r\n addVertex( new Vertex(0, 0, 0) ); // bottom center\r\n final int bottomCenterIndex = index++;\r\n\r\n // Create all the horizontal circles of latitude around the frustum wall.\r\n for (int i = 0; i < n; ++i)\r\n {\r\n for (int j = 0; j < k-1; ++j)\r\n {\r\n addLineSegment(new LineSegment(indexes[i][j], indexes[i][j+1]));\r\n }\r\n // close the circle\r\n addLineSegment(new LineSegment(indexes[i][k-1], indexes[i][0]));\r\n }\r\n\r\n // Create the vertical half-trapazoids of longitude from north to south pole.\r\n for (int j = 0; j < k; ++j)\r\n {\r\n // Create the triangle fan at the top.\r\n addLineSegment(new LineSegment(topCenterIndex, indexes[0][j]));\r\n // Create the slant lines from the top to the base.\r\n addLineSegment(new LineSegment(indexes[0][j], indexes[n-1][j]));\r\n // Create the triangle fan at the base.\r\n addLineSegment(new LineSegment(indexes[n-1][j], bottomCenterIndex));\r\n }\r\n }\r\n}//ConeFrustum\r\n" }, { "alpha_fraction": 0.5975550413131714, "alphanum_fraction": 0.6066014766693115, "avg_line_length": 16.782608032226562, "blob_id": "625cf7c8f6bde101aa3a390a4a8f60a25b1a5fe7", "content_id": "ff72062e82f7c7eaab1380c684b2d3a7f3c1e432", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4090, "license_type": "no_license", "max_line_length": 103, "num_lines": 230, "path": "/Year 3/composition-source/Fixed_Array.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// $Id: Fixed_Array.cpp 827 2011-02-07 14:20:53Z hillj $\n\n// Honor Pledge:\n//\n// I pledge that I have neither given nor received any help\n// on this assignment.\n\n//\n// Fixed_Array\n//\n#include \"Fixed_Array.h\"\n\n\n\ntemplate <typename T, size_t N>\nFixed_Array <T, N>::Fixed_Array (void):\ndata_(nullptr),\ncur_size_(0),\nmax_size_(0)\n{\n\t//members initialized in base class\n\t\n\t//default constructor\n\tthis->cur_size_ = 0;\n\tthis->max_size_ = N;\n\t\n\tthis->data_ = new T [this->max_size_];\n\t\n}\n\n//\n// Fixed_Array\n//\ntemplate <typename T, size_t N>\nFixed_Array <T, N>::Fixed_Array (const Fixed_Array <T, N> & 
arr):\ndata_(nullptr),\ncur_size_(0),\nmax_size_(0)\n{\n\t//members initialized in base class\n\t\n\t//copy constructor\n\t//need to understand what N is.\n\tthis->max_size_ = arr.max_size_;\n\tthis->cur_size_ = arr.cur_size_;\n\t\n\tthis->data_ = new T[this->max_size_];\n\t\n\t//for every index in array\n\tfor(int i = 0; i < this->max_size_; i++)\n\t{\n\t\t//newVal from arr object is stored in data.\n\t\tT newVal = arr.data_[i];\n\t\tthis->data_[i] = newVal;\n\t\t\t\t\n\t}\n\t\n}\n\n\n//\n// Fixed_Array\n//\ntemplate <typename T, size_t N>\nFixed_Array <T, N>::Fixed_Array (T fill):\ndata_(nullptr),\ncur_size_(0),\nmax_size_(0)\n{\n\t//members initialized in base class\n\t\n\tthis->cur_size_ = N;\n\tthis->max_size_ = N;\n\t\n\tthis->data_ = new T[this->max_size_];\n\t\n\t//Fill the contents of the array.\n\t\t\n\t//for every index in array\n\tfor(int x = 0; x < this->max_size_; x++)\n\t{\n\t\t//the index holds the fill\n\t\tthis->data_[x] = fill;\n\t\t\n\t}\n\t\n}\n\n//\n// ~Fixed_Array\n//\ntemplate <typename T, size_t N>\nFixed_Array <T, N>::~Fixed_Array (void)\n{\n\t//destructor called in base class\n\tdelete [] data_;\n}\n\n//\n// operator =\n//\ntemplate <typename T, size_t N>\nconst Fixed_Array <T, N> & Fixed_Array <T, N>::operator = (const Fixed_Array <T, N> & rhs)\n{\n\t//self assignment check if its same object\n\tif (&rhs == this)\n\t{\n\t\treturn *this;\n\t}\n\t\n\t//else set members of this object rhs's members\n\tthis->data_ = new T[N];\n\t\n\tthis->max_size_ = rhs.max_size_;\n\tthis->cur_size_ = rhs.cur_size_;\n\t\n\t//for every index in array\n\tfor (int x = 0; x < N; x++)\n\t{\n\t\t//thisVal from rhs object is stored in this object\n\t\tT thisVal = rhs.data_[x];\n\t\tthis->data_[x] = thisVal;\n\t\t\t\n\t}\t\n\t//return this object\n\treturn *this;\n\t\n}\n\ntemplate <typename T, size_t N>\nT Fixed_Array<T,N>::get(size_t index) const\n{\n\t//for every index in array\n\tfor (int i = 0; i < this->max_size_; i++)\n\t{\n\t\t//if index is in array\n\t\tif(i==index)\n\t\t{\n\t\t\t//return the character at index being pointed to.\n\t\t\treturn this->data_[i];\n\t\t\t\n\t\t}\n\t\t\t\t\t\n\t}\n\t// out of range exception\n\tthrow std::out_of_range(\"Index not in array\");\n}\n\ntemplate <typename T, size_t N>\nvoid Fixed_Array<T,N>::set(size_t index, T value)\n{\n\t//if index is not valid then throw exception.\n\tif (index > this->max_size_ - 1|| index < 0)\n\t{\n\t\tthrow std::out_of_range(\"Index not in array\");\n\t}\n\t\n\t//for every index in array\n\tfor (int i = 0; i < this->max_size_; i++)\n\t{\t\t\n\t\t//if index is in array\n\t\tif (i == index)\n\t\t{\n\t\t\t//set character at index\n\t\t\tthis->data_[i] = value;\n\t\t\n\t\t}\n\t\t\n\t}\n\t\t\n}\n\ntemplate <typename T, size_t N>\nint Fixed_Array<T,N>::find(T element) const\n{\n\t//for every index in array\n\tfor(int x = 0; x < this->max_size_;x++)\n\t{\n\t\t//if the contents of the index is equal to value, then return that index , if not in array, return -1\n\t\tif (this->data_[x] == element)\n\t\t{\n\t\t\treturn x;\n\t\t\t\n\t\t}\n\t\t\t\n\t}\n\t\n\treturn -1;\n\t\n}\n\ntemplate <typename T, size_t N>\nint Fixed_Array<T,N>::find(T element, size_t start) const\n{\n\t//if start is out of index throw exception\n\tif (start >= this->max_size_ || start < 0)\n\t{\n\t\tthrow std::out_of_range(\"Start index is out of bounds. 
\");\n\t\t\n\t}\n\t\n\t//for indexes from start to end of array\n\tfor(int x = start; x < this->max_size_; x++)\n\t{\n\t\t//if value is at that index then return it\n\t\tif (this->data_[x] == element)\n\t\t{\n\t\t\treturn x;\n\t\t\t\n\t\t}\n\t\t\t\n\t}\n\t\n\treturn -1;\n}\n \ntemplate <typename T, size_t N>\nvoid Fixed_Array<T,N>::fill (T element)\n{\n\t//Fill the contents of the array.\n\t\n\t//for every index in array\n\tfor(int x = 0; x < this->max_size_; x++)\n\t{\n\t\t//the index holds the value \n\t\tthis->data_[x] = element;\n\t\t\n\t}\n\t\n}\n" }, { "alpha_fraction": 0.34145790338516235, "alphanum_fraction": 0.35341909527778625, "avg_line_length": 28.55862045288086, "blob_id": "be13720e4cf85e01b4351c96b2978da45ea2efe2", "content_id": "31ae858b7edf55c2d66c31c970d107590aa6677c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4431, "license_type": "no_license", "max_line_length": 83, "num_lines": 145, "path": "/Master Year 1/Programming Languages and Compilers/HW3/hw3/Language_6_Examples.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n This program parses and evaluates strings from Language_6.\r\n*/\r\n\r\npublic class Language_6_Examples\r\n{\r\n public static void main(String[] args)\r\n {\r\n // IMPORTANT: Set this to 0 or 1 depending on whether you need\r\n // to see all of the interpreter's debugging information.\r\n Evaluate_6a.DEBUG = 1;\r\n\r\n String[] programs =\r\n {\r\n \"(if true 0 1)\",\r\n\r\n \"(if (== 0 1) (+ 3 -5) (^ 2 5))\",\r\n\r\n \"(prog\" +\r\n \"(var b true)\" +\r\n \"(if b (var x -1) (var x 1)))\",\r\n\r\n \"(prog\" +\r\n \"(var i -5)\" +\r\n \"(if ( >= i 0)\" +\r\n \"(begin\" +\r\n \"(print i))\" +\r\n \"(begin\" +\r\n \"(print (- i)))))\",\r\n\r\n \"(prog\" +\r\n \"(var i 5)\" +\r\n \"(while (> i 0)\" +\r\n \"(begin\" +\r\n \"(print i)\" +\r\n \"(set i (- i 1)))))\",\r\n\r\n \"(prog\" +\r\n \"(var i 5)\" +\r\n \"(while (> i 0)\" +\r\n \"(print (set i (- i 1)))))\",\r\n\r\n \"(prog\" +\r\n \"(var i -5)\" +\r\n \"(if (>= i 0)\" +\r\n \"(while (> i 0)\" +\r\n \"(begin\" +\r\n \"(print i)\" +\r\n \"(set i (- i 1))))\" +\r\n \"(while (< i 0)\" +\r\n \"(begin\" +\r\n \"(print i)\" +\r\n \"(set i (+ i 1))))))\",\r\n\r\n \"(prog\" +\r\n \"(var n 1)\" +\r\n \"(var sum 0)\" +\r\n \"(while (< n 5)\" +\r\n \"(begin\" +\r\n \"(var i n)\" +\r\n \"(set sum (+ sum i))\" +\r\n \"(set n (+ n 1))\" +\r\n \"(print n)\" +\r\n \"(print sum)))\" +\r\n \"sum)\",\r\n };\r\n\r\n\r\n int i = 0;\r\n for (i = 0; i < programs.length; i++)\r\n {\r\n System.out.println(i + \" =========================================\");\r\n\r\n // Build the abstract syntax tree that represents the expression.\r\n try\r\n {\r\n Tree ast = ParseTree.buildTree( programs[i] );\r\n\r\n // Print the AST as an S-expression\r\n //System.out.println( ast + \"\\n\" );\r\n\r\n // Pretty-print the abstract syntax tree.\r\n System.out.println( PrettyPrinter2.prettyPrint( ast ) + \"\\n\" );\r\n\r\n // Print the infix version of the expression.\r\n System.out.println( AST2infix_6a.ast2infix( ast ) + \"\\n\" );\r\n\r\n // Evaluate the expression (interpret the AST).\r\n try\r\n {\r\n Value value = Evaluate_6a.eval( ast );\r\n if (Evaluate_6a.DEBUG > 0)\r\n {\r\n System.out.println(\"result = \" + value + \"\\n\" );\r\n }\r\n else\r\n {\r\n System.out.println(\"result = \" + value.toSimpleString() + \"\\n\" );\r\n }\r\n }\r\n catch (EvalException e)\r\n {\r\n System.out.println(e);\r\n //e.printStackTrace();\r\n }\r\n\r\n\r\n // 
Create dot and png files from the AST.\r\n if (Evaluate_6a.DEBUG > 0)\r\n try\r\n {\r\n // Create the (empty) dot file.\r\n String baseName = String.format(\"Language_6.%02d\", i);\r\n java.io.PrintWriter out = new java.io.PrintWriter(\r\n new java.io.File(baseName + \".dot\") );\r\n // Put dot commands into the dot file\r\n out.println( Tree2dot.tree2dot(ast) + \"\\n\" );\r\n out.close();\r\n // Create a command line for running dot.exe.\r\n String[] cmd = {\"C:\\\\graphviz-2.38\\\\release\\\\bin\\\\dot.exe\",\r\n \"-Tpng\",\r\n baseName + \".dot\",\r\n \"-o\",\r\n baseName + \".png\"};\r\n // Execute the command line.\r\n java.lang.Runtime.getRuntime().exec(cmd);\r\n }\r\n catch (Exception e)\r\n {\r\n System.out.println( e );\r\n }\r\n }\r\n catch (TokenizeException e)\r\n {\r\n System.out.println(e);\r\n //e.printStackTrace();\r\n }\r\n catch (ParseException e)\r\n {\r\n System.out.println(e);\r\n //e.printStackTrace();\r\n }\r\n }\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.5574963688850403, "alphanum_fraction": 0.5764192342758179, "avg_line_length": 13.177778244018555, "blob_id": "1097c62ccc3de7c9263c370f90274450645d1176", "content_id": "3cd2d79fac27192c14692abdfab78610fe486750", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 687, "license_type": "no_license", "max_line_length": 66, "num_lines": 45, "path": "/Year 2/Project 1/Stack.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n* Stack.h\r\n*\r\n* Created on: Feb 7, 2018\r\n* Author: Cellus\r\n*/\r\n#include \"Stack.h\"\r\n#include<iostream>\r\nusing namespace std;\r\n\r\n\r\n//if top is greater than or equal to 64 than its a stack over flow\r\nbool Stack::push(int p[])\r\n{\r\n\tif (topElement >= 64)\r\n\t{\r\n\t\tcout << \"Too much in Stack\";\r\n\t\treturn false;\r\n\t}\r\n\telse\r\n\t{\r\n\t\tstack[topElement++] = *p;\r\n\t\treturn true;\r\n\t}\r\n}\r\n//if top is less than 0 than there is a stack underflow\r\nint Stack::pop()\r\n{\r\n\tif (topElement < 0)\r\n\t{\r\n\t\tcout << \"Stack has negative number of elements in it\";\r\n\t\treturn 0;\r\n\t}\r\n\telse\r\n\t{\r\n\t\tint x = stack[topElement--];\r\n\t\treturn x;\r\n\t}\r\n}\r\n\r\nbool Stack::isEmpty()\r\n{\r\n\treturn (topElement < 0);\r\n\r\n}\r\n\r\n\r\n" }, { "alpha_fraction": 0.6427104473114014, "alphanum_fraction": 0.6437371373176575, "avg_line_length": 19.688888549804688, "blob_id": "a4f751f0b45d23e8628858c47cfd1873cae8d20a", "content_id": "68ce76c477526ce62822e6f388347723218b8ac2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 974, "license_type": "no_license", "max_line_length": 103, "num_lines": 45, "path": "/Year 2/Assignment #5/Employee.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// \r\n//Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor \r\n// received any help on this assignment\r\n//.\r\n//\r\n// huntmk\r\n//Employee class\r\npublic class Employee{\r\n\t//traits of employees that are passed to employee types\r\n\tprotected int id;\r\n\tprotected String first;\r\n\tprotected String last;\r\n\tprotected double hourlyRate;\r\n\t\r\n\t//default constructor\r\n\tpublic Employee(){\r\n\t\t\r\n\t}\r\n\t//overloaded constructor that pharmacists and technicians will use\r\n\tpublic Employee(int id, String first, String last)\r\n\t{\r\n\t\tthis.id = id;\r\n\t\tthis.first = first;\r\n\t\tthis.last = last;\r\n\t}\r\n\t\t//return id of Employee\r\n\t\tpublic int 
getId()\r\n\t{\r\n\t\treturn id;\r\n\t}\r\n\t//this method will be passed on and reset in the employee types\r\n\tpublic double getHourlyRate()\r\n\t{\r\n\t\thourlyRate = 0;\r\n\t\treturn hourlyRate;\r\n\t}\r\n\t//format method for Employees\r\n\tpublic String format(){\r\n\t\treturn (\"ID: \" + id + \"\\t\" + \"Name: \" + first + \" \" + last + \"\\t\" + \"Rate: \" + this.getHourlyRate());\r\n\t}\r\n\t\t\r\n\t\r\n\t}" }, { "alpha_fraction": 0.6340909004211426, "alphanum_fraction": 0.6340909004211426, "avg_line_length": 13.785714149475098, "blob_id": "7cf586dae9afb7b516d934cc36ae8e47d991ac5d", "content_id": "483c8ff430aaaa1d276506d29ddc0fd818360bac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 440, "license_type": "no_license", "max_line_length": 52, "num_lines": 28, "path": "/Year 2/Project 2/BinaryTree.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "//BinaryTree.h\r\n#ifndef BINARYTREE_H_\r\n#define BINARYTREE_H_\r\n#include <vector>\r\n\r\nstruct node\r\n{\r\n\t//All elements of a node in the binary tree\r\n\tstd::string key;\r\n\tstd::vector<int> num;\r\n\t\r\n\t\r\n\tnode * left;\r\n\tnode * right;\r\n\t\r\n\t\r\n};\r\nclass BinaryTree\r\n{\r\npublic:\r\n\tBinaryTree();\r\n\t~BinaryTree(){};\r\n\tstruct node * mainRoot;\r\n\tnode* insert(struct node *&root ,std::string, int);\r\n\tvoid sortTree(node * mainRoot);\r\n};\r\n\r\n#endif/*BinaryTree*/" }, { "alpha_fraction": 0.3784465491771698, "alphanum_fraction": 0.39172831177711487, "avg_line_length": 32.783626556396484, "blob_id": "3415ac9d33c275484cd3411be95763a8cc81cb60", "content_id": "b74bbe2664873a6f20a91c31f3f2cdf0d8bef25b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 5948, "license_type": "no_license", "max_line_length": 82, "num_lines": 171, "path": "/Master Year 1/Programming Languages and Compilers/HW4/hw4/Language_7_Examples.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n This program parses and evaluates strings from Language_7.\r\n*/\r\n\r\npublic class Language_7_Examples\r\n{\r\n public static void main(String[] args)\r\n {\r\n // IMPORTANT: Set this to 0 or 1 depending on whether you need\r\n // to see all of the interpreter's debugging information.\r\n Evaluate_7a.DEBUG = 1;\r\n\r\n String[] programs =\r\n {\r\n // 0, demonstrate a function call\r\n \"(prog\" +\r\n \"(var y 6) \" +\r\n \"(fun f (lambda x (* x y)))\" + // y is a non-local reference\r\n \"(var z (apply f 5)))\",\r\n\r\n // 1, demonstrate a function with multiple parameters\r\n \"(prog\" +\r\n \"(var w 10)\" +\r\n \"(fun f (lambda x y z (+ x y z w)))\" + // w is a non-local reference\r\n \"(var x (apply f 1 2 3))\" +\r\n \"(var y (apply f 4 5 x))\" +\r\n \"(var z (apply f (+ w x y) 10 20)))\",\r\n\r\n // 2, demonstrate composition of functions\r\n \"(prog\" +\r\n \"(var w 2)\" +\r\n \"(fun f (lambda x (* x x w)))\" +\r\n \"(fun g (lambda y (+ y y w)))\" +\r\n \"(var z (apply g (apply f 5))))\", // function composition\r\n\r\n // 3, demonstrate caling a function from within another function\r\n \"(prog\" +\r\n \"(var w 2)\" +\r\n \"(fun f (lambda x (var v (* x x w))))\" +\r\n \"(fun g (lambda y (var u (+ y w (apply f y)))))\" +\r\n \"(var z (apply g 5)))\",\r\n\r\n // 4, demonstrate local variables\r\n \"(prog\" +\r\n \"(var w 10)\" +\r\n \"(fun f (lambda x (var u (* x x w))))\" +\r\n \"(begin\" +\r\n \"(var x 0)\" +\r\n \"(begin\" +\r\n \"(var y 2)\" +\r\n \"(begin\" +\r\n \"(set x 
w)\" +\r\n \"(var z (apply f (+ x y w)))))))\",\r\n\r\n // 5, demonstrate local variables inside a function body\r\n \"(prog\" +\r\n \"(var n 1)\" +\r\n \"(fun f (lambda x (* n x)))\" +\r\n \"(fun g (lambda x (begin\" +\r\n \"(var n 5)\" +\r\n \"(var r (apply f x)))))\" +\r\n \"(var z (apply g 2)))\", // result = 2 with static scope\r\n // result = 10 with dynamic scope\r\n\r\n // 6, recursive function\r\n \"(prog\" +\r\n \"(fun fac (lambda n\" +\r\n \"( if (<= n 0)\" +\r\n \"1\" +\r\n \"(* n (apply fac (- n 1))))))\" +\r\n \"(apply fac 6))\",\r\n\r\n // 7, recursive function\r\n \"(prog\" +\r\n \"(fun even (lambda n\" +\r\n \"( && ( != n 1)\" + // use short-circuiting\r\n \"( || (<= n 0)\" + // use short-circuiting\r\n \"(apply even (- n 2))))))\" +\r\n \"(print (apply even 7))\" +\r\n \"(print (apply even 6))\" +\r\n \"(print (apply even 0))\" +\r\n \"(apply even -1))\",\r\n\r\n // 8, mutually recursive functions\r\n \"(prog\" +\r\n \"(fun even ( lambda n \" +\r\n \"(|| (<= n 0 ) \" + // use short-circuiting\r\n \"(apply odd (- n 1)))))\" +\r\n \"(fun odd (lambda n\" +\r\n \"(&& (> n 0)\" + // use short-circuiting\r\n \"(|| (== n 1)\" + // use short-circuiting\r\n \"(apply even (- n 1))))))\" +\r\n \"(print (apply even 8))\" +\r\n \"(print (apply odd 7))\" +\r\n \"(print (apply odd 6))\" +\r\n \"(print (apply even 3))\" +\r\n \"(apply even -1))\",\r\n };\r\n\r\n\r\n int i = 0;\r\n for (i = 0; i < programs.length; i++)\r\n {\r\n System.out.println(i + \" =========================================\");\r\n\r\n // Build and evaluate the AST that represents the expression.\r\n try\r\n {\r\n Tree ast = ParseTree.buildTree( programs[i] );\r\n\r\n // Print the AST as an S-expression\r\n //System.out.println( ast + \"\\n\" );\r\n\r\n // Pretty-print the abstract syntax tree.\r\n System.out.println( PrettyPrinter.prettyPrint( ast ) + \"\\n\" );\r\n\r\n // Print the infix version of the expression.\r\n System.out.println( AST2infix_7a.ast2infix( ast ) + \"\\n\" );\r\n\r\n // Evaluate the expression (interpret the AST).\r\n try\r\n {\r\n Value value = Evaluate_7a.eval( ast );\r\n\r\n System.out.println(\"result = \" + value + \"\\n\" );\r\n }\r\n catch (EvalException e)\r\n {\r\n System.out.println(e);\r\n //e.printStackTrace();\r\n }\r\n\r\n\r\n // Create dot and png files from the AST.\r\n if (Evaluate_7a.DEBUG > 0)\r\n try\r\n {\r\n // Create the (empty) dot file.\r\n String baseName = String.format(\"Language_6.%02d\", i);\r\n java.io.PrintWriter out = new java.io.PrintWriter(\r\n new java.io.File(baseName + \".dot\") );\r\n // Put dot commands into the dot file\r\n out.println( Tree2dot.tree2dot(ast) + \"\\n\" );\r\n out.close();\r\n // Create a command line for running dot.exe.\r\n String[] cmd = {\"C:\\\\graphviz-2.38\\\\release\\\\bin\\\\dot.exe\",\r\n \"-Tpng\",\r\n baseName + \".dot\",\r\n \"-o\",\r\n baseName + \".png\"};\r\n // Execute the command line.\r\n java.lang.Runtime.getRuntime().exec(cmd);\r\n }\r\n catch (Exception e)\r\n {\r\n System.out.println( e );\r\n }\r\n }\r\n catch (TokenizeException e)\r\n {\r\n System.out.println(e);\r\n //e.printStackTrace();\r\n }\r\n catch (ParseException e)\r\n {\r\n System.out.println(e);\r\n //e.printStackTrace();\r\n }\r\n }\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.3649664521217346, "alphanum_fraction": 0.37503355741500854, "avg_line_length": 45.15189743041992, "blob_id": "3fe05ffcf63ac545ad9aa250df41a0c2f81d7823", "content_id": "9b788d14aeb1d29f75aa0c1786e8aa43a9ec483e", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Java", "length_bytes": 7450, "license_type": "no_license", "max_line_length": 93, "num_lines": 158, "path": "/Master Year 1/Programming Languages and Compilers/HW1/Hw1.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\nCourse: CS 51530\r\nName: Marcellus Hunt\r\nEmail: [email protected]\r\nAssignment: 1\r\n*/\r\n\r\n/**\r\n This program tests your pretty printing methods.\r\n Here is an example binary tree.\r\n\r\n a\r\n / \\\r\n / \\\r\n b c\r\n / \\ \\\r\n / \\ \\\r\n d e f\r\n /\r\n /\r\n g\r\n \\\r\n \\\r\n h\r\n*/\r\npublic class Hw1\r\n{\r\n public static void main(String[] args)\r\n {\r\n // Here is the Java declaration of the above binary tree.\r\n BTree bTree1 = new BTree(\"a\",\r\n new BTree(\"b\",\r\n new BTree(\"d\"),\r\n new BTree(\"e\")),\r\n new BTree(\"c\",\r\n null,\r\n new BTree(\"f\",\r\n new BTree(\"g\",\r\n null,\r\n new BTree(\"h\")),\r\n null)));\r\n\r\n // Write the Java declaration for the binary tree in picture bTree2.png.\r\n BTree bTree2 = new BTree(\"a\",\r\n new BTree(\"b\",\r\n new BTree(\"d\"),\r\n new BTree(\"e\",\r\n new BTree(\"h\"),\r\n new BTree(\"i\"))),\r\n new BTree(\"c\",\r\n new BTree(\"f\",\r\n new BTree (\"j\"),\r\n new BTree (\"k\")),\r\n new BTree(\"g\"))); \r\n\r\n\r\n // Write the Java declaration for the binary tree in picture bTree3.png.\r\n BTree bTree3 = new BTree(\"*\",\r\n new BTree(\"+\",\r\n new BTree(\"/\",\r\n new BTree(\"a\"),\r\n new BTree(\"2\")),\r\n new BTree(\"b\")),\r\n new BTree(\"^\",\r\n new BTree(\"c\"),\r\n new BTree(\"3\")));\r\n\r\n\r\n\r\n // Write the Java declaration for the binary tree in picture bTree4.png.\r\n BTree bTree4 = new BTree(\"/\",\r\n new BTree(\"+\",\r\n new BTree(\"-\",\r\n null,\r\n new BTree(\"b\")),\r\n new BTree(\"sqrt\",\r\n null,\r\n new BTree(\"-\",\r\n new BTree(\"^\",\r\n new BTree(\"b\"),\r\n new BTree(\"2\")),\r\n new BTree(\"*\",\r\n new BTree(\"a\"),\r\n new BTree(\"c\"))))),\r\n new BTree(\"*\",\r\n new BTree(\"2\"),\r\n new BTree(\"a\")));\r\n \r\n\r\n\r\n // Write the Java declaration for the binary tree in picture bTree5.png.\r\n BTree bTree5 = new BTree(\"if\",\r\n new BTree(\"<\",\r\n new BTree(\"a\"),\r\n new BTree(\"b\")),\r\n new BTree(\"while\", \r\n new BTree(\"!=\",\r\n new BTree(\"a\"),\r\n new BTree(\"b\")),\r\n new BTree(\"=\",\r\n new BTree(\"a\"),\r\n new BTree(\"+\",\r\n new BTree(\"a\"),\r\n new BTree(\"l\")))));\r\n\r\n\r\n\r\n\r\n System.out.println( \" Preorder traversal --> \" + Traverse.preOrder( bTree1 ) );\r\n System.out.println( \" Inorder traversal --> \" + Traverse.inOrder( bTree1 ) );\r\n System.out.println( \"Postorder traversal --> \" + Traverse.postOrder( bTree1 ) + \"\\n\" );\r\n\r\n System.out.println( PrettyPrinter0.prettyPrint( bTree1 ) + \"\\n\" );\r\n System.out.println( PrettyPrinter1.prettyPrint( bTree1 ) + \"\\n\" );\r\n System.out.println( PrettyPrinter2.prettyPrint( bTree1 ) + \"\\n\" );\r\n System.out.println( PrettyPrinter3.prettyPrint( bTree1 ) + \"\\n\" );\r\n\r\n\r\n System.out.println( \" Preorder traversal --> \" + Traverse.preOrder( bTree2 ) );\r\n System.out.println( \" Inorder traversal --> \" + Traverse.inOrder( bTree2 ) );\r\n System.out.println( \"Postorder traversal --> \" + Traverse.postOrder( bTree2 ) + \"\\n\" );\r\n\r\n System.out.println( PrettyPrinter0.prettyPrint( bTree2 ) + \"\\n\" );\r\n System.out.println( PrettyPrinter1.prettyPrint( bTree2 ) + \"\\n\" );\r\n System.out.println( PrettyPrinter2.prettyPrint( bTree2 ) + \"\\n\" );\r\n System.out.println( PrettyPrinter3.prettyPrint( 
bTree2 ) + \"\\n\" );\r\n\r\n\r\n System.out.println( \" Preorder traversal --> \" + Traverse.preOrder( bTree3 ) );\r\n System.out.println( \" Inorder traversal --> \" + Traverse.inOrder( bTree3 ) );\r\n System.out.println( \"Postorder traversal --> \" + Traverse.postOrder( bTree3 ) + \"\\n\" );\r\n\r\n System.out.println( PrettyPrinter0.prettyPrint( bTree3 ) + \"\\n\" );\r\n System.out.println( PrettyPrinter1.prettyPrint( bTree3 ) + \"\\n\" );\r\n System.out.println( PrettyPrinter2.prettyPrint( bTree3 ) + \"\\n\" );\r\n System.out.println( PrettyPrinter3.prettyPrint( bTree3 ) + \"\\n\" );\r\n\r\n\r\n System.out.println( \" Preorder traversal --> \" + Traverse.preOrder( bTree4 ) );\r\n System.out.println( \" Inorder traversal --> \" + Traverse.inOrder( bTree4 ) );\r\n System.out.println( \"Postorder traversal --> \" + Traverse.postOrder( bTree4 ) + \"\\n\" );\r\n\r\n System.out.println( PrettyPrinter0.prettyPrint( bTree4 ) + \"\\n\" );\r\n System.out.println( PrettyPrinter1.prettyPrint( bTree4 ) + \"\\n\" );\r\n System.out.println( PrettyPrinter2.prettyPrint( bTree4 ) + \"\\n\" );\r\n System.out.println( PrettyPrinter3.prettyPrint( bTree4 ) + \"\\n\" );\r\n\r\n\r\n System.out.println( \" Preorder traversal --> \" + Traverse.preOrder( bTree5 ) );\r\n System.out.println( \" Inorder traversal --> \" + Traverse.inOrder( bTree5 ) );\r\n System.out.println( \"Postorder traversal --> \" + Traverse.postOrder( bTree5 ) + \"\\n\" );\r\n\r\n System.out.println( PrettyPrinter0.prettyPrint( bTree5 ) + \"\\n\" );\r\n System.out.println( PrettyPrinter1.prettyPrint( bTree5 ) + \"\\n\" );\r\n System.out.println( PrettyPrinter2.prettyPrint( bTree5 ) + \"\\n\" );\r\n System.out.println( PrettyPrinter3.prettyPrint( bTree5 ) + \"\\n\" );\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.6863070726394653, "alphanum_fraction": 0.6863070726394653, "avg_line_length": 23.14583396911621, "blob_id": "096a46e132abe1fa9b4ab9f1c574e073861f411e", "content_id": "b6e3b7170f681ed852e774b39b5d037967924a6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1205, "license_type": "no_license", "max_line_length": 61, "num_lines": 48, "path": "/Year 3/Assignment 4/Expr_Tree_Builder.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n\r\n\r\n#ifndef _EXPR_TREE_BUILDER\r\n#define _EXPR_TREE_BUILDER\r\n\r\n#include \"Expr_Builder.h\"\r\n#include \"Stack.h\"\r\n\r\nclass Expr_Tree_Builder : public Expr_Builder\r\n{\r\n\tpublic:\r\n\t\tExpr_Tree_Builder(void);\r\n\t\tvirtual ~Expr_Tree_Builder(void);\r\n\t\t\r\n\t\t//starts a new expression\r\n\t\tvirtual void start_expression (void);\r\n\t\t\r\n\t\t//methods for building types of nodes\r\n\t\tvirtual void build_number (int n);\r\n\t\tvirtual void build_add_operator (void);\r\n\t\tvirtual void build_subtract_operator(void);\r\n\t\tvirtual void build_multiply_operator(void);\r\n\t\tvirtual void build_division_operator(void) ;\r\n\t\tvirtual void build_modulus_operator(void);\r\n\t\tvirtual void build_left_parenthesis(void);\r\n\t\tvirtual void build_right_parenthesis(void);\r\n\t\t\t\t\r\n\t\t//get current expression\r\n\t\tvirtual Expr_Node * get_expression (void);\r\n\t\t\r\n\tprivate:\r\n\t\t//current state of expression tree\r\n\t\tExpr_Node * tree_;\r\n\t\t\r\n\t\t//subexpression of tree that will be pushed on stack\r\n\t\tExpr_Node * sub_;\r\n\t\t\r\n\t\t//create stack to put the operands when 
doing build process\r\n\t\tStack <Expr_Node *> temp;\r\n\t\tStack<Expr_Node *> sub_expression;\r\n\t};\r\n#endif" }, { "alpha_fraction": 0.6297786831855774, "alphanum_fraction": 0.6297786831855774, "avg_line_length": 16.481481552124023, "blob_id": "057644c747a88b1c3bc628b05d5416236a26ef65", "content_id": "365eab6c84efa5d3fa277bd2d68c248b9f00c486", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 497, "license_type": "no_license", "max_line_length": 59, "num_lines": 27, "path": "/Year 3/Assignment3/Num_Command.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n#include \"Expr_Command.h\"\r\n\r\n#ifndef _NUM_COMMAND_H\r\n#define _NUM_COMMAND_H\r\nclass Num_Command: public Expr_Command {\r\n\tpublic:\r\n\t\tNum_Command (Stack <int> & s, int n);\r\n\t\t\r\n\t\t~Num_Command(void);\r\n\t\t\r\n\t\t//performs execution on number command\r\n\t\tvirtual void execute (void);\r\n\t\t\r\n\t\t//returns precedence\r\n\t\tvirtual int prec(void) const;\r\n\t\t\r\n\tprivate:\r\n\t\tStack<int> & s_;\r\n\t\tint n_;\r\n\r\n};\r\n\r\n#endif" }, { "alpha_fraction": 0.5568360686302185, "alphanum_fraction": 0.5683603882789612, "avg_line_length": 19.077777862548828, "blob_id": "9acea004aa6ae2e6ea07954782395f9543058be4", "content_id": "99f2ca760f6a841e51758b2f1159d12bee96fdab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1909, "license_type": "no_license", "max_line_length": 207, "num_lines": 90, "path": "/Year 2/Project 2/CrossReference.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#include <iostream>\r\n#include <fstream>\r\n#include <string>\r\n#include <sstream>\r\n#include \"CrossReference.h\"\r\n#include \"BinaryTree.h\"\r\n\r\n//Cross Reference.cpp\r\n\r\n\r\nBinaryTree *tree = new BinaryTree();\r\n\r\n\r\n//prints out sorted strings\r\nvoid CrossReference::printTable()\r\n{\r\n\ttree->sortTree(tree->mainRoot);\r\n}\r\n\r\n//parses the text file being read and inserts into binary tree\r\nvoid CrossReference::parseWords()\r\n{\r\n\t//keep track of line number;\r\n\tint lineNum = 0;\r\n\r\n\t//used to keep track of what line number is currently being looked at\r\n\tstd::string line;\r\n\t\r\n\tstd::string word;\r\n\tstd::fstream file;\r\n\t//set word to only 10 characters\r\n\t\r\n\r\n\t//read data of text file\r\n\tstd::ifstream inputFile(\"example.txt\");\r\n\t\r\n\t\r\n\r\n\t//READ LINE\r\n\tif (inputFile.is_open())\r\n\t{\r\n\t\t\r\n\t\t//Will need while loop to continuously read multiple lines, research on this later.\r\n\t\twhile (std::getline(inputFile,line))\r\n\t\t{\r\n\t\t\t//keeps track of line\r\n\t\t\tlineNum++;\r\n\t\t\tstd::stringstream ssWord (line);\r\n\t\t\t//TOKENIZE LINE\r\n\t\t\t\r\n\t\t\twhile ( ssWord>> word)\r\n\t\t\t{\r\n\t\t\t\t\t\r\n\t\t\t\t\t\r\n\t\t\t\t\tstd:: string firstTen = word.substr(0, 10);\r\n\t\t\t\t\tstd:: string firstChar = firstTen.substr(0,1);\r\n\t\t\t\t\tstd:: string lastChar = firstTen.substr(firstTen.size()-1);\r\n\t\t\t\t\t\r\n\t\t\t\t\tif (firstChar == \"1\" || firstChar == \"2\" || firstChar == \"3\" || firstChar == \"4\" || firstChar == \"5\" || firstChar == \"6\" || firstChar == \"7\" || firstChar == \"8\" || firstChar == \"9\" || firstChar == \"0\" )\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tfirstTen.erase(0,1);\r\n\t\t\t\t\t}\r\n\t\t\t\t\t\r\n\t\t\t\t\tif(lastChar == \".\"|| lastChar == \"!\" || 
lastChar == \",\"|| lastChar == \"#\")\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\t//deletes string\r\n\t\t\t\t\t\tfirstTen.erase(firstTen.size()-1);\r\n\t\t\t\t\t}\r\n\t\t\t\t\t\r\n\t\t\t\t\tif (!firstTen.empty())\r\n\t\t\t\t\t\ttree->insert(tree->mainRoot,firstTen,lineNum);\r\n\t\t\t\t\t\r\n\t\t\t\t\t\r\n\t\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\t}\r\n\t\tinputFile.close();\r\n\t\t\r\n\t\r\n\t}\r\n\telse\r\n\t{\r\n\t\tstd::cout << \"Unable to open file\" << std::endl;\r\n\t}\r\n\t\r\n}\r\n\r\n\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.5979971289634705, "alphanum_fraction": 0.5994277596473694, "avg_line_length": 13.840909004211426, "blob_id": "951673bbf7814203562d21ea862457c87fd87bfd", "content_id": "bf26a845dda86ebfb7389e5d8ae676d6d5ab4538", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 699, "license_type": "no_license", "max_line_length": 53, "num_lines": 44, "path": "/Year 3/Assignment 4/driver.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "\r\n#include \"Calculator.h\"\r\n\r\n#include <iostream>\r\nint main (int argc, char * argv [])\r\n{\r\n\r\n\r\n//unless infix says QUIT, then continue running\r\nstd::string infix;\r\n\r\nstd::cout << \"Input expression: \";\r\nstd::getline (std::cin,infix);\r\n\r\nwhile (infix != \"QUIT\")\r\n{\r\n\t\t\r\n\t//create the calculator object\r\n\tEval_Expr_Tree eval;\r\n\tExpr_Tree_Builder b;\r\n\tCalculator calc(b, eval);\r\n\t\r\n\t\r\n\t//catch any exceptions when running the program\t\r\n\ttry\r\n\t{\r\n\t\tcalc.build_expression(infix);\r\n\t\tcalc.postfix_eval();\r\n\t\r\n\t} \r\n\tcatch (const char* msg)\r\n\t{\r\n\t\tstd::cerr << msg << std::endl;\r\n\t\t\r\n\t}\r\n\t\t\r\n\t\r\n\tstd::cout << \"output another expression. or QUIT. \";\r\n\tstd::getline (std::cin,infix);\r\n\t\r\n\r\n}\r\n\r\n return 0;\r\n}\r\n" }, { "alpha_fraction": 0.6020942330360413, "alphanum_fraction": 0.6073298454284668, "avg_line_length": 12.84615421295166, "blob_id": "a05906b4ffd39563b85414f3c4437d4fbde4e8da", "content_id": "3731bc872027595138410ea77100c5c1210cfcdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 191, "license_type": "no_license", "max_line_length": 34, "num_lines": 13, "path": "/Year 2/Project 3/Sort.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "//Sort.h\r\n#ifndef SORT_H\r\n#define SORT_H\r\nclass Sort\r\n{\r\n\tpublic: \r\n\t\t//pure virtual sort method\r\n\t\tvirtual void sort(int*, int)= 0;\r\n\t\t//destructor\r\n\t\tvirtual ~Sort(){}\r\n};\r\n\r\n#endif//SORT_H" }, { "alpha_fraction": 0.7604790329933167, "alphanum_fraction": 0.7604790329933167, "avg_line_length": 31.799999237060547, "blob_id": "7ee751e298aad965898e6a122159b6e132121f17", "content_id": "5bb8f3cbddc8688d92bcef710efcd48e37cfc948", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 167, "license_type": "no_license", "max_line_length": 68, "num_lines": 5, "path": "/Year 2/Project 3/README.txt", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "Commands to run program:\r\n\"make -f makeFile\"\r\nthen, \"./main\" to run the executable file created\r\n\r\nThe source files and project report is inside the submission folder." 
}, { "alpha_fraction": 0.5442177057266235, "alphanum_fraction": 0.5544217824935913, "avg_line_length": 11.454545021057129, "blob_id": "8fe8268af09c9e7894fc53e50399d902cb9353af", "content_id": "e29f9c31d71f04440694007c2cee9aa1e6570e9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 294, "license_type": "no_license", "max_line_length": 59, "num_lines": 22, "path": "/Year 3/composition-source/Queue.inl", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n// size\r\n//\r\ntemplate <typename T>\r\ninline\r\nsize_t Queue <T>::size (void) const\r\n{\r\n\tif (front == back == -1)\r\n\t{\r\n\t\treturn 0;\r\n\t}\r\n\telse\r\n\t{\r\n\treturn (back - front) + 1;\r\n\t}\r\n\t\t\r\n}" }, { "alpha_fraction": 0.7828054428100586, "alphanum_fraction": 0.8054298758506775, "avg_line_length": 30.285715103149414, "blob_id": "aa630a765c89fff36b57d9d253d2cc2bac5957cf", "content_id": "332a05140840ff7cd71ffbc32fad53474411fd2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 221, "license_type": "no_license", "max_line_length": 53, "num_lines": 7, "path": "/Year 4/README.txt", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "Course work from assignments/projects in fourth year.\nCS 487 (Artificial Intelligence)\n QRcodeSubmission(Final Project)\nBiometric Computing\n Assignment 1 & 2\nClient-Side Web Programming\n Final Release(Final Project)\n \n" }, { "alpha_fraction": 0.6498516201972961, "alphanum_fraction": 0.6528189778327942, "avg_line_length": 15.736842155456543, "blob_id": "816c42832b3a38460b050e0eb8d59f9319788701", "content_id": "7356dcf50188da33bde79721d019988bb78b610e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 337, "license_type": "no_license", "max_line_length": 86, "num_lines": 19, "path": "/Year 2/Project 2/Driver.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "//Driver.cpp\r\n//Will execute the program\r\n\r\n#include <iostream>\r\n#include \"CrossReference.h\"\r\n#include \"BinaryTree.h\"\r\n\r\n\r\nint main()\r\n{\r\n\r\n\tCrossReference * run = new CrossReference();\r\n\tstd::cout << \"Welcome to the cross reference index program! 
Lets begin\" << std::endl;\r\n\r\n\trun->parseWords();\r\n\trun->printTable();\r\n\t\r\n\treturn 0;\r\n}\r\n" }, { "alpha_fraction": 0.48260870575904846, "alphanum_fraction": 0.4945652186870575, "avg_line_length": 21.049999237060547, "blob_id": "b58fbe7d5f3ce8ab34b1046f99eb4abc5de15f9f", "content_id": "e29a6899025abd5bc656c22f4c8f1675200fe589", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 920, "license_type": "no_license", "max_line_length": 66, "num_lines": 40, "path": "/Master Year 2/Operating Systems/HW3/hw3/filters/doubleN.c", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n This program reads one character at a time from standard input,\r\n and then writes each character N times to standard output.\r\n\r\n When using Windows, if standard input is the console\r\n keyboard, use ^z (Control-z) to denote the end of file\r\n (and you must use ^z at the beginning of a line!).\r\n*/\r\n#include <stdio.h>\r\n#include <stdlib.h>\r\n\r\nint main(int argc, char* argv[])\r\n{\r\n int i;\r\n char c;\r\n int n = 2; // default value for n\r\n // Check for a command line argument.\r\n if (argc > 1)\r\n {\r\n n = atoi(argv[1]);\r\n if (n <= 0) n = 2;\r\n }\r\n\r\n while ( (c = getchar()) != EOF )\r\n {\r\n if ( c != 10 && c != 13 ) // don't double LF or CR\r\n {\r\n for (i = 0; i < n; i++)\r\n {\r\n printf(\"%c\", c);\r\n }\r\n }\r\n else\r\n {\r\n printf(\"%c\", c);\r\n fflush(stdout); // try commenting this out\r\n }\r\n }\r\n return 0;\r\n}" }, { "alpha_fraction": 0.6618257164955139, "alphanum_fraction": 0.6618257164955139, "avg_line_length": 16.615385055541992, "blob_id": "704322d44b91b246c37ef817d3c384c7d6583d5d", "content_id": "c0621dd68de260041df37f2a375560028ac0d481", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 482, "license_type": "no_license", "max_line_length": 59, "num_lines": 26, "path": "/Year 3/Assignment3/Add_Command.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n#include \"Binary_Op_Command.h\"\r\n\r\n#ifndef _ADD_COMMAND_H\r\n#define _ADD_COMMAND_H\r\n\r\nclass Add_Command: public Binary_Op_Command {\r\npublic: \r\n\r\n\tAdd_Command(Stack <int> & s);\r\n\t\r\n\t~Add_Command(void);\r\n\t\r\n\t//evaluates the addition of the integers popped\r\n\tvirtual int evaluate (int , int) const;\r\n\t\r\n\tvirtual int prec (void) const;\r\n\t\r\nprivate:\r\n\tint precedence;\r\n\t\t\r\n};\r\n#endif" }, { "alpha_fraction": 0.5462499856948853, "alphanum_fraction": 0.5462499856948853, "avg_line_length": 15.494505882263184, "blob_id": "651e6cdad44b3810095dbdb99255c1c9a0e87285", "content_id": "79aa4669ba41b4c7bbdffb822ea7a34360d3a5ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1600, "license_type": "no_license", "max_line_length": 132, "num_lines": 91, "path": "/Year 2/Project 2/BinaryTree.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "//Binary Tree.cpp\r\n//\r\n#include <iostream>\r\n#include <string>\r\n#include \"BinaryTree.h\"\r\n\r\n\r\nBinaryTree::BinaryTree() {\r\n\tmainRoot = NULL;\r\n}\r\n\r\n node* BinaryTree::insert(node * &root,std::string word, int line)\r\n{\r\n\t\r\n\t//create new node\r\n\tnode * temp = new node;\r\n\t//node * root;\r\n\ttemp -> key = word;\r\n\ttemp -> num.push_back(line);\r\n\t\r\n\ttemp->left = 
temp->right = NULL;\r\n\t\r\n\t\r\n\tif (root == NULL)\r\n\t\t//create new binary node\r\n\t\r\n\t\troot = temp;\r\n\t\t\r\n\t\t\r\n\t// if keys are equal then\r\n\t\r\n\tif (root ->key == temp-> key)\r\n\t{\r\n\t\tif (root->num != temp->num)\r\n\t\t\troot->num.push_back(line);\r\n\t}\r\n\t\r\n\t// if word is larger\r\n\telse if (root-> key < temp->key){\r\n\t\t//insert that node\r\n\t\t//if it is not null, insert\r\n\t\tif(root->right){\r\n\t\t\t\r\n\t\t\troot -> right = insert(root->right,word, line);\r\n\t\t}\r\n\t\t\r\n\t\telse{\r\n\r\n\t\t\troot-> right = temp;\r\n\t\t}\r\n\t\t\r\n\t\t\r\n\t}\r\n\t//if word is smaller\r\n\telse if (root-> key > temp->key){\r\n\t\t//insert that node\r\n\t\tif (root->left){\r\n\t\t\t\r\n\t\t\troot->left = insert(root->left,word, line);\r\n\t\t}\r\n\t\telse{\r\n\t\t\t\r\n\t\t\troot->left = temp;\r\n\t\t}\r\n\t\t\r\n\t}\r\n\t\r\n\treturn root;\r\n\t\r\n}\r\n\r\n\r\n\r\nvoid BinaryTree::sortTree(node *mainRoot)\r\n{\r\n\tif (mainRoot != NULL)\r\n\t\t{\r\n\t\t\t//print out left subtree, which is smaller elements then print out the root followed by the right side which hold larger elements\r\n\t\t\tsortTree(mainRoot ->left);\r\n\t\t\tstd::cout << mainRoot -> key ;\r\n\t\t\tstd::cout << '\\t';\r\n\t\t\tstd::vector <int>:: iterator i;\r\n\t\t\tfor (i = mainRoot->num.begin(); i != mainRoot->num.end(); ++i)\r\n\t\t\t\tstd::cout << *i << '\\t';\r\n \r\n\t\t\tstd::cout << std::endl;\r\n\t\t\t\r\n\t\t\r\n\t\t\tsortTree(mainRoot -> right);\r\n\t\t}\r\n}\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6543909311294556, "alphanum_fraction": 0.6543909311294556, "avg_line_length": 15.699999809265137, "blob_id": "eefd016fd137398f1f5069db561499f41b707b63", "content_id": "7b075dd12348b4ec53496b88276ae2f8a5fc7bd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 706, "license_type": "no_license", "max_line_length": 69, "num_lines": 40, "path": "/Year 3/Assignment 4/Calculator.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//Calculator Class\r\n\r\n//header\r\n\r\n#include <string>\r\n#include <iostream>\r\n#include <sstream>\r\n#include <cctype>\r\n\r\n#ifndef _CALCULATOR_H\r\n#define _CALCULATOR_H\r\n\r\n#include \"Expr_Tree_Builder.h\"\r\n#include \"Eval_Expr_Tree.h\"\r\n#include \"Expr_Node.h\"\r\n\r\nclass Calculator \r\n{\r\n\tpublic:\r\n\t\tCalculator (Expr_Tree_Builder & builder, Eval_Expr_Tree & visitor);\r\n\t\t~Calculator (void);\r\n\t\r\n\t\t//converts postfix to infix.\r\n\t\tvoid build_expression(const std::string & infix);\r\n\t\r\n\t\t//evaluate postfix\r\n\t\tvoid postfix_eval();\r\n\t\r\n\tprivate:\r\n\t\tExpr_Tree_Builder & build_;\r\n\t\tEval_Expr_Tree & visitor_;\r\n\t\r\n};\r\n\r\n#endif" }, { "alpha_fraction": 0.5408011674880981, "alphanum_fraction": 0.5474777221679688, "avg_line_length": 16.76388931274414, "blob_id": "f8faf16fedf7c6d73ef7b131d1dd756c4400f9e7", "content_id": "759b570c2bb5002e6b9ed920e617c196d0b81b23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1348, "license_type": "no_license", "max_line_length": 94, "num_lines": 72, "path": "/Year 3/composition-source/Queue.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "//==============================================================================\r\n/**\r\n * Honor Pledge:\r\n *\r\n * I pledge that I have neither given 
nor received any help\r\n * on this assignment.\r\n */\r\n//==============================================================================\r\n\r\n#ifndef _CS507_QUEUE_H_\r\n#define _CS507_QUEUE_H_\r\n\r\n#include \"Array.h\"\r\n\r\n#include <exception>\r\n\n// COMMENT You should use aggregation instead of inheritance since logically\n// a queue is not an array.\r\n\r\n//RESPONSE: Instead of inheriting. I give the Queue class an object of array type called queue\n\r\n/**\r\n * @class Queue\r\n *\r\n * Basic queue for abitrary elements.\r\n */\r\ntemplate <typename T>\r\nclass Queue\r\n{\r\n\tpublic:\r\n /// Type definition of the type.\r\n typedef T type;\r\n\r\n /// Default constructor.\r\n Queue (void);\r\n\r\n /// Copy constructor.\r\n Queue (const Queue & q);\r\n\r\n /// Destructor.\r\n ~Queue (void);\r\n \r\n //Enqueue\r\n void enqueue(T element);\r\n \r\n //Dequeue\r\n T dequeue(void);\r\n \r\n //is_empty\r\n bool is_empty(void);\r\n \r\n //size\r\n size_t size(void)const;\r\n \r\n //clear\r\n void clear(void);\r\n \r\n private:\r\n\tArray<T> queue;\r\n\tsize_t front;\r\n\tsize_t back;\r\n\t\r\n\t\r\n};\r\n\r\n// include the inline files\r\n#include \"Queue.inl\"\r\n\r\n// include the source file since template class\r\n#include \"Queue.cpp\"\r\n\r\n#endif // !defined _CS507_FIXED_ARRAY_H_\r\n" }, { "alpha_fraction": 0.5516252517700195, "alphanum_fraction": 0.5571223497390747, "avg_line_length": 28.764705657958984, "blob_id": "96068d837c6bf83a892ecf54981988e2a963dca0", "content_id": "15f57c12bca39d7957d23863803ebe8fa9b41ece", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4184, "license_type": "no_license", "max_line_length": 87, "num_lines": 136, "path": "/Master Year 1/Computer Graphics/HW4/renderer/models/GRSModel.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.models;\r\nimport renderer.scene.*;\r\n\r\nimport java.util.Scanner;\r\nimport java.io.File;\r\nimport java.io.FileInputStream;\r\nimport java.io.IOException;\r\nimport java.io.FileNotFoundException;\r\n\r\n/**\r\n Create a wirefram model from a GRS file.\r\n<p>\r\n GRS files are a simple file format for describing two-dimensional\r\n drawings made up of \"polylines\". The format was created for the textbook\r\n \"Computer Graphics Using OpenGL\", 3rd Ed, by Francis S Hill\r\n and Stephen M Kelley (see pages 61-63).\r\n<p>\r\n See <a href=\"https://en.wikipedia.org/wiki/Polyline\" target=\"_top\">\r\n https://en.wikipedia.org/wiki/Polyline</a>\r\n<p>\r\n The structure of a GRS file is:\r\n <ol>\r\n <li>A number of comment lines followed by a line\r\n starting with an asterisk, {@code '*'}.\r\n <li>A line containing the \"extent\" (bounding box)\r\n of the drawing given as four doubles in model\r\n coordinates (left, top, right, bottom).\r\n <li>The number of line-strips (i.e., polylines)\r\n in the drawing.\r\n <li>The list of line-strips. 
Each line-strip starts\r\n with the number of vertices in the line-strip,\r\n followed by the (x, y) model coordinates for\r\n each vertex.\r\n </ol>\r\n*/\r\npublic class GRSModel extends Model\r\n{\r\n // the figure's extents (bounding box)\r\n public double left = 0.0;\r\n public double top = 0.0;\r\n public double right = 0.0;\r\n public double bottom = 0.0;\r\n public int numLineStrips = 0;\r\n\r\n /**\r\n Create a wireframe model from the contents of an GRS file.\r\n\r\n @param grsFile {@link File} object for the GRS data file\r\n */\r\n public GRSModel(File grsFile)\r\n {\r\n super(\"GRS Model\");\r\n\r\n // Open the GRS file.\r\n String grsName = null;\r\n FileInputStream fis = null;\r\n try\r\n {\r\n grsName = grsFile.getCanonicalPath();\r\n fis = new FileInputStream( grsFile );\r\n }\r\n catch (FileNotFoundException e)\r\n {\r\n e.printStackTrace(System.err);\r\n System.err.printf(\"ERROR! Could not find GRS file: %s\\n\", grsName);\r\n System.exit(-1);\r\n }\r\n catch (IOException e)\r\n {\r\n e.printStackTrace(System.err);\r\n System.err.printf(\"ERROR! Could not open GRS file: %s\\n\", grsName);\r\n System.exit(-1);\r\n }\r\n\r\n this.name = grsName;\r\n\r\n Scanner scanner = new Scanner(fis);\r\n\r\n // Get the geometry from the GRS file.\r\n try\r\n {\r\n // skip over the comment lines\r\n String line = scanner.nextLine();\r\n while ( ! line.startsWith(\"*\") )\r\n {\r\n //System.err.println(line);\r\n line = scanner.nextLine();\r\n }\r\n\r\n // read the figure extents\r\n this.left = scanner.nextDouble();\r\n this.top = scanner.nextDouble();\r\n this.right = scanner.nextDouble();\r\n this.bottom = scanner.nextDouble();\r\n\r\n // read the number of line-strips\r\n this.numLineStrips = scanner.nextInt();\r\n\r\n int index = -1;\r\n\r\n // read each line-strip\r\n for(int j = 0; j < this.numLineStrips; j++)\r\n {\r\n // read the number of vertices in this line-strip\r\n int numVertices = scanner.nextInt();\r\n\r\n // put this line-strip in the Model object\r\n double x = scanner.nextDouble(); // read the first vertex in the line-strip\r\n double y = scanner.nextDouble();\r\n addVertex( new Vertex(x, y, 0) );\r\n index++;\r\n for (int i = 1; i < numVertices; i++)\r\n {\r\n // read the next model coordinate pair\r\n x = scanner.nextDouble();\r\n y = scanner.nextDouble();\r\n addVertex( new Vertex(x, y, 0) );\r\n index++;\r\n // create a new LineSegment in the Model\r\n addLineSegment(new LineSegment(index - 1, index));\r\n }\r\n }\r\n fis.close();\r\n }\r\n catch (IOException e)\r\n {\r\n e.printStackTrace(System.err);\r\n System.err.printf(\"ERROR! 
Could not read GRS file: %s\\n\", grsName);\r\n System.exit(-1);\r\n }\r\n }\r\n}//GRSModel\r\n" }, { "alpha_fraction": 0.8238993883132935, "alphanum_fraction": 0.8238993883132935, "avg_line_length": 158, "blob_id": "c50e47d14fa90974f24f0d78f9b0c7a3c4d16f5f", "content_id": "ecacf6a04197cc2844c80fa2f2f6cf335c1d49b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 159, "license_type": "no_license", "max_line_length": 158, "num_lines": 1, "path": "/Master Year 1/Database Systems/README.txt", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "My Database Systems project assignment description is in the \"Design Document - The Internet Airline\" document with my source files in the Submission zip file\n" }, { "alpha_fraction": 0.6466019153594971, "alphanum_fraction": 0.6504854559898376, "avg_line_length": 17.884614944458008, "blob_id": "3d36e734e2760d5c253f9665655e6527f959a09e", "content_id": "36ae39eb5b2900f50251e1e65f6703b5df809de0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 515, "license_type": "no_license", "max_line_length": 59, "num_lines": 26, "path": "/Year 3/Assignment 4/Add_Expr_Node.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n#ifndef _ADD_EXPR_NODE\r\n#define _ADD_EXPR_NODE\r\n\r\n#include \"Binary_Expr_Node.h\"\r\n#include \"Expr_Node_Visitor.h\"\r\n\r\nclass Add_Expr_Node : public Binary_Expr_Node\r\n{\r\n\tpublic:\r\n\t\tAdd_Expr_Node(void);\r\n\t\tvirtual ~Add_Expr_Node(void);\r\n\t\t\r\n\t\t//does calculation of the ints\t\t\r\n\t\tvirtual int calculate(int num1, int num2);\r\n\t\t\r\n\t\t//visits the add node\r\n\t\tvirtual void accept (Expr_Node_Visitor & v);\r\n\t\r\n};\r\n#endif" }, { "alpha_fraction": 0.6034482717514038, "alphanum_fraction": 0.6068965792655945, "avg_line_length": 15.058823585510254, "blob_id": "a62f0a18fa7dba67dec87b9fce2546b9cb6c3e58", "content_id": "37dfeef5390f1b9365d9896320ef61af86d849cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 290, "license_type": "no_license", "max_line_length": 62, "num_lines": 17, "path": "/Year 2/Project 1/Driver.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#include <ctime>\r\n#include <iostream>\r\n#include<stdlib.h>\r\n#include \"Tour.h\"\r\n#include \"Stack.h\"\r\n\r\nusing namespace std;\r\nint main()\r\n{\r\n\tsrand(time(NULL));\r\n\tTour * run = new Tour();\r\n\tcout << \"Welcome to the Knight's Tour!! 
Lets Begin.\" << endl;\r\n\t\r\n\trun->tourFunct();\r\n\t\r\n\treturn 0;\r\n}\r\n" }, { "alpha_fraction": 0.6559766530990601, "alphanum_fraction": 0.6559766530990601, "avg_line_length": 17.16666603088379, "blob_id": "d00636e1b2c5061c1d452d81a9ca39c008a1e027", "content_id": "588c88defa707549937b385be7482e5f0df45c79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 343, "license_type": "no_license", "max_line_length": 52, "num_lines": 18, "path": "/Year 2/Project 3/HeapSort.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "//HeapSort.h\r\n\r\n#ifndef HEAPSORT_H\r\n#define HEAPSORT_H\r\n#include \"Sort.h\"\r\nclass HeapSort: public Sort\r\n{\r\n\tpublic:\r\n\t\t//constructor\r\n\t\tHeapSort();\r\n\t\t//destructor\r\n\t\t~HeapSort();\r\n\t\t//create another function to build heap from array\r\n\t\tvoid rearrange(int *, int,int);\r\n\t\t//virtual sort method\r\n\t\tvoid sort(int *, int);\r\n};\r\n#endif//HEAPSORT_H" }, { "alpha_fraction": 0.5036496520042419, "alphanum_fraction": 0.5127737522125244, "avg_line_length": 19.076923370361328, "blob_id": "37d4d6bf33e98bc48923ad451102b1804c3fe31e", "content_id": "81da79afff71d7ef898b789a014c849768f289e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 548, "license_type": "no_license", "max_line_length": 62, "num_lines": 26, "path": "/Master Year 2/Operating Systems/HW3/hw3/filters/make_one_line.c", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n This program concatenates all the lines of its input\r\n into one single output line.\r\n\r\n When using Windows, if standard input is the console\r\n keyboard, use ^z (Control-z) to denote the end of file\r\n (and you must use ^z at the beginning of a line!).\r\n*/\r\n#include <stdio.h>\r\n\r\nint main()\r\n{\r\n char c;\r\n while ( (c = getchar()) != EOF )\r\n {\r\n if ( c == 10 || c == 13 ) // replace LF or CR with space\r\n {\r\n printf(\" \");\r\n }\r\n else\r\n {\r\n printf(\"%c\", c);\r\n }\r\n }\r\n return 0;\r\n}\r\n" }, { "alpha_fraction": 0.6409668326377869, "alphanum_fraction": 0.641143262386322, "avg_line_length": 21.915611267089844, "blob_id": "9420299ad9f8e4cbe827e26d14151a496d9c32af", "content_id": "833199800af379d5564de8905e65ada968d11755", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5668, "license_type": "no_license", "max_line_length": 171, "num_lines": 237, "path": "/Year 3/Assignment 4/Expr_Tree_Builder.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n\r\n#include \"Expr_Tree_Builder.h\"\r\n\r\n// COMMENT The algorithm below seems to work, but will fail once\r\n// you have an expression with parenthesis.\r\n\r\n//RESPONSE: I create build parenthesis methods that will handle the parenthesis and also fixed the start expression so that it works properly with it\r\n\r\nExpr_Tree_Builder::Expr_Tree_Builder(void)\r\n{\r\n\t\r\n}\r\n\r\nExpr_Tree_Builder::~Expr_Tree_Builder(void)\r\n{\r\n\tdelete this->sub_;\r\n\tdelete this->tree_;\r\n}\r\n\t\t\r\n//starts a new expression\r\nvoid Expr_Tree_Builder::start_expression (void)\r\n{\r\n\t\t\r\n\t//push current sub tree to sub tree stack\r\n\twhile (this->sub_ != nullptr )\r\n\t{\r\n\t\tthis->sub_expression.push(this->sub_);\r\n\t}\r\n\t\r\n\t//this creates a new sub tree\r\n\tExpr_Node * 
sub_tree;\r\n\tthis->sub_ = sub_tree;\r\n\t\r\n\t\t\t\r\n}\r\n\t\r\n//methods for building types of nodes\r\nvoid Expr_Tree_Builder::build_number (int n)\r\n{\r\n\tNum_Expr_Node * newNum = new Num_Expr_Node(n);\r\n\t\r\n\tthis->temp.push(newNum);\r\n\t\r\n\t//if there are two operands\r\n\twhile (temp.size() == 2)\r\n\t{\r\n\t\t//check if left or right is null and put into the null one first (from left to right)\r\n\t\tif(this->sub_->left_leaf == nullptr)\r\n\t\t{\r\n\t\t\tthis->sub_->left_leaf = temp.top();\r\n\t\t\ttemp.pop();\r\n\t\t}\r\n\t\r\n\t\telse if(this->sub_->right_leaf == nullptr)\r\n\t\t{\r\n\t\t\tthis->sub_->right_leaf = temp.top();\r\n\t\t\ttemp.pop();\r\n\t\t}\r\n\t}\r\n\t\r\n}\r\n\r\nvoid Expr_Tree_Builder::build_add_operator (void)\r\n{\r\n\t//build add node\r\n\t//if tree is not null then tree root is the binary node, and set its protected members to the elements of the tree leaves\r\n\tAdd_Expr_Node * newAdd = new Add_Expr_Node();\r\n\t\r\n\t\r\n\tif(this->sub_ == nullptr)\r\n\t{\r\n\t\tthis->sub_ = newAdd;\r\n\t}\r\n\t//else start new expression and make operator the root\r\n\telse\r\n\t{\r\n\t\tthis->start_expression();\r\n\t}\r\n\t\r\n}\r\n\r\nvoid Expr_Tree_Builder::build_subtract_operator(void)\r\n{\r\n\t//build subtract node\r\n\t//if tree is not null then tree root is the binary node, and set its protected members to the elements of the tree leaves\r\n\t\r\n\tSubtract_Expr_Node * newSubtract = new Subtract_Expr_Node();\r\n\t\r\n\tif(this->sub_ == nullptr)\r\n\t{\r\n\t\tthis->sub_ = newSubtract;\r\n\t}\r\n\t//else start new expression and make operator the root\r\n\telse\r\n\t{\r\n\t\tthis->start_expression();\r\n\t\t\t\r\n\t}\r\n}\r\n\r\nvoid Expr_Tree_Builder::build_multiply_operator(void)\r\n{\r\n\t//build multiply node\r\n\t//if tree is not null then tree root is the binary node, and set its protected members to the elements of the tree leaves\r\n\tMultiply_Expr_Node * newMultiply = new Multiply_Expr_Node();\r\n\t\r\n\tif (this->sub_ == nullptr)\r\n\t{\r\n\t\tthis->sub_ = newMultiply;\r\n\t\t\r\n\t}\r\n\t//else start new expression and make operator the root\r\n\telse\r\n\t{\r\n\t\tthis->start_expression();\r\n\t\t\r\n\t}\r\n\t\r\n}\r\nvoid Expr_Tree_Builder::build_division_operator(void)\r\n{\r\n\t//build division node\r\n\t//if tree is not null then tree root is the binary node, and set its protected members to the elements of the tree leaves\r\n\tDivision_Expr_Node * newDivision = new Division_Expr_Node();\r\n\t\r\n\tif (this->sub_ == nullptr)\r\n\t{\r\n\t\tthis->sub_ = newDivision;\r\n\t}\r\n\t//else start new expression and make operator the root\r\n\telse\r\n\t{\r\n\t\tthis->start_expression();\r\n\t}\r\n\t\r\n}\r\n\r\nvoid Expr_Tree_Builder::build_modulus_operator(void)\r\n{\r\n\t//build modulus node\r\n\t//if tree is not null then tree root is the binary node, and set its protected members to the elements of the tree leaves\r\n\tModulus_Expr_Node * newModulus = new Modulus_Expr_Node();\r\n\t\t\r\n\tif(this->sub_ == nullptr)\r\n\t{\r\n\t\tthis->sub_ = newModulus;\r\n\t}\r\n\t//else start new expression and make operator the root\r\n\telse\r\n\t{\r\n\t\tthis->start_expression();\r\n\t}\r\n\t\r\n}\r\n\r\n//build a left parenthesis\r\nvoid Expr_Tree_Builder::build_left_parenthesis(void)\r\n{\r\n\t//start_expression\r\n\t\t//if the stack is not empty, empty it and set the the whatever operand (if any) to one of the leaves and the leaf that contains nullptr, I will create a new root ptr_fun\r\n\t\twhile (!temp.is_empty())\r\n\t\t{\r\n\t\t\t//check if left or 
right is null and put into the null one first (from left to right)\r\n\t\t\tif(this->sub_->left_leaf == nullptr)\r\n\t\t\t{\r\n\t\t\t\tthis->tree_->left_leaf = temp.top();\r\n\t\t\t\ttemp.pop();\r\n\t\t\t}\r\n\t\r\n\t\t\telse if(this->tree_->right_leaf == nullptr)\r\n\t\t\t{\r\n\t\t\t\tthis->tree_->right_leaf = temp.top();\r\n\t\t\t\ttemp.pop();\r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\t\tthis->start_expression();\r\n\t\r\n}\r\n\r\n//build a right parenthesis\r\nvoid Expr_Tree_Builder::build_right_parenthesis(void)\r\n{\r\n\t//once again I'm emptying the stack but this time I want to go back to the tree above that node.\r\n\t\twhile (!this->temp.is_empty())\r\n\t\t{\r\n\t\t\t//check if left or right is null and put into the null one first (from left to right)\r\n\t\t\tif(this->tree_->left_leaf == nullptr)\r\n\t\t\t{\r\n\t\t\t\tthis->tree_->left_leaf = temp.top();\r\n\t\t\t\ttemp.pop();\r\n\t\t\t}\r\n\t\r\n\t\t\telse if(this->tree_->right_leaf == nullptr)\r\n\t\t\t{\r\n\t\t\t\tthis->tree_->right_leaf = temp.top();\r\n\t\t\t\ttemp.pop();\r\n\t\t\t}\r\n\t\t}\r\n\t\t//believe I'll need to pop the last sub expression off the stack to go back to my current expression\r\n\t\t\r\n}\r\n\r\n//get current expression\r\nExpr_Node* Expr_Tree_Builder::get_expression (void)\r\n{\r\n\twhile(!this->sub_expression.is_empty())\r\n\t{\r\n\t\t\r\n\t\tif (this->tree_ == nullptr)\r\n\t\t{\r\n\t\t\tthis->tree_ == this->sub_expression.top();\r\n\t\t\tthis->sub_expression.pop();\r\n\t\t}\r\n\t\t\r\n\t\t//check if left or right is null and put into the null one first (from left to right)\r\n\t\telse if(this->tree_->right_leaf == nullptr)\r\n\t\t{\r\n\t\t\tthis->tree_->right_leaf = this->sub_expression.top();\r\n\t\t\tthis->sub_expression.pop();\r\n\t\t}\r\n\t\r\n\t\telse if(this->tree_->left_leaf == nullptr)\r\n\t\t{\r\n\t\t\tthis->tree_->left_leaf = this->sub_expression.top();\r\n\t\t\tthis->sub_expression.pop();\r\n\t\t}\r\n\t}\t\t\r\n\r\n\treturn this->tree_;\r\n}\r\n" }, { "alpha_fraction": 0.6180850863456726, "alphanum_fraction": 0.6372340321540833, "avg_line_length": 17.102041244506836, "blob_id": "1594944df3a662fbf55567d663ecde4652e8ba88", "content_id": "5887ba2dd03f27d4ad87780d12ecb9c6a11c4e76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 940, "license_type": "no_license", "max_line_length": 60, "num_lines": 49, "path": "/Year 3/Assignment3/Array_Iterator.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// $Id: Array.cpp 827 2011-02-07 14:20:53Z hillj $\r\n\r\n// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor receieved any help\r\n// on this assignment.\r\n\r\n#include \"Array_Iterator.h\"\r\n\r\ntemplate <typename T>\r\nArray_Iterator<T>::Array_Iterator(Array <T> & a):\r\na_(a),\r\ncurr_(0)\r\n{\r\n\t//default constructor\r\n}\r\n\r\ntemplate <typename T>\r\n~Array_Iterator::Array_Iterator (void)\r\n{\r\n\t//default constructor\r\n}\r\n\r\ntemplate <typename T>\r\nbool Array_Iterator <T>::is_done (void)\r\n{\r\n\t//checks if done\r\n\treturn this->curr_ >= this->a_.cur_size_;\r\n}\r\n\r\ntemplate <typename T>\r\nbool Array_Iterator<T>::advance(void)\r\n{\r\n\t++this->curr_;\r\n}\r\n\r\ntemplate <typename T>\r\nT & Array_Iterator<T>:: operator * (void)\r\n{\r\n\t//return data at current size index\r\n\treturn this->a.data_[this->curr_];\r\n}\r\n\r\ntemplate <typename T>\r\nT * Array_Iterator<T>:: operator & (void)\r\n{\r\n\t//return address at current size index\r\n\treturn 
&this->a_.data_this->curr_];\r\n}\r\n\r\n\r\n" }, { "alpha_fraction": 0.4350542724132538, "alphanum_fraction": 0.46662282943725586, "avg_line_length": 24.657894134521484, "blob_id": "b7d3e8b262cd88b375a42e94c2bb6c64a183bbdf", "content_id": "4695d632ce185591ee2e580db9035a86536a0188", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3041, "license_type": "no_license", "max_line_length": 75, "num_lines": 114, "path": "/Master Year 1/Object Oriented Design/PrimeNumbers.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "public class PrimeNumbers {\r\n\r\n //keeps track of prime numbers\r\n static int i;\r\n\r\n //used for hundred threads - 100 prime numbers per thread\r\n static int hundred = 100;\r\n\r\n //used for thousand threads - 10 prime numbers per thread\r\n static int ten = 10;\r\n\r\n public void primeCalc(int range)\r\n {\r\n String primeNumbers = \"\";\r\n int num = 0;\r\n\r\n for (i = i ; i <= range; i++) {\r\n int c = 0;\r\n for (num = i; num >= 1; num--) {\r\n \r\n // check if prime\r\n if (i % num == 0) {\r\n \r\n // increment counter\r\n c = c + 1;\r\n }\r\n }\r\n \r\n if (c == 2) {\r\n primeNumbers = primeNumbers + i + \" \";\r\n }\r\n }\r\n System.out.println(\"\\nPrime numbers: \\n\" + primeNumbers);\r\n \r\n }\r\n\r\n /*** Thread runs entire prime number calculation */\r\n public class one_thread extends Thread\r\n {\r\n public void run()\r\n {\r\n System.out.println(\"Thread \" + Thread.currentThread().getId());\r\n \r\n //Calcualtes all prime number from 1 - 10,000\r\n primeCalc(10000);\r\n\r\n \r\n }\r\n //loop to 10,000 for prime numbers\r\n }\r\n\r\n public class hundred_threads extends Thread\r\n {\r\n public void run()\r\n {\r\n System.out.println(\"Thread \" + Thread.currentThread().getId());\r\n \r\n //Calcualtes all prime number from 1 - 10,000\r\n //this does every hundred prime numbers\r\n primeCalc(hundred);\r\n hundred+=100;\r\n }\r\n }\r\n\r\n public class thousand_threads extends Thread\r\n {\r\n public void run()\r\n {\r\n System.out.println(\"Thread \" + Thread.currentThread().getId());\r\n \r\n //Calcualtes all prime number from 1 - 10,000\r\n //this does every ten prime numbers\r\n primeCalc(ten);\r\n ten+=10;\r\n }\r\n }\r\n\r\n public static void main(String[] args) throws Exception {\r\n\r\n //1 thread for all 10,000 numbers\r\n PrimeNumbers p1 = new PrimeNumbers();\r\n PrimeNumbers.one_thread t1 = p1.new one_thread();\r\n \r\n //one thread run\r\n //t1.run();\r\n System.out.println(\"End of 1 thread\");\r\n\r\n //***** 100 threads**\r\n System.out.println(\"Beginning of 100 threads\");\r\n for(int x = 0; x < 100; x++)\r\n {\r\n PrimeNumbers.hundred_threads t2 = p1.new hundred_threads();\r\n //t2.start();\r\n\r\n }\r\n\r\n System.out.println(\"End of 100 threads\");\r\n\r\n \r\n //*** 1,000 threads *\r\n System.out.println(\"Beginning of 1,000 threads\");\r\n for(int x = 0; x < 1000; x++)\r\n {\r\n \r\n PrimeNumbers.thousand_threads t3 = p1.new thousand_threads();\r\n t3.start();\r\n\r\n }\r\n System.out.println(\"End of 1,000 threads\");\r\n\r\n\r\n }\r\n\r\n}\r\n\r\n" }, { "alpha_fraction": 0.40988245606422424, "alphanum_fraction": 0.45189377665519714, "avg_line_length": 20.367441177368164, "blob_id": "11d825957340fbe6c09362afb9bac44316e6f007", "content_id": "afe584cae4b37a7cda5d0e24f343459f499eb461", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4594, "license_type": "no_license", "max_line_length": 81, 
"num_lines": 215, "path": "/Year 4/QRcodeSubmission/QRCodeGenerator/buildMatrix.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include \"matrixfun.h\"\nusing namespace std;\n\n\nint **makeMatrix(std::string data, int version)\n{\n int dim = (((version-1)*4)+21);\n int **matrix;\n\tmatrix = new int*[dim];\n\tfor(int i = 0; i < dim; i++)\n\t\tmatrix[i] = new int[dim];\n\n for (int i = 0; i < dim; i++)\n {\n for (int j = 0; j < dim; j++)\n {\n matrix[i][j] = 0;\n }\n }\n\tint **patterns;\n\tpatterns = new int*[dim];\n\tfor(int j = 0; j < dim; j++)\n\t\tpatterns[j] = new int[dim];\n\n for (int i = 0; i < dim; i++)\n {\n for (int j = 0; j < dim; j++)\n {\n patterns[i][j] = 0;\n }\n }\n \n\t//Set finder patterns\n\taddFinder(matrix, patterns, 0, 0);\n\taddFinder(matrix, patterns, (((version-1)*4)+21) - 7, 0);\n\taddFinder(matrix, patterns, 0, (((version-1)*4)+21) - 7);\n\tfor(int i = 0; i < 8; i++)\n\t{\n\t for(int j = 0; j < 8; j++)\n\t {\n\t patterns[i][j] = 1;\n\t patterns[(((version-1)*4)+21) - 8 + i][j] = 1;\n\t patterns[i][(((version-1)*4)+21) - 8 + j] = 1;\n\t }\n\t}\n if (version == 2)\n\t{\n\t\tfor(int i= 0; i < 5; i++)\n\t\t{\n\t\t\tmatrix[16 + i][16] = 1;\n\t\t\tmatrix[16][16 + i] = 1;\n\t\t\tmatrix[20][16 + i] = 1;\n\t\t\tmatrix[16 + i][20] = 1;\n\t\t}\n matrix[18][18] = 1;\n\t\tfor(int i= 0; i < 5; i++)\n\t\t{\n\t\t\tfor(int j= 0; j < 5; j++)\n\t\t {\n\t\t\t patterns[16 + i][16 + j] = 1;\n\t\t }\n\t\t}\n\t}\n\t//Add timing patterns\n\tint timePat = dim - 7;\n\tfor(int i = 8; i < dim - 7; i = i+2)\n\t{\n\t matrix[i][6] = 1;\n\t matrix[6][i] = 1;\n\t}\n\tfor(int i = 8; i < dim - 7; i++)\n\t{\n\t patterns[i][6] = 1;\n\t patterns[6][i] = 1;\n\t}\n\t//Add dark module\n\tint darkx = (4 * version) + 9;\n\tmatrix[darkx][8] = 1;\n\tpatterns[darkx][8] = 1;\n\t//Reserve format info with value 2 (format)\n\tsetInfoAreas(matrix, patterns, dim - 1);\n \tlayoutData(matrix, patterns, data, dim - 1, version);\n\n\t\n\treturn matrix;\n}\nvoid addFinder(int **matrix, int **patterns, int cornerx, int cornery)\n{\n\t//map[cornerx][cornery] = 1;\n\tfor(int i=0; i < 7; i++)\n\t{\n\t\tmatrix[cornerx + i][cornery] = 1;\n\t\tmatrix[cornerx][cornery + i] = 1;\n\t\tmatrix[cornerx + 6][cornery + i] = 1;\n\t\tmatrix[cornerx + i][cornery + 6] = 1;\n\t}\n\tfor(int j=2; j < 5; j++)\n\t{\n\t\tfor(int k=2; k < 5; k++)\n\t\t{\n\t\t\tmatrix[cornerx + j][cornery + k] = 1;\n\t\t}\n\t}\n\t//Designate area of the patterns as a patterns in patterns array\n\tfor(int i=0; i < 7; i++)\n\t{\n\t\tfor(int j=0; j < 7; j++)\n\t\t{\n\t\t patterns[cornerx + i][cornery + j] = 1;\n\t\t}\n\t}\n}\nvoid setInfoAreas(int **matrix, int **patterns, int dimval)\n{\n for(int i = 0; i < 7; i++)\n\t{\n\t matrix[dimval - i][8] = 0;\n\t patterns[dimval - i][8] = 1;\n\t}\n\tfor(int k = 0; k < 8; k++)\n\t{\n\t matrix[8][dimval - k] = 0;\n\t patterns[8][dimval - k] = 1;\n\t}\n\tfor(int i = 0; i < 6; i++)\n\t {\n\t matrix[i][8] = 0;\n\t matrix[8][i] = 0;\n\t patterns[i][8] = 1;\n\t patterns[8][i] = 1;\n\t }\n\tfor(int j = 7; j < 9; j++)\n\t{\n\t matrix[j][8] = 0;\n\t matrix[8][j] = 0;\n\t patterns[j][8] = 1;\n\t patterns[8][j] = 1;\n\t}\n}\nvoid layoutData(int **matrix, int **patterns, std::string data, int dim, int ver)\n{\n int leftx = dim - 1;\n int rightx = dim;\n int currenty = dim;\n //Direction - -1 if sorting upwards, 1 if sorting downwards\n int direction = -1;\n int totalBits;\n if(ver == 1)\n {\n totalBits = 128;\n }\n else\n {\n totalBits = 224;\n }\n int dataBits = 0;\n totalBits = 
data.length() - 1;\n\n\n int col = 0;\n while (dataBits < totalBits)\n {\n\n\n if(patterns[rightx][currenty] == 0)\n {\n matrix[currenty][rightx] = int(data.at(dataBits));\n dataBits++;\n }\n if(patterns[leftx][currenty] == 0)\n {\n matrix[currenty][leftx] = int(data.at(dataBits));\n dataBits++;\n }\n if(currenty == 0 && direction == -1)\n {\n direction = 1;\n rightx = rightx - 2;\n leftx = leftx - 2;\n col++;\n }\n else if(currenty == dim && direction == 1)\n {\n direction = -1;\n rightx = rightx - 2;\n leftx = leftx - 2;\n col++;\n }\n else\n {\n currenty = currenty + direction;\n }\n if (rightx == 6)\n {\n rightx -= 1;\n leftx -= 1;\n }\n if (leftx < 0)\n {\n dataBits = totalBits;\n }\n }\n for (int i = 0; i < dim + 1; i++)\n {\n for (int j = 0; j < dim + 1; j++)\n {\n if (matrix[i][j] == 48 || matrix[i][j] == 49)\n {\n matrix[i][j] -= 48;\n }\n }\n }\n\n}\n" }, { "alpha_fraction": 0.6522767543792725, "alphanum_fraction": 0.6570076942443848, "avg_line_length": 32.161617279052734, "blob_id": "c1e8ebd57de7259d67e23a2c331c2027d31c291d", "content_id": "8f947e8e79d6c85d664f791b3f9ff06851da92e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3382, "license_type": "no_license", "max_line_length": 79, "num_lines": 99, "path": "/Master Year 1/Computer Graphics/HW2/renderer/scene/Camera.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.scene;\r\n\r\n/**\r\n This {@code Camera} data structure represents a camera\r\n located at the origin, looking down the negative z-axis.\r\n<p>\r\n This {@code Camera} has associated to it a \"view volume\"\r\n that determines what part of space the camera \"sees\" when\r\n we use the camera to take a picture (that is, when we\r\n render a {@link Scene}).\r\n<p>\r\n This {@code Camera} can \"take a picture\" two ways, using\r\n a perspective projection or a parallel (orthographic)\r\n projection. Each way of taking a picture has a different\r\n shape for its view volume.\r\n<p>\r\n For the perspective projection, the view volume is an\r\n infinitely long pyramid that is formed by the pyramid\r\n with its apex at the origin and its base in the plane\r\n {@code z = -1} with edges {@code x = -1}, {@code x = +1},\r\n {@code y = -1}, and {@code y = +1}.\r\n<p>\r\n For the orthographic projection, the view volume is an\r\n infinitely long rectangular cylinder parallel to the\r\n z-axis and with sides {@code x = -1}, {@code x = +1},\r\n {@code y = -1}, and {@code y = +1} (an infinite parallelepiped).\r\n<p>\r\n When the graphics rendering {@link renderer.pipeline.Pipeline}\r\n uses this {@code Camera} to render a {@link Scene}, the renderer\r\n \"sees\" the geometry from the scene that is contained in this\r\n camera's view volume. (Notice that this means the orthographic\r\n camera will see geometry that is behind the camera. In fact, the\r\n perspective camera also sees geometry that is behind the camera.)\r\n The renderer's {@link renderer.pipeline.Rasterize_Clip} pipeline\r\n stage is responsible for making sure that the scene's geometry\r\n that is outside of this camera's view volume is not visible.\r\n<p>\r\n The plane {@code z = -1} is the camera's \"image plane\". The\r\n rectangle in the image plane with corners {@code (-1, -1, -1)}\r\n and {@code (+1, +1, -1)} is the camera's \"view rectangle\". The\r\n view rectangle is like the film in a real camera, it is where\r\n the camera's image appears when you take a picture. 
The contents\r\n of the camera's view rectangle is what gets rasterized, by the\r\n renderer's {@link renderer.pipeline.Rasterize_Clip} pipeline\r\n stage, into a {@link renderer.framebuffer.FrameBuffer}'s\r\n {@link renderer.framebuffer.FrameBuffer.Viewport}.\r\n*/\r\npublic final class Camera\r\n{\r\n // Choose either perspective or parallel projection.\r\n public boolean perspective;\r\n\r\n /**\r\n The default {@code Camera} uses perspective projection.\r\n */\r\n public Camera()\r\n {\r\n perspective = true;\r\n }\r\n\r\n\r\n /**\r\n Set up this {@code Camera}'s view volume as a perspective projection\r\n of an infinite view pyramid extending along the negative z-axis.\r\n */\r\n public void projPerspective()\r\n {\r\n perspective = true;\r\n }\r\n\r\n\r\n /**\r\n Set up this {@code Camera}'s view volume as a parallel (orthographic)\r\n projection of an infinite view parallelepiped extending along the z-axis.\r\n */\r\n public void projOrtho()\r\n {\r\n perspective = false;\r\n }\r\n\r\n\r\n /**\r\n For debugging.\r\n\r\n @return {@link String} representation of this {@code Camera} object\r\n */\r\n @Override\r\n public String toString()\r\n {\r\n String result = \"\";\r\n result += \"Camera: \\n\";\r\n result += \"perspective = \" + perspective + \"\\n\";\r\n return result;\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.6182212829589844, "alphanum_fraction": 0.6268980503082275, "avg_line_length": 15, "blob_id": "9c4b6fa6b06d6fb107efd6c71bf7a9d1246e710a", "content_id": "f48e7bed9e9b753c02b63c93e00ec26adc189d19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 461, "license_type": "no_license", "max_line_length": 59, "num_lines": 27, "path": "/Year 3/Assignment 4/Add_Expr_Node.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n#include \"Add_Expr_Node.h\"\r\n\r\nAdd_Expr_Node::Add_Expr_Node(void)\r\n{\r\n\t//constructor\r\n}\r\nAdd_Expr_Node::~Add_Expr_Node(void)\r\n{\r\n\t//destructor\r\n}\r\n\t\t\r\nint Add_Expr_Node::calculate(int num1, int num2)\r\n{\r\n\t//return addition of two numbers\r\n\treturn num1 + num2;\r\n}\r\n\r\nvoid Add_Expr_Node:: accept (Expr_Node_Visitor &v) \r\n{\r\n\tv.Visit_Add_Node (*this);\r\n}\r\n\r\n" }, { "alpha_fraction": 0.7160326242446899, "alphanum_fraction": 0.7160326242446899, "avg_line_length": 19.44444465637207, "blob_id": "04d40367e3ca1df23414a686755e67c645076bbc", "content_id": "3635de334bacc8e755f45a263c6fcc29e291b2ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 736, "license_type": "no_license", "max_line_length": 69, "num_lines": 36, "path": "/Year 4/QRcodeSubmission/QRCodeGenerator/mainwindow.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#ifndef MAINWINDOW_H\n#define MAINWINDOW_H\n\n#include <QMainWindow>\n#include <string>\n#include <map>\n#include \"errorCorrection.h\"\n#include \"messagefun.h\"\n#include \"matrixfun.h\"\n\nQT_BEGIN_NAMESPACE\nnamespace Ui { class MainWindow; }\nQT_END_NAMESPACE\n\nclass MainWindow : public QMainWindow\n{\n Q_OBJECT\n\npublic:\n MainWindow(QWidget *parent = nullptr);\n ~MainWindow();\n\nprivate slots:\n void on_submitButton_clicked();\n\nprivate:\n Ui::MainWindow *ui;\n\nprivate:\n std::string * dataEncoding();\n int chooseVersion(std::string input);\n std::string characterCount(std::string input, int versionNumber);\n std::string 
decimalToBinary(std::string input, int binarySize);\n std::string getColor();\n};\n#endif // MAINWINDOW_H\n" }, { "alpha_fraction": 0.5176848769187927, "alphanum_fraction": 0.5225080251693726, "avg_line_length": 21.037036895751953, "blob_id": "09db27adbfe73dd4cc441c17ce7d7f9b7d8d9e2a", "content_id": "6d29c8fa1a11056d9d208fab6422094b198c5970", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 622, "license_type": "no_license", "max_line_length": 65, "num_lines": 27, "path": "/Master Year 2/Operating Systems/HW3/hw3/filters/to_lower_case.c", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n This program reads lines from standard input, converts all\r\n the letters to lower case, and writes them to standard output.\r\n\r\n When using Windows, if standard input is the console\r\n keyboard, use ^z (Control-z) to denote the end of file\r\n (and you must use ^z at the beginning of a line!).\r\n*/\r\n#include <stdio.h>\r\n\r\nint main()\r\n{\r\n char c;\r\n while ( (c = getchar()) != EOF )\r\n {\r\n if ( 'A' <= c && c <= 'Z' )\r\n {\r\n printf(\"%c\", c + 32);\r\n }\r\n else\r\n {\r\n printf(\"%c\", c);\r\n fflush(stdout); // try commenting this out\r\n }\r\n }\r\n return 0;\r\n}\r\n" }, { "alpha_fraction": 0.6012269854545593, "alphanum_fraction": 0.6012269854545593, "avg_line_length": 12.260869979858398, "blob_id": "2c1e7a2227753abd225435292c39fcbc1d09ea71", "content_id": "9b4073ef342b94dd44d808f441e861c93a04f675", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 326, "license_type": "no_license", "max_line_length": 59, "num_lines": 23, "path": "/Year 3/Assignment 4/Expr_Tree.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "//Expr_Tree class\r\n\r\n// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n#ifndef _EXPR_TREE\r\n#define _EXPR_TREE\r\n\r\n#include \"Expr_Node.h\"\r\n\r\nclass Expr_Tree\r\n{\r\n\tpublic:\r\n\t\tExpr_Tree(void);\r\n\t\t~Expr_Tree(void);\r\n\t\t\r\n\t\tExpr_Node * left_;\r\n\t\tExpr_Node * right_;\r\n\t\t\r\n};\r\n#endif" }, { "alpha_fraction": 0.5453934073448181, "alphanum_fraction": 0.5527908802032471, "avg_line_length": 22.78333282470703, "blob_id": "71121eb99bfd1550cced5787e1d6b65667472c3d", "content_id": "54acad4451381cf738c08895fc951c0eb42e4417", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1487, "license_type": "no_license", "max_line_length": 72, "num_lines": 60, "path": "/Master Year 1/Programming Languages and Compilers/HW1/PrettyPrinter1.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\nCourse: CS 51530\r\nName: Marcellus Hunt\r\nEmail: [email protected]\r\nAssignment: 1\r\n*/\r\n\r\n/**\r\n The prettyPrint() method takes a BTree and\r\n converts it into a well formated string.\r\n*/\r\npublic class PrettyPrinter1\r\n{\r\n public static String prettyPrint(BTree tree)\r\n {\r\n return prettyPrint(tree, \"\");\r\n }\r\n\r\n\r\n /**\r\n This prettyPrint() method is essentially a\r\n preorder traversal of the tree.\r\n */\r\n private static String prettyPrint(BTree tree, String indentation)\r\n {\r\n String result = \"\";\r\n \r\n //empty tree\r\n if (tree == null) // empty tree (stops the recursion)\r\n {\r\n result += indentation + \"()\" + \"\\n\";\r\n }\r\n\r\n //tree of single node\r\n else if (tree.depth() == 0) // depth==0 stops the recursion also\r\n {\r\n 
result += indentation + tree.getElement() + \"\\n\" ;\r\n }\r\n else\r\n {\r\n //indent after new line\r\n //increase indentation before starting new tree\r\n result += indentation + \"(\" + tree.getElement() + \"\\n\";\r\n indentation +=\" \";\r\n\r\n //begin left tree\r\n result += prettyPrint( tree.getLeftTree() ,indentation) ;\r\n\r\n //begin right tree\r\n result += prettyPrint( tree.getRightTree(),indentation);\r\n\r\n //decrease indentation before printing end parenthesis\r\n indentation = indentation.substring(0, indentation.length()-1);\r\n result+= indentation + \")\" + \"\\n\";\r\n }\r\n\r\n\r\n return result;\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.531734824180603, "alphanum_fraction": 0.5387870073318481, "avg_line_length": 22.44827651977539, "blob_id": "89673f1d9c435a484a303574ac7a18c6ba427978", "content_id": "8f101666c74504f15f0200603ffa89ff9aeb59d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 709, "license_type": "no_license", "max_line_length": 66, "num_lines": 29, "path": "/Master Year 2/Operating Systems/HW3/hw3/filters/shift.c", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n This program reads one character at a time from standard input,\r\n shifts the character by one place in the ASCII table, and then\r\n writes the new character to standard output.\r\n\r\n When using Windows, if standard input is the console\r\n keyboard, use ^z (Control-z) to denote the end of file\r\n (and you must use ^z at the beginning of a line!).\r\n*/\r\n#include <stdio.h>\r\n\r\nint main()\r\n{\r\n char c;\r\n while ( (c = getchar()) != EOF )\r\n {\r\n if ( c != 10 && c != 13 ) // don't change LF or CR\r\n {\r\n c++;\r\n printf(\"%c\", c);\r\n }\r\n else\r\n {\r\n printf(\"%c\", c);\r\n fflush(stdout); // try commenting this out\r\n }\r\n }\r\n return 0;\r\n}\r\n" }, { "alpha_fraction": 0.5141404271125793, "alphanum_fraction": 0.5265331864356995, "avg_line_length": 17.30674934387207, "blob_id": "707e1eecbf36c0d8c9ad6e08a696515de49549b8", "content_id": "2ba9e2b7840824df562355fc5deff1b16f0866a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3147, "license_type": "no_license", "max_line_length": 74, "num_lines": 163, "path": "/Year 2/Assignment #6/Driver.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// \r\n//Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor \r\n// received any help on this assignment\r\n//.\r\n//\r\n// huntmk\r\n#include <iostream>\r\n#include <fstream>\r\n#include <string>\r\n#include <sstream>\r\n\r\n#include \"Sort.h\"\r\n#include \"InsertionSort.h\"\r\n#include \"BubbleSort.h\"\r\n\r\n\r\nint num [50];\r\n\r\n// use as a variable to close programs when the user chooses the option to\r\nbool bothGoin = true;\r\n\r\n//LOAD INTS\r\n\tvoid loadInts()\r\n\t{\r\n\t\t//line to read in file\r\n\t\tstd::string line;\r\n\t\tstd::stringstream ss;\r\n\t\tstd::stringstream ss2;\r\n\t\tstd::string stringnum;\r\n\t\t//read data.txt\r\n\t\tstd::ifstream inputFile(\"data.txt\");\r\n\t\t\t//if file is open\r\n\t\tif (inputFile.is_open())\r\n\t\t{\r\n\t\t\tss.clear();\r\n\t\t\tss.str(\"\");\r\n\t\t\tint i = 0;\r\n\t\t\t\t//Read line\r\n\t\t\tstd::getline(inputFile,line);\r\n\t\t\tss.str(line);\r\n\t\t\t\t//Tokenize Line\r\n\t\t\twhile(std::getline(ss,stringnum, ','))\r\n\t\t\t{\t\r\n\t\t\t\t//Parse 
\r\n\t\t\t\tss2.clear();\r\n\t\t\t\tss2.str(\"\");\r\n\t\t\t\tss2.str(stringnum);\r\n\t\t\t\tss2>>num[i];\t\r\n\t\t\t\tstd::cout <<num[i]<< \",\";\r\n\t\t\t\ti++;\t\t\r\n\t\t\t}\r\n\t\t\tinputFile.close();\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tstd::cout << \"Unable to open file\" << std::endl;\r\n\t\t}\r\n\t}\r\n\t\r\n\t//MENU 2 (sub menu)\r\n\tvoid menu2()\r\n\t{\r\n\t\tbool keepGoing = true;\r\n\t\twhile (keepGoing)\r\n\t\t{\r\n\t\t\t//input for sub menu\r\n\t\t\tstd::string subDecision;\r\n\t\t\t//print menu\r\n\t\t\tstd::cout<<std::endl;\r\n\t\t\tstd::cout << \"1. Insertion Sort \" << std::endl;\r\n\t\t\tstd::cout << \"2. Bubble Sort \" << std::endl;\r\n\t\t\tstd::cout << \"3. Exit Program \" << std::endl;\r\n\t\t\tstd::cout << \"Please enter your selection: \";\r\n\t\t\tstd::cin >> subDecision;\r\n\t\t\t// if equals 1 then do insertion sort\r\n\t\t\tif (subDecision == \"1\")\r\n\t\t\t{\r\n\t\t\t\t//insertion object\r\n\t\t\t\tSort * insert = new InsertionSort;\r\n\t\t\t\t\r\n\t\t\t\t//insert sort\r\n\t\t\t\tinsert -> sort(num, 50);\r\n\t\t\t\t\r\n\t\t\t\t//prints out array\r\n\t\t\t\tfor (int m = 0; m < 50; m++)\r\n\t\t\t\t{\r\n\t\t\t\t\tstd::cout << num[m]<< \", \";\r\n\t\t\t\t}\r\n\t\t\t\tkeepGoing = false;\t\r\n\t\t\t\tdelete insert;\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t\t//if equals 2 then do bubble sort\r\n\t\t\telse if (subDecision == \"2\")\r\n\t\t\t{\r\n\t\t\t\t//bubble object\r\n\t\t\t\tSort * bubble = new BubbleSort;\r\n\t\t\t\t\r\n\t\t\t\t//bubble sort\r\n\t\t\t\tbubble -> sort(num, 50); \r\n\t\t\t\t\r\n\t\t\t\t//prints out arrray\r\n\t\t\t\tfor (int b = 0; b < 50; b++)\r\n\t\t\t\t{\r\n\t\t\t\t\tstd::cout << num[b]<<\", \";\r\n\t\t\t\t\r\n\t\t\t\t}\r\n\t\t\t\tkeepGoing = false;\r\n\t\t\t\t\r\n\t\t\t\tdelete bubble;\r\n\t\t\t}\r\n\t\t\t\t\r\n\t\t\telse if(subDecision == \"3\")\r\n\t\t\t{\r\n\t\t\t\tkeepGoing = false;\r\n\t\t\t\tbothGoin = false;\r\n\t\t\t}\t\t\t\t\r\n\t\t\r\n\t\t}\r\n\t\t\r\n\t}\r\n\t//MENU1\r\n\tvoid menu1()\r\n\t{\r\n\t\tbool keepGoing = true;\r\n\t\t\r\n\t\t//do loop\r\n\t\t//bool keepGoing = true;\r\n\t\twhile (keepGoing || bothGoin)\r\n\t\t{\r\n\t\t\t//input for main menu\r\n\t\t\tstd::string decision;\r\n\t\t\t//main menu\r\n\t\t\tstd::cout << std::endl;\r\n\t\t\tstd::cout << \"1. Load Integers (From File) \" << std::endl;\r\n\t\t\tstd::cout << \"2. 
Exit Program\" << std::endl;\r\n\t\t\tstd::cout << \"Please enter your selection: \";\r\n\t\t\tstd::cin >> decision;\r\n\t\t\t// if equals 1, load the integers and call menu 2 function\r\n\t\t\tif (decision == \"1\")\r\n\t\t\t{\r\n\t\t\t\tloadInts();\r\n\t\t\t\tmenu2();\r\n\t\t\t\tkeepGoing = false;\r\n\t\t\t}\r\n\t\t\t//if equals 2, end program\r\n\t\t\telse if(decision == \"2\")\r\n\t\t\t{\r\n\t\t\t\tkeepGoing = false;\r\n\t\t\t\tbothGoin = false;\r\n\t\t\t}\r\n\t\t\t\t\r\n\t\t}\r\n\t}\r\nint main()\r\n{\r\n\t//Start Program\r\n\tmenu1();\r\n\t\r\n\t\r\n}\r\n" }, { "alpha_fraction": 0.6141732335090637, "alphanum_fraction": 0.6141732335090637, "avg_line_length": 10.800000190734863, "blob_id": "370b142151ade876fe23f976a859ce2a19bb551e", "content_id": "a5818cba752c3e41723280444218b1cbb938431b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 254, "license_type": "no_license", "max_line_length": 24, "num_lines": 20, "path": "/Year 2/Project 1/LinkedList.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#ifndef LINKEDLIST_H_\r\n#define LINKEDLIST_H_\r\n\r\nstruct node\r\n{\r\n\tint data;\r\n\tnode *next;\r\n};\r\n\r\n\tclass LinkedList\r\n\t{\r\n\tprivate:\r\n\t\tnode * head, *tail;\r\n\tpublic:\r\n\t\r\n\t//add method\r\n\tvoid addNode(int, int);\r\n\tvoid displayList();\r\n};\r\n#endif //LINKEDLIST_H" }, { "alpha_fraction": 0.5844155550003052, "alphanum_fraction": 0.6120129823684692, "avg_line_length": 16.727272033691406, "blob_id": "4e2640de84a23985c99b19932a5d4f17c6365b11", "content_id": "372b213477cffbe58361d8cfe855fb78f45f94fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 616, "license_type": "no_license", "max_line_length": 60, "num_lines": 33, "path": "/Year 3/Assignment3/Array_Iterator.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// $Id: Array.cpp 827 2011-02-07 14:20:53Z hillj $\r\n\r\n// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor receieved any help\r\n// on this assignment.\r\n\r\n#ifndef _ARRAY_ITERATOR_H_\r\n#define _ARRAY_ITERATOR_H_\r\n\r\ntemplate <typename T>\r\nclass Array_Interator \r\n{\r\n\tpublic:\r\n\t\t//constructor\r\n\t\tArray_Interator(Array <T> & a)\r\n\t\t\r\n\t\t//destructor\r\n\t\t~Array_Interator (void)\r\n\t\t\r\n\t\tbool is_done (void);\r\n\t\tbool advance (void);\r\n\t\tT & operator * (void);\r\n\t\tT * operator -> (void);\r\n\t\r\n\tprivate:\r\n\t\tArray <T> & a_;\r\n\t\tsize_t curr_;\r\n\t\t\r\n};\r\n#include \"Array_Interator.cpp\"\r\n\r\n#endif // !defined _ARRAY_ITERATOR_H_" }, { "alpha_fraction": 0.6369814872741699, "alphanum_fraction": 0.6410794854164124, "avg_line_length": 19.714284896850586, "blob_id": "ec3a289d4900994f4713d047ded1ce812cb3294d", "content_id": "3c1770bc51cc021cefb98b30164267ea875c91e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 10005, "license_type": "no_license", "max_line_length": 111, "num_lines": 483, "path": "/Year 3/array-source/Array.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// $Id: Array.cpp 820 2011-01-17 15:30:41Z hillj $\n\n// Honor Pledge:\n//\n// I pledge that I have neither given nor receieved any help\n// on this assignment.\n\n#include \"Array.h\"\n\n\nArray::Array (void): data_(nullptr),cur_size_(0),max_size_(0)\n{\n\t//initialize constructor\t\n\t//Comment changes made\n}\n\nArray::Array (size_t length): data_(nullptr), cur_size_(0), 
max_size_(length)\n{\n\t//Comment changes made\n\t\n\t//initialize constructor\n\tthis->data_ = new char [length];\t\n\t\t\n}\n\nArray::Array (size_t length, char fill): data_(nullptr), cur_size_(length), max_size_(length)\n{\n\t//Comment changes made\n\t\n\t//initialize constructor\n\t//Comment changes made\n\t\n\t//set data to initial state\n\tthis->data_= new char[length];\n\t\n\t//for every index in array\n\tfor(int i = 0; i < this->max_size_; i++)\n\t{\n\t\t//this index in the array holds fill\t\t\n\t\tthis->data_[i] = fill;\n\t\t\n\t}\n\t\t\n}\n\n\nArray::Array (const Array & array): data_(array.data_), cur_size_(array.cur_size_), max_size_(array.max_size_)\n{\n\t//Comment changes made\n\t\n\t//copy constructor\n\tthis->max_size_ = array.max_size_;\n\t\n\t//Allocate the array\n\tthis->data_ = new char[this->max_size_];\n\t\n\t//for every index in array\n\tfor(int i = 0; i < this->max_size_; i++)\n\t{\n\t\t//copy the character at this index from the source array\n\t\tthis->data_[i] = array.data_[i];\n\t\t\t\t\n\t}\n\t\n}\n\nArray::~Array (void)\n{\n\t//destructor\n\t\n\t//delete data\n\tdelete [] data_;\n\t\n}\n\nconst Array & Array::operator = (const Array & rhs)\n{\n\t//Comment changes made\n\t\n\t//Assignment Operation- look over\n\t\n\tif (&rhs != this)\n\t{\n\t\t// delete old memory\n\t\tdelete [] data_;\n\t\t\n\t\tthis->data_ = new char [rhs.max_size_];\n\t\t\n\t\t//'this' max size holds size of object 'rhs'\n\t\tthis->max_size_ = rhs.max_size_;\n\t\t\n\t\t//for every index in array\n\t\tfor (int x = 0; x < this->max_size_; x++)\n\t\t{\n\t\t\t//thisVal holds the character of the index at 'this' array object\n\t\t\tchar thisVal = rhs.data_[x];\n\t\t\n\t\t\t//this holds the character from the object of right hand side\n\t\t\tthis->data_[x] = thisVal;\n\t\t\t\t\n\t\t}\n\t\t\n\t}\n\t//return array\n\treturn *this;\n}\n\nchar & Array::operator [] (size_t index)\n{\n\t//Comment changes made\n\t\n\t//Get the character at the specified index. If the index is not\n\t//within the range of the array, then std::out_of_range exception\n\t//is thrown.\n\t\t\t\t\n\t\t//for every index in array\n\t\tfor (int i = 0; i < this->max_size_; i++)\n\t\t{\n\t\t\t//if index given is in array\n\t\t\tif(i==index)\n\t\t\t{\n\t\t\t\t//return character of index being pointed to\n\t\t\t\treturn this->data_[i];\n\t\t\t\t\n\t\t\t}\n\t\t\t\t\t\t\t\t\t\t\t\n\t\t}\n\t\t\n\t\tthrow \"Out of range exception. \";\n\t\t\n}\nconst char & Array::operator [] (size_t index) const\n{\n\t//Comment changes made\n\t\n\t//@overload // look above\n\t//The returned character is not modifiable.\n\t//check if index is in range of array\n\t\n\t//for every index in array\n\t\tfor (int i = 0; i < this->max_size_; i++)\n\t\t{\n\t\t\t//if index is in array\n\t\t\tif(i==index)\n\t\t\t{\n\t\t\t\t//return the character at index being pointed to.\n\t\t\t\treturn this->data_[i];\n\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t}\n\t\t\t\n\t\t}\n\t\t//if not, throw out of range exception\n\t\tthrow \"Out of range exception. \";\n\t\n}\n\nchar Array::get (size_t index) const\n{\n\t//Comment changes made\n\t\n\t//Get the character at the specified index. 
If the \\a index is not within the range of the array\n\t//, then std::out_of_range exception is thrown.\n\t\n\t\t//for every index in array\n\t\tfor (int i = 0; i < this->max_size_; i++)\n\t\t{\n\t\t\t//if index is in array\n\t\t\tif(i==index)\n\t\t\t{\n\t\t\t\t//return the character at index being pointed to.\n\t\t\t\treturn this->data_[i];\n\t\t\t\t\n\t\t\t}\n\t\t\t\n\t\t}\n\t\t// out of range exception\n\t\tthrow \"Out of range exception. \";\n\t\t\n}\n\nvoid Array::set (size_t index, char value)\n{\n\t//Comment changes made\n\t\n\t//Set the character at the specified \\a index. If the \\a index is not\n    //within range of the array, then std::out_of_range exception is \n    // thrown.\n\t\n\t//for every index in array\n\tfor (int i = 0; i < this->max_size_; i++)\n\t{\t\t\n\t\t//if index has a char\n\t\tif (i == index)\n\t\t{\n\t\t\t//set character at index\n\t\t\tthis->data_[i] = value;\n\t\t\t\t\t\t\n\t\t}\n\t}\n\t\t\n}\n\nvoid Array::resize (size_t new_size)\n{\n\t//Comment changes made\n\t\n\t/* Set a new size for the array. If \\a new_size is less than the current\n     * size, then the array is truncated. If \\a new_size if greater then the\n     * current size, then the array is made larger and the new elements are\n     * not initialized to anything. If \\a new_size is the same as the current\n     * size, then nothing happens.\n     *\n     * The array's original contents are preserved regardless of whether the \n     * array's size is either increased or decreased.\n     */\n \n\t//if new_size is greater than cur_size_\n\tif (new_size > this->max_size_)\n\t{\n\t\t//then the array is made larger and the new elements are not initialized to anything.\n\t\tthis->max_size_ = new_size;\n\t\t\t\n\t}\n\t//if they are the same, nothing changes\n\t\n\t//if new_size is less than the current size, throw an exception\n\tif (new_size < max_size_)\n\t{\n\t\tthrow \"Resize can only make larger. Use shrink method. \";\n\t}\n\t\n}\n\nint Array::find (char ch) const\n{\n\t//Locate the specified character in the array. The index of the first\n\t//occurrence of the character is returned. If the character is not\n\t//found in the array, then -1 is returned.\n\t\n\t//for every index in array\n\tfor(int x = 0; x < this->max_size_;x++)\n\t{\n\t\t//if the contents of the index is equal to value of ch, then return that index\n\t\tif (this->data_[x] == ch)\n\t\t{\n\t\t\treturn x;\n\t\t\t\n\t\t}\n\t\t\n\t}\n\t//character was not in the array, return -1\n\treturn -1;\n\t\n}\n\nint Array::find (char ch, size_t start) const\n{\n\t//Comment changes made\n\t\n\t//@overload\n\t\n\t//This version allows you to specify the start index of the search. If\n\t//the start index is not within the range of the array, then the\n\t//std::out_of_range exception is thrown.\n\t\n\t//if start is out of index throw exception\n\tif (start > this->max_size_ || start < 0)\n\t{\n\t\tthrow \"Start index is out of bounds. 
\";\n\t\t\n\t}\n\t\n\t//for indexes from start to end of array\n\tfor(int x = start; x < this->max_size_; x++)\n\t{\n\t\t//if ch is at that index then return it\n\t\tif (this->data_[x] = ch)\n\t\t{\n\t\t\treturn x;\n\t\t\t\n\t\t}\n\t\telse\n\t\t{\n\t\t\treturn -1;\n\t\t\t\n\t\t}\n\t\t\n\t}\n\t\n}\n\nbool Array::operator == (const Array & rhs) const\n{\n\t//Comment changes made\n\t\n\t//Test the array for equality.\n\t\n\t//check for if the sizes are the same\n\tif (&rhs != this)\n\t{\n\t\treturn false;\n\t}\n\t\n\t//if contents are the same\n\tfor (int i = 0; i < this->max_size_;i++)\n\t{\n\t\tif (this->data_[i] != rhs.data_[i])\n\t\t{\n\t\t\t\n\t\t\treturn false;\n\t\t\t\n\t\t}\n\t}\n\t//if conditionals return false then the arrays are equal\n\treturn true;\n}\n\nbool Array::operator != (const Array & rhs) const\n{\n\t//Comment changes made\n\t\n\t//Test the array for inequality.\n\t\t\n\t//check for if the sizes are the same\n\tif (&rhs == this)\n\t{\n\t\treturn false;\n\t}\n\t\n\t//if contents are the same\n\tfor(int i = 0; i < this->max_size_; i++)\n\t{\n\t\tif (this->data_[i] == rhs.data_[i])\n\t\t{\n\t\t\t//return false\n\t\t\treturn false;\n\t\t\t\n\t\t}\n\t}\n\t\n\treturn true;\n}\n\nvoid Array::fill (char ch)\n{\n\t//Comment changes made\n\t\n\t//Fill the contents of the array.\n\t\n\t//for every index in array\n\tfor(int x = 0; x < this->max_size_; x++)\n\t{\n\t\t//the index holds the ch value \n\t\tthis->data_[x] = ch;\n\t\t\n\t}\n\t\n\t\n}\n\nvoid Array::shrink (void)\n{\n\t//Comment changes made\n\t\n\t//Shrink the array to reclaim unused space.\n\t\n\t\n\tif(this->cur_size_ < this->max_size_)\n\t{\n\t\tthis->max_size_ = this->cur_size_;\n\t}\n\t//if its the same then do nothing\n\t\t\n}\n\nvoid Array::reverse (void)\n{\n\t//Reverse the contents of the array such that the first element is now\n\t// the last element and the last element is the first element.\n\t\n\t//create begin pointer that points to the first data element\n\tint begin = 0;\n\t\n\t//create end pointer that points to the last data element\n\tint end = this->max_size_ -1;\n\t\n\t\t\n\t//while the begin index is less than the end index\n\twhile (begin < end)\n\t{\n\t\t//swap begin and end index, then increment begin and decrement end to move to the next indexes to swap\n\t\t\n\t\t//pointer t holds begin\n\t\tchar t = this->data_[begin];\n\t\t//begin then holds end\n\t\tthis->data_[begin] = this->data_[end];\n\t\t//end holds t\n\t\tthis->data_[end] = t;\n\t\t//swap completed\n\t\t\t\t\n\t\t//increment begin moves to next value\n\t\tbegin++;\n\t\t\n\t\t//decrement end moves to previous value\n\t\tend--;\n\t\t\n\t}\n\t\n}\n\nArray Array::slice (size_t begin) const\n{\n\t/*\n\tThe slice() method returns a shallow copy of a portion of an array into\n\ta new array object selected from begin to end (end not included). 
The original\n\tarray will not be modified.\n\t*/\n\t\n\t//var used for keeping track of indexes and inserting elements in to correct spot in new array object\n\tint newIndex = 0;\n\tint newSize = this->max_size_ - begin;\n\t// create new array object, with size being current size of original array minus the number of beginning index\n\tArray newArray(newSize);\n\t\n\t//Take portion of the array from begin to end of the array and store in the new array object\n\t\n\t//for (indexes from begin to end of old array)\n\tfor(int i = begin; i < this->max_size_; i++)\n\t{\n\t\t//store contents in new array\n\t\t\n\t\t//create pointer that points to address of the new array\n\t\tchar newVal = this->data_[i];\n\t\t\n\t\t//this pointer then holds the content of the index of old array\n\t\tnewArray.data_[newIndex] = newVal;\n\t\tnewIndex++;\n\t\t\n\t}\n\t\n\t//return the new Array\n\treturn newArray;\n\t\t\n}\n\nArray Array::slice (size_t begin, size_t end) const\n{\n\t//overload - look at first slice method (slice from begin to end)\n\t\n\t//var used for keeping track of indexes and inserting elements in to correct spot in new array object\n\tint newIndex = 0;\n\t\n\tint newSize = ((end - begin) + 1);\n\t// create new array object \n\tArray newArray(newSize);\n\t\n\t//Take portion of the array from begin to end of the array and store in the new array object\n\t\n\t//for (indexes from begin to end of old array)\n\tfor(int i = begin; i <= end; i++)\n\t{\n\t\t//store contents in new array\n\t\t\n\t\t//create pointer that points to address of the new array\n\t\tchar newVal = this->data_[i];\n\t\t\n\t\t//this pointer then holds the content of the index of old array\n\t\tnewArray.data_[newIndex] = newVal;\n\t\tnewIndex++;\n\t\t\n\t}\n\t\n\t//return the new Array\t\n\treturn newArray;\n}\n" }, { "alpha_fraction": 0.40468910336494446, "alphanum_fraction": 0.5178389549255371, "avg_line_length": 19.799999237060547, "blob_id": "4d74e4baaff4125fb10094b68965d1098a8682c8", "content_id": "4b59e75df12141ddddd48441caaa490afd8583f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 981, "license_type": "no_license", "max_line_length": 43, "num_lines": 45, "path": "/Master Year 1/Computer Graphics/HW2/W.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\nimport renderer.scene.*;\r\n\r\n/**\r\n   A two-dimensional model of the letter W.\r\n*/\r\npublic class W extends Model\r\n{\r\n   /**\r\n      The letter W.\r\n   */\r\n   public W()\r\n   {\r\n      super(\"W\");\r\n\r\n      addVertex(new Vertex(0.00,1.00,0.0),\r\n\t\t\t\tnew Vertex(0.2, 0.00,0.0),\r\n\t\t\t\tnew Vertex(0.4, 0.00,0.0),\r\n\t\t\t\tnew Vertex(0.5, 0.5, 0.0),\r\n\t\t\t\tnew Vertex(0.6, 0.00,0.0),\r\n\t\t\t\tnew Vertex(0.8, 0.00,0.0),\r\n\t\t\t\tnew Vertex(1.00,1.00,0.0),\r\n\t\t\t\tnew Vertex(0.8, 1.00,0.0),\r\n\t\t\t\tnew Vertex(0.7, 0.5, 0.0),\r\n\t\t\t\tnew Vertex(0.5, 1.00,0.0),\r\n\t\t\t\tnew Vertex(0.3, 0.5, 0.0),\r\n\t\t\t\tnew Vertex(0.2, 1.00,0.0));\r\n\r\n      addLineSegment(new LineSegment(0,1),\r\n\t\t\t\t\t new LineSegment(1,2),\r\n\t\t\t\t\t new LineSegment(2,3),\r\n\t\t\t\t\t new LineSegment(3,4),\r\n\t\t\t\t\t new LineSegment(4,5),\r\n\t\t\t\t\t new LineSegment(5,6),\r\n\t\t\t\t\t new LineSegment(6,7),\r\n\t\t\t\t\t new LineSegment(7,8),\r\n\t\t\t\t\t new LineSegment(8,9),\r\n\t\t\t\t\t new LineSegment(9,10),\r\n\t\t\t\t\t new LineSegment(10,11),\r\n\t\t\t\t\t new LineSegment(11,0));\r\n   }\r\n}\r\n" }, { "alpha_fraction": 0.5208167433738708, "alphanum_fraction": 
0.5278353691101074, "avg_line_length": 35.09467315673828, "blob_id": "f3e96613a8d04e4f3ffa8fb6d448aaceeffdfe91", "content_id": "37fc59193129ea81573ae00be5e078eb993a6b92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 6269, "license_type": "no_license", "max_line_length": 81, "num_lines": 169, "path": "/Master Year 1/Computer Graphics/HW2/renderer/models/ObjSimpleModel.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.models;\r\nimport renderer.scene.*;\r\n\r\nimport java.util.Scanner;\r\nimport java.io.File;\r\nimport java.io.FileInputStream;\r\nimport java.io.IOException;\r\nimport java.io.FileNotFoundException;\r\nimport java.util.regex.*;\r\n\r\n/**\r\n<p>\r\n A simple demonstration of loading and drawing a basic OBJ file.\r\n<p>\r\n A basic OBJ file is a text file that contains three kinds of lines:\r\n lines that begin with the character {@code 'v'}, lines that begin\r\n with the character {@code 'f'}, and lines that begin with the\r\n character {@code '#'}.\r\n<p>\r\n A line in an OBJ file that begins with {@code '#'} is a comment line\r\n and can be ignored.\r\n<p>\r\n A line in an OBJ file that begins with {@code 'v'} is a line that\r\n describes a vertex in 3-dimensional space. The {@code 'v'} will always\r\n be followed on the line by three doubles, the {@code x}, {@code y},\r\n and {@code z} coordinates of the vertex.\r\n<p>\r\n A line in an OBJ file that begins with {@code 'f'} is a line that\r\n describes a \"face\". The {@code 'f'} will be followed on the line by\r\n a sequence of positive integers. The integers are the indices of the\r\n vertices that make up the face. The \"index\" of a vertex is the order\r\n in which the vertex was listed in the OBJ file. So a line like this\r\n<pre>{@code\r\n f 2 4 1\r\n}</pre>\r\n would represent a triangle made up of the 2nd vertex read from the file,\r\n the 4th vertex read from the file, and the 1st vertex read from the file.\r\n And a line like this\r\n<pre>{@code\r\n f 2 4 3 5\r\n}</pre>\r\n would represent a quadrilateral made up of the 2nd vertex read from the file,\r\n the 4th vertex read from the file, the 3rd vertex read from the file, and\r\n the 5th vertex read from the file.\r\n<p>\r\n See <a href=\"https://en.wikipedia.org/wiki/Wavefront_.obj_file\" target=\"_top\">\r\n https://en.wikipedia.org/wiki/Wavefront_.obj_file</a>\r\n*/\r\npublic class ObjSimpleModel extends Model\r\n{\r\n /**\r\n Create a wireframe model from the contents of an OBJ file.\r\n\r\n @param objFile {@link File} object for the OBJ data file\r\n */\r\n public ObjSimpleModel(final File objFile)\r\n {\r\n super(\"OBJ Model\");\r\n\r\n // Open the OBJ file.\r\n String objName = null;\r\n FileInputStream fis = null;\r\n try\r\n {\r\n objName = objFile.getCanonicalPath();\r\n fis = new FileInputStream( objFile );\r\n }\r\n catch (FileNotFoundException e)\r\n {\r\n e.printStackTrace(System.err);\r\n System.err.printf(\"ERROR! Could not find OBJ file: %s\\n\", objName);\r\n System.exit(-1);\r\n }\r\n catch (IOException e)\r\n {\r\n e.printStackTrace(System.err);\r\n System.err.printf(\"ERROR! 
Could not open OBJ file: %s\\n\", objName);\r\n System.exit(-1);\r\n }\r\n\r\n this.name = objName;\r\n\r\n // Get the geometry from the OBJ file.\r\n try\r\n {\r\n // Pattern for parsing lines that start with \"f\"\r\n final Pattern p = Pattern.compile(\"^(\\\\d*)[/]?(\\\\d*)[/]?(\\\\d*)\");\r\n\r\n final Scanner scanner = new Scanner(fis);\r\n while ( scanner.hasNext() )\r\n {\r\n final String token = scanner.next();\r\n if ( token.startsWith(\"#\")\r\n || token.startsWith(\"vt\")\r\n || token.startsWith(\"vn\")\r\n || token.startsWith(\"s\")\r\n || token.startsWith(\"g\")\r\n || token.startsWith(\"o\")\r\n || token.startsWith(\"usemtl\")\r\n || token.startsWith(\"mtllib\") )\r\n {\r\n scanner.nextLine(); // skip over these lines\r\n }\r\n else if ( token.startsWith(\"v\") )\r\n {\r\n final double x = scanner.nextDouble();\r\n final double y = scanner.nextDouble();\r\n final double z = scanner.nextDouble();\r\n addVertex( new Vertex(x, y, z) );\r\n }// parse vertex\r\n else if ( token.startsWith(\"f\") )\r\n {\r\n // tokenize the rest of the line\r\n final String restOfLine = scanner.nextLine();\r\n final Scanner scanner2 = new Scanner( restOfLine );\r\n // parse three vertices and make two line segments\r\n final int[] vIndex = new int[3];\r\n for (int i = 0; i < 3; ++i)\r\n {\r\n // parse a \"v/vt/vn\" group\r\n final String faceGroup = scanner2.next();\r\n final Matcher m = p.matcher( faceGroup );\r\n if ( m.find() )\r\n {\r\n vIndex[i] = Integer.parseInt(m.group(1)) - 1;\r\n final String vt = m.group(2); // don't need\r\n final String vn = m.group(3); // don't need\r\n }\r\n else\r\n System.err.println(\"Error: bad face: \" + faceGroup);\r\n }\r\n addLineSegment(new LineSegment(vIndex[0], vIndex[1]),\r\n new LineSegment(vIndex[1], vIndex[2]));\r\n\r\n // parse another vertex (if there is one) and make a line segment\r\n while (scanner2.hasNext())\r\n {\r\n vIndex[1] = vIndex[2];\r\n final String faceGroup = scanner2.next();\r\n final Matcher m = p.matcher( faceGroup );\r\n if ( m.find() )\r\n {\r\n vIndex[2] = Integer.parseInt(m.group(1)) - 1;\r\n final String vt = m.group(2); // don't need\r\n final String vn = m.group(3); // don't need\r\n }\r\n else\r\n System.err.println(\"Error: bad face: \" + faceGroup);\r\n\r\n addLineSegment(new LineSegment(vIndex[1], vIndex[2]));\r\n }\r\n // close the line loop around this face\r\n addLineSegment(new LineSegment(vIndex[2], vIndex[0]));\r\n }// parse face\r\n }// parse one line\r\n fis.close();\r\n }\r\n catch (Exception e)\r\n {\r\n e.printStackTrace(System.err);\r\n System.err.printf(\"ERROR! 
Could not read OBJ file: %s\\n\", objName);\r\n         System.exit(-1);\r\n      }\r\n   }\r\n}//ObjSimpleModel\r\n" }, { "alpha_fraction": 0.6611418128013611, "alphanum_fraction": 0.6648250222206116, "avg_line_length": 16.79310417175293, "blob_id": "7abdc243907c05ced421c20b86fac8c680de92a7", "content_id": "82a6bce6890cc35959eacf37850d4e48d47c0a76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 543, "license_type": "no_license", "max_line_length": 59, "num_lines": 29, "path": "/Year 3/Assignment 4/Multiply_Expr_Node.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n\r\n\r\n\r\n#ifndef _MULTIPLY_EXPR_NODE\r\n#define _MULTIPLY_EXPR_NODE\r\n\r\n#include \"Binary_Expr_Node.h\"\r\n#include \"Expr_Node_Visitor.h\"\r\n\r\nclass Multiply_Expr_Node : public Binary_Expr_Node\r\n{\r\n\tpublic:\r\n\t\tMultiply_Expr_Node(void);\r\n\t\tvirtual ~Multiply_Expr_Node(void);\r\n\t\t\r\n\t\t//does multiplication on operands\r\n\t\tvirtual int calculate(int num1, int num2);\r\n\t\t\r\n\t\t//visits the node\r\n\t\tvirtual void accept (Expr_Node_Visitor & v);\r\n\t\r\n};\r\n#endif" }, { "alpha_fraction": 0.6969696879386902, "alphanum_fraction": 0.6969696879386902, "avg_line_length": 19.18181800842285, "blob_id": "ad976e64981a03b1db27ee2e25b3c2f15cb42510", "content_id": "6d0c852965f1a07436d5592ec44fc1be71d0267e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 231, "license_type": "no_license", "max_line_length": 60, "num_lines": 11, "path": "/Year 3/Assignment3/Expr_Command_Factory.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n#include \"Expr_Command_Factory.h\"\r\n\r\nExpr_Command_Factory::~Expr_Command_Factory (void)\r\n{\r\n\t//pure virtual destructor\r\n}" }, { "alpha_fraction": 0.6485943794250488, "alphanum_fraction": 0.6566265225410461, "avg_line_length": 16.370370864868164, "blob_id": "c8938f023e06720ed6ae852d4dff80d002ca7c2c", "content_id": "43a8ce19e6d54804367802553c67285664cb7be4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 498, "license_type": "no_license", "max_line_length": 59, "num_lines": 27, "path": "/Year 3/Assignment 4/Modulus_Expr_Node.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n#include \"Modulus_Expr_Node.h\"\r\n\r\nModulus_Expr_Node::Modulus_Expr_Node(void)\r\n{\r\n\t//constructor\r\n}\r\nModulus_Expr_Node::~Modulus_Expr_Node(void)\r\n{\r\n\t//destructor\r\n}\r\n\t\t\r\nint Modulus_Expr_Node::calculate(int num1, int num2)\r\n{\r\n\t//return the remainder of dividing the two numbers\r\n\treturn num1 % num2;\r\n}\r\n\r\nvoid Modulus_Expr_Node::accept (Expr_Node_Visitor & v)\r\n{\r\n\tv.Visit_Modulus_Node (*this);\r\n}\r\n\r\n" }, { "alpha_fraction": 0.625806450843811, "alphanum_fraction": 0.6362903118133545, "avg_line_length": 26.55555534362793, "blob_id": "76bf0784694f9c0d034f667ce3008dd705a220db", "content_id": "a4a2dbba057f09e4d7b6d109918757f7c919784f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1240, "license_type": "no_license", 
"max_line_length": 85, "num_lines": 45, "path": "/Master Year 1/Computer Graphics/HW3/renderer/scene/LineSegment.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\n\n*/\n\npackage renderer.scene;\n\n/**\n A {@code LineSegment} object has two integers that\n represent the endpoints of the line segment. The\n integers are indices into the {@link Vertex}\n {@link java.util.List} of a {@link Model} object.\n*/\npublic final class LineSegment\n{\n public final int[] index = new int[2]; // indices for this line segment's vertices\n\n /**\n Construct a {@code LineSegment} object with two integer indices.\n <p>\n NOTE: This method does not put any {@link Vertex} objects into\n this {@code LineSegment}'s {@link Model}. This method assumes\n that the given {@link Vertex} indices are valid (or will be\n valid by the time this {@code LineSegment} gets rendered).\n\n @param i0 index of 1st endpoint {@link Vertex} of the new {@code LineSegment}\n @param i1 index of 2nd endpoint {@link Vertex} of the new {@code LineSegment}\n */\n public LineSegment(final int i0, final int i1)\n {\n index[0] = i0;\n index[1] = i1;\n }\n\n\n /**\n For debugging.\n\n @return {@link String} representation of this {@code LineSegment} object\n */\n @Override\n public String toString()\n {\n return \"Line Segment: (\" + index[0] + \", \" + index[1] + \")\\n\";\n }\n}\n" }, { "alpha_fraction": 0.7088235020637512, "alphanum_fraction": 0.7088235020637512, "avg_line_length": 28.909090042114258, "blob_id": "0bd90a27ebb93334af4dc467f947c4ac286a2a54", "content_id": "2d70cd023ced62907ab5a9863501bae3ec107ce9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 340, "license_type": "no_license", "max_line_length": 82, "num_lines": 11, "path": "/Year 4/QRcodeSubmission/QRCodeGenerator/matrixfun.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#ifndef MATRIXFUN\r\n#define MATRIXFUN\r\n\r\n#include <string>\r\n\r\nint **makeMatrix(std::string data, int version);\r\nvoid addFinder(int **matrix, int **patterns, int cornerx, int cornery);\r\nvoid setInfoAreas(int **matrix, int **patterns, int dimval);\r\nvoid layoutData(int **matrix, int **patterns, std::string data, int dim, int ver);\r\n\r\n#endif\r\n" }, { "alpha_fraction": 0.6305220723152161, "alphanum_fraction": 0.6439089775085449, "avg_line_length": 17.657894134521484, "blob_id": "8f0f30758e5dbdc968dcf3118d7b7087f27f6468", "content_id": "6f6c6d882d1260f5eea56253fddc0d49512b0821", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 747, "license_type": "no_license", "max_line_length": 94, "num_lines": 38, "path": "/Year 3/Assignment3/Division_Command.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n#include \"Division_Command.h\"\r\n\r\nDivision_Command::Division_Command(Stack <int> &s):\r\nBinary_Op_Command(s),\r\nprecedence(3)\r\n{\r\n\t//constructor\r\n}\r\n\r\nDivision_Command::~Division_Command(void)\r\n{\r\n\t//destructor \r\n}\r\n\r\nint Division_Command::evaluate (int n1, int n2) const\r\n{\r\n // COMMENT: You are not handling divide by zero.\r\n \r\n //RESPONSE: I will check if either of the ints are zero and if so I will throw an exception.\r\n \r\n\tif (n1 == 0 || n2 == 0)\r\n\t{\r\n\t\tthrow (\"Can't divide by 0. 
\");\r\n\t\t\r\n\t}\r\n\telse\r\n\t\t//return result of dividing integers\r\n\t\treturn n1 / n2;\r\n}\r\n\r\nint Division_Command::prec (void) const\r\n{\r\n\treturn precedence;\r\n}\r\n" }, { "alpha_fraction": 0.5395392179489136, "alphanum_fraction": 0.5541718602180481, "avg_line_length": 30.44444465637207, "blob_id": "f74d37b2de61cdf9148bfddd4f3233a99a9a376a", "content_id": "13774e1e889b814a31bc5230535ebf1bf1355fde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3212, "license_type": "no_license", "max_line_length": 73, "num_lines": 99, "path": "/Master Year 1/Computer Graphics/HW3/renderer/models/Axes2D.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.models;\r\nimport renderer.scene.*;\r\n\r\n/**\r\n Create an x and y axis in the xy-plane, along with \"tick marks\".\r\n*/\r\npublic class Axes2D extends Model\r\n{\r\n /**\r\n Create an x and y axis from -1 to +1 on each axis.\r\n */\r\n public Axes2D( )\r\n {\r\n this(-1, 1, -1, 1, 5, 5, 0.0);\r\n }\r\n\r\n\r\n /**\r\n Create an x-axis from {@code xMin} to {@code xMax}\r\n and a y-axis from {@code yMin} to {@code yMax}.\r\n\r\n @param xMin left end point for the x-axis\r\n @param xMax right end point for the x-axis\r\n @param yMin bottom end point for the y-axis\r\n @param yMax top end point for the y-axis\r\n @param xMarks number of evenly spaced tick marks on the x-axis\r\n @param yMarks number of evenly spaced tick marks on the y-axis\r\n */\r\n public Axes2D(final double xMin, final double xMax,\r\n final double yMin, final double yMax,\r\n final int xMarks, final int yMarks)\r\n {\r\n this(xMin, xMax, yMin, yMax, xMarks, yMarks, 0.0);\r\n }\r\n\r\n\r\n /**\r\n Create an x-axis from {@code xMin} to {@code xMax}\r\n and a y-axis from {@code yMin} to {@code yMax}.\r\n <p>\r\n The {@code z} parameter is so that we can put the axis just above\r\n or just below the xy-plane (say {@code z=0.01} or {@code z=-0.01}).\r\n This way, the axes can be just in front of or just behind whatever\r\n is being drawn in the xy-plane.\r\n\r\n @param xMin left end point for the x-axis\r\n @param xMax right end point for the x-axis\r\n @param yMin bottom end point for the y-axis\r\n @param yMax top end point for the y-axis\r\n @param xMarks number of evenly spaced tick marks on the x-axis\r\n @param yMarks number of evenly spaced tick marks on the y-axis\r\n @param z offset of the axes away from the xy-plane\r\n */\r\n public Axes2D(final double xMin, final double xMax,\r\n final double yMin, final double yMax,\r\n final int xMarks, final int yMarks,\r\n final double z)\r\n {\r\n super(\"Axes 2D\");\r\n\r\n // x-axis\r\n addVertex(new Vertex(xMin, 0, z),\r\n new Vertex(xMax, 0, z));\r\n addLineSegment(new LineSegment(0, 1));\r\n\r\n // y-axis\r\n addVertex(new Vertex(0, yMin, z),\r\n new Vertex(0, yMax, z));\r\n addLineSegment(new LineSegment(2, 3));\r\n\r\n int index = 4;\r\n\r\n // Put evenly spaced tick marks on the x-axis.\r\n double xDelta = (xMax - xMin)/xMarks;\r\n double yDelta = (yMax - yMin)/50;\r\n for (double x = xMin; x <= xMax; x += xDelta)\r\n {\r\n addVertex(new Vertex(x, yDelta/2, z),\r\n new Vertex(x, -yDelta/2, z));\r\n addLineSegment(new LineSegment(index+0, index+1));\r\n index += 2;\r\n }\r\n\r\n // Put evenly spaced tick marks on the y-axis.\r\n yDelta = (yMax - yMin)/yMarks;\r\n xDelta = (xMax - xMin)/50;\r\n for (double y = yMin; y <= yMax; y += yDelta)\r\n {\r\n addVertex(new Vertex( xDelta/2, y, z),\r\n new 
Vertex(-xDelta/2, y, z));\r\n addLineSegment(new LineSegment(index+0, index+1));\r\n index += 2;\r\n }\r\n }\r\n}//Axes2D\r\n" }, { "alpha_fraction": 0.558943510055542, "alphanum_fraction": 0.5595877170562744, "avg_line_length": 20.389423370361328, "blob_id": "673784cc810e6c37f0806ddd30d3e560e19149d8", "content_id": "d8917d64e7e18bc1887f1041d93f849f7b6210dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4657, "license_type": "no_license", "max_line_length": 148, "num_lines": 208, "path": "/Year 3/Assignment3/Calculator.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//Calculator Class\r\n\r\n//source file\r\n\r\n#include \"Calculator.h\"\r\n\r\n\r\nCalculator::Calculator()\r\n{\r\n\t//constructor\r\n}\r\n\r\nCalculator::~Calculator()\r\n{\r\n\t//destructor\r\n}\r\n\r\nvoid Calculator::postfix_eval(Array <Expr_Command *> & postfix, Expr_Command_Factory & fact)\r\n{\r\n\t//take contents of postfix and put evaluate using the Stack.\r\n\t//loop through array, and execute each command in array\r\n\t\r\n\tfor (int i = 0; i < postfix.size(); i++)\r\n\t{\r\n\t\tpostfix[i]->execute();\r\n\t}\r\n\t\r\n\t//STDOUT\r\n\tstd::cout << fact.answer() << std::endl;\r\n}\r\nvoid Calculator::infix_to_postfix(const std::string & infix, Expr_Command_Factory & factory, Array <Expr_Command *> & postfix, Stack <int> & result)\r\n{\r\n\t//create stream parser\r\n\tstd::istringstream input(infix);\r\n\t\r\n\t//current token\r\n\tstd::string token;\r\n\t\r\n\t//create Command object\r\n\tExpr_Command * cmd = 0;\r\n\t\r\n\tStack <Expr_Command *> temp; //temporarily holds operators\r\n\t\r\n\t// used for indexing\r\n\tint i = 0;\r\n\t\r\n\t//while the input is being read, \r\n\twhile (!input.eof())\r\n\t{\r\n\t\t// COMMENT You are not handling parenthesis.\r\n\t\t//RESPONSE: I added statements that handle the parenthesis\r\n\t\t\r\n\t\t//read each token from input\r\n\t\tinput >> token;\r\n\t\t\r\n\t\t\t///if the token is a add operator \r\n\t\t\tif (token == \"+\")\r\n\t\t\t{\r\n\t\t\t\t//create add command\r\n\t\t\t\tcmd = factory.create_add_command ();\r\n\t\t\t\t\r\n\t\t\t\t// if temp has a command of higher precedence. then remove that first and add to postfix\r\n\t\t\t\tif(!temp.is_empty() && temp.top()->prec() >= cmd->prec() )\r\n\t\t\t\t{\r\n\t\t\t\t\tpostfix.set(i,temp.top());\r\n\t\t\t\t\ttemp.pop();\r\n\t\t\t\t\ti++;\r\n\t\t\t\t\t\r\n\t\t\t\t}\r\n\t\t\t\t\r\n\t\t\t\t//push command to stack\r\n\t\t\t\ttemp.push(cmd);\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t///if the token is a subtraction operator \r\n\t\t\telse if (token == \"-\")\r\n\t\t\t{\r\n\t\t\t\t//create command\r\n\t\t\t\tcmd = factory.create_subtract_command();\r\n\t\t\t\t\r\n\t\t\t\t// if temp has a command of higher precedence. then remove that first and add to postfix\r\n\t\t\t\tif(!temp.is_empty() && temp.top()->prec() >= cmd->prec() )\r\n\t\t\t\t{\r\n\t\t\t\t\tpostfix.set(i,temp.top());\r\n\t\t\t\t\ttemp.pop();\r\n\t\t\t\t\ti++;\r\n\t\t\t\t\t\r\n\t\t\t\t}\r\n\t\t\t\ttemp.push(cmd);\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t///if the token is a multiplication operator \r\n\t\t\telse if (token == \"*\")\r\n\t\t\t{\r\n\t\t\t\t//create command\r\n\t\t\t\tcmd = factory.create_multiply_command();\r\n\t\t\t\t\r\n\t\t\t\t// if temp has a command of higher precedence. 
then remove that first and add to postfix\r\n\t\t\t\tif(!temp.is_empty() && temp.top()->prec() >= cmd->prec() )\r\n\t\t\t\t{\r\n\t\t\t\t\tpostfix.set(i,temp.top());\r\n\t\t\t\t\ttemp.pop();\r\n\t\t\t\t\ti++;\r\n\t\t\t\t\t\r\n\t\t\t\t}\r\n\t\t\t\ttemp.push(cmd);\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t///if the token is a multiplication operator \r\n\t\t\telse if (token == \"*\")\r\n\t\t\t{\r\n\t\t\t\t//create command\r\n\t\t\t\tcmd = factory.create_multiply_command();\r\n\t\t\t\t\r\n\t\t\t\t// if temp has a command of higher precedence. then remove that first and add to postfix\r\n\t\t\t\tif(!temp.is_empty() && temp.top()->prec() >= cmd->prec() )\r\n\t\t\t\t{\r\n\t\t\t\t\tpostfix.set(i,temp.top());\r\n\t\t\t\t\ttemp.pop();\r\n\t\t\t\t\ti++;\r\n\t\t\t\t\t\r\n\t\t\t\t}\r\n\t\t\t\t\ttemp.push(cmd);\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t///if the token is a division operator \r\n\t\t\telse if (token == \"/\")\r\n\t\t\t{\r\n\t\t\t\t//create division command\r\n\t\t\t\tcmd = factory.create_division_command();\r\n\t\t\t\t\r\n\t\t\t\t// if temp has a command of higher precedence. then remove that first and add to postfix\r\n\t\t\t\tif(!temp.is_empty() && temp.top()->prec() >= cmd->prec() )\r\n\t\t\t\t{\r\n\t\t\t\t\tpostfix.set(i,temp.top());\r\n\t\t\t\t\ttemp.pop();\r\n\t\t\t\t\ti++;\r\n\t\t\t\t\t\r\n\t\t\t\t}\r\n\t\t\t\ttemp.push(cmd);\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t///if the token is a modulus operator \r\n\t\t\telse if (token == \"%\")\r\n\t\t\t{\r\n\t\t\t\t//create modulus command\r\n\t\t\t\tcmd = factory.create_modulus_command();\r\n\t\t\t\t\r\n\t\t\t\t// if temp has a command of higher precedence. then remove that first and add to postfix\r\n\t\t\t\tif(!temp.is_empty() && temp.top()->prec() >= cmd->prec() )\r\n\t\t\t\t{\r\n\t\t\t\t\tpostfix.set(i,temp.top());\r\n\t\t\t\t\ttemp.pop();\r\n\t\t\t\t\ti++;\r\n\t\t\t\t\t\r\n\t\t\t\t}\r\n\t\t\t\ttemp.push(cmd);\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t//else if the token is a left parenthesis\r\n\t\t\telse if (token == \"(\")\r\n\t\t\t{\r\n\t\t\t\t// push onto stack (create parenthesis command)\r\n\t\t\t\tcmd = factory.create_parenthesis_command();\r\n\t\t\t\ttemp.push(cmd);\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t//else if token is right parenthesis\r\n\t\t\telse if (token == \")\")\r\n\t\t\t{\r\n\t\t\t\t//while top of temp doesn't equal parenthesis command\r\n\t\t\t\twhile(!temp.is_empty() && temp.top() != factory.create_parenthesis_command())\r\n\t\t\t\t{\r\n\t\t\t\t\t//remove objects from temp and add to \r\n\t\t\t\t\tpostfix.set(i, temp.top());\r\n\t\t\t\t\ttemp.pop();\r\n\t\t\t\t\tstd::cout << postfix.size() << std::endl;\r\n\t\t\t\t\ti++;\r\n\t\t\t\t\t\r\n\t\t\t\t}\r\n\t\t\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t//it must be a number command so add to postfix\r\n\t\t\telse\r\n\t\t\t{\r\n\t\t\t\tint num = std::stoi(token);\r\n\t\t\t\t\r\n\t\t\t\tcmd = factory.create_num_command(num);\r\n\t\t\t\tpostfix.set(i,cmd);\r\n\t\t\t\ti++;\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\r\n\t}\r\n\t\r\n\t//if temp is not empty return top element and add to postfix then pop\r\n\t//I want this to be the last thing done after the last thing is read in.\r\n\tif (input.eof())\r\n\t{\r\n\t\twhile (!temp.is_empty())\r\n\t\t{\r\n\t\t\tpostfix.set(i,temp.top());\r\n\t\t\ttemp.pop();\r\n\t\t\ti++;\r\n\t\t}\r\n\t}\r\n\t\r\n\t\r\n\t//call evaluation function\r\n\t\r\n\tpostfix_eval(postfix, factory);\r\n\t\r\n\t\t\r\n}\r\n" }, { "alpha_fraction": 0.4461994171142578, "alphanum_fraction": 0.4718657433986664, "avg_line_length": 23.325000762939453, "blob_id": "1e498c8d57fa81268a643c23831a31ebedb982e", "content_id": "25fc123e46acfe8361f8bea8966be20ed382966c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1013, "license_type": "no_license", "max_line_length": 61, "num_lines": 40, "path": "/Master Year 2/Operating Systems/HW3/hw3/filters/twiddle.c", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n   This program reads two characters at a time from\r\n   standard input, and then writes the two characters\r\n   to standard output in their reverse 
\r\n When using Windows, if standard input is the console\r\n keyboard, use ^z (Control-z) to denote the end of file\r\n (and you must use ^z at the beginning of a line!).\r\n*/\r\n#include <stdio.h>\r\n\r\nint main()\r\n{\r\n char c1 = 1, c2 = 1;\r\n while( c2 != EOF && (c1 = getchar()) != EOF )\r\n {\r\n if ( c1 == 10 || c1 == 13 ) // don't twiddle LF or CR\r\n {\r\n printf(\"%c\", c1);\r\n fflush(stdout); // try commenting this out\r\n }\r\n else if ( (c2 = getchar()) != EOF )\r\n {\r\n if ( c2 == 10 || c2 == 13 ) // don't twiddle LF or CR\r\n {\r\n printf(\"%c%c\", c1, c2);\r\n fflush(stdout); // try commenting this out\r\n }\r\n else\r\n {\r\n printf(\"%c%c\", c2, c1);\r\n }\r\n }\r\n else\r\n {\r\n printf(\"%c\", c1);\r\n }\r\n }\r\n return 0;\r\n}\r\n" }, { "alpha_fraction": 0.35361841320991516, "alphanum_fraction": 0.38322368264198303, "avg_line_length": 15.88888931274414, "blob_id": "bfb388e13bdbb50ad4d4e3f2a737091d6c515586", "content_id": "c417442a60df543f77f16583a4f9e5a1977eefba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 608, "license_type": "no_license", "max_line_length": 42, "num_lines": 36, "path": "/Year 4/QRcodeSubmission/QRCodeGenerator/createPNG.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#include <QImage>\n#include <iostream>\n\nvoid makeImage(int **code, int v)\n{\n QSize size;\n if (v == 1)\n {\n size.setWidth(29);\n size.setHeight(29);\n }\n else\n {\n size.setWidth(33);\n size.setHeight(33);\n }\n\n QImage img(size, QImage::Format_Mono);\n\n // iterate over every module of the selected size (29 or 33), not a fixed 29\n for (int i = 0; i < size.height(); i++)\n {\n for (int j = 0; j < size.width(); j++)\n {\n if (code[i][j] >= 1)\n {\n img.setPixel(j, i, 0);\n }\n else\n {\n img.setPixel(j, i, 1);\n }\n }\n }\n\n img.save(\"QR.png\");\n}\n" }, { "alpha_fraction": 0.6037099361419678, "alphanum_fraction": 0.6172006726264954, "avg_line_length": 15.323529243469238, "blob_id": "1a354870f57131ca02c22136358fe43189752fd1", "content_id": "3fcfe88cf8029bd71a6663793bb175132ba77ddc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 593, "license_type": "no_license", "max_line_length": 59, "num_lines": 34, "path": "/Year 3/Assignment 4/Division_Expr_Node.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n#include \"Division_Expr_Node.h\"\r\n\r\nDivision_Expr_Node::Division_Expr_Node(void)\r\n{\r\n\t//constructor\r\n}\r\nDivision_Expr_Node::~Division_Expr_Node(void)\r\n{\r\n\t//destructor\r\n}\r\n\t\t\r\nint Division_Expr_Node::calculate(int num1, int num2)\r\n{\r\n
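\t// note: dividing a zero numerator is legal; only a zero divisor must be rejected\r\n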
\");\r\n\t}\r\n\telse\r\n\t{\r\n\t\treturn num1 / num2;\r\n\t}\r\n}\r\n\r\nvoid Division_Expr_Node::accept (Expr_Node_Visitor & v)\r\n{\r\n\tv.Visit_Division_Node (*this);\r\n}\r\n\r\n\r\n" }, { "alpha_fraction": 0.6425600051879883, "alphanum_fraction": 0.650879979133606, "avg_line_length": 21.402984619140625, "blob_id": "ec0a2da9c7b34a23f087efcd2278c8ca529bd58b", "content_id": "c20d3a29370287bc20a44eb02c3bcc3e7b17ca20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3125, "license_type": "no_license", "max_line_length": 127, "num_lines": 134, "path": "/Year 3/composition-source/Queue.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n// Queue\r\n//\r\ntemplate <typename T>\r\nQueue<T>::Queue (void): \r\nfront(0), \r\nback(0)\r\n{\r\n\t//default constructor\r\n\t//other members initialized in base class\r\n\t\r\n\t//set values of members\r\n\tfront = -1;\r\n\tback = -1;\r\n\t\r\n}\r\n\r\n template <typename T>\r\nQueue<T>::Queue(const Queue & q):\r\nqueue(q.queue),\r\nfront(0), \r\nback(0)\r\n{\r\n\t//copy constructor\r\n\t//set members of this object to those of the q object\r\n\tfront = q.front;\r\n\tback = q.back;\r\n\t\r\n}\r\n\r\ntemplate <typename T>\r\nQueue<T>::~Queue(void)\r\n{\r\n\t//destructor called in base class\r\n}\r\n \r\ntemplate <typename T>\r\nvoid Queue<T>::enqueue(T element)\r\n{\r\n\t//Enqueue - insert element into queue\r\n\t\n // COMMENT The queue should grow if it out of space.\n\n // COMMENT The queue will fail if the array runs out of space. Since this\n // is an unboudned queue, you need to resize the array to make space for\n // new elements. Also, make sure you do not waste any space if you must\n // resize the array to accommodate new elements. This will require updates\n // to your dequeue method.\n \r\n //RESPONSE: I resize array to 10 more than the original size and removed\r\n //the else statement so after the resize it can go back to adding an element\r\n //to the stack without any problem and indexes properly for dequeue.\r\n \r\n \r\n\t//if queue is empty, resize the array by adding 10 more spaces. , else // increment back and insert data at rear\r\n\tif (back == queue.max_size() -1)\r\n\t{\r\n\t\tqueue.resize(queue.max_size() + 10);\r\n\t\t\r\n\t}\r\n\t//if front isn't in array then set it to 0 which is the first value that would be removed\r\n\tif (front == -1)\r\n\t{\r\n\t\tfront = 0;\r\n\t\t\r\n\t}\r\n\t//this value is incremented and put in the back of the queue\r\n\tback++;\r\n\tqueue.set(back, element);\r\n\t\r\n}\r\n\t\r\n\r\n \r\ntemplate <typename T>\r\nT Queue<T>::dequeue(void)\r\n{\r\n\t//Dequeue - delete element at the front of the queue\r\n\t//if queue is empty(front equals back), throw an exception, else just increment front\r\n\tif (is_empty())\r\n\t{\r\n\t\tthrow \"Queue is empty. 
\";\r\n\t\t\r\n\t}\r\n\t\r\n\t//if front and back have same value(pointing to last value), then set both to -1 because now it would be empty after this call\r\n\telse if (front == back)\r\n\t{\r\n\t\tfront = -1;\r\n\t\tback = -1;\r\n\t\t\r\n\t}\r\n\t//return data at front of the queue and then implement\t\r\n\telse\r\n\t{\r\n\t\treturn queue.get(front);\r\n\t\tfront++;\r\n\t\t\r\n\t}\r\n\t\r\n}\r\n \r\ntemplate <typename T>\r\nbool Queue<T>::is_empty(void)\r\n{\r\n\t//return true if front and back values are -1\r\n\treturn (this->front == -1 && this->back==-1);\r\n\t\r\n}\r\n\r\ntemplate <typename T>\r\nvoid Queue<T>::clear(void)\r\n{\n // COMMENT This works, but is a very expensive operation. You should find a\n // why to just reset the state without having the dequeue every element.\n \n // COMMENT Your queue will no compile if T cannot be assigned the value 0.\r\n \r\n //RESPONSE call array destructor to delete memory and then create an array newQueue and assign it to queue\n \r\n\tqueue.~Array<T>();\r\n\tArray<T> newQueue;\r\n\t\r\n\tqueue = newQueue;\r\n\t//reset front and back to value of \"-1\"\r\n\tthis->front = -1;\r\n\tthis->back = -1;\r\n\t\r\n}\r\n\r\n" }, { "alpha_fraction": 0.6641509532928467, "alphanum_fraction": 0.6641509532928467, "avg_line_length": 11.045454978942871, "blob_id": "f76ccce71b7a31864293f4ade7843e6eb755d940", "content_id": "5097e695556d8396ff2977efee7ab913b180ffb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 265, "license_type": "no_license", "max_line_length": 47, "num_lines": 22, "path": "/Year 4/QRcodeSubmission/QRCodeGenerator/qrpage.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#ifndef QRPAGE_H\n#define QRPAGE_H\n\n#include <QMainWindow>\n\nnamespace Ui {\nclass QRPage;\n}\n\nclass QRPage : public QMainWindow\n{\n Q_OBJECT\n\npublic:\n explicit QRPage(QWidget *parent = nullptr);\n ~QRPage();\n\nprivate:\n Ui::QRPage *ui;\n};\n\n#endif // QRPAGE_H\n" }, { "alpha_fraction": 0.7941681146621704, "alphanum_fraction": 0.8010291457176208, "avg_line_length": 33.29411697387695, "blob_id": "29f62d9b1880ce36571620e640beb802a390caee", "content_id": "138fa5953dde6b9fec7e009b751664075f2a2146", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 583, "license_type": "no_license", "max_line_length": 160, "num_lines": 17, "path": "/Master Year 1/README.txt", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "Algorithms and Data Structures\n Final Project: Implement knapsack recursive solution showing optimal substructure\n \nComputer Graphics\n Homeworks 1 - 4 (more detail in folder)\n \nStatistical computing\n Stats Project using R to analyze risk-associated cancers.\n\nDatabase Systems\n My Database Systems project assignment description is in the \"Design Document - The Internet Airline\" document with my source files in the Submission zip file\n\nObject Oriented Design\n Project description is in README.\n \nProgramming Languages and Compilers\n Homeworks 1 - 5 (more detail in folder)\n" }, { "alpha_fraction": 0.6231343150138855, "alphanum_fraction": 0.6231343150138855, "avg_line_length": 14.6875, "blob_id": "3fda7f9f9060216503071e27f8b9f80cc5e61693", "content_id": "bcec2d45389f6cb8b1db5fd1a057f2f2350de625", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 536, "license_type": "no_license", "max_line_length": 42, "num_lines": 32, "path": "/Year 2/Assignment 
#3/Player.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// \r\n//Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor \r\n// received any help on this assignment\r\n//.\r\n//\r\n// huntmk\r\n\r\n#include <iostream>\r\n#include <string>\r\n\r\n//create instance of class\r\nclass Player\r\n{\r\n\t//attributes of player\r\n\tprivate:\r\n\t\tstd::string fName;\r\n\t\tstd::string lName;\r\n\t\tint jerseyNum;\r\n\t \r\n\t\r\n\tpublic:\r\n\t\t//Default (void) Constructor\r\n\t\tPlayer();\r\n\t\t// add player to team\r\n\t\tvoid addPlayer();\r\n\t\t// print players on the team\r\n\t\tvoid showPlayer();\r\n\t\t//prints menu when called\r\n\t\tvoid printMenu();\r\n};\r\n\t\t" }, { "alpha_fraction": 0.5011106133460999, "alphanum_fraction": 0.5015548467636108, "avg_line_length": 23.579545974731445, "blob_id": "a9437fffdab79da4b797fe9f31ae568c22e6ba0a", "content_id": "498367bc64e6db10febe30c21a63f64ff8e575cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2251, "license_type": "no_license", "max_line_length": 80, "num_lines": 88, "path": "/Master Year 1/Programming Languages and Compilers/HW5/hw5/Value.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/**\r\n This class is a kind of \"tagged union\". Each instance of this class holds\r\n a value for our simple language. But our language has three kinds of values,\r\n ints, booleans, and functions. So each instance of this class holds both an\r\n int value, a boolean value, and a Tree value. The \"tag\" field is used to\r\n determine which of the three values that an instance holds, the int, boolean,\r\n or Tree, is valid.\r\n\r\n See also\r\n http://en.wikipedia.org/wiki/Tagged_union\r\n*/\r\n\r\npublic class Value\r\n{\r\n public String tag; // either \"int\" or \"bool\" or \"lambda\"\r\n public int valueI = 0;\r\n public boolean valueB = false;\r\n public Tree valueL = null;\r\n\r\n public static final String INT_TAG = \"int\";\r\n public static final String BOOL_TAG = \"bool\";\r\n public static final String LAMBDA_TAG = \"lambda\";\r\n\r\n /**\r\n Construct a Value object that holds an int value.\r\n */\r\n public Value(int value)\r\n {\r\n this.tag = INT_TAG;\r\n this.valueI = value;\r\n }\r\n\r\n /**\r\n Construct a Value object that holds a boolean value.\r\n */\r\n public Value(boolean value)\r\n {\r\n this.tag = BOOL_TAG;\r\n this.valueB = value;\r\n }\r\n\r\n /**\r\n Construct a Value object that holds a \"lambda value\".\r\n */\r\n public Value(Tree value)\r\n {\r\n this.tag = LAMBDA_TAG;\r\n this.valueL = value;\r\n }\r\n\r\n public String toString()\r\n {\r\n return \"[tag->\" + tag\r\n + \", valueI->\" + valueI\r\n + \", valueB->\" + valueB\r\n + \", valueL->\" + valueL\r\n + \"]\";\r\n }\r\n\r\n\r\n public String toSimpleString()\r\n {\r\n String result = \"\";\r\n\r\n if ( tag.equals(INT_TAG) )\r\n {\r\n result += valueI;\r\n }\r\n else if ( tag.equals(BOOL_TAG) )\r\n {\r\n result += valueB;\r\n }\r\n else if ( tag.equals(LAMBDA_TAG) )\r\n {\r\n result += valueL;\r\n }\r\n else // bad tag (shouldn't get here)\r\n {\r\n result += \"[tag->\" + tag\r\n + \", valueI->\" + valueI\r\n + \", valueB->\" + valueB\r\n + \", valueL->\" + valueL\r\n + \"]\";\r\n }\r\n\r\n return result;\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.5947955250740051, "alphanum_fraction": 0.5985130071640015, "avg_line_length": 12.94444465637207, "blob_id": "52955fd6f02195a180bd575d0d2a523f37b994dc", "content_id": 
"58139f9984e1d9ec676ce6924a073ac18e9e1dbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 269, "license_type": "no_license", "max_line_length": 54, "num_lines": 18, "path": "/Master Year 1/Programming Languages and Compilers/HW2/hw2/EvalException.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/**\r\n\r\n */\r\n\r\npublic class EvalException extends java.lang.Exception\r\n{\r\n private static final long serialVersionUID = 0;\r\n\r\n public EvalException()\r\n {\r\n super();\r\n }\r\n\r\n public EvalException(String errMessage)\r\n {\r\n super(errMessage);\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.6779904365539551, "alphanum_fraction": 0.6899521350860596, "avg_line_length": 31.91338539123535, "blob_id": "e90c3aa24a7d9dcda72c4368f7567465c4a19b58", "content_id": "527c3a81400da24f7ab1aa6cddcefb3b35ffa7f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4180, "license_type": "no_license", "max_line_length": 125, "num_lines": 127, "path": "/Year 4/csci487Group4Project-makingGraphs/main.py", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "from CityStructure.Intersection import Intersection, TrafficLight\nfrom CityStructure.Connection import Connection\nfrom Agents.Vehicles import Car\nfrom Utilities.Definitions import Orientation\nfrom CityStructure.City import City\n\ndef main():\n # while True:\n # action = input(\"Enter one character: \").upper()\n # if len(action) != 1:\n # print(\"Not a single character...\")\n # continue\n # else:\n # print(\"Input received\")\n # continue\n # testObjects()\n createFourStar()\n createOrangetown()\n createGreentown()\n \n\ndef createFourStar():\n fourstar = City(\"Four Star\")\n mainStreet = TrafficLight()\n fourstar.assign_connection_default(mainStreet, Orientation.Top)\n fourstar.assign_connection_default(mainStreet, Orientation.Right)\n fourstar.assign_connection_default(mainStreet, Orientation.Bottom)\n fourstar.assign_connection_default(mainStreet, Orientation.Left)\n\n\n fourstar.print_city_map()\n\ndef createGreentown():\n green = City(\"Greentown\")\n mainStreet = TrafficLight()\n t = green.assign_connection_default(mainStreet, Orientation.Top)\n r = green.assign_connection_default(mainStreet, Orientation.Right)\n b = green.assign_connection_default(mainStreet, Orientation.Bottom)\n l = green.assign_connection_default(mainStreet, Orientation.Left)\n green.assign_connection_default(t, Orientation.Left)\n i = green.assign_connection_default(t, Orientation.Right)\n i = green.assign_connection_default(i, Orientation.Right)\n s = green.assign_connection_default(i, Orientation.Right)\n r = green.assign_connection_default(s, Orientation.Bottom)\n p = green.assign_connection_default(r, Orientation.Bottom)\n l = green.assign_connection_default(p, Orientation.Right)\n l = green.assign_connection_default(l, Orientation.Right)\n l = green.assign_connection_default(l, Orientation.Top)\n s = green.assign_connection_default(s, Orientation.Right)\n s = green.assign_connection_default(s, Orientation.Right)\n green.assign_connection_default(s, Orientation.Top)\n s = green.assign_connection_default(s, Orientation.Bottom)\n\n\n green.print_city_map()\n\ndef createOrangetown():\n city = City(\"Orangetown\")\n tl1 = TrafficLight()\n tl2 = TrafficLight()\n tl3 = TrafficLight()\n tl4 = TrafficLight()\n tl5 = TrafficLight()\n city.assign_connection_default(tl1, Orientation.Top, tl2)\n 
city.assign_connection_default(tl1, Orientation.Bottom, tl3)\n city.assign_connection_default(tl1, Orientation.Left, tl4)\n city.assign_connection_default(tl1, Orientation.Right, tl5)\n city.assign_connection_default(tl4, Orientation.Left)\n city.assign_connection_default(tl4, Orientation.Top)\n city.assign_connection_default(tl4, Orientation.Bottom)\n city.assign_connection_default(22, Orientation.Right, TrafficLight())\n city.assign_connection_default(0, Orientation.Right)\n city.assign_connection_default(4, Orientation.Right)\n city.assign_connection_default(9, Orientation.Top)\n city.assign_connection_default(6, Orientation.Right, 1)\n\n\n\n # tl2.print_visual()\n # tl1.print_visual()\n # tl3.print_visual()\n \n city.print_city_map()\n\n # city.print_info()\n \n \n\n\n\n#tests Intersection,connection, and car creation. Also this tests the NumberGenerator singleton that gives us our unique ID's\ndef testObjects():\n intersection = TrafficLight()\n print(intersection.intersectionType)\n\n connection1 = Connection(\"connection1\", 50)\n print(connection1.uid)\n\n i = 0\n #connection ids should start at 1 at this point because one has already been created\n while(i < 10):\n connection = Connection(\"connection\"+str(i))\n print(connection.uid)\n i += 1\n\n #vehicle id's should start at 0\n while(i < 20):\n car = Car()\n print(car.uid)\n i += 1\n\n #intersection ID's should start at 0\n while(i < 30):\n intersection = TrafficLight()\n intersection.print_info()\n i += 1\n\n #vehicle id's should pick back up at 10\n while(i < 40):\n car = Car()\n print(car.uid)\n i += 1\n \n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.7127659320831299, "alphanum_fraction": 0.7234042286872864, "avg_line_length": 9.55555534362793, "blob_id": "3a7e94003fb30edab19e00faf40be63620780ef6", "content_id": "c8d4e78a24ad0e1d954c58193d647d200aa1b140", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 94, "license_type": "no_license", "max_line_length": 16, "num_lines": 9, "path": "/Year 4/csci487Group4Project-makingGraphs/makefile", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "trafficControl:\n\nclean:\n\trm *.pyc\n\nrun: main.py\n\tpython main.py\nrunm: main.py\n\tpython3 main.py" }, { "alpha_fraction": 0.6365503072738647, "alphanum_fraction": 0.6365503072738647, "avg_line_length": 16.11111068725586, "blob_id": "3f197f0fb81572206e6d1024bc97ebc40e2c4f6d", "content_id": "d07968441cd7145d1ef921e19e5503b44441d5b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 487, "license_type": "no_license", "max_line_length": 59, "num_lines": 27, "path": "/Year 3/Assignment 4/Num_Expr_Node.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n\r\n//\r\n#ifndef _NUM_EXPR_NODE\r\n#define _NUM_EXPR_NODE\r\n\r\n#include \"Expr_Node.h\"\r\n#include \"Expr_Node_Visitor.h\"\r\n\r\nclass Num_Expr_Node : public Expr_Node\r\n{\r\n\tpublic:\r\n\t\tNum_Expr_Node(int n);\r\n\t\tvirtual ~Num_Expr_Node(void);\r\n\t\t\r\n\t\t//returns the number\r\n\t\tvirtual int eval(void);\r\n\t\t\r\n\t\t//visits the node\r\n\t\tvirtual void accept (Expr_Node_Visitor & v);\r\n\tprivate:\r\n\t\tint num;\r\n};\r\n#endif" }, { "alpha_fraction": 0.6690647602081299, "alphanum_fraction": 0.6690647602081299, "avg_line_length": 17.928571701049805, "blob_id": 
"4770b9fc7cdb0ebcc901cf2668801c8c0bf3caeb", "content_id": "7745bc14917cb7c6ce1245d47cedba0e56a02ec1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 556, "license_type": "no_license", "max_line_length": 59, "num_lines": 28, "path": "/Year 3/Assignment3/Modulus_Command.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n#include \"Binary_Op_Command.h\"\r\n\r\n#ifndef _MODULUS_COMMAND_H\r\n#define _MODULUS_COMMAND_H\r\n//Modulus Class\r\nclass Modulus_Command : public Binary_Op_Command{\r\n\tpublic:\r\n\t\t\r\n\t\tModulus_Command(Stack <int> &s);\r\n\t\t\r\n\t\t~Modulus_Command(void);\r\n\t\t\r\n\t\t//evaluate modulus operator between the integers\r\n\t\tvirtual int evaluate (int, int) const;\r\n\t\t\r\n\t\t//returns precedence\r\n\t\tvirtual int prec (void) const;\r\n\t\t\r\n\tprivate:\r\n\t\tint precedence;\r\n\t\t\r\n};\r\n\r\n#endif" }, { "alpha_fraction": 0.5511596202850342, "alphanum_fraction": 0.5729877352714539, "avg_line_length": 11.859648704528809, "blob_id": "9581abdf2589abac525752b547fdc6eede838fb5", "content_id": "bdcf2b3436924322c065329398cb20df4bf2182d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 733, "license_type": "no_license", "max_line_length": 49, "num_lines": 57, "path": "/Year 3/array-source/driver.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "#include \"Array.h\"\n\nint main (int argc, char * argv [])\n{\n // TODO Add code here to test your Array class.\n\t//construct an array\n\tArray nArray;\n\tArray wArray(5);\n\tArray zArray(10 , 'a');\n\t//Array wArray = zArray\n\t//wArray[3] = 'C';\n\t\n\t\t\n\t//zArray.get(11);\n\t\n\t\n\t\n\t\n\t//zArray[3];\n\t\n\t//zArray.set (4, 'J');\n\t//assignment operation (works)\n\t//nArray = wArray;\n\t\n\tzArray.resize(12);\n\t//set value at specified index\n\t\n\t//zArray.find('a');\n\t\n\t\n\t\t\n\t//zArray.find('a',12);\n\t\n\t\n\t\n\t//Get seg fault error\n\t\n\t//zArray == wArray;\n\t\n\t//wArray.fill('b');\n\t\n\tzArray.shrink();\n\t\n\t//zArray.reverse();\n\t\n\t//FIX SLICE METHODS\n\t//zArray.slice(1);\n\t\n\t//wArray.slice(1,4);\n\tstd::cout << \"worked\" << std::endl;\n\t\n\t\n\t//set method (works)\n\t\t\n \n return 0;\n}\n" }, { "alpha_fraction": 0.6646943092346191, "alphanum_fraction": 0.6646943092346191, "avg_line_length": 16.851852416992188, "blob_id": "d9470d67bc79877dad82606b758a7200feb17dc6", "content_id": "92f302cf42336d3c274a0b809fa0fe0d5bcf81d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 507, "license_type": "no_license", "max_line_length": 59, "num_lines": 27, "path": "/Year 3/Assignment3/Parenthesis_Command.h", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Parenthesis class\r\n\r\n// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n#include \"Expr_Command.h\"\r\n\r\n#ifndef _PARENTEHSIS_COMMAND_H\r\n#define _PARENTEHSIS_COMMAND_H\r\n\r\nclass Parenthesis_Command : public Expr_Command\r\n{\r\n\tpublic:\r\n\t\tParenthesis_Command(Stack <int > & s);\r\n\t\t\r\n\t\t//performs operation on parenthesis\r\n\t\tvirtual void execute (void);\r\n\t\t\r\n\t\t//returns precedence\r\n\t\tvirtual int prec(void) const;\r\n\tprivate:\r\n\t\tStack <int> & s_;\r\n\t\r\n};\r\n\r\n#endif" }, { "alpha_fraction": 
0.6018715500831604, "alphanum_fraction": 0.6086771488189697, "avg_line_length": 29.7702693939209, "blob_id": "95c5e7af4eddb1fd153a71fbf98cd49befde3192", "content_id": "b9938548af7efe9f88ad02a3e38ff21f97ef3ea1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2351, "license_type": "no_license", "max_line_length": 78, "num_lines": 74, "path": "/Master Year 1/Computer Graphics/HW4/renderer/models/Cylinder.java", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "/*\r\n\r\n*/\r\n\r\npackage renderer.models;\r\n\r\n/**\r\n Create a wireframe model of a right circular cylinder\r\n with its axis along the y-axis.\r\n<p>\r\n See <a href=\"https://en.wikipedia.org/wiki/Cylinder\" target=\"_top\">\r\n https://en.wikipedia.org/wiki/Cylinder</a>\r\n<p>\r\n This model can also be used to create right k-sided polygonal prisms.\r\n<p>\r\n See <a href=\"https://en.wikipedia.org/wiki/Prism_(geometry)\" target=\"_top\">\r\n https://en.wikipedia.org/wiki/Prism_(geometry)</a>\r\n\r\n @see CylinderSector\r\n*/\r\npublic class Cylinder extends CylinderSector\r\n{\r\n /**\r\n Create a right circular cylinder with radius 1 and its\r\n axis along the y-axis from {@code y = -1} to {@code y = 1}.\r\n */\r\n public Cylinder( )\r\n {\r\n this(1, 1, 15, 16);\r\n }\r\n\r\n\r\n /**\r\n Create a right circular cylinder with radius {@code r} and\r\n its axis along the y-axis from {@code y = -h} to {@code y = h}.\r\n\r\n @param r radius of the cylinder\r\n @param h height of the cylinder (from -h to h along the y-axis)\r\n */\r\n public Cylinder(double r, double h)\r\n {\r\n this(r, h, 15, 16);\r\n }\r\n\r\n\r\n /**\r\n Create a right circular cylinder with radius {@code r} and\r\n its axis along the y-axis from {@code y = -h} to {@code y = h}.\r\n <p>\r\n The last two parameters determine the number of lines of longitude\r\n and the number of circles of latitude in the model.\r\n <p>\r\n If there are {@code n} circles of latitude in the model (including\r\n the top and bottom edges), then each line of longitude will have\r\n {@code n+1} line segments. 
If there are {@code k} lines of longitude,\r\n then each circle of latitude will have {@code k} line segments.\r\n <p>\r\n There must be at least three lines of longitude and at least\r\n two circles of latitude.\r\n <p>\r\n By setting {@code k} to be a small integer, this model can also be\r\n used to create k-sided polygonal prisms.\r\n\r\n @param r radius of the cylinder\r\n @param h height of the cylinder (from -h to h along the y-axis)\r\n @param n number of circles of latitude around the cylinder\r\n @param k number of lines of longitude\r\n */\r\n public Cylinder(double r, double h, int n, int k)\r\n {\r\n super(r, h, 0, 2*Math.PI, n, k);\r\n name = \"Cylinder\";\r\n }\r\n}//Cylinder\r\n" }, { "alpha_fraction": 0.8266666531562805, "alphanum_fraction": 0.8266666531562805, "avg_line_length": 73, "blob_id": "05ff7e5034c1774bec490ec8935cedeca059bd92", "content_id": "5f0a8936284a3763f840560b0add50b0634e35b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 75, "license_type": "no_license", "max_line_length": 73, "num_lines": 1, "path": "/Master Year 1/Programming Languages and Compilers/README.txt", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "Instructions for each assignment are in the html files within the folders \n" }, { "alpha_fraction": 0.6138392686843872, "alphanum_fraction": 0.6160714030265808, "avg_line_length": 13.517241477966309, "blob_id": "bebf64e9ef53826f08775338a18c26257966ee3a", "content_id": "f2fbe83cb65afad33f63385d9305acd36299f148", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 448, "license_type": "no_license", "max_line_length": 59, "num_lines": 29, "path": "/Year 3/Assignment3/Num_Command.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor received any help\r\n// on this assignment.\r\n#include \"Num_Command.h\"\r\n\r\nNum_Command::Num_Command (Stack <int> & s, int n):\r\ns_(s),\r\nn_(n)\r\n{\r\n\t//constructor\r\n}\r\n\r\nNum_Command::~Num_Command(void)\r\n{\r\n\t\r\n}\r\nvoid Num_Command::execute (void)\r\n{\r\n\t//push number onto stack\r\n\ts_.push(this->n_);\r\n}\r\n\r\nint Num_Command::prec (void) const\r\n{\r\n\t//number doesn't have precedence\r\n\treturn 0;\r\n\t\r\n}" }, { "alpha_fraction": 0.5675675868988037, "alphanum_fraction": 0.6004299521446228, "avg_line_length": 21.242856979370117, "blob_id": "35bd4ad0d87c44106717f765144ec16bb4334058", "content_id": "f24910ed8f98c2fdaa045d89cf13abcdd6bc5921", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3256, "license_type": "no_license", "max_line_length": 105, "num_lines": 140, "path": "/Year 2/Assignment #2/Candy.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": "// \r\n//Honor Pledge:\r\n//\r\n// I pledge that I have neither given nor \r\n// received any help on this assignment\r\n//.\r\n//\r\n// huntmk\r\n#include <iostream>\r\n#include <stdlib.h> \r\n#include <ctime>\r\n#include <iomanip>\r\n\r\n#define GAME_LENGTH 50\r\n\r\n\r\n\r\nvoid advancePlayerA(int* ptrPlayerA);\r\nvoid advancePlayerB(int* ptrPlayerB);\r\nvoid printPosition(int* ptrPlayerA, int* ptrPlayerB);\r\n\r\n\r\nint main()\r\n{\r\n//needs fixing\r\n//The first player that reaches space 50, should trigger a message to notify the player who won the game.\r\n\r\n\tsrand(time(NULL));\r\n//Both players will start at space 0.\r\n\r\n\tint indexA = 
0;\r\n\tint indexB = 0;\r\n// Keep running program until one of the players reaches space 50.\r\nwhile(indexA< GAME_LENGTH && indexB < GAME_LENGTH){\r\n\tadvancePlayerA(&indexA);\r\n\tadvancePlayerB(&indexB);\r\n// if playerA and playerB at the same spot, move playerA back 1 spot\r\n\tif (indexA == indexB){\r\n\t indexA = indexA - 1;\r\n\t}\r\n// if playerA and playerB values are less than 0, they hold the value 0;\t\r\n\tif (indexA < 0){\r\n\t\tindexA = 0;\r\n\t}\r\n\tif (indexB < 0){\r\n\t\tindexB = 0;\r\n\t}\r\n// Print position of playerA and playerB\r\n\tprintPosition(&indexA, &indexB);\r\n//If the position of playerA or playerB is greater than or equal to 50, print a message saying who won\r\n\tif (indexA >= 50){\r\n\t\tindexA = 50;\r\n\t\tstd::cout << \"You have won the game!\";\t\r\n\t}\r\n\tif (indexB >= 50){\r\n\t\tstd::cout << \"Your friend has won the game, good luck next time.\";\t\r\n\t\r\n\t}\r\n}\t\r\n\treturn 0;\r\n}\r\n//Function advances playerA \r\nvoid advancePlayerA(int* indexA)\r\n{\r\n\t\r\n\t\r\n\t// use randomGenerator for cards drawn.\r\n\tint num = (rand()%100) +1;\r\n// Forward 1 card\r\n\tif (num >= 1 && num < 40){\r\n\t\t*indexA= *indexA + 1;\r\n//Forward 2 card\r\n\t}else if(num >= 40 && num < 60){\r\n\t\t*indexA = *indexA + 2;\r\n//Mountain card(Forward 3)\r\n\t}else if(num >= 60 && num < 70){\r\n\t\t*indexA = *indexA + 3;\r\n//Rainbow card(Forward 5)\r\n\t}else if(num >= 70 && num<80){\r\n\t\t*indexA = *indexA + 5;\r\n//Cherry card (Back 3)\r\n\t}else if(num >= 80 && num<90){\r\n\t\t*indexA = *indexA - 3;\r\n//Molasses card (back 5); use <= 100 so a roll of 100 is not silently dropped\r\n\t}else if(num >=90 && num<=100){\r\n\t\t*indexA = *indexA - 5;\r\n\t}\r\n\t\r\n\t\r\n}\r\n//Advances playerB\r\nvoid advancePlayerB(int* indexB)\r\n{\r\n// use randomGenerator for cards drawn.\r\n\tint num = (rand()%100) +1;\r\n// Forward 1 card\r\n\tif (num >= 1 && num < 30){\r\n\t\t*indexB = *indexB + 1;\r\n//Forward 2 card\r\n\t}else if(num >= 30 && num < 40){\r\n\t\t*indexB = *indexB + 2;\r\n//Mountain card(Forward 3)\r\n\t}else if(num >= 40 && num < 60){\r\n\t\t*indexB = *indexB + 3;\r\n//Rainbow card(Forward 5)\r\n\t}else if(num >= 60 && num<70){\r\n\t\t*indexB = *indexB + 5;\r\n//Cherry card (Back 3)\r\n\t}else if(num >= 70 && num<90){\r\n\t\t*indexB = *indexB - 3;\r\n//Molasses card (back 5); use <= 100 so a roll of 100 is not silently dropped\r\n\t}else if(num >=90 && num<=100){\r\n\t\t*indexB = *indexB - 5;\r\n\t}\r\n\t\r\n\t\r\n\t\r\n}\r\n// prints position of playerA and playerB after every turn\r\nvoid printPosition(int* playerA, int* playerB)\r\n{\t\r\n\t\r\n\tint i;\r\n\tstd::cout<< std::endl;\r\n//for each space from 0 up to GAME_LENGTH, increment\r\n\tfor (i=0;i <= GAME_LENGTH; i++){\r\n// if player == the value on the line, print at that position\t\t\t\r\n\t\tif (*playerA==i){\r\n\t\t\tstd::cout <<std::setw(i);\r\n\t\t\tstd::cout << \"A\";\r\n\t\t}\r\n\t\tif (*playerB==i){\r\n\t\t\tstd::cout <<std::setw(i);\r\n\t\t\tstd::cout << \"B\";\r\n\t\t\t\r\n\t\t}\r\n\t\t\r\n\t\t\t\t\r\n\t}\r\n}\r\n\r\n" }, { "alpha_fraction": 0.6417445540428162, "alphanum_fraction": 0.6604361534118652, "avg_line_length": 18.882352828979492, "blob_id": "8bd96d59e710170ce4b1ef799a90541269dd2cba", "content_id": "13e41d5acb523a81f7e10ec1df83c518f6bc2654", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 321, "license_type": "no_license", "max_line_length": 108, "num_lines": 17, "path": "/Year 4/QRcodeSubmission/QRCodeGenerator/qrpage.cpp", "repo_name": "huntmk/SchoolWork", "src_encoding": "UTF-8", "text": 
"#include \"qrpage.h\"\n#include \"ui_qrpage.h\"\n#include <iostream>\n\nQRPage::QRPage(QWidget *parent) :\n QMainWindow(parent),\n ui(new Ui::QRPage)\n{\n ui->setupUi(this);\n ui->label->setPixmap(QPixmap(\"QR.png\").scaled(400, 400, Qt::IgnoreAspectRatio, Qt::FastTransformation));\n\n}\n\nQRPage::~QRPage()\n{\n delete ui;\n}\n" } ]
195
stephkananth/sortify
https://github.com/stephkananth/sortify
c328568cdaa31f1d191af0bc51c1454ead278996
2071a1ec84ba6678886a9eb736c418b83afb6553
34cc056060008c88053c1df372c83615e2dcdd12
refs/heads/master
2021-01-19T10:05:33.642500
2020-11-21T22:14:58
2020-11-21T22:14:58
87,825,173
2
1
null
null
null
null
null
[ { "alpha_fraction": 0.5001904964447021, "alphanum_fraction": 0.5120084285736084, "avg_line_length": 33.88432312011719, "blob_id": "149b25fa62fea80199d97212095fd9710c512872", "content_id": "ed8cbb5a039e76d9a4dd7988bd5fd24bf8e37db3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 115502, "license_type": "no_license", "max_line_length": 80, "num_lines": 3311, "path": "/sortify.py", "repo_name": "stephkananth/sortify", "src_encoding": "UTF-8", "text": "################################################################################\n\nimport copy\nimport json\nimport math\nimport os\nimport unicodedata\nimport webbrowser\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import simpledialog\n\nimport matplotlib\nmatplotlib.use('TKAgg')\nimport matplotlib.pyplot as plt\n\nimport spotipy\nimport spotipy.oauth2 as oauth2\nimport spotipy.util as util\n\nimport numpy as np\nimport seaborn as sns\n\n################################################################################\n\n# Credentials from developer.spotify.com\n\nos.environ['SPOTIPY_CLIENT_ID'] = '5c14698fcf8e41adb4f39b5518e55100'\nos.environ['SPOTIPY_CLIENT_SECRET'] = '355d085eaf6049a1bda2e8dff93bdefd'\nos.environ['SPOTIPY_REDIRECT_URI'] = 'https://www.spotify.com/us/'\n\n\n################################################################################\n\n# Adapted from:\n# cs.cmu.edu/~112/notes/keyEventsDemo.py\n# cs.cmu.edu/~112/notes/mouseEventsDemo.py\n# cs.cmu.edu/~112/notes/resizableDemo.py\n\n\ndef run(width=825, height=510):\n def redraw_all_wrapper(canvas, data):\n canvas.delete(ALL)\n canvas.create_rectangle(0, 0, data.width, data.height, fill=white,\n width=0)\n redraw_all(canvas, data)\n canvas.update()\n\n def mouse_wrapper(mouse_function, event, canvas, data):\n mouse_function(event, data)\n redraw_all_wrapper(canvas, data)\n\n def key_wrapper(key_function, event, canvas, data):\n key_function(event, data)\n redraw_all_wrapper(canvas, data)\n\n def timer_fired_wrapper(canvas, data):\n timer_fired(data)\n redraw_all_wrapper(canvas, data)\n # Pause, then call timer_fired again\n canvas.after(data.timer_delay, timer_fired_wrapper, canvas, data)\n\n # Create the root before data is initialized because data contains an image\n root = Tk()\n\n # Set up data and call init\n class Struct(object):\n pass\n\n data = Struct()\n data.width = width\n data.height = height\n data.timer_delay = 100 # milliseconds\n init(data)\n\n # Create the resizeable canvas\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.pack(fill=BOTH, expand=YES)\n\n # Set up events\n root.bind('<Button-1>',\n lambda event: mouse_wrapper(left_pressed, event, canvas, data))\n root.bind('<Button-3>',\n lambda event: mouse_wrapper(right_pressed, event, canvas, data))\n canvas.bind('<Motion>',\n lambda event: mouse_wrapper(mouse_moved, event, canvas, data))\n canvas.bind('<B1-Motion>',\n lambda event: mouse_wrapper(left_moved, event, canvas, data))\n canvas.bind('<B3-Motion>',\n lambda event: mouse_wrapper(right_moved, event, canvas, data))\n root.bind('<B1-ButtonRelease>',\n lambda event: mouse_wrapper(left_released, event, canvas, data))\n root.bind('<B3-ButtonRelease>',\n lambda event: mouse_wrapper(right_released, event, canvas, data))\n root.bind('<KeyPress>',\n lambda event: key_wrapper(key_pressed, event, canvas, data))\n root.bind('<KeyRelease>',\n lambda event: key_wrapper(key_released, event, canvas, data))\n\n # 4 extra pixels for frame boundaries\n def 
size_changed(event):\n data.width = event.width - 4\n data.height = event.height - 4\n redraw_all_wrapper(canvas, data)\n\n # Resizes buttons with canvas after they have been initizlized\n if (data.mode == 'simplify'):\n simplify_buttons(data)\n\n root.bind('<Configure>', size_changed)\n root.minsize(785 + 4, 485 + 4)\n\n timer_fired_wrapper(canvas, data)\n # Launch the program\n root.mainloop() # Blocks until window is closed\n print('Bye!')\n\n\n################################################################################\n\n# MODEL\n# Store variables in data, a Struct, for easy access\n\n\ndef init(data):\n # MODEL\n data.songs = dict()\n data.songs_top = dict()\n data.top_songs = list()\n data.artists = dict()\n data.artists_top = dict()\n data.top_artists = list()\n data.playlists = dict()\n\n # VIEW --> LOGIN/HELP\n data.logo = PhotoImage(file='sortify_logo.gif')\n data.logo_scale = math.ceil(data.logo.width() / (data.width / scale ** 2))\n data.logo = data.logo.subsample(data.logo_scale, data.logo_scale)\n data.rows = 30\n data.columns = 20\n\n # CONTROLLER\n data.mode = 'login'\n data.username = ''\n data.tokens = dict()\n\n data.ctrl = False\n data.shift = False\n data.is_help = False\n data.is_songs = False\n data.is_artists = True\n\n # EVENT\n data.mouse_moved_x = 0\n data.mouse_moved_y = 0\n data.left_moved_x = 0\n data.left_moved_y = 0\n data.left_pressed_x = 0\n data.left_pressed_y = 0\n data.left_released_x = 0\n data.left_released_y = 0\n data.keysym_pressed = ''\n data.keysym_released = ''\n\n data.values = list()\n data.song_ids = list()\n data.stage_ids = list()\n data.artist_ids = list()\n data.artist_song_ids = list()\n data.selected_list = ''\n data.selected_ids = set()\n data.selected_artists = set()\n data.original_ids = list()\n data.analyze_ids = list()\n\n data.sort_mode = 0\n data.start_list = 0\n data.start_song = 0\n data.end_song = 10\n data.start_artist = 0\n data.end_artist = 50\n data.start_stage = 0\n data.start_values = 0\n\n # OOP\n data.buttons = login_buttons(data)\n data.sliders = list()\n data.stage_songs = list()\n data.analyze_songs = list()\n data.viewing_songs = list()\n data.viewing_artists = list()\n data.viewing_artist_songs = list()\n data.parameter = 'duration_ms'\n\n\n################################################################################\n\n# MODEL\n# Everything that can/should be reset between modes\n\n\ndef clean(data):\n data.mode = ''\n data.is_songs = False\n data.is_artists = False\n del data.song_ids[:]\n del data.artist_ids[:]\n del data.artist_song_ids[:]\n data.selected_list = ''\n data.selected_ids.clear()\n data.selected_artists.clear()\n del data.original_ids[:]\n data.sort_mode = 0\n data.start_list = 0\n data.start_song = 0\n data.end_song = 10\n data.start_artist = 0\n data.end_artist = 50\n data.start_stage = 0\n del data.values[:]\n del data.song_ids[:]\n del data.artist_ids[:]\n del data.artist_song_ids[:]\n del data.original_ids[:]\n del data.analyze_ids[:]\n del data.buttons[:]\n del data.sliders[:]\n del data.stage_songs[:]\n del data.analyze_songs[:]\n del data.viewing_songs[:]\n del data.viewing_artists[:]\n del data.viewing_artist_songs[:]\n\n\n################################################################################\n\n# CONTROLLER\n# EVENTS\n\n# MOUSE\n\n\ndef mouse_moved(event, data):\n data.mouse_moved_x, data.mouse_moved_y = event.x, event.y\n if (not data.is_help):\n if (data.mode == 'login'):\n login_mouse_moved(data)\n elif (data.mode == 'simplify'):\n simplify_mouse_moved(data)\n elif 
(data.mode == 'analyze'):\n analyze_mouse_moved(data)\n\n\n################################################################################\n\n# CONTROLLER\n# EVENTS\n\n# LEFT\n\n\ndef left_moved(event, data):\n data.left_moved_x, data.left_moved_y = event.x, event.y\n if (not data.is_help):\n if (data.mode == 'analyze'):\n analyze_left_moved(data)\n\n\ndef left_pressed(event, data):\n data.left_pressed_x, data.left_pressed_y = event.x, event.y\n if (not data.is_help):\n if (data.mode == 'simplify'):\n simplify_left_pressed(data)\n elif (data.mode == 'analyze'):\n analyze_left_pressed(data)\n\n\ndef left_released(event, data):\n data.left_released_x, data.left_released_y = event.x, event.y\n if (not data.is_help):\n if (data.mode == 'login'):\n login_left_released(data)\n elif (data.mode == 'simplify'):\n simplify_left_released(data)\n\n\n################################################################################\n\n# CONTROLLER\n# EVENTS\n\n# RIGHT\n\n\ndef right_moved(event, data):\n pass\n\n\ndef right_pressed(event, data):\n pass\n\n\ndef right_released(event, data):\n pass\n\n\n################################################################################\n\n# CONTROLLER\n# EVENTS\n\n# KEY\n\n# Adapted From:\n# cs.cmu.edu/~112/notes/keyEventsDemo.py\n\n\n# Returns boolean of whether to ignore keysym\ndef ignore_key(event):\n ignore_sym = ['Shift_L', 'Shift_R', 'Control_L', 'Control_R', 'Caps_Lock']\n return (event.keysym in ignore_sym)\n\n\n# Stores shift and control as boolean values in data\ndef set_event_info(event, data):\n data.ctrl = ((event.state & 0x0004) != 0)\n data.shift = ((event.state & 0x0001) != 0)\n\n\ndef key_pressed(event, data):\n if (not ignore_key(event)):\n data.keysym_pressed = event.keysym\n set_event_info(event, data)\n if (data.keysym_pressed == 'space'):\n data.is_help = not data.is_help\n elif (not data.is_help):\n if (data.mode == 'simplify'):\n simplify_key_pressed(data)\n elif (data.mode == 'analyze'):\n analyze_key_pressed(data)\n\n\ndef key_released(event, data):\n if (not ignore_key(event)):\n data.keysym_released = event.keysym\n set_event_info(event, data)\n if (not data.is_help):\n if (data.mode == 'analyze'):\n analyze_key_released(data)\n\n\n################################################################################\n\n# CONTROLLER\n# EVENTS\n\n# TIMER\n\n\ndef timer_fired(data):\n pass\n\n\n################################################################################\n\n# VIEW\n\n\ndef redraw_all(canvas, data):\n if data.is_help:\n help_redraw_all(canvas, data)\n elif (data.mode == 'login'):\n login_redraw_all(canvas, data)\n elif (data.mode == 'simplify'):\n simplify_redraw_all(canvas, data)\n elif (data.mode == 'analyze'):\n analyze_redraw_all(canvas, data)\n\n\n################################################################################\n\n# VIEW\n\n\nscale = (1 + 5 ** 0.5) / 2 # golden ratio\nc_width = 6 # average pixels/character\nblack = '#000000'\nblack_background = '#131313'\nblack_shadow = '#191919'\ngray_hover = '#1E1E1E'\ngray_select = '#262626'\ngray = '#8F8F8F'\nred_hover = '#C64C57'\nred = '#A34349'\ngreen_hover = '#57C64C'\ngreen = '#49A343'\nblue_hover = '#4C57C6'\nblue = '#4349A3'\nwhite = '#FFFFFF'\n\n\n################################################################################\n\n# VIEW\n# String/Text Functions\n\n\n# Shortens strings to fit into given area\ndef truncate(s, width):\n length = math.floor(width / c_width)\n if (len(s) <= length):\n return s\n return s[:length] + '...'\n\n\n# Ignore 'the' 
and capitalization when sorting\ndef sort_format(s):\n if (s.upper()[:4] == 'THE '):\n return s.upper()[4:]\n return s.upper()\n\n\n# Slightly modified from:\n# gist.github.com/j4mie/557354\n# Normalizes a string (i.e. strips the accents) so it is viewable and printable\ndef strip(s):\n if isinstance(s, str):\n return (unicodedata.normalize('NFKD', s).encode('ASCII',\n 'ignore').decode(\n 'utf-8', 'ignore'))\n return s\n\n\n################################################################################\n\n# OOP\n\n\nclass Button(object):\n def __init__(self, data, text, cx_scale, cy_scale, function, width_scale=0,\n height_scale=0, color=None, color_hover=None, text_color=white,\n text_color_hover=white, text_size=10):\n self.function = function\n self.text = text\n self.text_color = text_color\n self.text_color_hover = text_color_hover\n self.text_fill = self.text_color\n self.text_size = text_size\n self.color = color\n self.color_hover = color_hover\n self.fill = self.color\n self.cx_scale = cx_scale\n self.cy_scale = cy_scale\n if (width_scale == 0):\n self.width_scale = 1 / scale ** 3\n else:\n self.width_scale = width_scale\n if (height_scale == 0):\n self.height_scale = 1 / scale ** 6\n else:\n self.height_scale = height_scale\n self.cx = data.width * self.cx_scale\n self.cy = data.height * self.cy_scale\n self.width = data.width * self.width_scale\n self.height = data.height * self.height_scale\n self.x1 = self.cx - self.width / 2\n self.x2 = self.cx + self.width / 2\n self.y1 = self.cy - self.height / 2\n self.y2 = self.cy + self.height / 2\n\n # To maintain scalability\n def update_dimensions(self, data):\n self.cx = data.width * self.cx_scale\n self.cy = data.height * self.cy_scale\n self.width = data.width * self.width_scale\n self.height = data.height * self.height_scale\n self.x1 = self.cx - self.width / 2\n self.x2 = self.cx + self.width / 2\n self.y1 = self.cy - self.height / 2\n self.y2 = self.cy + self.height / 2\n\n # Useful for selecting and hovering\n def is_within_bounds(self, data, x, y):\n self.update_dimensions(data)\n return ((self.x1 <= x <= self.x2) and (self.y1 <= y <= self.y2))\n\n def hover(self):\n self.fill = self.color_hover\n self.text_fill = self.text_color_hover\n\n def unhover(self):\n self.fill = self.color\n self.text_fill = self.text_color\n\n # Buttons perform functions when pressed\n def press(self, data):\n self.function(data)\n\n def unpress(self, data):\n pass\n\n def draw(self, canvas, data):\n self.update_dimensions(data)\n canvas.create_rectangle(self.x1, self.y1, self.x2, self.y2,\n fill=self.fill, width=0)\n canvas.create_text(self.cx, self.cy, fill=self.text_fill,\n text=truncate(self.text, self.width),\n font=('Proxima %d' % (self.text_size,)))\n\n\n################################################################################\n\n# HELP\n\n# CONTROLLER\n# VIEW\n\n\n# Opens a splash screen with instructions\ndef help_init(data):\n data.is_help = True\n\n\ndef help_redraw_all(canvas, data):\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill=black_background)\n canvas.create_rectangle(data.width * (1 / 2 - scale / 5), 0,\n data.width * (1 / 2 + scale / 5), data.height,\n fill=black)\n canvas.create_line(data.width * (1 / 2 - scale / 5), 0,\n data.width * (1 / 2 - scale / 5), data.height,\n fill=black_shadow)\n canvas.create_line(data.width * (1 / 2 + scale / 5), 0,\n data.width * (1 / 2 + scale / 5), data.height,\n fill=black_shadow)\n canvas.create_image(data.width / 2, data.height / (3 * scale),\n image=data.logo)\n 
canvas.create_text(data.width / 2, 5 * data.height / scale ** 6, fill=gray,\n text='Simplify. Organize. Analyze. ', font='Proxima 14')\n canvas.create_line(data.width * (1 / 2 - 1 / scale ** 3),\n data.height * scale / 5,\n data.width * (1 / 2 + 1 / scale ** 3),\n data.height * scale / 5,\n fill=gray_select)\n canvas.create_text(data.width / 2, data.height * scale * 6 / 24,\n text='Maximize the window for optimal viewing.',\n fill=white, font='Proxima 14 bold')\n canvas.create_text(data.width / 2, data.height * scale * 7.25 / 24,\n text='Use the up and down arrows on your keypad to scroll.',\n fill=white, font='Proxima 14 bold')\n canvas.create_text(data.width / 2, data.height * scale * 7.75 / 24,\n text='(Make sure your cursor is within the right section!)',\n fill=white, font='Proxima 14 bold')\n canvas.create_text(data.width / 2, data.height * scale * 9 / 24,\n text='Click on the headings to sort.',\n fill=white, font='Proxima 14 bold')\n canvas.create_text(data.width / 2, data.height * scale * 10.25 / 24,\n text='Use control and shift to select multiple songs/artists.',\n fill=white, font='Proxima 14 bold')\n canvas.create_text(data.width / 2, data.height * scale * 10.75 / 24,\n text='(Use control + a to SELECT ALL.)',\n fill=white, font='Proxima 14 bold')\n canvas.create_text(data.width / 2, data.height * scale * 12 / 24,\n text='Use the sliders to view and select more/less songs or artists.',\n fill=white, font='Proxima 14 bold')\n canvas.create_text(data.width / 2, data.height * scale * 13 / 24, text=\n 'Use buttons or hold control + drag to add to the playlist.',\n fill=white, font='Proxima 14 bold')\n canvas.create_text(data.width / 2, data.height * (1 - 1 / scale ** 6),\n text='PRESS THE SPACEBAR TO RETURN.',\n fill=gray, font='Proxima 14')\n\n\n################################################################################\n\n# LOGIN\n\n# MODEL\n# Initializes the buttons for the login page\n\n\ndef login_buttons(data):\n return [\n Button(data, 'USERNAME', 1 / 2, 1 / scale ** 2, input_username,\n text_color=green, text_color_hover=green_hover, text_size=20),\n Button(data, 'LOG IN', 1 / 2, 1 - 1 / scale ** 4,\n authorize,\n color=green, color_hover=green_hover, text_size=14),\n Button(data, 'PRESS THE SPACEBAR FOR HELP AT ANY TIME', 1 / 2,\n 1 - 1 / scale ** 6, help_init, width_scale=1 / 2,\n text_color=gray,\n text_color_hover=white, text_size=14)\n ]\n\n\n################################################################################\n\n# LOGIN\n# CONTROLLER\n\n# Adapted from:\n# cs.cmu.edu/~112/notes/dialogs-demo1.py\n\n\n# Simple message box asking for the user's Spotify username\ndef input_username(data):\n message = 'Please enter your Spotify username.'\n title = 'Username'\n data.username = simpledialog.askstring(title, message)\n\n\n# Checks that there is a valid username and attempts to authenticate the user\n# Raises a warning box if no username has been entered\ndef authorize(data):\n if (data.username == ''):\n message = 'Please enter your username to proceed.'\n title = 'Username Required'\n messagebox.showwarning(title, message)\n else:\n simplify_init(data)\n\n\n################################################################################\n\n# LOGIN\n# CONTROLLER\n\n\n# Very slightly modified from:\n# github.com/plamere/spotipy/blob/master/spotipy/util.py\n# cs.cmu.edu/~112/notes/dialogs-demo1.py\n\n# Checks if the user has already authorized an action\n# If not, redirects them to an authorization page and opens a message box\n# Adds the new token to 
data\n\ndef prompt_for_user_token(data, scope):\n client_id = os.getenv('SPOTIPY_CLIENT_ID')\n client_secret = os.getenv('SPOTIPY_CLIENT_SECRET')\n redirect_uri = os.getenv('SPOTIPY_REDIRECT_URI')\n sp_oauth = oauth2.SpotifyOAuth(client_id, client_secret, redirect_uri,\n scope=scope,\n cache_path='.cache-' + data.username)\n token_info = sp_oauth.get_cached_token()\n if not token_info:\n auth_url = sp_oauth.get_authorize_url()\n webbrowser.open(auth_url)\n msg = 'Enter the URL you were redirected to.'\n title = 'Authorize'\n response = simpledialog.askstring(title, msg)\n code = sp_oauth.parse_response_code(response)\n token_info = sp_oauth.get_access_token(code)\n data.tokens[scope] = token_info['access_token']\n\n\n################################################################################\n\n# LOGIN\n\n# CONTROLLER\n# EVENTS\n\n# MOUSE\n\n\ndef login_mouse_moved(data):\n for button in data.buttons:\n if (button.is_within_bounds(data, data.mouse_moved_x,\n data.mouse_moved_y)):\n button.hover()\n else:\n button.unhover()\n\n\n################################################################################\n\n# LOGIN\n\n# CONTROLLER\n# EVENTS\n\n# LEFT\n\n\ndef login_left_released(data):\n for button in data.buttons:\n if (button.is_within_bounds(data, data.left_released_x,\n data.left_released_y)):\n button.press(data)\n\n\n################################################################################\n\n# LOGIN\n# VIEW\n\n\ndef login_redraw_all(canvas, data):\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill=black_background)\n canvas.create_rectangle(data.width * (1 / 2 - scale / 5), 0,\n data.width * (1 / 2 + scale / 5), data.height,\n fill=black)\n canvas.create_line(data.width * (1 / 2 - scale / 5), 0,\n data.width * (1 / 2 - scale / 5), data.height,\n fill=black_shadow)\n canvas.create_line(data.width * (1 / 2 + scale / 5), 0,\n data.width * (1 / 2 + scale / 5), data.height,\n fill=black_shadow)\n canvas.create_image(data.width / 2, data.height / (3 * scale),\n image=data.logo)\n canvas.create_text(data.width / 2, 5 * data.height / scale ** 6, fill=gray,\n text='Simplify. Analyze. Organize. 
', font='Proxima 14')\n canvas.create_line(data.width * (1 / 2 - 1 / scale ** 3),\n data.height * scale / 5,\n data.width * (1 / 2 + 1 / scale ** 3),\n data.height * scale / 5,\n fill=gray_select)\n canvas.create_text(data.width / 2, data.height / scale, fill=white,\n font='Proxima 15', text=\n'''\nPlease enter your username above.\nThen click on the button below.\nUser authentication requires\ninteraction with your web browser.\nOnce you enter your credentials\nand give authorization to Sortify,\nyou will be redirected to a URL.\nPaste the URL you were directed to\nto complete the authorization.\nThis may take some time.\n''')\n for button in data.buttons:\n button.draw(canvas, data)\n\n\n################################################################################\n\n# LOGIN\n# MODEL\n\n# Initializes 'simplify' by pulling data\n# Preference is to pull data from directory for speed an efficiency\n# Directory is labeled with user's username and stored\n# Creates buttons immediately afterward\n\ndef simplify_init(data):\n if (os.path.isdir(data.username)):\n import_user_data(data)\n else:\n prompt_for_user_token(data, 'user-library-modify')\n get_user_data(data)\n export_user_data(data)\n clean(data)\n data.mode = 'simplify'\n simplify_buttons(data)\n\n\n################################################################################\n\n# LOGIN \n# MODEL\n\n\n# Gets user's top songs, artists, and their public playlists from Spotify\ndef get_user_data(data):\n for term in ['short_term', 'medium_term', 'long_term']:\n data.songs_top[term] = list()\n data.artists_top[term] = list()\n get_top_songs(data)\n get_top_artists(data)\n get_playlists(data)\n\n\n# Saves user's top songs and their features on their computer using json\ndef export_user_data(data):\n os.makedirs(data.username)\n songs = open(data.username + os.sep + 'songs.txt', 'w')\n songs = json.dump(data.songs, songs)\n songs_top = open(data.username + os.sep + 'songs_top.txt', 'w')\n songs_top = json.dump(data.songs_top, songs_top)\n top_songs = open(data.username + os.sep + 'top_songs.txt', 'w')\n top_songs = json.dump(data.top_songs, top_songs)\n artists = open(data.username + os.sep + 'artists.txt', 'w')\n artists = json.dump(data.artists, artists)\n artists_top = open(data.username + os.sep + 'artists_top.txt', 'w')\n artists_top = json.dump(data.artists_top, artists_top)\n top_artists = open(data.username + os.sep + 'top_artists.txt', 'w')\n top_artists = json.dump(data.top_artists, top_artists)\n playlists = open(data.username + os.sep + 'playlists.txt', 'w')\n playlists = json.dump(data.playlists, playlists)\n\n\n# Gets user's top songs and features that have been saved in a directory\ndef import_user_data(data):\n songs = open(data.username + os.sep + 'songs.txt', 'r')\n data.songs.update(json.load(songs))\n songs_top = open(data.username + os.sep + 'songs_top.txt', 'r')\n data.songs_top.update(json.load(songs_top))\n top_songs = open(data.username + os.sep + 'top_songs.txt', 'r')\n data.top_songs += json.load(top_songs)\n artists = open(data.username + os.sep + 'artists.txt', 'r')\n data.artists.update(json.load(artists))\n artists_top = open(data.username + os.sep + 'artists_top.txt', 'r')\n data.artists_top.update(json.load(artists_top))\n top_artists = open(data.username + os.sep + 'top_artists.txt', 'r')\n data.top_artists += json.load(top_artists)\n playlists = open(data.username + os.sep + 'playlists.txt', 'r')\n 
data.playlists.update(json.load(playlists))\n\n\n################################################################################\n\n# LOGIN\n# MODEL\n\n# Adapted from:\n# github.com/plamere/spotipy/blob/master/examples/my_top_tracks.py\n# github.com/plamere/spotipy/blob/master/examples/my_top_artists.py\n\n# Pulls a user's top songs for each time range\n# Puts the IDs into dictionaries by term and into meta dictionary for stats.\n\n\ndef get_top_songs(data):\n sp = spotipy.Spotify(auth=data.tokens['user-library-modify'])\n sp.trace = False\n for term in data.songs_top:\n songs = sp.current_user_top_tracks(time_range=term, limit=50)\n for song in songs['items']:\n data.songs_top[term] += [song['id']]\n if (song['id'] not in data.top_songs):\n data.top_songs += [song['id']]\n if (song['id'] not in data.songs):\n features = sp.audio_features([song['id']])[0]\n data.songs[song['id']] = {\n 'title': strip(song['name']),\n 'artist_id': song['artists'][0]['id'],\n 'artist_name': strip(song['artists'][0]['name']),\n 'album_name': strip(song['album']['name']),\n 'acousticness': strip(features['acousticness']),\n 'danceability': strip(features['danceability']),\n 'energy': strip(features['energy']),\n 'liveness': strip(features['liveness']),\n 'loudness': strip(features['loudness']),\n 'duration_ms': strip(song['duration_ms']),\n 'popularity': strip(song['popularity']),\n 'speechiness': strip(features['speechiness']),\n 'tempo': strip(features['tempo']),\n 'valence': strip(features['valence'])\n }\n\n\n# Pulls a user's top artists for each time range\n# Puts the IDs into dictionaries by term and into meta dictionary for stats.\n\ndef get_top_artists(data):\n sp = spotipy.Spotify(auth=data.tokens['user-library-modify'])\n sp.trace = False\n for term in data.artists_top:\n artists = sp.current_user_top_artists(time_range=term, limit=50)\n for artist in artists['items']:\n data.artists_top[term] += [artist['id']]\n if (artist['id'] not in data.top_artists):\n data.top_artists += [artist['id']]\n if (artist['id'] not in data.artists):\n data.artists[artist['id']] = {\n 'name': artist['name'],\n 'songs': list()\n }\n songs = sp.artist_top_tracks(artist['id'])\n for song in songs['tracks']:\n data.artists[artist['id']]['songs'] += [song['id']]\n if (song['id'] not in data.songs):\n features = sp.audio_features([song['id']])[0]\n data.songs[song['id']] = {\n 'title': strip(song['name']),\n 'artist_id': song['artists'][0]['id'],\n 'artist_name': strip(song['artists'][0]['name']),\n 'album_name': strip(song['album']['name']),\n 'acousticness': strip(features['acousticness']),\n 'danceability': strip(features['danceability']),\n 'energy': strip(features['energy']),\n 'liveness': strip(features['liveness']),\n 'loudness': strip(features['loudness']),\n 'duration_ms': strip(song['duration_ms']),\n 'popularity': strip(song['popularity']),\n 'speechiness': strip(features['speechiness']),\n 'tempo': strip(features['tempo']),\n 'valence': strip(features['valence'])\n }\n\n\n################################################################################\n\n# LOGIN\n# MODEL\n\n# Gets a user's playlists and playlist contents from Spotify\n# Puts data into a dictionary --> directory with json\n\ndef get_playlists(data):\n scope = 'user-library-modify'\n token = util.prompt_for_user_token(data.username, scope)\n sp = spotipy.Spotify(auth=token)\n playlists = sp.user_playlists(data.username)\n for playlist in playlists['items']:\n data.playlists[playlist['id']] = {\n 'name': playlist['name'],\n 'songs': list()\n }\n 
uri = playlist['uri']\n username = uri.split(':')[2]\n playlist_id = uri.split(':')[4]\n results = sp.user_playlist(username, playlist_id)\n for item in results['tracks']['items']:\n data.playlists[playlist['id']]['songs'] += [item['track']['id']]\n for playlist_id in data.playlists:\n for song_id in data.playlists[playlist_id]['songs']:\n if (song_id not in data.songs):\n song = sp.track(song_id)\n features = sp.audio_features([song_id])[0]\n data.songs[song_id] = {\n 'title': strip(song['name']),\n 'artist_id': song['artists'][0]['id'],\n 'artist_name': strip(song['artists'][0]['name']),\n 'album_name': strip(song['album']['name']),\n 'acousticness': strip(features['acousticness']),\n 'danceability': strip(features['danceability']),\n 'energy': strip(features['energy']),\n 'liveness': strip(features['liveness']),\n 'loudness': strip(features['loudness']),\n 'duration_ms': strip(song['duration_ms']),\n 'popularity': strip(song['popularity']),\n 'speechiness': strip(features['speechiness']),\n 'tempo': strip(features['tempo']),\n 'valence': strip(features['valence'])\n }\n\n\n################################################################################\n\n# OOP\n# Sidebar selects input source\n\n\nclass Sidebar(Button):\n def __init__(self, data, text, row, list_of_ids):\n self.row = row\n super().__init__(data, text, 1.75 / data.columns,\n (row + 1 / 2) / data.rows,\n refresh_view, 3 / data.columns, 1 / data.rows,\n text_color=gray)\n self.list_of_ids = copy.deepcopy(list_of_ids)\n\n def is_selected(self, data):\n return (data.selected_list == self.text)\n\n def values(self, data):\n del data.values[:]\n if (data.is_songs):\n for song_id in copy.deepcopy(self.list_of_ids):\n data.values += [data.songs[song_id][data.parameter]]\n\n def press(self, data):\n data.selected_list = self.text\n data.is_songs = True\n data.is_artists = False\n data.start_song = 0\n data.sort_mode = 0\n del data.song_ids[:]\n del data.artist_ids[:]\n del data.artist_song_ids[:]\n data.song_ids += self.list_of_ids\n self.values(data)\n refresh_view(data)\n analyze_view(data)\n if (data.mode == 'analyze'):\n analyze_buttons(data)\n else:\n simplify_buttons(data)\n\n def draw(self, canvas, data):\n if (self.is_selected(data)):\n self.text_fill = white\n self.update_dimensions(data)\n canvas.create_text(self.x1 + 1 / data.columns / 3, self.cy,\n text=truncate(self.text, self.width),\n font='Proxima 10',\n fill=self.text_fill, anchor=W)\n if self.is_selected(data):\n canvas.create_rectangle(0, self.y1, data.width / data.columns / 9,\n self.y2, fill=green_hover)\n\n\n################################################################################\n\n# OOP\n# Displays top artists for a user & their the artists' 10 top songs\n\nclass ArtistSidebar(Sidebar):\n def __init__(self, data, text, row, list_of_artist_ids):\n super().__init__(data, text, row, list_of_artist_ids)\n\n def press(self, data):\n data.selected_list = self.text\n data.is_songs = False\n data.is_artists = True\n data.start_song = 0\n data.start_artist = 0\n data.sort_mode = 0\n del data.song_ids[:]\n del data.artist_ids[:]\n del data.artist_song_ids[:]\n data.artist_ids += self.list_of_ids\n refresh_view(data)\n simplify_buttons(data)\n\n\n################################################################################\n\n# OOP\n\n\nclass SongHeader(Button):\n def __init__(self, data, text, position, function):\n self.position = position\n width_scale = 1 / data.columns\n height_scale = 1 / data.rows\n cy_scale = (1 / 2) * height_scale\n self.symbol = 
''\n if (self.position == 0):\n cx_scale = 4.25 / data.columns\n elif (self.position == 1):\n cx_scale = 8.25 / data.columns\n elif (self.position == 2):\n cx_scale = 11.5 / data.columns\n elif (self.position == 3):\n cx_scale = 15.5 / data.columns\n super().__init__(data, text, cx_scale, cy_scale, function,\n width_scale=width_scale, height_scale=height_scale,\n color=None, color_hover=None, text_color=gray,\n text_color_hover=white, text_size=10)\n\n def unpress(self, data):\n pass\n\n def get_symbol(self, data):\n if (self.position == 0):\n if (data.sort_mode == 1):\n self.symbol = '/\\\\'\n elif (data.sort_mode == 2):\n self.symbol = '\\\\/'\n else:\n self.symbol = ''\n elif (self.position == 1):\n if (data.sort_mode == 3):\n self.symbol = '/\\\\'\n elif (data.sort_mode == 4):\n self.symbol = '\\\\/'\n else:\n self.symbol = ''\n elif (self.position == 2):\n if (data.sort_mode == 5):\n self.symbol = '/\\\\'\n elif (data.sort_mode == 6):\n self.symbol = '\\\\/'\n else:\n self.symbol = ''\n elif (self.position == 3):\n if (data.sort_mode == 7):\n self.symbol = '/\\\\'\n elif (data.sort_mode == 8):\n self.symbol = '\\\\/'\n else:\n self.symbol = ''\n else:\n self.symbol = ''\n\n def draw(self, canvas, data):\n if (data.is_songs):\n self.update_dimensions(data)\n self.get_symbol(data)\n canvas.create_text(self.x1, self.cy, text=self.text,\n fill=self.text_fill, font='Proxima 10', anchor=W)\n canvas.create_text(self.x1 - 10, self.cy, text=self.symbol,\n fill=green, font='Proxima 10', anchor=E)\n\n\n################################################################################\n\n# OOP\n# Slightly different from SongHeader for functions and postitions\n\n\nclass ArtistHeader(Button):\n def __init__(self, data, text, position, function):\n self.position = position\n width_scale = 1 / data.columns\n height_scale = 1 / data.rows\n cy_scale = (1 / 2) * height_scale\n self.symbol = ''\n if (self.position == 0):\n cx_scale = 4.25 / data.columns\n elif (self.position == 1):\n cx_scale = 7.75 / data.columns\n elif (self.position == 2):\n cx_scale = 11.5 / data.columns\n elif (self.position == 3):\n cx_scale = 15.5 / data.columns\n super().__init__(data, text, cx_scale, cy_scale, function,\n width_scale=width_scale, height_scale=height_scale,\n color=None, color_hover=None, text_color=gray,\n text_color_hover=white, text_size=10)\n\n def unpress(self, data):\n pass\n\n def get_symbol(self, data):\n if (self.position == 0):\n if (data.sort_mode == 1):\n self.symbol = '/\\\\'\n elif (data.sort_mode == 2):\n self.symbol = '\\\\/'\n else:\n self.symbol = ''\n elif (self.position == 1):\n if (data.sort_mode == 3):\n self.symbol = '/\\\\'\n elif (data.sort_mode == 4):\n self.symbol = '\\\\/'\n else:\n self.symbol = ''\n elif (self.position == 2):\n if (data.sort_mode == 5):\n self.symbol = '/\\\\'\n elif (data.sort_mode == 6):\n self.symbol = '\\\\/'\n else:\n self.symbol = ''\n elif (self.position == 3):\n if (data.sort_mode == 7):\n self.symbol = '/\\\\'\n elif (data.sort_mode == 8):\n self.symbol = '\\\\/'\n else:\n self.symbol = ''\n else:\n self.symbol = ''\n\n def draw(self, canvas, data):\n if (data.is_artists):\n self.update_dimensions(data)\n self.get_symbol(data)\n canvas.create_text(self.x1, self.cy, text=self.text,\n fill=self.text_fill, font='Proxima 10', anchor=W)\n canvas.create_text(self.x1 - 10, self.cy, text=self.symbol,\n fill=green, font='Proxima 10', anchor=E)\n\n\n################################################################################\n\n# OOP\n# Able to select, add to 
playlist, display artists' top songs, etc.\n\n\nclass Artist(Button):\n def __init__(self, data, row, artist_id):\n self.row = row\n self.artist_id = artist_id\n self.artist_name = data.artists[self.artist_id]['name']\n self.artist_songs = copy.deepcopy(data.artists[artist_id]['songs'])\n text = self.artist_name\n width_scale = 3.5 / data.columns\n height_scale = 1 / data.rows\n cx_scale = 4.75 / data.columns\n cy_scale = (self.row + 1 / 2) * height_scale\n super().__init__(data, text, cx_scale, cy_scale, function=None,\n width_scale=width_scale, height_scale=height_scale,\n color=black_shadow, color_hover=gray_hover,\n text_color=white, text_color_hover=white,\n text_size=10)\n self.update_dimensions(data)\n self.line_fill = gray_hover\n self.plus_color = green\n self.plus_color_hover = green_hover\n self.minus_color = red\n self.minus_color_hover = red_hover\n self.plus_fill = self.plus_color\n self.minus_fill = self.minus_color\n self.symbol_size = 10\n\n def is_selected(self, data):\n return (self.artist_id in data.selected_artists)\n\n def is_all_staged(self, data):\n for song_id in self.artist_songs:\n if (song_id not in data.stage_ids):\n return False\n return True\n\n def get_id(self):\n return self.artist_id\n\n def get_row(self):\n return self.row\n\n def hover(self):\n super().hover()\n self.plus_fill = self.plus_color_hover\n self.minus_fill = self.minus_color_hover\n self.symbol_size = self.text_size + 3\n\n def unhover(self):\n super().unhover()\n self.plus_fill = self.plus_color\n self.minus_fill = self.minus_color\n self.symbol_size = self.text_size + 1\n\n def press(self, data):\n if (data.left_pressed_x <=\n self.x1 + data.width / data.columns * 0.75):\n data.selected_artists.add(self.artist_id)\n if (self.is_all_staged(data)):\n self.remove_from_stage(data)\n else:\n self.add_all_to_stage(data)\n elif (self.artist_id not in data.selected_artists):\n data.selected_artists.add(self.artist_id)\n elif (data.ctrl):\n data.selected_artists.remove(self.artist_id)\n data.selected_ids.clear()\n del data.artist_song_ids[:]\n data.artist_song_ids += self.artist_songs\n refresh_view(data)\n\n def unpress(self, data):\n if (self.artist_id in data.selected_artists):\n data.selected_artists.remove(self.artist_id)\n refresh_view(data)\n\n def add_all_to_stage(self, data):\n for song_id in self.artist_songs:\n if (song_id not in data.stage_ids):\n data.stage_ids += [song_id]\n refresh_view(data)\n\n def remove_from_stage(self, data):\n for song_id in self.artist_songs:\n if (song_id in data.stage_ids):\n data.stage_ids.remove(song_id)\n refresh_view(data)\n\n def draw(self, canvas, data):\n self.update_dimensions(data)\n if (self.is_selected(data)):\n self.fill = gray_select\n self.line_fill = black_shadow\n canvas.create_rectangle(self.x1 + data.width / data.columns / 3,\n self.y1,\n self.x2 - data.width / data.columns / 3,\n self.y2,\n fill=self.fill, width=0)\n canvas.create_line(self.x1 + data.width / data.columns / 3, self.y1,\n self.x2 - data.width / data.columns / 3, self.y1,\n fill=self.line_fill)\n canvas.create_line(self.x1 + data.width / data.columns / 3, self.y2,\n self.x2 - data.width / data.columns / 3, self.y2,\n fill=self.line_fill)\n canvas.create_text(self.x1 + data.width / data.columns * 0.75,\n self.cy,\n text=truncate(self.artist_name,\n data.width / data.columns * 4),\n fill=self.text_fill, anchor=W, font='Proxima 10')\n if (not self.is_all_staged(data)):\n canvas.create_text(self.x1 + data.width / data.columns / 2,\n self.cy,\n text='+', fill=self.plus_fill,\n 
font=(\n 'Proxima %d bold' % (self.symbol_size,)))\n else:\n canvas.create_text(self.x1 + data.width / data.columns / 2,\n self.cy,\n text='-', fill=self.minus_fill,\n font=(\n 'Proxima %d bold' % (self.symbol_size,)))\n\n\n################################################################################\n\n# OOP\n# Stores and displays all songs in playlists, etc. \n\n\nclass Song(Button):\n def __init__(self, data, row, song_id):\n text = ''\n width_scale = (16 - 3) / data.columns\n height_scale = 1 / data.rows\n cx_scale = (3 + 16) / 2 / data.columns\n cy_scale = (row + 1 / 2) * height_scale\n self.line_fill = gray_hover\n self.plus_color = green\n self.plus_color_hover = green_hover\n self.minus_color = red\n self.minus_color_hover = red_hover\n self.plus_fill = self.plus_color\n self.minus_fill = self.minus_color\n self.symbol_size = 10\n self.row = row\n self.song_id = song_id\n self.title = data.songs[self.song_id]['title']\n self.artist_name = data.songs[self.song_id]['artist_name']\n self.album_name = data.songs[self.song_id]['album_name']\n duration_ms = data.songs[self.song_id]['duration_ms']\n self.duration = '%d:%02d' % (duration_ms // 60000,\n duration_ms % 60000 // 1000)\n super().__init__(data, text, cx_scale, cy_scale, function=None,\n width_scale=width_scale, height_scale=height_scale,\n color=black_shadow, color_hover=gray_hover,\n text_color=white, text_color_hover=white, text_size=10)\n\n def is_selected(self, data):\n return (self.song_id in data.selected_ids)\n\n def get_id(self):\n return (self.song_id)\n\n def get_row(self):\n return (self.row)\n\n def hover(self):\n super().hover()\n self.plus_fill = self.plus_color_hover\n self.minus_fill = self.minus_color_hover\n self.symbol_size = self.text_size + 3\n\n def unhover(self):\n super().unhover()\n self.plus_fill = self.plus_color\n self.minus_fill = self.minus_color\n self.symbol_size = self.text_size + 1\n\n def press(self, data):\n if (data.left_pressed_x <= self.x1 + data.width / data.columns * 0.75):\n data.selected_ids.add(self.song_id)\n if (self.song_id in data.stage_ids):\n remove_selected_songs_from_stage(data)\n else:\n add_selected_songs_to_stage(data)\n elif (self.song_id not in data.selected_ids):\n data.selected_ids.add(self.song_id)\n elif (data.ctrl):\n data.selected_ids.remove(self.song_id)\n if (data.mode == 'simplify'):\n refresh_view(data)\n elif (data.mode == 'analyze'):\n analyze_view(data)\n\n def unpress(self, data):\n if (self.song_id in data.selected_ids):\n data.selected_ids.remove(self.song_id)\n if (data.mode == 'simplify'):\n refresh_view(data)\n elif (data.mode == 'analyze'):\n analyze_view(data)\n\n def draw(self, canvas, data):\n self.update_dimensions(data)\n if (self.is_selected(data)):\n self.fill = gray_select\n self.line_fill = black_shadow\n canvas.create_rectangle(self.x1 + data.width / data.columns / 3,\n self.y1,\n self.x2 - data.width / data.columns / 3,\n self.y2,\n fill=self.fill, width=0)\n canvas.create_line(self.x1 + data.width / data.columns / 3, self.y1,\n self.x2 - data.width / data.columns / 3, self.y1,\n fill=self.line_fill)\n canvas.create_line(self.x1 + data.width / data.columns / 3, self.y2,\n self.x2 - data.width / data.columns / 3, self.y2,\n fill=self.line_fill)\n canvas.create_text(self.x1 + data.width / data.columns * 0.75, self.cy,\n text=truncate(self.title,\n data.width / data.columns * 4),\n fill=self.text_fill, anchor=W, font='Proxima 10')\n canvas.create_text(data.width * 7.75 / data.columns, self.cy,\n text=truncate(self.artist_name,\n data.width / 
data.columns * (3.25)),\n fill=self.text_fill, anchor=W, font='Proxima 10')\n canvas.create_text(data.width * 11 / data.columns, self.cy,\n text=truncate(self.album_name,\n data.width / data.columns * (4)),\n fill=self.text_fill, anchor=W, font='Proxima 10')\n canvas.create_text(data.width * 15 / data.columns, self.cy,\n text=self.duration, fill=gray, anchor=W,\n font='Proxima 10')\n if (self.song_id not in data.stage_ids):\n canvas.create_text(self.x1 + data.width / data.columns / 2, self.cy,\n text='+', fill=self.plus_fill,\n font=('Proxima %d bold' % (self.symbol_size,)))\n else:\n canvas.create_text(self.x1 + data.width / data.columns / 2, self.cy,\n text='-', fill=self.minus_fill,\n font=('Proxima %d bold' % (self.symbol_size,)))\n\n\n################################################################################\n\n# OOP\n# Stores/displays each user's top artists' top songs (up to 10)\n\n\nclass ArtistSong(Song):\n def __init__(self, data, rows, song_id):\n super().__init__(data, rows, song_id)\n self.cx_scale = 1 / data.columns * (6.5 + 16) / 2\n self.width_scale = (16 - 6.5) / data.columns\n self.update_dimensions(data)\n\n def press(self, data):\n self.update_dimensions(data)\n if (data.left_pressed_x <= self.x1 + data.width / data.columns * 0.75):\n data.selected_ids.add(self.song_id)\n if (self.song_id in data.stage_ids):\n remove_selected_songs_from_stage(data)\n else:\n add_selected_songs_to_stage(data)\n elif (self.song_id not in data.selected_ids):\n data.selected_ids.add(self.song_id)\n else:\n data.selected_ids.remove(self.song_id)\n\n def unpress(self, data):\n if (self.song_id in data.selected_ids):\n data.selected_ids.remove(self.song_id)\n\n def draw(self, canvas, data):\n if (self.song_id in data.selected_ids):\n self.fill = gray_select\n canvas.create_rectangle(self.x1 + data.width / data.columns / 3,\n self.y1,\n self.x2 - data.width / data.columns / 3,\n self.y2,\n fill=self.fill, width=0)\n canvas.create_line(self.x1 + data.width / data.columns / 3, self.y1,\n self.x2 - data.width / data.columns / 3, self.y1,\n fill=self.line_fill)\n canvas.create_line(self.x1 + data.width / data.columns / 3, self.y2,\n self.x2 - data.width / data.columns / 3, self.y2,\n fill=self.line_fill)\n canvas.create_text(self.x1 + data.width / data.columns * 0.75, self.cy,\n text=truncate(self.title,\n data.width / data.columns * 4),\n fill=self.text_fill, anchor=W, font='Proxima 10')\n canvas.create_text(data.width * 11 / data.columns, self.cy,\n text=truncate(self.album_name,\n data.width / data.columns * (4)),\n fill=self.text_fill, anchor=W, font='Proxima 10')\n canvas.create_text(data.width * 15 / data.columns, self.cy,\n text=self.duration, fill=gray, anchor=W,\n font='Proxima 10')\n if (self.song_id not in data.stage_ids):\n canvas.create_text(self.x1 + data.width / data.columns / 2, self.cy,\n text='+', fill=self.plus_fill,\n font=('Proxima %d bold' % (self.symbol_size,)))\n else:\n canvas.create_text(self.x1 + data.width / data.columns / 2, self.cy,\n text='-', fill=self.minus_fill,\n font=('Proxima %d bold' % (self.symbol_size,)))\n\n\n################################################################################\n\n# OOP\n# Stores and displays the staging playlist\n\n\nclass StageSong(Song):\n def __init__(self, data, row, song_id):\n super().__init__(data, row, song_id)\n self.width_scale = 1 / data.columns * 3\n self.height_scale = 1 / data.rows * 2 / 3\n self.cx_scale = 1 / data.columns * 18\n self.cy_scale = self.height_scale * (row + 6.5 / 2)\n self.fill = 
black_background\n self.color = black_background\n self.color_hover = gray_hover\n self.text_color = white\n self.text_color_hover = white\n self.row = row\n self.song_id = song_id\n self.title = data.songs[self.song_id]['title']\n self.artist_name = data.songs[self.song_id]['artist_name']\n self.update_dimensions(data)\n\n def is_selected(self, data):\n return (self.song_id in data.selected_ids)\n\n def press(self, data):\n if (data.left_pressed_x <= self.x1 + 20):\n remove_selected_songs_from_stage(data)\n elif (self.song_id not in data.selected_ids):\n data.selected_ids.add(self.song_id)\n elif (data.ctrl):\n data.selected_ids.remove(self.song_id)\n refresh_view(data)\n\n def unpress(self, data):\n if (self.song_id in data.selected_ids):\n data.selected_ids.remove(self.song_id)\n refresh_view(data)\n\n def draw(self, canvas, data):\n if (self.row < 40):\n self.update_dimensions(data)\n if (self.is_selected(data)):\n self.fill = gray_select\n canvas.create_rectangle(self.x1, self.y1, self.x2, self.y2,\n fill=self.fill, width=0)\n canvas.create_text(self.x1 + 15, self.cy, text=truncate(\n ('%d. %s | %s') % (self.row + data.start_stage, self.title,\n self.artist_name), self.width),\n fill=self.text_fill,\n font='Proxima 9', anchor=W)\n canvas.create_text(self.x1 + 5, self.cy, text='-',\n fill=self.minus_fill,\n font=('Proxima %d bold' % (self.symbol_size,)))\n\n\n################################################################################\n\n# SIMPLIFY\n# MODEL\n\n# Creates Buttons/Sidebars/Headers for 'simplify'\n# OOP to easily loop through many similar data types/values\n\n\ndef simplify_buttons(data):\n del data.buttons[:]\n data.buttons += [\n Button(data, '+ ADD TO PLAYLIST +', 18 / data.columns, 1 / data.rows,\n add_selected_songs_to_stage, 3 / data.columns, .95 / data.rows,\n green, green_hover),\n Button(data, '- REMOVE FROM PLAYLIST -', 18 / data.columns,\n 2 / data.rows,\n remove_selected_songs_from_stage, 3 / data.columns,\n .95 / data.rows,\n red, red_hover),\n Button(data, 'CREATE & ANALYZE PLAYLISTS >>', 18 / data.columns,\n 29 / data.rows, analyze_init, 3 / data.columns, .95 / data.rows,\n blue,\n blue_hover),\n Sidebar(data, 'YOUR TOP SONGS', 0 - data.start_list, data.top_songs),\n Sidebar(data, 'Short Term Top Songs', 1 - data.start_list,\n data.songs_top['short_term']),\n Sidebar(data, 'Medium Term Top Songs', 2 - data.start_list,\n data.songs_top['medium_term']),\n Sidebar(data, 'Long Term Top Songs', 3 - data.start_list,\n data.songs_top['long_term']),\n ArtistSidebar(data, 'YOUR TOP ARTISTS', 5 - data.start_list,\n data.top_artists),\n ArtistSidebar(data, 'Short Term Top Artists', 6 - data.start_list,\n data.artists_top['short_term']),\n ArtistSidebar(data, 'Medium Term Top Artists', 7 - data.start_list,\n data.artists_top['medium_term']),\n ArtistSidebar(data, 'Long Term Top Artists', 8 - data.start_list,\n data.artists_top['long_term']),\n Sidebar(data, 'YOUR PLAYLISTS', 10 - data.start_list, data.stage_ids)\n ]\n for offset, playlist_id in enumerate(data.playlists):\n data.buttons += [\n Sidebar(data, truncate(data.playlists[playlist_id]['name'],\n data.width / data.columns * 3),\n 11 + offset - data.start_list,\n data.playlists[playlist_id]['songs'])\n ]\n if (data.is_songs):\n data.buttons += [\n SongHeader(data, truncate('TITLE', data.width / data.columns * 3),\n 0,\n order_songs_by_title),\n SongHeader(data, truncate('ARTIST', data.width / data.columns * 3),\n 1,\n order_songs_by_artist),\n SongHeader(data, truncate('ALBUM', data.width / data.columns * 
3),\n 2,\n order_songs_by_album),\n SongHeader(data, truncate('TIME', data.width / data.columns * 3), 3,\n order_songs_by_time)\n ]\n elif (data.is_artists):\n data.buttons += [\n ArtistHeader(data,\n truncate('ARTIST', data.width / data.columns * 3),\n 0, order_artists_by_name),\n ArtistHeader(data, truncate('TITLE', data.width / data.columns * 3),\n 1, order_artists_by_title),\n ArtistHeader(data, truncate('ALBUM', data.width / data.columns * 3),\n 2, order_artists_by_album),\n ArtistHeader(data, truncate('TIME', data.width / data.columns * 3),\n 3, order_artists_by_time)\n ]\n if (len(data.stage_ids) > 39):\n data.buttons += [\n Button(data, '/\\\\', 19.75 / data.columns, 3 / data.rows,\n stage_to_top,\n text_color=gray, text_color_hover=white),\n Button(data, '\\\\/', 19.75 / data.columns, 4 / data.rows,\n stage_to_bottom, text_color=gray, text_color_hover=white),\n ]\n\n\n################################################################################\n\n# SIMPLIFY\n# MODEL\n\n# Every time the view is changed (scrolling, new playlist, etc.)\n# This function updates the view without desctructively modifying the source\n\n\ndef refresh_view(data):\n if (data.mode == 'analyze'):\n analyze_view(data)\n return\n del data.stage_songs[:]\n del data.viewing_songs[:]\n del data.viewing_artists[:]\n del data.viewing_artist_songs[:]\n for row, song_id in enumerate(data.stage_ids[data.start_stage:]):\n data.stage_songs += [StageSong(data, row + 1, song_id)]\n if (data.is_songs):\n for row, song_id in enumerate(data.song_ids[data.start_song:]):\n data.viewing_songs += [Song(data, row + 1, song_id)]\n elif (data.is_artists):\n for row, artist_id in enumerate(data.artist_ids[data.start_artist:]):\n data.viewing_artists += [Artist(data, row + 1, artist_id)]\n for row, song_id in enumerate(data.artist_song_ids):\n data.viewing_artist_songs += [ArtistSong(data, row + 1, song_id)]\n\n\n################################################################################\n\n# SIMPLIFY\n\n# CONTROLLER\n# SORTING\n\n# Following functions sort songs/artists by given parameters\n# Takes case accents/not normal lettering into account\n\n\ndef order_songs_by_title(data):\n if (data.sort_mode == 0):\n del data.original_ids[:]\n data.original_ids += data.song_ids\n data.sort_mode = 1\n elif (data.sort_mode == 1):\n data.sort_mode = 2\n elif (data.sort_mode == 2):\n data.sort_mode = 0\n else:\n data.sort_mode = 1\n result = list()\n if (data.sort_mode == 0):\n result += data.original_ids\n elif (data.sort_mode == 1):\n sorting_dictionary = dict()\n for song_id in data.song_ids:\n name = sort_format(data.songs[song_id]['title'])\n if (name in sorting_dictionary):\n sorting_dictionary[name] += [song_id]\n else:\n sorting_dictionary[name] = [song_id]\n for name in sorted(sorting_dictionary.keys()):\n result += sorted(sorting_dictionary[name])\n elif (data.sort_mode == 2):\n result += data.song_ids[::-1]\n del data.song_ids[:]\n data.song_ids += result\n data.start_song = 0\n refresh_view(data)\n\n\ndef order_songs_by_artist(data):\n if (data.sort_mode == 0):\n del data.original_ids[:]\n data.original_ids += data.song_ids\n data.sort_mode = 3\n elif (data.sort_mode == 3):\n data.sort_mode = 4\n elif (data.sort_mode == 4):\n data.sort_mode = 0\n else:\n data.sort_mode = 3\n result = list()\n if (data.sort_mode == 0):\n result += data.original_ids\n elif (data.sort_mode == 3):\n sorting_dictionary = dict()\n for song_id in data.song_ids:\n artist_name = sort_format(data.songs[song_id]['artist_name'])\n if (artist_name in 
sorting_dictionary):\n sorting_dictionary[artist_name] += [song_id]\n else:\n sorting_dictionary[artist_name] = [song_id]\n for artist_name in sorted(sorting_dictionary.keys()):\n result += sorted(sorting_dictionary[artist_name])\n elif (data.sort_mode == 4):\n result += data.song_ids[::-1]\n del data.song_ids[:]\n data.song_ids += result\n data.start_song = 0\n refresh_view(data)\n\n\ndef order_songs_by_album(data):\n if (data.sort_mode == 0):\n del data.original_ids[:]\n data.original_ids += data.song_ids\n data.sort_mode = 5\n elif (data.sort_mode == 5):\n data.sort_mode = 6\n elif (data.sort_mode == 6):\n data.sort_mode = 0\n else:\n data.sort_mode = 5\n result = list()\n if (data.sort_mode == 0):\n result += data.original_ids\n elif (data.sort_mode == 5):\n sorting_dictionary = dict()\n for song_id in data.song_ids:\n album_name = sort_format(data.songs[song_id]['album_name'])\n if (album_name in sorting_dictionary):\n sorting_dictionary[album_name] += [song_id]\n else:\n sorting_dictionary[album_name] = [song_id]\n for album_name in sorted(sorting_dictionary.keys()):\n result += sorted(sorting_dictionary[album_name])\n elif (data.sort_mode == 6):\n result += data.song_ids[::-1]\n del data.song_ids[:]\n data.song_ids += result\n data.start_song = 0\n refresh_view(data)\n\n\ndef order_songs_by_time(data):\n if (data.sort_mode == 0):\n del data.original_ids[:]\n data.original_ids += data.song_ids\n data.sort_mode = 7\n elif (data.sort_mode == 7):\n data.sort_mode = 8\n elif (data.sort_mode == 8):\n data.sort_mode = 0\n else:\n data.sort_mode = 7\n result = list()\n if (data.sort_mode == 0):\n result += data.original_ids\n elif (data.sort_mode == 7):\n sorting_dictionary = dict()\n for song_id in data.song_ids:\n duration_ms = data.songs[song_id]['duration_ms']\n if (duration_ms in sorting_dictionary):\n sorting_dictionary[duration_ms] += [song_id]\n else:\n sorting_dictionary[duration_ms] = [song_id]\n for duration_ms in sorted(sorting_dictionary.keys()):\n result += sorted(sorting_dictionary[duration_ms])\n elif (data.sort_mode == 8):\n result += data.song_ids[::-1]\n del data.song_ids[:]\n data.song_ids += result\n data.start_song = 0\n refresh_view(data)\n\n\ndef order_artists_by_name(data):\n if (data.sort_mode == 0):\n del data.original_ids[:]\n data.original_ids += data.artist_ids\n data.sort_mode = 1\n elif (data.sort_mode == 1):\n data.sort_mode = 2\n elif (data.sort_mode == 2):\n data.sort_mode = 0\n else:\n data.sort_mode = 1\n result = list()\n if (data.sort_mode == 0):\n result += data.original_ids\n elif (data.sort_mode == 1):\n sorting_dictionary = dict()\n for artist_id in data.artist_ids:\n artist_name = sort_format(data.artists[artist_id]['name'])\n if (artist_name in sorting_dictionary):\n sorting_dictionary[artist_name] += [artist_id]\n else:\n sorting_dictionary[artist_name] = [artist_id]\n for artist_name in sorted(sorting_dictionary.keys()):\n result += sorted(sorting_dictionary[artist_name])\n elif (data.sort_mode == 2):\n result += data.artist_ids[::-1]\n del data.artist_ids[:]\n data.artist_ids += result\n data.start_artist = 0\n refresh_view(data)\n\n\ndef order_artists_by_title(data):\n if (data.sort_mode == 0):\n del data.original_ids[:]\n data.original_ids += data.artist_song_ids\n data.sort_mode = 3\n elif (data.sort_mode == 3):\n data.sort_mode = 4\n elif (data.sort_mode == 4):\n data.sort_mode = 0\n else:\n data.sort_mode = 3\n result = list()\n if (data.sort_mode == 0):\n result += data.original_ids\n elif (data.sort_mode == 3):\n sorting_dictionary = 
dict()\n for song_id in data.artist_song_ids:\n name = sort_format(data.songs[song_id]['title'])\n if (name in sorting_dictionary):\n sorting_dictionary[name] += [song_id]\n else:\n sorting_dictionary[name] = [song_id]\n for name in sorted(sorting_dictionary.keys()):\n result += sorted(sorting_dictionary[name])\n elif (data.sort_mode == 4):\n result += data.artist_song_ids[::-1]\n del data.artist_song_ids[:]\n data.artist_song_ids += result\n data.start_artist = 0\n refresh_view(data)\n\n\ndef order_artists_by_album(data):\n if (data.sort_mode == 0):\n del data.original_ids[:]\n data.original_ids += data.artist_song_ids\n data.sort_mode = 5\n elif (data.sort_mode == 5):\n data.sort_mode = 6\n elif (data.sort_mode == 6):\n data.sort_mode = 0\n else:\n data.sort_mode = 5\n result = list()\n if (data.sort_mode == 0):\n result += data.original_ids\n elif (data.sort_mode == 5):\n sorting_dictionary = dict()\n for song_id in data.artist_song_ids:\n album_name = sort_format(data.songs[song_id]['album_name'])\n if (album_name in sorting_dictionary):\n sorting_dictionary[album_name] += [song_id]\n else:\n sorting_dictionary[album_name] = [song_id]\n for album_name in sorted(sorting_dictionary.keys()):\n result += sorted(sorting_dictionary[album_name])\n elif (data.sort_mode == 6):\n result += data.artist_song_ids[::-1]\n del data.artist_song_ids[:]\n data.artist_song_ids += result\n data.start_artist = 0\n refresh_view(data)\n\n\ndef order_artists_by_time(data):\n if (data.sort_mode == 0):\n del data.original_ids[:]\n data.original_ids += data.artist_song_ids\n data.sort_mode = 7\n elif (data.sort_mode == 7):\n data.sort_mode = 8\n elif (data.sort_mode == 8):\n data.sort_mode = 0\n else:\n data.sort_mode = 7\n result = list()\n if (data.sort_mode == 0):\n result += data.original_ids\n elif (data.sort_mode == 7):\n sorting_dictionary = dict()\n for song_id in data.artist_song_ids:\n duration_ms = data.songs[song_id]['duration_ms']\n if (duration_ms in sorting_dictionary):\n sorting_dictionary[duration_ms] += [song_id]\n else:\n sorting_dictionary[duration_ms] = [song_id]\n for duration_ms in sorted(sorting_dictionary.keys()):\n result += sorted(sorting_dictionary[duration_ms])\n elif (data.sort_mode == 8):\n result += data.artist_song_ids[::-1]\n del data.artist_song_ids[:]\n data.artist_song_ids += result\n data.start_artist = 0\n refresh_view(data)\n\n\n################################################################################\n\n# SIMPLIFY\n\n# CONTROLLER\n# Playlist/staging functions for buttons and stage\n\n\ndef add_selected_songs_to_stage(data):\n for song_id in copy.deepcopy(list(data.selected_ids)):\n if (song_id not in data.stage_ids):\n data.stage_ids += [song_id]\n refresh_view(data)\n simplify_buttons(data)\n\n\ndef remove_selected_songs_from_stage(data):\n data.start_stage = 0\n for song_id in copy.deepcopy(list(data.selected_ids)):\n if (song_id in data.stage_ids):\n data.stage_ids.remove(song_id)\n refresh_view(data)\n simplify_buttons(data)\n\n\ndef stage_to_top(data):\n data.start_stage = 0\n refresh_view(data)\n\n\ndef stage_to_bottom(data):\n if (len(data.stage_ids) > 39):\n data.start_stage = len(data.stage_ids) - 39\n refresh_view(data)\n\n\n################################################################################\n\n# SIMPLIFY\n\n# CONTROLLER\n# EVENTS\n\n# MOUSE\n\n\ndef simplify_mouse_moved(data):\n for song in data.viewing_songs:\n if (song.is_within_bounds(data, data.mouse_moved_x,\n data.mouse_moved_y)):\n song.hover()\n else:\n song.unhover()\n\n for button 
in data.buttons:\n if (button.is_within_bounds(data, data.mouse_moved_x,\n data.mouse_moved_y)):\n button.hover()\n else:\n button.unhover()\n\n for song in data.stage_songs:\n if (song.is_within_bounds(data, data.mouse_moved_x,\n data.mouse_moved_y)):\n song.hover()\n else:\n song.unhover()\n\n for artist in data.viewing_artists:\n if (artist.is_within_bounds(data, data.mouse_moved_x,\n data.mouse_moved_y)):\n artist.hover()\n else:\n artist.unhover()\n\n for song in data.viewing_artist_songs:\n if (song.is_within_bounds(data, data.mouse_moved_x,\n data.mouse_moved_y)):\n song.hover()\n else:\n song.unhover()\n\n\n################################################################################\n\n# SIMPLIFY\n# CONTROLLER\n# EVENTS \n\n# LEFT\n# PRESSED \n\n\ndef simplify_left_pressed(data):\n for button in data.buttons:\n if (button.is_within_bounds(data, data.left_pressed_x,\n data.left_pressed_y)):\n button.press(data)\n else:\n button.unpress(data)\n\n for i, song in enumerate(data.viewing_songs):\n if (song.is_within_bounds(data, data.left_pressed_x,\n data.left_pressed_y)):\n if (data.shift):\n data.selected_ids.add(song.get_id())\n for j, song_j in enumerate(data.viewing_songs):\n if (i == j):\n continue\n elif (song_j.is_selected(data)):\n data.selected_ids.clear()\n if (j < i):\n for k in range(j, i + 1):\n data.selected_ids.add(\n data.viewing_songs[k].get_id()\n )\n elif (j > i):\n for k in range(i, j + 1):\n data.selected_ids.add(\n data.viewing_songs[k].get_id()\n )\n break\n else:\n song.press(data)\n elif (not (data.ctrl or data.shift)):\n song.unpress(data)\n\n for i, song in enumerate(data.stage_songs):\n if (song.is_within_bounds(data, data.left_pressed_x,\n data.left_pressed_y)):\n if (data.shift):\n data.selected_ids.add(song.get_id())\n for j, song_j in enumerate(data.stage_songs):\n if (i == j):\n continue\n elif (song_j.is_selected(data)):\n data.selected_ids.clear()\n if (j < i):\n for k in range(j, i + 1):\n data.selected_ids.add(\n data.stage_songs[k].get_id()\n )\n elif (j > i):\n for k in range(i, j + 1):\n data.selected_ids.add(\n data.stage_songs[k].get_id()\n )\n break\n else:\n song.press(data)\n elif (not (data.ctrl or data.shift)):\n song.unpress(data)\n\n for i, artist in enumerate(data.viewing_artists):\n if (artist.is_within_bounds(data, data.left_pressed_x,\n data.left_pressed_y)):\n if (data.shift):\n artist.press(data)\n for j, artist_j in enumerate(data.viewing_artists):\n if (i == j):\n continue\n elif (artist_j.is_selected(data)):\n data.selected_artists.clear()\n if (j < i):\n for k in range(j, i + 1):\n data.viewing_artists[k].press(data)\n elif (j > i):\n for k in range(i, j + 1):\n data.viewing_artists[k].press(data)\n break\n else:\n artist.press(data)\n elif ((data.width / data.columns * 3 < data.left_pressed_x\n < data.width / data.columns * 6) and (\n not (data.ctrl or data.shift))):\n artist.unpress(data)\n\n for i, song in enumerate(data.viewing_artist_songs):\n if (song.is_within_bounds(data, data.left_pressed_x,\n data.left_pressed_y)):\n if (data.shift):\n data.selected_ids.add(song.get_id())\n for j, song_j in enumerate(data.viewing_artist_songs):\n if (i == j):\n continue\n elif (song_j.is_selected(data)):\n data.selected_ids.clear()\n if (j < i):\n for k in range(j, i + 1):\n data.selected_ids.add(\n data.viewing_artist_songs[k].get_id()\n )\n elif (j > i):\n for k in range(i, j + 1):\n data.selected_ids.add(\n data.viewing_artist_songs[k].get_id()\n )\n break\n else:\n song.press(data)\n elif (not (data.ctrl or data.shift)):\n 
song.unpress(data)\n\n\n################################################################################\n\n# SIMPLIFY\n# CONTROLLER\n# EVENTS\n\n# LEFT\n# RELEASED\n\n\ndef simplify_left_released(data):\n    if (data.left_released_x >= data.width / data.columns * 16):\n        add_selected_songs_to_stage(data)\n    refresh_view(data)\n\n\n################################################################################\n\n# SIMPLIFY\n# CONTROLLER\n# EVENTS\n\n# KEY\n\n\ndef simplify_key_pressed(data):\n    if (data.mouse_moved_x < (data.width / data.columns * 3)):\n        lists_key_pressed(data)\n    elif (data.mouse_moved_x > (data.width / data.columns * 16)):\n        stage_key_pressed(data)\n    elif (data.is_songs):\n        songs_key_pressed(data)\n    elif (data.is_artists):\n        if (data.mouse_moved_x < (data.width / data.columns * 7)):\n            artists_key_pressed(data)\n        elif (data.mouse_moved_x > (data.width / data.columns * 7)):\n            artist_songs_key_pressed(data)\n\n\ndef lists_key_pressed(data):\n    if (data.keysym_pressed == 'Escape'):\n        lists_escape(data)\n    elif (data.keysym_pressed == 'Up'):\n        lists_move_up(data)\n    elif (data.keysym_pressed == 'Down'):\n        lists_move_down(data)\n\n\ndef stage_key_pressed(data):\n    if (data.keysym_pressed == 'Escape'):\n        stage_escape(data)\n    elif ((data.keysym_pressed == 'a') and data.ctrl):\n        stage_select_all(data)\n    elif (data.keysym_pressed == 'Up'):\n        if (data.shift):\n            stage_shift_up(data)\n        else:\n            stage_move_up(data)\n    elif (data.keysym_pressed == 'Down'):\n        if (data.shift):\n            stage_shift_down(data)\n        else:\n            stage_move_down(data)\n\n\ndef songs_key_pressed(data):\n    if (data.keysym_pressed == 'Escape'):\n        songs_escape(data)\n    elif ((data.keysym_pressed == 'a') and data.ctrl):\n        songs_select_all(data)\n    elif (data.keysym_pressed == 'Up'):\n        if (data.shift):\n            songs_shift_up(data)\n        else:\n            songs_move_up(data)\n    elif (data.keysym_pressed == 'Down'):\n        if (data.shift):\n            songs_shift_down(data)\n        else:\n            songs_move_down(data)\n\n\ndef artists_key_pressed(data):\n    if (data.keysym_pressed == 'Escape'):\n        artists_escape(data)\n    elif ((data.keysym_pressed == 'a') and data.ctrl):\n        artists_select_all(data)\n    elif (data.keysym_pressed == 'Up'):\n        if (data.shift):\n            artists_shift_up(data)\n        else:\n            artists_move_up(data)\n    elif (data.keysym_pressed == 'Down'):\n        if (data.shift):\n            artists_shift_down(data)\n        else:\n            artists_move_down(data)\n\n\ndef artist_songs_key_pressed(data):\n    if (data.keysym_pressed == 'Escape'):\n        artist_songs_escape(data)\n    elif ((data.keysym_pressed == 'a') and data.ctrl):\n        artist_songs_select_all(data)\n    elif (data.keysym_pressed == 'Up'):\n        if (data.shift):\n            artist_songs_shift_up(data)\n        else:\n            artist_songs_move_up(data)\n    elif (data.keysym_pressed == 'Down'):\n        if (data.shift):\n            artist_songs_shift_down(data)\n        else:\n            artist_songs_move_down(data)\n\n\n
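# --- Illustrative sketch (editor's addition, not part of the original\n# program) ---\n# The *_key_pressed dispatchers above repeat the same if/elif shape for\n# every pane. Assuming the handler signatures stay handler(data), the\n# chains could be table-driven instead; the helper below is hypothetical\n# and is never called by this module.\ndef _dispatch_key_sketch(data, handlers):\n    # handlers: dict mapping a Tk keysym such as 'Up' to a handler(data).\n    handler = handlers.get(data.keysym_pressed)\n    if (handler is not None):\n        handler(data)\n\n\n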
def lists_escape(data):\n    data.selected_list = ''\n    data.is_songs = False\n    data.is_artists = False\n    del data.song_ids[:]\n    del data.artist_ids[:]\n    del data.artist_song_ids[:]\n    refresh_view(data)\n    simplify_buttons(data)\n\n\ndef lists_move_up(data):\n    sidebar_buttons = list()\n    for button in data.buttons:\n        if (isinstance(button, Sidebar)):\n            sidebar_buttons += [button]\n    for i, sidebar_button in enumerate(sidebar_buttons):\n        if (data.ctrl):\n            data.start_list = 0\n            sidebar_buttons[0].press(data)\n            return\n        elif ((i > 0) and sidebar_button.is_selected(data)):\n            if ((sidebar_button.get_row() == 0) and (data.start_list > 0)):\n                data.start_list -= 1\n            sidebar_buttons[i - 1].press(data)\n            break\n\n\ndef lists_move_down(data):\n    sidebar_buttons = list()\n    for button in data.buttons:\n        if (isinstance(button, Sidebar)):\n            sidebar_buttons += [button]\n    for i, sidebar_button in enumerate(sidebar_buttons):\n        if (data.ctrl):\n            sidebar_buttons[-1].press(data)\n            return\n        if ((i < (len(sidebar_buttons) - 1)) and sidebar_button.is_selected(\n                data)):\n            if (sidebar_button.get_row() == (data.rows - 1)):\n                data.start_list += 1\n            sidebar_buttons[i + 1].press(data)\n            break\n\n\ndef stage_escape(data):\n    data.selected_ids.clear()\n    refresh_view(data)\n\n\ndef stage_select_all(data):\n    data.selected_ids.update(set(copy.deepcopy(data.stage_ids)))\n    refresh_view(data)\n\n\ndef stage_shift_up(data):\n    for i, song in enumerate(data.stage_songs):\n        if song.is_selected(data):\n            if ((i == 0) and (data.start_stage == 0)):\n                continue\n            elif ((i == 0) and (data.start_stage > 0)):\n                data.start_stage -= 1\n                refresh_view(data)\n                data.selected_ids.add(data.stage_songs[i].get_id())\n                refresh_view(data)\n                break\n            else:\n                data.selected_ids.add(data.stage_songs[i - 1].get_id())\n                refresh_view(data)\n                break\n\n\ndef stage_move_up(data):\n    if (data.ctrl):\n        data.start_stage = 0\n        data.selected_ids.clear()\n        data.selected_ids.add(data.stage_ids[0])\n        refresh_view(data)\n        return\n    for i, song in enumerate(data.stage_songs):\n        if ((i == 0) and (data.start_stage == 0)):\n            continue\n        elif song.is_selected(data):\n            if ((i == 0) and (data.start_stage > 0)):\n                data.selected_ids.clear()\n                data.start_stage -= 1\n                refresh_view(data)\n                data.selected_ids.add(data.stage_songs[i].get_id())\n                refresh_view(data)\n                return\n            elif (i > 0):\n                data.selected_ids.clear()\n                data.selected_ids.add(data.stage_songs[i - 1].get_id())\n                refresh_view(data)\n                return\n    if (data.start_stage > 0):\n        data.start_stage -= 1\n        refresh_view(data)\n    return\n\n\ndef stage_shift_down(data):\n    for i, song in enumerate(data.stage_songs[::-1]):\n        index = len(data.stage_songs) - i - 1\n        if ((0 < song.get_row() < 40) and song.is_selected(data)):\n            if (index == len(data.stage_songs) - 1):\n                return\n            elif (song.get_row() == 39):\n                if (index < len(data.stage_songs) - 1):\n                    data.start_stage += 1\n                    refresh_view(data)\n                    data.selected_ids.add(data.stage_songs[index].get_id())\n                    refresh_view(data)\n                    return\n            else:\n                data.selected_ids.add(data.stage_songs[index + 1].get_id())\n                refresh_view(data)\n                return\n\n\ndef stage_move_down(data):\n    if (data.ctrl):\n        data.start_stage = len(data.stage_ids) - 39\n        data.selected_ids.clear()\n        data.selected_ids.add(data.stage_ids[-1])\n        refresh_view(data)\n        return\n    for i, song in enumerate(data.stage_songs[::-1]):\n        index = len(data.stage_songs) - i - 1\n        if ((0 < song.get_row() < 40) and song.is_selected(data)):\n            if ((song.get_row() == 39) and (index < len(data.stage_songs) - 1)):\n                data.selected_ids.clear()\n                data.start_stage += 1\n                refresh_view(data)\n                data.selected_ids.add(data.stage_songs[index].get_id())\n                refresh_view(data)\n                return\n            elif ((song.get_row() < 39) and (\n                    index < len(data.stage_songs) - 1)):\n                data.selected_ids.clear()\n                data.selected_ids.add(data.stage_songs[index + 1].get_id())\n                refresh_view(data)\n                return\n    if (data.start_stage + 39 == len(data.stage_ids)):\n        return\n    else:\n        data.start_stage += 1\n        refresh_view(data)\n        return\n\n\ndef songs_escape(data):\n    data.selected_ids.clear()\n    refresh_view(data)\n\n\ndef songs_select_all(data):\n    data.selected_ids.update(set(data.song_ids))\n    refresh_view(data)\n\n\n
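# --- Illustrative sketch (editor's addition, not part of the original\n# program) ---\n# The stage_*/songs_*/artists_* move handlers in this section all keep a\n# fixed-size window of visible rows (39 for the stage, 29 for the song\n# list) anchored at a data.start_* index, scrolling when the selection\n# walks off either edge. The clamping arithmetic, isolated into a\n# hypothetical helper that this module never calls:\ndef _scrolled_start_sketch(start, index, visible_rows, total):\n    # Scroll up when the selection moves above the window.\n    if (index < start):\n        return max(index, 0)\n    # Scroll down when the selection moves below the window.\n    if (index >= start + visible_rows):\n        return min(index - visible_rows + 1, max(total - visible_rows, 0))\n    # Otherwise the window stays put.\n    return start\n\n\n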
def songs_shift_up(data):\n    for i, song in enumerate(data.viewing_songs):\n        if song.is_selected(data):\n            if ((i == 0) and (data.start_song == 0)):\n                continue\n            elif ((i == 0) and (data.start_song > 0)):\n                data.start_song -= 1\n                refresh_view(data)\n                data.selected_ids.add(data.viewing_songs[i].get_id())\n                refresh_view(data)\n                break\n            else:\n                data.selected_ids.add(data.viewing_songs[i - 1].get_id())\n                refresh_view(data)\n                break\n\n\ndef songs_move_up(data):\n    if (data.ctrl):\n        data.start_song = 0\n        data.selected_ids.clear()\n        data.selected_ids.add(data.song_ids[0])\n        refresh_view(data)\n        return\n    for i, song in enumerate(data.viewing_songs):\n        if ((i == 0) and (data.start_song == 0)):\n            continue\n        elif song.is_selected(data):\n            if ((i == 0) and (data.start_song > 0)):\n                data.selected_ids.clear()\n                data.start_song -= 1\n                refresh_view(data)\n                data.selected_ids.add(data.viewing_songs[i].get_id())\n                refresh_view(data)\n                return\n            elif (i > 0):\n                data.selected_ids.clear()\n                data.selected_ids.add(data.viewing_songs[i - 1].get_id())\n                refresh_view(data)\n                return\n    if (data.start_song > 0):\n        data.start_song -= 1\n        refresh_view(data)\n    return\n\n\ndef songs_shift_down(data):\n    for i, song in enumerate(data.viewing_songs[::-1]):\n        index = len(data.viewing_songs) - i - 1\n        if ((0 < song.get_row() < 30) and song.is_selected(data)):\n            if (index == len(data.viewing_songs) - 1):\n                return\n            elif (song.get_row() == 29):\n                if (index < len(data.viewing_songs) - 1):\n                    data.start_song += 1\n                    refresh_view(data)\n                    data.selected_ids.add(data.viewing_songs[index].get_id())\n                    refresh_view(data)\n                    return\n            else:\n                data.selected_ids.add(data.viewing_songs[index + 1].get_id())\n                refresh_view(data)\n                return\n\n\ndef songs_move_down(data):\n    if (data.ctrl):\n        data.start_song = len(data.song_ids) - 29\n        data.selected_ids.clear()\n        data.selected_ids.add(data.song_ids[-1])\n        refresh_view(data)\n        return\n    for i, song in enumerate(data.viewing_songs[::-1]):\n        index = len(data.viewing_songs) - i - 1\n        if ((0 < song.get_row() < 30) and song.is_selected(data)):\n            if ((song.get_row() == 29) and (\n                    index < len(data.viewing_songs) - 1)):\n                data.selected_ids.clear()\n                data.start_song += 1\n                refresh_view(data)\n                data.selected_ids.add(data.viewing_songs[index].get_id())\n                refresh_view(data)\n                return\n            elif ((song.get_row() < 29) and (\n                    index < len(data.viewing_songs) - 1)):\n                data.selected_ids.clear()\n                data.selected_ids.add(data.viewing_songs[index + 1].get_id())\n                refresh_view(data)\n                return\n    if (data.start_song + 29 == len(data.song_ids)):\n        return\n    else:\n        data.start_song += 1\n        refresh_view(data)\n        return\n\n\ndef artists_escape(data):\n    data.selected_ids.clear()\n    data.selected_artists.clear()\n    refresh_view(data)\n\n\ndef artists_select_all(data):\n    data.selected_artists.update(set(data.artist_ids))\n    for artist_id in data.artist_ids:\n        data.selected_ids.update(set(data.artists[artist_id]['songs']))\n    refresh_view(data)\n\n\ndef artists_shift_up(data):\n    for i, artist in enumerate(data.viewing_artists):\n        if artist.is_selected(data):\n            if ((i == 0) and (data.start_artist == 0)):\n                continue\n            elif ((i == 0) and (data.start_artist > 0)):\n                data.start_artist -= 1\n                refresh_view(data)\n                data.selected_artists.add(data.viewing_artists[i].get_id())\n                refresh_view(data)\n                break\n            else:\n                data.selected_artists.add(data.viewing_artists[i - 1].get_id())\n                refresh_view(data)\n                break\n\n\ndef artists_move_up(data):\n    if (data.ctrl):\n        data.start_artist = 0\n        data.selected_artists.clear()\n        data.viewing_artists[0].press(data)\n        refresh_view(data)\n        return\n    for i, artist in enumerate(data.viewing_artists):\n        if ((i == 0) and (data.start_artist == 0)):\n            
continue\n elif artist.is_selected(data):\n if ((i == 0) and (data.start_artist > 0)):\n data.selected_artists.clear()\n data.start_artist -= 1\n refresh_view(data)\n data.viewing_artists[i].press(data)\n refresh_view(data)\n return\n elif (i > 0):\n data.selected_artists.clear()\n data.viewing_artists[i - 1].press(data)\n refresh_view(data)\n return\n if (data.start_artist > 0):\n data.start_artist -= 1\n refresh_view(data)\n return\n\n\ndef artists_shift_down(data):\n for i, song in enumerate(data.viewing_artists[::-1]):\n index = len(data.viewing_artists) - i - 1\n if ((0 < song.get_row() < 30) and song.is_selected(data)):\n if (index == len(data.viewing_artists) - 1):\n return\n elif (song.get_row() == 29):\n if (index < len(data.viewing_artists) - 1):\n data.start_artist += 1\n refresh_view(data)\n data.viewing_artists[index].press(data)\n refresh_view(data)\n return\n else:\n data.viewing_artists[index + 1].press(data)\n refresh_view(data)\n return\n\n\ndef artists_move_down(data):\n if (data.ctrl):\n data.start_artist = len(data.artist_ids) - 29\n data.selected_artists.clear()\n data.viewing_artists[-1].press(data)\n refresh_view(data)\n return\n for i, artist in enumerate(data.viewing_artists[::-1]):\n index = len(data.viewing_artists) - i - 1\n if ((0 < artist.get_row() < 30) and artist.is_selected(data)):\n if ((artist.get_row() == 29) and (\n index < len(data.viewing_artists) - 1)):\n data.selected_artists.clear()\n data.start_artist += 1\n refresh_view(data)\n data.viewing_artists[index].press(data)\n refresh_view(data)\n return\n elif ((artist.get_row() < 29) and (\n index < len(data.viewing_artists) - 1)):\n data.selected_artists.clear()\n data.viewing_artists[index + 1].press(data)\n refresh_view(data)\n return\n if (data.start_artist + 29 == len(data.artist_ids)):\n return\n else:\n data.start_artist += 1\n refresh_view(data)\n return\n\n\ndef artist_songs_escape(data):\n data.selected_ids.clear()\n refresh_view(data)\n\n\ndef artist_songs_select_all(data):\n data.selected_ids.update(set(data.viewing_artist_songs))\n refresh_view(data)\n\n\ndef artist_songs_shift_up(data):\n for i, song in enumerate(data.viewing_artist_songs):\n if song.is_selected(data):\n if ((i == 0) and (data.start_song == 0)):\n continue\n elif ((i == 0) and (data.start_song > 0)):\n data.start_song -= 1\n refresh_view(data)\n data.viewing_artist_songs[i].press(data)\n refresh_view(data)\n break\n else:\n data.viewing_artist_songs[i - 1].press(data)\n refresh_view(data)\n break\n\n\ndef artist_songs_move_up(data):\n if (data.ctrl):\n data.start_song = 0\n data.selected_ids.clear()\n data.selected_ids.add(data.song_ids[0])\n refresh_view(data)\n return\n for i, song in enumerate(data.viewing_artist_songs):\n if ((i == 0) and (data.start_song == 0)):\n continue\n elif song.is_selected(data):\n if ((i == 0) and (data.start_song > 0)):\n data.selected_ids.clear()\n data.start_song -= 1\n refresh_view(data)\n data.viewing_artist_songs[i].press(data)\n refresh_view(data)\n return\n elif (i > 0):\n data.selected_ids.clear()\n data.viewing_artist_songs[i - 1].press(data)\n refresh_view(data)\n return\n if (data.start_song > 0):\n data.start_song -= 1\n refresh_view(data)\n return\n\n\ndef artist_songs_shift_down(data):\n for i, song in enumerate(data.viewing_artist_songs[::-1]):\n index = len(data.viewing_artist_songs) - i - 1\n if ((0 < song.get_row() < 30) and song.is_selected(data)):\n if (index == len(data.viewing_artist_songs) - 1):\n return\n elif (song.get_row() == 29):\n if (index < 
len(data.viewing_artist_songs) - 1):\n                    data.start_song += 1\n                    refresh_view(data)\n                    data.viewing_artist_songs[index].press(data)\n                    refresh_view(data)\n                    return\n            else:\n                data.viewing_artist_songs[index + 1].press(data)\n                refresh_view(data)\n                return\n\n\ndef artist_songs_move_down(data):\n    if (data.ctrl):\n        data.start_song = len(data.song_ids) - 29\n        data.selected_ids.clear()\n        data.selected_ids.add(data.song_ids[-1])\n        refresh_view(data)\n        return\n    for i, song in enumerate(data.viewing_artist_songs[::-1]):\n        index = len(data.viewing_artist_songs) - i - 1\n        if ((0 < song.get_row() < 30) and song.is_selected(data)):\n            if ((song.get_row() == 29) and (\n                    index < len(data.viewing_artist_songs) - 1)):\n                data.selected_ids.clear()\n                data.start_song += 1\n                refresh_view(data)\n                data.viewing_artist_songs[index].press(data)\n                refresh_view(data)\n                return\n            elif ((song.get_row() < 29) and (\n                    index < len(data.viewing_artist_songs) - 1)):\n                data.selected_ids.clear()\n                data.viewing_artist_songs[index + 1].press(data)\n                refresh_view(data)\n                return\n    if (data.start_song + 29 == len(data.song_ids)):\n        return\n    else:\n        data.start_song += 1\n        refresh_view(data)\n        return\n\n\n################################################################################\n\n# SIMPLIFY\n# VIEW\n\n\ndef simplify_redraw_all(canvas, data):\n    canvas.create_rectangle(0, 0, data.width, data.height,\n                            fill=black_background, width=0)\n    canvas.create_rectangle(data.width / data.columns * 3, 0,\n                            data.width / data.columns * 16, data.height,\n                            fill=black_shadow)\n    for song in data.viewing_songs:\n        song.draw(canvas, data)\n    for artist in data.viewing_artists:\n        artist.draw(canvas, data)\n    for song in data.viewing_artist_songs:\n        song.draw(canvas, data)\n    for song in data.stage_songs:\n        song.draw(canvas, data)\n    for button in data.buttons:\n        button.draw(canvas, data)\n\n\n################################################################################\n\nclass AnalyzeSong(Song):\n    def __init__(self, data, row, song_id):\n        super().__init__(data, row, song_id)\n        self.parameter = data.parameter\n        self.time = self.duration\n\n    def draw(self, canvas, data):\n        self.update_dimensions(data)\n        if (self.is_selected(data)):\n            self.fill = gray_select\n            self.line_fill = black_shadow\n        canvas.create_rectangle(self.x1 + data.width / data.columns / 3,\n                                self.y1,\n                                self.x2 - data.width / data.columns / 3,\n                                self.y2,\n                                fill=self.fill, width=0)\n        canvas.create_line(self.x1 + data.width / data.columns / 3, self.y1,\n                           self.x2 - data.width / data.columns / 3, self.y1,\n                           fill=self.line_fill)\n        canvas.create_line(self.x1 + data.width / data.columns / 3, self.y2,\n                           self.x2 - data.width / data.columns / 3, self.y2,\n                           fill=self.line_fill)\n        canvas.create_text(self.x1 + data.width / data.columns * 0.75, self.cy,\n                           text=truncate(self.title,\n                                         data.width / data.columns * 4),\n                           fill=self.text_fill, anchor=W, font='Proxima 10')\n        canvas.create_text(data.width * 7.75 / data.columns, self.cy,\n                           text=truncate(self.artist_name,\n                                         data.width / data.columns * (3.25)),\n                           fill=self.text_fill, anchor=W, font='Proxima 10')\n        canvas.create_text(data.width * 11 / data.columns, self.cy,\n                           text=truncate(self.album_name,\n                                         data.width / data.columns * (4)),\n                           fill=self.text_fill, anchor=W, font='Proxima 10')\n        canvas.create_text(data.width * 15 / data.columns, self.cy,\n                           text=self.duration, fill=gray, anchor=W,\n                           font='Proxima 10')\n        if (self.song_id not in data.stage_ids):\n            canvas.create_text(self.x1 + data.width / data.columns / 2, self.cy,\n                               text='+', fill=self.plus_fill,\n                               font=('Proxima %d 
bold' % (self.symbol_size,)))\n else:\n canvas.create_text(self.x1 + data.width / data.columns / 2, self.cy,\n text='-', fill=self.minus_fill,\n font=('Proxima %d bold' % (self.symbol_size,)))\n\n\n################################################################################\n\n# ANALYZE\n# MODEL\n\ndef analyze_init(data):\n clean(data)\n data.mode = 'analyze'\n analyze_buttons(data)\n\n\ndef analyze_buttons(data):\n del data.buttons[:]\n data.buttons += [\n Button(data, 'DISPLAY HISTOGRAM', 18 / data.columns, 1 / data.rows,\n histogram, 3 / data.columns, .95 / data.rows,\n green, green_hover),\n Button(data, 'DISPLAY DISTRIBUTION PLOT', 18 / data.columns,\n 2 / data.rows,\n distribution_plot, 3 / data.columns,\n .95 / data.rows,\n red, red_hover),\n Button(data, ' << STAGE CUSTOM PLAYLIST', 18 / data.columns,\n 29 / data.rows,\n simplify_init, 3 / data.columns, .95 / data.rows, blue,\n blue_hover),\n Sidebar(data, 'YOUR TOP SONGS', 0 - data.start_list, data.top_songs),\n Sidebar(data, 'Short Term Top Songs', 1 - data.start_list,\n data.songs_top['short_term']),\n Sidebar(data, 'Medium Term Top Songs', 2 - data.start_list,\n data.songs_top['medium_term']),\n Sidebar(data, 'Long Term Top Songs', 3 - data.start_list,\n data.songs_top['long_term']),\n Sidebar(data, 'YOUR CUSTOM PLAYLIST', 5 - data.start_list,\n data.stage_ids)\n ]\n for offset, playlist_id in enumerate(data.playlists):\n data.buttons += [\n Sidebar(data, truncate(data.playlists[playlist_id]['name'],\n data.width / data.columns * 3),\n 6 + offset - data.start_list,\n data.playlists[playlist_id]['songs'])\n ]\n if (data.is_songs):\n data.buttons += [\n SongHeader(data, truncate('TITLE', data.width / data.columns * 3),\n 0,\n order_songs_by_title),\n SongHeader(data, truncate('ARTIST', data.width / data.columns * 3),\n 1,\n order_songs_by_artist),\n SongHeader(data, truncate('ALBUM', data.width / data.columns * 3),\n 2,\n order_songs_by_album),\n SongHeader(data, truncate('TIME', data.width / data.columns * 3), 3,\n order_songs_by_time)\n ]\n\n data.buttons += [\n Button(data, 'acousticness', 18 / data.columns, 4 / data.rows,\n acousticness, 3 / data.columns, .9 / data.rows,\n blue, blue_hover),\n Button(data, 'danceability', 18 / data.columns, 5 / data.rows,\n danceability, 3 / data.columns,\n .9 / data.rows,\n blue, blue_hover),\n Button(data, 'energy', 18 / data.columns, 6 / data.rows,\n energy, 3 / data.columns, .9 / data.rows,\n blue, blue_hover),\n Button(data, 'liveness', 18 / data.columns, 7 / data.rows,\n liveness, 3 / data.columns,\n .9 / data.rows,\n blue, blue_hover),\n Button(data, 'time', 18 / data.columns, 8 / data.rows,\n duration_ms, 3 / data.columns, .9 / data.rows,\n blue, blue_hover),\n Button(data, 'loudness', 18 / data.columns, 9 / data.rows,\n loudness, 3 / data.columns,\n .9 / data.rows,\n blue, blue_hover),\n Button(data, 'popularity', 18 / data.columns, 10 / data.rows,\n popularity, 3 / data.columns, .9 / data.rows,\n blue, blue_hover),\n Button(data, 'speechiness', 18 / data.columns, 11 / data.rows,\n speechiness, 3 / data.columns,\n .9 / data.rows,\n blue, blue_hover),\n Button(data, 'tempo', 18 / data.columns, 12 / data.rows,\n tempo, 3 / data.columns, .9 / data.rows,\n blue, blue_hover),\n Button(data, 'valence', 18 / data.columns, 13 / data.rows,\n valence, 3 / data.columns,\n .9 / data.rows,\n blue, blue_hover),\n Button(data, 'CREATE IN SPOTIFY', 18 / data.columns, 15 / data.rows,\n create_playlist, 3 / data.columns, 1 / data.rows,\n green, green_hover)\n ]\n\n\ndef acousticness(data):\n 
data.parameter = 'acousticness'\n    analyze_view(data)\n\n\ndef danceability(data):\n    data.parameter = 'danceability'\n    analyze_view(data)\n\n\ndef energy(data):\n    data.parameter = 'energy'\n    analyze_view(data)\n\n\ndef liveness(data):\n    data.parameter = 'liveness'\n    analyze_view(data)\n\n\ndef duration_ms(data):\n    data.parameter = 'duration_ms'\n    analyze_view(data)\n\n\ndef loudness(data):\n    data.parameter = 'loudness'\n    analyze_view(data)\n\n\ndef popularity(data):\n    data.parameter = 'popularity'\n    analyze_view(data)\n\n\ndef speechiness(data):\n    data.parameter = 'speechiness'\n    analyze_view(data)\n\n\ndef tempo(data):\n    data.parameter = 'tempo'\n    analyze_view(data)\n\n\ndef valence(data):\n    data.parameter = 'valence'\n    analyze_view(data)\n\n\ndef create_playlist(data):\n    try:\n        token = util.prompt_for_user_token(data.username,\n                                           'user-library-modify')\n    except:\n        # Fall back to the app's own prompt, which caches the token in\n        # data.tokens (see simplify_init and get_top_songs above).\n        prompt_for_user_token(data, 'user-library-modify')\n        token = data.tokens['user-library-modify']\n    sp = spotipy.Spotify(auth=token)\n    sp.trace = False\n    playlist = sp.user_playlist_create(data.username, data.selected_list)\n    playlist_uri = playlist['uri']\n    results = sp.user_playlist_add_tracks(data.username, playlist_uri,\n                                          data.song_ids)\n\n\n################################################################################\n\n\n# github.com/mwaskom/seaborn/blob/master/examples/distplot_options.py\n\ndef distribution_plot(data):\n    sns.set(style=\"dark\", palette=\"muted\", color_codes=True)\n    rs = np.random.RandomState(10)\n\n    # Set up the matplotlib figure\n    f, axes = plt.subplots(1, 1, figsize=(7, 7), sharex=True)\n    sns.despine(left=True)\n\n    # Plot a filled kernel density estimate\n    sns.distplot(data.values, hist=False, color=\"g\",\n                 kde_kws={\"shade\": True})\n\n    f.suptitle('%s of %s' % (data.parameter, data.selected_list), fontsize=14,\n               fontweight='bold')\n\n    plt.setp(axes, yticks=[])\n    plt.tight_layout()\n    plt.show()\n\n\n# github.com/mwaskom/seaborn/blob/master/examples/distplot_options.py\n\ndef histogram(data):\n    sns.set(style=\"white\", palette=\"muted\", color_codes=True)\n    rs = np.random.RandomState(10)\n\n    # Set up the matplotlib figure\n    f, axes = plt.subplots(1, 1, figsize=(7, 7), sharex=True)\n    sns.despine(left=True)\n\n    # Plot a histogram and kernel density estimate\n    sns.distplot(data.values, color=\"g\")\n\n    f.suptitle('%s of %s' % (data.parameter, data.selected_list), fontsize=14,\n               fontweight='bold')\n\n    plt.setp(axes, yticks=[])\n    plt.tight_layout()\n    plt.show()\n\n\n################################################################################\n\n\ndef analyze_view(data):\n    del data.values[:]\n    del data.stage_songs[:]\n    del data.viewing_songs[:]\n    del data.viewing_artists[:]\n    del data.viewing_artist_songs[:]\n    for row, song_id in enumerate(data.song_ids[data.start_song:]):\n        data.viewing_songs += [AnalyzeSong(data, row + 1, song_id)]\n    for song_id in data.song_ids:\n        data.values += [data.songs[song_id][data.parameter]]\n\n\n################################################################################\n\n# ANALYZE\n# CONTROLLER\n# EVENTS\n\n# MOUSE\n\n\ndef analyze_mouse_moved(data):\n    for song in data.viewing_songs:\n        if (song.is_within_bounds(data, data.mouse_moved_x,\n                                  data.mouse_moved_y)):\n            song.hover()\n        else:\n            song.unhover()\n\n    for button in data.buttons:\n        if (button.is_within_bounds(data, data.mouse_moved_x,\n                                    data.mouse_moved_y)):\n            button.hover()\n        else:\n            button.unhover()\n\n\n################################################################################\n\n
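# --- Illustrative sketch (editor's addition, not part of the original\n# program) ---\n# analyze_mouse_moved above relies on Button.is_within_bounds, which the\n# drawing code suggests is an axis-aligned rectangle test on the button's\n# (x1, y1)-(x2, y2) corners. A minimal stand-alone version of that test,\n# as a hypothetical helper this module never calls:\ndef _point_in_rect_sketch(x, y, x1, y1, x2, y2):\n    # True when (x, y) lies inside or on the rectangle's edges.\n    return ((x1 <= x <= x2) and (y1 <= y <= y2))\n\n\n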
EVENTS\n\n# LEFT\n# PRESSED\n\n\ndef analyze_left_pressed(data):\n for button in data.buttons:\n if (button.is_within_bounds(data, data.left_pressed_x,\n data.left_pressed_y)):\n button.press(data)\n else:\n button.unpress(data)\n\n for i, song in enumerate(data.viewing_songs):\n if (song.is_within_bounds(data, data.left_pressed_x,\n data.left_pressed_y)):\n if (data.shift):\n data.selected_ids.add(song.get_id())\n for j, song_j in enumerate(data.viewing_songs):\n if (i == j):\n continue\n elif (song_j.is_selected(data)):\n data.selected_ids.clear()\n if (j < i):\n for k in range(j, i + 1):\n data.selected_ids.add(\n data.viewing_songs[k].get_id()\n )\n elif (j > i):\n for k in range(i, j + 1):\n data.selected_ids.add(\n data.viewing_songs[k].get_id()\n )\n break\n else:\n song.press(data)\n elif (not (data.ctrl or data.shift)):\n song.unpress(data)\n\n\n################################################################################\n\n# ANALYZE\n# CONTROLLER\n# EVENTS\n\n# KEY\n\n\ndef analyze_key_pressed(data):\n if (data.mouse_moved_x < (data.width / data.columns * 3)):\n lists_key_pressed(data)\n elif (data.mouse_moved_x > (data.width / data.columns * 16)):\n stage_key_pressed(data)\n elif (data.is_songs):\n songs_key_pressed(data)\n\n\ndef lists_key_pressed(data):\n if (data.keysym_pressed == 'Escape'):\n lists_escape(data)\n elif (data.keysym_pressed == 'Up'):\n lists_move_up(data)\n elif (data.keysym_pressed == 'Down'):\n lists_move_down(data)\n\n\ndef stage_key_pressed(data):\n if (data.keysym_pressed == 'Escape'):\n stage_escape(data)\n elif ((data.keysym_pressed == 'a') and data.ctrl):\n stage_select_all(data)\n elif (data.keysym_pressed == 'Up'):\n if (data.shift):\n stage_shift_up(data)\n else:\n stage_move_up(data)\n elif (data.keysym_pressed == 'Down'):\n if (data.shift):\n stage_shift_down(data)\n else:\n stage_move_down(data)\n\n\ndef songs_key_pressed(data):\n if (data.keysym_pressed == 'Escape'):\n songs_escape(data)\n elif ((data.keysym_pressed == 'a') and data.ctrl):\n songs_select_all(data)\n elif (data.keysym_pressed == 'Up'):\n if (data.shift):\n songs_shift_up(data)\n else:\n songs_move_up(data)\n elif (data.keysym_pressed == 'Down'):\n if (data.shift):\n songs_shift_down(data)\n else:\n songs_move_down(data)\n\n\ndef lists_escape(data):\n data.selected_list = ''\n data.is_songs = False\n data.is_artists = False\n del data.song_ids[:]\n del data.artist_ids[:]\n del data.artist_song_ids[:]\n analyze_view(data)\n analyze_buttons(data)\n\n\ndef lists_move_up(data):\n sidebar_buttons = list()\n for button in data.buttons:\n if (isinstance(button, Sidebar)):\n sidebar_buttons += [button]\n for i, sidebar_button in enumerate(sidebar_buttons):\n if (data.ctrl):\n data.start_list = 0\n sidebar_button[0].press(data)\n return\n elif ((i > 0) and sidebar_button.is_selected(data)):\n if ((sidebar_button.get_row() == 0) and (data.start_list > 0)):\n data.start_list -= 1\n sidebar_buttons[i - 1].press(data)\n break\n\n\ndef lists_move_down(data):\n sidebar_buttons = list()\n for button in data.buttons:\n if (isinstance(button, Sidebar)):\n sidebar_buttons += [button]\n for i, sidebar_button in enumerate(sidebar_buttons):\n if (data.ctrl):\n sidebar_button[-1].press(data)\n return\n if ((i < (len(sidebar_buttons) - 1)) and sidebar_button.is_selected(\n data)):\n if (sidebar_button.get_row() == (data.rows - 1)):\n data.start_list += 1\n sidebar_buttons[i + 1].press(data)\n break\n\n\ndef songs_escape(data):\n data.selected_ids.clear()\n analyze_view(data)\n\n\ndef 
songs_select_all(data):\n data.selected_ids.update(set(data.song_ids))\n analyze_view(data)\n\n\ndef songs_shift_up(data):\n for i, song in enumerate(data.viewing_songs):\n if song.is_selected(data):\n if ((i == 0) and (data.start_song == 0)):\n continue\n elif ((i == 0) and (data.start_song > 0)):\n data.start_song -= 1\n analyze_view(data)\n data.selected_ids.add(data.viewing_songs[i].get_id())\n analyze_view(data)\n break\n else:\n data.selected_ids.add(data.viewing_songs[i - 1].get_id())\n analyze_view(data)\n break\n\n\ndef songs_move_up(data):\n if (data.ctrl):\n data.start_song = 0\n data.selected_ids.clear()\n data.selected_ids.add(data.song_ids[0])\n analyze_view(data)\n return\n for i, song in enumerate(data.viewing_songs):\n if ((i == 0) and (data.start_song == 0)):\n continue\n elif song.is_selected(data):\n if ((i == 0) and (data.start_song > 0)):\n data.selected_ids.clear()\n data.start_song -= 1\n analyze_view(data)\n data.selected_ids.add(data.viewing_songs[i].get_id())\n analyze_view(data)\n return\n elif (i > 0):\n data.selected_ids.clear()\n data.selected_ids.add(data.viewing_songs[i - 1].get_id())\n analyze_view(data)\n return\n if (data.start_song > 0):\n data.start_song -= 1\n analyze_view(data)\n return\n\n\ndef songs_shift_down(data):\n for i, song in enumerate(data.viewing_songs[::-1]):\n index = len(data.viewing_songs) - i - 1\n if ((0 < song.get_row() < 30) and song.is_selected(data)):\n if (index == len(data.viewing_songs) - 1):\n return\n elif (song.get_row() == 29):\n if (index < len(data.viewing_songs) - 1):\n data.start_song += 1\n analyze_view(data)\n data.selected_ids.add(data.viewing_songs[index].get_id())\n analyze_view(data)\n return\n else:\n data.selected_ids.add(data.viewing_songs[index + 1].get_id())\n analyze_view(data)\n return\n\n\ndef songs_move_down(data):\n if (data.ctrl):\n data.start_song = len(data.song_ids) - 29\n data.selected_ids.clear()\n data.selected_ids.add(data.song_ids[-1])\n analyze_view(data)\n return\n for i, song in enumerate(data.viewing_songs[::-1]):\n index = len(data.viewing_songs) - i - 1\n if ((0 < song.get_row() < 30) and song.is_selected(data)):\n if ((song.get_row() == 29) and (\n index < len(data.viewing_songs) - 1)):\n data.selected_ids.clear()\n data.start_song += 1\n analyze_view(data)\n data.selected_ids.add(data.viewing_songs[index].get_id())\n analyze_view(data)\n return\n elif ((song.get_row() < 29) and (\n index < len(data.viewing_songs) - 1)):\n data.selected_ids.clear()\n data.selected_ids.add(data.viewing_songs[index + 1].get_id())\n analyze_view(data)\n return\n if (data.start_song + 29 == len(data.song_ids)):\n return\n else:\n data.start_song += 1\n analyze_view(data)\n return\n\n\n################################################################################\n\n# ANALYZE\n# VIEW\n\n\ndef analyze_redraw_all(canvas, data):\n canvas.create_rectangle(0, 0, data.width, data.height,\n fill=black_background, width=0)\n canvas.create_rectangle(data.width / data.columns * 3, 0,\n data.width / data.columns * 16, data.height,\n fill=black_shadow)\n for song in data.viewing_songs:\n song.draw(canvas, data)\n for button in data.buttons:\n button.draw(canvas, data)\n\n\n################################################################################\n\nrun()\n\n################################################################################\n" }, { "alpha_fraction": 0.8054245114326477, "alphanum_fraction": 0.8089622855186462, "avg_line_length": 35.826087951660156, "blob_id": 
"b9f0baacd7e82b93751a70fb709c1e4f96b9a5a9", "content_id": "8c36f9a7612907751954443a8b4bfa274f414a9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 850, "license_type": "no_license", "max_line_length": 176, "num_lines": 23, "path": "/readme.txt", "repo_name": "stephkananth/sortify", "src_encoding": "UTF-8", "text": "This program's purpose is to sort/organize/analyze a user’s music from Spotify. It takes his or her top songs, top artists, and playlists, compares them, and creates playlists.\n\n\nThe third party libraries are spotipy, seaborn, and matplotlib. These are included in the directory. This program should be run using python 3.\n\n\nOnce the program directory been downloaded, download the dependencies by running the following terminal commands within the sortify directory:\n\npip install requests\npip install spotipy (or go inside the spotipy directory and run: python3 -i setup.py install)\npip install seaborn\n\n\nRun the program using the following terminal command within the sortify directory:\n\npython3 -i sortify.py\n\n\nThe dependencies can also be installed from:\n\ngithub.com/kennethreitz/requests\ngithub.com/plamere/spotipy\ngithub.com/mwaskom/seaborn\n\n" } ]
2
schambers/rotkehlchen
https://github.com/schambers/rotkehlchen
6431747602fb2df86826b856427b2977b4b13c9d
fd25217aad3bc945d1df3e7869da520c62c00c5a
b7d183551dcf723d81b4f1a39bbacbb5cdd6ca20
refs/heads/master
2020-03-31T03:10:22.530962
2018-10-06T11:29:58
2018-10-06T11:29:58
151,855,648
0
0
BSD-3-Clause
2018-10-06T15:50:42
2018-10-06T15:37:07
2018-10-06T14:54:43
null
[ { "alpha_fraction": 0.6306934952735901, "alphanum_fraction": 0.6315141320228577, "avg_line_length": 42.51785659790039, "blob_id": "ceb1e08922cc36ea149c33f5292f1cca96b816c8", "content_id": "4a250f05b87a997f8753af678392f33b1668484f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 4874, "license_type": "permissive", "max_line_length": 116, "num_lines": 112, "path": "/ui/navigation.ts", "repo_name": "schambers/rotkehlchen", "src_encoding": "UTF-8", "text": "import {add_taxreport_listeners, create_taxreport_ui} from './taxreport';\nimport {create_or_reload_dashboard} from './dashboard';\nimport {add_user_settings_listeners, create_user_settings} from './user_settings';\nimport {add_accounting_settings_listeners, create_accounting_settings} from './accounting_settings';\nimport {add_otctrades_listeners, create_otctrades_ui} from './otctrades';\nimport {add_settings_listeners, assert_exchange_exists, create_settings_ui, pages, settings} from './settings';\n\nexport function determine_location(url: string) {\n const split = url.split('#');\n if (split.length === 1 || split[1] === '') {\n return '';\n }\n return split[1];\n}\n\nfunction save_current_location() {\n if (!settings.current_location) {\n return; // we are at the start of the program\n }\n\n if (settings.current_location === 'index') {\n console.log('Saving index ... ');\n pages.page_index = $('#page-wrapper').html();\n } else if (settings.current_location === 'otctrades') {\n console.log('Saving otc trades ... ');\n pages.page_otctrades = $('#page-wrapper').html();\n } else if (settings.current_location === 'settings') {\n console.log('Saving settings ... ');\n pages.settings = $('#page-wrapper').html();\n } else if (settings.current_location.startsWith('exchange_')) {\n const exchange_name = settings.current_location.substring(9);\n assert_exchange_exists(exchange_name);\n console.log('Saving exchange ' + exchange_name);\n pages.page_exchange[exchange_name] = $('#page-wrapper').html();\n } else if (settings.current_location === 'user_settings') {\n console.log('Saving user settings ...');\n pages.page_user_settings = $('#page-wrapper').html();\n } else if (settings.current_location === 'accounting_settings') {\n console.log('Saving accounting settings ...');\n pages.page_accounting_settings = $('#page-wrapper').html();\n } else if (settings.current_location === 'taxreport') {\n console.log('Saving tax report ...');\n pages.page_taxreport = $('#page-wrapper').html();\n } else {\n throw new Error('Invalid link location ' + settings.current_location);\n }\n}\n\nexport function change_location(target: string) {\n save_current_location();\n console.log('Changing location to ' + target);\n settings.current_location = target;\n}\n\nfunction create_or_reload_page(name: string, create_callback: () => void, always_callback: () => void) {\n change_location(name);\n if (!pages['page_' + name]) {\n console.log(`At create/reload ${name} with a null page index`);\n create_callback();\n } else {\n console.log(`At create/reload ${name} with a populated page index`);\n $('#page-wrapper').html(pages['page_' + name] as string);\n }\n always_callback();\n}\n\nexport function init_navigation() {\n $('#side-menu a').click(event => {\n event.preventDefault();\n const target = event.target as HTMLAnchorElement;\n const target_location = determine_location(target.href);\n\n if (target_location === 'otctrades') {\n create_or_reload_page('otctrades', create_otctrades_ui, 
add_otctrades_listeners);\n } else if (target_location === 'index') {\n create_or_reload_dashboard();\n } else if (target_location === 'taxreport') {\n create_or_reload_page('taxreport', create_taxreport_ui, add_taxreport_listeners);\n }\n // else do nothing -- no link\n });\n\n $('#settingsbutton a').click(event => {\n event.preventDefault();\n const target = event.target as HTMLAnchorElement;\n const target_location = determine_location(target.href);\n if (target_location !== 'settings') {\n throw new Error('Invalid link location ' + target_location);\n }\n create_or_reload_page('settings', create_settings_ui, add_settings_listeners);\n });\n\n $('#user_settings_button a').click(event => {\n event.preventDefault();\n const target = event.target as HTMLAnchorElement;\n const target_location = determine_location(target.href);\n if (target_location !== 'user_settings') {\n throw new Error('Invalid link location ' + target_location);\n }\n create_or_reload_page('user_settings', create_user_settings, add_user_settings_listeners);\n });\n\n $('#accounting_settings_button a').click(event => {\n event.preventDefault();\n const target = event.target as HTMLAnchorElement;\n const target_location = determine_location(target.href);\n if (target_location !== 'accounting_settings') {\n throw new Error('Invalid link location ' + target_location);\n }\n create_or_reload_page('accounting_settings', create_accounting_settings, add_accounting_settings_listeners);\n });\n}\n" }, { "alpha_fraction": 0.5514904856681824, "alphanum_fraction": 0.5608524084091187, "avg_line_length": 29.404495239257812, "blob_id": "d1971f4990e84f8d6364a278302a08bab0509ca3", "content_id": "e02553f3deb1a5160cacd8efc106741419bbfe95", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 8119, "license_type": "permissive", "max_line_length": 130, "num_lines": 267, "path": "/ui/utils.ts", "repo_name": "schambers/rotkehlchen", "src_encoding": "UTF-8", "text": "import {Tail} from 'tail';\nimport * as fs from 'fs';\nimport {settings} from './settings';\nimport {remote} from 'electron';\nimport {service} from './rotkehlchen_service';\nimport Timer = NodeJS.Timer;\n\n// Prompt a directory selection dialog and pass selected directory to callback\n// Callback should be a function which accepts a single argument which will be\n// a list of pathnames. 
The list should only contain 1 entry.\nexport function prompt_directory_select_async(callback: (directories: string[]) => void) {\n remote.dialog.showOpenDialog({\n title: 'Select a directory',\n properties: ['openDirectory']\n }, callback);\n}\n\n\nexport function utc_now() {\n return Math.floor(Date.now() / 1000);\n}\n\nexport function timestamp_to_date(ts: number) {\n const date = new Date(ts * 1000);\n return (\n ('0' + date.getUTCDate()).slice(-2) + '/' +\n ('0' + (date.getUTCMonth() + 1)).slice(-2) + '/' +\n date.getUTCFullYear() + ' ' +\n ('0' + date.getUTCHours()).slice(-2) + ':' +\n ('0' + date.getUTCMinutes()).slice(-2)\n );\n}\n\nlet log_searcher: Timer;\nlet client_auditor: Timer;\n\n/**\n * This function is called periodically, query some data from the\n * client and update the UI with the response.\n */\nfunction periodic_client_query() {\n // for now only query when was the last time balance data was saved\n service.query_last_balance_save_time().then(value => {\n settings.last_balance_save = value;\n }).catch(reason => {\n console.log('Error at periodic client query' + reason);\n });\n}\n\nfunction _setup_log_watcher(callback: (alert_text: string, alert_time: number) => void) {\n if (log_searcher) {\n if (!fs.existsSync('rotkehlchen.log')) {\n return;\n }\n clearInterval(log_searcher);\n }\n\n const tail = new Tail('rotkehlchen.log');\n const rePattern = new RegExp('.*(WARNING|ERROR):.*:(.*)');\n tail.on('line', (data) => {\n const matches = data.match(rePattern);\n\n if (matches != null) {\n callback(matches[2], new Date().getTime() / 1000);\n console.log(matches[2]);\n }\n });\n\n tail.on('error', function (error) {\n console.error('TAIL ERROR: ', error);\n });\n}\n\n// Show an error with TITLE and CONTENT\n// If CALLBACK is given then it should be a callback\n// to call when close is pressed\nexport function showError(title: string, content?: string, callback?: () => void) {\n if (!callback) {\n callback = () => {\n };\n }\n $.confirm({\n title: title,\n content: content,\n type: 'red',\n typeAnimated: true,\n buttons: {\n close: callback\n }\n });\n}\n\n// Show an Info message with TITLE and CONTENT\nexport function showInfo(title: string, content: string) {\n $.confirm({\n title: title,\n content: content,\n type: 'green',\n typeAnimated: true,\n buttons: {\n close: () => {\n }\n }\n });\n}\n\nexport function showWarning(title: string, content: string) {\n $.confirm({\n title: title,\n content: content,\n type: 'yellow',\n typeAnimated: true,\n buttons: {\n close: () => {\n }\n }\n });\n}\n\n// TODO: Remove this/replace with something else. 
In the case of a huge log it hangs the entire app\nexport function setup_log_watcher(callback: (alert_text: string, alert_time: number) => void) {\n // if the log file is not found keep trying until it is\n if (!fs.existsSync('rotkehlchen.log')) {\n log_searcher = setInterval(function () {\n _setup_log_watcher(callback);\n }, 5000);\n return;\n }\n _setup_log_watcher(callback);\n}\n\nexport function setup_client_auditor() {\n if (!client_auditor) {\n client_auditor = setInterval(periodic_client_query, 60000);\n }\n}\n\n\nexport function reload_table_currency_val(table: DataTables.Api, colnum: number) {\n table.rows().invalidate();\n $(table.column(colnum).header()).text(settings.main_currency.ticker_symbol + ' value');\n table.draw();\n}\n\nexport function reload_table_currency_val_if_existing(table: DataTables.Api, colnum: number) {\n if (table) {\n reload_table_currency_val(table, colnum);\n }\n}\n\nexport function string_capitalize(s: string) {\n return s && s[0].toUpperCase() + s.slice(1);\n}\n\nexport function date_text_to_utc_ts(txt: string) {\n // for now assuming DD/MM/YYYY HH:MM\n if (settings.datetime_format !== 'd/m/Y G:i') {\n throw new Error('Invalid datetime format');\n }\n const m = txt.match(/\\d+/g);\n\n if (!m) {\n throw new Error('match failed for ' + txt);\n }\n\n const day = parseInt(m[0], 10);\n const month = parseInt(m[1], 10) - 1;\n const year = parseInt(m[2], 10);\n const hours = parseInt(m[3], 10);\n const minutes = parseInt(m[4], 10);\n return (new Date(Date.UTC(year, month, day, hours, minutes))).getTime() / 1000;\n}\n\ninterface MenuItem {\n readonly name: string;\n readonly icon: string;\n}\n\nexport function dt_edit_drawcallback(\n id: string,\n edit_fn?: ((row: DataTables.RowMethods) => void) | null,\n delete_fn?: ((row: DataTables.RowMethods) => void) | null\n) {\n return () => {\n const ctx_menu_items: { [key: string]: string | MenuItem } = {};\n\n if (edit_fn) {\n ctx_menu_items['edit'] = {name: 'Edit', icon: 'fa-edit'};\n }\n\n if (delete_fn) {\n ctx_menu_items['delete'] = {name: 'Delete', icon: 'fa-trash'};\n }\n ctx_menu_items['sep1'] = '---------';\n ctx_menu_items['quit'] = {name: 'Quit', icon: 'fa-sign-out'};\n\n // idea taken from:\n // https://stackoverflow.com/questions/43161236/how-to-show-edit-and-delete-buttons-on-datatables-when-right-click-to-rows\n $.contextMenu({\n selector: `#${id}_body tr td`,\n callback: (key: string, options: { $trigger: JQuery }) => {\n const tr = options.$trigger.closest('tr');\n const row = $(`#${id}`).DataTable().row(tr);\n console.log(row);\n // TODO: When move to SQL instead of files, simply use the primary key/id to select\n switch (key) {\n case 'delete' :\n if (delete_fn) {\n delete_fn(row);\n }\n break;\n case 'edit' :\n if (edit_fn) {\n edit_fn(row);\n }\n break;\n case 'quit':\n break;\n }\n },\n items: ctx_menu_items\n });\n };\n}\n\n\nexport function unsuggest_element(selector: string) {\n $(selector).pulsate('destroy');\n $(selector).removeAttr('style');\n}\n\nexport function suggest_element(selector: string, state_to_set: string) {\n settings.start_suggestion = state_to_set;\n $(selector).pulsate({\n color: '#e45325', // set the color of the pulse\n reach: 20, // how far the pulse goes in px\n speed: 1000, // how long one pulse takes in ms\n pause: 0, // how long the pause between pulses is in ms\n glow: true, // if the glow should be shown too\n repeat: true, // will repeat forever if true, if given a number will repeat for that many times\n onHover: false // if true only pulsate if user hovers over the element\n });\n}\n\nexport function suggest_element_until_click(selector: string, state_to_set: string) {\n suggest_element(selector, state_to_set);\n $(selector).click(() => {\n unsuggest_element(selector);\n });\n}\n\nexport function format_asset_title_for_ui(asset: string): string {\n let symbol, str;\n if (asset === 'IOTA') {\n symbol = 'MIOTA';\n } else {\n symbol = asset;\n }\n\n const path = settings.ICON_MAP_LIST[symbol.toLowerCase()];\n if (path !== undefined) {\n str = `<img src=\"../${path}\" /> ${asset}`;\n } else {\n str = ` ¤ ${asset}`;\n }\n return str;\n}\n" }, { "alpha_fraction": 0.585300087928772, "alphanum_fraction": 0.5960890054702759, "avg_line_length": 26.48214340209961, "blob_id": "1a2999856207a2ed3726ec5e5cf022105678006a", "content_id": "46d4218cfe55fcec5df6845368077b8157405632", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1483, "license_type": "permissive", "max_line_length": 102, "num_lines": 56, "path": "/ui/tests/spec.ts", "repo_name": "schambers/rotkehlchen", "src_encoding": "UTF-8", "text": "// Nice overview for electron tests with the chai.should model:\n// https://dzone.com/articles/write-automated-tests-for-electron-with-spectron-m\n\nimport {Application} from 'spectron';\n\nimport * as electron from 'electron';\nimport * as path from 'path';\nimport * as chaiAsPromised from 'chai-as-promised';\nimport * as chai from 'chai';\n\nchai.should();\nchai.use(chaiAsPromised);\n\nfunction initialiseSpectron() {\n\n return new Application({\n path: electron as any,\n args: [path.join(__dirname, '../..')],\n env: {\n ELECTRON_ENABLE_LOGGING: true,\n ELECTRON_ENABLE_STACK_DUMPING: true,\n NODE_ENV: 'development'\n },\n startTimeout: 10000,\n chromeDriverLogPath: '../chromedriverlog.txt'\n });\n}\n\ndescribe('Application launch', function () {\n // @ts-ignore\n this.timeout(10000);\n let app: Application;\n\n beforeEach(() => {\n app = initialiseSpectron();\n return app.start();\n });\n\n afterEach(() => {\n if (app && app.isRunning()) {\n return app.stop();\n } else {\n return Promise.reject('app should be running');\n }\n });\n\n it('assert we got 1 window running', () => {\n return app.client.getWindowCount().should.eventually.equal(1);\n });\n\n it('make sure we get the login popup', () => {\n // @ts-ignore\n return app.client.waitForExist('.jconfirm-box-container', 5000).should.eventually.equal(true);\n });\n\n});\n" }, { "alpha_fraction": 0.7785714268684387, "alphanum_fraction": 0.8142856955528259, "avg_line_length": 9, "blob_id": "4a73df24bc08375767945deff36525ce72acacd9", "content_id": "81a2e147019f301fc145d733c550c12faf8dfe93", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 140, "license_type": "permissive", "max_line_length": 19, "num_lines": 14, "path": "/requirements_dev.txt", "repo_name": "schambers/rotkehlchen", "src_encoding": "UTF-8", "text": "-r requirements.txt\n\npytest\nmypy\nbump2version==0.5.8\npylint\nflake8\nisort\n\n# Documentation\nsphinx\nsphinx-autobuild\nsphinx_rtd_theme\nreleases\n" }, { "alpha_fraction": 0.618277907371521, "alphanum_fraction": 0.6204538345336914, "avg_line_length": 31.494949340820312, "blob_id": "9b14fc996c0a788e3402c911c3350d0aaf0a99c9", "content_id": "bae51a1519da0dca57e3da85cf25fce01677f025", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3217, "license_type": "permissive", "max_line_length": 
94, "num_lines": 99, "path": "/rotkehlchen/inquirer.py", "repo_name": "schambers/rotkehlchen", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\n\nimport logging\nfrom typing import Dict, Iterable, Optional, cast\n\nimport requests\n\nfrom rotkehlchen import typing\nfrom rotkehlchen.constants import FIAT_CURRENCIES, S_DATACOIN, S_IOTA, S_RDN, S_USD\nfrom rotkehlchen.errors import RemoteError\nfrom rotkehlchen.fval import FVal\nfrom rotkehlchen.logging import RotkehlchenLogsAdapter\nfrom rotkehlchen.utils import query_fiat_pair, retry_calls, rlk_jsonloads\n\nlogger = logging.getLogger(__name__)\nlog = RotkehlchenLogsAdapter(logger)\n\n\ndef get_fiat_usd_exchange_rates(\n currencies: Optional[Iterable[typing.FiatAsset]] = None,\n) -> Dict[typing.FiatAsset, FVal]:\n rates = {S_USD: FVal(1)}\n if not currencies:\n currencies = FIAT_CURRENCIES[1:]\n for currency in currencies:\n rates[currency] = query_fiat_pair(S_USD, currency)\n\n return rates\n\n\ndef world_to_cryptocompare(asset):\n # Adjust some ETH tokens to how cryptocompare knows them\n if asset == S_RDN:\n # remove this if cryptocompare changes the symbol\n asset = cast(typing.EthToken, 'RDN*')\n elif asset == S_DATACOIN:\n asset = cast(typing.NonEthTokenBlockchainAsset, 'DATA')\n elif asset == S_IOTA:\n asset = cast(typing.NonEthTokenBlockchainAsset, 'IOT')\n\n return asset\n\n\nclass Inquirer(object):\n def __init__(self, kraken=None): # TODO: Add type after fixing cyclic dependency\n self.kraken = kraken\n self.session = requests.session()\n\n def query_kraken_for_price(\n self,\n asset: typing.Asset,\n asset_btc_price: FVal,\n ) -> FVal:\n if asset == 'BTC':\n return self.kraken.usdprice['BTC']\n return asset_btc_price * self.kraken.usdprice['BTC']\n\n def find_usd_price(\n self,\n asset: typing.Asset,\n asset_btc_price: Optional[FVal] = None,\n ) -> FVal:\n if self.kraken and self.kraken.first_connection_made and asset_btc_price is not None:\n price = self.query_kraken_for_price(asset, asset_btc_price)\n log.debug('Get usd price from kraken', asset=asset, price=price)\n return price\n\n log.debug('Get usd price from cryptocompare', asset=asset)\n asset = world_to_cryptocompare(asset)\n resp = retry_calls(\n 5,\n 'find_usd_price',\n 'requests.get',\n requests.get,\n u'https://min-api.cryptocompare.com/data/price?'\n 'fsym={}&tsyms=USD'.format(asset)\n )\n\n if resp.status_code != 200:\n raise RemoteError('Cant reach cryptocompare to get USD value of {}'.format(asset))\n\n resp = rlk_jsonloads(resp.text)\n\n # If there is an error in the response skip this token\n if 'USD' not in resp:\n error_message = ''\n if resp['Response'] == 'Error':\n error_message = resp['Message']\n\n log.error(\n 'Cryptocompare usd price query failed',\n asset=asset,\n error=error_message,\n )\n return FVal(0)\n\n price = FVal(resp['USD'])\n log.debug('Got usd price from cryptocompare', asset=asset, price=price)\n return price\n" } ]
5
philipptrenz/433MHz-Wireless-Sockets-API
https://github.com/philipptrenz/433MHz-Wireless-Sockets-API
508f09b9b3f06b71e28a4fb78d95e4750bb32f97
b2be40f6d758494e26baf358ebfb9425db02e82b
e91c5f1da2969745adc534dd8351e7d7ea4d1f43
refs/heads/master
2021-01-12T03:27:31.675972
2018-11-01T18:27:08
2018-11-01T18:27:08
78,212,010
4
0
null
null
null
null
null
[ { "alpha_fraction": 0.7004743814468384, "alphanum_fraction": 0.7205782532691956, "avg_line_length": 34.41600036621094, "blob_id": "eb4b8c49a28be29a7330d3c09a35e58ce122fbed", "content_id": "b51a30a9c527b7c8eafd231bced9be566769ef9b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4427, "license_type": "permissive", "max_line_length": 333, "num_lines": 125, "path": "/README.md", "repo_name": "philipptrenz/433MHz-Wireless-Sockets-API", "src_encoding": "UTF-8", "text": "# 433MHz Wireless Sockets API\nThis project provides a RESTlike web API, written in Python, to control cheap remote control sockets based on 433 MHz via a Raspberry Pi. It also provides a web interface for a handy use of the API. All you need is a 433 MHz RF transmitter for a few bucks, a Pi and some minutes to get it running.\n\n## What it is\n\nI searched for an easy to use web API to control 433MHz wireless sockets, like the ones from Elro or Mumbi via WLAN. Previously I used [PowerPi](http://raspberrypiguide.de/howtos/powerpi-raspberry-pi-haussteuerung/) (German, sorry), but I'm more the Python guy and wanted a flexible communication to build my own apps and extensions.\n\nSo this project provides a simple API with a few endpoints to control and bookmark 433MHz wireless sockets via HTTP. To get it a bit more comfortable the project also includes a web interface to turn the sockets on and off. And of course it's responsive ;)\n\n![screenshot 1](/screenshots/screen_1.png?raw=true)\n\nFeatures:\n* Trigger wireless sockets to turn on and off via GET requests\n* Store ('bookmark'), remove and list your devices with name and state via POST requests\n* Simple web interface to control and manage your remote control sockets\n* [MacOS Status Bar App](https://github.com/philipptrenz/433MHz-Wireless-Sockets-MacOS-App)\n\nPlanned features (in this order):\n* Scheduler for time and event based tasks\n* Improve security\n* Code documentation (yeah, sorry ...)\n\n**Feel free to ask, report bugs and improve!**\n\n## Install\n\n### Software\n\n```bash\n# install needed dependencies\nsudo apt-get install git python3 python3-pip\nsudo pip3 install flask tinydb RPi.GPIO\n\n# clone this repo\ngit clone https://github.com/philipptrenz/433MHz-Wireless-Sockets-API\ncd 433MHz-Wireless-Sockets-API\n\n# and start\nsudo python3 433PyApi.py\n```\n\nIf you want to run the script as a service on every boot:\n```bash\n# make the scripts executable\nsudo chmod 755 433PyApi.py\n\n# add the bash script to the service folder\nsudo cp 433PyApi.sh /etc/init.d/433PyApi\nsudo chmod 755 /etc/init.d/433PyApi\nsudo update-rc.d 433PyApi defaults\n\n```\nNow you can start and stop your script via `sudo service 433PyApi start` or `stop` and it automatically starts on boot.\n\n### Hardware\n\nI used [this](http://www.watterott.com/de/RF-Link-Sender-434MHz) transmitter, but also others should work. Connect the transmitter to the Pi like this:\n\n```\n\t ___________\n\t| ___ |\n\t| / \t \\ |\n\t| |\t | |\n\t| \\ ___ / |\n\t|___________|\n\t| |\t| |\n\t| |\t| |\n\t| |\t| |_ antenna - 17cm cable\n\t| |\t|_ 5V - pin 4\n\t| |_ data - gpio 17\n\t|_ ground - pin 6\n```\n\n\n## Get started\n\n### For an easy use\n\nWhen you just want to control your sockets, install the project and navigate in a browser to the ip address of your Raspberry Pi. You will see an quite empty webpage\n* Click on the gear at the top right. 
Now you can bookmark your sockets\n* First of all type in the house code of your remote controlled sockets\n* Followed by the letter of the specific socket\n* Now choose a name for this socket\n* Click the green button\n\nThe socket should now appear above. Now switch back to the first page and you see your socket ready to work.\n\n![screenshot 2](/screenshots/screen_2.png?raw=true)\n\n### Extended\n\nBesides the web interface you can speak directly to the Web API. For turning sockets on and off use a simple GET request like:\n\n```bash\ncurl http://<ip-of-your-pi>/11011A/on\ncurl http://<ip-of-your-pi>/11011A/off\n```\n\nAdditionally you can use POST requests to bookmark, update, remove and list sockets:\n```bash\ncurl -H \"Content-Type: application/json\" -X POST -d '{\"secret\":\"test\",\"name\":\"My First Socket\", \"state\":\"off\"}' http://<ip-of-your-pi/11011A/add\n```\nRepeat this for all of your sockets. You can use this endpoint also to update data. The `state` is optional and can be `on` and `off`.\n\nYou can also remove bookmarked sockets:\n```bash\ncurl -H \"Content-Type: application/json\" -X POST -d '{\"secret\":\"test\"}' http://<ip-of-your-pi/11011A/remove\n```\nAnd let's get a list of all bookmarks:\n\n```bash\ncurl -H \"Content-Type: application/json\" -X POST -d '{\"secret\":\"test\"}' http://<ip-of-your-pi/list\n```\n\n### Overview of all endpoints\n\n```\nGET: \t/<house code + letter>/on\nGET:\t/<house code + letter>/off\n\nPOST: \t/<house code + letter>/add\nPOST: \t/<house code + letter>/remove\nPOST:\t/list\n```\nThe POST requests need a secret sent via JSON, by default it is `test` (see above in curls)\n" }, { "alpha_fraction": 0.5491740703582764, "alphanum_fraction": 0.5524777770042419, "avg_line_length": 29.75, "blob_id": "258818b15d773062c2ede4319edb50490197aa8a", "content_id": "0043593f7fed18256d7fb34df41ba187466be8f0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3935, "license_type": "permissive", "max_line_length": 373, "num_lines": 128, "path": "/static/js/main.js", "repo_name": "philipptrenz/433MHz-Wireless-Sockets-API", "src_encoding": "UTF-8", "text": "let secret = 'test';\n\n$( document ).ready(function() {\n\tloadDevices();\n\n\t$('input').on( \"keyup\", function() {\n\t\tvar device = $('#new-device-id').val();\n\t\tvar name = $('#new-device-name').val();\n\t\tif (/^([01]{5}[A-G])$/.test(device) && name.length != 0) {\n\t\t\tconsole.log('input valid');\n\t\t\t$('#add-button').attr(\"disabled\", false);\n\t\t} else {\n\t\t\t$('#add-button').attr(\"disabled\", true);\n\t\t}\n\t});\n\n});\nvar responseArray;\nfunction loadDevices() {\n\tconsole.log('loading list of devices ...')\n\t$.ajax({\n type: \"POST\",\n url: \"/list\",\n async: true,\n data: JSON.stringify({ secret: secret }),\n contentType: \"application/json\",\n complete: function (data) {\n \tresponseArray = data.responseJSON;\n \t$('#switch-table').empty();\n\n \tfor (var i=0; i<responseArray.length; i++){\n \t\tvar device = responseArray[i].device\n \t\tvar name = responseArray[i].name\n \t\tvar state = responseArray[i].state\n \t\tvar html = '<div class=\"row\"><div class=\"cell\">'+name+'</div><div class=\"cell\"><div class=\"switch\"><label><input type=\"checkbox\" class=\"device-switches\" id=\"'+device+'\"><span class=\"lever\"></span></label></div></div></div>';\n \t\t$('#switch-table').append(html);\n \t\t$('#'+device).prop('checked', state == 'on' ? 
true : false);\n \t}\n \twait = false;\n \t/* Button event */\n\t\t\t$('input.device-switches').on('change', function() {\n\t\t\t\tif ($(this).prop('checked')) {\n\t\t\t\t\t$.get($(this).attr('id')+'/on');\n\t\t\t\t} else {\n\t\t\t\t\t$.get($(this).attr('id')+'/off');\n\t\t\t\t}\n\t\t\t});\n\t\t\tconsole.log(responseArray.length+\" devices loaded\");\n \t}\n\t});\n}\n\nvar isSettings = false;\nfunction settings() {\n\tisSettings = !isSettings;\n\tif (isSettings) {\n\t\t$('#settings-button').html('replay');\n\t\t$('#new-switch-form').show();\n\t\tloadDevicesSettings();\n\t} else {\n\t\t$('#settings-button').html('settings');\n\t\t$('#new-switch-form').hide();\n\t\tloadDevices();\n\t}\n}\n\nfunction remove(device) {\n\t$.ajax({\n type: \"POST\",\n url: \"/\"+device+\"/remove\",\n async: true,\n data: JSON.stringify({ secret: secret }),\n contentType: \"application/json\",\n complete: function (data) {\n \tif (data.status == 200) {\n \t\tconsole.log(device+' sucessfully deleted');\n \t\tloadDevicesSettings();\n \t}\n \t}\n\t});\n}\n\nfunction addNewSwitch(){\n\tvar device = $('#new-device-id').val();\n\tvar name = $('#new-device-name').val();\n\n\t$.ajax({\n type: \"POST\",\n url: \"/\"+device+\"/add\",\n async: true,\n data: JSON.stringify({ secret: secret, name: name}),\n contentType: \"application/json\",\n complete: function (data) {\n \tif (data.status == 200) {\n \t\tconsole.log(device+' sucessfully deleted');\n \t\tloadDevicesSettings();\n \t}\n \t}\n\t});\n\n\t$('#new-device-id').val('');\n\t$('#new-device-name').val('');\n\t$('#add-button').attr(\"disabled\", true);\n}\n\nfunction loadDevicesSettings(){\n\tconsole.log('loading list of devices for settings ...')\n\t$.ajax({\n type: \"POST\",\n url: \"/list\",\n async: true,\n data: JSON.stringify({ secret: secret }),\n contentType: \"application/json\",\n complete: function (data) {\n \tresponseArray = data.responseJSON;\n \t$('#switch-table').empty();\n\n \tfor (var i=0; i<responseArray.length; i++){\n \t\tvar device = responseArray[i].device\n \t\tvar name = responseArray[i].name\n \t\tvar state = responseArray[i].state\n \t\tvar oldSwitches = '<div class=\"row device-settings\" id=\"'+device+'\"><div class=\"cell\"><div class=\"input-field\">'+device+'</div></div><div class=\"cell\"><div class=\"input-field\">'+name+'</div></div><div class=\"cell\"><a class=\"btn-floating btn-medium red accent-1\" type=\"submit\" onclick=\"remove(\\''+device+'\\')\"><i class=\"material-icons\">delete</i></a></div></div>';\n \t\t\t$('#switch-table').append(oldSwitches);\n \t}\n \twait = false;\n \t}\n\t});\n}" }, { "alpha_fraction": 0.5830467939376831, "alphanum_fraction": 0.6086657643318176, "avg_line_length": 26.563980102539062, "blob_id": "4b61c593ff347fcde9a82d9aaea2b1db0e0a3162", "content_id": "2eb0d58e4650a1f93a68bcc7c067c39e6cbf9035", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5816, "license_type": "permissive", "max_line_length": 99, "num_lines": 211, "path": "/433PyApi.py", "repo_name": "philipptrenz/433MHz-Wireless-Sockets-API", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"\n\n\"\"\"\nimport time, atexit, re, flask\nfrom flask import Flask, render_template, request, json, jsonify\nimport RPi.GPIO as GPIO\nfrom tinydb import TinyDB, Query\n\napp = Flask(__name__)\n\[email protected]('/')\ndef home():\n\treturn render_template('index.html')\n\n###############################################\n\[email protected]('/<device_id>/on', methods=['GET', 'POST'])\ndef 
switchOn(device_id):\n\tdevice_id = validate_device_id(device_id)\n\tprint('turned ', device_id, ' on')\n\tremoteSwitch.switchOn(device_id)\n\tdb.switch_state(device_id, 'on')\n\treturn 'switched on'\n\n@app.route('/<device_id>/off', methods=['GET', 'POST'])\ndef switchOff(device_id):\n\tdevice_id = validate_device_id(device_id)\n\tprint('turned ', device_id, ' off')\n\tremoteSwitch.switchOff(device_id)\n\tdb.switch_state(device_id, 'off')\n\treturn 'switched off'\n\n###############################################\n\n@app.route('/<device_id>/add', methods=['POST'])\ndef add(device_id):\n\tdevice_id = validate_device_id(device_id)\n\tif is_authorized(request):\n\t\tif request.headers['Content-Type'] == 'application/json':\n\t\t\tcontent = request.json\n\t\t\tif 'name' not in content: flask.abort(400)\n\t\t\tname = content['name']\n\t\t\tif 'state' in content:\n\t\t\t\tstate = content['state']\n\t\t\t\tif state == 'on' or state == 'off':\n\t\t\t\t\treturn db.add(device_id, name, state)\n\t\t\t\telse:\n\t\t\t\t\tflask.abort(400)\n\t\t\telse:\n\t\t\t\treturn db.add(device_id, name)\n\t\telse:\n\t\t\tflask.abort(400)\n\n@app.route('/<device_id>/remove', methods=['POST'])\ndef remove(device_id):\n\tdevice_id = validate_device_id(device_id)\n\tif is_authorized(request):\n\t\treturn db.remove(device_id)\n\n@app.route('/list', methods=['POST'])\ndef list():\n\tif is_authorized(request):\n\t\tdevices_list = db.list()\n\t\treturn jsonify(devices_list)\n\n\n#######################################################################\n\ndef validate_device_id(device_id):\n\tdevice_id = device_id[0:5]+device_id[5].upper()\n\tif not device_regex.match(device_id):\n\t\tprint('device id not matching regex')\n\t\tflask.abort(400)\n\treturn device_id\n\n\ndef is_authorized(request):\n\tif request.headers['Content-Type'] == 'application/json':\n\t\tnewInput = request.json\n\t\tif 'secret' in newInput and newInput['secret'] == secret:\n\t\t\tprint('request authorized')\n\t\t\treturn True\n\telif request.args.get('secret') == secret:\n\t\tprint('request authorized')\n\t\treturn True\n\t# wrong or missing secret: abort with a standard HTTP status code\n\tflask.abort(403)\n\n\ndef cleanup():\n\tGPIO.cleanup()\n\n#######################################################################\n\nclass RemoteSwitch(object):\n\trepeat = 10 # Number of transmissions\n\tpulselength = 300 # microseconds\n\tGPIOMode = GPIO.BCM\n\t\n\tdef __init__(self, pin):\n\t\tself.pin = pin\n\t\t''' \n\t\tdevices: A = 1, B = 2, C = 4, D = 8, E = 16 \n\t\tkey: according to dipswitches on your Elro receivers\n\t\tpin: according to Broadcom pin naming\n\t\t'''\t\t\n\t\tself.device_letter = { \"A\":1, \"B\":2, \"C\":4, \"D\":8, \"E\":16, \"F\":32, \"G\":64 } \n\n\t\tGPIO.setmode(self.GPIOMode)\n\t\tGPIO.setup(self.pin, GPIO.OUT)\n\t\t\n\tdef switchOn(self, device_id):\n\t\tkey = [int(device_id[0]),int(device_id[1]),int(device_id[2]),int(device_id[3]),int(device_id[4])]\n\t\tdevice = self.device_letter[device_id[5]]\n\t\tself._switch(GPIO.HIGH, key, device)\n\n\tdef switchOff(self, device_id):\n\t\tkey = [int(device_id[0]),int(device_id[1]),int(device_id[2]),int(device_id[3]),int(device_id[4])]\n\t\tdevice = self.device_letter[device_id[5]]\n\t\tself._switch(GPIO.LOW, key, device)\n\n\tdef _switch(self, switch, key, device):\n\t\tself.bit = [142, 142, 142, 142, 142, 142, 142, 142, 142, 142, 142, 136, 128, 0, 0, 0]\t\t\n\n\t\tfor t in range(5):\n\t\t\tif key[t]:\n\t\t\t\tself.bit[t]=136\t\n\t\tx=1\n\t\tfor i in range(1,6):\n\t\t\tif device & x > 0:\n\t\t\t\tself.bit[4+i] = 136\n\t\t\tx = x<<1\n\n\t\tif switch == GPIO.HIGH:\n\t\t\tself.bit[10] = 136\n\t\t\tself.bit[11] = 142\n\t\t\t\t\n\t\tbangs = []\n\t\tfor y in range(16):\n\t\t\tx = 128\n\t\t\tfor i in range(1,9):\n\t\t\t\tb = (self.bit[y] & x > 0) and GPIO.HIGH or GPIO.LOW\n\t\t\t\tbangs.append(b)\n\t\t\t\tx = x>>1\n\t\t\t\t\n\t\tGPIO.output(self.pin, GPIO.LOW)\n\t\tfor z in range(self.repeat):\n\t\t\tfor b in bangs:\n\t\t\t\tGPIO.output(self.pin, b)\n\t\t\t\ttime.sleep(self.pulselength/1000000.)\n\n#######################################################################\n\nclass Database:\n\n\tdef __init__(self, file):\n\t\tself.tinydb = TinyDB(file)\n\t\tself.devices_table = self.tinydb.table('devices')\n\t\tself.Device = Query()\n\n\tdef get_eid(self, device_id):\n\t\tel_list = self.devices_table.search(self.Device.device == device_id)\n\t\tif len(el_list) < 1: return False \t# nothing found\n\t\teid = el_list[0].eid\n\t\tprint(device_id,' already exists in db with eid ',eid)\n\t\treturn eid\n\n\tdef add(self, device_id, name, state='off'):\n\t\teid = self.get_eid(device_id)\n\t\tif eid is False:\n\t\t\tself.devices_table.insert({'device':device_id,'name':name,'state':state})\n\t\t\tprint('added: ',device_id,', ',name,', state: ', state)\n\t\t\treturn 'added'\n\t\telse:\n\t\t\tself.devices_table.update({'name':name,'state':state}, eids=[eid])\n\t\t\tprint('updated: ',device_id,', ',name,', ', state)\n\t\t\treturn 'updated'\n\n\tdef remove(self, device_id):\n\t\teid = self.get_eid(device_id)\n\t\tif eid is False: flask.abort(400) # does not exist\n\t\tself.devices_table.remove(eids=[eid])\n\t\tprint('removed: ',device_id)\n\t\treturn 'removed'\n\n\tdef list(self):\n\t\tprint('sending list ...')\n\t\treturn self.devices_table.all()\n\n\tdef switch_state(self, device_id, state):\n\t\teid = self.get_eid(device_id)\n\t\tif eid is False: return False # does not exist\n\t\tself.devices_table.update({'state':state}, eids=[eid])\n\t\treturn True\n\n\n#######################################################################\n\nif __name__ == '__main__':\n\tatexit.register(cleanup)\n\t\n\tdefault_GPIO_pin = 17\t\t\t\t\t# change the GPIO pin according to your wiring\n\tremoteSwitch = RemoteSwitch(pin=default_GPIO_pin)\n\n\tsecret = 'test' \t\t\t\t\t# TODO: changeable\n\n\tdevice_regex = re.compile(\"[01]{5}[A-G]\")\t# regex to test device identifiers\n\n\tdb = Database(file='db.json')\n\tapp.run(host='0.0.0.0', port=80)\n" } ]
3
RoRyou/nn
https://github.com/RoRyou/nn
0a73092be74783c7e0d6d0bb9ad2881aa585e6af
d48bf4a5f9d5c4e668b8c2a09527ebd7425dfe1f
43b9c21686b5ffba14bc273a2c146de7572d6b27
refs/heads/main
2023-01-31T06:56:25.649859
2020-12-11T02:34:30
2020-12-11T02:34:30
313,647,263
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.553108811378479, "alphanum_fraction": 0.5731865167617798, "avg_line_length": 24.65517234802246, "blob_id": "14ff461f7aaa9a4900e3706b34fbd430e04d2541", "content_id": "68cfba0b4046bdd8ccdc330bd0c734445e5ec351", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1544, "license_type": "permissive", "max_line_length": 90, "num_lines": 58, "path": "/4_4_.py", "repo_name": "RoRyou/nn", "src_encoding": "UTF-8", "text": "import torch\r\nfrom torch import nn\r\n\r\nclass CenteredLayer(nn.Module):\r\n def __init__(self,**kwargs):\r\n super(CenteredLayer,self).__init__(**kwargs)\r\n def forward(self,x):\r\n return x -x.mean()\r\n\r\nlayer = CenteredLayer()\r\nlayer(torch.tensor([1,2,3,4,5],dtype = torch.float))\r\n\r\nnet = nn.Sequential(nn.Linear(8,128),CenteredLayer())\r\ny=net(torch.rand(4,8))\r\nprint(y.mean().item())\r\n\r\nclass MyDense(nn.Module):\r\n def __init__(self,**kwargs):\r\n super(MyDense,self).__init__()\r\n self.params = nn.ParameterList([nn.Parameter(torch.randn(4,4)) for i in range(3)])\r\n self.params.append(nn.Parameter((torch.randn(4,1))))\r\n\r\n def forward(self,x):\r\n for i in range(len(self.params)):\r\n x = torch.mm(x,self.params[i])\r\n return x\r\nnet = MyDense()\r\nprint(net)\r\n\r\nclass MyDictDense(nn.Module):\r\n def __init__(self,**kwargs):\r\n super(MyDictDense,self).__init__( **kwargs)\r\n self.params = nn.ParameterDict({\r\n 'linear1':nn.Parameter(torch.randn(4,4)),\r\n 'linear2':nn.Parameter(torch.randn(4,1))\r\n })\r\n self.params.update(\r\n {\r\n 'linear3':nn.Parameter(torch.randn(4,2))\r\n }\r\n )\r\n def forward(self,x,choice='linear1'):\r\n return torch.mm(x,self.params[choice])\r\n\r\nnet = MyDictDense()\r\nprint(net)\r\nx = torch.ones(1, 4)\r\nprint(net(x, 'linear1'))\r\nprint(net(x, 'linear2'))\r\nprint(net(x, 'linear3'))\r\n\r\n\r\nnet = nn.Sequential(\r\n MyDictDense(),\r\n MyDense()\r\n)\r\nprint(net)\r\nprint(net(x))" }, { "alpha_fraction": 0.6739811897277832, "alphanum_fraction": 0.7032392621040344, "avg_line_length": 26.941606521606445, "blob_id": "c96063762183acb7857fb12a0d408016e1c9ba9e", "content_id": "b81b956ff7cec4cb0f6317b257107344fcf6103b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5845, "license_type": "permissive", "max_line_length": 93, "num_lines": 137, "path": "/3_2_linearRegression.py", "repo_name": "RoRyou/nn", "src_encoding": "UTF-8", "text": "import torch\nfrom time import time\nfrom IPython import display\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\na = torch.ones(1000)\nb = torch.ones(1000)\n\nstart = time()\nc = torch.zeros(1000)\nfor i in range(1000):\n c[i] = a[i] + b[i]\nprint(time() - start)\n\nstart = time()\nd = a + b\nprint(time() - start)\n\na = torch.ones(3)\nb = 10\nprint(a + b)\n\n# y = Xw + b + e\nnum_inputs = 2\nnum_examples = 1000\ntrue_w = [2, -3.4]\ntrue_b = 4.2\nfeatures = torch.randn(num_examples, num_inputs, dtype=torch.float32)\nlabels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b\n# 噪声项 ϵ 服从均值为0、标准差为0.01的正态分布\nlabels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float32)\n# features的每一行是一个长度为2的向量,而labels的每一行是一个长度为1的向量(标量)\nprint(features[0], labels[0])\n\n\n# 通过生成第二个特征features[:, 1]和标签 labels 的散点图,可以更直观地观察两者间的线性关系\ndef use_svg_display():\n display.set_matplotlib_formats('svg')\n\n\ndef set_figsize(figsize=(3.5, 2.5)):\n use_svg_display()\n 
plt.rcParams['figure.figsize'] = figsize\n\n\nset_figsize()\nplt.scatter(features[:, 1].numpy(), labels.numpy(), 1)\n\n\n# plt.show()\n\n\n# 在训练模型的时候,我们需要遍历数据集并不断读取小批量数据样本。\n# 这里我们定义一个函数:它每次返回batch_size(批量大小)个随机样本的特征和标签。\n\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices) # shuffle() 方法将序列的所有元素随机排序。\n for i in range(0, num_examples, batch_size):\n #torch.LongTensor 是64位整型\n j = torch.LongTensor(indices[i:min(i + batch_size, num_examples)]) # 最后一次可能不足一个batch\n # j是一个tensor\n yield features.index_select(0, j), labels.index_select(0, j)\n #\n\n\n\n\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n print(X, y)\n break\n\n# 3.2.3 初始化模型参数\n# 我们将权重初始化成均值为0、标准差为0.01的正态随机数,偏差则初始化成0。\n\nw = torch.tensor(np.random.normal(0, 0.1, (num_inputs, 1)), dtype=torch.float32)\nb = torch.tensor(1, dtype=torch.float32)\n# 之后的模型训练中,需要对这些参数求梯度来迭代参数的值,因此我们要让它们的requires_grad=True。\nw.requires_grad_(requires_grad=True)\nb.requires_grad_(requires_grad=True)\n\n# 3.2.4 定义模型\n# 下面是线性回归的矢量计算表达式的实现。我们使用mm函数做矩阵乘法\ndef linreg(X, w, b): # 本函数已保存在d2lzh_pytorch包中方便以后使用\n return torch.mm(X, w) + b\n\n\n# 3.2.5 定义损失函数\n# 我们使用上一节描述的平方损失来定义线性回归的损失函数。\n# 在实现中,我们需要把真实值y变形成预测值y_hat的形状。以下函数返回的结果也将和y_hat的形状相同。\ndef squared_loss(y_hat, y): # 本函数已保存在d2lzh_pytorch包中方便以后使用\n # 注意这里返回的是向量, 另外, pytorch里的MSELoss并没有除以 2\n return (y_hat - y.view(y_hat.size())) ** 2 / 2\n\n\n# 3.2.6 定义优化算法\n# 以下的sgd函数实现了上一节中介绍的小批量随机梯度下降算法。\n# 它通过不断迭代模型参数来优化损失函数。这里自动求梯度模块计算得来的梯度是一个批量样本的梯度和。\n# 我们将它除以批量大小来得到平均值\ndef sgd(params, lr, batch_size): # 本函数已保存在d2lzh_pytorch包中方便以后使用\n for param in params:\n param.data -= lr * param.grad / batch_size # 注意这里更改param时用的param.data\n\n\n# 3.2.7 训练模型\n# 在训练中,我们将多次迭代模型参数。\n# 在每次迭代中,我们根据当前读取的小批量数据样本(特征X和标签y),通过调用反向函数backward计算小批量随机梯度,并调用优化算法sgd迭代模型参数。\n# 由于我们之前设批量大小batch_size为10,每个小批量的损失l的形状为(10, 1)。回忆一下自动求梯度一节。\n# 由于变量l并不是一个标量,所以我们可以调用.sum()将其求和得到一个标量,再运行l.backward()得到该变量有关模型参数的梯度。\n# 注意在每次更新完参数后不要忘了将参数的梯度清零。\n# 在一个迭代周期(epoch)中,我们将完整遍历一遍data_iter函数,并对训练数据集中所有样本都使用一次(假设样本数能够被批量大小整除)。\n# 这里的迭代周期个数num_epochs和学习率lr都是超参数,分别设3和0.03。\n# 在实践中,大多超参数都需要通过反复试错来不断调节。虽然迭代周期数设得越大模型可能越有效,但是训练时间可能过长。\n# 而有关学习率对模型的影响,我们会在后面“优化算法”一章中详细介绍。\n\nlr = 0.03\nnum_epochs = 3 #迭代周期个数\nnet = linreg #线性回归\nloss = squared_loss #损失函数\n\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):# 在每次迭代中,我们根据当前读取的小批量数据样本(特征X和标签y)\n l = loss(net(X, w, b), y).sum() #损失函数 #由于变量l并不是一个标量,所以我们可以调用.sum()将其求和得到一个标量\n l.backward()#通过调用反向函数backward计算小批量随机梯度\n sgd([w, b], lr, batch_size)#并调用优化算法sgd迭代模型参数。\n\n w.grad.data.zero_ #清零\n b.grad.data.zero_\n train_l = loss(net(features, w, b), labels)\n print('epoch %d,loss %f' % (epoch + 1, train_l.mean().item()))\n\nprint(true_w, '\\n', w)\nprint(true_b, '\\n', b)\n" }, { "alpha_fraction": 0.5654520988464355, "alphanum_fraction": 0.5843454599380493, "avg_line_length": 19.852941513061523, "blob_id": "7691dc1f307da413cf58f8bf3e4c04909f5f945e", "content_id": "735ac98a192b7e5c23de40f316ecd8c3f1c55ab6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 741, "license_type": "permissive", "max_line_length": 68, "num_lines": 34, "path": "/4_5_readandsave.py", "repo_name": "RoRyou/nn", "src_encoding": "UTF-8", "text": "import torch\r\nfrom torch import nn\r\n\r\nx = torch.ones(3)\r\nprint(x)\r\ntorch.save(x,'x.pt')\r\nx2 = torch.load('x.pt')\r\nprint(x2)\r\n\r\ny = 
torch.zeros(4)\r\nprint(y)\r\ntorch.save([x,y],'xy.pt')\r\nxy_list = torch.load('xy.pt')\r\nprint(xy_list)\r\n\r\ntorch.save({'x':x,'y':y},'xy_dict.pt')\r\nxy=torch.load('xy_dict.pt')\r\nprint(xy)\r\n\r\nclass MLP(nn.Module):\r\n def __init__(self):\r\n super(MLP,self).__init__()\r\n self.hidden = nn.Linear(4,3)\r\n self.act = nn.ReLU()\r\n self.output = nn.Linear(2,1)\r\n\r\n def forward(self,x):\r\n a = self.act(self.hidden(x))\r\n return self.output(a)\r\nnet =MLP()\r\nprint(net.state_dict())\r\n\r\noptimizer =torch.optim.SGD(net.parameters(),lr= 0.001,momentum=0.9)\r\nprint(optimizer.state_dict())" }, { "alpha_fraction": 0.621052622795105, "alphanum_fraction": 0.6526315808296204, "avg_line_length": 15.454545021057129, "blob_id": "fa8332747d83fc59fecf4b92034a46bc0b473ca5", "content_id": "6ea0807ce1be08621252a483c8c592a643b2c116", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 190, "license_type": "permissive", "max_line_length": 60, "num_lines": 11, "path": "/4_2_.py", "repo_name": "RoRyou/nn", "src_encoding": "UTF-8", "text": "import torch\r\nfrom torch import nn\r\nfrom torch.nn import init\r\n\r\nnet = nn.Sequential(nn.Linear(4,3),nn.ReLU(),nn.Linear(3,1))\r\nprint(net)\r\n\r\nX = torch.rand(3,4)\r\nY = net(X).sum()\r\n\r\nprint(Y)" }, { "alpha_fraction": 0.7192896008491516, "alphanum_fraction": 0.746916651725769, "avg_line_length": 21.0108699798584, "blob_id": "aa7ed2f08d6c1375023888d20710c29bc04cc82f", "content_id": "7bcefbc8a5e4f7a5d81a415bea851d0317d20a8f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3265, "license_type": "permissive", "max_line_length": 145, "num_lines": 92, "path": "/2_3_pytorch_train.py", "repo_name": "RoRyou/nn", "src_encoding": "UTF-8", "text": "from __future__ import print_function\nimport torch\nimport numpy as np\n\n# 创建一个Tensor并设置requires_grad=True:\nx = torch.ones(2, 2, requires_grad=True)\nprint(x)\n# grad_fn积分方法名,默认为None\nprint(x.grad_fn)\n\ny = x + 2\nprint(y)\nprint(y.grad_fn)\n# 注意x是直接创建的,所以它没有grad_fn, 而y是通过一个加法操作创建的,所以它有一个为<AddBackward>的grad_fn\n\n# 像x这种直接创建的称为叶子节点,叶子节点对应的grad_fn是None\n# 叶子节点\nprint(x.is_leaf, y.is_leaf)\n\nz = y * y * 3\n\nout = z.mean()\nprint(z)\nprint(out)\n\n# 通过.requires_grad_()来用in-place的方式改变requires_grad属性\na = torch.randn(2, 2)\na = ((a * 3) / (a - 1))\nprint(a.requires_grad)\na.requires_grad_(True)\nprint(a.requires_grad)\nb = (a * a).sum()\nprint(b.grad_fn)\n\n#因为out是一个标量,所以调用backward()时不需要指定求导变量\nout.backward()\nprint(x.grad)\n\n# 再来反向传播一次,注意grad是累加的\nout2 = x.sum()\nout2.backward()\nprint(x.grad)\n\nout3 = x.sum()\nx.grad.data.zero_() # 清0\nout3.backward()\nprint(x.grad)\n\nx = torch.tensor([1.0,2.0,3.0,4.0],requires_grad=True)\ny = 2*x\nz = y.view(2,2)\nprint(z)\n#现在 z 不是一个标量,所以在调用backward时需要传入一个和z同形的权重向量进行加权求和得到一个标量。\nv= torch.tensor([[1.0,0.1],[0.01,0.001]],dtype=torch.float)\nz.backward(v)\nprint(x.grad)\n#x.grad是和x同形的张量。\nx = torch.tensor(1.0,requires_grad=True)\ny1 = x **2\nwith torch.no_grad():\n y2=x**3\ny3 = y1+y2\nprint(x.requires_grad)\nprint(y1,y1.requires_grad)\nprint(y2,y2.requires_grad)\nprint(y3,y3.requires_grad)\n\ny3.backward()\nprint(x.grad)\n\n#如果我们想要修改tensor的数值,但是又不希望被autograd记录(即不会影响反向传播),那么我么可以对tensor.data进行操作\nx = torch.ones(1,requires_grad=True)\n\nprint(x.data) # 还是一个tensor\nprint(x.data.requires_grad) # 但是已经是独立于计算图之外\n\ny = 2 * x\nx.data *= 100 # 只改变了值,不会记录在计算图,所以不会影响梯度传播\n\ny.backward()\nprint(x) # 
更改data的值也会影响tensor的值\nprint(x.grad)\n\n#线性回归输出是一个连续值,因此适用于回归问题。回归问题在实际中很常见,如预测房屋价格、气温、销售额等连续值的问题。与回归问题不同,分类问题中模型的最终输出是一个离散值。我们所说的图像分类、垃圾邮件识别、疾病检测等输出为离散值的问题都属于分类问题的范畴。softmax回归则适用于分类问题。\n\n#由于线性回归和softmax回归都是单层神经网络,它们涉及的概念和技术同样适用于大多数的深度学习模型。我们首先以线性回归为例,介绍大多数深度学习模型的基本要素和表示方法\n\n\n\n# 当模型和损失函数形式较为简单时,上面的误差最小化问题的解可以直接用公式表达出来。这类解叫作解析解(analytical solution)。\n# 本节使用的线性回归和平方误差刚好属于这个范畴。\n# 大多数深度学习模型并没有解析解,只能通过优化算法有限次迭代模型参数来尽可能降低损失函数的值。这类解叫作数值解(numerical solution)。\n\n\n" }, { "alpha_fraction": 0.5418994426727295, "alphanum_fraction": 0.5418994426727295, "avg_line_length": 17.44444465637207, "blob_id": "8060c948367e55315cd55cc22e12a5f95a4ae7da", "content_id": "f87d878c38e59ed54b44bac2605e0e1f0e53e1ce", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 179, "license_type": "permissive", "max_line_length": 35, "num_lines": 9, "path": "/5_5_CNN.py", "repo_name": "RoRyou/nn", "src_encoding": "UTF-8", "text": "import torch\r\nfrom torch import nn\r\n\r\nclass CNN(nn.Module):\r\n def __init__(self,**kwargs):\r\n super(CNN, self).__init__()\r\n\r\n def forward(self):\r\n return\r\n\r\n\r\n" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.692307710647583, "avg_line_length": 5.5, "blob_id": "a6c5e6021156240a06ba0ae7f9ff14f6b0e9e72a", "content_id": "a20632dc0696eabbdd865d3fa725e09995bdee8e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 13, "license_type": "permissive", "max_line_length": 7, "num_lines": 2, "path": "/README.md", "repo_name": "RoRyou/nn", "src_encoding": "UTF-8", "text": "# nn\npytorch\n" }, { "alpha_fraction": 0.7128577828407288, "alphanum_fraction": 0.7369377613067627, "avg_line_length": 26.512500762939453, "blob_id": "fd7e5e8a939f87558f36cd8adc38471aefa6574e", "content_id": "4b46670b4938f18f65701481f9960278a9d23ac5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7280, "license_type": "permissive", "max_line_length": 148, "num_lines": 160, "path": "/3_3_brief_linreg.py", "repo_name": "RoRyou/nn", "src_encoding": "UTF-8", "text": "import torch\nfrom time import time\nfrom IPython import display\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport torch.utils.data as Data\nfrom torch.nn import init\nimport torch.nn as nn\n# 3.3.1 生成数据集\n# 我们生成与上一节中相同的数据集。其中features是训练数据特征,labels是标签\nnum_inputs = 2 #x1,x2 有几个x\nnum_examples = 1000\ntrue_w = [2, -3.4]\ntrue_b = 4.2\nfeatures = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)\nlabels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b\nlabels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)\n\n# 3.3.2 读取数据\n# PyTorch提供了data包来读取数据。由于data常用作变量名,我们将导入的data模块用Data代替。\n# 在每一次迭代中,我们将随机读取包含10个数据样本的小批量。\n\nbatch_size = 10\n# 将训练数据的特征和标签组合\ndataset = Data.TensorDataset(features, labels)\n# 随机读取小批量\ndata_iter = Data.DataLoader(dataset, batch_size, shuffle=True)\n# 这里data_iter的使用跟上一节中的一样。让我们读取并打印第一个小批量数据样本。\n\nfor X, y in data_iter:\n print(X, y)\n break\n\n\n#\n# 3.3.3 定义模型\n# 在上一节从零开始的实现中,我们需要定义模型参数,并使用它们一步步描述模型是怎样计算的。\n# 当模型结构变得更复杂时,这些步骤将变得更繁琐。\n# 其实,PyTorch提供了大量预定义的层,这使我们只需关注使用哪些层来构造模型。\n# 下面将介绍如何使用PyTorch更简洁地定义线性回归。\n#\n# 首先,导入torch.nn模块。\n# 实际上,“nn”是neural networks(神经网络)的缩写。\n# 顾名思义,该模块定义了大量神经网络的层。\n# 之前我们已经用过了autograd,而nn就是利用autograd来定义模型。\n# nn的核心数据结构是Module,它是一个抽象概念,既可以表示神经网络中的某个层(layer),也可以表示一个包含很多层的神经网络。\n# 
In practice, the most common approach is to subclass nn.Module and write your own network/layer. An nn.Module instance should contain some layers plus a forward method that returns the output.\n# Let's first see how to implement a linear regression model with nn.Module.\n\nclass LinearNet(nn.Module):\n    def __init__(self, n_feature):\n        super(LinearNet, self).__init__()\n        self.linear = nn.Linear(n_feature, 1)\n\n    # forward defines the forward pass\n    def forward(self, x):\n        y = self.linear(x)\n        return y\n\n\nnet = LinearNet(num_inputs)\nprint(net) # print shows the network structure\n\n# In fact we can build the network even more conveniently with nn.Sequential. Sequential is an ordered container: layers are added to the computation graph in the order they are passed in.\n\n# Variant one\nnet = nn.Sequential(\n    nn.Linear(num_inputs, 1)  # in_features = 2, out_features = 1: 2-dimensional input, 1-dimensional output\n    # more layers can be passed in here\n)\n\n# Variant two\nnet = nn.Sequential()\nnet.add_module('linear', nn.Linear(num_inputs, 1))\n# net.add_module ......\n\n# Variant three\nfrom collections import OrderedDict\n\nnet = nn.Sequential(OrderedDict([\n    ('linear', nn.Linear(num_inputs, 1))\n    # ......\n]))\n\nprint(net)\nprint(net[0])\n#\n# net.parameters() returns a generator over all learnable parameters of the model.\n\nfor param in net.parameters():\n    print(param)\n\n# Recall the neural-network diagram of linear regression in figure 3.1.\n# As a single-layer neural network, each neuron in the output layer of linear regression is fully connected to every input.\n# The output layer of linear regression is therefore also called a fully connected layer.\n# Note: torch.nn only supports batched input, not single samples; with a single sample, use input.unsqueeze(0) to add a batch dimension.\n\n# 3.3.4 Initializing model parameters\n# Before using net we must initialize the model parameters, i.e. the weights and biases of the linear regression model.\n# PyTorch provides a variety of parameter initialization methods in the init module.\n# Here init is short for initializer.\n# init.normal_ initializes each weight element with a sample from a normal distribution with mean 0 and standard deviation 0.01. The bias is initialized to zero.\n\n\ninit.normal_(net[0].weight, mean=0, std=0.01)\ninit.constant_(net[0].bias, val=0)  # or modify the bias data directly: net[0].bias.data.fill_(0)\n# Note: if net was defined with the custom class at the start of section 3.3.3, the code above raises an error; net[0].weight should then be net.linear.weight, and likewise for bias. Indexing sub-modules with net[0] only works when net is a ModuleList or Sequential instance; see section 4.1.\n\n# 3.3.5 Defining the loss function\n# PyTorch provides various loss functions in the nn module. They can be seen as special layers, and PyTorch implements them as subclasses of nn.Module.\n# We use the provided mean squared error loss as the model's loss function.\n\nloss = nn.MSELoss()\n\n# 3.3.6 Defining the optimization algorithm\n# Likewise, we need not implement mini-batch stochastic gradient descent ourselves.\n# The torch.optim module provides many common optimization algorithms such as SGD, Adam and RMSProp.\n# Below we create an optimizer instance for all parameters of net and specify mini-batch stochastic gradient descent (SGD) with learning rate 0.03.\n\nimport torch.optim as optim\n\noptimizer = optim.SGD(net.parameters(), lr=0.03)\nprint(optimizer)\n\n# We can also set different learning rates for different sub-networks, which is common when fine-tuning. Example:\n#\n# optimizer = optim.SGD([\n#                 # parameters without an explicit learning rate use the outermost default\n#                 {'params': net.subnet1.parameters()}, # lr=0.03\n#                 {'params': net.subnet2.parameters(), 'lr': 0.01}], lr=0.03)\n# What if we do not want the learning rate fixed to one constant?\n# There are two main approaches. One is to modify the learning rate in optimizer.param_groups;\n# the other, simpler and more recommended, is to build a new optimizer - optimizer is lightweight and cheap to construct.\n# The latter, however, loses momentum and other state for optimizers that use momentum (such as Adam), which may make the loss oscillate during convergence.\n\n# adjust the learning rate\nfor param_group in optimizer.param_groups:\n    param_group['lr'] *= 0.1 # 0.1 times the previous learning rate\n\n# 3.3.7 Training the model\n# When training the model, we iterate the model parameters by calling the step function of the optim instance.\n# Following the definition of mini-batch stochastic gradient descent, the batch size is taken into account so that the gradient over the samples in the batch is averaged.\n\nnum_epochs = 3\nfor epoch in range(1, num_epochs + 1):\n    for X, y in data_iter:\n        output = net(X)\n        l = loss(output, y.view(-1, 1))\n        optimizer.zero_grad() # zero the gradients, equivalent to net.zero_grad()\n        l.backward()\n        optimizer.step()\n    print('epoch %d, loss: %f' % (epoch, l.item()))\n\n# Below we compare the learned model parameters with the true ones.\n# We take the layer we need from net and access its weight and bias. The learned parameters are close to the true parameters.\n\ndense = net[0]\nprint(true_w, dense.weight)\nprint(true_b, dense.bias)\n" }, { "alpha_fraction": 0.52173912525177, "alphanum_fraction": 0.5579710006713867, "avg_line_length": 20.894737243652344, "blob_id": "03e85bbc946ed9858583b02e399642d2b5e3c82f", "content_id": "c993264f37d973346463babdb898a24f43adf974", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 414, "license_type": "permissive", "max_line_length": 42, "num_lines": 19, "path": "/4_1_.py", "repo_name": 
"RoRyou/nn", "src_encoding": "UTF-8", "text": "import torch\r\nfrom torch import nn\r\n\r\nclass MLP(nn.Module):\r\n def __init__(self,**kwargs):\r\n super(MLP,self).__init__(**kwargs)\r\n self.hidden = nn.Linear(784,256)\r\n self.act = nn.ReLU()\r\n self.output = nn.Linear(256,10)\r\n\r\n def forward(self,x):\r\n a = self.act(self.hidden(x))\r\n return self.output(a)\r\n\r\n\r\nX = torch.rand(2,784)\r\nnet = MLP()\r\nprint(net)\r\nprint(net(X))" } ]
9
ABaldeosinghASC/ASC-Folder
https://github.com/ABaldeosinghASC/ASC-Folder
e83a74c45e6408c7e3a08c4e55dc40da4421bf33
ef9c90116703585fb2bb9dd5c56a3a9aca9c07c6
8cf771927f64ab227c64640a18f6f3a0bb0aaf36
refs/heads/master
2020-04-06T06:56:21.915450
2017-07-24T20:29:44
2017-07-24T20:29:44
63,092,056
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.5876887440681458, "alphanum_fraction": 0.5981416702270508, "avg_line_length": 39.095237731933594, "blob_id": "5dbd9f66160679a011ba4f8348178de74eab5c68", "content_id": "fc89c03a106d07d569492b393d754577c3224522", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 861, "license_type": "no_license", "max_line_length": 140, "num_lines": 21, "path": "/rehabilitation-yoga/upload/pablo.html", "repo_name": "ABaldeosinghASC/ASC-Folder", "src_encoding": "UTF-8", "text": "<div id=\"body\">\r\n\t\t\r\n\t</h2><div class=\"content\">\r\n\t\t\t<div>\r\n\t\t\t\t<link class=\"jsbin\" href=\"http://ajax.googleapis.com/ajax/libs/jqueryui/1/themes/base/jquery-ui.css\" rel=\"stylesheet\" type=\"text/css\" />\r\n\t\t\t\t<script class=\"jsbin\" src=\"http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js\"></script>\r\n\t\t\t\t<script class=\"jsbin\" src=\"http://ajax.googleapis.com/ajax/libs/jqueryui/1.8.0/jquery-ui.min.js\"></script>\r\n\t\t\t\t<meta charset=utf-8 />\r\n\t\t\t\t<title>JS Bin</title>\r\n\t\t\t\t<!--[if IE]>\r\n\t\t\t\t <script src=\"http://html5shiv.googlecode.com/svn/trunk/html5.js\"></script>\r\n\t\t\t\t<![endif]-->\r\n\t\t\t\t<style>\r\n\t\t\t\t article, aside, figure, footer, header, hgroup, \r\n\t\t\t\t menu, nav, section { display: block; }\r\n\t\t\t\t</style>\r\n\t\t\t\t</head>\r\n\t\t\t\t<body>\r\n\t\t\t\t <input type='file' onchange=\"readURL(this);\" />\r\n\t\t\t\t <img id=\"blah\" src=\"\" alt=\"your image\" />\r\n\t\t\t\t</body>" }, { "alpha_fraction": 0.6507936716079712, "alphanum_fraction": 0.6984127163887024, "avg_line_length": 20.33333396911621, "blob_id": "2cf99ae94fd34da867f7949a4579b2c32555b95a", "content_id": "63a593a2ddf6eaac5f1767c387179007fd6f6fc2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 63, "license_type": "no_license", "max_line_length": 28, "num_lines": 3, "path": "/Test5.py", "repo_name": "ABaldeosinghASC/ASC-Folder", "src_encoding": "UTF-8", "text": "from Myro import *\ninit(\"sim\") #Start simulator\nmotors(1, 0, 2)" }, { "alpha_fraction": 0.4024205803871155, "alphanum_fraction": 0.4886535406112671, "avg_line_length": 14.574999809265137, "blob_id": "ae457b5a50c254329c626ddb1fa1575d36a64a9d", "content_id": "b770c41899868a7cb3fd8aa6b3e82483e94691d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 661, "license_type": "no_license", "max_line_length": 33, "num_lines": 40, "path": "/Semi_Pong/Semi_Pong.pyde", "repo_name": "ABaldeosinghASC/ASC-Folder", "src_encoding": "UTF-8", "text": "from time import *\n\nfrom random import *\nxPos = randint(0,690)\nyPos = randint(0,690)\nxSpeed = 5 \nySpeed = 5\n\ndef setup():\n size(700, 700)\n background(255, 255, 255)\ndef draw():\n global xPos\n \n global yPos\n \n global xSpeed\n \n global ySpeed\n \n\n xPos = xPos + xSpeed\n yPos = yPos + ySpeed\n \n fill(255, 0, 0)\n background(255, 255, 255) \n ellipse(xPos, yPos, 20, 20)\n \n\n if xPos >= 690:\n xSpeed = xSpeed * -1\n\n if yPos >= 690:\n ySpeed = ySpeed * - 1 \n \n if xPos <= 10:\n xSpeed = xSpeed * -1\n\n if yPos <= 10:\n ySpeed = ySpeed * - 1 \n \n \n\n \n \n \n \n " }, { "alpha_fraction": 0.37022900581359863, "alphanum_fraction": 0.4694656431674957, "avg_line_length": 13.277777671813965, "blob_id": "f3b2df63fe867631d540f7ced10841407ea9a30c", "content_id": "0da256c07cfb25f1576c32bea8b841f1777e0b5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 262, "license_type": "no_license", "max_line_length": 21, "num_lines": 18, "path": "/dancingrobot.py", "repo_name": "ABaldeosinghASC/ASC-Folder", "src_encoding": "UTF-8", "text": "from Myro import *\ninit(\"sim\")\n\ni = 0\nwhile i < 3:\n motors(3, 0, 2)\n motors(0, -3, 2)\n turnBy(45, \"deg\")\n motors(-3, 0, 2)\n forward(3,1)\n backward(3,1)\n i = i + 1\n\nj = 0\nwhile j <360:\n forward(1,1)\n turnBy(1, \"deg\")\n j = j + 1\n " }, { "alpha_fraction": 0.40534520149230957, "alphanum_fraction": 0.5167037844657898, "avg_line_length": 18.34782600402832, "blob_id": "907385c6691b1e72d57d05c3b0bd59e3bfc6d86f", "content_id": "85d054c2c4c1803bb085b0e76dcd4cdff8870b9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 449, "license_type": "no_license", "max_line_length": 21, "num_lines": 23, "path": "/LetterB.py", "repo_name": "ABaldeosinghASC/ASC-Folder", "src_encoding": "UTF-8", "text": "def letterB():\n forward(3,2)\n turnBy(90, \"deg\")\n forward(2,2)\n turnBy(45, \"deg\")\n forward(1,1)\n turnBy(35, \"deg\")\n forward(.5,.25)\n turnBy(15, \"deg\")\n forward(.5,.25)\n forward(1,.80)\n turnBy(90,\"deg\")\n forward(3,1.80)\n turnBy(90, \"deg\")\n forward(2,2)\n turnBy(45, \"deg\")\n forward(1,1)\n turnBy(35, \"deg\")\n forward(.5,.25)\n turnBy(15, \"deg\")\n forward(.5,.25)\n forward(1,.80)\nletterB()\n " }, { "alpha_fraction": 0.4285714328289032, "alphanum_fraction": 0.5580357313156128, "avg_line_length": 24.294116973876953, "blob_id": "5f8a6888fd3e713ca72d4981196062087c573b1f", "content_id": "d54c680313f944e566187bd35e4a97022e3740ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 448, "license_type": "no_license", "max_line_length": 73, "num_lines": 17, "path": "/diagnol_white_square/diagnol_white_square.pyde", "repo_name": "ABaldeosinghASC/ASC-Folder", "src_encoding": "UTF-8", "text": "from random import *\nfrom math import *\n\ndef setup():\n size (1000,1000)\n background(255, 255, 255)\n\ndef draw():\n \n ellipse(mouseX, mouseY, 50, 50)\n fill(randint(0, 255),randint(0, 255),randint(0, 255)) \n noStroke()\n for i in range(10):\n x = randrange(0,25)\n \n ellipse(mouseX + randint(-50,50), mouseY + randint(-50,50), x, x)\n fill(randint(0, 255),randint(0, 255),randint(0, 255))\n \n \n\n\n\n\n " }, { "alpha_fraction": 0.5301724076271057, "alphanum_fraction": 0.6336206793785095, "avg_line_length": 16.923076629638672, "blob_id": "62ebe17b2734381b20fc90b11ae33491cfa6908e", "content_id": "ccb30a84affdb2837fe26d9f4254de747f15a523", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 232, "license_type": "no_license", "max_line_length": 28, "num_lines": 13, "path": "/test.py", "repo_name": "ABaldeosinghASC/ASC-Folder", "src_encoding": "UTF-8", "text": "from Myro import *\ninit(\"sim\") #Start simukator\nforward(3,1)\nbackward (3,2)\nturnBy(-90, \"deg\") #=> \nturnBy(-33, \"deg\") #=>\nforward (3,3)\nturnBy(90, \"deg\") \nforward(1,2)\nturnBy(75, \"deg\")\nforward(3,8)\nturnBy(155, \"deg\")\nforward(3,20)" }, { "alpha_fraction": 0.6335616707801819, "alphanum_fraction": 0.6378424763679504, "avg_line_length": 33.15151596069336, "blob_id": "2525a8fe3ab3ddf8eb8f7afa35819e718e4f3e65", "content_id": "60f8a74b099f9c3059961df0b5c1319dce7214fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1168, "license_type": "no_license", "max_line_length": 
184, "num_lines": 33, "path": "/Mash.js", "repo_name": "ABaldeosinghASC/ASC-Folder", "src_encoding": "UTF-8", "text": "\r\n\r\n\r\nvar job = [\"You worked for Donald Trump, but killed yourself after an hour on the job\", \"You work as a butler for Batman\", \r\n\t\"Your an exotic dancer\", \r\n\t\"Your prefessional hobo, you actually make a 40$ per hour\"];\r\n\t\r\nvar wife = [\"Your wife is Serena Williams\", \"Your wife is Nicki Minaj\",\r\n\t\t\t\"Your wife is Angelina Jolie, you have 15 children\", \r\n\t\t\t\"You don't have a wife, but your single and ready to mingle\"];\r\n\r\nvar car = [\"You have a Lamborghini Veneno\", \"You don't have a car you got a shopping cart ;-)\", \"You drive a Suzuki Hayabusa\", \"You don't drive you ride a bike. Earth Friendly much?\"];\r\n\r\nvar homes = [\"Mansion\",\"Apartment\",\"Shack\",\"House\"];\r\n\r\nvar x = prompt(\"What job do you want to have\");\r\njob.push(x) \r\n\r\nvar y = prompt(\"Who do you want to be your wife\");\r\nwife.push(y) \r\n\r\nvar z = prompt(\"What car do you want\");\r\ncar.push(z)\r\n\r\nvar roll = Math.floor((Math.random() * wife.length) + 1); \r\nalert(\"Welcome to Anthony's game of chance\") \r\n\r\nalert(\"Your wife will be \" + wife[roll]);\r\n\r\nalert(\"Your job will be \" + job[roll]);\r\n\r\nalert(\"Your car will be \" + car[roll]);\r\n\r\nalert(\"Your form of residency will be \" + (homes[roll]));\r\n\r\nalert(\"Thanks for playing\")\r\n\r\n" }, { "alpha_fraction": 0.6499517560005188, "alphanum_fraction": 0.6894888877868652, "avg_line_length": 47.47618865966797, "blob_id": "f7e7fedd4f1f407c8ffc218a80fee0ff9bc526f5", "content_id": "545c1c8c1ad65275d6eba09b46591bc36e7c3a60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1037, "license_type": "no_license", "max_line_length": 108, "num_lines": 21, "path": "/rehabilitation-yoga/upload/pabloit.js", "repo_name": "ABaldeosinghASC/ASC-Folder", "src_encoding": "UTF-8", "text": "$(document).ready(function(){\r\n\tvar sites = [\r\n\t\"http://www.businessinsider.com/health-benefits-of-medical-marijuana-2014-4\",\r\n\t\"http://www.cracked.com/article_20023_5-illegal-drugs-with-surprisingly-wholesome-medical-uses.html\", \r\n\t\"http://dilanka.cc/ten-reasons-why-you-should-start-doing-drugs/\",\r\n\t\"http://www.medicinalmarijuanaassociation.com/medical-marijuana-blog/5-great-websites-to-buy-weed-online\", \r\n\t\"http://a056-crimestoppers.nyc.gov/crimestoppers/public/index.html\",\r\n\t\"http://www.thetoptens.com/ways-get-arrested/\",\r\n\t\"https://blog.tsheets.com/2016/just-for-fun/robots-taking-your-job\",\r\n\t\"http://www.textfiles.com/anarchy/FDR/fdr-0366.txt\",\r\n\t\"http://listverse.com/2008/08/07/11-tips-for-outrunning-the-cops/\",\r\n\t\"http://www.askmen.com/money/how_to_300/310_how_to.html\",\r\n\t];\r\n\tvar random;\r\n\tvar input;\r\n\t$(\"#look\").click(function(){\r\n\t\trandom = sites[Math.floor(Math.random()*sites.length)]\r\n\t\tinput = $(\"#search_input\").val();\r\n\t\t$('#search_stuff').append('<a href=\"'+ random +'\"><h3>'+ input +'</h3></a>');\r\n\t});\r\n});" }, { "alpha_fraction": 0.6096345782279968, "alphanum_fraction": 0.6096345782279968, "avg_line_length": 31.44444465637207, "blob_id": "b0c11d221f867ae29cc77bc2792a19c6c454a9d3", "content_id": "6da94d453f8175501dd92e6b73a94bcc105951fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1204, "license_type": "no_license", "max_line_length": 64, "num_lines": 36, "path": 
"/Rehabilitaion yoga/rehabilitation-yoga/upload/firebasemployer.js", "repo_name": "ABaldeosinghASC/ASC-Folder", "src_encoding": "UTF-8", "text": "var database = firebase.database().ref(); //link to our database\r\n\r\n//function for sending stuff to my database\r\nfunction sendMessage(){\r\n var nestablishment = $(\"#nestablishment\").val();\r\n var establishment = $(\"#establishment\").val();\r\n var mhours = $(\"#mhours\").val();\r\n var thours = $(\"#thours\").val();\r\n var whours = $(\"#whours\").val();\r\n var thhours = $(\"#thhours\").val();\r\n var fhorus = $(\"#fhours\").val();\r\n var shours = $(\"#shours\").val();\r\n var suhours = $(\"#suhours\").val();\r\n var specialties = $(\"#specialties\").val();\r\n var service = $(\"#service\").val();\r\n var pnumber = $(\"#pnumber\").val();\r\n var location = $(\"#location\").val();\r\n\r\n database.push({\r\n 'Owner of establishment':establishment,\r\n 'Name of establishment':nestablishment,\r\n 'Monday Hours':mhours,\r\n 'Tuesday Hours':thours,\r\n 'Wednesday Hours':whours,\r\n 'Thursday Hours':thhours,\r\n 'Friday Hours':fhours,\r\n 'Saturday Hours':shours,\r\n 'Sunday Hours':suhours,\r\n 'What are you looking for ?':specialties,\r\n 'What services do you offer ?':service,\r\n 'Location of Restaurant':location,\r\n 'Email':email,\r\n 'Phone Number ?':pnumber,\r\n });\r\n location.href = \"congrats.html\";\r\n}\r\n" }, { "alpha_fraction": 0.26259946823120117, "alphanum_fraction": 0.546419084072113, "avg_line_length": 17.799999237060547, "blob_id": "302fc432e79ad5b08b4363482b5630f66536be31", "content_id": "37d75c01d792ddd4afc255905470b652f673db5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 377, "license_type": "no_license", "max_line_length": 42, "num_lines": 20, "path": "/car/car.pyde", "repo_name": "ABaldeosinghASC/ASC-Folder", "src_encoding": "UTF-8", "text": "\ndef makeCar():\n noStroke()\n fill(0, 0, 0)\n rect(320, 350, 100, 35)\n fill(255, 0, 0)\n ellipse(320,350, 20, 20)\n fill(255, 0, 0)\n ellipse(320,385, 20, 20)\n fill(255, 0, 0)\n ellipse(420, 350, 20, 20)\n fill(255, 0, 0)\n ellipse(420, 385, 20, 20)\n triangle(420, 385, 420, 350, 450, 364)\n\nsize(700, 700)\nbackground(255, 255, 255)\n\n\n \nmakeCar() " }, { "alpha_fraction": 0.40625, "alphanum_fraction": 0.4895833432674408, "avg_line_length": 14.166666984558105, "blob_id": "26f22a723dd05268c6eb3762413f2b0f7d923b56", "content_id": "164b32e1b6b11f490cd874770aa3c0b5de495f2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 96, "license_type": "no_license", "max_line_length": 20, "num_lines": 6, "path": "/circlingrobot.py", "repo_name": "ABaldeosinghASC/ASC-Folder", "src_encoding": "UTF-8", "text": "from Myro import *\nj = 0\nwhile j <360:\n forward(1,1)\n turnBy(1, \"deg\")\n j = j + 1\n " }, { "alpha_fraction": 0.5893155336380005, "alphanum_fraction": 0.5893155336380005, "avg_line_length": 24.04347801208496, "blob_id": "1b1cabaa165312e50e2f92a978e066253a3e7a0a", "content_id": "f9054b0bb0bb624498b9dc711440c6d4fd8e5887", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 599, "license_type": "no_license", "max_line_length": 64, "num_lines": 23, "path": "/rehabilitation-yoga/upload/Firebase.js", "repo_name": "ABaldeosinghASC/ASC-Folder", "src_encoding": "UTF-8", "text": "var database = firebase.database().ref(); //link to our database\r\n\r\n//function for sending stuff to my database\r\nfunction 
sendMessage(){\r\n    var firstname = $(\"#firstName\").val();\r\n    var lastname = $(\"#lastName\").val();\r\n    var specialty = $(\"#Specialty\").val();\r\n    var email = $(\"#Email\").val();\r\n    var phoneNumber = $(\"#phoneNumber\").val();\r\n    var age = $(\"#Age\").val();\r\n    \r\n    \r\n\r\n    database.push({\r\n        'LASTNAME':lastname,\r\n        'SPECIALTY':specialty,\r\n        'FIRSTNAME':firstname,\r\n        'EMAIL': email, \r\n        'PHONENUMBER':phoneNumber,\r\n        'AGE': age,\r\n    });\r\n    location.href = \"congrats.html\"\r\n}\r\n" },
{ "alpha_fraction": 0.553505539894104, "alphanum_fraction": 0.6494464874267578, "avg_line_length": 15.941176414489746, "blob_id": "695de45b8cc70322ca32e02128576b76f03682e6", "content_id": "e2a640243e1b480cc59117824cdeb37d0e20a19e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 271, "license_type": "no_license", "max_line_length": 28, "num_lines": 17, "path": "/Taco.py", "repo_name": "ABaldeosinghASC/ASC-Folder", "src_encoding": "UTF-8", "text": "from Myro import *\ninit(\"com3\") #Start simulator (the string was unterminated, which was a syntax error)\nforward(1,4) \nturnBy(90, \"deg\")\nforward(1,4)\nturnBy(90, \"deg\")\nforward(1,4)\nturnBy(90, \"deg\")\nforward(1,4)\na = 3\nforward(1, a)\nturnBy(90, \"deg\")\nforward(1, a)\nturnBy(90, \"deg\")\nforward(1, a)\nturnBy(90, \"deg\")\nforward(1, a)\n" },
{ "alpha_fraction": 0.3321722745895386, "alphanum_fraction": 0.47023433446884155, "avg_line_length": 22.601503372192383, "blob_id": "d2b5296691746a270b3187c75bb0eb3479cb403a", "content_id": "a5b8301fd4696086f79bdace1bb2eedcc0869e13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3158, "license_type": "no_license", "max_line_length": 92, "num_lines": 133, "path": "/diagnol_white_square/Paint_Finished______/Paint_Finished______.pyde", "repo_name": "ABaldeosinghASC/ASC-Folder", "src_encoding": "UTF-8", "text": "from random import *\ndef setup():\n    size(500,500)\n    stroke(0, 0, 0) \n    #RED\n    fill(255,255,255) \n    rect(0,0,100,100)\n    #GREEN\n    fill(255,255,255)\n    rect(100,0,100,100)\n    #BLUE\n    fill(255, 255, 255) \n    rect(200,0,100,100)\n    #YELLOW\n    fill(255, 255, 255)\n    rect(300,0,100,100)\n    #PINK\n    fill(255, 255, 255)\n    rect(400,0,100,100)\n    #CYAN\n    fill(255, 255, 255)\n    rect(500,0,100,100)\n    #WHITE\n    fill(255, 255, 255)\n    rect(600,0,100,100)\n    #BLACK\n    fill(255, 255, 255)\n    rect(700,0,100,100)\n    y = 0\n    x = 0\n    i = 0\n    while i < 5 : # Second row\n        #y + 100\n        fill(255, 255, 255)\n        rect(i * 100, y + 100, 100, 100)\n        i = i + 1\n    i = 0\n    while i < 5 : #third row\n        #y + 100\n        fill(255, 255, 255)\n        rect(i * 100, y + 200, 100, 100)\n        \n        i = i + 1\n    i = 0\n    while i < 5: \n        fill(255, 255, 255)\n        rect(i * 100, y + 300, 100, 100)\n        \n        i = i + 1\n    i = 0\n    while i < 5: \n        fill(255, 255, 255)\n        rect(i * 100, y + 400, 100, 100)\n        i = i + 1\n\n    \n    \n    \n    \n    \n    \n    \n    \n\n\ntheSetColor = 1\n\ndef draw():\n    global theSetColor \n    \n    if mouseY < 100:\n        if mouseX < 100 and mousePressed: # Red Box #My current color is now going to be red\n            theSetColor = 1  # was a bare 'ran', which raised a NameError every frame\n        \n    if mouseY < 100: \n        if mouseX > 100 and mouseX < 200 and mousePressed:  # was 'mousePressed < 200', which is always true\n            theSetColor = 2 \n    \n    if mouseY < 100:\n        if mouseX > 200 and mouseX < 300 and mousePressed:\n            theSetColor = 4\n    \n    if mouseY < 100:\n        if mouseX > 300 and mouseX < 400 and mousePressed:\n            theSetColor = 3\n    \n    if mouseY < 100:\n        if mouseX > 400 and mouseX < 500 and mousePressed:\n            theSetColor = 53\n    \n    if mouseY < 100:\n        if mouseX > 500 and mouseX < 600 and mousePressed:\n            theSetColor = 6\n    \n    if mouseY < 100:\n        if mouseX > 600 and mouseX < 700 and mousePressed:\n            theSetColor = 15\n    if mouseY < 100:\n        if mouseX > 
700 and mouseX < 800 and mousePressed:\n            theSetColor = 0\n\n    elif mouseY > 100:\n        if mousePressed and theSetColor == 1: \n            fill(255, 0, 0)\n            ellipse(mouseX, mouseY, 20, 20)\n        \n        if mousePressed and theSetColor == 2:\n            fill(0, 255, 0)\n            ellipse(mouseX, mouseY, 20, 20)\n        \n        if mousePressed and theSetColor == 4:\n            fill(0, 0, 255)\n            ellipse(mouseX, mouseY, 20, 20)\n        \n        if mousePressed and theSetColor == 3:\n            fill(255, 215, 0)\n            ellipse(mouseX, mouseY, 20, 20)\n        \n        if mousePressed and theSetColor == 53:\n            fill(255, 20, 147)\n            ellipse(mouseX, mouseY, 20, 20)\n        \n        if mousePressed and theSetColor == 6:\n            fill(0, 255, 255)\n            ellipse(mouseX, mouseY, 20, 20)\n        \n        if mousePressed and theSetColor == 15:\n            fill(255, 255, 255)\n            ellipse(mouseX, mouseY, 20, 20)\n        \n        if mousePressed and theSetColor == 0:\n            fill(0, 0, 0)\n            ellipse(mouseX, mouseY, 20, 20)\n    \n    " },
{ "alpha_fraction": 0.6419001221656799, "alphanum_fraction": 0.6979293823242188, "avg_line_length": 30.259260177612305, "blob_id": "f2a73ea76cce7a72c75c4d0cdb71e0862a5cd8fc", "content_id": "faba4d72f97a403e58dd3d83212b8d875355eeb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 821, "license_type": "no_license", "max_line_length": 50, "num_lines": 27, "path": "/Obamafication.py", "repo_name": "ABaldeosinghASC/ASC-Folder", "src_encoding": "UTF-8", "text": "from Myro import *\npic = makePicture(\"ferrari.jpg\")\npixels = getPixels(pic)\n\nobamaDarkBlue = makeColor(0,51,76)\nobamaRed = makeColor(217, 26, 33)\nobamaBlue = makeColor(112,150,158)\nobamaYellow = makeColor(252, 227, 166)\n##If a pixel's gray value is greater than 180, \n#then change that pixel's color to Obama-Yellow. \n#If the gray value is greater than 120, then the \n#pixel should be changed to Obama-Blue. \n#If the gray value is greater than 60, \n#then the pixel should be changed to Obama-Red. 
\n#Otherwise the pixel should be Obama-DarkBlue\n\nfor pixel in pixels:\n gray = getGray(pixel)\n if gray > 180:\n setColor(pixel,obamaYellow)\n elif gray > 120:\n setColor(pixel,obamaBlue)\n elif gray > 60: \n setColor(pixel,obamaRed)\n else: \n setColor(pixel,obamaDarkBlue)\nshow(pic) " }, { "alpha_fraction": 0.5211267471313477, "alphanum_fraction": 0.5211267471313477, "avg_line_length": 13.75, "blob_id": "bdc75a677462fe9dd991a9301a3ba8a994e5c68d", "content_id": "40d145b18be638b112a19414d40d4fcce8c89db4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 71, "license_type": "no_license", "max_line_length": 29, "num_lines": 4, "path": "/rehabilitation-yoga/upload/pull.js", "repo_name": "ABaldeosinghASC/ASC-Folder", "src_encoding": "UTF-8", "text": "\r\n function retrieveData(){\r\n \tlocation.href = \"f.html\" \r\n \r\n}\r\n\r\n\r\n" }, { "alpha_fraction": 0.7002801299095154, "alphanum_fraction": 0.7021475434303284, "avg_line_length": 28.75, "blob_id": "3dfc6e2cfa0e15b617494412f7598fe794f3ab03", "content_id": "38ea7f8f62ee52548b52aa9984e0cf29496c1a2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1071, "license_type": "no_license", "max_line_length": 96, "num_lines": 36, "path": "/GameMaSh.py", "repo_name": "ABaldeosinghASC/ASC-Folder", "src_encoding": "UTF-8", "text": "from random import *\n\nwives =[\"Gwyneth Paltrow\", \"You don't have one she left.\",\n \"Polygamy, buddy Beyonce and Taylor Swift\",\n \"You have an internet girlfirend, but your probably being catfished\"]\n\njobs = [\"Your a college droput, you live on the street between the alley and the chinese store\",\n \"You handout newspapers by the L on 14th street.\",\n \"You got hired by All Star Code to hack Goldman and steal their money\"]\n\ncars =[\"Got reposessed you have to buy a weekly metrocard\",\n \"Your whippin a toyota hybrid, you need a tax break\",\n \"You have a butler named Alfred who drives you in a Uber \"]\n \nhomes = [\"Mansion\",\"Apartment\",\"Shack\",\"House\"]\n \nx = raw_input(\"Who do you want to be your wife\")\nwives.append(x) \n\ny = raw_input(\"What job do you want to have\")\njobs.append(y) \n\nz = raw_input(\"What car do you want\")\ncars.append(z)\n\nprint(\"Welcome to Anthony's game of chance\") \n\nprint(\"Your wife will be\", choice(wives))\n\nprint(\"Your job will be\", choice(jobs))\n\nprint(\"Your car will be\", choice(cars))\n\nprint(\"Your form of residency will be\",choice(homes))\n\nprint(\"Thanks for playing\") " }, { "alpha_fraction": 0.47417840361595154, "alphanum_fraction": 0.5539906024932861, "avg_line_length": 14.285714149475098, "blob_id": "ce4e8eca61372a2510d12f267c2376b07a882363", "content_id": "c122911abd217df211e54ea35a8d8b67c6cf376d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 213, "license_type": "no_license", "max_line_length": 23, "num_lines": 14, "path": "/draw.py", "repo_name": "ABaldeosinghASC/ASC-Folder", "src_encoding": "UTF-8", "text": "from Myro import *\ninit (\"sim\")\npenDown()\n\ndef letterA():\n turnBy(60, \"deg\")\n forward(3,1)\n turnBy(-135, \"deg\")\n forward(3,1)\n backward(3,.40)\n turnBy(-115, \"deg\")\n forward(1,1)\n \nletterA()" }, { "alpha_fraction": 0.45385777950286865, "alphanum_fraction": 0.49773070216178894, "avg_line_length": 25.404254913330078, "blob_id": "780cb066c74f7d30bec4f10f009aef9d6fe372b4", "content_id": "1f6644eda19e9a6ae5045dbb657d21a868a6d93d", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1322, "license_type": "no_license", "max_line_length": 133, "num_lines": 47, "path": "/Roger_screwed_me_over_/Roger_screwed_me_over_.pyde", "repo_name": "ABaldeosinghASC/ASC-Folder", "src_encoding": "UTF-8", "text": "from random import*\nfrom time import *\n\ndef setup():\n size(500, 500)\n background(255, 255, 255)\n\ndef afunc():\n return[choice(alphabet), x, y]\n \nalphabet = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\",\"j\",\"k\",\"l\",\"m\",\"n\",\"o\",\"p\",\"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\"]\nx = 0\ny = randint(0, 300)\nletandpos = [] #letter and position\nletandpos.append(afunc()) #make a new letter and add to our list\nscore = 0 # count of score\n\n\ndef draw():\n print(letandpos)\n background(255, 255, 255)\n global x\n global y\n global letandpos\n global alphabet\n global score\n item = 0\n textSize(30)\n text(\"Score\", 0, 30)\n text(score, 90, 30)\n\n while item < len(letandpos):\n print(\"hello world\")\n fill(255,0,0)\n text(letandpos[item][0],letandpos[item][1], letandpos[item][2])\n letandpos[item][1] = letandpos[item][1] + 2 #updating the x coordinates\n #sleep(1)\n if keyPressed:\n if key == letandpos[item][0]:\n letandpos.pop(item)\n letandpos.append(afunc()) #make a new letter and add to our list\n score += 1\n \n \n if letandpos[item][1] > 500:\n letandpos.pop(item)\n # \n \n \n \n \n \n \n\n\n\n\n\n \n\n \n " } ]
20
cesarca/toolium
https://github.com/cesarca/toolium
3dca229c4aa4cdad387ee8111c5076a2a1712a2b
f4f5b98302239a9f6ce4f42e07db49891dc1c362
dce79f4e81e7eccd1039c23d505f4a181805f9c6
refs/heads/master
2020-12-25T06:55:05.702598
2016-07-04T13:35:18
2016-07-04T13:35:18
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7290452718734741, "alphanum_fraction": 0.7324069738388062, "avg_line_length": 45.968421936035156, "blob_id": "aecf1d1cbc0f9189168a2088188a0a1b58405903", "content_id": "56bef7b583f23338a45e955e41b737ff0a17b383", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4464, "license_type": "permissive", "max_line_length": 117, "num_lines": 95, "path": "/toolium/test/pageobjects/test_mobile_page_object.py", "repo_name": "cesarca/toolium", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nu\"\"\"\nCopyright 2016 Telefónica Investigación y Desarrollo, S.A.U.\nThis file is part of Toolium.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport os\nimport unittest\n\nfrom nose.tools import assert_is_instance, assert_equal, assert_true\n\nfrom toolium.config_files import ConfigFiles\nfrom toolium.driver_wrappers_pool import DriverWrappersPool\nfrom toolium.test.pageobjects.examples.android.login import AndroidLoginPageObject\nfrom toolium.test.pageobjects.examples.base.login import BaseLoginPageObject\nfrom toolium.test.pageobjects.examples.ios.login import IosLoginPageObject\nfrom toolium.test.pageobjects.examples.login_one_file import AndroidLoginOneFilePageObject, IosLoginOneFilePageObject\nfrom toolium.test.pageobjects.examples.login_one_file import BaseLoginOneFilePageObject\n\n\nclass TestMobilePageObject(unittest.TestCase):\n def setUp(self):\n # Configure properties\n config_files = ConfigFiles()\n root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n config_files.set_config_directory(os.path.join(root_path, 'conf'))\n config_files.set_config_properties_filenames('properties.cfg')\n self.driver_wrapper = DriverWrappersPool.get_default_wrapper()\n self.driver_wrapper.configure(tc_config_files=config_files)\n\n def test_mobile_page_object_ios(self):\n self.driver_wrapper.config.set('Driver', 'type', 'ios')\n page_object = BaseLoginPageObject(self.driver_wrapper)\n\n # Check instance type, specific locator and common method\n assert_is_instance(page_object, IosLoginPageObject)\n assert_equal(page_object.username.locator[1], 'username_id_ios')\n assert_true(hasattr(page_object, 'login'))\n\n def test_mobile_page_object_android(self):\n self.driver_wrapper.config.set('Driver', 'type', 'android')\n page_object = BaseLoginPageObject(self.driver_wrapper)\n\n # Check instance type, specific locator and common method\n assert_is_instance(page_object, AndroidLoginPageObject)\n assert_equal(page_object.username.locator[1], 'username_id_android')\n assert_true(hasattr(page_object, 'login'))\n\n def test_mobile_page_object_default(self):\n self.driver_wrapper.config.set('Driver', 'type', 'unknown')\n page_object = BaseLoginPageObject(self.driver_wrapper)\n\n # Check instance type, specific locator and common method\n assert_is_instance(page_object, AndroidLoginPageObject)\n assert_equal(page_object.username.locator[1], 'username_id_android')\n assert_true(hasattr(page_object, 'login'))\n\n def 
test_mobile_page_object_one_file_ios(self):\n self.driver_wrapper.config.set('Driver', 'type', 'ios')\n page_object = BaseLoginOneFilePageObject(self.driver_wrapper)\n\n # Check instance type, specific locator and common method\n assert_is_instance(page_object, IosLoginOneFilePageObject)\n assert_equal(page_object.username.locator[1], 'username_id_ios')\n assert_true(hasattr(page_object, 'login'))\n\n def test_mobile_page_object_one_file_android(self):\n self.driver_wrapper.config.set('Driver', 'type', 'android')\n page_object = BaseLoginOneFilePageObject(self.driver_wrapper)\n\n # Check instance type, specific locator and common method\n assert_is_instance(page_object, AndroidLoginOneFilePageObject)\n assert_equal(page_object.username.locator[1], 'username_id_android')\n assert_true(hasattr(page_object, 'login'))\n\n def test_mobile_page_object_one_file_default(self):\n self.driver_wrapper.config.set('Driver', 'type', 'unknown')\n page_object = BaseLoginOneFilePageObject(self.driver_wrapper)\n\n # Check instance type, specific locator and common method\n assert_is_instance(page_object, AndroidLoginOneFilePageObject)\n assert_equal(page_object.username.locator[1], 'username_id_android')\n assert_true(hasattr(page_object, 'login'))\n" }, { "alpha_fraction": 0.7375097274780273, "alphanum_fraction": 0.7375097274780273, "avg_line_length": 33.80180358886719, "blob_id": "f4124daa431def57dfdc318d6819a5a23e481042", "content_id": "9b2aa42bb70f0808e0d526b2b82500775a177507", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3863, "license_type": "permissive", "max_line_length": 122, "num_lines": 111, "path": "/docs/bdd_integration.rst", "repo_name": "cesarca/toolium", "src_encoding": "UTF-8", "text": ".. _bdd_integration:\n\nBDD Integration\n===============\n\nToolium can be also used with behave and lettuce tests.\n\nBehave\n~~~~~~\n\nBehave tests should be developed as usual, only *environment.py* file should be modified to initialize driver and the\nrest of Toolium configuration.\n\nEnvironment methods should call to the corresponding Toolium environment methods, as can be seen in the following\nexample:\n\n.. code-block:: python\n\n from toolium.behave.environment import (before_all as toolium_before_all, before_scenario as toolium_before_scenario,\n after_scenario as toolium_after_scenario, after_all as toolium_after_all)\n\n\n def before_all(context):\n toolium_before_all(context)\n\n\n def before_scenario(context, scenario):\n toolium_before_scenario(context, scenario)\n\n\n def after_scenario(context, scenario):\n toolium_after_scenario(context, scenario)\n\n\n def after_all(context):\n toolium_after_all(context)\n\n\nAfter initialization, the following attributes will be available in behave context:\n\n- context.toolium_config: dictionary with Toolium configuration, readed from properties.cfg\n- context.driver_wrapper: :ref:`DriverWrapper <driver_wrapper>` instance\n- context.driver: Selenium or Appium driver instance\n- context.utils: :ref:`Utils <utils>` instance\n\nBehave userdata properties\n--------------------------\n\nBy default, Toolium configuration is loaded from properties.cfg and local-properties.cfg files. If different properties\nfiles are used for different environments, they can be selected using behave user property named *env*. For example, if\n*env* value is *android*, Toolium configuration will be loaded from properties.cfg, android-properties.cfg and\nlocal-android-properties.cfg files:\n\n.. 
code:: console\n\n $ behave -D env=android\n\nMoreover, Toolium properties can be modified from behave userdata configuration. For example, to select the driver type\nfrom command line instead of using the driver type defined in properties.cfg:\n\n.. code:: console\n\n $ behave -D Driver_type=chrome\n\nBehave tags\n-----------\n\nToolium defines some scenario tags to configure Appium tests:\n\n* @no_reset_app: mobile app will not be reset before test (i.e. no-reset Appium capability is set to true)\n* @reset_app: mobile app will be reset before test (i.e. no-reset and full-reset Appium capabilities are set to false)\n* @full_reset_app: mobile app will be full reset before test (i.e. full-reset Appium capability is set to true)\n* @android_only: identifies a scenario that should only be executed in Android\n* @ios_only: identifies a scenario that should only be executed in iOS\n\nLettuce\n~~~~~~~\n\nLettuce tests should be developed as usual, only *terrain.py* file should be modified to initialize driver and the rest\nof Toolium configuration.\n\nTerrain methods should call to the corresponding Toolium terrain methods, as can be seen in the following example:\n\n.. code-block:: python\n\n from lettuce import after, before\n from toolium.lettuce.terrain import (setup_driver as toolium_setup_driver, teardown_driver as toolium_teardown_driver,\n teardown_driver_all as toolium_teardown_driver_all)\n\n\n @before.each_scenario\n def setup_driver(scenario):\n toolium_setup_driver(scenario)\n\n\n @after.each_scenario\n def teardown_driver(scenario):\n toolium_teardown_driver(scenario)\n\n\n @after.all\n def teardown_driver_all(total):\n toolium_teardown_driver_all(total)\n\n\nAfter initialization, the following attributes will be available in world object:\n\n- world.toolium_config: dictionary with Toolium configuration, readed from properties.cfg\n- world.driver_wrapper: :ref:`DriverWrapper <driver_wrapper>` instance\n- world.driver: Selenium or Appium driver instance\n- world.utils: :ref:`Utils <utils>` instance\n" }, { "alpha_fraction": 0.6875551342964172, "alphanum_fraction": 0.70020592212677, "avg_line_length": 38.98823547363281, "blob_id": "4c711888d3acc715f3ca76ba32e051538fec5c81", "content_id": "599ceb2f51e2ef712e12e6a98f115227ff51c3fa", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3401, "license_type": "permissive", "max_line_length": 114, "num_lines": 85, "path": "/toolium/test/behave/test_environment.py", "repo_name": "cesarca/toolium", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nu\"\"\"\nCopyright 2016 Telefónica Investigación y Desarrollo, S.A.U.\nThis file is part of Toolium.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport os\nimport unittest\n\nimport mock\nfrom ddt import ddt, data, unpack\nfrom nose.tools import assert_equal, assert_is_none\n\nfrom toolium.behave.environment import get_jira_key_from_scenario, before_all\nfrom toolium.config_files import ConfigFiles\n\ntags = (\n 
([\"jira('PROJECT-32')\"], 'PROJECT-32'),\n ([\"jira=PROJECT-32\"], 'PROJECT-32'),\n ([\"jira(PROJECT-32)\"], 'PROJECT-32'),\n ([\"jira='PROJECT-32'\"], 'PROJECT-32'),\n ([\"jiraPROJECT-32\"], 'PROJECT-32'),\n ([\"jira\"], None),\n ([\"PROJECT-32\"], None),\n (['slow', \"jira('PROJECT-32')\", 'critical'], 'PROJECT-32'),\n (['slow', \"PROJECT-32\", 'critical'], None),\n (['slow', \"jira('PROJECT-32')\", \"jira('PROJECT-33')\"], 'PROJECT-32'),\n)\n\n\n@ddt\nclass EnvironmentJiraTests(unittest.TestCase):\n @data(*tags)\n @unpack\n def test_get_jira_key_from_scenario(self, tag_list, jira_key):\n scenario = mock.Mock()\n scenario.tags = tag_list\n\n # Extract Jira key and compare with expected key\n assert_equal(jira_key, get_jira_key_from_scenario(scenario))\n\n\nclass EnvironmentTests(unittest.TestCase):\n @mock.patch('toolium.behave.environment.create_and_configure_wrapper')\n def test_before_all(self, create_and_configure_wrapper):\n # Create context mock\n context = mock.MagicMock()\n context.config.userdata.get.return_value = None\n context.config_files = ConfigFiles()\n\n before_all(context)\n\n # Check that configuration folder is the same as environment folder\n expected_config_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'conf')\n assert_equal(context.config_files.config_directory, expected_config_directory)\n assert_is_none(context.config_files.config_properties_filenames)\n assert_is_none(context.config_files.config_log_filename)\n\n @mock.patch('toolium.behave.environment.create_and_configure_wrapper')\n def test_before_all_env(self, create_and_configure_wrapper):\n # Create context mock\n context = mock.MagicMock()\n context.config.userdata.get.return_value = 'os'\n context.config_files = ConfigFiles()\n\n before_all(context)\n\n # Check that configuration folder is the same as environment folder and configuration files are 'os' files\n expected_config_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'conf')\n expected_config_properties_filenames = 'properties.cfg;os-properties.cfg;local-os-properties.cfg'\n assert_equal(context.config_files.config_directory, expected_config_directory)\n assert_equal(context.config_files.config_properties_filenames, expected_config_properties_filenames)\n assert_is_none(context.config_files.config_log_filename)\n" } ]
3
boshika/oh-no-broken-code
https://github.com/boshika/oh-no-broken-code
af152b0ba3cc5e179c3ab5a167ae3e2ab458c867
3c0e6eee8a4a560c172b435ce56358c58eb4acc8
e2851df0ce58d1eaaee6b7b155935d366af80b13
refs/heads/master
2016-12-22T04:22:23.455496
2015-05-08T16:50:09
2015-05-08T16:50:09
35,139,183
1
3
null
2015-05-06T04:20:59
2015-05-05T22:42:04
2014-10-31T02:43:59
null
[ { "alpha_fraction": 0.6591478586196899, "alphanum_fraction": 0.6591478586196899, "avg_line_length": 27.535715103149414, "blob_id": "f1acb41f24b1527a670409b2c8b65a1442c17f38", "content_id": "3d20fc72d6838ea73bdc5a50b26c9277e07cfa5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 798, "license_type": "no_license", "max_line_length": 91, "num_lines": 28, "path": "/github.py", "repo_name": "boshika/oh-no-broken-code", "src_encoding": "UTF-8", "text": "# I'm so close with this one.. I've got it to print out some stuff I got\n# from the GitHub API, but not what I want!\n# I saved the API stuff in a file to save time! :-)\n\nimport json\n\ndef github_api_response():\n with open('github.json') as f:\n github = json.loads(f.read())\n return github\n\n\ndef print_user_repository_names():\n\n # I just want to print out the names and descriptions of the repositories..\n # like 'Hackbright-Curriculum: Exercises for the Hackbright Academy Fellowship Program'\n\n repos = github_api_response()\n\n for repo in repos:\n # I don't think I have these keys right\n # Also I'd like to print it on one line.\n print repo['repo_name']\n print repo['repo_description']\n\n\nif __name__==\"__main__\":\n print_user_repository_names()" }, { "alpha_fraction": 0.716312050819397, "alphanum_fraction": 0.716312050819397, "avg_line_length": 27.840909957885742, "blob_id": "8d2de2048f358a5a52355a96d3ae1f8a6cba5b83", "content_id": "188e38ed3897bfae8b115d63a414960606c543b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1269, "license_type": "no_license", "max_line_length": 174, "num_lines": 44, "path": "/README.md", "repo_name": "boshika/oh-no-broken-code", "src_encoding": "UTF-8", "text": "## Oh No! Broken Code!\n\nThis repository has a few files that need a bit of love. Can you fix the errors and add the missing code?\n\nDifficulty Order:\n\n* `icecream.py` (lists)\n* `birthday.py` (dictionaries, if)\n* `keyboard.py` (indentation)\n* `github.py` (finding a key in a json file)\n* `halloween.py` (random, lists, input, if)\n* `vacation.py` (functions, returns, dictionaries)\n\n### Getting Started\n\nYou are going to create your own copy of this code, a process called [forking](https://help.github.com/articles/fork-a-repo/).\n\nFirst of all, **fork** the repository. Hit the 'Fork' button in the top right corner.\n\nOnce you have forked it, you will have your own copy under your own GitHub account!\n\nGo to that copy and **clone** the repository to a local folder.\n\nAt the right of the page you will see a section called \"HTTPS clone URL\". 
Copy that URL and in a Terminal window, go to your projects folder (or wherever) and clone the repo:\n\n```\ncd projects\ngit clone [PASTE HERE]\n```\n\nNow you should have a new directory called `oh-no-broken-code`:\n\n```\ncd oh-no-broken-code\nls\n```\n\nWhen you are done making changes to your files, **commit** and **push** them back to your GitHub repo!\n\n```\ngit add *.py\ngit commit -m \"I fixed all the bugz\"\ngit push origin master\n```\n" }, { "alpha_fraction": 0.6649873852729797, "alphanum_fraction": 0.6675062775611877, "avg_line_length": 30.760000228881836, "blob_id": "eb309c19d241958bb33640049513aa5c11603419", "content_id": "4e15c271400849767a44ed9fde73af58b13e4413", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 794, "license_type": "no_license", "max_line_length": 75, "num_lines": 25, "path": "/keyboard.py", "repo_name": "boshika/oh-no-broken-code", "src_encoding": "UTF-8", "text": "# I had this great script, but my keyboard broke and I can't indent\n# any of my code. Can you indent it for me?\n\ncats = ['Grizabella', 'Rum Tum Tugger', 'Demeter', 'Munkustrap',\n'Mistoffelees', 'Macavity', 'Rumpleteazer', 'Mungo Jerry', 'Skimbleshanks']\n\ninstruments = ['keyboard', 'cello', 'bass', 'flute', 'pipe', 'piano',\n'violin', 'oboe', 'triangle']\n\ndef get_cat_and_instrument(position):\n cat = cats[position]\n instrument = instruments[position]\n return \"{} plays the {}\".format(cat, instrument)\n\n# Print out my cat orchestra one by one\ntotal_cats = len(cats)\nposition = 0\n\nwhile True:\n if position <= total_cats:\n print get_cat_and_instrument(position)\n position += 1\n break\n\n# Could you do the assignment of cats and instruments any other ways?\n" }, { "alpha_fraction": 0.620309054851532, "alphanum_fraction": 0.6501103639602661, "avg_line_length": 25.647058486938477, "blob_id": "5283a4b260404f4d63bf0fe3e0f3bd4bf24126f7", "content_id": "d85ae68c5a120dcac5afa07433a978f7fc736778", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 906, "license_type": "no_license", "max_line_length": 68, "num_lines": 34, "path": "/birthday.py", "repo_name": "boshika/oh-no-broken-code", "src_encoding": "UTF-8", "text": "from datetime import datetime\n\nnumber_endings = {\n 1: 'st',\n 2: 'nd',\n 3: 'rd'\n}\n\ntoday = datetime.now()\ntodays_day = today.day\n\n# get the right ending, e.g. 1 => 1st, 2 => 2nd\n# but beware! 
11 => 11th, 21 => 21st, 31 => 31st\n\n# test your code by forcing todays_day to be something different\n# todays_day = 11\n\nending = 'th'\n\nif todays_day < 10 or todays_day > 20:\n # x % y (mod) will give the remainder when x is divided by y\n # -- but x needs to be an integer!\n number = todays_day % 10\n # look up number in number_endings\n # and if you find it as a key, put the value in ending\n if number in number_endings:\n\n# make this print ending, not 'th'\n print \"Today is the {}th\".format(todays_day)\n\nbirthday = int(raw_input(\"What day of the month is your birthday?\"))\n\n# make this print the birthday, and the right ending\nprint \"Your birthday is on the {}th!\".format(birthday)\n" }, { "alpha_fraction": 0.6377245783805847, "alphanum_fraction": 0.6377245783805847, "avg_line_length": 22.85714340209961, "blob_id": "a86698e831bfe479ca829fa16333a74e106f4e36", "content_id": "4d31bc174dce5103ebae5a67c2e2cce71915427e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1002, "license_type": "no_license", "max_line_length": 69, "num_lines": 42, "path": "/halloween.py", "repo_name": "boshika/oh-no-broken-code", "src_encoding": "UTF-8", "text": "# I can't be bothered to think of a Hallowe'en costume so\n# can you help me generate one randomly?\n\nimport random\nnouns = []\nadjectives = []\n\nwith open('things.txt') as f:\n # We don't want those stinky \\n newline characters\n # so we call strip() before adding it to our nouns list.\n for line in f:\n nouns.append(line.strip())\n\nwith open('descriptors.txt') as f:\n for line in f:\n adjectives.append(line.strip())\n\n\ndef generate_costume():\n\n # pick something random from the nouns and adjectives list\n\n noun = random.choice(nouns)\n adj = random.choice(adjectives)\n print \"You got dressed as a {} {} to the party.\".format(noun,adj)\n \n\n\nwhile True:\n generate_costume()\n\n \n\n happy = raw_input(\"Are you happy with this choice? \")\n\n # Check if the user typed something like 'yes' or 'y' and\n # quit the program if they are happy.\n if happy == 'yes':\n exit()\n else:\n print \"OK, I will choose another costume. Hold on...\"\n print\n" }, { "alpha_fraction": 0.6390934586524963, "alphanum_fraction": 0.6390934586524963, "avg_line_length": 22.851350784301758, "blob_id": "b37d3462a1164268dce9567fa5b63f05e9edc1e3", "content_id": "16e7997883d104fb306774b01fe9d150c740ce68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1765, "license_type": "no_license", "max_line_length": 111, "num_lines": 74, "path": "/vacation.py", "repo_name": "boshika/oh-no-broken-code", "src_encoding": "UTF-8", "text": "# This file has a few mistakes and some things I forgot to put in.\n# When I run it I don't get anything... 
can you fix it so I\n# get asked for my vacation spot, and get a recommendation?\n# Hint:\n# Start at the bottom and work upwards.\n\n\nvacation_spots = ['Tahoe', 'Hawaii', 'New York', 'Mexico']\n\nseasons = ['spring', 'summer', 'fall', 'winter']\n\nweather_patterns = {\n 'spring': 'rain',\n 'summer': 'sun',\n 'fall': 'wind',\n 'winter': 'snow'\n}\n\nactivities = {\n 'rain': 'visiting museums',\n 'wind': 'kiteboarding',\n 'sun': 'sunbathing',\n 'snow': 'skiing'\n}\n\n\ndef best_vacation_spot(weather_type):\n # Look at the weather type and return the vacation spot that\n # is the best for that weather.\n # You can use this mapping:\n # snow = Tahoe\n # wind = Hawaii\n # rain = New York\n # sun = Mexico\n\n return \"Stay at home\"\n\n\ndef vacation_activity(weather_type):\n # Look up the vacation activity from activities\n # and return just the activity itself\n print activity\n\n\ndef get_my_vacation():\n\n season = raw_input(\"What season do you want to travel? \")\n\n # check if season is in the seasons list\n if season == \n if not seasons:\n print \"Sorry, that isn't a season. I can't help you.\"\n\n # look up the weather type for that season\n weather = weather_patterns[season]\n\n # get the best vacation spot for that type\n best_vacation_spot(weather_type)\n\n # get the best vacation activity for that type\n vacation_activity(weather_type)\n\n print \"You should travel to {}, where you can spend your time {}!\".format(vacation_spot, vacation_activity)\n\n\ndef main():\n print \"Welcome to the Vacation-o-Matic!\"\n while True:\n get_my_vacation()\n \n\n\nif __name__==\"__main__\":\n main()\n" }, { "alpha_fraction": 0.5896551609039307, "alphanum_fraction": 0.5896551609039307, "avg_line_length": 27.899999618530273, "blob_id": "83766eccd415c2bd0bef14beb38b6e6914d6e7f0", "content_id": "fca5072d57d385ce4ff762eb9e4550f56c6aad63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 290, "license_type": "no_license", "max_line_length": 69, "num_lines": 10, "path": "/icecream.py", "repo_name": "boshika/oh-no-broken-code", "src_encoding": "UTF-8", "text": "# I want to print out my favorite ice cream flavors.\n\nall_flavors = ['chocolate', 'mint', 'strawberry', 'caramel', 'pecan',\n 'cookie dough', 'vanilla', 'lemon']\nmy_faves = ['mint', 'caramel']\n\n\nfor item in my_faves:\n if my_faves:\n print \"I like {}\".format(item)\n\n" } ]
7
Pari555/Functions
https://github.com/Pari555/Functions
16e2baf1527002640aca2ada6368ffb2b143d57b
270ea91febfbd51da3ef68e6e699c7a9fc92f40d
dceb821d5ce1413c0b70a3999de6adf2ea37a611
refs/heads/master
2023-04-13T13:27:22.211409
2021-04-15T23:32:30
2021-04-15T23:32:30
358,419,887
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 22.799999237060547, "blob_id": "e41eec5865e4ff3ac725d657d7612e8bc25cbfe8", "content_id": "4ed48cc4bef4273d3be03820b0799e3eec3dfd78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 356, "license_type": "no_license", "max_line_length": 74, "num_lines": 15, "path": "/README.md", "repo_name": "Pari555/Functions", "src_encoding": "UTF-8", "text": "## Functions\nA function is a group of related statements that performs a specific task.\nFuctions help us breakdown our program into sections.\n\n### DRY: Don't Repeat Yourself\n\n### Formula:\n```python\ndef functionName(optional parameters):\n\t#BODY\n\t#Optional 'return' statements\n```\n\nTo use a fuction, we have to __call__ it\n`functionName(optional parameters)`" }, { "alpha_fraction": 0.6476293206214905, "alphanum_fraction": 0.6842672228813171, "avg_line_length": 21.634145736694336, "blob_id": "e18685dcdb361138ef6f767b9e38b612cc430e85", "content_id": "c1f0169695ea2369c411bd827360cc5da5a7ab30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 928, "license_type": "no_license", "max_line_length": 76, "num_lines": 41, "path": "/main.py", "repo_name": "Pari555/Functions", "src_encoding": "UTF-8", "text": "# get Name function\ndef getName():\n\tuserName = input(\"Please tell me your name: \")\n\t# userName variable is specific to this function\n\treturn userName\n\n\ndef greetUser(name): #parameter variables are specific to the function\n\tprint(\"Hi \" + name)\n\n#name1 = getName()\n#print(\"Hi \" + name1)\n# Instead of doing the 2 lines of code above just do this\ngreetUser(getName())\n\n#name2 = getName()\n#print(\"Hi \" + name2)\ngreetUser(getName())\n\n#name3 = getName()\n#print(\"Hi \" + name3)\ngreetUser(getName())\n\n #Ask the user for 2 numbers - num1, num2\n #Write a function that takes num1 and num2 as parameters and prints the sum\nnum1 = int(input(\"Choose a number: \"))\nnum2 = int(input(\"Choose another number: \"))\n\ndef sum(num1, num2):\n\tprint(\"The sum is \" + str(num1+num2))\n\nsum(num1, num2)\n\n# prints the sum\ndef sum(numList):\n\ttotal = 0\n\tfor index in numList:\n\t\ttotal += index\n\tprint(\"The sum is \" + str(total))\n\nsum([1,2,3,4,5,6,7,8,9,10]) #55\n" } ]
2
swishaaz/Python-Design-Project
https://github.com/swishaaz/Python-Design-Project
4d1c30c2e96f11e18c14b1a8b9d79ad68f8eab56
5b0762be0c75870243f869840a7db8eb613c0332
0a87293709b404b326db1b2374ab09d61c27457c
refs/heads/master
2020-09-09T18:45:20.070251
2019-11-13T19:41:13
2019-11-13T19:41:13
221,531,979
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7622950673103333, "alphanum_fraction": 0.7786885499954224, "avg_line_length": 60, "blob_id": "4cc2f01015178ec855f36fb87521559c2446fb9e", "content_id": "87f2984358f100779006802260e209747431ec29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 122, "license_type": "no_license", "max_line_length": 90, "num_lines": 2, "path": "/README.md", "repo_name": "swishaaz/Python-Design-Project", "src_encoding": "UTF-8", "text": "<h1>Python-Design-Project</h1>\n<img src=\"https://github.com/swishaaz/Python-Design-Project/blob/master/tessellation.PNG\">\n" }, { "alpha_fraction": 0.4771573543548584, "alphanum_fraction": 0.5634517669677734, "avg_line_length": 15.1304349899292, "blob_id": "77e330a50abb4f4f1448edc28d8e816df9386630", "content_id": "5fca0dbda7551a23f2e4211d729e7cc15d60b21b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 394, "license_type": "no_license", "max_line_length": 25, "num_lines": 23, "path": "/tesellation proj.py", "repo_name": "swishaaz/Python-Design-Project", "src_encoding": "UTF-8", "text": "import turtle\r\nbob=turtle.Turtle()\r\nturtle.bgcolor(\"black\")\r\n\r\nbob.color('white')\r\nbob.speed(10)\r\nbob.left(20)\r\nfor i in range(0,21,1):\r\n bob.left(10)\r\n for i in range(0,10,1):\r\n \r\n bob.forward(10)\r\n bob.left(10)\r\n \r\n bob.forward(90)\r\n bob.right(55)\r\n bob.backward(90)\r\n for i in range(0,10,1):\r\n \r\n bob.left(10)\r\n bob.backward(10)\r\n \r\n bob.backward(90)\r\n" } ]
2
xjunming/kanzhun
https://github.com/xjunming/kanzhun
1bddd20115205de5f9dfaf038274adaf1933036e
99373fca2d1f28d29a8cd56e8923fee1faafdc17
8882458a740dd8a9891201deced82ab35aef26fc
refs/heads/master
2018-11-08T01:24:18.283074
2018-08-30T00:54:47
2018-08-30T00:54:47
145,226,169
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.46829384565353394, "alphanum_fraction": 0.49377840757369995, "avg_line_length": 31.3914737701416, "blob_id": "c62a1c124463dc00e6d37ffcd35bf44b6471dc60", "content_id": "9a0e1e256e45fda8f54247cc5114ad7b9a7cca4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8932, "license_type": "no_license", "max_line_length": 149, "num_lines": 258, "path": "/kanzhun.py", "repo_name": "xjunming/kanzhun", "src_encoding": "UTF-8", "text": "import csv,requests,time,random,re,threading\nfrom bs4 import BeautifulSoup\nfrom contextlib import closing\n\nglobal lastip,notime,countUA,st,et,uselessip\nlastip,notime,countUA=0,0,0\n\nuselessip = []\nst = time.time()\net = st+20+1\ndef UA(lastip):\n global countUA, notime,st,et,uselessip\n\n UAPOOLS = [\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.22 Safari/537.36 SE 2.X MetaSr 1.0\",\n \"Mozilla/5.0 (Windows NT 6.1; rv:49.0) Gecko/20100101 Firefox/49.0\",\n \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36\",\n ]\n hheaders = random.choice(UAPOOLS)\n\n headers = {\n 'User-Agent': hheaders\n }\n #print(et-st)\n if et-st>20:\n #print(\"new\")\n uselessip = []\n st = time.time()\n ipurl = 'http://mvip.piping.mogumiao.com/proxy/api/get_ip_bs?appKey=326f5af54338455a880c0eaaef2e94f2&count=5&expiryDate=0&format=2&newLine=1'\n ipp = requests.get(ipurl, headers=headers, timeout=5)\n ipp = [i for i in ipp.text.split(' ') if i!='']\n else:\n ipp = lastip\n et = time.time()\n\n #print(ipp)\n ipp = [i for i in ipp if i not in uselessip]\n if len(ipp)==0:\n time.sleep(5)\n ip = random.choice(ipp)\n proxy = {\"http\": \"http://\" + ip, \"https\": \"https://\" + ip}\n # print(\"当前使用的用户代理是:\"+hheaders)\n # print(\"当前使用的ip是:\"+ip)\n return headers, proxy, ipp, ip\n\n\ndef url_request(url): # request without memory leak\n global lastip,ip,uselessip\n headers, proxy, ipp, ip= UA(lastip)\n lastip = ipp\n\n res = None\n with closing(requests.get(url, headers=headers, proxies=proxy, verify=True, timeout=5)) as resp:\n res = resp\n #print(res)\n\n return res\n\ndef GetCompany(url):\n global countUA, notime,uselessip\n while 1:\n try:\n res = url_request(url)\n if res.status_code == 200:\n pass\n else:\n notime = 0\n #print('try again')\n continue\n if re.search('京ICP备14013441号', str(res.text)):\n pass\n else:\n notime = 0\n continue\n result = {}\n o = 'commentNo{}'\n soup = BeautifulSoup(res.text,'lxml')\n company = soup.select('.f_24')[0].text\n print('正在下载:'+company,res.url,time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))\n result['公司'] = company\n result['公司信息'] = soup.select('.co_info')[0].text.replace(' \\xa0','').replace('\\n','')\n result['url'] = res.url\n try:\n result['公司类型'] = soup.select('.industry')[0].text.replace('\\n','')\n except:\n result['公司类型'] = \"None\"\n try:\n result['公司规模'] = soup.select('.person')[0].text.replace('\\n','')\n except:\n result['公司规模'] = \"None\"\n try:\n result['所在地'] = soup.select('.city')[0].text.replace('\\n','')\n except:\n result['所在地'] = \"None\"\n try:\n result['地址'] = soup.select('.location')[0].text.replace('\\n','')\n except:\n result['地址'] = \"None\"\n try:\n result['公司评分'] = soup.select('.number_font')[0].text.replace('\\n','')\n except:\n result['公司评分'] = \"None\"\n try:\n result['行业排名'] = soup.select('.ranking')[0].text.replace('\\n','')\n except:\n result['行业排名'] = \"None\"\n try:\n result['推荐率'] = 
soup.select('.doughnua_chart')[0].text.replace('\\n','')\n except:\n result['推荐率'] = \"None\"\n try:\n result['面试难度'] = soup.select('.head')[1].select('.profile em')[0].text\n except:\n result['面试难度'] = \"None\"\n try:\n result['分享人数'] = soup.select('.f_12')[0].text.replace('\\n','')\n except:\n result['分享人数'] = \"None\"\n try:\n result['月平均工资'] = soup.select('.profile dt')[2].text.replace('\\n','')\n except:\n result['月平均工资'] = \"None\"\n try:\n result['职位'] = soup.select('.grey_33')[0].text\n except:\n result['职位'] = \"None\"\n try:\n result['员工分享'] = soup.select('.grey_33')[2].text\n except:\n result['员工分享'] = \"None\"\n try:\n result['关注人数'] = soup.select('.follow_num')[0].text\n except:\n result['关注人数'] = \"None\"\n curl = result['url'].replace('gso','gsr')\n #print(result)\n while 1:\n try:\n cres = url_request(curl)\n if cres.status_code == 200:\n pass\n else:\n notime = 0\n # print('try again')\n continue\n if re.search('京ICP备14013441号', str(cres.text)):\n pass\n else:\n notime = 0\n continue\n csoup = BeautifulSoup(cres.text,'lxml')\n for i in range(0,3):\n comment = o.format(i+1)\n try:\n result[comment] = csoup.select('article')[i].text.replace('...查看全文', '').replace('\\n', '')\n except:\n result[comment] = \"None\"\n #print(comment)\n break\n except Exception as e:\n #print(e)\n pass\n break\n except Exception as e:\n uselessip.append(ip)\n #print(e)\n pass\n return result\n\n#global countN\n#countN = 0\ndef SaveFile(t):\n global countN,filename\n with open(filename, 'a', newline='',encoding=\"utf_8_sig\") as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([t['公司'],t['公司信息'],t['url'],t['公司类型'],t['公司规模'],\n t['所在地'],t['地址'],t['公司评分'],t['行业排名'],t['推荐率'],\n t['面试难度'],t['分享人数'],t['月平均工资'],t['职位'],t['员工分享'],\n t['关注人数'],t['commentNo1'],t['commentNo2'],t['commentNo3']])\n csvfile.close()\n #countN = countN + 1\n #print(countN)\n\n\nurl3=[]\nopenfile = \"/root/kanzhun/companyURL.csv\"\n#openfile = \"companyURL.csv\"\ncsv_reader = csv.reader(open(openfile,'r',encoding=\"utf_8_sig\"))\nfor row in csv_reader:\n url3.append(row[2])\nurl3.pop(0)\nurl3 = set(url3)\n\nstname = time.strftime('%Y%m%d', time.localtime())\nfilename = '/root/kanzhun_output/'+stname+'kanzhun.csv'\n#filename =stname + 'kanzhun.csv'\nwith open(filename, 'w', newline='',encoding=\"utf_8_sig\") as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(['公司','公司信息','url','公司类型','公司规模','所在地','地址','公司评分','行业排名','推荐率',\n '面试难度','分享人数','月平均工资','职位','员工分享','关注人数',\n 'commentNo1','commentNo2','commentNo3'])\n csvfile.close()\n\n\n\ndef main(k):\n try:\n result = GetCompany(k)\n except Exception as e:\n result = []\n #print(e)\n print('无法下载',k)\n SaveFile(result)\n\n #print('...已写入...')\n\n\nexitFlag = 0\nclass myThread(threading.Thread): # 继承父类threading.Thread\n def __init__(self, threadID, name, counter):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.counter = counter\n\n def run(self): # 把要执行的代码写到run函数里面 线程在创建后会直接运行run函数\n #print(\"Starting \" + self.name)\n thre(self.name, self.counter, 1)\n #print(\"Exiting \" + self.name)\n\n\ndef thre(threadName, delay, counter):\n countN = 0\n while len(url3):\n k = url3.pop()\n countN = countN+1\n print('剩下',len(url3))\n if exitFlag:\n (threading.Thread).exit()\n time.sleep(delay)\n main(k)\n\n\n# 创建新线程\nthread1 = myThread(1, \"Thread-1\", 1)\nthread2 = myThread(2, \"Thread-2\", 2)\nthread3 = myThread(3, \"Thread-3\", 3)\nthread4 = myThread(4, \"Thread-4\", 4)\nthread5 = myThread(5, \"Thread-5\", 5)\n# 
开启线程\nthread1.start()\nthread2.start()\nthread3.start()\nthread4.start()\nthread5.start()\nprint(\"Exiting Main Thread\")\n\nprint('BINGO!')\n\n" } ]
1
jUnion44/plastic
https://github.com/jUnion44/plastic
51da5bc0ff615845d73e3cd8b8babf26926c2733
5b7029aeb2a77928b56fc06d4e8a4ee1a41553df
e65e935f4c908d43a0e2619ab4a2b6d6d7ca709e
refs/heads/master
2023-01-09T03:21:43.469503
2020-10-29T23:34:21
2020-10-29T23:34:21
308,462,708
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.5191693305969238, "alphanum_fraction": 0.5638977885246277, "avg_line_length": 24.040000915527344, "blob_id": "d710a5c9338727bc7421fce5d77f09ec2fb484be", "content_id": "00fd0c74e4d0def792cdbddaea335e4c4828d884", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 626, "license_type": "no_license", "max_line_length": 64, "num_lines": 25, "path": "/core/migrations/0012_auto_20200723_1728.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2020-07-23 17:28\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0011_mcpsschool'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='mcpsschool',\n name='lat',\n field=models.FloatField(default=1, max_length=200),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='mcpsschool',\n name='long',\n field=models.FloatField(default=22, max_length=200),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.5199005007743835, "alphanum_fraction": 0.5970149040222168, "avg_line_length": 21.33333396911621, "blob_id": "84238ddc9dfdce220446094a7f697450e24bad60", "content_id": "c8c17093f6083174d93e19283fff7393ac4f9810", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 402, "license_type": "no_license", "max_line_length": 67, "num_lines": 18, "path": "/core/migrations/0007_blogpost_compares.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2020-07-13 15:06\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0006_auto_20200712_2017'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='blogpost',\n name='compares',\n field=models.ManyToManyField(to='core.companyCompare'),\n ),\n ]\n" }, { "alpha_fraction": 0.5358851552009583, "alphanum_fraction": 0.5837320685386658, "avg_line_length": 21, "blob_id": "8330ac0589a4ca8f8b1ef8f29da55e4e73ff0287", "content_id": "f5482de2de82ca8847202bbd852f91ffa9727711", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 418, "license_type": "no_license", "max_line_length": 52, "num_lines": 19, "path": "/core/migrations/0024_zipcode_population.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2020-08-25 16:46\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0023_zipcode_area'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='zipcode',\n name='population',\n field=models.BigIntegerField(default=0),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.5723732709884644, "alphanum_fraction": 0.6142542362213135, "avg_line_length": 25.6862735748291, "blob_id": "6d618dd4d7f8e053c6348d66c3891968b0de74fd", "content_id": "2fb64f851ae740d57c78f46d346ac3b4792877c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1361, "license_type": "no_license", "max_line_length": 108, "num_lines": 51, "path": "/extractionhigh.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\nimport camelot\nimport requests\nimport csv\nfrom core.models import mcpsSchool\n\nROOT_URL = \"https://www.montgomeryschoolsmd.org/\"\n\ndatadump = [[\"High 
School\",\"ppp_2014_2015\",\"ppp_2015_2016\",\"ppp_2016_2017\",\"ppp_2017_2018\",\"ppp_2018_2019\"]]\n\nhtml = open(\"high school links.html\",\"r\").read()\n\nsoup = BeautifulSoup(html, 'html.parser')\n\nlinks = soup.find_all('a')\nfor link in links:\n school = link.contents[0]\n href = link['href']\n pdfdata = requests.get(ROOT_URL+href).content\n\n temppdf = open(\"temp.pdf\",\"wb\")\n temppdf.write(pdfdata)\n temppdf.close()\n\n \n tables = camelot.read_pdf(\"temp.pdf\")\n print(\"Total tables extracted for \" + school + \":\", tables.n)\n tables[0].to_csv(\"temp.csv\")\n\n datareader = csv.reader(open(\"temp.csv\"))\n schooltable = ['\"'+school+'\"',0.0,0.0,0.0,0.0,0.0]\n \n \n \n for row in datareader:\n for count in range(len(row)):\n try:\n row[count] = row[count].replace(\"*\",\"\")\n schooltable[count] = schooltable[count] + float(row[count])\n except:\n pass\n for count in range(1,6):\n schooltable[count] = str(schooltable[count]/12)\n\n datadump.append(schooltable)\n\nout = open(\"outputhigh.csv\",\"w\")\nfor row in datadump:\n rowstring = \",\".join(row)\n out.write(rowstring+\"\\n\")\nout.close()\n" }, { "alpha_fraction": 0.516539454460144, "alphanum_fraction": 0.5491093993186951, "avg_line_length": 30.190475463867188, "blob_id": "c4685caa273f76610a1afb348d25587236b47e33", "content_id": "ff5277381cade183f27e3d7c38287cfe7b026ace", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1965, "license_type": "no_license", "max_line_length": 64, "num_lines": 63, "path": "/core/migrations/0004_auto_20200708_1746.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2020-07-08 17:46\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0003_attachment'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='attachment',\n name='link',\n field=models.TextField(blank=True),\n ),\n migrations.AddField(\n model_name='companycompare',\n name='testField1',\n field=models.FloatField(default=-1, max_length=200),\n ),\n migrations.AddField(\n model_name='companycompare',\n name='testField2',\n field=models.FloatField(default=-1, max_length=200),\n ),\n migrations.AddField(\n model_name='companycompare',\n name='testField3',\n field=models.FloatField(default=-1, max_length=200),\n ),\n migrations.AddField(\n model_name='companycompare',\n name='testField4',\n field=models.FloatField(default=-1, max_length=200),\n ),\n migrations.AddField(\n model_name='companycompare',\n name='testField5',\n field=models.FloatField(default=-1, max_length=200),\n ),\n migrations.AddField(\n model_name='companycompare',\n name='testField6',\n field=models.FloatField(default=-1, max_length=200),\n ),\n migrations.AddField(\n model_name='companycompare',\n name='testField7',\n field=models.FloatField(default=-1, max_length=200),\n ),\n migrations.AddField(\n model_name='companycompare',\n name='testField8',\n field=models.FloatField(default=-1, max_length=200),\n ),\n migrations.AddField(\n model_name='companycompare',\n name='testField9',\n field=models.FloatField(default=-1, max_length=200),\n ),\n ]\n" }, { "alpha_fraction": 0.5437018275260925, "alphanum_fraction": 0.5758354663848877, "avg_line_length": 26.785715103149414, "blob_id": "c2b655e9574dff9b62ebbb16c92b1f06cc39ce50", "content_id": "d4e7e9e157dbadd990f4b13c1c346c89d78d735e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
778, "license_type": "no_license", "max_line_length": 79, "num_lines": 28, "path": "/core/migrations/0008_auto_20200716_1415.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2020-07-16 14:15\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0007_blogpost_compares'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='companycompare',\n name='lat',\n field=models.FloatField(blank=True, max_length=200, null=True),\n ),\n migrations.AddField(\n model_name='companycompare',\n name='long',\n field=models.FloatField(blank=True, max_length=200, null=True),\n ),\n migrations.AlterField(\n model_name='blogpost',\n name='compares',\n field=models.ManyToManyField(blank=True, to='core.companyCompare'),\n ),\n ]\n" }, { "alpha_fraction": 0.5259515643119812, "alphanum_fraction": 0.5790081024169922, "avg_line_length": 36.69565200805664, "blob_id": "ac9ada286882cfa0fe7253a45a2be22739dafc2c", "content_id": "cce44cb1c905957b94ae984478cddd408d4552aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 867, "license_type": "no_license", "max_line_length": 214, "num_lines": 23, "path": "/core/migrations/0011_mcpsschool.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2020-07-23 17:25\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0010_blogpost_script'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='mcpsSchool',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('schooltype', models.CharField(choices=[('elementarty', 'Elementary School'), ('middle', 'Middle School'), ('high', 'High School'), ('special', 'Special Schools')], default='high', max_length=20)),\n ('name', models.CharField(max_length=500)),\n ('ppp_2014_2015', models.FloatField(max_length=200)),\n ('ppp_2018_2019', models.FloatField(max_length=200)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.4972776770591736, "alphanum_fraction": 0.5190562605857849, "avg_line_length": 38.28571319580078, "blob_id": "3769e3d6f49cfd6416b3f21989fd2660e01a48ce", "content_id": "8ff5f1a8a994680cecc016b63db43fb34dd4884e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 551, "license_type": "no_license", "max_line_length": 75, "num_lines": 14, "path": "/processAsset/copyFiles.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "\nimport boto3, sys\n\nfileName = sys.argv[1]\nfileAccName = sys.argv[2]\nsession = boto3.session.Session()\nclient = session.client('s3',\n region_name='sfo2',\n endpoint_url='https://sfo2.digitaloceanspaces.com',\n aws_access_key_id=\"POTR4I5FSNCEF3OUGSV5\",\n aws_secret_access_key=\"3aJaqDuBbwfBEME41ILK7zJuNxYZH/64I97g2X3xp7M\")\nclient.put_object(Bucket='plastic',\n Key=fileName.split(\"/\")[-1],\n Body=open(fileName,\"rb\").read(),\n ACL='public-read')\n" }, { "alpha_fraction": 0.5102505683898926, "alphanum_fraction": 0.5899772047996521, "avg_line_length": 22.105262756347656, "blob_id": "316b813fb1ac1a3ba777aed6453cfad387cab77c", "content_id": "e4e3f8cd40bb3f8995ef8eefc8d876829965b41b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 439, "license_type": "no_license", "max_line_length": 63, "num_lines": 19, "path": 
"/core/migrations/0021_zipcode_average_income.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2020-08-10 15:52\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0020_auto_20200810_1551'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='zipcode',\n name='average_income',\n field=models.FloatField(default=0, max_length=200),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.49735450744628906, "alphanum_fraction": 0.579365074634552, "avg_line_length": 20, "blob_id": "2fed212a19fb89ce72c689d09c15b63b87247e36", "content_id": "9fd52b909f556143d500eef2007eec65d7f72189", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 378, "license_type": "no_license", "max_line_length": 47, "num_lines": 18, "path": "/core/migrations/0009_blogpost_desc.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2020-07-18 17:48\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0008_auto_20200716_1415'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='blogpost',\n name='desc',\n field=models.TextField(blank=True),\n ),\n ]\n" }, { "alpha_fraction": 0.5161290168762207, "alphanum_fraction": 0.5752688050270081, "avg_line_length": 19.66666603088379, "blob_id": "f84d9e55abd1830be9b4db0ad8c10ad5fef0b144", "content_id": "2e9716ee99d73844adde8562efac37b7fff5e196", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 372, "license_type": "no_license", "max_line_length": 51, "num_lines": 18, "path": "/core/migrations/0018_auto_20200810_0214.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2020-08-10 02:14\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0017_zipcode'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='zipcode',\n name='code',\n field=models.CharField(max_length=500),\n ),\n ]\n" }, { "alpha_fraction": 0.6779661178588867, "alphanum_fraction": 0.6779661178588867, "avg_line_length": 28.5, "blob_id": "983ef8ffc4ca32388623329e782e4109f88dbf1e", "content_id": "525a5f980a56d9f4e7faa6f8e03f332e29d2238b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 118, "license_type": "no_license", "max_line_length": 57, "num_lines": 4, "path": "/static/openlayers/openlayers-workshop-en/examples/vectortile/bright.js", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "import 'ol/ol.css';\nimport {apply} from 'ol-mapbox-style';\n\nconst map = apply('map-container', './data/bright.json');\n" }, { "alpha_fraction": 0.6567410826683044, "alphanum_fraction": 0.7082988619804382, "avg_line_length": 39.741573333740234, "blob_id": "ca015a11a236c4d0a1c62d78644ac71616224f5c", "content_id": "27d365e8c9e42eda9a6b8bab58e4d6e6317fdf4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3627, "license_type": "no_license", "max_line_length": 142, "num_lines": 89, "path": "/core/models.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth import get_user_model\nfrom django.conf import settings\n\ndef get_sentinel_user():\n 
return get_user_model().objects.get_or_create(username='Deleted User')[0]\n\n# Create your models here.\n\nclass companyCompare(models.Model):\n    name = models.CharField(max_length=200)\n    desc = models.TextField(blank=True)\n    longdesc = models.TextField(blank=True)\n    \n    ppp = models.FloatField(null=True)\n    plasticPpp = models.FloatField(null=True)\n    testField1 = models.FloatField(max_length=200,default=-1)\n    testField2 = models.FloatField(max_length=200,default=-1)\n    testField3 = models.FloatField(max_length=200,default=-1)\n    testField4 = models.FloatField(max_length=200,default=-1)\n    testField5 = models.FloatField(max_length=200,default=-1)\n    testField6 = models.FloatField(max_length=200,default=-1)\n    testField7 = models.FloatField(max_length=200,default=-1)\n    testField8 = models.FloatField(max_length=200,default=-1)\n    testField9 = models.FloatField(max_length=200,default=-1)\n\n    lat = models.FloatField(max_length=200,blank=True,null=True)\n    long = models.FloatField(max_length=200,blank=True,null=True)\n\n    tags = models.TextField(blank=True)\n    def __str__(self):\n        return self.name\n\n\n\nclass blogpost(models.Model):\n    name = models.CharField(max_length=200)\n    author = models.ForeignKey(\"auth.user\",on_delete=models.SET(get_sentinel_user))\n    desc = models.TextField(blank=True)\n    content = models.TextField()\n    script = models.TextField(blank=True)\n\n    compares = models.ManyToManyField(companyCompare,blank=True)\n    def __str__(self):\n        return self.name + \" by \" + self.author.first_name + \" \" + self.author.last_name\n\nclass attachment(models.Model):\n    post = models.ForeignKey(blogpost, on_delete=models.CASCADE)\n    file = models.FileField(upload_to=\"processAsset/\")\n    link = models.TextField(blank=True)\n    def __str__(self):\n        return self.file.name  # FieldFile exposes .name; .filename would raise AttributeError\n\nclass mcpsSchool(models.Model):\n    schooltype_choices = [(\"elementarty\",\"Elementary School\"),(\"middle\",\"Middle School\"),(\"high\",\"High School\"),(\"special\",\"Special Schools\")]\n    schooltype = models.CharField(max_length=20,choices=schooltype_choices,default=\"high\")\n    name = models.CharField(max_length=500)\n    ppp_2014_2015 = models.FloatField(max_length=200)\n    ppp_2015_2016 = models.FloatField(max_length=200)\n    ppp_2016_2017 = models.FloatField(max_length=200)\n    ppp_2017_2018 = models.FloatField(max_length=200)\n    ppp_2018_2019 = models.FloatField(max_length=200)\n    ppp_2014_2015_rank = models.FloatField(max_length=200)\n    ppp_2015_2016_rank = models.FloatField(max_length=200)\n    ppp_2016_2017_rank = models.FloatField(max_length=200)\n    ppp_2017_2018_rank = models.FloatField(max_length=200)\n    ppp_2018_2019_rank = models.FloatField(max_length=200)\n    lat = models.FloatField(max_length=200)\n    long = models.FloatField(max_length=200)\n    desc = models.TextField(blank=True,null=True)\n    link = models.TextField(blank=True,null=True)\n    def __str__(self):\n        return self.schooltype + \" - \" + self.name\n\nclass pums(models.Model):\n    code = models.CharField(max_length=500)\n    geodata = models.TextField(blank=True,null=True)\n    def __str__(self):\n        return self.code\n\nclass zipcode(models.Model):\n    code = models.CharField(max_length=10)\n    geodata = models.TextField(blank=True,null=True)\n    area = models.FloatField()\n    population = models.BigIntegerField()\n    average_income = models.FloatField(max_length=200)\n    density = models.FloatField()\n    def __str__(self):\n        return self.code\n\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5601217746734619, "avg_line_length": 29.55813980102539, "blob_id": "9c688d1bc0a8ce40acf4a506ec64fd6fc2561642", 
"content_id": "77805cf1d05c07a2196001611dc6a0730b3ffd8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1314, "license_type": "no_license", "max_line_length": 63, "num_lines": 43, "path": "/core/migrations/0022_auto_20200810_1726.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2020-08-10 17:26\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0021_zipcode_average_income'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='mcpsschool',\n name='ppp_2014_2015_rank',\n field=models.FloatField(default=0, max_length=200),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='mcpsschool',\n name='ppp_2015_2016_rank',\n field=models.FloatField(default=0, max_length=200),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='mcpsschool',\n name='ppp_2016_2017_rank',\n field=models.FloatField(default=0, max_length=200),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='mcpsschool',\n name='ppp_2017_2018_rank',\n field=models.FloatField(default=0, max_length=200),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='mcpsschool',\n name='ppp_2018_2019_rank',\n field=models.FloatField(default=0, max_length=200),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.48439306020736694, "alphanum_fraction": 0.5641618371009827, "avg_line_length": 26.90322494506836, "blob_id": "98b753b98ee25b5d8e1216806cf43127a045a882", "content_id": "c3dbaa51ee4673ad673b8690535f03987febc62d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 865, "license_type": "no_license", "max_line_length": 64, "num_lines": 31, "path": "/core/migrations/0014_auto_20200729_2304.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2020-07-29 23:04\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0013_auto_20200724_1722'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='mcpsschool',\n name='ppp_2015_2016',\n field=models.FloatField(default=11, max_length=200),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='mcpsschool',\n name='ppp_2016_2017',\n field=models.FloatField(default=1, max_length=200),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='mcpsschool',\n name='ppp_2017_2018',\n field=models.FloatField(default=23, max_length=200),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.6651002764701843, "alphanum_fraction": 0.6716791987419128, "avg_line_length": 30.920000076293945, "blob_id": "005519cbf45a4573b024c180cd58485c04d6639a", "content_id": "575b4bafc7c9ac8e50f17fe5d7b88e5a26c5857d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3192, "license_type": "no_license", "max_line_length": 149, "num_lines": 100, "path": "/core/views.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse,JsonResponse\nfrom .models import *\nfrom django.db.models import Model\nfrom django.db.models import Field\nfrom django.forms.models import model_to_dict\nfrom django.core import serializers\nimport json\n\n\n\ndef getName(fieldString):\n return fieldString.split(\".\")[-1]\n\n# Create your 
views here.\ndef index(request):\n return render(request,\"core/index.html\",{\"entities\":companyCompare.objects.all()})\n\ndef getfield(request,eid,name,index):\n companies_json = serializers.serialize(\"json\", [get_object_or_404(companyCompare,pk=eid)])\n cjl = json.loads(companies_json)\n \n return JsonResponse({\"index\":index,\"pk\":eid,\"val\":cjl[0][\"fields\"][name]})\n\ndef getfieldmcps(request,eid,name,index):\n companies_json = serializers.serialize(\"json\", [get_object_or_404(mcpsSchool,pk=eid)])\n cjl = json.loads(companies_json)\n \n return JsonResponse({\"index\":index,\"pk\":eid,\"val\":cjl[0][\"fields\"][name]})\n\n\ndef explore(request):\n s=False\n g=False\n p=False\n try:\n objectfilter = request.GET[\"filter\"]\n if objectfilter==\"s\":\n s=True\n if objectfilter==\"g\":\n g=True\n if objectfilter==\"p\":\n p=True\n except:\n pass\n return render(request,\"core/explore.html\",{\"entities\":companyCompare.objects.all(),\"s\":s,\"g\":g,\"p\":p})\n\ndef plasticmap(request):\n s=False\n g=False\n p=False\n try:\n objectfilter = request.GET[\"filter\"]\n if objectfilter==\"s\":\n s=True\n if objectfilter==\"g\":\n g=True\n if objectfilter==\"p\":\n p=True\n except:\n pass\n\n companies_json = serializers.serialize(\"json\", companyCompare.objects.all())\n cjl = json.loads(companies_json)\n\n return render(request,\"core/plasticmap.html\",{\"rawjson\":companies_json,\"entities\":cjl,\"fields\":cjl[0][\"fields\"].items()})\n\n\ndef plasticmapmcps(request):\n\n companies_json = serializers.serialize(\"json\", mcpsSchool.objects.all())\n cjl = json.loads(companies_json)\n zipcodes = zipcode.objects.all()\n\n return render(request,\"core/plasticmapmcps.html\",{\"zipcodes\":zipcodes,\"rawjson\":companies_json,\"entities\":cjl,\"fields\":cjl[0][\"fields\"].items()})\n\n\ndef explorespecific(request,eid):\n e = get_object_or_404(companyCompare,pk=eid)\n posts = e.blogpost_set.all()\n return render(request,\"core/explorespecific.html\",{\"e\":e,\"bps\":posts})\n\ndef blog(request):\n return render(request,\"core/exploreblog.html\",{\"bs\":blogpost.objects.all()})\n\ndef blogspecific(request,eid):\n e = get_object_or_404(blogpost,pk=eid)\n return render(request,\"core/blogpost.html\",{\"b\":e})\n\n\ndef database(request):\n fields = companyCompare._meta.fields\n fields = list(fields)\n fields = list(map(str,fields))\n fields = list(map(getName,fields))\n companies = list(companyCompare.objects.all())\n companies = list(map(model_to_dict,companies))\n companies_json = serializers.serialize(\"json\", companyCompare.objects.all())\n cjl = json.loads(companies_json)\n return render(request,\"core/db.html\",{\"rawjson\":companies_json,\"entities\":cjl,\"fields\":cjl[0][\"fields\"].items()})\n" }, { "alpha_fraction": 0.577565610408783, "alphanum_fraction": 0.584725558757782, "avg_line_length": 19.950000762939453, "blob_id": "9f7b4a163f5d21b9aeb63607ab1299a263102ec6", "content_id": "1529060aaa80b9c8047cfa0053ef674307b55edb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 419, "license_type": "no_license", "max_line_length": 61, "num_lines": 20, "path": "/static/openlayers/openlayers-workshop-en/examples/basics/map.js", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "import 'ol/ol.css';\nimport {Map, View} from 'ol';\nimport TileLayer from 'ol/layer/Tile';\nimport XYZSource from 'ol/source/XYZ';\nimport {fromLonLat} from 'ol/proj';\n\nnew Map({\n target: 'map-container',\n layers: [\n new TileLayer({\n 
source: new XYZSource({\n url: 'http://tile.stamen.com/terrain/{z}/{x}/{y}.jpg'\n })\n })\n ],\n view: new View({\n center: fromLonLat([0, 0]),\n zoom: 2\n })\n});\n" }, { "alpha_fraction": 0.5038639903068542, "alphanum_fraction": 0.5378670692443848, "avg_line_length": 27.409090042114258, "blob_id": "c14a1cd4d70ffbeb3ac650b1df3ef560e726341f", "content_id": "be57d6e8f1eb2afcc9ca88d468e9b4bfd42456de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 647, "license_type": "no_license", "max_line_length": 114, "num_lines": 22, "path": "/core/migrations/0002_companycompare.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.3 on 2020-07-03 11:21\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('core', '0001_initial'),\r\n ]\r\n\r\n operations = [\r\n migrations.CreateModel(\r\n name='companyCompare',\r\n fields=[\r\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('name', models.CharField(max_length=200)),\r\n ('ppp', models.FloatField(null=True)),\r\n ('plasticPpp', models.FloatField(null=True)),\r\n ],\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.6572052240371704, "alphanum_fraction": 0.6593886613845825, "avg_line_length": 23.756755828857422, "blob_id": "4f3e411eefb3eab543ebdbe0473b3796dd9cea69", "content_id": "22a4653a86b78f11737707766a02f4e51eebef01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 916, "license_type": "no_license", "max_line_length": 69, "num_lines": 37, "path": "/static/openlayers/openlayers-workshop-en/examples/vectortile/interact.js", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "import 'ol/ol.css';\nimport {apply} from 'ol-mapbox-style';\n//! [import-layer]\nimport VectorLayer from 'ol/layer/Vector';\nimport VectorSource from 'ol/source/Vector';\nimport {Style, Stroke} from 'ol/style';\n//! [import-layer]\n//! [import-interaction]\nimport {Feature} from 'ol';\nimport {fromExtent} from 'ol/geom/Polygon';\n//! [import-interaction]\n\nconst map = apply('map-container', './data/bright.json');\n//! [layer]\nconst source = new VectorSource();\nnew VectorLayer({\n map: map,\n source: source,\n style: new Style({\n stroke: new Stroke({\n color: 'red',\n width: 4\n })\n })\n});\n//! [layer]\n//! [interaction]\nmap.on('pointermove', function(event) {\n source.clear();\n map.forEachFeatureAtPixel(event.pixel, function(feature) {\n const geometry = feature.getGeometry();\n source.addFeature(new Feature(fromExtent(geometry.getExtent())));\n }, {\n hitTolerance: 2\n });\n});\n//! 
[interaction]\n" }, { "alpha_fraction": 0.5072463750839233, "alphanum_fraction": 0.5634058117866516, "avg_line_length": 23, "blob_id": "2d4790d112ce04d13236f1718d8a2beaeacea9ce", "content_id": "24f0a367d24102fd207bdae3d72938802d81f707", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 552, "license_type": "no_license", "max_line_length": 58, "num_lines": 23, "path": "/core/migrations/0013_auto_20200724_1722.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2020-07-24 17:22\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('core', '0012_auto_20200723_1728'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='mcpsschool',\n            name='desc',\n            field=models.TextField(blank=True, null=True),\n        ),\n        migrations.AddField(\n            model_name='mcpsschool',\n            name='link',\n            field=models.TextField(blank=True, null=True),\n        ),\n    ]\n" }, { "alpha_fraction": 0.436026930809021, "alphanum_fraction": 0.5942760705947876, "avg_line_length": 24.75, "blob_id": "c15adf203ecd7814fa96e060d994fcf7ce6a7850", "content_id": "21832b002075f3571ee2396d877eb0c057b506be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 594, "license_type": "no_license", "max_line_length": 49, "num_lines": 24, "path": "/middle.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "from core import models  # run via `python3 manage.py shell` so Django is configured\nimport csv\ndatareader = csv.reader(open("outputmiddle.csv"))\nfirst=False\nfor row in datareader:\n    if not first:\n        first = True  # skip the header row\n        continue\n    s = models.mcpsSchool()\n    s.name = row[0]\n    s.schooltype="middle"\n    s.ppp_2014_2015 = row[1]\n    s.ppp_2015_2016 = row[2]\n    s.ppp_2016_2017 = row[3]\n    s.ppp_2017_2018 = row[4]\n    s.ppp_2018_2019 = row[5]\n    s.ppp_2014_2015_rank = row[6]\n    s.ppp_2015_2016_rank = row[7]\n    s.ppp_2016_2017_rank = row[8]\n    s.ppp_2017_2018_rank = row[9]\n    s.ppp_2018_2019_rank = row[10]\n    s.lat=0  # coordinates are filled in later by locatem.py\n    s.long=0\n    s.save()\n" }, { "alpha_fraction": 0.6548019051551819, "alphanum_fraction": 0.6548019051551819, "avg_line_length": 40.36111068725586, "blob_id": "b166616302f65756242dcd4618bdef0581e69f07", "content_id": "5718f9e6f109f7757145cce1541eab85305c9927", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1489, "license_type": "no_license", "max_line_length": 101, "num_lines": 36, "path": "/core/urls.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "from django.urls import path\n\nfrom . 
import views\n\napp_name = \"core\"\nurlpatterns = [\n #Front Page\n path('', views.index, name='index'),\n path('explore/', views.explore, name='explore'),\n path('explore/<int:eid>/', views.explorespecific, name='explorespecific'),\n path('blog/', views.blog, name='blog'),\n path('blog/<int:eid>/', views.blogspecific, name='blogspecific'),\n\n path('database/', views.database, name='db'),\n path('map/', views.plasticmap, name='plasticmap'),\n path('map/mcps/', views.plasticmapmcps, name='plasticmapmcps'),\n path('getField/<int:eid>/<str:name>/<int:index>/', views.getfield, name='getfield'),\n path('getField/mcps/<int:eid>/<str:name>/<int:index>/', views.getfieldmcps, name='getfieldmcps'),\n\n\n\n## #Course Signups and Stuff\n## path('course/<str:cid>/', views.loadCourse, name='coursePage'),\n## path('course/<str:cid>/join/', views.joinCourse, name='joinCourse'),\n## path('course/private/join/', views.joinCoursePrivate, name='joinCoursePrivate'),\n## path('courseList/', views.courseList, name=\"courseList\"),\n## path('courseView/', views.courseView, name=\"courseView\"),\n## path('leaveCourse/<str:cid>/', views.leaveCourse, name=\"courseLeave\"),\n##\n## #Account Management\n## path('register/', views.register, name=\"register\"),\n## path('login/', views.loginu, name=\"login\"),\n## path('logout/', views.logoutu, name=\"logout\"),\n## #path('manage/', views.manageAccount, name=\"manageAccount\"),\n \n]\n" }, { "alpha_fraction": 0.6524300575256348, "alphanum_fraction": 0.6789396405220032, "avg_line_length": 36.44444274902344, "blob_id": "17d5c2ec699011d1d6fd5f55cd30372a67a7453b", "content_id": "83a184235577e75a9d28d9dde3872348d78dfad4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 679, "license_type": "no_license", "max_line_length": 181, "num_lines": 18, "path": "/locatem.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "import csv\nimport requests, json\nfrom core.models import mcpsSchool\n\nfrom django.shortcuts import get_object_or_404\ndatareader = csv.reader(open(\"middleaddr.csv\"))\n\nBAD = [41,46]\n\nfor row in datareader:\n if int(row[0]) in BAD:\n continue\n obj = get_object_or_404(mcpsSchool,pk=row[0])\n pdfdata = json.loads(requests.get(\"https://geocoding.geo.census.gov/geocoder/locations/address?street=\"+row[2]+\"&benchmark=9&format=json&zip=\"+row[3].split(\" \")[-1]+\"\").content)\n print(pdfdata)\n obj.long = float(pdfdata[\"result\"][\"addressMatches\"][0]['coordinates'][\"x\"])\n obj.lat = float(pdfdata[\"result\"][\"addressMatches\"][0]['coordinates'][\"y\"])\n obj.save()\n \n" }, { "alpha_fraction": 0.5696671009063721, "alphanum_fraction": 0.59186190366745, "avg_line_length": 29.19230842590332, "blob_id": "efa43ec412071623b81656506f46eda062df1cc2", "content_id": "cb1f585d894461ac5e4a8d3505321689e5887733", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 811, "license_type": "no_license", "max_line_length": 128, "num_lines": 26, "path": "/core/migrations/0001_initial.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.3 on 2020-07-03 11:13\r\n\r\nimport core.models\r\nfrom django.conf import settings\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n initial = True\r\n\r\n dependencies = [\r\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\r\n ]\r\n\r\n operations = [\r\n migrations.CreateModel(\r\n name='blogpost',\r\n 
fields=[\r\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('name', models.CharField(max_length=200)),\r\n ('content', models.TextField()),\r\n ('author', models.ForeignKey(on_delete=models.SET(core.models.get_sentinel_user), to=settings.AUTH_USER_MODEL)),\r\n ],\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.494047611951828, "alphanum_fraction": 0.586309552192688, "avg_line_length": 18.764705657958984, "blob_id": "b720db3a7461a46b9994a58bedf4edf3550c9f59", "content_id": "21855ccc3ca20158a97194c94b9bbf1772468518", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 336, "license_type": "no_license", "max_line_length": 47, "num_lines": 17, "path": "/core/migrations/0020_auto_20200810_1551.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2020-08-10 15:51\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0019_auto_20200810_1551'),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name='zipcodeArchive',\n new_name='zipcode',\n ),\n ]\n" }, { "alpha_fraction": 0.8191489577293396, "alphanum_fraction": 0.8191489577293396, "avg_line_length": 24.636363983154297, "blob_id": "e2bf7a2a256e4a3e179d7471b3de1e1d8a03fb3f", "content_id": "4b5727c535c364cc99ed33f89ae9a8ac1a42b681", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 282, "license_type": "no_license", "max_line_length": 36, "num_lines": 11, "path": "/core/admin.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import *\n\n\n# Register your models here.\nadmin.site.register(blogpost)\nadmin.site.register(companyCompare)\nadmin.site.register(attachment)\nadmin.site.register(mcpsSchool)\nadmin.site.register(zipcode)\n#admin.site.register(zipcodeArchive)\n" }, { "alpha_fraction": 0.527999997138977, "alphanum_fraction": 0.5786666870117188, "avg_line_length": 19.83333396911621, "blob_id": "457d2491f001e383ef853b265aed5fc3805d7d79", "content_id": "217046249847b8871c538433aa1130321532933c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 375, "license_type": "no_license", "max_line_length": 47, "num_lines": 18, "path": "/core/migrations/0010_blogpost_script.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2020-07-18 18:32\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0009_blogpost_desc'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='blogpost',\n name='script',\n field=models.TextField(blank=True),\n ),\n ]\n" }, { "alpha_fraction": 0.7682032585144043, "alphanum_fraction": 0.7847040295600891, "avg_line_length": 53.5428581237793, "blob_id": "ab195e87ce559f1fd463b85fd29aa65984e5af77", "content_id": "9de58a8af06fa73d9dad7233496a845a7b49e76f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3818, "license_type": "no_license", "max_line_length": 515, "num_lines": 70, "path": "/README.md", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "# Installation Instructions\n\n## Required Dependencies:\n-Python3\n\n-Django\n\n-Camelot (Python implementation of Tabula)\n\n-BeautifulSoup\n\n-GeoJSON 
[Area](https://github.com/mapbox/geojson-area)\n\n## Setting up the database\n\nNavigate to the folder where you wish to start your project. Run `django-admin startproject your_project_name` (plastic is recommended). Navigate to the folder the command created, which should have the same name as the project. In that folder, run `python3 manage.py startapp core`. This command should make a folder called \"core.\" Please copy the admin, views, models, and url files from the core folder in this repository into the folder you created. Run `python3 manage.py migrate` to set up the database.\n\nIf using the settings module provided, set the \"SECRET_KEY\" environment variable to something secure. Django uses the secret key for cryptographic signing, so keep it secret and out of version control.\n\nRun `python3 manage.py createsuperuser` to make an administrator account on the server, which will allow the user to edit any data.\n\n## Importing data\n\nA list of Zip Codes in Montgomery County can be downloaded [here](https://www.zip-codes.com/county/md-montgomery.asp). Some Zip Codes do not have geographical areas. A list of Zip Codes used is also located in the geojson folder.\n\nA data table with Median Household Income and population for Zip Code Tabulation Areas (Census-designated boundaries which roughly mimic Zip Codes) can be found [here](https://data.census.gov/cedsci/table?q=United%20States&t=Income%20and%20Earnings&g=0100000US.860000&tid=ACSST5Y2018.S1903&hidePreview=true).\n\nRun extractionhigh.py and extractionmiddle.py to download all of the recycling data, which will be saved to outputmiddle.csv and outputhigh.csv. For each file, there will be columns with the recycling pounds per person rate for five school years. Make new columns to store the rankings of each school using the Excel rank function. Save this output as a CSV.\n\nTo obtain the latitude and longitude coordinates of middle schools, run `python3 manage.py shell` and then `exec(open(\"locatem.py\").read())` to load in middle school addresses. Some schools like Silver Creek MS must be manually searched on Google Maps. High schools were manually searched before the automatic scraping program was written. The program uses data from the Census Bureau API. There is no charge or API key required.\n\nThe data above, apart from the coordinates, were manually entered into the database from the command line using `python3 manage.py shell`. Depending on how the CSVs are set up with the ranking, import the data by reading each line in each CSV file and writing to the appropriate fields. 
Import data into the mcpsSchool and zipcode models.\n\nWhen the data importing is complete one may run `python3 manage.py runserver` to run the server on `127.0.0.1:8000` by default.\n\n## Alternate Method\n\nSimply clone the repo and run `python3 manage.py runserver` using the default admin credentials (username plastic password plastic).\n\n# Model Field Reference\n\n## mcpsSchool\n\nschooltype - the type of school (\"middle\" or \"high\")\n\nname - the name of the school\n\nppp_20XX_20XY - the pounds per person rate for the 20XX-20XY school year\n\nppp_20XX_20XY_rank - the pounds per person ranking for the 20XX-20XY school year relative to other schools\n\nlat/long - geographical coordinates\n\ndesc - a description of any trends noted in each school\n\nlink - not used\n\n## zipcode\n\ncode - 5 digit name of the Zip Code\n\ngeodata - geoJSON geometry of the Zip Code\n\narea - Area of the Zip Code in square meters obtained with the area function\n\npopulation - Total population of the Zip Code\n\naverage_income - Median household income (the mean household income was originally used, hence the field name)\n\ndensity - Population density in persons per square meter (must be calculated manually)\n" }, { "alpha_fraction": 0.5340699553489685, "alphanum_fraction": 0.5690608024597168, "avg_line_length": 22.60869598388672, "blob_id": "c8250ea8112bc96696ad88715f8a14f3e3ede218", "content_id": "627c73e5e4631763cb49b4a477fdcca25424bdec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 543, "license_type": "no_license", "max_line_length": 47, "num_lines": 23, "path": "/core/migrations/0006_auto_20200712_2017.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2020-07-12 20:17\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0005_companycompare_tags'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='companycompare',\n name='desc',\n field=models.TextField(blank=True),\n ),\n migrations.AddField(\n model_name='companycompare',\n name='longdesc',\n field=models.TextField(blank=True),\n ),\n ]\n" }, { "alpha_fraction": 0.5336538553237915, "alphanum_fraction": 0.5817307829856873, "avg_line_length": 20.894737243652344, "blob_id": "02ad7196e9aa3bd0f31af6e08fc12ad6c1a298fe", "content_id": "ed320061c0e48abec387d8b770baead088c5a48e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 416, "license_type": "no_license", "max_line_length": 47, "num_lines": 19, "path": "/core/migrations/0025_zipcode_density.py", "repo_name": "jUnion44/plastic", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2020-08-25 16:48\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0024_zipcode_population'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='zipcode',\n name='density',\n field=models.FloatField(default=0),\n preserve_default=False,\n ),\n ]\n" } ]
30
adithyamadhusoodanan/Song-suggestions-based-on-moods-using-sentiment-analysis-
https://github.com/adithyamadhusoodanan/Song-suggestions-based-on-moods-using-sentiment-analysis-
003b0588806caa661251e7b80f1f30e613aef0fe
87dea58282d4eb473f08fe340eeb60d997fb1f9b
1b757a3aef87eb85b2e9f3e9ca2e4e27795e77c2
refs/heads/main
2023-01-19T19:39:30.328383
2020-11-30T20:07:38
2020-11-30T20:07:38
316,499,437
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5319148898124695, "alphanum_fraction": 0.5450081825256348, "avg_line_length": 17.515151977539062, "blob_id": "3664ec9709cc0d5901e967649d71670519ab7992", "content_id": "b21ac1e8467327541a43f0f95b1716fd30f672fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1222, "license_type": "no_license", "max_line_length": 68, "num_lines": 66, "path": "/get_sentiment.py", "repo_name": "adithyamadhusoodanan/Song-suggestions-based-on-moods-using-sentiment-analysis-", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request, redirect, url_for\nfrom joblib import load\n\n\n\npipeline = load(\"text_classification.joblib\")\n\n\ndef requestResults(name):\n name=[name]\n tweets = pipeline.predict(name)\n \n if(tweets==0):\n res='anger'\n if(tweets==1):\n res='boredom'\n if(tweets==2):\n res='empty'\n if(tweets==3):\n res='enthusiasm'\n if(tweets==4):\n res='fun'\n if(tweets==5):\n res='happiness'\n if(tweets==6):\n res='hate'\n if(tweets==7):\n res='love'\n if(tweets==8):\n res='neutral'\n if(tweets==9):\n res='sadness'\n if(tweets==10):\n res='relief'\n if(tweets==11):\n res='surprise'\n if(tweets==12):\n res='worry'\n\n\n \n return res\n\n\napp = Flask(__name__)\n\n\[email protected]('/')\ndef home():\n return render_template('home.html')\n\n\[email protected]('/', methods=['POST', 'GET'])\ndef get_data():\n if request.method == 'POST':\n user = request.form['search']\n return redirect(url_for('success', name=user))\n\n\[email protected]('/success/<name>')\ndef success(name):\n return \"<xmp>\" + str(requestResults(name)) + \" </xmp> \"\n\n\nif __name__ == '__main__' :\n app.run(debug=True)\n" }, { "alpha_fraction": 0.717391312122345, "alphanum_fraction": 0.717391312122345, "avg_line_length": 9.222222328186035, "blob_id": "21aab5ab04b6f1b29c8a6ec37aa379b15cfdfb9f", "content_id": "ce6578f50d2355b9d73e1aaf252188a69b3c9deb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 92, "license_type": "no_license", "max_line_length": 52, "num_lines": 9, "path": "/README.md", "repo_name": "adithyamadhusoodanan/Song-suggestions-based-on-moods-using-sentiment-analysis-", "src_encoding": "UTF-8", "text": "This project was done as the IT Minor Course Project\n\nTeam Members\n\nAdithya Madhusoodanan \n\nRohit M Nair\n\nSam Johnny George\n" } ]
2
nacleric/flask_blog
https://github.com/nacleric/flask_blog
7bbc31ce41f011db4e561d43cb7b13f6910e797a
61a924e6ff2c0cb422c1ef3368e6de5808fa76f6
8e2fefba8b28e81f28f34656fb999df462152076
refs/heads/new_models
2022-03-26T21:46:05.804463
2018-11-17T20:06:04
2018-11-17T20:06:04
116,421,159
1
0
null
2018-01-05T19:49:27
2018-11-17T22:14:22
2019-12-17T16:41:32
Python
[ { "alpha_fraction": 0.7441860437393188, "alphanum_fraction": 0.7441860437393188, "avg_line_length": 20, "blob_id": "30f47c578379f66b19c91c8dc8f910c212811c95", "content_id": "98bff9a714b56d06b5cf4cfba7f0ee223aab5b08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 43, "license_type": "no_license", "max_line_length": 28, "num_lines": 2, "path": "/README.md", "repo_name": "nacleric/flask_blog", "src_encoding": "UTF-8", "text": "# flask_blog\na toy project to learn flask\n\n" }, { "alpha_fraction": 0.6274768710136414, "alphanum_fraction": 0.6618229746818542, "avg_line_length": 24.233333587646484, "blob_id": "e6931655f62fb33f430cd9c659af89400bb07527", "content_id": "b08b9dc0cb9b26f56bb98d4dfcc6597770bce2f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 757, "license_type": "no_license", "max_line_length": 73, "num_lines": 30, "path": "/migrations/versions/d1fb66c4a7b3_actual_post_model_improved.py", "repo_name": "nacleric/flask_blog", "src_encoding": "UTF-8", "text": "\"\"\"actual post model improved\n\nRevision ID: d1fb66c4a7b3\nRevises: 62d69e8c9126\nCreate Date: 2018-02-12 17:51:36.119489\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd1fb66c4a7b3'\ndown_revision = '62d69e8c9126'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('post', sa.Column('intro', sa.String(), nullable=True))\n op.add_column('post', sa.Column('title', sa.String(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('post', 'title')\n op.drop_column('post', 'intro')\n # ### end Alembic commands ###\n" } ]
2
dib-lab/sour-rice
https://github.com/dib-lab/sour-rice
70e1ac1758e18eba75768f1b3a6d67ac0c66c591
6cc9ce0bb56cd320ba8f0c2abb2c6010b51316b3
8d84e3f3177f6a2628dabb7bedf6ae4338e8b2d3
refs/heads/master
2021-01-20T10:04:45.218365
2017-05-04T23:00:32
2017-05-04T23:00:32
90,316,980
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6692307591438293, "alphanum_fraction": 0.6692307591438293, "avg_line_length": 8.285714149475098, "blob_id": "4a1d5703f3b6b414fd8bae56b2f4c6f9c3f56282", "content_id": "7e1e4643b32fa3aa10aa9d6ae2c07f4025ac7ea0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 130, "license_type": "no_license", "max_line_length": 36, "num_lines": 14, "path": "/README.md", "repo_name": "dib-lab/sour-rice", "src_encoding": "UTF-8", "text": "# sourrice\n\n## Dependencies\n\n- pandas\n- doit\n- sourmash\n- SRA Toolkit (disable file caching)\n\n## Pipeline execution\n\n```\ndoit\n```\n" }, { "alpha_fraction": 0.5925020575523376, "alphanum_fraction": 0.6006519794464111, "avg_line_length": 33.08333206176758, "blob_id": "5923fd190929a4558932ff5cac5b34538fbf7196", "content_id": "ce342b04639a3f406c0b378ccd0a136550d2a6ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2454, "license_type": "no_license", "max_line_length": 79, "num_lines": 72, "path": "/sourrice.py", "repo_name": "dib-lab/sour-rice", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport argparse\nimport pandas\nimport subprocess\nimport sys\n\n\ndef get_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-k', '--ksize', metavar='K', type=int, default=27,\n help='word size')\n parser.add_argument('--metadata', metavar='FILE',\n default='seq_file_mapping_to_SRA.txt',\n help='file from which metadata will be read; default '\n 'is \"seq_file_mapping_to_SRA.txt\"')\n parser.add_argument('--dry-run', action='store_true', help='show the '\n 'commands to be executed but do not run the commands!')\n parser.add_argument('sample', nargs='+', help='sample accession(s)')\n return parser\n\n\ndef get_runs_by_sample(infile):\n run_seen = set()\n metadata = pandas.read_table(infile)\n for run, sample in zip(metadata['Run_acc'], metadata['Sample_Acc']):\n if run not in run_seen:\n run_seen.add(run)\n yield run, sample\n\n\ndef collect_runs(infile, sample_accessions):\n run_index = dict()\n for sample in sample_accessions:\n run_index[sample] = set()\n\n for run, sample in get_runs_by_sample(infile):\n if sample in run_index:\n run_index[sample].add(run)\n\n for sample in sorted(list(run_index.keys())):\n runs = run_index[sample]\n yield sample, sorted(list(runs))\n\n\ndef build_stream(runlist, outfile, ksize=27, dryrun=False):\n cmd1 = ['fastq-dump', '--split-files', '-Z'] + runlist\n cmd2 = ['sourmash', 'compute', '-o', outfile, '--ksizes', str(ksize), '-']\n cmd1str = ' '.join(cmd1)\n cmd2str = ' '.join(cmd2)\n if dryrun:\n return None, cmd1str, cmd2str\n\n dumpprocess = subprocess.Popen(cmd1, stdout=subprocess.PIPE)\n minhashprocess = subprocess.Popen(cmd2, stdin=dumpprocess.stdout)\n return minhashprocess, cmd1str, cmd2str\n\n\ndef main(args):\n for sample, runlist in collect_runs(args.metadata, args.sample):\n outfile = '{:s}.minhash'.format(sample)\n process, cmd1, cmd2 = build_stream(runlist, outfile, args.ksize,\n args.dry_run)\n print('[sourrice]', cmd1, '|', cmd2, file=sys.stderr)\n if process:\n print('[sourrice] computing', outfile, file=sys.stderr)\n process.communicate()\n\n\nif __name__ == '__main__':\n main(get_parser().parse_args())\n" }, { "alpha_fraction": 0.535538375377655, "alphanum_fraction": 0.5545390844345093, "avg_line_length": 26.326923370361328, "blob_id": "7583f6e2a20ca1e93d29862a0fac3791a89de358", "content_id": 
"add27c487ba4938a017019c727cfd4cf7910e444", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1421, "license_type": "no_license", "max_line_length": 74, "num_lines": 52, "path": "/dodo.py", "repo_name": "dib-lab/sour-rice", "src_encoding": "UTF-8", "text": "import os\nimport pandas\nfrom doit.tools import run_once\ntry:\n from urllib.request import urlretrieve\nexcept:\n from urllib import urltrerieve\n\n\ndef task_get_metadata():\n def get_metadata(targets):\n url = ('ftp://climb.genomics.cn/pub/10.5524/200001_201000/200001/'\n 'seq_file_mapping_to_SRA.txt')\n urlretrieve(url, targets[0])\n return True\n\n return {\n 'actions': [get_metadata],\n 'targets': ['metadata.tsv'],\n 'uptodate': [run_once],\n }\n\n\ndef task_get_samples():\n def get_samples(targets):\n metadata = pandas.read_table('metadata.tsv')\n with open('sample-accessions.txt', 'w') as outfile:\n for accession in metadata['Sample_Acc'].unique():\n print(accession, file=outfile)\n\n return {\n 'actions': [get_samples],\n 'targets': ['sample-accessions.txt'],\n 'file_dep': ['metadata.tsv'],\n }\n\n\ndef task_run_sourrice():\n if not os.path.exists('sample-accessions.txt'):\n return False\n\n with open('sample-accessions.txt', 'r') as infile:\n for line in infile:\n accession = line.strip()\n cmd = './sourrice.py -k 27 ' + accession\n outfile = accession + '.minhash'\n yield {\n 'name': outfile,\n 'actions': [cmd],\n 'targets': [outfile],\n 'uptodate': [run_once],\n }\n" } ]
3
Conor-Ryan8/Python-Web-Data-Coursera
https://github.com/Conor-Ryan8/Python-Web-Data-Coursera
894435d1f024790b85a725990388a10d6c8923d1
d8f1980d892d7eccb2b5271a0899a8f26ef8314f
b3d565cd55e7028976170df854085d876e958fd6
refs/heads/master
2022-07-16T09:20:52.849548
2020-05-18T20:17:09
2020-05-18T20:17:09
265,051,365
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6970874071121216, "alphanum_fraction": 0.7165048718452454, "avg_line_length": 23.5238094329834, "blob_id": "b5f9ea20a554231114f398dc52eaf0c388048070", "content_id": "f61a3f9c3642db774e2a7ded38c983ca247a0991", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 515, "license_type": "no_license", "max_line_length": 57, "num_lines": 21, "path": "/xml_test.py", "repo_name": "Conor-Ryan8/Python-Web-Data-Coursera", "src_encoding": "UTF-8", "text": "import xml.etree.ElementTree as ET\nimport urllib.request, urllib.parse, urllib.error\nimport ssl\n\nCTX = ssl.create_default_context()\nCTX.check_hostname = False\nCTX.verify_mode = ssl.CERT_NONE\n\nURL = 'http://py4e-data.dr-chuck.net/comments_227372.xml'\nData = urllib.request.urlopen(URL, context=CTX).read()\nTree = ET.fromstring(Data)\nlst = Tree.findall('comments/comment')\n\nCount = 0\nSum = 0\nfor item in lst:\n Sum = Sum + int(item.find('count').text)\n Count = Count + 1\n\nprint(Count, 'Entries!')\nprint('Sum:', Sum)\n" }, { "alpha_fraction": 0.6849315166473389, "alphanum_fraction": 0.6927592754364014, "avg_line_length": 23.33333396911621, "blob_id": "a80d91bc14af26021e6218b116a08ccd846e23a1", "content_id": "f00de96dfd9aa1fde01bbcc5f6eba86064ea51bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 511, "license_type": "no_license", "max_line_length": 56, "num_lines": 21, "path": "/bs_links.py", "repo_name": "Conor-Ryan8/Python-Web-Data-Coursera", "src_encoding": "UTF-8", "text": "import urllib.request, urllib.parse, urllib.error\nfrom bs4 import BeautifulSoup\nimport ssl\n\nCTX = ssl.create_default_context()\nCTX.check_hostname = False\nCTX.verify_mode = ssl.CERT_NONE\n\nPos = input('Position: ')\nCount = input('Counter: ')\nURL = input('URL: ')\n\ndef getURL(URL):\n HTML = urllib.request.urlopen(URL, context=CTX).read()\n Soup = BeautifulSoup(HTML, \"html.parser\")\n Tag = Soup('a')\n return str(Tag[int(Pos)-1].get('href',None))\n\nfor i in range(0,int(Count)+1):\n print(URL)\n URL = getURL(URL)\n" }, { "alpha_fraction": 0.720802903175354, "alphanum_fraction": 0.7281022071838379, "avg_line_length": 22.826086044311523, "blob_id": "b380a092c1f072f81f4a3e84f4070d99675234a2", "content_id": "a1554125998d5061bdc4cb9f1d4ae527c2eafe36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 548, "license_type": "no_license", "max_line_length": 58, "num_lines": 23, "path": "/json_api.py", "repo_name": "Conor-Ryan8/Python-Web-Data-Coursera", "src_encoding": "UTF-8", "text": "import urllib.request, urllib.parse, urllib.error\nimport json\nimport ssl\n\napi_key = 42\nservice = 'http://py4e-data.dr-chuck.net/json?'\n\nCTX = ssl.create_default_context()\nCTX.check_hostname = False\nCTX.verify_mode = ssl.CERT_NONE\n\naddress = 'Indian Institute of Technology Kharagpur India'\nparms = dict()\nparms['address'] = address\nparms['key'] = api_key\nurl = service + urllib.parse.urlencode(parms)\n\nhandle = urllib.request.urlopen(url)\ndata = handle.read().decode()\n\njs = json.loads(data)\nlocation = js['results'][0]['place_id']\nprint(location)\n" }, { "alpha_fraction": 0.7962962985038757, "alphanum_fraction": 0.7962962985038757, "avg_line_length": 26, "blob_id": "a0fa553003a8a35fdb24442a38405e87e7986f8f", "content_id": "9c36beeff8984712e66bdd96221cd0f531cee76f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 54, 
"license_type": "no_license", "max_line_length": 26, "num_lines": 2, "path": "/README.md", "repo_name": "Conor-Ryan8/Python-Web-Data-Coursera", "src_encoding": "UTF-8", "text": "# Python-Web-Data-Coursera\nfiles from web data course\n" }, { "alpha_fraction": 0.6972476840019226, "alphanum_fraction": 0.7155963182449341, "avg_line_length": 23.22222137451172, "blob_id": "50da5cdcc4b0778478cadbefb13e37c87d21c327", "content_id": "8af3908c8e01f483de2f85cf37e17f1b5dff29d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 436, "license_type": "no_license", "max_line_length": 58, "num_lines": 18, "path": "/json_test.py", "repo_name": "Conor-Ryan8/Python-Web-Data-Coursera", "src_encoding": "UTF-8", "text": "import urllib.request, urllib.parse, urllib.error\nimport json\nimport ssl\n\nCTX = ssl.create_default_context()\nCTX.check_hostname = False\nCTX.verify_mode = ssl.CERT_NONE\n\nURL = 'http://py4e-data.dr-chuck.net/comments_227373.json'\nData = urllib.request.urlopen(URL, context=CTX).read()\nDict = json.loads(Data)\nprint('User Count:',len(Dict['comments']))\n\nSum = 0\nfor Item in Dict['comments']:\n Sum = Sum + Item['count']\n\nprint('Sum:',Sum)\n" }, { "alpha_fraction": 0.6245487332344055, "alphanum_fraction": 0.6425992846488953, "avg_line_length": 22.08333396911621, "blob_id": "d1c7a6b1e7bb372937d3e7b07956c4b8c6502d8c", "content_id": "f75bb3767d73b9159fdfad7ddfdfad32ea28c424", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 277, "license_type": "no_license", "max_line_length": 41, "num_lines": 12, "path": "/regex.py", "repo_name": "Conor-Ryan8/Python-Web-Data-Coursera", "src_encoding": "UTF-8", "text": "import re\nFilename = input('Input Filename: ')\nHandle = open(Filename)\nSum = 0\nCount = 0\nfor Line in Handle:\n Line = Line.rstrip()\n List = re.findall('[0-9]+',Line)\n for Value in List:\n Sum = Sum + int(Value)\n Count = Count+1\nprint(Count,'Values, with a Sum of:',Sum)\n" } ]
6
tominsam/feedify
https://github.com/tominsam/feedify
cfd65ae241d4f13ee4b8d03b52eaba87c833d301
427e277352f27fac1122b6d15564468342a2aad2
ec2a367eedbef0e8e0bea86df88b4e27c0166324
refs/heads/main
2021-07-23T11:03:09.251700
2014-09-15T04:22:55
2014-09-15T04:22:55
3,222,199
1
0
null
2012-01-19T23:28:49
2017-07-28T21:57:10
2021-06-10T17:40:19
Python
[ { "alpha_fraction": 0.5468127727508545, "alphanum_fraction": 0.5577689409255981, "avg_line_length": 32.46666717529297, "blob_id": "159841f1400f340d137b53051aeea12b1c5b6eed", "content_id": "4e31a7faada8c59989064e059178ec57a287c610", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1004, "license_type": "no_license", "max_line_length": 114, "num_lines": 30, "path": "/instagram/migrations/0001_initial.py", "repo_name": "tominsam/feedify", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='AccessToken',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('key', models.CharField(unique=True, max_length=100)),\n ('created', models.DateTimeField(default=datetime.datetime.utcnow)),\n ('fetched', models.DateTimeField(null=True)),\n ('updated', models.DateTimeField()),\n ('username', models.CharField(max_length=100)),\n ('userid', models.CharField(unique=True, max_length=20)),\n ('feed_secret', models.CharField(unique=True, max_length=13)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 32, "blob_id": "3bc83453eb6f9fd74f8f9022d06a6a3b1bda479d", "content_id": "0910864673b89a3a7ae1d58cbd2322063311183d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 363, "license_type": "no_license", "max_line_length": 145, "num_lines": 11, "path": "/templates/instagram/anon.html", "repo_name": "tominsam/feedify", "src_encoding": "UTF-8", "text": "{% extends \"base.html\" %}{% load cache %}\n\n{% block content %}\n\n{% include \"instagram/_about.html\" %}\n\n<p class=\"authenticate\">Want to get a feed for yourself?<br>Get started by <a href=\"/instagram/auth/\">authenticating with Instagram here</a>.</p>\n\n<p>(If you've done this in the past, this won't make you a new feed, it'll just log you in.)</p>\n\n{% endblock %}\n" }, { "alpha_fraction": 0.5881478786468506, "alphanum_fraction": 0.5954110026359558, "avg_line_length": 35.487953186035156, "blob_id": "ea555ecb4f478ebfef4275150bfb292e062296db", "content_id": "abbab0213dc036c7614f1b858b98f67322294355", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6058, "license_type": "no_license", "max_line_length": 166, "num_lines": 166, "path": "/flickr/models.py", "repo_name": "tominsam/feedify", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.conf import settings\nfrom django.core.cache import cache\n\nimport urlparse\nimport urllib\nimport datetime\nimport oauth2\nimport uuid\nimport json\nimport time\nimport logging\n\nEXTRAS = \"date_upload,date_taken,owner_name,icon_server,original_format,description,geo,tags,machine_tags,o_dims,media,path_alias,url_t,url_s,url_m,url_z,url_l,url_o\"\n\nclass FlickrException(Exception):\n def __init__(self, code, message):\n self.code = code\n super(FlickrException, self).__init__(message)\n\n def __unicode__(self):\n return u\"%s: %s\"%(self.code, self.message)\n\nclass RequestToken(models.Model):\n key = models.CharField(max_length=100, null=False, blank=False, unique=True)\n secret = models.CharField(max_length=100, null=False, 
blank=False)\n created = models.DateTimeField(default=datetime.datetime.utcnow)\n\n def __str__(self):\n data = {\"oauth_token\": self.key, \"oauth_token_secret\": self.secret}\n return urllib.urlencode(data)\n\n @classmethod\n def from_string(cls, string):\n data = dict(urlparse.parse_qsl(string))\n token, created = cls.objects.get_or_create(key=data[\"oauth_token\"], defaults=dict(secret = data[\"oauth_token_secret\"]))\n if not created:\n token.secret = data[\"oauth_token_secret\"]\n token.save()\n return token\n\n def token(self):\n return oauth2.Token(self.key, self.secret)\n\n\nclass AccessToken(models.Model):\n key = models.CharField(max_length=100, null=False, blank=False, unique=True)\n secret = models.CharField(max_length=100, null=False, blank=False)\n created = models.DateTimeField(default=datetime.datetime.utcnow, null=False, blank=False)\n fetched = models.DateTimeField(null=True)\n updated = models.DateTimeField(blank=False, null=False)\n username = models.CharField(max_length=100, null=False, blank=False)\n nsid = models.CharField(max_length=20, null=False, blank=False, unique=True)\n fullname = models.CharField(max_length=100, null=False, blank=False)\n\n feed_secret = models.CharField(max_length=13, null=False, blank=False, unique=True)\n\n def __str__(self):\n data = {\"oauth_token\": self.key, \"oauth_token_secret\": self.secret}\n return urllib.urlencode(data)\n\n @classmethod\n def from_string(cls, string):\n data = dict(urlparse.parse_qsl(string))\n properties = dict(\n key = data[\"oauth_token\"],\n secret = data[\"oauth_token_secret\"],\n username=data[\"username\"],\n nsid=data[\"user_nsid\"],\n fullname = data.get(\"fullname\", data[\"username\"]),\n updated = datetime.datetime.utcnow(),\n )\n\n # remove old IDs for this user.\n cls.objects.filter(nsid=properties[\"nsid\"]).exclude(key=properties[\"key\"]).delete()\n token, created = cls.objects.get_or_create(key=properties[\"key\"], defaults=properties)\n if not created:\n for k, v in properties.items():\n setattr(token, k, v)\n token.save()\n return token\n\n def token(self):\n return oauth2.Token(self.key, self.secret)\n\n def save(self, *args, **kwargs):\n if not self.feed_secret:\n self.feed_secret = str(uuid.uuid4())[:13]\n return super(AccessToken, self).save(*args, **kwargs)\n\n\n def call(self, method, name, **kwargs):\n consumer = oauth2.Consumer(key=settings.FLICKR_API_KEY, secret=settings.FLICKR_API_SECRET)\n client = oauth2.Client(consumer, self.token())\n\n args = dict(\n method = name,\n format = \"json\",\n nojsoncallback = \"1\",\n )\n args.update(kwargs)\n params = urllib.urlencode(args)\n\n start = time.time()\n\n if method == \"get\":\n resp, content = client.request(\"%s?%s\"%(settings.FLICKR_API_URL, params), \"GET\")\n else:\n resp, content = client.request(settings.FLICKR_API_URL, \"POST\", body=params)\n\n self.last_time = time.time() - start\n\n if resp['status'] != '200':\n raise FlickrException(0, \"flickr API error : %s %s\"%(resp[\"status\"], content))\n\n if args[\"format\"] == \"json\":\n data = json.loads(content)\n if data[\"stat\"] != \"ok\":\n raise FlickrException(data[\"code\"], data[\"message\"])\n return data\n return content\n\n def recent_photos(self, no_instagram=False, just_friends=False, include_self=False):\n self.last_time = None\n\n cache_key = 'flickr_items_%s_%s_%s_%s'%(self.id, no_instagram, just_friends, include_self)\n photos = cache.get(cache_key)\n\n if not photos:\n try:\n response = self.call(\"get\", \"flickr.photos.getContactsPhotos\",\n count = 50,\n 
extras = EXTRAS,\n just_friends = (just_friends and \"1\" or \"0\"),\n include_self = (include_self and \"1\" or \"0\"),\n )\n photos = response[\"photos\"][\"photo\"]\n except FlickrException, e:\n logging.error(e)\n # don't cache failure\n return []\n\n def filter_instagram(p):\n mt = p[\"machine_tags\"].split()\n return not \"uploaded:by=instagram\" in mt\n if no_instagram:\n photos = filter(filter_instagram, photos)\n\n def filter_aaron(p):\n mt = p[\"machine_tags\"].split()\n return not \"uploaded:by=parallelflickr\" in mt\n photos = filter(filter_aaron, photos)\n\n cache.set(cache_key, photos, 120)\n\n for p in photos:\n p[\"description\"] = p[\"description\"][\"_content\"]\n p[\"link\"] = \"https://flickr.com/photos/%s/%s\"%(p[\"pathalias\"] or p[\"owner\"], p['id'])\n p[\"upload_date\"] = datetime.datetime.utcfromtimestamp(float(p[\"dateupload\"]))\n p[\"tags\"] = p[\"tags\"].split()\n\n return photos\n\n def touch(self):\n self.fetched = datetime.datetime.utcnow()\n self.save()\n\n" }, { "alpha_fraction": 0.6279518008232117, "alphanum_fraction": 0.6327710747718811, "avg_line_length": 35.68141555786133, "blob_id": "e0423bd6f67580ea0697ec7457f92400fd0dac36", "content_id": "047b3a6553ea27d275237d4aa8f3300f6050473d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4150, "license_type": "no_license", "max_line_length": 124, "num_lines": 113, "path": "/flickr/views.py", "repo_name": "tominsam/feedify", "src_encoding": "UTF-8", "text": "from flickr.models import RequestToken, AccessToken, FlickrException\n\nfrom django.http import HttpResponseRedirect\nfrom django.contrib import messages\nfrom django.shortcuts import render\nfrom django.conf import settings \n\nimport oauth2\nimport urllib\nimport logging\n\n\n# decorator, for some reason\ndef flickr_auth(fn):\n def wrapper(request, *args, **kwargs):\n request.access_token = None\n if request.session.get(\"fa\"):\n try:\n request.token = AccessToken.objects.get(id=request.session['fa'])\n except AccessToken.DoesNotExist:\n logging.info(\"bad access token %s\"%request.session['fa'])\n del request.session['fa']\n \n return fn(request, *args, **kwargs)\n return wrapper\n\n\n@flickr_auth\ndef index(request):\n if not hasattr(request, \"token\"):\n return render(request, \"flickr/anon.html\", dict(title=\"flickr\"))\n\n no_instagram = request.REQUEST.get(\"no_instagram\")\n just_friends = request.REQUEST.get(\"just_friends\")\n include_self = request.REQUEST.get(\"include_self\")\n\n try:\n photos = request.token.recent_photos(\n no_instagram=no_instagram,\n just_friends=just_friends,\n include_self=include_self,\n )\n except FlickrException, e:\n if e.code == 98:\n # token error\n request.token.delete()\n return HttpResponseRedirect(\"/flickr/auth/?logout\")\n\n return render(request, \"flickr/index.html\", dict(\n title = \"flickr\",\n token = request.token,\n photos = photos,\n time = request.token.last_time,\n ))\n\n\n\n\ndef auth(request):\n consumer = oauth2.Consumer(key=settings.FLICKR_API_KEY, secret=settings.FLICKR_API_SECRET)\n\n if request.GET.get(\"logout\") is not None:\n del request.session[\"fa\"]\n return HttpResponseRedirect(\"/flickr/\")\n\n # bounce step 1\n if not request.GET.get(\"oauth_token\"):\n client = oauth2.Client(consumer)\n # callback url support! 
SO AWESOME.\n params = urllib.urlencode(dict(oauth_callback = settings.SITE_URL+\"/flickr/auth/\",))\n resp, content = client.request(\"%s?%s\"%(settings.FLICKR_REQUEST_TOKEN_URL, params), \"GET\")\n\n if resp['status'] != '200':\n messages.add_message(request, messages.INFO, \"Error talking to flickr: (%s) %s\"%(resp['status'], content[:100]))\n return HttpResponseRedirect(\"/flickr/\")\n \n request_token = RequestToken.from_string(content)\n\n # keep session small\n request.session['fr'] = request_token.id\n return HttpResponseRedirect(\"%s?perms=read&oauth_token=%s\"%(settings.FLICKR_AUTHORIZE_URL, request_token.key))\n\n\n else:\n # step 2\n try:\n rt = RequestToken.objects.get(key = request.GET.get(\"oauth_token\"))\n except RequestToken.DoesNotExist:\n messages.add_message(request, messages.INFO, \"Bad token when talking to flickr. Try re-doing auth.\")\n return HttpResponseRedirect(\"/flickr/\")\n\n if rt.id != request.session.get('fr', None):\n logging.warn(\"tokens %r and %r do not match\"%(rt.id, request.session.get('fr', \"(None)\")))\n messages.add_message(request, messages.INFO, \"Bad token when talking to flickr. Try re-doing auth.\")\n return HttpResponseRedirect(\"/flickr/\")\n\n token = rt.token()\n token.set_verifier(request.GET.get(\"oauth_verifier\"))\n client = oauth2.Client(consumer, token)\n\n resp, content = client.request(settings.FLICKR_ACCESS_TOKEN_URL, \"POST\")\n if resp['status'] != '200':\n messages.add_message(request, messages.INFO, \"Error talking to flickr: (%s) %s\"%(resp['status'], content[:100]))\n return HttpResponseRedirect(\"/flickr/\")\n\n # this creates/updates a token object about this user. It's the user record, for all intents and purposes.\n access_token = AccessToken.from_string(content)\n\n # keep session small\n request.session['fa'] = access_token.id\n if 'fr' in request.session:\n del request.session['fr']\n return HttpResponseRedirect(\"/flickr/\")\n\n\n\n\n\n" }, { "alpha_fraction": 0.5665204524993896, "alphanum_fraction": 0.5694444179534912, "avg_line_length": 34.467533111572266, "blob_id": "9c77c226cee89035f3f2c5008a3cf80f4226fab1", "content_id": "892ac8a50129e1e8715dbaa7b194ab5e9580740b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2736, "license_type": "no_license", "max_line_length": 122, "num_lines": 77, "path": "/instagram/feeds.py", "repo_name": "tominsam/feedify", "src_encoding": "UTF-8", "text": "from instagram.models import AccessToken\nfrom flickr.feeds import GeoFeed\n\nfrom django.contrib.syndication.views import Feed\nfrom django.shortcuts import get_object_or_404\n\n\nclass InstagramPhotoFeed(Feed):\n feed_type = GeoFeed\n description_template = 'instagram/_photo.html'\n\n def get_object(self, request, token_secret):\n token = get_object_or_404(AccessToken, feed_secret = token_secret)\n token.filter_liked = request.REQUEST.get(\"liked\", False)\n token.filter_mine = request.REQUEST.get(\"mine\", False)\n return token\n \n def link(self, obj):\n return \"http://feedify.movieos.org/instagram/\"\n \n def title(self, obj):\n return u\"instagram feed for %s\"%obj.username\n\n def items(self, obj):\n obj.touch()\n if obj.filter_liked:\n return obj.get_photos(\"users/self/media/liked\")\n elif obj.filter_mine:\n return obj.get_photos(\"users/self/media/recent\")\n else:\n return obj.get_photos(\"users/self/feed\")\n\n def item_title(self, item):\n try:\n caption = (item[\"caption\"] or {})[\"text\"]\n except KeyError:\n caption = \"{no caption}\"\n\n return u\"%s - 
%s\"%(item[\"user\"][\"full_name\"], caption)\n\n def item_author_name(self, item):\n return item[\"user\"][\"full_name\"]\n \n \n def item_link(self, item):\n return item[\"link\"]\n \n def item_pubdate(self, item):\n return item[\"created_time\"]\n\n def item_extra_kwargs(self, item):\n extra = {}\n if \"location\" in item and item[\"location\"] and \"latitude\" in item[\"location\"] and \"longitude\" in item[\"location\"]:\n extra[\"latitude\"] = item[\"location\"][\"latitude\"]\n extra[\"longiutude\"] = item[\"location\"][\"longitude\"]\n\n # https://groups.google.com/forum/?fromgroups=#!topic/instagram-api-developers/ncB18unjqyg\n if isinstance(item[\"images\"][\"thumbnail\"], dict):\n extra[\"media:thumbnail\"] = dict(\n url = item[\"images\"][\"thumbnail\"][\"url\"],\n width = str(item[\"images\"][\"thumbnail\"][\"width\"]),\n height = str(item[\"images\"][\"thumbnail\"][\"height\"]),\n )\n extra[\"media:content\"] = dict(\n url = item[\"images\"][\"standard_resolution\"][\"url\"],\n width = str(item[\"images\"][\"standard_resolution\"][\"width\"]),\n height = str(item[\"images\"][\"standard_resolution\"][\"height\"]),\n )\n else:\n extra[\"media:thumbnail\"] = dict(\n url = item[\"images\"][\"thumbnail\"],\n )\n extra[\"media:content\"] = dict(\n url = item[\"images\"][\"standard_resolution\"],\n )\n\n return extra\n \n" }, { "alpha_fraction": 0.6189330220222473, "alphanum_fraction": 0.6221420168876648, "avg_line_length": 32.17333221435547, "blob_id": "e85a1a109113063f529da696d1c1a9885ba84bbd", "content_id": "0e3b091ba009bbf7680840adde114103ef50afec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2493, "license_type": "no_license", "max_line_length": 97, "num_lines": 75, "path": "/flickr/feeds.py", "repo_name": "tominsam/feedify", "src_encoding": "UTF-8", "text": "from flickr.models import AccessToken\n\nfrom django.contrib.syndication.views import Feed\nfrom django.utils.feedgenerator import Atom1Feed\nfrom django.shortcuts import get_object_or_404\n\n\n\nclass GeoFeed(Atom1Feed):\n def root_attributes(self):\n attrs = super(GeoFeed, self).root_attributes()\n attrs['xmlns:georss'] = 'http://www.georss.org/georss'\n attrs['xmlns:media'] = 'http://search.yahoo.com/mrss/'\n return attrs\n\n def add_item_elements(self, handler, item):\n super(GeoFeed, self).add_item_elements(handler, item)\n if \"latitude\" in item and \"longitude\" in item:\n handler.addQuickElement('georss:point', '%(latitude)s %(longitude)s'%item)\n \n if \"media:thumbnail\" in item:\n handler.addQuickElement(\"media:thumbnail\", attrs = item[\"media:thumbnail\"])\n\n if \"media:content\" in item:\n handler.addQuickElement(\"media:content\", attrs = item[\"media:content\"])\n\n handler.addQuickElement(\"media:title\", item[\"title\"])\n \n\n\nclass FlickrPhotoFeed(Feed):\n feed_type = GeoFeed\n description_template = 'flickr/_photo.html'\n\n def get_object(self, request, token_secret):\n self.no_instagram = request.REQUEST.get(\"no_instagram\")\n self.just_friends = request.REQUEST.get(\"just_friends\")\n self.include_self = request.REQUEST.get(\"include_self\")\n return get_object_or_404(AccessToken, feed_secret = token_secret)\n \n def link(self, obj):\n return \"http://feedify.movieos.org/flickr/\"\n \n def title(self, obj):\n return u\"flickr photos for contacts of %s\"%obj.fullname\n\n def items(self, obj):\n obj.touch()\n return obj.recent_photos(\n no_instagram=self.no_instagram,\n just_friends=self.just_friends,\n include_self=self.include_self,\n 
)\n\n    def item_title(self, item):\n        return u\"%s - %s\"%(item[\"ownername\"], item[\"title\"])\n    \n    def item_author_name(self, item):\n        return item[\"ownername\"]\n    \n    def item_link(self, item):\n        return item[\"link\"]\n    \n    def item_pubdate(self, item):\n        return item[\"upload_date\"]\n\n    def item_extra_kwargs(self, item):\n        extra = {}\n        if \"latitude\" in item and \"longitude\" in item and item[\"latitude\"] and item[\"longitude\"]:\n            extra[\"latitude\"] = item[\"latitude\"]\n            extra[\"longitude\"] = item[\"longitude\"]\n        \n        #extra[\"thumbnail\"] = item[\"url_t\"]\n\n        return extra\n    \n" }, { "alpha_fraction": 0.6941747665405273, "alphanum_fraction": 0.6941747665405273, "avg_line_length": 21.88888931274414, "blob_id": "e384e20899312ba259e099939ef3b7031d8a3d11", "content_id": "5b95fe8ca198c479139e1cb579d8e468d3b328ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 206, "license_type": "no_license", "max_line_length": 71, "num_lines": 9, "path": "/instagram/admin.py", "repo_name": "tominsam/feedify", "src_encoding": "UTF-8", "text": "from instagram.models import *\n\nfrom django.contrib import admin\n\n\nadmin.site.register(AccessToken,\n    list_display = (\"key\", \"userid\", \"username\", \"created\", \"fetched\"),\n    date_hierarchy = \"created\",\n)\n" }, { "alpha_fraction": 0.6103816032409668, "alphanum_fraction": 0.6224866509437561, "avg_line_length": 24.24870491027832, "blob_id": "d45fc1f38cf409a068bc7c25a2157ca0107fae9a", "content_id": "66f005f151cd66f7c0206e5bd359952f2a45407f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4874, "license_type": "no_license", "max_line_length": 101, "num_lines": 193, "path": "/settings.py", "repo_name": "tominsam/feedify", "src_encoding": "UTF-8", "text": "# Django settings for feedify project.\nimport os\n\nROOT = os.path.dirname(__file__)\n\nADMINS = (\n    (\"Tom Insam\", \"[email protected]\"),\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.sqlite3',\n        'NAME': os.path.join(os.path.dirname(__file__), 'default.db'),\n    }\n}\n\nAPPEND_SLASH = True\n\nTIME_ZONE = 'UTC'\nLANGUAGE_CODE = 'en-us'\nSITE_ID = 1\nUSE_I18N = False\nUSE_L10N = False\n\nUSE_ETAGS = True\n\nMEDIA_ROOT = ''\nMEDIA_URL = ''\n\nSITE_URL=\"http://localhost:8002\"\n\nALLOWED_HOSTS=[\"*\"]\n\nSTATIC_URL='/static/'\n\nSTATICFILES_DIRS = (\n    os.path.join(os.path.dirname(__file__), \"static\"),\n)\n\nSECRET_KEY = 'dev-secret-key'\n\nSESSION_COOKIE_NAME = \"feedify_session\"\n\nTEMPLATE_LOADERS = (\n    'django.template.loaders.filesystem.Loader',\n    'django.template.loaders.app_directories.Loader',\n#    'django.template.loaders.eggs.Loader',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n    \"django.contrib.auth.context_processors.auth\",\n    \"django.core.context_processors.debug\",\n    \"django.core.context_processors.i18n\",\n    \"django.core.context_processors.media\",\n    \"django.core.context_processors.static\",\n    \"django.contrib.messages.context_processors.messages\",\n    \"core.context_processors.all_settings\",\n)\n\n\nMIDDLEWARE_CLASSES = [\n    'django.middleware.common.CommonMiddleware',\n    'session.middleware.SessionMiddleware', # 'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.middleware.csrf.CsrfViewMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n    'django.contrib.messages.middleware.MessageMiddleware',\n    'core.exception_handling.ExceptionMiddleware',\n]\n\n# if not PRODUCTION:\n# 
MIDDLEWARE_CLASSES.append('debug_toolbar.middleware.DebugToolbarMiddleware')\n# INTERNAL_IPS = ('127.0.0.1',)\n# DEBUG_TOOLBAR_CONFIG = {\n# \"INTERCEPT_REDIRECTS\": False,\n# }\n\n\nROOT_URLCONF = 'urls'\n\nTEMPLATE_DIRS = (\n os.path.join(ROOT, \"templates\"),\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.admin',\n 'django.contrib.staticfiles',\n\n # deps\n #\"debug_toolbar\",\n\n # my apps\n \"core\",\n \"flickr\",\n \"instagram\",\n)\n\nLOGGING = {\n 'version': 1,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s|%(asctime)s|%(process)d|%(module)s|%(message)s'\n },\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose'\n },\n # 'file': {\n # 'level': 'DEBUG',\n # 'class': 'logging.FileHandler',\n # 'formatter': 'verbose',\n # 'filename': '/var/log/feedify/django.log',\n # },\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n },\n 'loggers': {\n 'django': {\n 'level': 'INFO', # SQL loggiung on debug\n 'handlers': ['console', \"mail_admins\"],\n },\n '': {\n 'level': 'INFO', # SQL logging on debug\n 'handlers': ['console', \"mail_admins\"],\n },\n }\n}\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n 'LOCATION': '127.0.0.1:11211',\n }\n}\n\n\nFLICKR_REQUEST_TOKEN_URL=\"https://www.flickr.com/services/oauth/request_token\"\nFLICKR_ACCESS_TOKEN_URL=\"https://www.flickr.com/services/oauth/access_token\"\nFLICKR_AUTHORIZE_URL=\"https://www.flickr.com/services/oauth/authorize\"\n\nINSTAGRAM_AUTHORIZE_URL=\"https://api.instagram.com/oauth/authorize/\"\nINSTAGRAM_ACCESS_TOKEN_URL=\"https://api.instagram.com/oauth/access_token\"\nINSTAGRAM_API_URL=\"https://api.instagram.com/v1/\"\n\nFLICKR_API_URL=\"https://api.flickr.com/services/rest/\"\n\n\nPRODUCTION = os.environ.get(\"PRODUCTION\", False)\nif PRODUCTION:\n DEBUG=False\n EMAIL_BACKEND=\"sendmail.EmailBackend\"\n SERVER_EMAIL=\"[email protected]\"\n DEFAULT_FROM_EMAIL=\"[email protected]\"\n STATIC_URL='http://feedify.movieos.org/static/'\n\n # ugh, hard-coding things sucks. 
Import production settings\n # from a python file in my home directory, rather than checking\n # them in.\n import imp\n prod = imp.load_source(\"production_settings\", \"/home/tomi/deploy/seatbelt/feedify_production.py\")\n for k in filter(lambda a: a[0] != \"_\", dir(prod)):\n locals()[k] = getattr(prod, k)\n\nelse:\n DEBUG=True\n\n # these are dev keys\n FLICKR_API_KEY=\"2d56dbb2d5cf87796478b53e4949dc66\"\n FLICKR_API_SECRET=\"c27d752ea2bdba80\"\n\n # these are dev keys\n INSTAGRAM_API_KEY=\"2ee26d19721040c98b4f93da87d7b485\"\n INSTAGRAM_API_SECRET=\"4acc3891a73147dfb77262b0daf3cc01\"\n\n\nTEMPLATE_DEBUG = DEBUG\n\n" }, { "alpha_fraction": 0.6584967374801636, "alphanum_fraction": 0.6584967374801636, "avg_line_length": 28.14285659790039, "blob_id": "2c2cf8aeb8bc7e9ab0040c797f69773b0bc16b8a", "content_id": "f1d8c476b492afd4564918c6898235fd71abefe6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 612, "license_type": "no_license", "max_line_length": 60, "num_lines": 21, "path": "/urls.py", "repo_name": "tominsam/feedify", "src_encoding": "UTF-8", "text": "from django.conf.urls import *\nfrom django.contrib import admin\nadmin.autodiscover()\n\nfrom flickr.feeds import FlickrPhotoFeed\nfrom instagram.feeds import InstagramPhotoFeed\n\n\nurlpatterns = patterns('',\n (r'^feedify-admin/', include(admin.site.urls)),\n\n url(r'^$', \"core.views.index\"),\n\n url(r'^flickr/$', \"flickr.views.index\"),\n url(r'^flickr/auth/$', \"flickr.views.auth\"),\n url(r'^flickr/feed/([^/]+)/$', FlickrPhotoFeed()),\n\n url(r'^instagram/$', \"instagram.views.index\"),\n url(r'^instagram/auth/$', \"instagram.views.auth\"),\n url(r'^instagram/feed/([^/]+)/$', InstagramPhotoFeed()),\n)\n" }, { "alpha_fraction": 0.7845804691314697, "alphanum_fraction": 0.7845804691314697, "avg_line_length": 47.88888931274414, "blob_id": "53cd10d826afceb3673a0e5b1d0d04e605548021", "content_id": "a2d1acc11551e5ba51a75179c17376a6ab557e46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 441, "license_type": "no_license", "max_line_length": 77, "num_lines": 9, "path": "/README.markdown", "repo_name": "tominsam/feedify", "src_encoding": "UTF-8", "text": "Feedify makes Atom feeds out of your friends' Flickr and Instagram photo\nfeeds, and other things that don't already provide them, because I like\nsubscribing to things in a feed reader rather than having to check everything\nevery day.\n\nAs with most stuff I make it's fragile and will probably break. And you have\nto be quite nerdy to understand why you'd even care. 
Nevertheless, I like it.\n\nFeedify is live at <http://feedify.movieos.org/>\n\n" }, { "alpha_fraction": 0.5903179049491882, "alphanum_fraction": 0.5971820950508118, "avg_line_length": 32.75609588623047, "blob_id": "fdc85f8f0008d0b1c6c3e541abe644236a706122", "content_id": "d4264a8b1e42ba3ef9513a08a227a231636913b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2768, "license_type": "no_license", "max_line_length": 100, "num_lines": 82, "path": "/instagram/models.py", "repo_name": "tominsam/feedify", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.core.cache import cache\n\nimport urllib2\nimport datetime\nimport uuid\nimport json\nimport time\n\nclass InstagramException(Exception):\n def __init__(self, code, message):\n self.code = code\n super(InstagramException, self).__init__(message)\n\n def __unicode__(self):\n return u\"%s: %s\"%(self.code, self.message)\n\n\nclass AccessToken(models.Model):\n key = models.CharField(max_length=100, null=False, blank=False, unique=True)\n created = models.DateTimeField(default=datetime.datetime.utcnow, null=False, blank=False)\n fetched = models.DateTimeField(null=True)\n updated = models.DateTimeField(blank=False, null=False)\n username = models.CharField(max_length=100, null=False, blank=False)\n userid = models.CharField(max_length=20, null=False, blank=False, unique=True)\n feed_secret = models.CharField(max_length=13, null=False, blank=False, unique=True)\n\n def __str__(self):\n return self.key\n\n @classmethod\n def from_string(cls, string):\n data = json.loads(string)\n properties = dict(\n key = data[\"access_token\"],\n username = data[\"user\"][\"username\"],\n userid = data[\"user\"][\"id\"],\n updated = datetime.datetime.utcnow(),\n )\n\n token, created = cls.objects.get_or_create(userid=properties[\"userid\"], defaults=properties)\n if not created:\n for k, v in properties.items():\n setattr(token, k, v)\n token.save()\n return token\n\n\n def save(self, *args, **kwargs):\n if not self.feed_secret:\n self.feed_secret = str(uuid.uuid4())[:13]\n return super(AccessToken, self).save(*args, **kwargs)\n\n\n def get_photos(self, method=\"users/self/feed\"):\n cache_key = 'instagram_items_%s_%s'%(self.id, method)\n self.last_time = None\n photos = cache.get(cache_key)\n\n if not photos:\n url = \"https://api.instagram.com/v1/%s?access_token=%s\"%(method, self.key)\n start = time.time()\n try:\n conn = urllib2.urlopen(url)\n data = json.loads(conn.read())\n except Exception:\n return []\n self.last_time = time.time() - start\n photos = data[\"data\"]\n cache.set(cache_key, photos, 120)\n\n for p in photos:\n p[\"created_time\"] = datetime.datetime.utcfromtimestamp(float(p[\"created_time\"]))\n if not p[\"link\"]:\n # private photos don't have public links. 
link to full-rez image instead.\n p[\"link\"] = p[\"images\"][\"standard_resolution\"][\"url\"]\n\n return photos\n\n def touch(self):\n self.fetched = datetime.datetime.utcnow()\n self.save()\n" }, { "alpha_fraction": 0.5510493516921997, "alphanum_fraction": 0.564662516117096, "avg_line_length": 32.233009338378906, "blob_id": "2c9f7623110a279a4524c1ecf0dd4c90c898dc04", "content_id": "bd6f16f4346d2b14a39f2b53aa2ebe76ee73ee32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3526, "license_type": "no_license", "max_line_length": 77, "num_lines": 103, "path": "/session/middleware.py", "repo_name": "tominsam/feedify", "src_encoding": "UTF-8", "text": "# http://scratchpad.cmlenz.net/370f3e0d58804d38c3bc14e514272fda/\r\n\r\nfrom base64 import b64decode, b64encode\r\nimport hashlib\r\nfrom time import time\r\nimport zlib\r\nimport logging\r\n\r\nfrom django.conf import settings\r\nfrom django.contrib.sessions.backends.base import SessionBase\r\nfrom django.utils.cache import patch_vary_headers\r\nfrom django.utils.http import cookie_date\r\nimport json\r\n\r\nMAX_COOKIE_SIZE = 4096\r\n\r\n\r\nclass SessionMiddleware(object):\r\n\r\n def process_request(self, request):\r\n cookie = request.COOKIES.get(settings.SESSION_COOKIE_NAME)\r\n request.session = SessionStore(cookie)\r\n\r\n def process_response(self, request, response):\r\n try:\r\n session = request.session\r\n except AttributeError:\r\n return response # 404 page, for instance\r\n if session.deleted:\r\n response.delete_cookie(settings.SESSION_COOKIE_NAME)\r\n else:\r\n if session.accessed:\r\n patch_vary_headers(response, ('Cookie',))\r\n if session.modified or settings.SESSION_SAVE_EVERY_REQUEST:\r\n if session.get_expire_at_browser_close():\r\n max_age = None\r\n expires = None\r\n else:\r\n max_age = session.get_expiry_age()\r\n expires = cookie_date(time() + max_age)\r\n cookie = session.encode(session._session)\r\n if len(cookie) <= MAX_COOKIE_SIZE:\r\n response.set_cookie(settings.SESSION_COOKIE_NAME, cookie,\r\n max_age = max_age, expires=expires,\r\n domain = settings.SESSION_COOKIE_DOMAIN,\r\n path = settings.SESSION_COOKIE_PATH,\r\n secure = settings.SESSION_COOKIE_SECURE or None\r\n )\r\n else:\r\n # The data doesn't fit into a cookie, not sure what's the\r\n # best thing to do in this case. Right now, we just leave\r\n # the old cookie intact if there was one. 
If Django had\r\n # some kind of standard logging interface, we could also\r\n # log a warning here.\r\n pass\r\n return response\r\n\r\n\r\nclass SessionStore(SessionBase):\r\n\r\n def __init__(self, cookie):\r\n SessionBase.__init__(self, 'cookie')\r\n self.cookie = cookie\r\n self.deleted = False\r\n\r\n def exists(self, session_key):\r\n return self.cookie and not self.deleted\r\n\r\n def create(self):\r\n pass\r\n\r\n def save(self, must_create=False):\r\n pass\r\n\r\n def delete(self, session_key=None):\r\n self.deleted = True\r\n\r\n def load(self):\r\n if self.cookie:\r\n return self.decode(self.cookie)\r\n return {}\r\n\r\n def cycle_key(self):\r\n pass\r\n\r\n def encode(self, session_dict):\r\n data = json.dumps(session_dict)\r\n json_md5 = hashlib.md5(data + settings.SECRET_KEY).hexdigest()\r\n try:\r\n return b64encode(zlib.compress(data + json_md5))\r\n except Exception:\r\n return ''\r\n\r\n def decode(self, session_data):\r\n try:\r\n data = zlib.decompress(b64decode(session_data))\r\n except Exception:\r\n return {}\r\n data, json_md5 = data[:-32], data[-32:]\r\n if hashlib.md5(data + settings.SECRET_KEY).hexdigest() != json_md5:\r\n logging.error('User tampered with session cookie')\r\n return {}\r\n return json.loads(data)\r\n" }, { "alpha_fraction": 0.5098039507865906, "alphanum_fraction": 0.686274528503418, "avg_line_length": 24.5, "blob_id": "e7d5058c2f79d679d58c714c73ab3a676573821c", "content_id": "be57dab899e130cd6a26b62400bd6a9ca3b200f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 51, "license_type": "no_license", "max_line_length": 34, "num_lines": 2, "path": "/run-server.sh", "repo_name": "tominsam/feedify", "src_encoding": "UTF-8", "text": "cd `dirname $0`\n./manage.py runserver 0.0.0.0:8002\n" }, { "alpha_fraction": 0.6478431224822998, "alphanum_fraction": 0.6494117379188538, "avg_line_length": 33.917808532714844, "blob_id": "3e00c5c142e495041e68f970fbaa2c13fb45e5f3", "content_id": "1462ffb611863f66b08351dd6ce627ac794e4a92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2550, "license_type": "no_license", "max_line_length": 162, "num_lines": 73, "path": "/instagram/views.py", "repo_name": "tominsam/feedify", "src_encoding": "UTF-8", "text": "from instagram.models import AccessToken, InstagramException\n\nfrom django.http import HttpResponseRedirect\nfrom django.contrib import messages\nfrom django.shortcuts import render\nfrom django.conf import settings \n\nimport urllib, urllib2\nimport logging\n\n\ndef index(request):\n if not request.session.get(\"i\"):\n return render(request, \"instagram/anon.html\", dict(title=\"instagram\"))\n \n try:\n token = AccessToken.objects.get(pk=request.session[\"i\"])\n except AccessToken.DoesNotExist:\n del request.session[\"i\"]\n return HttpResponseRedirect(\"/instagram/\")\n\n try:\n photos = token.get_photos()\n except InstagramException, e:\n logging.error(\"can't talk to instagram: %s\"%e)\n return HttpResponseRedirect(\"/instagram/auth/?logout\")\n\n return render(request, \"instagram/list.html\", dict(\n title = \"instagram\",\n token = token,\n photos = photos,\n time = token.last_time,\n ))\n\n\ndef auth(request):\n if request.GET.get(\"logout\") is not None:\n del request.session[\"i\"]\n return HttpResponseRedirect(\"/instagram/\")\n\n redirect = \"%s/instagram/auth/\"%settings.SITE_URL\n\n # bounce step 1\n if not request.GET.get(\"code\") and not request.GET.get(\"error\"):\n return 
HttpResponseRedirect(\"%s?client_id=%s&redirect_uri=%s&response_type=code\"%(settings.INSTAGRAM_AUTHORIZE_URL, settings.INSTAGRAM_API_KEY, redirect))\n\n # error in auth. Probably turned us down.\n error = request.REQUEST.get(\"error\")\n if error:\n messages.add_message(request, messages.INFO, \"Problem talking to instagram: %s. Try re-doing auth.\"%error)\n return HttpResponseRedirect(\"/instagram/\")\n\n # successful auth\n code = request.REQUEST.get(\"code\")\n if code:\n try:\n conn = urllib2.urlopen(settings.INSTAGRAM_ACCESS_TOKEN_URL, urllib.urlencode(dict(\n client_id = settings.INSTAGRAM_API_KEY,\n client_secret = settings.INSTAGRAM_API_SECRET,\n grant_type= \"authorization_code\",\n redirect_uri= redirect,\n code = code,\n )))\n except urllib2.HTTPError, e:\n messages.add_message(request, messages.INFO, \"Problem talking to instagram: %s. Try re-doing auth.\"%e.read())\n return HttpResponseRedirect(\"/instagram/\")\n\n # saves the token as well.\n token = AccessToken.from_string(conn.read())\n\n # keep session small\n request.session['i'] = token.id\n return HttpResponseRedirect(\"/instagram/\")\n\n" }, { "alpha_fraction": 0.7210526466369629, "alphanum_fraction": 0.7210526466369629, "avg_line_length": 22.5, "blob_id": "44e7bdf01524168ed06c885f7f3d5a5621678121", "content_id": "fc5944f36606b048bc432f93bdf442f4c889a38c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 190, "license_type": "no_license", "max_line_length": 52, "num_lines": 8, "path": "/core/exception_handling.py", "repo_name": "tominsam/feedify", "src_encoding": "UTF-8", "text": "import logging\nimport traceback\n\nclass ExceptionMiddleware(object):\n\n def process_exception(self, request, exception):\n logging.error(traceback.format_exc())\n return None\n\n\n" }, { "alpha_fraction": 0.7131242752075195, "alphanum_fraction": 0.7177700400352478, "avg_line_length": 38.181819915771484, "blob_id": "32fd5fdaa44e4b6661bc64ccb50ad4f2a1f7d0d2", "content_id": "18ecbca7775224365468fe74bfca7505c443c506", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 861, "license_type": "no_license", "max_line_length": 240, "num_lines": 22, "path": "/templates/index.html", "repo_name": "tominsam/feedify", "src_encoding": "UTF-8", "text": "{% extends \"base.html\" %}{% load cache %}\n\n{% block content %}\n<p>Feedify makes Atom feeds out of your friends' Flickr and Instagram photo feeds, and other things that don't already provide them, because I like subscribing to things in a feed reader rather than having to check everything every day.</p>\n\n<p>As with most stuff I make it's fragile and will probably break. And you have to be quite nerdy to understand why you'd even care. 
Nevertheless, I like it.</p>\n\n\n<h2>flickr</h2>\n<p>Feedify recent uploads from your Flickr contacts.</p>\n<p><a href=\"/flickr/\">start here</a></p>\n\n<h2>instagram</h2>\n<p>Feedify recent uploads from your Instagram contacts.</p>\n<p><a href=\"/instagram/\">start here</a></p>\n\n\n<hr>\n<p>If you're interested, you can find the <a href=\"https://github.com/tominsam/feedify\">source code for Feedify on Github</a>.</p>\n\n\n{% endblock %}" }, { "alpha_fraction": 0.6709677577018738, "alphanum_fraction": 0.6709677577018738, "avg_line_length": 21.14285659790039, "blob_id": "3674bf6a978de2cfb97cb6d903a7faf570660c45", "content_id": "87c93ef9cdd843482ad92170dc61dbdd83793072", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 310, "license_type": "no_license", "max_line_length": 69, "num_lines": 14, "path": "/flickr/admin.py", "repo_name": "tominsam/feedify", "src_encoding": "UTF-8", "text": "from flickr.models import *\n\nfrom django.contrib import admin\n\n\nadmin.site.register(RequestToken, \n list_display = (\"key\", \"created\"),\n date_hierarchy = \"created\",\n)\n\nadmin.site.register(AccessToken,\n list_display = (\"key\", \"nsid\", \"fullname\", \"created\", \"fetched\"),\n date_hierarchy = \"created\",\n)\n" }, { "alpha_fraction": 0.7443609237670898, "alphanum_fraction": 0.7969924807548523, "avg_line_length": 11.090909004211426, "blob_id": "51b75d71e1671f996403113f50e79c667d848587", "content_id": "b50e2c4c51719aff24d64dd63089fa72bd3d8b36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 133, "license_type": "no_license", "max_line_length": 35, "num_lines": 11, "path": "/requirements.txt", "repo_name": "tominsam/feedify", "src_encoding": "UTF-8", "text": "Django==1.7\ndjango-debug-toolbar\ngunicorn\npython-memcached\ndjango-redis-cache\n\neventlet\n\n\nhttplib2==0.7.2 # certificate thing\noauth2\n" }, { "alpha_fraction": 0.7230392098426819, "alphanum_fraction": 0.7279411554336548, "avg_line_length": 80.4000015258789, "blob_id": "bf7530785042679f322c9e3e97833425282b53bf", "content_id": "cb19bde3fc9e06652c03f661097176afd446d20c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 408, "license_type": "no_license", "max_line_length": 230, "num_lines": 5, "path": "/templates/instagram/_about.html", "repo_name": "tominsam/feedify", "src_encoding": "UTF-8", "text": "<h2>feedify/instagram</h2>\n\n<p>The intent was to get an RSS feed of the photos of my contacts. Instagram don't provide this, or any other RSS feeds. So I made one. 
I have a <a href=\"/flickr/\">similar feed generator for Flickr photos if you want that</a>.</p>\n\n<p>It uses the <a href=\"http://instagram.com/developer/\">Instagram API</a> to fetch the photos, but is not endorsed or certified by Instagram.</p>\n\n" }, { "alpha_fraction": 0.5446153879165649, "alphanum_fraction": 0.5600000023841858, "avg_line_length": 22.214284896850586, "blob_id": "ea219b907c04ec0f665d26c3a3c94c64ba084fd6", "content_id": "c3e45d39e7d4887e0b34741b3098b11a1e62f023", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 325, "license_type": "no_license", "max_line_length": 81, "num_lines": 14, "path": "/fabfile.py", "repo_name": "tominsam/feedify", "src_encoding": "UTF-8", "text": "import os, sys\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), \"..\", \"deployinator\"))\nfrom deployinator.deployinator import *\n\nfab_init(\"feedify\",\n database = \"feedify\",\n postgresql = True,\n rules = {\n \"nginx\": \"deploy/nginx.conf\",\n \"gunicorn\": {\n \"port\": 8002,\n }\n }\n)\n" } ]
20
andywarburton/Retropie-cleanup
https://github.com/andywarburton/Retropie-cleanup
a8fe820b8230aaf6bf8271c36219fb6d87d43e0f
7d60697cdd5c8943f183442103e2653583240f78
653f2b6a75a5ac6914fae8551aee512937530ab1
refs/heads/master
2021-01-17T12:44:32.611442
2018-12-02T10:56:59
2018-12-02T10:56:59
56,989,773
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7537190318107605, "alphanum_fraction": 0.7537190318107605, "avg_line_length": 34.588233947753906, "blob_id": "3512c0a4d36bcee9c60858c089fd05e6aa82cf3f", "content_id": "b2572d652745992709e6a535d7a0f66ef1a2d28d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 605, "license_type": "permissive", "max_line_length": 124, "num_lines": 17, "path": "/README.md", "repo_name": "andywarburton/Retropie-cleanup", "src_encoding": "UTF-8", "text": "# RetroPie-Tools\n\nA collection of simple python scripts that I use for cleaning up my RetroPie installation.\n\n* `remove-imageless-roms.py` - (nearly finished) removes roms that do not have any artwork\n\nTo run, simply copy the above file to any location on your Raspberry Pi and type:\n\n```sudo python ./remove-imageless-roms.py``` \n\nThen follow the onscreen instructions!\n \n### Coming Soon... ###\n\n* `remove-romless-images.py` - (unstarted) removes images that do not have any roms (for instance when rom has been deleted)\n\nGot a suggestion for a useful script? Please send suggestions via the issues tab!\n" }, { "alpha_fraction": 0.4817347228527069, "alphanum_fraction": 0.48896268010139465, "avg_line_length": 35.82014465332031, "blob_id": "397b4e49703dffaec1ce1ed748457cf480d741e5", "content_id": "b2f42f7b8be27cd14be158c38cfff1396ad61502", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5119, "license_type": "permissive", "max_line_length": 80, "num_lines": 139, "path": "/remove-imageless-roms.py", "repo_name": "andywarburton/Retropie-cleanup", "src_encoding": "UTF-8", "text": "# This program is intended to remove any roms from RetroPie that do not\n# have any artwork. It will work on a Raspberry Pi or a Linux computer\n\n\nimport os\nimport os.path\nimport sys\nimport string\n\n# config vars, if you are using non-standard stuff change these\n# os.environ['HOME'] pulls the users current home directory, wherever\n# that is, and changes with the username\n# /opt/ does not need to change as this is standard across RetroPie and\n# linux distros\nrom_dir = os.environ['HOME'] + '/RetroPie/roms'\nbak_dir = os.environ['HOME'] + '/RetroPie/cleaned_up'\nimg_dir = '/opt/retropie/configs/all/emulationstation/downloaded_images'\nallowed_systems = [\n 'amiga','amstradcpc','apple2',\n 'arcade','atari800','atari2600',\n 'atari5200', 'atari7800','atarilynx',\n 'atarist','c64','coco',\n 'dragon32','dreamcast','fba',\n 'fds','gamegear','gb',\n 'gba','gbc','intellivision',\n 'macintosh','mame-advmame', 'mame-libretro',\n 'mame-mame4all','mastersystem','megadrive',\n 'msx','n64', 'neogeo',\n 'nes','ngp','ngpc',\n 'pc','pcengine','psp',\n 'psx','sega32x','segacd',\n 'sg-1000','snes','vectrex',\n 'videopac','wonderswan','wonderswancolor',\n 'zmachine','zxspectrum'\n ]\n\n# The number of deleted roms\ndelete_count = 0\n# The total number of roms\ntotal_count = 0\n\nquestion = \"\"\"\n*****************************************************************\n WARNING! THIS SCRIPT WILL PERMANENTLY DELETE FILES!\n\n ===== PLEASE BACKUP BEFORE RUNNING THIS! =====\n ===== IT IS RECOMMENDED TO DO \"TEST\" FIRST! 
=====\n\n    TYPE \"TEST\" TO DO A TEST RUN THAT WILL TELL YOU HOW\n    MANY FILES WOULD BE CHANGED BY AN ACTUAL RUN\n\n    TYPE \"CLEAN\" TO MOVE UNWANTED FILES TO A BACKUP DIRECTORY\n    (/home/pi/RetroPie/cleaned_up)\n\n    TYPE \"DELETE\" IF YOU ARE OF SOUND MIND,\n    UNDERSTAND THE RISKS, AND WISH TO PROCEED\n*****************************************************************\n: \"\"\"\n\nspacer = \"*****************************************************************\"\n\n# question.replace() replaces the output text's CLEAN directory (what is\n# presented to the user) with the actual directory\nuser_input = raw_input(question.replace('/home/pi',os.environ['HOME'])).upper()\n\n# verify user input\nif user_input in ['DELETE','TEST','CLEAN']:\n\n    # for the cleanup process, we need somewhere to put our backups\n    if user_input == 'CLEAN':\n        if not os.path.isdir(bak_dir):\n            os.makedirs(bak_dir)\n\n    # walk every file (and the directory containing it) in the rom directory\n    for root, subdirs, files in os.walk(rom_dir):\n\n        list_file_path = os.path.join(root, 'foo.txt')\n\n        with open(list_file_path, 'wb') as list_file:\n\n            os.remove(list_file_path)\n\n            for filename in files:\n                file_path = os.path.join(root, filename)\n\n                rom_name = filename.split('.')[0]\n\n                system_name = file_path.split('/')[-2]\n\n                # verify system is allowed (do not want to mess with\n                # other folders in the roms directory)\n                if system_name in allowed_systems:\n\n                    total_count += 1\n\n                    image_path = img_dir + '/' + system_name + '/' +\\\n                        rom_name + '-image.jpg'\n\n                    if not os.path.isfile(image_path):\n                        ## no image found, we should delete the rom!\n\n                        if user_input == 'DELETE':\n\n                            print \"DELETING: \" + system_name + \"/\" + rom_name +\\\n                                \" (\" + filename + \")\"\n                            os.remove(file_path)\n                            delete_count += 1\n\n                        elif user_input == 'CLEAN':\n\n                            print \"CLEANING: \" + system_name + \"/\" + rom_name +\\\n                                \" (\" + filename + \")\"\n                            system_bak_dir = bak_dir + '/' + system_name\n                            bak_file_path = system_bak_dir + '/' + filename\n\n                            if not os.path.isdir(system_bak_dir):\n                                os.makedirs(system_bak_dir)\n\n                            os.rename(file_path, bak_file_path)\n                            delete_count += 1\n\n                        else:\n\n                            print \"TESTING: \" + system_name + \"/\" + rom_name +\\\n                                \" (\" + filename + \")\"\n                            ## do nothing\n                            delete_count += 1\n\n    remaining_roms = total_count - delete_count\n\n    print \"\\n\" + spacer\n    print user_input + \" COMPLETE: \" + str(delete_count) + \" of \" +\\\n        str(total_count) + \" total (\" + str(remaining_roms) + \" remain)\"\n    print spacer\n\nelse:\n    \n    print \"INPUT NOT RECOGNIZED, PLEASE TRY AGAIN\"\n    sys.exit()\n\n" } ]
2
dhhagan/eps236-project
https://github.com/dhhagan/eps236-project
8abbc864964e86f2e1bb16de1c7087303c0b9c4d
705993fef72df86216f59c14dcaca34e5429a0f0
75a66cbf797141b00b9dacaa586d7228d03e5bca
refs/heads/master
2021-01-19T10:13:50.394893
2017-04-26T13:18:12
2017-04-26T13:18:12
87,838,015
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.607413649559021, "alphanum_fraction": 0.634372353553772, "avg_line_length": 33.89706039428711, "blob_id": "d6ac059cc490ed197097a8f1a8074a75b36af5b9", "content_id": "b72a38688e9b1a590f33d61f74316716fa58fa40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 2374, "license_type": "no_license", "max_line_length": 119, "num_lines": 68, "path": "/montecarlo.R", "repo_name": "dhhagan/eps236-project", "src_encoding": "UTF-8", "text": "# Run MonteCarlo Simulations to determine the optimal settings for the model\n\nlibrary(feather)\nlibrary(MonteCarlo)\n\n# Initialize the model params\nsource(\"model_initialize.r\")\n\n# Import our run.model function that takes various params and returns the results (box-by-box concentration timeseries)\nsource(\"utils.r\")\n\n# Calculate params for SF6\nspecies = \"SF6\"\n\n# Bring in our observations for SF6\nidx.low <- 218\nidx.high <- 383\n\n# Observations go from 1995.125-2008.875\nsf6.observations <- data.frame(\n ghg.observations[,paste(species, \"box\", 1:4, sep=\".\")], \n row.names=ghg.observations[,\"Year\"])[idx.low:idx.high,]\n\n# Get the year from the index, and then average the observations by year\nsf6.observations <- cbind(sf6.observations, year=floor(as.numeric(row.names.data.frame(sf6.observations))))\n\nsf6.observations.boxed.annual.means <- aggregate(sf6.observations, list(sf6.observations$year), mean)\n\n\n# Define the output function to send to the Monte Carlo Simulation\n# Essentially, we need to give just the params and it will return the minimized values\nmonte_carlo_model <- function(tau.stratosphere, tau.hemisphere.inter, tau.hemisphere.intra, \n strat.nh.fraction) {\n \n species <- \"SF6\"\n \n # Run the model\n model.results <- run.model(species, tau.stratosphere, tau.hemisphere.inter, \n tau.hemisphere.intra, strat.nh.fraction)\n \n \n # Return the min values for all boxes\n min <- min.cost(model.results, sf6.observations.boxed.annual.means, box.no = NaN)\n #list(\"min\"=min)\n return (min)\n}\n\n# Set up the param grid to use\ntau.strat.grid <- seq(1, 10, .1)\ntau.hemi.inter.grid <- seq(1, 5, .1)\ntau.hemi.intra.grid <- seq(0.1, 1, 0.05)\nstrat.nh.grid <- seq(0.4, 0.6, 0.05)\n\ngrid_search = list(\n \"tau.stratosphere\"=tau.strat.grid,\n \"tau.hemisphere.inter\"=tau.hemi.inter.grid,\n \"tau.hemisphere.intra\"=tau.hemi.intra.grid,\n \"strat.nh.fraction\"=strat.nh.grid)\n\n# Run the Monte Carlo Simulation\nmc.results <- MonteCarlo::MonteCarlo(\n func=monte_carlo_model, \n nrep=10, \n param_list=grid_search, \n max_grid=1000000,\n time_n_test=TRUE,\n save_res=TRUE,\n ncpus = 4)\n\n" }, { "alpha_fraction": 0.7153745293617249, "alphanum_fraction": 0.7287779450416565, "avg_line_length": 36.663368225097656, "blob_id": "2729ffcfb225764f5ba8c83f344e0f98c969092f", "content_id": "f9f65ff17b316b23e56f0ffedcff99e4443b9ad5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3805, "license_type": "no_license", "max_line_length": 375, "num_lines": 101, "path": "/README.md", "repo_name": "dhhagan/eps236-project", "src_encoding": "UTF-8", "text": "# EPS236 Final Class Project\nEPS236 project source for David Hagan (MIT), Chris Lim (MIT), and Sidhant Pai (MIT)\n\n## Objective\n\nSet up a 5-box model and minimize the residuals between experimental results from EDGAR and that of our model output.\n\n## Requirements\n\nThe following R packages are required:\n\n * feather\n * MonteCarlo\n * dplyr\n * progress\n \nThe 
following Python packages are required to make the plots:\n\n * seaborn\n * matplotlib\n * pandas\n * feather\n\n## Getting Started\n\nWe have attempted to minimize the error in the model in 3 different ways:\n \n 1. Iterative grid-search\n 2. Monte Carlo using the MonteCarlo package\n 3. Monte Carlo as written by us\n \nEach method is contained within its own file. Below, we show how to run each file.\n\n### Iterative Grid Search\n\nThe run file for this method is `iterative_gridsearch.R`. To run the file, you should be able to run it directly from within RStudio or from the R command line. The output is three files, all in feather format.\n\n 1. **results/iterative_gridsearch_results.feather**\n \n Contains results for each individual run in a table containing columns for each of the optimized parameters, as well as the sum of the squared residuals.\n \n 2. **results/model_results_minimized.feather**\n \n Contains the final model results from the run with the minimized residuals as a timeseries.\n \n 3. **results/sf6_emissions.feather**\n \n Contains the observed SF6 emissions as a timeseries (for plotting purposes).\n\n### Monte Carlo using the MonteCarlo package\n\nWe found a MonteCarlo package; however, it appears to work more like an iterative grid-search than a true Monte Carlo simulation. We tried parsing the source, but it was written pretty horribly... Regardless, running `montecarlo.R` outputs the following files:\n\nNot updated...\n\n### Monte Carlo from Scratch\n\nWe weren't totally sure the MonteCarlo package was working the way we thought it should, so we took matters into our own hands and wrote the file `monte_carlo_mit.R`. This version is a Monte Carlo algorithm we wrote that picks values from input distributions for each parameter. You can change the number of iterations through the `num.iterations` variable. There are two output files:\n\n 1. **results/mc_results_by_iter.feather**\n \n Contains the run-by-run results of the Monte Carlo simulation with parameters and results.\n \n 2. 
**results/mc_results_final.feather**\n \n Contains the final model results as found by optimizing the parameters.\n\n\n## Final Model Parameters\n\nAfter running a Monte Carlo for 1M iterations, we arrived at our optimal solution:\n\n| Variable | Result |\n|:--------:|:------:|\n| `t.strat` | 5.21 yr |\n| `t.hemi.inter` | 0.774 |\n| `t.hemi.intra` | 0.150 |\n| `strat.frac` | 0.484 |\n\n\n## Variable Definitions\n\n| Variable Name | Definition |\n|:-------------:|:-----------|\n|`tau.global.lifetimes.years`| Global mean photochemical lifetime for several species|\n|`ghg.observations`| NASA GHG observations for several species by box for many years (>1977)|\n|`loss.monthly`|Monthly loss rate for several compounds by box per month|\n|`sf6.sources`|Yearly SF6 emissions by box from EDGAR|\n|`time.range.sf6.sources`|Vector with the first and last year of EDGAR sources |\n|`mass.global`|Total mass of the atmosphere in units of kg|\n|`mass.stratosphere`|Total mass of the stratosphere in kg|\n|`molecular.wt.all`|Molecular weight of various species in terms of just terms of Carbon and Nitrogen|\n\n## Description of Data Files\n\n| Filename | Description |\n|:---------|:------------|\n| data/tglobal_all.txt | Table of global lifetimes in years |\n| data/ghg.GMD.conc.tbl | NASA Greenhouse Gas emissions |\n| data/loss.monthly.tbl | Box-by-box losses as a function of month |\n| data/SF6.emissions.bybox.tbl | SF6 Emissions by box from 1970-2008 |\n\n" }, { "alpha_fraction": 0.6287251710891724, "alphanum_fraction": 0.6572847962379456, "avg_line_length": 33.028167724609375, "blob_id": "1c6c3f0e8291ba1eeb99845f1cf8969faa58f522", "content_id": "9638d90a197731af679b3ea0a3bb4acc1afa0108", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4832, "license_type": "no_license", "max_line_length": 110, "num_lines": 142, "path": "/visualize_results.py", "repo_name": "dhhagan/eps236-project", "src_encoding": "UTF-8", "text": "\"\"\"Visualize the results using Python\n\"\"\"\n\n# Make imports\nimport feather\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport math\nfrom datetime import datetime\n\n# Set default seaborn parameters\nsns.set(\"talk\", style='ticks', palette='dark', font_scale=1.5, color_codes=True)\n\n# Define the color palette as a list of RGB values\ncp = sns.color_palette()\n\n# Simple function to plot the timeseries\ndef tsplot(modeled, observed, max_y_ticks=None, alpha=1, title=''):\n \"\"\"Plot the observed sf6 timeseries against modeled results.\n \"\"\"\n fig, ax = plt.subplots(1, figsize=(14, 10))\n\n # Plot the modeled results\n ax.plot(modeled['box.1'], lw=4, label='North-High Lat.', c=cp[0], alpha=alpha)\n ax.plot(modeled['box.2'], lw=4, label='North Tropics', c=cp[1], alpha=alpha)\n ax.plot(modeled['box.3'], lw=4, label='South Tropics', c=cp[2], alpha=alpha)\n ax.plot(modeled['box.4'], lw=4, label='South-High Lat.', c=cp[3], alpha=alpha)\n ax.plot(modeled['box.5'], lw=4, label='Stratosphere', c=cp[4], alpha=alpha)\n\n ax.legend(loc='best')\n\n ax.plot(observed['SF6.box.1'], 'o', c=cp[0])\n ax.plot(observed['SF6.box.2'], 'o', c=cp[1])\n ax.plot(observed['SF6.box.3'], 'o', c=cp[2])\n ax.plot(observed['SF6.box.4'], 'o', c=cp[3])\n\n if max_y_ticks is not None:\n yloc = plt.MaxNLocator(max_y_ticks)\n ax.yaxis.set_major_locator(yloc)\n\n ax.set_ylabel(\"SF6 (ppt)\", fontsize=24)\n ax.set_xlabel(\"\")\n ax.set_title(title, y=1.05, fontsize=36)\n\n sns.despine(offset=5)\n\n return ax\n\n# 
Build an array of each individual pulsed response to plot\npulses = []\npulses.append((\"North-High Lat.\", \"results/pulse.box1.feather\"))\npulses.append((\"North Tropics\", \"results/pulse.box2.feather\"))\npulses.append((\"South Tropics\", \"results/pulse.box3.feather\"))\npulses.append((\"South-High Lat.\", \"results/pulse.box4.feather\"))\n\nfor pulse in pulses:\n df = feather.read_dataframe(pulse[1])\n\n # Plot the data\n fig, ax = plt.subplots(1, figsize=(14, 10))\n\n ax.plot(df['time'], df['box.1'], c=cp[0], lw=6, label=\"North-High Lat.\")\n ax.plot(df['time'], df['box.2'], c=cp[1], lw=6, label=\"North Tropics\")\n ax.plot(df['time'], df['box.3'], c=cp[2], lw=6, label=\"South Tropics\")\n ax.plot(df['time'], df['box.4'], c=cp[3], lw=6, label=\"South-High Lat.\")\n ax.plot(df['time'], df['box.5'], c=cp[4], lw=6, label=\"Stratosphere\")\n\n sns.despine(offset=5)\n\n ax.set_ylabel(\"SF6 (ppt)\", fontsize=24)\n ax.legend(loc='best')\n\n yloc = plt.MaxNLocator(3)\n ax.yaxis.set_major_locator(yloc)\n\n\n# Next, we want to plot the grid-searched results\n# Set up a dataframe with the SF6 Actual Measurements\nsf6 = feather.read_dataframe(\"results/sf6_emissions.feather\")\ngs = feather.read_dataframe(\"results/gridsearch_optimal.feather\")\nsw = feather.read_dataframe(\"results/sw-results.feather\")\n\n# Convert the year to an actual datetime\nsf6['year'] = sf6['year'].apply(lambda x: datetime(math.floor(x), 1, 1))\ngs['year'] = gs['year'].apply(lambda x: datetime(math.floor(x), 1, 1))\nsw['year'] = sw['year'].apply(lambda x: datetime(math.floor(x), 1, 1))\n\n# Set the datetime as the index\nsf6.set_index(\"year\", inplace=True)\ngs.set_index(\"year\", inplace=True)\nsw.set_index(\"year\", inplace=True)\n\n# Make the Grid Search Plot\nax = tsplot(gs, sf6, max_y_ticks=3)\n\n# Make the Grid Search Plot\nax = tsplot(sw, sf6, max_y_ticks=3)\n\n# Make the Monte Carlo Results\nmod = feather.read_dataframe(\"results/mc_results_1M_final.feather\")\nres = feather.read_dataframe(\"results/mc_results_1M_iters.feather\")\n\n# Convert the year to an actual timestamp\nmod['year'] = mod['year'].apply(lambda x: datetime(math.floor(x), 1, 1))\n\n# Convert the index to a datetime\nmod.set_index(\"year\", inplace=True)\n\nax = tsplot(mod, sf6)\n\n# Plot the KDE of various Parameters\nbest = res.sort_values('min.box.all').head(1000)\nbest.tail()\n\n\nwith sns.axes_style(\"white\"):\n f, ax = plt.subplots(2, 2, figsize=(14, 10))\n\n sns.despine(left=True)\n\n # Plot the t.strat distribution\n sns.distplot(best['t.strat'], kde=True, hist=False, color='b', ax=ax[0, 0], kde_kws=dict(shade=True))\n\n # Plot the t.hemi.inter distribution\n sns.distplot(best['t.hemi.inter'], kde=True, hist=False, color='g', ax=ax[0, 1], kde_kws=dict(shade=True))\n\n # Plot the t.hemi.inter distribution\n sns.distplot(best['t.hemi.intra'], kde=True, hist=False, color='r', ax=ax[1, 0], kde_kws=dict(shade=True))\n\n # Plot the t.hemi.inter distribution\n sns.distplot(best['strat.frac'], kde=True, hist=False, color='m', ax=ax[1, 1], kde_kws=dict(shade=True))\n\n ax[0,0].set_ylim([0, ax[0,0].get_ylim()[-1]])\n ax[0,1].set_ylim([0, ax[0,1].get_ylim()[-1]])\n ax[1,0].set_ylim([0, ax[1,0].get_ylim()[-1]])\n ax[1,1].set_ylim([0, ax[1,1].get_ylim()[-1]])\n\n plt.setp(ax, yticks=[])\n\n plt.tight_layout()\n" }, { "alpha_fraction": 0.6394094228744507, "alphanum_fraction": 0.7115275263786316, "avg_line_length": 47.94444274902344, "blob_id": "ff7260975d46f34a741c9d01367c1a23b7c310b6", "content_id": "5b4b5359bd78a95a68c51c2e974e91e068a3a8c6", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1761, "license_type": "no_license", "max_line_length": 112, "num_lines": 36, "path": "/model_initialize.r", "repo_name": "dhhagan/eps236-project", "src_encoding": "UTF-8", "text": "# Model Initialization\n# Read in experimental data for emissions and losses\n# Initalize global variables for mass of atmosphere, troposphere, and the individual compounds\n# All neccesary files are in the \"data\" folder\n\n## GLOBAL LIFETIMES in YEARS (REQUIRED file): vvvvvvvvvvvvvv\ntau.global.lifetimes.years <- scan(\"data/tglobal_all.txt\",skip=1)\n\n# Name the columns in the tglobal.all array\nnames(tau.global.lifetimes.years) <- scan(\"data/tglobal_all.txt\", nlines=1, what=character())\n\n# Read in NASA Greenhouse Gas Global Monitoring Devision values\nghg.observations <- read.table(\"data/ghg.GMD.conc.tbl\", header = TRUE)\n\n# Read in losses as a function of month\nloss.monthly <- read.table(\"data/loss.monthly.tbl\", header=TRUE)\n\n# EDGAR 2015 emissions of SF6 by box, 1970 to 2008; for 2008 to 2014, we will use our derived model!\n# Read in SF6 Emissions Table from EDGAR\nsf6.sources <- read.table(\"data/SF6.emissions.bybox.tbl\", header=TRUE)\n\n# Select the time range that is covered by the EDGAR SF6 Data (min, max)\ntime.range.sf6.sources <- range(sf6.sources[,\"yr\"])\n\n# Calculate the masses in each of the boxes\nmass.global <- ( 5.1e14 * 0.984e5)/9.8 #5.27e18 kg; M*g g=9.8 M=0.984/9.8 kg/m2 surface area of earth=5.1e14\nmass.stratosphere <- 0.1577 * mass.global\n\n#semi-hemi masses N-temp N-trop S-trop S-temp (subtr strat mass from each)\nmass.troposphere = (1 - c(exp(-12/7), exp(-14/7), exp(-14/7), exp(-12/7) ) ) * mass.global/4\n\n# Make molecular weights in terms of just Carbon and Nitrogen\nmolecular.wt.all <- c(28, 137.37, 32.065 + 6*18.998, 120.91, 86.47, 12, 102, 117)\n\n# Name the columns for the molecular weights vector\nnames(molecular.wt.all) <- c(\"N2O\", \"CFC11\", \"SF6\", \"CFC12\", \"HCFC22\", \"CO2\", \"HFC134A\", \"HCFC141B\")" }, { "alpha_fraction": 0.636727511882782, "alphanum_fraction": 0.6590842604637146, "avg_line_length": 39.16470718383789, "blob_id": "022138c5ca6568f1e6882449dfcdd64a0c2de00e", "content_id": "e6ec46a7438e8084d84bb1243fc9ee36fda86fe7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 10243, "license_type": "no_license", "max_line_length": 134, "num_lines": 255, "path": "/utils.r", "repo_name": "dhhagan/eps236-project", "src_encoding": "UTF-8", "text": "# Function definitions file\n# Contains the following functions:\n# 1) calc.flux.and.lifetimes\n# 2) run.model\n# 3) min.cost\n\n\n# calc.flux.and.lifetimes calculates the flux and lifetimes for a given species\ncalc.flux.and.lifetimes = function(species, tau.stratosphere=4, tau.hemisphere.inter=2, tau.hemisphere.intra=0.5,\n strat.nh.fraction=0.55, print_header=F) {\n # data is molecular.wt.all\n # Source data\n #source(\"model_initialize.r\")\n \n # Grab the mol. 
weight of 'species'\n  mol.wt.ind.species <- molecular.wt.all[species]\n  \n  # Define the photochemical lifetime (years) of 'species'\n  tau.global.ind.species <- tau.global.lifetimes.years[species]\n  \n  # Get data for 'species'\n  conc.observations <- ghg.observations[, c(\"Year\", paste(species, \"box\", 1:4, sep='.'))]\n  \n  # Get species-specific loss rates from the greater loss file (by month)\n  loss.freq.ind.species <- loss.monthly[, paste(species, \"box\", 1:5, sep=\".\")]\n  \n  # Change the rownames to be 1:12\n  rownames(loss.freq.ind.species) <- 1:12\n  \n  # Calculate the annual frequencies of photochemical loss\n  loss.freq.annual <- apply(loss.freq.ind.species, MARGIN=2, FUN=mean)\n  \n  # Define a vector with initial guesses for lifetimes\n  initial.lifetime.guesses <- c(tau.hemisphere.intra, tau.hemisphere.inter, strat.nh.fraction, tau.stratosphere)\n  \n  names(initial.lifetime.guesses) <- c(\n    deparse(substitute(tau.hemisphere.intra)),\n    deparse(substitute(tau.hemisphere.inter)),\n    deparse(substitute(strat.nh.fraction)),\n    deparse(substitute(tau.stratosphere)))\n  \n  # Definition of exchange rates in kg/yr\n  flux.midlat <- mass.troposphere[1] / tau.hemisphere.intra # Midlat-Tropics exchange flux kg/yr (box 1-2 advection)\n  flux.ns <- sum(mass.troposphere[1:2]) / tau.hemisphere.inter # north-south hemisphere exchange flux\n  \n  # Define the troposphere-to-stratosphere advection flux\n  flux.trop.to.strat <- mass.stratosphere / tau.stratosphere # mass flux of air from the troposphere to the stratosphere\n  \n  # Define advection\n  flux.adv.ind.trop.to.strat <- flux.trop.to.strat / 2. # box 2, 3 to stratosphere\n  flux.adv.strat.to.nh <- flux.trop.to.strat * strat.nh.fraction # stratosphere to box 1 \n  flux.adv.strat.to.sh <- flux.trop.to.strat * (1 - strat.nh.fraction) # strat to box 4\n  flux.adv.ntrop.to.strop <- flux.trop.to.strat * (2*strat.nh.fraction - 1) / 2 # box 2 to box 3 (watch for divide by 2)\n  \n  # compute the photochemical lifetimes to match the global lifetime and model box structure\n  mass.frac.trop <- 0.842\n  mass.frac.strat <- 1 - mass.frac.trop\n  \n  # Fudge/Estimate parameter\n  alpha <- (1/tau.global.ind.species + 1/tau.stratosphere) / \n    (1/tau.stratosphere - (1/tau.global.ind.species)*(mass.frac.trop/mass.frac.strat))\n  \n  # Effective chemical-loss lifetime in the stratosphere\n  tau.strat.eff <- tau.global.ind.species * mass.frac.strat / (mass.frac.trop * alpha + mass.frac.strat)\n  \n  header <- sprintf(\n    \"\n    Species: %s\n    Mol Wt. 
%.3f\n tglobal: %.1f\n taustrat: %.1f\n strat.nh.fract: %.2f\n tau.intrahemis: %.3f\n tau.interhemis: %.3f\n alpha: %.3f\n tstrat: %.3f\n \",\n species, mol.wt.ind.species, tau.global.ind.species, tau.stratosphere, strat.nh.fraction, \n tau.hemisphere.intra, tau.hemisphere.inter, alpha, tau.strat.eff)\n \n if (print_header == T){\n cat(header)\n }\n \n vars.to.return <- list(\n species=species, \n mol.wt=mol.wt.ind.species,\n flux.midlat=flux.midlat, \n flux.ns=flux.ns,\n flux.trop.to.strat=flux.trop.to.strat,\n flux.adv.ind.trop.to.strat=flux.adv.ind.trop.to.strat,\n flux.adv.ntrop.to.strop=flux.adv.ntrop.to.strop,\n flux.adv.strat.to.nh=flux.adv.strat.to.nh,\n flux.adv.strat.to.sh=flux.adv.strat.to.sh,\n tau.strat.eff=tau.strat.eff,\n mass.global=mass.global,\n mass.stratosphere=mass.stratosphere,\n mass.troposphere=mass.troposphere)\n \n return (vars.to.return)\n}\n\n# run.model runs a 5-box model for a specific species and returns the results in a list\nrun.model <- function(species, tau.stratosphere=4, tau.hemisphere.inter=2, tau.hemisphere.intra=0.5, strat.nh.fraction=0.55,\n DEBUG = FALSE, AVG.1.YR=TRUE) {\n # Run the model and return the matrix of box-by-box results\n # First, calculate the fluxes\n # We start in 1995 because that's where the observations begin and go to 2008 because that's where EDGAR sources end\n # EDGAR scaling factor of 1.1 is added\n vals <- calc.flux.and.lifetimes(species = species, tau.stratosphere = tau.stratosphere, tau.hemisphere.inter = tau.hemisphere.inter,\n tau.hemisphere.intra = tau.hemisphere.intra, strat.nh.fraction = strat.nh.fraction)\n \n # Set up the matrix and run the model\n \n ##################################Start definition of K matrix #########################################\n k.matrix <- matrix(nrow=5, ncol=5, data=0)\n \n k.matrix[1, 1] = -1*(vals$flux.adv.strat.to.nh + vals$flux.midlat)\n k.matrix[1, 2] = vals$flux.midlat\n k.matrix[1, 5] = vals$flux.adv.strat.to.nh\n \n k.matrix[2, 1] = vals$flux.adv.strat.to.nh + vals$flux.midlat\n k.matrix[2, 2] = -1 * (vals$flux.midlat + vals$flux.adv.ind.trop.to.strat + vals$flux.adv.ntrop.to.strop + vals$flux.ns)\n k.matrix[2, 3] = vals$flux.ns\n \n k.matrix[3, 2] = vals$flux.adv.ntrop.to.strop + vals$flux.ns\n k.matrix[3, 3] = -1 * (vals$flux.midlat + vals$flux.adv.ind.trop.to.strat + vals$flux.ns)\n k.matrix[3, 4] = vals$flux.adv.strat.to.sh + vals$flux.midlat\n \n k.matrix[4, 3] = vals$flux.midlat\n k.matrix[4, 4] = -1 * (vals$flux.adv.strat.to.sh + vals$flux.midlat)\n k.matrix[4, 5] = vals$flux.adv.strat.to.sh\n \n k.matrix[5, 2] = vals$flux.adv.ind.trop.to.strat\n k.matrix[5, 3] = vals$flux.adv.ind.trop.to.strat\n k.matrix[5, 5] = -1 * (vals$flux.adv.strat.to.nh + vals$flux.adv.strat.to.sh + (vals$mass.stratosphere / vals$tau.strat.eff))\n \n # Normalize the k-matrix to the mass in each box of our 5-box model\n # We divide by the mass in each box to convert from kg/yr to conc./yr\n k.matrix.norm <- k.matrix / c(vals$mass.troposphere, vals$mass.stratosphere)\n \n if (DEBUG == TRUE) {\n print(\"Rowsum Check\")\n print(apply(k.matrix.norm, 1, sum) / apply(k.matrix.norm, 1, max)) \n \n print(\"Colsum Checks\")\n print(apply(k.matrix.norm, 2, sum) / apply(k.matrix.norm, 2, max)) \n print(apply(k.matrix, 2, sum) / apply(k.matrix, 2, max)) \n }\n \n ## compute the eigenvalues and eigenvectors\n eigen.out <- eigen(k.matrix.norm)\n \n eigen.vals <- eigen.out$values\n eigen.vecs <- eigen.out$vectors\n lifetimes <- -1 * round(1/eigen.vals, 3)\n \n # Calculate the max tropospheric lifetime\n 
max.tropo.lifetime <- max(lifetimes[1:4])\n \n if (DEBUG == TRUE) {\n print (\"time constants (yr-1): \")\n print (lifetimes)\n print (round(eigen.vecs, 3))\n }\n \n # global.conv is the conversion factor between Gg to mole fraction in ppt (0.029 is the mol. wt of air)\n gg.to.ppt <- (1e12 * 1e9 / vals$mol.wt) / (vals$mass.global / 0.029)\n \n # First, add a fifth row (stratosphere) with zero emissions\n # Then, convert from Gg to ppt using the above-defined global.conv\n sources.sf6 <- t(cbind(sf6.sources[, 2:5], rep(0, nrow(sf6.sources))))*gg.to.ppt\n \n # Set the names of the rows and columns\n dimnames(sources.sf6) <- list(1:5, sf6.sources[,1])\n \n #note EDGAR adds 2.9 ppt bet 1995 and end 2008, but atm adds 3.23, so we should scale up total sources by 10% +-\n ## initial conditions, time range, and time step (.025 years, apprx 9 days)\n delta <- .025;\n time.range.years <- c(1995.125,2009 - delta)\n \n # Build a vector of timestamps\n timestamps <- seq(time.range.years[1],time.range.years[2], delta) ## time period to be considered for this run\n \n # Query the ghg.observations file for measurements between the start and end times\n mod.start <- ghg.observations[,\"Year\"] >= time.range.years[1]\n mod.end <- ghg.observations[,\"Year\"] <= time.range.years[2]\n \n \n # Initial conditions\n # Grab the index in ghg.observations which corresponds to the beginning of our measurements\n obs.start.idx <- min(which(mod.start))\n \n # Grab the Species box emissions observations for the 4 boxes from ghg emissions\n # Find the time delay for 1 e-fold in the troposphere\n delay <- floor(max.tropo.lifetime * 12)\n \n obs.initial <- as.numeric(ghg.observations[obs.start.idx, paste(species, \"box\", 1:4, sep=\".\")])\n obs.delayed <- as.numeric(ghg.observations[obs.start.idx + delay, paste(species, \"box\", 1:4, sep=\".\")])\n \n # Find our initial guess for the forward solver\n initial.guess <- c(obs.initial, 2*mean(obs.initial) - mean(obs.delayed))\n \n # Create a matrix of the initial guesses\n magic.matrix <- matrix(ncol=1, nrow=5, data=initial.guess)\n \n for (tstep in timestamps) {\n gradient <- (k.matrix.norm %*% magic.matrix[, ncol(magic.matrix)] + \n sources.sf6[, as.character(trunc(tstep))] / c(mass.troposphere, mass.stratosphere) *1.1* mass.global) * delta\n \n # Forward step\n new.vals <- magic.matrix[, ncol(magic.matrix)] + gradient\n \n magic.matrix <- cbind(magic.matrix, new.vals)\n }\n \n dimnames(magic.matrix) <- list(paste(\"box\", 1:5, sep='.'), c(timestamps[1] - delta, timestamps))\n \n results <- data.frame(t(magic.matrix)[-1,])\n results <- cbind(results, year=floor(as.numeric(timestamps)))\n \n if (AVG.1.YR == TRUE) {\n results <- aggregate(results, list(results$year), mean)\n }\n #year=floor(as.numeric(row.names.data.frame(sf6.observations)))\n \n return (results)\n}\n\n# min.cost is a cost function that calculates the square of the residuals and returns it by box or in total\nmin.cost <- function(model.output, obs.output, box.no=1, squared=TRUE) {\n # if box.no == NA, return total sum\n # min.cost(model.results, sf6.observations.boxed.annual.means, box.no = 4)\n x1 <- obs.output[, c(\"SF6.box.1\", \"SF6.box.2\", \"SF6.box.3\", \"SF6.box.4\")]\n x2 <- model.output[, c(\"box.1\", \"box.2\", \"box.3\", \"box.4\")]\n \n if (squared == TRUE) {\n colsums <- colSums((x2 - x1)^2)\n }\n else {\n colsums <- colSums(abs(x2 - x1))\n }\n \n if (is.nan(box.no)) {\n res <- sum(colsums)\n }\n else if (box.no == 'all') {\n res <- colsums\n }\n else {\n res <- colsums[box.no]\n }\n \n return 
(res)\n}\n\n" }, { "alpha_fraction": 0.6639326214790344, "alphanum_fraction": 0.6847705841064453, "avg_line_length": 42.786407470703125, "blob_id": "5ef49879584a5f91f348765e810de2a63dc29ec1", "content_id": "495c1f5ca6f6d6c57ffb3ccc5889166a7cd0fea6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 4511, "license_type": "no_license", "max_line_length": 113, "num_lines": 103, "path": "/iterative_gridsearch.r", "repo_name": "dhhagan/eps236-project", "src_encoding": "UTF-8", "text": "# Initialize the Model and define utility functions\n# Inititalize the Model by reading in all experimental data\n\n# Import libraries to use\nlibrary(dplyr)\nlibrary(feather)\nlibrary(progress)\n\n# Source the model initialization file (import all data, etc.)\nsource(\"model_initialize.r\")\n\n# Import our run.model function\nsource(\"utils.r\")\n\n# Define the species of interest (NOTE: SF6 is the only one that works...too many things are hard-coded)\nspecies = \"SF6\"\n\n# Bring in our observations for SF6\n# Again, we're lazy and R is dumb. Normally, we would never want to hardcode the index, but it's easier \n# since working with real datetimes in R is unbelievably complex for it being a \"good\" language to do \n# timeseries analysis in...\nidx.low <- 218\nidx.high <- 383\n\n# Grab the SF6 observations from 1995.125-2008.875\nsf6.observations <- data.frame(\n ghg.observations[,paste(species, \"box\", 1:4, sep=\".\")], \n row.names=ghg.observations[,\"Year\"])[idx.low:idx.high,]\n\n# Get the year from the index\nsf6.observations <- cbind(sf6.observations, year=floor(as.numeric(row.names.data.frame(sf6.observations))))\n\n# Average the observations by year\nsf6.observations.boxed.annual.means <- aggregate(sf6.observations, list(sf6.observations$year), mean)\n\n# stevewofsy results (sw.res) using the default params listed on the slides\n# tau.stratosphere <- 4\n# tau.hemisphere.inter <- 2\n# tau.hemisphere.intra <- 0.5\n# strat.nh.fraction <- 0.55\nsw.res <- run.model(species, 4, 2, 0.5, 0.55)\n\n# Perform a grid-search to run all possible combinations as defined by the following grids\ntau.stratosphere.list = seq(1,20,0.5)\ntau.hemisphere.inter.list = seq(1,5,0.5)\ntau.hemisphere.intra.list = seq(0.1, 1, 0.1)\nstrat.nh.fraction.list = seq(0.4, 0.6, 0.05)\n\n# Define an empty matrix that we will use to store results\nres.matrix = matrix(ncol=9, nrow=0)\n\n# determine the number of total runs to initiate the sweet progress bar\nn_runs = length(tau.stratosphere.list) * length(tau.hemisphere.inter.list) * length(tau.hemisphere.intra.list) * \n length(strat.nh.fraction.list)\n\n# Initialize the progress bar so you know how many years you will have to wait...\nprogress.bar <- progress::progress_bar$new(total=n_runs, format=\" running the codez [:bar] :percent eta: :eta\")\n\n# Perform the grid-search\nfor (t.strat in tau.stratosphere.list) {\n for (t.hemi.inter in tau.hemisphere.inter.list) {\n for (t.hemi.intra in tau.hemisphere.intra.list) {\n for (strat.frac in strat.nh.fraction.list) {\n # Run the model\n res.ind <- run.model(species, tau.stratosphere = t.strat, tau.hemisphere.inter = t.hemi.inter, \n tau.hemisphere.intra = t.hemi.intra, strat.nh.fraction = strat.frac)\n \n # Return the residuals for each box\n # NOTE: Ended up only using the min.box.all results\n min.box.1 <- min.cost(res.ind, sf6.observations.boxed.annual.means, box.no = 1)\n min.box.2 <- min.cost(res.ind, sf6.observations.boxed.annual.means, box.no = 2)\n min.box.3 <- min.cost(res.ind, 
sf6.observations.boxed.annual.means, box.no = 3)\n min.box.4 <- min.cost(res.ind, sf6.observations.boxed.annual.means, box.no = 4)\n min.box.all <- min.cost(res.ind, sf6.observations.boxed.annual.means, box.no = NaN)\n \n res.matrix <- rbind(res.matrix, c(t.strat, t.hemi.inter, t.hemi.intra, strat.frac, min.box.1, min.box.2, \n min.box.3, min.box.4, min.box.all))\n \n # Update the progress bar\n progress.bar$tick()\n }\n }\n }\n}\n\n# Store the results as a data.frame rather than a matrix\nres.df <- data.frame(res.matrix)\n\n# rename the columns in the data.frame\ncolnames(res.df) <- c(\"t.strat\", \"t.hemi.inter\", \"t.hemi.intra\", \"strat.frac\", \"min.box.1\", \n \"min.box.2\", \"min.box.3\", \"min.box.4\", \"min.box.all\")\n\n# Find the row with the lowest min.box.all value\nmin.params <- arrange(res.df, min.box.all)[1,]\n\n# Obtain the actual model results with the minimum params\nmin.results <- run.model(species, min.params$t.strat, min.params$t.hemi.inter, min.params$t.hemi.intra,\n min.params$strat.frac)\n\n# Save the data in feather format so we can plot them later in Python\nfeather::write_feather(res.df, \"results/iterative_gridsearch_results.feather\")\nfeather::write_feather(min.results, \"results/model_results_minimized.feather\")\nfeather::write_feather(sf6.observations.boxed.annual.means, \"results/sf6_emissions.feather\")\n\n" }, { "alpha_fraction": 0.6942915916442871, "alphanum_fraction": 0.7162998914718628, "avg_line_length": 34.0361442565918, "blob_id": "8c66ba85118230454b76e20303be00a5064d6858", "content_id": "4919a515dc212736900baaa4a26a8cdee12d904d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 2908, "license_type": "no_license", "max_line_length": 119, "num_lines": 83, "path": "/monte_carlo_mit.R", "repo_name": "dhhagan/eps236-project", "src_encoding": "UTF-8", "text": "# Our attempt at a Monte Carlo Simulation\n# Run MonteCarlo Simulations to determine the optimal settings for the model\n\nlibrary(dplyr)\nlibrary(feather)\nlibrary(progress)\n\n# Initialize the model params\nsource(\"model_initialize.r\")\n\n# Import our run.model function that takes various params and returns the results (box-by-box concentration timeseries)\nsource(\"utils.r\")\n\n# Calculate params for SF6\nspecies = \"SF6\"\n\n# Define the number of iterations to use in the Monte Carlo simulation\nnum.iterations <- 100000\n\n# Bring in our observations for SF6\nidx.low <- 218\nidx.high <- 383\n\n# Observations go from 1995.125-2008.875\nsf6.observations <- data.frame(\n ghg.observations[,paste(species, \"box\", 1:4, sep=\".\")], \n row.names=ghg.observations[,\"Year\"])[idx.low:idx.high,]\n\n# Get the year from the index, and then average the observations by year\nsf6.observations <- cbind(sf6.observations, year=floor(as.numeric(row.names.data.frame(sf6.observations))))\n\nsf6.observations.boxed.annual.means <- aggregate(sf6.observations, list(sf6.observations$year), mean)\n\n# Set up our version of a monte carlo where we choose values from a random sequence thousands of time.\n\n\n# Set up the progress bar\nprogress.bar <- progress::progress_bar$new(total=num.iterations, format=\" running the codez [:bar] :percent eta: :eta\")\n\n# Set up an empty matrix for results\nres.matrix = matrix(ncol=5, nrow=0)\n\n# Min and Max values\nt.strat.min <- 1\nt.strat.max <- 15\nt.hemi.inter.min <- 0.01\nt.hemi.inter.max <- 2.0\nt.hemi.intra.min <- 0.01\nt.hemi.intra.max <- 0.5\nstrat.frac.min <- 0.2\nstrat.frac.max <- 0.8\n\nfor (i in 1:num.iterations) 
{\n # Get random values between min and max for each param\n t.strat <- runif(1, min=t.strat.min, max=t.strat.max)\n t.hemi.inter <- runif(1, min=t.hemi.inter.min, max=t.hemi.inter.max)\n t.hemi.intra <- runif(1, min=t.hemi.intra.min, max=t.hemi.intra.max)\n strat.frac <- runif(1, min=strat.frac.min, max=strat.frac.max)\n \n # Run the model\n res.ind <- run.model(species, t.strat, t.hemi.inter, t.hemi.intra, strat.frac)\n \n min.box.all <- min.cost(res.ind, sf6.observations.boxed.annual.means, box.no = NaN)\n \n res.matrix <- rbind(res.matrix, c(t.strat, t.hemi.inter, t.hemi.intra, strat.frac, min.box.all))\n \n # Update the progress bar\n progress.bar$tick()\n}\n\nres.df <- data.frame(res.matrix)\ncolnames(res.df) <- c(\"t.strat\", \"t.hemi.inter\", \"t.hemi.intra\", \"strat.frac\", \"min.box.all\")\n\n# Find the row with the lowest min.box.all value\nmin.params <- arrange(res.df, min.box.all)[1,]\n\n# Obtain the actual model results with the minimum params and write to a feather file\nmin.results <- run.model(species, min.params$t.strat, min.params$t.hemi.inter, min.params$t.hemi.intra,\n min.params$strat.frac)\n\n# Write results to file\nfeather::write_feather(res.df, \"results/mc_results_by_iter.feather\")\nfeather::write_feather(min.results, \"results/mc_results_final.feather\")\n" } ]
7
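The `monte_carlo_mit.R` script in the record above boils down to a simple random-search loop: draw each parameter uniformly from a fixed range, run the box model, score the run by the sum of squared residuals against the SF6 observations, and keep the best draw. Below is a minimal Python sketch of that same loop. The parameter ranges are copied from the R script, but `run_model` here is a toy stand-in (not the real 5-box model from `utils.r`), so the numbers it produces are meaningless; it exists only so the loop runs end to end.

```python
import random

# Parameter ranges copied from monte_carlo_mit.R
RANGES = {
    "t.strat": (1.0, 15.0),
    "t.hemi.inter": (0.01, 2.0),
    "t.hemi.intra": (0.01, 0.5),
    "strat.frac": (0.2, 0.8),
}

def run_model(params):
    """Stand-in for run.model() in utils.r -- NOT the real box model.

    Returns a fake 4-box concentration vector so the search loop runs.
    """
    base = params["t.strat"] + params["t.hemi.inter"]
    return [base * (1 + 0.1 * i) * params["strat.frac"] for i in range(4)]

def cost(modeled, observed):
    # Sum of squared residuals over all boxes, as in min.cost(..., box.no=NaN)
    return sum((m - o) ** 2 for m, o in zip(modeled, observed))

def monte_carlo(observed, iterations=10_000, seed=0):
    rng = random.Random(seed)
    best_score, best_params = float("inf"), None
    for _ in range(iterations):
        # Sample every parameter independently, like runif() in the R script
        params = {k: rng.uniform(lo, hi) for k, (lo, hi) in RANGES.items()}
        score = cost(run_model(params), observed)
        if score < best_score:
            best_score, best_params = score, params
    return best_score, best_params

if __name__ == "__main__":
    print(monte_carlo(observed=[3.0, 3.3, 3.6, 3.9]))
```

As in the R version, the loop keeps only the best parameter set, so memory stays constant no matter how many iterations you run.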
Yelgors/TestPython
https://github.com/Yelgors/TestPython
4a831a4da499901243c80e27df7c51efb1399721
99f7e91f6cf0ef88d61d24c992c0f34544c7fda1
81d6e6112b65609884514d0dfd0a2cb32d74c16a
refs/heads/master
2016-09-19T18:27:46.786513
2016-08-21T02:02:15
2016-08-21T02:02:15
66,068,947
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.46786633133888245, "alphanum_fraction": 0.48071980476379395, "avg_line_length": 18.36842155456543, "blob_id": "930540502c2d63e01081709aef1be499d8a6bf1a", "content_id": "837ea7aff6e4ae23a26914852f9b3324faf7b6f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 397, "license_type": "no_license", "max_line_length": 55, "num_lines": 19, "path": "/reLastName/renameLastName.py", "repo_name": "Yelgors/TestPython", "src_encoding": "UTF-8", "text": "import os,sys\r\n\r\nGIF = '.gif'\r\nTXT = '.txt'\r\n\r\n\r\n\r\n\r\ndef reLastName(a):\r\n if isinstance(a,str):\r\n for x in os.listdir(\".\"):\r\n if x==os.path.split(sys.argv[0])[1]:\r\n continue\r\n os.rename(x,os.path.splitext(x)[0]+a)\r\n print(x+ ' --> ' +os.path.splitext(x)[0]+a)\r\n return '修改成功'\r\n else:return 0\r\n\r\nprint(reLastName(GIF))\r\n\r\n" } ]
1
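`renameLastName.py` above renames every file in the working directory to a new extension, skipping the running script itself. A slightly more defensive version of the same pattern (a sketch, not part of the repository; `rename_extensions` and its `dry_run` flag are names introduced here) also skips subdirectories and previews changes before touching anything:

```python
import os
import sys

def rename_extensions(directory: str, new_ext: str, dry_run: bool = True) -> int:
    """Give every regular file in `directory` the extension `new_ext`.

    Skips the running script itself, mirroring renameLastName.py above.
    With dry_run=True (the default) it only prints the planned renames.
    Returns the number of files (to be) renamed.
    """
    this_script = os.path.basename(sys.argv[0])
    count = 0
    for name in os.listdir(directory):
        path = os.path.join(directory, name)
        # Never rename the script itself, and leave directories alone
        if name == this_script or not os.path.isfile(path):
            continue
        root, _old_ext = os.path.splitext(path)
        if dry_run:
            print(name, "-->", os.path.basename(root + new_ext))
        else:
            os.rename(path, root + new_ext)
        count += 1
    return count

if __name__ == "__main__":
    print(rename_extensions(".", ".gif"), "files would be renamed")
```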
vladimirsvsv77/mlstm_flask
https://github.com/vladimirsvsv77/mlstm_flask
f8cb9f92ff2ddc332c9759588202b779d68f0582
0a12fcd36887aa64025b838492dfc0196f9e9c59
e97a535e84adeba93dc595798e23cb7abf4391fc
refs/heads/master
2021-04-15T08:37:35.049025
2019-02-27T21:10:26
2019-02-27T21:10:26
126,685,097
7
0
null
null
null
null
null
[ { "alpha_fraction": 0.6355000138282776, "alphanum_fraction": 0.6470000147819519, "avg_line_length": 31.770492553710938, "blob_id": "b0c8d781cc8e9167f1648b98cf84da37600f8614", "content_id": "83664021e969a5b2c2836de712e6d8330d0b4be7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2000, "license_type": "no_license", "max_line_length": 90, "num_lines": 61, "path": "/demo.py", "repo_name": "vladimirsvsv77/mlstm_flask", "src_encoding": "UTF-8", "text": "__author__ = 'VladimirSveshnikov'\nfrom sentiment_classifier import SentimentClassifier\nfrom codecs import open\nimport time\nfrom flask import Flask, render_template, request, jsonify\nimport rake\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\n\nprint (\"Preparing classifier\")\nstart_time = time.time()\nclassifier = SentimentClassifier()\nprint (\"Classifier is ready\")\nprint (time.time() - start_time, \"seconds\")\nrake_object = rake.Rake(\"SmartStoplist.txt\")\n\n\[email protected](\"/sentiment-demo\", methods=[\"POST\", \"GET\"])\ndef index_page(text=\"\", prediction_message=\"\"):\n if request.method == \"POST\":\n text = request.form[\"text\"]\n logfile = open(\"ydf_demo_logs.txt\", \"a\", \"utf-8\")\n print (text)\n print (\"<response>\", file=logfile)\n print (text, file=logfile)\n prediction_message, score = classifier.get_prediction_message(text)\n print (prediction_message, ', ', score)\n print (prediction_message, file=logfile)\n print (\"</response>\", file=logfile)\n logfile.close()\n\t\t\n return render_template('hello.html', text=text, prediction_message=prediction_message)\n\n\[email protected](\"/sentiment-api\", methods=[\"GET\"])\ndef api(text=\"\", prediction_message=\"\"):\n if request.method == \"GET\":\n text = request.args.get('text')\n prediction_message, class_pred, score = classifier.get_prediction_message(text)\n \n return jsonify({'class_pred': class_pred, 'score': str(score)}) \n\n\[email protected](\"/keywords-api\", methods=[\"GET\"])\ndef keywords(text=\"\", prediction_message=\"\"):\n if request.method == \"GET\":\n text = request.args.get('text')\n keywords = rake_object.run(text)\n \n return jsonify({'keywords': keywords}) \n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=80, debug=False)\n \n # run with ssl\n # openssl req -x509 -newkey rsa:4096 -nodes -out cert.pem -keyout key.pem -days 365\n # app.run(host='0.0.0.0', port=80, debug=False, ssl_context=('cert.pem', 'key.pem')) \n" }, { "alpha_fraction": 0.7446393966674805, "alphanum_fraction": 0.7621832489967346, "avg_line_length": 23.428571701049805, "blob_id": "51a584b351ff9e0489283314db9b4b18cf7f7681", "content_id": "146b8d6687d62cbc0606efb2d173e12587b9495b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 513, "license_type": "no_license", "max_line_length": 181, "num_lines": 21, "path": "/README.md", "repo_name": "vladimirsvsv77/mlstm_flask", "src_encoding": "UTF-8", "text": "# Sentiment Neuron deminstration on Flask\n\nRequires flask, tensorflow, numpy, pandas, sklearn and python 3.5.\n\nA detailed description of the model is here: [https://github.com/openai/generating-reviews-discovering-sentiment](https://github.com/openai/generating-reviews-discovering-sentiment)\n\n## Usage\n\nRun the following command:\n\n```\ngit clone https://github.com/vladimirsvsv77/mlstm_flask.git\ncd mlstm_flask\nsudo python3 demo.py\n```\n\nThen you can open it in the 
browser:\n\n```\nhttp://0.0.0.0/sentiment-demo\n```\n" } ]
2
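Besides the browser demo, `demo.py` in the record above exposes two JSON routes: `/sentiment-api` and `/keywords-api`, both of which read the input from a `text` query parameter. A small client sketch is below, assuming the Flask server is running at the host/port from `app.run` and that `requests` is installed (it is not among the project's listed dependencies):

```python
import requests

BASE_URL = "http://0.0.0.0"  # adjust to wherever demo.py is actually running

def get_sentiment(text: str) -> dict:
    # /sentiment-api returns JSON with `class_pred` and `score` fields
    resp = requests.get(f"{BASE_URL}/sentiment-api", params={"text": text})
    resp.raise_for_status()
    return resp.json()

def get_keywords(text: str) -> dict:
    # /keywords-api returns JSON with a `keywords` field (RAKE output)
    resp = requests.get(f"{BASE_URL}/keywords-api", params={"text": text})
    resp.raise_for_status()
    return resp.json()

if __name__ == "__main__":
    print(get_sentiment("I absolutely loved this movie"))
    print(get_keywords("Deep learning models can learn sentiment from raw text"))
```

Passing the text via `params` lets `requests` handle URL encoding, which matters once the input contains spaces or punctuation.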
giuscond/TombolaPC
https://github.com/giuscond/TombolaPC
c45f6408f9a0e14b91444fe3b8e6330e1db2a86b
92c67994c889049eabd1b160d9a24a2eaa7b43d8
4e44b68cf2bb39e9aa86074a2c6faed6f1350532
refs/heads/master
2023-02-12T11:03:32.092141
2021-01-09T19:41:56
2021-01-09T19:41:56
328,232,462
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.774108350276947, "alphanum_fraction": 0.774108350276947, "avg_line_length": 35.0476188659668, "blob_id": "bd60fdd58c497a9a210c5e2c72628bbc872e426e", "content_id": "490db37e796531337c1b90423108c963bd91357c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 761, "license_type": "no_license", "max_line_length": 146, "num_lines": 21, "path": "/README.MD", "repo_name": "giuscond/TombolaPC", "src_encoding": "UTF-8", "text": "# TombolaPC\n## Cosa è\nTombolaPC e' un piccolo programma scritto in Python per fornire un Tombolone digitale sia con estrazione manuale sia con estrazione al computer.\nL'idea base è di eseguirlo su un Raspberry Pi collegato alla TV per animare le partite a Tombola con amici e parenti durante il periodo natalizio.\n\n## Avvio\nE' necessario Python installato.\nInoltre bisogna installare la libreria **numpy** con il comando:\n```\npip install numpy\n```\nSuccessivamente è possibile eseguire **TombolaPC**:\n```\npy tombolapc.py\n```\n\n## Modalita' disponibili\nAll'avvio è richiesta la scelta della modalita' di gioco:\n\n- **Modalita' Tombolone**: i numeri verranno inseriti manualmente;\n- **Modalita' Estrazione**: i numeri saranno estratti casualmente dal programma.\n" }, { "alpha_fraction": 0.47793567180633545, "alphanum_fraction": 0.49788081645965576, "avg_line_length": 30.527559280395508, "blob_id": "307b093ba8cdf581a4aca0538f36b850e95ac973", "content_id": "a4d629ec72ad7d9010cfd2fc7198fd504c45e845", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4017, "license_type": "no_license", "max_line_length": 82, "num_lines": 127, "path": "/tombolapc.py", "repo_name": "giuscond/TombolaPC", "src_encoding": "UTF-8", "text": "# !/usr/bin/python\n#\n# TombolaPC\n\n\nimport numpy as np\nfrom numpy import random\n\nm = np.zeros( (9,10) , dtype=int ) # Matrice del tombolone\nnumero = int(0) # Numero estratto\nnestratti=[0,0,0,0,0] # Ultimi 5 numeri in memoria\n\n# Funzione stampa a schermo il Tombolone\ndef stampa():\n for i in range(9):\n if i == 0 or i == 3 or i == 6:\n print(\"-----------------------------------\")\n print(end=\"| \") \n if i == 0: # If correttivo per le caselle a singola cifra (DA MIGLIORARE)\n for j in range(10):\n if j != 4:\n if m[i][j] == 0:\n print(\"__\", end=\" \")\n else:\n print(\"_\", m[i][j], sep=\"\", end=\" \")\n elif j == 4:\n if m[i][j] == 0:\n print(\"__\", end=\" | \")\n else:\n print(\"_\", m[i][j], sep=\"\", end=\" | \")\n # Stampa tutte le altre cifre\n else:\n for j in range(10):\n if j != 4:\n if m[i][j] == 0:\n print(\"__\", end=\" \")\n else:\n print(m[i][j], end=\" \")\n elif j == 4:\n if m[i][j] == 0:\n print(\"__\", end=\" | \")\n else:\n print(m[i][j], end=\" | \")\n print(end=\"|\\n\")\n print(\"-----------------------------------\")\n\n\n# Funzione numero immesso dall'utente. 
Se già presente il numero, rimuovilo\ndef estratto():\n # Aggiungi numero alla matrice\n global numero\n numero = input(\"Prossimo numero estratto: \")\n numero = int(numero)\n if numero>0 and numero<=90:\n if numero%10 == 0: # Calcola la posizione dei multipli di 10 nella matrice\n a=int(numero / 10)-1\n b=9\n else: # Calcola la posizione di tutti gli altri numeri\n a=int(numero / 10)\n b=(numero%10)-1\n # Controlla se il numero è presente, altrimenti rimuovilo\n if m[a][b]==0:\n m[a][b]=numero\n else:\n m[a][b]=0\n # Esci con 999\n# elif numero == 999:\n# exit(0)\n # Se il numero non è valido\n else: # Messaggio d'errore se il numero non è valido\n print(\"Numero non valido. Riprova.\")\n estratto()\n\n# Funzione che stampa e annota l'ultimo numero estretto\ndef ultimiestratti(): \n for i in range(4):\n nestratti[i]=nestratti[i+1]\n nestratti[4] = numero\n print(\"Ultimi numeri estratti:\", nestratti, sep=\" \", end=\"\\n\")\n\n# Funzione che genera un numero casuale non ancora estratto\ndef rand():\n while True:\n global numero\n numero = random.randint(1,91)\n if numero%10 == 0: # Calcola la posizione dei multipli di 10 nella matrice\n a=int(numero / 10)-1\n b=9\n else: # Calcola la posizione di tutti gli altri numeri\n a=int(numero / 10)\n b=(numero%10)-1 \n if m[a][b]==0: # Controlla se il numero è presente, altrimenti ritenta\n m[a][b]=numero\n print(\"Estratto il numero: \", numero)\n break\n\n\n# Modalita' 2: Il computer estrae un numero\ndef mod_estrazione():\n while True:\n stampa()\n ultimiestratti()\n print(\"Estratto il numero: \", numero)\n input(\"Premi INVIO per estranne un nuovo numero: \")\n rand()\n\n# Modalita' 1: il numero estratto va immesso manualmente\ndef mod_tombolone():\n while True:\n stampa()\n ultimiestratti()\n estratto()\n\n# Fine funzioni\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n\n# Inizio di TombolaPC\n\nprint(\"Benvenuto su TOMBOLAPC!\\n Puoi scegliere due modalita' di gioco:\")\nprint(\"- 1 Modalita' Tombolone: i numeri verranno inseriti manualmente;\")\nprint(\"- 2 Modalita' Estrazione: i numeri saranno estratti dal programma.\")\nc = input (\"\\nDigita la modalità di gioco: \")\nc = int(c)\nif c == 1:\n mod_tombolone()\nelif c == 2:\n mod_estrazione()\n\n\n\n\n\n\n\n" } ]
2
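The trickiest part of `tombolapc.py` above is the arithmetic that places a drawn number on the 9x10 board, with multiples of 10 landing in the last column of the previous row; the same expressions appear in both `estratto()` and `rand()`. Pulling that mapping into a standalone helper makes the corner cases easy to check. This is a sketch; `cell_for_number` is a name introduced here, not a function in the repository:

```python
def cell_for_number(n: int) -> tuple[int, int]:
    """Map a tombola number (1-90) to its (row, col) on the 9x10 board.

    Same arithmetic as estratto() and rand() in tombolapc.py above.
    """
    if not 1 <= n <= 90:
        raise ValueError("tombola numbers run from 1 to 90")
    if n % 10 == 0:
        return n // 10 - 1, 9  # multiples of 10 close out their row
    return n // 10, n % 10 - 1

# Quick sanity checks of the corner cases
assert cell_for_number(1) == (0, 0)
assert cell_for_number(10) == (0, 9)
assert cell_for_number(11) == (1, 0)
assert cell_for_number(90) == (8, 9)
```

Having the mapping in one place would also remove the duplication between the manual-entry and random-draw paths.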
chycoz21/dumbways
https://github.com/chycoz21/dumbways
b2227328eb23da2748a740b80e165ba496e633c1
acd1a6f2f33e3730f856a352311f89be62a982fa
bf79214d596a901eb4ed6fd0b1c9698c5982f40b
refs/heads/master
2023-01-18T14:09:47.466627
2020-11-28T14:30:35
2020-11-28T14:30:35
316,748,518
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5859484672546387, "alphanum_fraction": 0.6351287961006165, "avg_line_length": 20.565656661987305, "blob_id": "01028e65a7d2e6ae8e1b60909b6451beec17125b", "content_id": "95bcd6783577eb881dd8ac4d16bc592e3b593c36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 2135, "license_type": "no_license", "max_line_length": 79, "num_lines": 99, "path": "/soal 4/regusers.sql", "repo_name": "chycoz21/dumbways", "src_encoding": "UTF-8", "text": "-- phpMyAdmin SQL Dump\n-- version 4.8.3\n-- https://www.phpmyadmin.net/\n--\n-- Host: 127.0.0.1\n-- Waktu pembuatan: 28 Nov 2020 pada 14.57\n-- Versi server: 10.1.37-MariaDB\n-- Versi PHP: 7.2.12\n\nSET SQL_MODE = \"NO_AUTO_VALUE_ON_ZERO\";\nSET AUTOCOMMIT = 0;\nSTART TRANSACTION;\nSET time_zone = \"+00:00\";\n\n\n/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;\n/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;\n/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;\n/*!40101 SET NAMES utf8mb4 */;\n\n--\n-- Database: `regusers`\n--\n\n-- --------------------------------------------------------\n\n--\n-- Struktur dari tabel `admin`\n--\n\nCREATE TABLE `admin` (\n `id` int(11) NOT NULL,\n `username` varchar(25) NOT NULL,\n `password` varchar(25) NOT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n\n--\n-- Dumping data untuk tabel `admin`\n--\n\nINSERT INTO `admin` (`id`, `username`, `password`) VALUES\n(1, 'admin', 'admin');\n\n-- --------------------------------------------------------\n\n--\n-- Struktur dari tabel `users`\n--\n\nCREATE TABLE `users` (\n `userid` int(11) NOT NULL,\n `username` varchar(20) NOT NULL,\n `description` varchar(50) NOT NULL,\n `userprofile` varchar(200) NOT NULL\n) ENGINE=MyISAM DEFAULT CHARSET=latin1;\n\n--\n-- Dumping data untuk tabel `users`\n--\n\nINSERT INTO `users` (`userid`, `username`, `description`, `userprofile`) VALUES\n(56, 'eza1', 'test1', '494024.jpg');\n\n--\n-- Indexes for dumped tables\n--\n\n--\n-- Indeks untuk tabel `admin`\n--\nALTER TABLE `admin`\n ADD PRIMARY KEY (`id`);\n\n--\n-- Indeks untuk tabel `users`\n--\nALTER TABLE `users`\n ADD PRIMARY KEY (`userid`);\n\n--\n-- AUTO_INCREMENT untuk tabel yang dibuang\n--\n\n--\n-- AUTO_INCREMENT untuk tabel `admin`\n--\nALTER TABLE `admin`\n MODIFY `id` int(11) NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=2;\n\n--\n-- AUTO_INCREMENT untuk tabel `users`\n--\nALTER TABLE `users`\n MODIFY `userid` int(11) NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=57;\nCOMMIT;\n\n/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;\n/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;\n/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;\n" }, { "alpha_fraction": 0.7084870934486389, "alphanum_fraction": 0.7343173623085022, "avg_line_length": 26.200000762939453, "blob_id": "11c07a4cc88df14961ea5c3348285631396634eb", "content_id": "fa0c157422b20fcc96850695bc8389f150276e13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 271, "license_type": "no_license", "max_line_length": 51, "num_lines": 10, "path": "/2.js", "repo_name": "chycoz21/dumbways", "src_encoding": "UTF-8", "text": "const imArray = [1,2,3,4]\n\nconst imTambah = imArray.map(tambah => tambah + 1)\nconst imTambahq = imArray.map(tambah => tambah + 2)\nconst imTambahw = imArray.map(tambah => tambah + 3)\n\nconsole.log(imArray)\nconsole.log(imTambah)\nconsole.log(imTambahq)\nconsole.log(imTambahw)" }, { "alpha_fraction": 
0.6962264180183411, "alphanum_fraction": 0.7075471878051758, "avg_line_length": 12.175000190734863, "blob_id": "3248d4b09c3d13ac81d67b25461c98f6475fbbde", "content_id": "620b7c3bcbe075e23f10158f5ea7b6a99f063c40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 530, "license_type": "no_license", "max_line_length": 121, "num_lines": 40, "path": "/readme.md", "repo_name": "chycoz21/dumbways", "src_encoding": "UTF-8", "text": "\n\n[![Build Status](https://travis-ci.org/joemccann/dillinger.svg?branch=master)](https://travis-ci.org/joemccann/dillinger)\n\n## risky fahriza\n\nBelow are the results of the tasks that dumbways assigned to me. The tasks cover:\n\n\n* javascript\n* python\n* php\n\n### Installation\n\nsoal 4\n\n\n\n```sh\nmove the folder to htdocs\nopen phpmyadmin and import the db\n```\n\nsoal 3\n\n```sh\ninstall python v.3+\nrun the source code\n```\nsoal 2\n\n```sh\ninstall node.js\nrun the source code\n```\n\n" }, { "alpha_fraction": 0.6099511384963989, "alphanum_fraction": 0.6179475784301758, "avg_line_length": 33.64615249633789, "blob_id": "0ae4290d433f1c34735a09c3c821c45baeed9be4", "content_id": "08e2ba293277b34c4b8639d1a08c8b483c40ef72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 2251, "license_type": "no_license", "max_line_length": 237, "num_lines": 65, "path": "/soal 4/index.php", "repo_name": "chycoz21/dumbways", "src_encoding": "UTF-8", "text": "<?php\nrequire_once 'dbcon.php';\n\nif(isset($_GET['delete_id']))\n{\n\t$stmt_select = $DB_con->prepare('SELECT userprofile FROM users WHERE userid =:uid');\n\t$stmt_select->execute(array(':uid'=>$_GET['delete_id']));\n\t$imgRow=$stmt_select->fetch(PDO::FETCH_ASSOC);\n\tunlink(\"user_images/\".$imgRow['userprofile']);\n\t$stmt_delete = $DB_con->prepare('DELETE FROM users WHERE userid =:uid');\n\t$stmt_delete->bindParam(':uid',$_GET['delete_id']);\n\t$stmt_delete->execute();\t\n\theader(\"Location: index.php\");\n}\n?>\n<!DOCTYPE html>\n<html>\n<head>\n\t<title>DUMBWAYS CRUD</title>\n\t<link rel=\"stylesheet\" href=\"bootstrap/css/bootstrap.min.css\">\n\t<script src=\"bootstrap/js/bootstrap.min.js\"></script>\n</head>\n<body>\n\t<div class=\"container\">\n\t\t<div class=\"page-header\">\n\t\t\t<h1 class=\"h2\">&nbsp; Dumb Gram<a class=\"btn btn-success\" href=\"addmember.php\" style=\"margin-left: 770px;\"><span class=\"glyphicon glyphicon-user\"></span>&nbsp; Add Users </a></h1><hr>\n\t\t</div>\n\t\t<div class=\"row\">\n\t\t\t<?php\n\t\t\t$stmt = $DB_con->prepare('SELECT userid, username, description, userprofile FROM users ORDER BY userid DESC');\n\t\t\t$stmt->execute();\n\t\t\tif($stmt->rowCount() > 0)\n\t\t\t{\n\t\t\t\twhile($row=$stmt->fetch(PDO::FETCH_ASSOC))\n\t\t\t\t{\n\t\t\t\t\textract($row);\n\t\t\t\t\t?>\n\t\t\t\t\t<div class=\"col-xs-3\">\n\t\t\t\t\t\t<h3 class=\"page-header\" style=\"background-color:cadetblue\" align=\"center\"><?php echo $username.\"<br>\".$description; ?></h3>\n\t\t\t\t\t\t<img src=\"uploads/<?php echo $row['userprofile']; ?>\" class=\"img-rounded\" width=\"250px\" height=\"250px\" /><hr>\n\t\t\t\t\t\t<p class=\"page-header\" align=\"center\">\n\t\t\t\t\t\t\t<span>\n\t\t\t\t\t\t\t\t<a class=\"btn btn-primary\" href=\"editform.php?edit_id=<?php echo $row['userid']; ?>\"><span class=\"glyphicon glyphicon-pencil\"></span> Edit</a> \n\t\t\t\t\t\t\t\t<a class=\"btn btn-warning\" href=\"?delete_id=<?php echo 
$row['userid']; ?>\" title=\"click for delete\" onclick=\"return confirm('Are You Sure You Want To Delete This User?')\"><span class=\"glyphicon glyphicon-trash\"></span> Delete</a>\n\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t</p>\n\t\t\t\t\t</div> \n\t\t\t\t\t<?php\n\t\t\t\t}\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\t?>\n\t\t\t\t<div class=\"col-xs-12\">\n\t\t\t\t\t<div class=\"alert alert-warning\">\n\t\t\t\t\t\t<span class=\"glyphicon glyphicon-info-sign\"></span>&nbsp; No Data Found.\n\t\t\t\t\t</div>\n\t\t\t\t</div>\n\t\t\t\t<?php\n\t\t\t}\n\t\t\t?>\n\t\t</div>\n\t</div>\n</body>\n</html>" }, { "alpha_fraction": 0.6611295938491821, "alphanum_fraction": 0.6976743936538696, "avg_line_length": 24.08333396911621, "blob_id": "09c66e851c19dd78589ee4fff6b33ef9d79a08a0", "content_id": "09ff7ea6b05af491b342a2349a24583a19334cd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 301, "license_type": "no_license", "max_line_length": 39, "num_lines": 12, "path": "/1.py", "repo_name": "chycoz21/dumbways", "src_encoding": "UTF-8", "text": "def waktu(kalori):\n return kalori/10\n#input data\nkalori= int(input(\"jumlah kalori : \"))\nWaktu=waktu(kalori)\nif kalori<=500:\n print(\" jenis olahraga :badminton\")\nelif kalori<=750:\n print(\"jenis olahraga :lari\")\nelif kalori>=750:\n print(\"jenis olahraga :lari\")\nprint(\"waktu olahraga\",Waktu)\n" } ]
5
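One thing `index.php` above gets right is that every query goes through PDO prepared statements with bound placeholders (`:uid` plus `bindParam`/`execute(array(...))`) instead of interpolating `$_GET` values into the SQL string, which is what keeps the delete route safe from SQL injection. The same pattern in Python, as a sketch against a hypothetical in-memory sqlite3 copy of the `users` table from `regusers.sql`:

```python
import sqlite3

# Hypothetical local copy of the `users` table from regusers.sql
conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE users ("
    " userid INTEGER PRIMARY KEY,"
    " username TEXT, description TEXT, userprofile TEXT)"
)
conn.execute(
    "INSERT INTO users (userid, username, description, userprofile)"
    " VALUES (?, ?, ?, ?)",
    (56, "eza1", "test1", "494024.jpg"),
)

def delete_user(user_id: int) -> None:
    # Placeholder binding, like PDO's bindParam(':uid', ...) in index.php:
    # the driver escapes the value, so untrusted input cannot alter the SQL
    conn.execute("DELETE FROM users WHERE userid = ?", (user_id,))
    conn.commit()

delete_user(56)
print(conn.execute("SELECT COUNT(*) FROM users").fetchone()[0])  # -> 0
```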
AdityaMehta02/uploadimage-application
https://github.com/AdityaMehta02/uploadimage-application
89dd4c48bcb02fd5afee6d20c2c7dec3d81996e3
69688f218ed3a7722b5e0631e47a326b5e8ceef4
c8d3168bca998aeab04e33342091c1a99cc1e827
refs/heads/master
2020-12-28T01:09:53.218625
2020-04-05T00:19:10
2020-04-05T00:19:10
238,130,502
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6430604457855225, "alphanum_fraction": 0.6497372984886169, "avg_line_length": 30.39518928527832, "blob_id": "a201793958eaf97dbb2d405c50991cbd559804b0", "content_id": "187df0b30acab1bf4457b02a1feb289fb6fd87c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9136, "license_type": "no_license", "max_line_length": 155, "num_lines": 291, "path": "/main.py", "repo_name": "AdityaMehta02/uploadimage-application", "src_encoding": "UTF-8", "text": "import os\nfrom flask import Flask, request, redirect, url_for, send_from_directory, render_template, send_file\n\nfrom io import BytesIO\nimport tempfile\nfrom six.moves import urllib\n\nimport matplotlib\nmatplotlib.use('agg')\nfrom matplotlib import gridspec\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom PIL import Image\n\nimport tensorflow as tf\nfrom tensorflow.core.framework import *\n\nclass DeepLabModel(object):\n \"\"\"Class to load deeplab model and run inference.\"\"\"\n\n INPUT_TENSOR_NAME = 'ImageTensor:0'\n OUTPUT_TENSOR_NAME = 'SemanticPredictions:0'\n INPUT_SIZE = 513\n FROZEN_GRAPH_NAME = 'frozen_inference_graph'\n\n def __init__(self, graph_path):\n \"\"\"Creates and loads pretrained deeplab model.\"\"\"\n self.graph = tf.Graph()\n\n graph_def = graph_pb2.GraphDef()\n with open(graph_path, \"rb\") as pbfile:\n graph_def.ParseFromString(pbfile.read())\n\n if graph_def is None:\n raise RuntimeError('Cannot find inference graph in tar archive.')\n\n with self.graph.as_default():\n tf.import_graph_def(graph_def, name='')\n\n self.sess = tf.Session(graph=self.graph)\n\n def run(self, image):\n \"\"\"Runs inference on a single image.\n\n Args:\n image: A PIL.Image object, raw input image.\n\n Returns:\n resized_image: RGB image resized from original input image.\n seg_map: Segmentation map of `resized_image`.\n \"\"\"\n width, height = image.size\n resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n target_size = (int(resize_ratio * width), int(resize_ratio * height))\n resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)\n batch_seg_map = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n seg_map = batch_seg_map[0]\n return resized_image, seg_map\n\n\ndef create_pascal_label_colormap():\n \"\"\"Creates a label colormap used in PASCAL VOC segmentation benchmark.\n\n Returns:\n A Colormap for visualizing segmentation results.\n \"\"\"\n colormap = np.zeros((256, 3), dtype=int)\n ind = np.arange(256, dtype=int)\n\n for shift in reversed(range(8)):\n for channel in range(3):\n colormap[:, channel] |= ((ind >> channel) & 1) << shift\n ind >>= 3\n\n return colormap\n\n\ndef label_to_color_image(label):\n \"\"\"Adds color defined by the dataset colormap to the label.\n\n Args:\n label: A 2D array with integer type, storing the segmentation label.\n\n Returns:\n result: A 2D array with floating type. 
The element of the array\n is the color indexed by the corresponding element in the input label\n to the PASCAL color map.\n\n Raises:\n ValueError: If label is not of rank 2 or its value is larger than color\n map maximum entry.\n \"\"\"\n if label.ndim != 2:\n raise ValueError('Expect 2-D input label')\n\n colormap = create_pascal_label_colormap()\n\n if np.max(label) >= len(colormap):\n raise ValueError('label value too large.')\n\n return colormap[label]\n\n\ndef vis_segmentation(image, seg_map):\n \"\"\"Visualizes input image, segmentation map and overlay view.\"\"\"\n plt.figure(figsize=(15, 5))\n grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1])\n\n plt.subplot(grid_spec[0])\n plt.imshow(image)\n plt.axis('off')\n plt.title('input image')\n\n plt.subplot(grid_spec[1])\n seg_image = label_to_color_image(seg_map).astype(np.uint8)\n plt.imshow(seg_image)\n plt.axis('off')\n plt.title('segmentation map')\n\n plt.subplot(grid_spec[2])\n plt.imshow(image)\n plt.imshow(seg_image, alpha=0.7)\n plt.axis('off')\n plt.title('segmentation overlay')\n\n unique_labels = np.unique(seg_map)\n ax = plt.subplot(grid_spec[3])\n plt.imshow(\n FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation='nearest')\n ax.yaxis.tick_right()\n plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])\n plt.xticks([], [])\n ax.tick_params(width=0.0)\n plt.grid('off')\n plt.show()\n\n\nLABEL_NAMES = np.asarray([\n 'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',\n 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',\n 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tv'\n])\n\nFULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)\nFULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)\n\n#download_path = '/home/adi/Workspace/models/research/deeplab/datasets/goat_molt_seg/exp/train_on_trainval_set/export/frozen_inference_graph.pb'\ndownload_path = '/home/adi/Workspace/models/research/deeplab/datasets/goat_molt_seg/exp/train_on_trainval_set_mobilenetv2/export/frozen_inference_graph.pb'\n\nMODEL = DeepLabModel(download_path)\n\nSAMPLE_IMAGE = 'image1' # @param ['image1', 'image2', 'image3']\nIMAGE_URL = '' #@param {type:\"string\"}\n\n_SAMPLE_URL = ('https://github.com/tensorflow/models/blob/master/research/'\n 'deeplab/g3doc/img/%s.jpg?raw=true')\n\ndef serve_pil_image(pil_img):\n img_io = BytesIO()\n pil_img.save(img_io, 'JPEG')\n img_io.seek(0)\n return send_file(img_io, mimetype='image/jpeg')\n\n\ndef run_visualization(url):\n \"\"\"Inferences DeepLab model and visualizes result.\"\"\"\n try:\n f = urllib.request.urlopen(url)\n jpeg_str = f.read()\n original_im = Image.open(BytesIO(jpeg_str))\n except IOError:\n print('Cannot retrieve image. Please check url: ' + url)\n return\n\n print('running deeplab on image %s...' % url)\n resized_im, seg_map = MODEL.run(original_im)\n\n vis_segmentation(resized_im, seg_map)\n\n\n#image_url = IMAGE_URL or _SAMPLE_URL % SAMPLE_IMAGE\n#run_visualization(image_url)\n\nUPLOAD_FOLDER = '/static/Uploads'\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\n\ndef allowed_file(filename):\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\[email protected](\"/\")\ndef home():\n return render_template(\"home.html\")\n\[email protected](\"/about\")\ndef about():\n return render_template(\"about.html\")\n\[email protected](\"/uploadImage\")\ndef uploadImage():\n return render_template(\"uploadImage.html\")\n\n#@app.route('/upload', methods=['GET', 'POST'])\n#def upload():\n# if request.method == 'POST':\n# file = request.files['file']\n# extension = os.path.splitext(file.filename)[1]\n# f_name = str(uuid.uuid4()) + extension\n# file.save(os.path.join(app.config['UPLOAD_FOLDER'], f_name))\n# return json.dumps({'filename':f_name})\n#def upload():\n# if request.method == 'POST':\n# if 'file' not in request.files:\n# return redirect(request.url)\n# file = request.files['file']\n# if file.filename == '':\n# return redirect(request.url)\n# if file and allowed_file(file.filename):\n# filename = secure_filename(file.filename)\n# file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n#@app.route('/uploadImage', methods=['GET', 'POST'])\n#def upload():\n#\n# if request.method == \"POST\":\n#\n# if request.files:\n#\n# image = request.files[\"image\"]\n#\n# print (image)\n#\n# return redirect(request.url)\n#\n\n#image_url = IMAGE_URL or _SAMPLE_URL % SAMPLE_IMAGE\n#run_visualization(image_url)\n\n#@app.route('/test', methods=['GET', 'POST'])\[email protected](\"/uploadAjax\", methods=['GET', 'POST'])\ndef uploadAjax():\n imgFile=request.files['file']\n print(\"isthisFile\")\n print(imgFile)\n print(imgFile.filename)\n imgFile.save(\"./static/Uploads/\"+imgFile.filename)\n# return render_template(\"uploadImage.html\")\n#def upload_file():\n if request.method == 'POST':\n if imgFile:\n #jpeg_str=imgFile.read()\n jFile=\"./static/Uploads/\"+imgFile.filename\n #jpeg_str=imgFile.read()\n #print(len(jpeg_str))\n #print(type(jpeg_str))\n original_im = Image.open(jFile)\n #original_im = Image.open(BytesIO(jpeg_str))\n resized_im, seg_map = MODEL.run(original_im)\n seg_image = label_to_color_image(seg_map).astype(np.uint8)\n print(resized_im.__class__.__name__)\n print(seg_map.__class__.__name__)\n print(seg_image.__class__.__name__)\n #vis_segmentation(resized_im, seg_map)\n seg_img = Image.fromarray(seg_image, 'RGB')\n seg_img.save(\"./static/Uploads/proc\"+imgFile.filename)\n print(\"XXXXA:\")\n# return redirect(url_for('processedImage', msg=\"./static/Uploads/proc\"+imgFile.filename))\n# data = {\n# \"imageName\": \"./static/Uploads/\"+imgFile.filename,\n# \"imageProcessed\": \"./static/Uploads/proc\"+imgFile.filename\n# }\n return imgFile.filename\n# return render_template('upload_file.html')\n\[email protected](\"/processedImage\", methods=['GET'])\ndef processedImage():\n imageName=\"./static/Uploads/\"+request.args.get('img')\n print(\"XXXXXB:\" + imageName)\n imageProcessed=\"./static/Uploads/proc\"+request.args.get('img')\n ret = render_template('processedImage.html', img_filename=imageName, processedimg_filename=imageProcessed)\n print(\"XXXXXC:\" + imageName)\n return ret\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n" } ]
1
facitoo/Python_utility
https://github.com/facitoo/Python_utility
e005f8f8c3a1614084a0eb012b9776f4c1bba61c
99d93a20adade8a84bc673f4c5b29ea5f68b5f1b
1c9779c0362206d68ad4e59b88d9f74e8508063d
refs/heads/main
2023-01-02T13:21:15.067572
2020-10-22T12:22:05
2020-10-22T12:22:05
306,327,690
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5400437712669373, "alphanum_fraction": 0.5636761784553528, "avg_line_length": 33.18461608886719, "blob_id": "db19d4223bfff77b1e28c8dbdb0a460fa558b82b", "content_id": "215eb10e0785b2677fe071d3e0d91deaf8b8cc7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2285, "license_type": "no_license", "max_line_length": 102, "num_lines": 65, "path": "/ppt_to_mp4.py", "repo_name": "facitoo/Python_utility", "src_encoding": "UTF-8", "text": "import win32com.client\r\nimport time\r\nimport os\r\nimport shutil\r\n\r\ndef ppt_to_mp4(ppt_path,mp4_target,resolution = 720,frames = 24,quality = 60,timeout = 120):\r\n # status:Convert result. 0:failed. -1: timeout. 1:success.\r\n time_per_slide = 3\r\n status = 0\r\n if ppt_path == '' or mp4_target == '':\r\n return status\r\n start_tm = time.time()\r\n\r\n sdir = mp4_target[:mp4_target.rfind('\\\\')]\r\n if not os.path.exists(sdir):\r\n os.makedirs(sdir)\r\n ppt = win32com.client.Dispatch('PowerPoint.Application')\r\n presentation = ppt.Presentations.Open(ppt_path,WithWindow=False)\r\n presentation.CreateVideo(mp4_target,-1,time_per_slide,resolution,frames,quality)\r\n while True:\r\n try:\r\n time.sleep(0.1)\r\n if time.time() - start_tm > timeout:\r\n # Converting time out. Killing the PowerPoint process(An exception will be threw out).\r\n os.system(\"taskkill /f /im POWERPNT.EXE\")\r\n status = -1\r\n break\r\n if os.path.exists(mp4_path) and os.path.getsize(mp4_target) == 0:\r\n continue\r\n status = 1\r\n break\r\n except Exception as e:\r\n print ('Error! Code: {c}, Message, {m}'.format(c = type(e).__name__, m = str(e)))\r\n break\r\n print (time.time()-start_tm)\r\n if status != -1:\r\n ppt.Quit()\r\n\r\n return status\r\n \r\nif __name__ == '__main__':\r\n quality = 60\r\n resolution = 720\r\n frames = 24\r\n ppt_path = os.path.abspath('C:/Users/DESKTOP/Downloads/sample.ppt')\r\n mp4_path = os.path.abspath('C:/Users/DESKTOP/Downloads/test2.mp4')\r\n\r\n ie_temp_dir = ''\r\n\r\n status = 0\r\n timeout = 4*60\r\n try:\r\n status = ppt_to_mp4(ppt_path,mp4_path,resolution,frames,quality,timeout)\r\n if ie_temp_dir != '':\r\n shutil.rmtree(ie_temp_dir, ignore_errors=True)\r\n except Exception as e:\r\n print ('Error! Code: {c}, Message, {m}'.format(c = type(e).__name__, m = str(e)))\r\n if status == -1:\r\n print ('Failed:timeout.')\r\n elif status == 1:\r\n print ('Success!')\r\n else:\r\n if os.path.exists(mp4_path):\r\n os.remove(mp4_path)\r\n print ('Failed:The ppt may have unknow elements. You can try to convert it manual.')" }, { "alpha_fraction": 0.6657223701477051, "alphanum_fraction": 0.6713880896568298, "avg_line_length": 16.6842098236084, "blob_id": "9ca54b4c93e41e00560199ff38a2c51b88790187", "content_id": "6b42f6c6277a93693264072ada6403713341e5cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 353, "license_type": "no_license", "max_line_length": 47, "num_lines": 19, "path": "/speach_to_text.py", "repo_name": "facitoo/Python_utility", "src_encoding": "UTF-8", "text": "import speech_recognition as sr\r\nfrom win32com.client import constants, Dispatch\r\n\r\nr = sr.Recognizer()\r\n\r\nwith sr.Microphone() as mp:\r\n print('say')\r\n audio = r.listen(mp)\r\n \r\ntry:\r\n print(r.recognize_google(audio))\r\n\r\nexcept:\r\n pass\r\n\r\nMsg = r.recognize_google(audio)\r\nspeaker = Dispatch(\"SAPI.SpVoice\")\r\nspeaker.Speak(Msg)\r\ndel speaker" } ]
2
qzio/tododis
https://github.com/qzio/tododis
f4d641c4aae0e4c9702c6d2c09228dd40e7612dd
4708668a2881fb8bb5cfaeef09de969f2aa6d138
d7dd610f25b45b9d5383b82281986dff08621ed2
refs/heads/master
2019-07-19T14:21:38.404956
2013-04-04T18:50:35
2013-04-04T18:50:35
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7602905631065369, "alphanum_fraction": 0.7602905631065369, "avg_line_length": 33.41666793823242, "blob_id": "00b020508347d5cd6462c1db61fe28548ce18d7e", "content_id": "1aeef8d45e61d2ff8093f8dc6f68d9c03c52b074", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 413, "license_type": "no_license", "max_line_length": 73, "num_lines": 12, "path": "/README.md", "repo_name": "qzio/tododis", "src_encoding": "UTF-8", "text": "# Todo app without authentication.\n\nThis todo app lets you create a todo list by suppling a name and a email.\nThe security is based on the url hash generated. There is no password.\n\nThe list can be shared to anyone, just let them know about the url.\na \"Email this to your friend\" functionality is supplied for convenience.\n\nPlaned improvments is put into TODO.\n\n------\nlicense: DBAD http://www.dbad-license.org/.\n" }, { "alpha_fraction": 0.5667465925216675, "alphanum_fraction": 0.5731415152549744, "avg_line_length": 28.0930233001709, "blob_id": "20fc1f9564b0475496bde859f84eaef7e1369cef", "content_id": "8021626e069ac66d8d9505967e6de5057dbfa4c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1251, "license_type": "no_license", "max_line_length": 98, "num_lines": 43, "path": "/mails.py", "repo_name": "qzio/tododis", "src_encoding": "UTF-8", "text": "#-*- encoding: utf-8 -*-\n\nfrom flask import url_for\nfrom flask_mail import Mail, Message\nimport logging\n\nmail = Mail()\n\ndef send_all(todolists, recipient):\n mail_body = \"Here is all your todolists: \\n\"\n for todo in todolists:\n mail_body += \"{0}\\n\".format(url_for('lists_show', namehash=todo.namehash, _external=True))\n\n msg = Message(\"Your todolists\",\n sender=\"[email protected]\",\n recipients=[recipient.strip()])\n msg.body = mail_body\n\n try:\n mail.send(msg)\n return True\n except Exception, e:\n logging.error(\"Failed to send all lists to self: {0}{1}\".format(type(e), e))\n return False\n\ndef send_list(todolist, recipient):\n mail_body = \"{0} wants to share this list with you: {1}\".format(\n todolist.email,\n url_for('lists_show',\n namehash=todolist.namehash,\n _external=True))\n\n msg = Message(\"A todo was shared with you\",\n sender=\"[email protected]\",\n recipients=[recipient.strip()])\n msg.body = mail_body\n\n try:\n mail.send(msg)\n return True\n except Exception, e:\n logging.error(\"Failed to send list to friend: {0}({1})\".format(type(e), e))\n return False\n" }, { "alpha_fraction": 0.6208791136741638, "alphanum_fraction": 0.6270604133605957, "avg_line_length": 28.3154354095459, "blob_id": "c7b3671a8a550213822b22fc430a49dcd5b88c55", "content_id": "84fa1701bb95c2c288177a42a8e10a6f7f46c4d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4368, "license_type": "no_license", "max_line_length": 117, "num_lines": 149, "path": "/app.py", "repo_name": "qzio/tododis", "src_encoding": "UTF-8", "text": "#-*- encoding: utf-8 -*-\n\nimport sys\nimport re\n\nfrom flask import Flask, abort, request, flash\nfrom flask import redirect, render_template, url_for\n\nfrom models import db, Todolist, Todoitem, ValidationError\nfrom mails import mail, send_all, send_list\nfrom regexps import regexps\n\n\napp = Flask(__name__)\napp.config.from_pyfile(\"config.py\")\ndb.init_app(app)\nmail.init_app(app)\n\n\ndef todo_by(namehash):\n l = Todolist.query.filter_by(namehash=namehash).first()\n if not l:\n 
abort(404)\n return l\n\ndef todos_for(email):\n todolists = Todolist.query.filter_by(email=email).all()\n return todolists\n\n\n#\n# routes/actions\n#\n\[email protected](\"/\", methods=[\"GET\"])\ndef start():\n return render_template(\"lists_create.html\", title=\"Create your todo list\")\n\n\[email protected](\"/lists/<namehash>\", methods=[\"GET\"])\ndef lists_show(namehash):\n todolist = todo_by(namehash)\n return render_template(\"lists_show.html\", title=\"Showing list {0}\".format(todolist.name), todolist=todolist) \n\n\[email protected](\"/lists\", methods=[\"POST\"])\ndef lists_create():\n todolist = None\n try:\n todolist = Todolist(request.form[\"todo_email\"], request.form[\"todo_name\"])\n db.session.add(todolist)\n db.session.commit()\n except ValidationError, e:\n flash(str(e))\n return redirect(url_for('start'))\n else:\n return redirect(url_for('lists_show', namehash=todolist.namehash))\n\n\n# Stupid browsers that can't to proper DELETE\[email protected](\"/lists/<namehash>\", methods=[\"POST\",\"DELETE\"])\ndef lists_delete(namehash):\n if request.method == \"POST\" and request.form.get('_method','') != \"DELETE\":\n abort(404)\n\n todolist = todo_by(namehash)\n db.session.delete(todolist)\n db.session.commit()\n return redirect(url_for('start'))\n\n\[email protected](\"/lists/<namehash>/items\", methods=[\"POST\"])\ndef lists_add_item(namehash):\n todolist = todo_by(namehash)\n try:\n item = Todoitem(todolist, request.form.get(\"item_name\",\"\"))\n db.session.add(item)\n db.session.commit()\n except ValidationError, e:\n if 'format' in request.args and request.args['format'] == 'json':\n return \"\", 422\n else:\n flash(str(e))\n return redirect(url_for('lists_show', namehash=todolist.namehash))\n else:\n if 'format' in request.args and request.args['format'] == 'json':\n return \"Created\", 201\n else:\n return redirect(url_for('lists_show', namehash=todolist.namehash))\n\n\[email protected](\"/lists/<namehash>/items/<int:item_id>\", methods=[\"POST\"])\ndef lists_done(namehash, item_id):\n todolist = todo_by(namehash)\n for item in todolist.todoitems:\n if item.id == item_id:\n item.done = 1\n db.session.commit()\n\n if 'format' in request.args and request.args['format'] == 'json':\n return \"Marked done\", 201\n else:\n return redirect(url_for('lists_show', namehash=todolist.namehash))\n\n\[email protected](\"/sendalllists\", methods=[\"POST\"])\ndef lists_send_all():\n send_for_email = request.form.get('send_for_email', '')\n if not re.match(regexps['email'], send_for_email):\n flash('You need to supply a valid email')\n else:\n todolists = todos_for(send_for_email)\n if len(todolists) <= 0:\n return \"GTFO\", 403\n else:\n if send_all(todolists, send_for_email):\n flash(\"Will try and send you an email, check your spam folder as well!\")\n else:\n flash(\"Failed to send you the email, please contact the admin\")\n\n return redirect(url_for('start'))\n\n\[email protected](\"/lists/<namehash>/sendlist\", methods=[\"POST\"])\ndef lists_send(namehash):\n todolist = todo_by(namehash)\n send_to_email = request.form.get('send_to_email', '')\n if not re.match(regexps['email'], send_to_email):\n flash('You need to supply a valid email')\n else:\n if send_list(todolist, send_to_email):\n flash(\"Will send this list to {0}, tell him/her to check the spam folder as well!\".format(send_to_email))\n else:\n flash(\"Failed to send the email. 
Contacat the admin\")\n\n return redirect(url_for('lists_show', namehash=todolist.namehash))\n\n\n#\n# end routes\n#\n\nif __name__ == \"__main__\":\n\n if \"-c\" in sys.argv:\n app.test_request_context().push()\n db.create_all()\n else:\n app.run(host=\"0.0.0.0\")\n" }, { "alpha_fraction": 0.6256658434867859, "alphanum_fraction": 0.632929801940918, "avg_line_length": 29.352941513061523, "blob_id": "8670304547f8094340aa7932d990520836d8336c", "content_id": "6eb82c631135ca3631b4a426972354ed22753d8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2065, "license_type": "no_license", "max_line_length": 69, "num_lines": 68, "path": "/models.py", "repo_name": "qzio/tododis", "src_encoding": "UTF-8", "text": "#-*- encoding: utf-8 -*-\n\nimport hashlib\nimport random\nimport re\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom sqlalchemy.orm import validates\nfrom config import APP_KEY\n\nfrom regexps import regexps\n\ndb = SQLAlchemy()\n\nclass ValidationError(Exception):\n pass\n\nclass Todolist(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String(200))\n name = db.Column(db.String(200))\n namehash = db.Column(db.String(200), unique=True)\n\n todoitems = db.relationship('Todoitem',\n order_by=\"asc(Todoitem.done), asc(Todoitem.id)\",\n primaryjoin=\"(Todoitem.todolist_id==Todolist.id)\")\n\n @validates('email')\n def validate_email(self, key, email_address):\n if not re.match(regexps['email'], email_address, re.UNICODE):\n raise ValidationError(\"Need to supply valid email.\")\n return email_address\n\n @validates('name')\n def validate_name(self, key, str):\n if not re.match(regexps['printables'], str, re.UNICODE):\n raise ValidationError(\"Name need to be normal chars.\")\n return str\n\n def __init__(self, email, name):\n self.name = name\n self.email = email\n hashstr = \"\".join([APP_KEY,name,email,str(random.random())])\n self.namehash = hashlib.sha1(hashstr).hexdigest()\n\n def __repr__(self):\n return '<Todolist %r by %r>' % (self.name, self.email)\n\nclass Todoitem(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(200))\n done = db.Column(db.Integer)\n\n todolist_id = db.Column(db.Integer, db.ForeignKey('todolist.id'))\n todolist = db.relationship('Todolist')\n\n @validates('name')\n def validate_name(self, key, str):\n if not re.match(regexps['printables'], str, re.UNICODE):\n raise ValidationError(\"Cant add blank todo item.\")\n return str\n\n def __init__(self, todolist, name):\n self.name = name\n self.todolist = todolist\n self.done = 0\n \n def __repr__(self):\n return '<Todoitem %r>' % self.name\n\n" }, { "alpha_fraction": 0.7093524932861328, "alphanum_fraction": 0.7165467739105225, "avg_line_length": 22.931034088134766, "blob_id": "8d48ae2bd0fe87e67296c0f189e8e6d224df5aeb", "content_id": "f25c86600a5c41624346316b66a11e5490d54332", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 695, "license_type": "no_license", "max_line_length": 84, "num_lines": 29, "path": "/config-example.py", "repo_name": "qzio/tododis", "src_encoding": "UTF-8", "text": "#-*- encoding: utf-8 -*-\nimport os\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\nSECRET_KEY = \"your secret key\"\n\n# Dev\nDEBUG = True\nTRAP_BAD_REQUEST_ERRORS = False\n\n# Used as salt\nAPP_KEY = \"this is used as salt\"\n\n# Database\nSQLALCHEMY_DATABASE_URI = \"sqlite:///{0}\".format(os.path.join(basedir,'db.sqlite3'))\n\n# Mail 
settings\nMAIL_SERVER = \"localhost\"\n#MAIL_PORT : default 25\n#MAIL_USE_TLS : default False\n#MAIL_USE_SSL : default False\n#MAIL_DEBUG : default app.debug\n#MAIL_USERNAME : default None\n#MAIL_PASSWORD : default None\nDEFAULT_MAIL_SENDER = \"no-reply@localhost\"\n#DEFAULT_MAX_EMAILS : default None\n#MAIL_FAIL_SILENTLY : default False\n#MAIL_SUPPRESS_SEND : default False\n\n" }, { "alpha_fraction": 0.3743315637111664, "alphanum_fraction": 0.4331550896167755, "avg_line_length": 30.16666603088379, "blob_id": "7ca6c1de658eecb344b3b20b5c75e4aec7c967a6", "content_id": "ce90fc11993472ab0f2c1601cc0db433a3e875f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 187, "license_type": "no_license", "max_line_length": 104, "num_lines": 6, "path": "/regexps.py", "repo_name": "qzio/tododis", "src_encoding": "UTF-8", "text": "#-*- encoding: utf-8 -*-\n\nregexps = {}\nregexps['email'] = \"([0-9a-zA-Z]([-\\.\\w]*[0-9a-zA-Z])*@([0-9a-zA-Z][-\\w]*[0-9a-zA-Z]\\.)+[a-zA-Z]{2,9})$\"\n\nregexps['printables'] = \"^[\\w ,.-?\\/!]+$\"\n" }, { "alpha_fraction": 0.5149999856948853, "alphanum_fraction": 0.675000011920929, "avg_line_length": 14.384614944458008, "blob_id": "bf60a8b216fd9462939c851b66a6127f9a046b18", "content_id": "d0951286ed280abbdc6ef2126f1a5a3388821dfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 200, "license_type": "no_license", "max_line_length": 22, "num_lines": 13, "path": "/requirements.txt", "repo_name": "qzio/tododis", "src_encoding": "UTF-8", "text": "Flask==0.9\nFlask-Mail==0.7.6\nFlask-SQLAlchemy==0.16\nJinja2==2.6\nSQLAlchemy==0.8.0\nWerkzeug==0.8.3\nargparse==1.2.1\nblinker==1.2\nwsgiref==0.1.2\n\n# Production:\n# gunicorn==0.17.2\n# MySQL-python==1.2.4\n" } ]
7
hungnmai/Predic-Football
https://github.com/hungnmai/Predic-Football
7c9397c76fa1dad56b37fa5dbe24659677f63029
cd80a1a3be24cb90ffc266d32b6ea89f284aaa24
94770183b8dd92db9d7fd38c686a32582744d647
refs/heads/master
2020-03-13T06:56:53.897199
2018-04-25T14:05:46
2018-04-25T14:05:46
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7204030156135559, "alphanum_fraction": 0.741813600063324, "avg_line_length": 45.70588302612305, "blob_id": "03580315384d383629a763a4fcd0eb3c611e984a", "content_id": "ee46787fe6e35be6df4a95aaa66bee4a995aaaba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 794, "license_type": "no_license", "max_line_length": 195, "num_lines": 17, "path": "/README.md", "repo_name": "hungnmai/Predic-Football", "src_encoding": "UTF-8", "text": "## Predicting outcome of football matches using machine learning\n\n**Alexander Smirnov, Vasily Kumaev, Dmitry Vodopyanov** \n*Lobachevsky University, Nizhny Novgorod, Russia*\n\n### Overview\nIn this mini-project we try to use some machine learning methods to predict outcome of football matches, such as:\n\n - Naive Bayes\n - Random Forest\n - Support Vectors Machine\n\nThe dataset that used in our project is [here](http://www.football-data.co.uk/englandm.php).\n\n### Literature\n - A. Yezus. Predicting outcome of soccer matches using machine learning ([pdf](http://www.math.spbu.ru/SD_AIS/documents/2014-12-341/2014-12-tw-15.pdf))\n - A. Joseph, N.E. Fenton, M. Neil. Predicting football results using Bayesian nets and other machine learning techniques ([pdf](http://www.eecs.qmul.ac.uk/~martin/index_files/spurs_final_published.pdf))\n" }, { "alpha_fraction": 0.44237980246543884, "alphanum_fraction": 0.47147515416145325, "avg_line_length": 34.46242904663086, "blob_id": "a0ff3b99549546d18b56b2eeda3502f2ef577c75", "content_id": "f939656986ab1ce5a496df1fa016917b06e4d5e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12270, "license_type": "no_license", "max_line_length": 258, "num_lines": 346, "path": "/src/parse.py", "repo_name": "hungnmai/Predic-Football", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n\"\"\"\n This script can parse CSV databases.\n\n To use it, you should call getInfo(teamName) function.\n It returns the list with following structure:\n [TeamName, OpponentTeamName, TeamNameScore, OpponentTeamNameScore, \n TeamNameSignificance, OpponentTeamNameSignificanceisTeamNameHome].\n If team played the match at the home stadium, isTeamHome returns True, otherwise - False.\n\"\"\"\n\nimport os\nimport csv\n\ndir = \"/Users/manhhung/Documents/BKHN/ML/workspace/pre_football/football-results-prediction-ml/datasets\"\ndirTrain = \"/Users/manhhung/Documents/BKHN/ML/workspace/pre_football/football-results-prediction-ml/data_train\"\ntxtDir = \"/Users/manhhung/Documents/BKHN/ML/workspace/pre_football/football-results-prediction-ml/txt\"\nteamName = \"\"\nteams = []\nteamSignificance = \"\"\nopponentTeamSignificance = \"\"\nteamsStat = []\nmatchesNumber = 1000\n\n\ndef getTeamsNamesList():\n teamsFile = open(dir + \"/allTeams.txt\", 'r')\n for team in teamsFile:\n teams.append(team.split(\",\"))\n teamsFile.close();\n\n\ndef getInfo(teams, matchesNumber):\n for team in teams:\n if team[1].endswith(\"\\n\"):\n team[1] = team[1][:team[1].find(\"\\n\")]\n teamName = team[0]\n teamSignificance = team[1]\n getCSV(teamName, teamSignificance, matchesNumber)\n del teamsStat[:]\n\n\ndef getCSV(teamName, teamSignificance, matchesNumber):\n for name in os.listdir(dir):\n # print(name)\n path = os.path.join(dir, name)\n if os.path.isfile(path):\n if name.endswith(\"csv\"):\n print(name)\n # print(teamName)\n # parseCSV(path, teamName, teamSignificance, matchesNumber)\n else:\n getCSV(dir)\n saveResults(teamsStat, 
teamName)\n\n\ndef parseCSV(path, teamName, teamSignificance, matchesNumber):\n    inputFile = open(path, \"rb\")\n    rdr = csv.reader(inputFile)\n    for rec in rdr:\n        try:\n            # print(rec)\n            if (len(teamsStat) <= matchesNumber):\n                isTeamHome = rec[2] == teamName\n                isTeamAway = rec[3] == teamName\n                for team in teams:\n                    if isTeamHome:\n                        if (team[0] == rec[3]):\n                            opponentTeamSignificance = team[1]\n                    if isTeamAway:\n                        if (team[0] == rec[2]):\n                            opponentTeamSignificance = team[1]\n                    if opponentTeamSignificance.endswith(\"\\n\"):\n                        opponentTeamSignificance = opponentTeamSignificance[\n                                                   :opponentTeamSignificance.find(\"\\n\")]\n                if isTeamHome:\n                    teamsStat.append([rec[2], rec[3], rec[4], rec[5], rec[6], rec[7], rec[8], rec[9], rec[10], rec[12],\n                                      rec[13], rec[14], rec[15], rec[16], rec[17], rec[18], rec[19], rec[20], rec[21],\n                                      rec[22], rec[23], teamSignificance, opponentTeamSignificance, str(isTeamHome)])\n                elif isTeamAway:\n                    teamsStat.append([rec[3], rec[2], rec[5], rec[4],\n                                      rec[6], rec[7], rec[8], rec[9], rec[10], rec[12],\n                                      rec[13], rec[14], rec[15], rec[16], rec[17], rec[18], rec[19], rec[20], rec[21],\n                                      rec[22], rec[23],\n                                      teamSignificance, opponentTeamSignificance, str(isTeamHome)])\n        except:\n            pass\n    inputFile.close()\n\n\ndef saveResults(teamsStat, teamName):\n    resFile = open(txtDir + \"/sortedData\" + teamName.replace(\" \", \"_\") + \".txt\", 'w')\n    for stat in teamsStat:\n        resFile.write(';'.join(stat) + '\\n')\n    resFile.close()\n\n\ndef fn():  # 1.Get file names from directory\n    file_list = os.listdir(dir)\n    listCSV = []\n    for name in file_list:\n        if name.endswith('csv'):\n            listCSV.append(name)\n    return listCSV\n\n\ndicsNameTeam = {\n    \"Arsenal\": 3,\n    \"Aston Villa\": 7,\n    \"Birmingham\": 8,\n    \"Blackburn\": 9,\n    \"Blackpool\": 10,\n    \"Bolton\": 11,\n    \"Bournemouth\": 12,\n    \"Burnley\": 13,\n    \"Cardiff\": 14,\n    \"Charlton\": 15,\n    \"Chelsea\": 2,\n    \"Crystal Palace\": 16,\n    \"Derby\": 17,\n    \"Everton\": 7,\n    \"Fulham\": 18,\n    \"Hull\": 19,\n    \"Leeds\": 20,\n    \"Leicester\": 21,\n    \"Liverpool\": 4,\n    \"Man City\": 6,\n    \"Man United\": 1,\n    \"Middlesbrough\": 22,\n    \"Newcastle\": 23,\n    \"Norwich\": 24,\n    \"Portsmouth\": 25,\n    \"QPR\": 26,\n    \"Reading\": 27,\n    \"Sheffield United\": 28,\n    \"Southampton\": 29,\n    \"Stoke\": 30,\n    \"Sunderland\": 31,\n    \"Swansea\": 32,\n    \"Tottenham\": 5,\n    \"Watford\": 33,\n    \"West Brom\": 34,\n    \"West Ham\": 35,\n    \"Wigan\": 36,\n    \"Wolves\": 37,\n    \"Brighton\": 38,\n    \"Huddersfield\": 39}\n\n\ndef parseCSV(path):\n    f = open(path, 'rb')\n    reader = csv.reader(f)\n    reader = list(reader)\n    reader = reader[1:]\n    f.close()\n\n    listHome = []\n    listAway = []\n    dataTrain = []\n    for i in range(len(reader)):\n        currentMatch = reader[i]\n        listLastMatch = reader[0: i]\n\n        for lastMatch in listLastMatch:\n            if currentMatch[2] in lastMatch:\n                listHome.append(lastMatch)\n            if currentMatch[3] in lastMatch:\n                listAway.append(lastMatch)\n\n        gdHome = 0\n        gdAway = 0\n        HS = 0\n        AS = 0\n        HST = 0\n        AST = 0\n        HC = 0\n        AC = 0\n        HF = 0\n        AF = 0\n        HY = 0\n        AY = 0\n        HR = 0\n        AR = 0\n        scoreHome = 0\n        scoreAway = 0\n        for j in range(len(listHome)):\n            lostGoalHome = 0\n            winGoalHome = 0\n            HS = 0\n            # If team 1 was the home team in its past matches\n            if listHome[j][2] == currentMatch[2]:\n                winGoalHome = int(listHome[j][4])\n                lostGoalHome = int(listHome[j][5])\n                if listHome[j][6] == \"H\":\n                    scoreHome += 3\n                elif listHome[j][6] == \"D\":\n                    scoreHome += 1\n            else:\n                winGoalHome = int(listHome[j][5])\n                lostGoalHome = int(listHome[j][4])\n                if listHome[j][6] == \"A\":\n                    scoreHome += 3\n                elif listHome[j][6] == 
\"D\":\n scoreHome += 1\n gdHome += (winGoalHome - lostGoalHome)\n # home\n if len(listHome) > 5:\n for m in range(len(listHome) - 1, len(listHome) - 6, -1):\n if listHome[m][2] == currentMatch[2]:\n HS += int(listHome[m][11])\n HST += int(listHome[m][13])\n HF += int(listHome[m][15])\n HC += int(listHome[m][17])\n HY += int(listHome[m][19])\n HR += int(listHome[m][21])\n else:\n HS += int(listHome[m][12])\n HST += int(listHome[m][14])\n HF += int(listHome[m][16])\n HC += int(listHome[m][18])\n HY += int(listHome[m][20])\n HR += int(listHome[m][22])\n elif (len(listHome) <= 5) and (len(listHome) > 0):\n for m in range(len(listHome)):\n if listHome[m][2] == currentMatch[2]:\n HS += int(listHome[m][11])\n HST += int(listHome[m][13])\n HF += int(listHome[m][15])\n HC += int(listHome[m][17])\n HY += int(listHome[m][19])\n HR += int(listHome[m][21])\n else:\n HS += int(listHome[m][12])\n HST += int(listHome[m][14])\n HF += int(listHome[m][16])\n HC += int(listHome[m][18])\n HY += int(listHome[m][20])\n HR += int(listHome[m][22])\n\n # Away\n if len(listAway) > 5:\n for m in range(len(listAway) - 1, len(listAway) - 6, -1):\n if listAway[m][2] == currentMatch[3]:\n AS += int(listAway[m][11])\n AST += int(listAway[m][13])\n AF += int(listAway[m][15])\n AC += int(listAway[m][17])\n AY += int(listAway[m][19])\n AR += int(listAway[m][21])\n else:\n AS += int(listAway[m][12])\n AST += int(listAway[m][14])\n AF += int(listAway[m][16])\n AC += int(listAway[m][18])\n AY += int(listAway[m][20])\n AR += int(listAway[m][22])\n elif (len(listAway) <= 5) and (len(listAway) > 0):\n for m in range(len(listAway)):\n if listAway[m][2] == currentMatch[3]:\n AS += int(listAway[m][11])\n AST += int(listAway[m][13])\n AF += int(listAway[m][15])\n AC += int(listAway[m][17])\n AY += int(listAway[m][19])\n AR += int(listAway[m][21])\n else:\n AS += int(listAway[m][12])\n AST += int(listAway[m][14])\n AF += int(listAway[m][16])\n AC += int(listAway[m][18])\n AY += int(listAway[m][20])\n AR += int(listAway[m][22])\n\n for j in range(len(listAway)):\n lostGoalAway = 0\n winGoalAway = 0\n # Neu doi thu 2 la doi chu nha trong cac tran dau truoc cua no\n if listAway[j][2] == currentMatch[3]:\n winGoalAway = int(listAway[j][4])\n lostGoalAway = int(listAway[j][5])\n if listAway[j][6] == \"H\":\n scoreAway += 3\n elif listAway[j][6] == \"D\":\n scoreAway += 1\n else:\n winGoalAway = int(listAway[j][5])\n lostGoalAway = int(listAway[j][4])\n if listAway[j][6] == \"A\":\n scoreAway += 3\n elif listAway[j][6] == \"D\":\n scoreAway += 1\n\n gdAway += (winGoalAway - lostGoalAway)\n # print(str(i + 1) + \": \" + str(dicsNameTeam[currentMatch[2]]) + \" vs \" + str(\n # dicsNameTeam[currentMatch[2]]) + \"|GD Home: \" + str(gdHome) +\n # \"|GD Away: \" + str(gdAway) + \" |HS: \" + str(HS) + \" |AS: \" + str(AS) + \\\n # \"|HST: \" + str(HST) + \" |AST: \" + str(AST) + \" |HF: \" + str(HF) + \" |AF: \" + \\\n # str(AF) + \" |HC: \" + str(HC) + \" |AC: \" + str(AC) + \\\n # \" |HY: \" + str(HY) + \" |AY: \" + str(AY) + \" |HR: \" + str(HR) + \" |AR: \" + str(AR) + \\\n # \"| Score Home: \" + str(scoreHome) + \" |Score away: \" + str(scoreAway) + \"| Outcome: \" + currentMatch[9])\n result = 0\n if (currentMatch[9] == \"H\"):\n result = 1\n elif (currentMatch[9] == \"A\"):\n result = -1\n\n dataTrain.append(\n [str(dicsNameTeam[currentMatch[2]]), str(dicsNameTeam[currentMatch[3]]),currentMatch[23], currentMatch[24], currentMatch[25], currentMatch[26], currentMatch[27], currentMatch[28], str(gdHome), str(gdAway), str(scoreHome), 
str(scoreAway), result])\n\n listHome = []\n listAway = []\n print(path)\n myFile = open(dirTrain + \"/datatrain1.csv\", 'a')\n with myFile:\n writer = csv.writer(myFile)\n writer.writerows(dataTrain)\n\n\n# def writeFile():\n# # myData = [[\"first_name\", \"second_name\", \"Grade\"],\n# # ['Alex', 'Brian', 'A'],\n# # ['Tom', 'Smith', 'B']]\n#\n# myData = [\n# [\"idHome\", \"idAway\", \"GD Home\", \"GD Away\", \"HS\", \"AS\", \"HST\", \"AST\", \"HF\", \"AF\",\n# \"HC\", \"AC\", \"HY\", \"AY\", \"HR\", \"AR\", \"Point Home\", \"Point Away\", \"Outcome\"]]\n# myFile = open(dirTrain + \"/datatrain.csv\", 'a')\n# with myFile:\n# writer = csv.writer(myFile)\n# writer.writerows(myData)\n\n\ndef writeDataTrain():\n listFile = fn()\n for file in listFile:\n path = dir + \"/\" + str(file)\n parseCSV(path)\n # print(file)\n\n\nif __name__ == '__main__':\n # getTeamsNamesList()\n # getInfo(teams, matchesNumber)\n # parseCSV(dir + \"/E01415.csv\")\n writeDataTrain()\n" }, { "alpha_fraction": 0.604151725769043, "alphanum_fraction": 0.6292054653167725, "avg_line_length": 24.870370864868164, "blob_id": "99b9b2b3e8847070cc58d6e62b9fb6675560be22", "content_id": "bdfc4e71b97b960305df7d5a807f263af67fab61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2794, "license_type": "no_license", "max_line_length": 126, "num_lines": 108, "path": "/src/knnHandler.py", "repo_name": "hungnmai/Predic-Football", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.preprocessing import scale\nfrom sklearn.preprocessing import MinMaxScaler\nimport csv\nimport numpy as np\nfrom numpy import genfromtxt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\ndirTrain = \"/Users/manhhung/Documents/BKHN/ML/workspace/pre_football/football-results-prediction-ml/data_train/datatrain1.csv\"\n\nmy_data = genfromtxt(dirTrain, delimiter=',')\n# print(my_data.shape)\nl = len(my_data[1])\nX = my_data[:, : l - 1]\nY = my_data[:, -1]\nX = X[:,2:]\nprint(X.shape)\n\n# print(type(X[0,0]))\n\n#\nfor i in range(X.shape[0]):\n for j in range(X.shape[1]):\n print(str(X[i,j]) + str(type(X[i,j])))\n# max = 1\n# min = 0\n# X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))\n# X_scaled = X_std * (max - min) + min\n# print(X)\n# # print(X[0])\n# scaler = MinMaxScaler()\n\n\n# scaler.fit(X)\n# scaler = scaler.fit(X)\n# X = scale(X)\n# print(X[0])\n\n# X0 = X[Y == 0,:]\n# print '\\nSamples from class 0:\\n', X0[:5,:]\n#\n# X1 = X[Y == 1,:]\n# print '\\nSamples from class 1:\\n', X1[:5,:]\n#\n# X2 = X[Y == -1,:]\n# print '\\nSamples from class 2:\\n', X2[:5,:]\n\n\ntest_size = int(len(Y) * 0.2)\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, Y, test_size=test_size)\n\n\nprint \"Training size: %d\" % len(y_train)\nprint \"Test size : %d\" % len(y_test)\n\n\ndef knn(n_n, p):\n clf = KNeighborsClassifier(n_neighbors=n_n, p=p, weights = 'distance')\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n print \"Accuracy of \" + str(n_n) + \"NN: %.2f %%\" % (100 * accuracy_score(y_test, y_pred))\n return 100 * accuracy_score(y_test, y_pred)\n\ndef random_forest():\n clf = RandomForestClassifier(n_estimators=10)\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n print \"(Accuracy of random forest 
\" + \"%.2f)\"% (100 * accuracy_score(y_test, y_pred))\n return 100 * accuracy_score(y_test, y_pred)\n\ndef svm():\n clf = SVC(C = 1.0)\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n print \"(Accuracy of SVM \" + \"%.2f)\"% (100 * accuracy_score(y_test, y_pred))\n return 100 * accuracy_score(y_test, y_pred)\n\ndef naive_bayes():\n clf = GaussianNB()\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n print \"(Accuracy of naive bayes \" + \"%.2f)\"% (100 * accuracy_score(y_test, y_pred))\n return 100 * accuracy_score(y_test, y_pred)\n\n\ndef runKNN():\n arrNN = range(2, 20)\n result = 0\n for i in arrNN:\n k = knn(i, 2)\n if k > result:\n result = k\n\n print(result)\n\n\n# random_forest()\nnaive_bayes()\nsvm()\nrunKNN()\n" } ]
3
Adharshmahesh/Machine-Learning-Adult-Dataset
https://github.com/Adharshmahesh/Machine-Learning-Adult-Dataset
cb14ebdff96614cbdbbd3d9a7e303ce3aa132725
05ea0bc45a73bd54dcb3d83ab0ae044a6f4987cc
29fa9c96826a5c45d9d735f2ed118a0f9ed892a5
refs/heads/main
2022-12-28T17:31:20.305645
2020-10-16T19:37:12
2020-10-16T19:37:12
304,721,224
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7102057933807373, "alphanum_fraction": 0.719865620136261, "avg_line_length": 38.63333511352539, "blob_id": "55b360590da0fd55fe81bdb0d504db411fca7c13", "content_id": "b11c58a16faa8b5219db3855f8ffbddfaf4596dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2381, "license_type": "no_license", "max_line_length": 161, "num_lines": 60, "path": "/Logistic Reg Scikit.py", "repo_name": "Adharshmahesh/Machine-Learning-Adult-Dataset", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport scipy\n#from loaddata import data\nimport sklearn\nimport sklearn.metrics as metrics\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\n\ncolumns= ['age','workclass','fnlwgt','education','education-num','marital-status',\n 'occupation','relationship','race','sex','capital-gain','capital-loss',\n 'hours-per-week','native-country','income']\n\n#loading dataset\nadult= pd.read_csv(r'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data',sep=\",\",names=columns, header= None)\n#to consider cat data as type category\nfor col in set(adult.columns) - set(adult.describe().columns):\n adult[col] = adult[col].astype('category')\n#adult.info()\n#display last few rows\n#adult.tail()\n#display first few rows \n#adult.head(30)\n#since this column is not much usefull for analysis\nadult=adult.drop('fnlwgt', axis=1)\n#adult\n#making target variable simple in terms of 0 and 1\nadult['income'] =[0 if x==' <=50K' else 1 for x in adult['income']]\nadult.shape #shape after converting target as 0 and 1\n# Remove invalid data from table\nadult= adult[(adult.astype(str) != ' ?').all(axis=1)]\nadult.shape #shape after removal of ?\n#Separate categorical and numberical columns\ncat= adult.dtypes[adult.dtypes == 'object']\nnum= adult.dtypes[adult.dtypes != 'object']\n#sns.heatmap(adult[list(adult.dtypes.index)].corr(),annot = True,square = True);\n# Use one-hot encoding on categorial columns\nadult = pd.get_dummies(adult, columns=['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex','native-country'],drop_first=True)\n#print(adult.iloc[:,8])\nx=adult.drop(['income'], axis = 1)\ny=adult['income']\nfrom sklearn.model_selection import train_test_split \nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=1)\n#w = 0.1*np.ones(x.shape[1])\n\nfrom sklearn.linear_model import LogisticRegression \n# create logistic regression object \nreg = LogisticRegression() \n \n# train the model using the training sets \nreg.fit(x_train, y_train) \n \n# making predictions on the testing set \ny_pred = reg.predict(x_test) \n\nw = reg.coef_ \n# comparing actual response values (y_test) with predicted response values (y_pred) \n#print(w)\nprint(\"Logistic Regression model accuracy(in %):\", \nmetrics.accuracy_score(y_test, y_pred)*100) \n\n\n" }, { "alpha_fraction": 0.6484418511390686, "alphanum_fraction": 0.6687814593315125, "avg_line_length": 27.48404312133789, "blob_id": "e9e59d5364c5fc49795f8a5e30c71ddab8ae0c1b", "content_id": "84f1efe45197e09014105484d13176e9ad801f0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5359, "license_type": "no_license", "max_line_length": 161, "num_lines": 188, "path": "/Naive Bayes - Scratch.py", "repo_name": "Adharshmahesh/Machine-Learning-Adult-Dataset", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\n#matplotlib 
inline\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport sklearn.metrics as metrics\nfrom sklearn.linear_model import LogisticRegression\nimport random\nimport math\n#giving headers to column\ncolumns= ['age','workclass','fnlwgt','education','education-num','marital-status',\n 'occupation','relationship','race','sex','capital-gain','capital-loss',\n 'hours-per-week','native-country','income']\n\n#loading dataset\nadult= pd.read_csv(r'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data',sep=\",\",names=columns, header= None)\n#to consider cat data as type category\nfor col in set(adult.columns) - set(adult.describe().columns):\n adult[col] = adult[col].astype('category')\n#adult.info()\n#display last few rows\n#adult.tail()\n#display first few rows \n#adult.head(30)\n#since this column is not much usefull for analysis\nadult=adult.drop('fnlwgt', axis=1)\n#adult\n#making target variable simple in terms of 0 and 1\nadult['income'] =[0 if x==' <=50K' else 1 for x in adult['income']]\nadult.shape #shape after converting target as 0 and 1\n# Remove invalid data from table\nadult= adult[(adult.astype(str) != ' ?').all(axis=1)]\nadult.shape #shape after removal of ?\n#Separate categorical and numberical columns\ncat= adult.dtypes[adult.dtypes == 'object']\nnum= adult.dtypes[adult.dtypes != 'object']\n#sns.heatmap(adult[list(adult.dtypes.index)].corr(),annot = True,square = True);\n# Use one-hot encoding on categorial columns\nadult = pd.get_dummies(adult, columns=['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex','native-country'],drop_first=True)\n\ndef load_data(path, header):\n df = pd.read_csv(path, header=header)\n return df\n\nclass Naive_Bayes():\n\tdef __init__(self):\n\t\tself.datadict={}\n \n\tdef fit(self,x_train,y_train):\n\t\t\n\t\t\n\t\tself.x_train=x_train\n\t\tself.y_train=y_train\n\t\tself.datadict[0]=np.array([[]])\n\t\tself.datadict[1]=np.array([[]])\n\t\tself.datadict=self.gendata(self.datadict,self.x_train,self.y_train)\n\t\tself.datadict[0]=np.transpose(self.datadict[0])\n\t\tself.datadict[1]=np.transpose(self.datadict[1])\n\t\tself.mean_0=np.mean(self.datadict[0],axis=0)\n\t\tself.mean_1=np.mean(self.datadict[1],axis=0)\n\t\tself.std_0=np.std(self.datadict[0],axis=0)\n\t\tself.std_1=np.std(self.datadict[1],axis=0)\n\n\tdef gendata(self,datadict,x_train,y_train):\n\t\tset_one=True\n\t\tset_zero=True\n\t\tfor i in range(y_train.shape[0]):\n\t\t\tx_temp=x_train[i,:].reshape(x_train[i,:].shape[0],1)\n\t\t\tif y_train[i]==1:\n\t\t\t\tif set_one==True:\n\t\t\t\t\tdatadict[1]=x_temp\n\t\t\t\t\tset_one=False\n\t\t\t\telse:\n\t\t\t\t\tdatadict[1]=np.append(datadict[1],x_temp,axis=1)\n\t\t\telif y_train[i]==0:\n\t\t\t\tif set_zero==True:\n\t\t\t\t\tdatadict[0]=x_temp\n\t\t\t\t\tset_zero=False\n\t\t\t\telse:\n\t\t\t\t\tdatadict[0]=np.append(datadict[0],x_temp,axis=1)\n\t\treturn datadict \n \n\tdef predict(self,x_test):\n \n\t\tp1=self.postprob(x_test,self.datadict[1],self.mean_1,self.std_1)\n\t\tp0=self.postprob(x_test,self.datadict[0],self.mean_0,self.std_0)\n\t\treturn (p1>p0)\n\n\tdef postprob(self,x,x_trainclass,mean_,std_):\n\t\t\t \n\t\tp=np.prod(self.likelihood(x,mean_,std_),axis=1)\n\t\tp=p*(x_trainclass.shape[0]/self.x_train.shape[0])\n\t\treturn p\n\n\tdef likelihood(self,x,mean,sigma):\n\t\tfor i in range(len(sigma)):\n\t\t\tif sigma[i] == 0:\n\t\t\t\tsigma[i] = 1\n\t\treturn np.exp(-(x-mean)**2/(2*sigma**2))*(1/(np.sqrt(2*np.pi)*sigma))\n\n\tdef calc_accuracy(self, ytest, 
pred):\n\t\t\n\n\t\treturn np.mean(ytest == pred)\n\n\tdef crossvalidation(self, xtrain, ytain, k, alpha = 0.01, iter = 50000, eps = 0.01):\n\n\t\tsize = int(len(xtrain)/k)\n\t\tcv_accuracy = 0\n\t\tz=0\n\n\t\tfor i in range(k):\n\n\t\t\tvalstart = i*size\n\t\t\tvalend = valstart + size\n\n\t\t\tif i!=(k-1):\n\t\t\t\tvalend = size\n\n\t\t\t\txval = xtrain[:valend,:]\n\t\t\t\tyval = ytrain[:valend]\n\n\t\t\t\tkxtrain = xtrain[valend:,:]\n\t\t\t\tkytrain = ytrain[valend:]\n\n\t\t\telse:\n\t\t\n\t\t\t\txval = xtrain[valstart:,:]\n\t\t\t\tyval = ytrain[valstart:]\n\n\t\t\t\tkxtrain = xtrain[:valstart,:]\n\t\t\t\tkytrain = ytrain[:valstart]\n\n\t\t\t\tkxtrain = np.concatenate((xtrain[:valstart,:],xtrain[valend:,:]), axis = 0)\n\t\t\t\tkytrain = np.concatenate((ytrain[:valstart],ytrain[valend:]))\n\n\t\t\tw_kfold = self.fit(kxtrain, kytrain)\n\t\t\t\n\t\t\tpredy = self.predict(xval)\n\t\t\tcv_accuracy = cv_accuracy + self.calc_accuracy(yval,predy)\n\t\t\t\n\t\t\t\n\t\tprint(cv_accuracy)\n\t\t\t\n\t\tcv_accuracy = cv_accuracy / k\n\n\t\treturn cv_accuracy\n\n\ntrain_data = adult.sample(frac = 0.8)\n\nxtrain = np.array(train_data.drop(columns = ['income']))\nytrain = np.array(train_data['income'])\t\ntest_data = adult.drop(train_data.index)\nxtest = np.array((test_data.drop(columns = ['income'])))\nytest = np.array((test_data['income']))\n\nnb=Naive_Bayes()\n\nnb.fit(xtrain,ytrain)\n\npred=nb.predict(xtest)\naccuracy = nb.calc_accuracy(ytest,pred)\nprint(\"Accuracy is:\", accuracy)\n\n#Function to call cross validation \n#accuracy_kfold = nb.crossvalidation(xtrain, ytrain, 5)\n#print(\"Accuracy using k-fold is:\", accuracy_kfold)\n\n\n'''\n#No of instances and accuracy plot\ninstance_vector = [100, 500, 1000, 3000, 5000, 7000]\naccuracy = []\n\nfor k in instance_vector:\n\txtr = np.array(train_data.iloc[:k,:-1])\n\tytr = np.array(train_data.iloc[:k,-1])\n\txte = np.array((test_data.iloc[:k,:-1]))\n\tyte = np.array((test_data.iloc[:k,-1]))\n\tnb.fit(xtr, ytr)\n\tp = nb.predict(xte)\n\ta1 = nb.calc_accuracy(yte, p)\n\taccuracy.append(a1)\n\tprint(accuracy)\n\nprint(accuracy)\n'''\n\n\n\n\n" }, { "alpha_fraction": 0.6291451454162598, "alphanum_fraction": 0.6524186134338379, "avg_line_length": 26.057613372802734, "blob_id": "dd2582f8cf643c5864f717a82f3f0ac405657f82", "content_id": "42a6c6ba2b5a3f01f669afaa2eb07a99902ba6e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6574, "license_type": "no_license", "max_line_length": 199, "num_lines": 243, "path": "/Logistic Regression - Scratch.py", "repo_name": "Adharshmahesh/Machine-Learning-Adult-Dataset", "src_encoding": "UTF-8", "text": "#import libraries\nimport pandas as pd\nimport numpy as np\n#matplotlib inline\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport sklearn.metrics as metrics\nfrom sklearn.linear_model import LogisticRegression\n#giving headers to column\ncolumns= ['age','workclass','fnlwgt','education','education-num','marital-status',\n 'occupation','relationship','race','sex','capital-gain','capital-loss',\n 'hours-per-week','native-country','income']\n\n#loading dataset\nadult= pd.read_csv(r'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data',sep=\",\",names=columns, header= None)\n#to consider cat data as type category\nfor col in set(adult.columns) - set(adult.describe().columns):\n adult[col] = adult[col].astype('category')\n#adult.info()\n#display last few rows\n#adult.tail()\n#display first few rows \n#adult.head(30)\n#since this 
column is not much usefull for analysis\nadult=adult.drop('fnlwgt', axis=1)\n#adult\n#making target variable simple in terms of 0 and 1\nadult['income'] =[0 if x==' <=50K' else 1 for x in adult['income']]\nadult.shape #shape after converting target as 0 and 1\n# Remove invalid data from table\nadult= adult[(adult.astype(str) != ' ?').all(axis=1)]\nadult.shape #shape after removal of ?\n#Separate categorical and numberical columns\ncat= adult.dtypes[adult.dtypes == 'object']\nnum= adult.dtypes[adult.dtypes != 'object']\n#sns.heatmap(adult[list(adult.dtypes.index)].corr(),annot = True,square = True);\n# Use one-hot encoding on categorial columns\nadult = pd.get_dummies(adult, columns=['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex','native-country'],drop_first=True)\n#adult\n#print(adult['income'])\n#print(adult)\n\nclass Logistic_Regression:\n\n\tdef __init__(self, w):\n\t\tself.w = w\n\n\tdef sigmoidal(self, l):\n\t\n\t\treturn (1/(1+np.exp(-l)))\n\t\n\tdef gradient(self, xtrain, ytrain):\n\t\tz = self.sigmoidal(np.transpose(self.w).dot(np.transpose(xtrain)))\n\t\tdeltaW = np.transpose(xtrain).dot(np.transpose(ytrain - z))\n\n\t\treturn deltaW\n\t\t\n\n\tdef fit(self, xtrain, ytrain, lr=0.01, iter=50000, eps=0.001, normal = True):\n\n\t\t\n\t\tcost = 0\n\t\tcost1 =list()\n\t\tconst = 1e-5 #To prevent zero\n\t\tytrain = ytrain[np.newaxis]\n\t\tnumiter = 1\n\t\t#print(ytrain.shape)\n\t\tfor i in range(iter):\n\t\t\t\n\t\t\tcost = -1*(ytrain.dot(np.log(self.sigmoidal(np.transpose(w).dot(np.transpose(xtrain))).T+const)) + (1-ytrain).dot(np.log(1-self.sigmoidal(np.transpose(self.w).dot(np.transpose(xtrain))).T+const)))\n\t\t\tcost1.append(cost)\n\n\t\t\tg = self.gradient(xtrain,ytrain)\n\t\t\t\n\t\t\tself.w = self.w+(lr * g)\n\t\t\t\n\t\t\tif(np.linalg.norm(g)<eps):\n\t\t\t\tbreak\n\t\t\tnumiter = numiter + 1\n\t\t\n\t\treturn self.w\n\n\tdef predict(self, xtest, normal = True):\n\t\t\n\t\tpred = (self.sigmoidal((self.w.T.dot(xtest.T))))\n\t\tfor i in range(len(pred)):\n\t\t\tfor j in range(len(pred[i])):\n\t\t\t\tif pred[i][j]==0:\n\t\t\t\t\tpred[i][j] = 0\n\t\t\t\telif pred[i][j] ==1:\n\t\t\t\t\tpred[i][j] = 1\n\t\t\t\telif pred[i][j]<0.5:\n\t\t\t\t\tpred[i][j] = 0\n\t\t\t\telse:\n\t\t\t\t\tpred[i][j] = 1 \n\t\t\n\t\t\n\t\treturn pred\n\t\t\n\n\tdef calc_accuracy(self, ytest, pred):\n\t\t\n\n\t\treturn np.mean(ytest == pred)\n\t\t \n\n\tdef conf_matrix(self, ytest, pred):\n\n\t\tcm = np.zeros(shape = (2,2))\n\n\t\tfor i in range(len(pred)):\n\t\t\tfor j in range(len(pred[i])):\n\t\t\t\tif ytest[i] == 0:\n\t\t\t\t\tif pred[i][j] == 0:\n\t\t\t\t\t\t\tcm[1][1] += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tcm[1][0] += 1\n\n\t\t\t\telif ytest[i] == 1:\n\t\t\t\t\tif pred[i][j] == 1:\n\t\t\t\t\t\tcm[0][0] += 1\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tcm[0][1] += 1\n\t\tpositive = cm[0][0] + cm[1][0]\n\t\tnegative = cm[0][1] + cm[1][1]\n\n\t\taccuracy_cm = (cm[0][0] + cm[1][1]) / (positive + negative)\n\t\t#precision = cm[0][0] / (cm[0][0] + cm[0][1])\n\t\t#recall = cm[0][0] / positive\n\t\t#f_measure = (2*recall*precision)/ (recall + precision)\n\t\t\n\t\treturn accuracy\n\n\tdef crossvalidation(self, xtrain, ytain, k, alpha=0.01 , iter=50000, eps = 0.01):\n\t\t\n\t\tsize = int(len(xtrain)/k)\n\t\tcv_accuracy = 0\n\n\t\tfor i in range(k):\n\n\t\t\tvalstart = i*size\n\t\t\tvalend = valstart + size\n\n\t\t\tif i!=(k-1):\n\t\t\t\tvalend = size\n\n\t\t\t\txval = xtrain[:valend,:]\n\t\t\t\tyval = ytrain[:valend]\n\n\t\t\t\tkxtrain = xtrain[valend:,:]\n\t\t\t\tkytrain = 
ytrain[valend:]\n\n\t\t\telse:\n\t\t\n\t\t\t\txval = xtrain[valstart:,:]\n\t\t\t\tyval = ytrain[valstart:]\n\n\t\t\t\tkxtrain = xtrain[:valstart,:]\n\t\t\t\tkytrain = ytrain[:valstart]\n\n\t\t\t\tkxtrain = np.concatenate((xtrain[:valstart,:],xtrain[valend:,:]), axis = 0)\n\t\t\t\tkytrain = np.concatenate((ytrain[:valstart],ytrain[valend:]))\n\n\t\t\tw_kfold = self.fit(kxtrain, kytrain, alpha, iter)\n\t\t\t\n\t\t\tpredy = self.predict(xval)\n\t\t\t\n\t\t\tcv_accuracy = cv_accuracy+self.calc_accuracy(yval, predy)\n\t\t\tprint(cv_accuracy)\n\n\t\tcv_accuracy = cv_accuracy / k\n\n\t\treturn cv_accuracy\n\na = np.ones((len(adult),1),dtype = int)\nadult.insert(0,0,a, True)\n\n\t\ntrain_data = adult.sample(frac = 0.8)\n\nxtrain = np.array(train_data.drop(columns = ['income']))\nytrain = np.array(train_data['income'])\t\ntest_data = adult.drop(train_data.index)\nxtest = np.array((test_data.drop(columns = ['income'])))\nytest = np.array((test_data['income']))\n\t\nw = np.array(np.transpose(np.zeros((xtrain.shape[1]))[np.newaxis]))\nLR = Logistic_Regression(w)\nw = LR.fit(xtrain, ytrain)\n\n\npred = LR.predict(xtest)\n\t\naccuracy = LR.calc_accuracy(ytest,pred)\nprint(\"Accuracy is:\", accuracy)\n\n#Function call for k-fold validation\n#accuracy_kfold = LR.crossvalidation(xtrain, ytrain, 5)\n#print(\"Accuracy using k-fold is:\", accuracy_kfold)\n#accuracy_cm, precision, recall, f_measure= LR.conf_matrix(ytest, pred)\n\n#print(\"Accuracy using confusion matrix is:\", accuracy_cm)\n#print(\"Precision is:\", precision)\n#print(\"Recall is:\", recall)\n#print(\"F - measure is:\", f_measure)\n\n#Alpha and Number of Iterations plot\n'''\nalpha_vector = [0.001, 0.01, 0.05, 0.1, 0.5, 1]\naccuracy_cv = []\n\nfor j in alpha_vector:\n\n\tw = np.array(np.transpose(np.zeros((xtrain.shape[1]))[np.newaxis]))\n\tLR = Logistic_Regression(w)\n\taccuracy_cv.append(LR.crossvalidation(xtrain, ytrain, 5, j, 500))\nprint(accuracy_cv)\nplt.plot(alpha_vector, accuracy_cv, '.-')\nplt.title('Accuracy vs Learning rate alpha for adult data ')\nplt.xlabel('Learning rate')\nplt.ylabel('Accuracy using cross validation')\nplt.show()'''\n\n#No of instances and accuracy plot\n'''\ninstance_vector = [100, 500, 1000, 3000, 5000, 7000]\naccuracy = []\n\nfor k in instance_vector:\n\txtr = np.array(train_data.iloc[:k,:-1])\n\tytr = np.array(train_data.iloc[:k,-1])\n\txte = np.array((test_data.iloc[:k,:-1]))\n\tyte = np.array((test_data.iloc[:k,-1]))\n\tw = np.array(np.transpose(np.zeros((xtrain.shape[1]))[np.newaxis]))\n\tLR = Logistic_Regression(w)\n\tw = LR.fit(xtr, ytr)\n\tp = LR.predict(xte)\n\ta1 = LR.calc_accuracy(yte, p)\n\taccuracy.append(a1)\n\tprint(accuracy)\n\nprint(accuracy)'''" }, { "alpha_fraction": 0.6911564469337463, "alphanum_fraction": 0.7006802558898926, "avg_line_length": 37.70175552368164, "blob_id": "2e6f60beb631aa788e5873cf1f5519739d946208", "content_id": "f9f3335eb32ea1103e9c028db4928e5c3bce47f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2205, "license_type": "no_license", "max_line_length": 161, "num_lines": 57, "path": "/Data Analysis.py", "repo_name": "Adharshmahesh/Machine-Learning-Adult-Dataset", "src_encoding": "UTF-8", "text": "#import libraries\nimport pandas as pd\nimport numpy as np\n#matplotlib inline\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport sklearn.metrics as metrics\nfrom sklearn.linear_model import LogisticRegression\n#giving headers to column\ncolumns= 
['age','workclass','fnlwgt','education','education-num','marital-status',\n 'occupation','relationship','race','sex','capital-gain','capital-loss',\n 'hours-per-week','native-country','income']\n\n#loading dataset\nadult= pd.read_csv(r'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data',sep=\",\",names=columns, header= None)\n#to consider cat data as type category\nfor col in set(adult.columns) - set(adult.describe().columns):\n adult[col] = adult[col].astype('category')\n#adult.info()\n#display last few rows\n#adult.tail()\n#display first few rows \n#adult.head(30)\n#since this column is not much usefull for analysis\nadult=adult.drop('fnlwgt', axis=1)\n#adult\n#making target variable simple in terms of 0 and 1\nadult['income'] =[0 if x==' <=50K' else 1 for x in adult['income']]\nadult.shape #shape after converting target as 0 and 1\n# Remove invalid data from table\nadult= adult[(adult.astype(str) != ' ?').all(axis=1)]\nadult.shape #shape after removal of ?\n#Separate categorical and numberical columns\ncat= adult.dtypes[adult.dtypes == 'object']\nnum= adult.dtypes[adult.dtypes != 'object']\nsns.heatmap(adult[list(adult.dtypes.index)].corr(),annot = True,square = True);\nplt.show()\n# Use one-hot encoding on categorial columns\nadult = pd.get_dummies(adult, columns=['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex','native-country'],drop_first=True)\n\ncorr = adult.corr()\ncolumns = np.full((corr.shape[0],), True, dtype=bool)\nfor i in range(corr.shape[0]):\n for j in range(i+1, corr.shape[0]):\n if corr.iloc[i,j] >= 0.5:\n print(i,j)\n \n else:\n #print(i)\n continue\n#Correlation with output variable\ncor_target = abs(corr.iloc[:,-1])#Selecting highly correlated features\nrelevant_features = cor_target[cor_target>0.5]\nprint(relevant_features)\n\nsns.boxplot(data=adult['hours-per-week'])\nplt.show()" }, { "alpha_fraction": 0.7178281545639038, "alphanum_fraction": 0.7255237102508545, "avg_line_length": 35, "blob_id": "9baaa85025dbdc7518a872bce7c136ac9078e357", "content_id": "b018ab9921548ba4351f0c3bf595962547473801", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2339, "license_type": "no_license", "max_line_length": 161, "num_lines": 65, "path": "/Naive Bayes Scikit.py", "repo_name": "Adharshmahesh/Machine-Learning-Adult-Dataset", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport scipy\n#from loaddata import data\nimport sklearn\nimport sklearn.metrics as metrics\nimport matplotlib.pyplot as plt\n\ncolumns= ['age','workclass','fnlwgt','education','education-num','marital-status',\n 'occupation','relationship','race','sex','capital-gain','capital-loss',\n 'hours-per-week','native-country','income']\n\n#loading dataset\nadult= pd.read_csv(r'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data',sep=\",\",names=columns, header= None)\n#to consider cat data as type category\nfor col in set(adult.columns) - set(adult.describe().columns):\n adult[col] = adult[col].astype('category')\n#adult.info()\n#display last few rows\n#adult.tail()\n#display first few rows \n#adult.head(30)\n#since this column is not much usefull for analysis\nadult=adult.drop('fnlwgt', axis=1)\n#adult\n#making target variable simple in terms of 0 and 1\nadult['income'] =[0 if x==' <=50K' else 1 for x in adult['income']]\nadult.shape #shape after converting target as 0 and 1\n# Remove invalid data from table\nadult= adult[(adult.astype(str) != ' 
?').all(axis=1)]\nadult.shape #shape after removal of ?\n#Separate categorical and numberical columns\ncat= adult.dtypes[adult.dtypes == 'object']\nnum= adult.dtypes[adult.dtypes != 'object']\n#sns.heatmap(adult[list(adult.dtypes.index)].corr(),annot = True,square = True);\n# Use one-hot encoding on categorial columns\nadult = pd.get_dummies(adult, columns=['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex','native-country'],drop_first=True)\n#print(adult.iloc[:,8])\nx=adult.drop(['income'], axis = 1)\ny=adult['income']\n\nfrom sklearn.model_selection import train_test_split \nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=1)\n\nfrom sklearn.naive_bayes import GaussianNB\nclassifier = GaussianNB()\n\nclassifier.fit(x_train, y_train)\n\ny_pred = classifier.predict(x_test)\n\nfrom sklearn.metrics import confusion_matrix\ncm=confusion_matrix(y_test,y_pred)\nprint(cm)\nprint(metrics.accuracy_score(y_test, y_pred))\n\nfrom sklearn.linear_model import Lasso\nlasso = Lasso()\nlasso.fit(x_train,y_train)\ntrain_score=lasso.score(x_train,y_train)\ntest_score=lasso.score(x_test,y_test)\ncoeff_used = np.sum(lasso.coef_==0)\n\n\nprint(\"coeff_used is\", coeff_used)" } ]
5
BStandage/K-mer_Palindrome
https://github.com/BStandage/K-mer_Palindrome
a34fea80943184d353fc67f2db740bc9c8c700ac
f1dc2ef681013eea6b541ad3204864521274dddd
e2dee3483613eefb2688128713f44d895515e9c5
refs/heads/master
2020-03-27T09:16:02.482822
2018-08-28T22:09:02
2018-08-28T22:09:02
146,326,285
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5074116587638855, "alphanum_fraction": 0.5119726061820984, "avg_line_length": 21.461538314819336, "blob_id": "a3d41ef944e8934cdc1a699245851b743764a751", "content_id": "08259326e4a7016d44842bfb2b47a7529f7b568c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 877, "license_type": "no_license", "max_line_length": 77, "num_lines": 39, "path": "/palindromes.py", "repo_name": "BStandage/K-mer_Palindrome", "src_encoding": "UTF-8", "text": "import itertools\n\ndef is_palindrome(dna):\n k = len(dna)\n revcomp = ''\n\n for i in range(k):\n if dna[k - 1 - i] == 'A':\n revcomp += 'T'\n elif dna[k - 1 - i] == 'T':\n revcomp += 'A'\n elif dna[k - 1 - i] == 'C':\n revcomp += 'G'\n elif dna[k - 1 - i] == 'G':\n revcomp += 'C'\n\n if revcomp == dna:\n return True\n else:\n return False\n\n'''Produce all k-mer palindromes'''\ndef palindromes(k):\n reverse_conjugates = []\n neucleotides = 'ATCG'\n permutations = list(map(list, itertools.product(neucleotides, repeat=k)))\n str = ''\n\n for p in permutations:\n if is_palindrome(str.join(p)):\n reverse_conjugates.append(str.join(p))\n\n return reverse_conjugates\n\n\n\nif __name__ == '__main__':\n k = int(input('Enter a value for k: '))\n print(palindromes(k))\n\n" }, { "alpha_fraction": 0.7885714173316956, "alphanum_fraction": 0.7885714173316956, "avg_line_length": 86.5, "blob_id": "41f3fa788f799fbef7b513651595148cf26ed3d9", "content_id": "cc2090c4ed863d767d076bfab3ef81b80fc217ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 175, "license_type": "no_license", "max_line_length": 155, "num_lines": 2, "path": "/README.md", "repo_name": "BStandage/K-mer_Palindrome", "src_encoding": "UTF-8", "text": "# K-mer_Palindrome\nA given k-mer is a palindrome if its reverse compliment is identical to itself. Given a value for k, this function returns all k-mers that are palindromes.\n" } ]
2
Kluny/RocketshipsHome
https://github.com/Kluny/RocketshipsHome
15e346499f2213f3b5067f78395d58d1ccf7ed88
04565879cddc990f89c9b5807d542c2f7f6a173c
838e5c0d0ee1c8ffafbde73e5a32435c0dff552a
refs/heads/master
2016-09-11T07:42:03.386322
2013-01-09T18:13:27
2013-01-09T18:13:27
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5625494718551636, "alphanum_fraction": 0.5752177238464355, "avg_line_length": 29.97468376159668, "blob_id": "da97935d75214305e5a701904330f3db8d93edcf", "content_id": "cb8a22fc8394e8b333a71d530dbafca270800ce6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 2526, "license_type": "no_license", "max_line_length": 121, "num_lines": 79, "path": "/bookgallery.php", "repo_name": "Kluny/RocketshipsHome", "src_encoding": "UTF-8", "text": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\r\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\r\n<head>\r\n\r\n<?php\r\n\trequire(\"library.php\");\r\n?>\r\n\r\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\r\n<title>RocketShips</title>\r\n<link href=\"res.css\" rel=\"stylesheet\" type=\"text/css\" />\r\n<link href=\"bookgallery.css\" rel=\"stylesheet\" type=\"text/css\" />\r\n\r\n<link href=\"css/jquery.lightbox-0.5.css\" rel=\"stylesheet\" type=\"text/css\" media=\"screen\" />\r\n\r\n<script type=\"text/javascript\" src=\"js/jquery.js\"></script>\r\n<script type=\"text/javascript\" src=\"js/jquery.lightbox-0.5.min.js\"></script>\r\n\r\n<script type=\"text/javascript\">\r\n\r\n var _gaq = _gaq || [];\r\n _gaq.push(['_setAccount', 'UA-28733592-1']);\r\n _gaq.push(['_trackPageview']);\r\n\r\n (function() {\r\n var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;\r\n ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';\r\n var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);\r\n })();\r\n\r\n</script>\r\n\r\n<script type=\"text/javascript\">\r\n\r\n$(function() {\r\n\r\n$('#gallery a').lightBox();\r\n\r\n});\r\n\r\n</script>\r\n\r\n</head>\r\n\r\n<body>\r\n<div class=\"wrapper\">\r\n\t\r\n\t<div class=\"header\">\r\n\t\t<ul>\r\n\t\t\t<li><a href='www.rocketships.ca'>ROCKETSHIPS</a></li>\r\n\t\t\t<li><a href='http://rocketships.ca/blog/'>Blog</a></li>\r\n\t\t\t<li><a href='https://github.com/Kluny/'>Github</a></li>\r\n\t\t\t<li><a href='mailto:[email protected]'>Email</a></li>\r\n\t\t\t<li><a href='CIresume.pdf'>PDF Copy</a></li>\r\n\t\t\t<li><a href='hello.php'>Hello!</a></li>\r\n\t\t\t<li><a href='bookgallery.php' id=\"onlink\">Current Project</a></li>\r\n\t\t</ul>\r\n\t</div>\r\n\t<!-- the inline height attribute is temporary and should be moved to css asap.-->\r\n\t<div class=\"lightbox\" id=\"taller\" style=\"height:350em;\">\r\n <div class=\"bookgallery\">\r\n <p>Books that were good enough to read more than once.</p>\r\n <div id=\"gallery\"> \r\n <?php\r\n put(0);\r\n ?>\r\n <p>Robert A Heinlein - for when you have time to sit and read an entire book all at once.</p> \r\n <?php\r\n put(1);\r\n \t?>\r\n <p>Terry Pratchett - for when you're cool with laughing out loud, potentially in a public place.</p>\r\n <?php\r\n put(2); \r\n ?>\r\n </div><!-- end gallery-->\r\n </div> <!-- end bookgallery -->\r\n\t</div> <!--end lightbox-->\r\n</body>\r\n</html>\r\n" }, { "alpha_fraction": 0.6442105174064636, "alphanum_fraction": 0.6505263447761536, "avg_line_length": 32.85714340209961, "blob_id": "99d1a38abf01675e2cc0246931a24c114605a3c8", "content_id": "7f034ca2770544cd20e11244c203fc78a4c944e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 475, "license_type": "no_license", "max_line_length": 210, 
"num_lines": 14, "path": "/images/script.py", "repo_name": "Kluny/RocketshipsHome", "src_encoding": "UTF-8", "text": "picfile = open(\"list_pictures2.txt\")\nthumbfile = open(\"thumbs/list_thumbs2.txt\")\n\nwhile(1):\n firstline = picfile.readline();\n secondline = thumbfile.readline();\n if not firstline:\n break\n pass \n line = \"<book>\\n\\t<pathToPic>\\n\\t\\timages/\" + firstline + \"\\t</pathToPic>\\n\\t<pathToThumb>\\n\\t\\timages/thumbs/\" + secondline + \"\\t</pathToThumb>\\n\\t<title>\\n\\n\\t</title>\\n\\t<author>\\n\\n\\t</author>\\n</book>\"\n print line\n\npicfile.close()\nthumbfile.close() " }, { "alpha_fraction": 0.6057007312774658, "alphanum_fraction": 0.6128265857696533, "avg_line_length": 31.384614944458008, "blob_id": "bb7dbc9712c6e98f7f9bd17d62a7560f281478b6", "content_id": "3a9839efadb669bb4311bb8843208a24c2dfe41e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 421, "license_type": "no_license", "max_line_length": 106, "num_lines": 13, "path": "/email.php", "repo_name": "Kluny/RocketshipsHome", "src_encoding": "UTF-8", "text": "<?php\n\tif (isset($_POST['contact_name']) && (isset($_POST['contact_email']) && (isset($_POST['contact_message'])\n\n?>\n\n\n<form action=\"email.php\" method=\"POST\"/>\n\tName: <br/> <input type=\"text\" name=\"contact_name\"><br/><br/>\n\tEmail: <br/><input type=\"text\" name=\"contact_email\"><br/><br/>\n\tMessage:<br/>\n\t<textarea name=\"contact_text\" rows=\"6\" cols=\"20\"></textarea><br/><br/>\n\t<input type=\"submit\" value=\"Send\"/>\t\t\n</form>\n" }, { "alpha_fraction": 0.7338935732841492, "alphanum_fraction": 0.7394958138465881, "avg_line_length": 17.789474487304688, "blob_id": "145ca935b9c1c4f647e1b5114c338ae195c6c0fd", "content_id": "a7001cc16eb408c59c786e3e8f557bec1fde8427", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 357, "license_type": "no_license", "max_line_length": 101, "num_lines": 19, "path": "/README.md", "repo_name": "Kluny/RocketshipsHome", "src_encoding": "UTF-8", "text": "RocketshipsHome\n===============\n\nJust a resume. 
Some features:\n\nFancy tabbed menu - complete\n\nExtra impressive resume - complete(for now)\n\nBook gallery - working copy available, still in progress\n\n\nTodo: \n\nEmail contact form\n\nMake the book gallery more efficient - only display 15 or so images at a time instead of all of them.\n\nAdd amazon affiliate links.\n" }, { "alpha_fraction": 0.5043572783470154, "alphanum_fraction": 0.515250563621521, "avg_line_length": 25.22857093811035, "blob_id": "487ddddc472a9741e7934c849192c64f612a9e3e", "content_id": "37214ec03e0a0f2d0c6acf20582e1365c8fdbec8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 918, "license_type": "no_license", "max_line_length": 100, "num_lines": 35, "path": "/library.php", "repo_name": "Kluny/RocketshipsHome", "src_encoding": "UTF-8", "text": "<?php\nerror_reporting(0);\n\n\nfunction put($section) {\n\t//variables\n\t\n\t$xml = simplexml_load_file(\"library.xml\");\n\t$root = $xml->xpath(\"//section\");\t\n\t\n\t$count = count($root[$section]->book);\n\t\t\n\tfor ($bookNumber = 0; $bookNumber < $count; $bookNumber++) {\t\t\n\t\t\n\t\t$pathToPic = $root[$section]->book[$bookNumber]->pathToPic;\n\t\t$pathToThumb = $root[$section]->book[$bookNumber]->pathToThumb;\n\t\t$title = $root[$section]->book[$bookNumber]->title;\n\t\t$author = $root[$section]->book[$bookNumber]->author;\n\t\t\n\t\t\n\t\techo \"<div class='holder'>\n <div class='thumb'> \n <a href='\".$pathToPic.\"'><img src='\".$pathToThumb.\"' width='140' height='120' /></a>\n </div>\";\n \n echo \"<div class='title'>\n <h3>\".$title.\"<br />\".$author.\"</h3>\n </div>\";\n \n \techo \"<br class='clearFloat' /></div><!-- end holder -->\";\n \n }\n}\n\n?>\n" } ]
5
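`images/script.py` in the record above assembles the `<book>` XML entries by string concatenation, which leaves the `<title>`/`<author>` fields empty and would emit invalid XML if a filename ever contained `&` or `<`. A sketch of the same loop built on `xml.etree.ElementTree`, which escapes such characters automatically; `book_element` is an illustrative helper name, and the two list files are the ones the original script reads:

```python
import xml.etree.ElementTree as ET

def book_element(pic_path, thumb_path, title='', author=''):
    # Build one <book> entry as a tree rather than by string concatenation.
    book = ET.Element('book')
    ET.SubElement(book, 'pathToPic').text = 'images/' + pic_path
    ET.SubElement(book, 'pathToThumb').text = 'images/thumbs/' + thumb_path
    ET.SubElement(book, 'title').text = title
    ET.SubElement(book, 'author').text = author
    return book

# Mirror the two-parallel-list-files loop from script.py.
with open('list_pictures2.txt') as pics, open('thumbs/list_thumbs2.txt') as thumbs:
    for pic, thumb in zip(pics, thumbs):
        entry = book_element(pic.strip(), thumb.strip())
        print(ET.tostring(entry, encoding='unicode'))
```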
hip-property/jailbreak
https://github.com/hip-property/jailbreak
1c6a19baaa535071955d9c06a91e87e4db2536da
4c27aee04f72018e987aabab8a96f274539a8264
6921508480dc5ea940f196c14f28fe48382c311d
refs/heads/master
2020-03-20T00:54:52.561830
2018-06-12T13:25:34
2018-06-12T13:25:34
137,060,493
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7164179086685181, "alphanum_fraction": 0.7238805890083313, "avg_line_length": 32.5, "blob_id": "4bea54026fbe7b42838edbde10b26fb8b5631af0", "content_id": "aed6c4c84883c4cc7a61930844d06944b086bd7f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 134, "license_type": "permissive", "max_line_length": 50, "num_lines": 4, "path": "/setup.sh", "repo_name": "hip-property/jailbreak", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\nchmod +x jailbreak.py\nrm /usr/local/bin/jailbreak 2> /dev/null\nln -s $(pwd)/jailbreak.py /usr/local/bin/jailbreak\n" }, { "alpha_fraction": 0.7444279193878174, "alphanum_fraction": 0.7444279193878174, "avg_line_length": 21.433332443237305, "blob_id": "14b2359e17303f6650b2e45512956fb4460d8ddf", "content_id": "06d6df6e97705a51b400b95e78db023517b3a164", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 673, "license_type": "permissive", "max_line_length": 85, "num_lines": 30, "path": "/README.md", "repo_name": "hip-property/jailbreak", "src_encoding": "UTF-8", "text": "# Jailbreak\nUtil for exporting directories from mono-repos to their own\nrepository.\n\nUseful for open-sourcing a portion of your repository, and keeping\nupdates in sync.\n\n## Installation\nClone this repo, then run `setup.sh`.\n\nThis script simply symlinks `jailbreak.py` to where you checked it out.\n\n> **You should read `setup.sh` for yourself before running**\n\n## Usage\nIn the directories you wish to push to a public repository, add a `.jailbreak` file, \ncontaining the git url of the open source repository you wish to publish to.\n\neg:\n\n`foo/.jailbreak:`\n```\[email protected]:hip-property/jailbreak.git\n``` \n\nThen, from the root of your repository, run:\n\n```bash\njailbreak foo\n```\n" }, { "alpha_fraction": 0.7138158082962036, "alphanum_fraction": 0.7160087823867798, "avg_line_length": 42.42856979370117, "blob_id": "e430c4000eef911b8d56aa4c92ec42fffe8b39cb", "content_id": "d5f23fc5b12d4f238c789ad4f6b1ca74a1acfddb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1824, "license_type": "permissive", "max_line_length": 124, "num_lines": 42, "path": "/jailbreak.py", "repo_name": "hip-property/jailbreak", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3.6\nimport subprocess\nimport tempfile\nimport shutil\nimport argparse\nimport os\n\nparser = argparse.ArgumentParser(\"Exports a single directory within the project to a seperate repository\")\nparser.add_argument(\"dir\", help=\"The directory to be exported to a seperate repo\", )\nargs = parser.parse_args()\n\ndestRepo = open(\"./\" + args.dir + \"/.jailbreak\", \"r\").read()\n\n# Find the current git remote\ncommandResult = subprocess.run([\"git\", \"remote\", \"-v\"], universal_newlines=True, stdout=subprocess.PIPE)\n# origin [email protected]:hipproperty/jailbreak.git (fetch)\nremotes = commandResult.stdout.splitlines()\n# [email protected]:hipproperty/jailbreak.git\nremoteUrl = remotes[0].split()[1]\n\ngitBranch = subprocess.run([\"git\", \"rev-parse\", \"--abbrev-ref\", \"HEAD\"], universal_newlines=True,\n stdout=subprocess.PIPE)\ncurrentBranch = gitBranch.stdout.rstrip()\n\nprint(\"We're gonna jailbreak '{}' to repo {} on branch {}\".format(args.dir, destRepo, currentBranch))\nresponse = input(\"Press enter to confirm, or cancel to abort\")\n\ntempDir = 
tempfile.mkdtemp()\n\nprint(\"Cloning \" + remoteUrl + \" to \" + tempDir)\nsubprocess.run([\"git\", \"clone\", remoteUrl, tempDir], universal_newlines=True, stdout=subprocess.PIPE).check_returncode()\n\nprint(\"Removing original origin remote\")\nsubprocess.run([\"git\", \"remote\", \"remove\", \"origin\"], cwd=tempDir).check_returncode()\nprint(\"Pruning\")\nsubprocess.run([\"git\", \"filter-branch\", \"--prune-empty\", \"--subdirectory-filter\", args.dir], cwd=tempDir).check_returncode()\nprint(\"Adding upstream remote of \" + destRepo)\nsubprocess.run([\"git\", \"remote\", \"add\", \"origin\", destRepo], cwd=tempDir).check_returncode()\nprint(\"Pushing to \" + destRepo)\nsubprocess.run([\"git\", \"push\", \"-u\", \"origin\", currentBranch], cwd=tempDir).check_returncode()\nprint(\"Cleaning up\")\nshutil.rmtree(tempDir)\n" } ]
3
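The `jailbreak.py` in the record above relies on `git filter-branch --subdirectory-filter`, which upstream git has since deprecated in favour of the external `git filter-repo` tool. A sketch of the same clone–filter–push flow using that tool instead, kept in Python to mirror the script; `jailbreak_with_filter_repo` is an illustrative name, and `git filter-repo` must be installed separately (it ships with neither git nor this repository):

```python
import subprocess
import tempfile

def jailbreak_with_filter_repo(src_url, subdir, dest_url, branch):
    tmp = tempfile.mkdtemp()
    subprocess.run(["git", "clone", src_url, tmp], check=True)
    # Rewrite history so that only `subdir` survives, promoted to the root.
    # filter-repo also removes the origin remote as a safety measure.
    subprocess.run(["git", "filter-repo", "--subdirectory-filter", subdir],
                   cwd=tmp, check=True)
    subprocess.run(["git", "remote", "add", "origin", dest_url], cwd=tmp, check=True)
    subprocess.run(["git", "push", "-u", "origin", branch], cwd=tmp, check=True)
```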
haokui/bioinformatics
https://github.com/haokui/bioinformatics
64f7ade47121f167ce2f3a55004cd80b41b896f5
83a53771222ecb0759e3b4bfa2018d2cd7647643
2fbce5c28f3c09cd5ebabce21463200a84c9aa90
refs/heads/master
2018-03-11T03:01:43.710225
2012-08-29T11:13:07
2012-08-29T11:13:07
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4909529685974121, "alphanum_fraction": 0.49256131052970886, "avg_line_length": 33.52777862548828, "blob_id": "6a5bcecfcbd88397c64af90634eba7e094287b29", "content_id": "ff38b0ac6c6c0f1a58d5526ba113a61a510befd4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2487, "license_type": "permissive", "max_line_length": 100, "num_lines": 72, "path": "/python/generic/flat2line.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport getopt # comand line argument handling\nfrom collections import defaultdict\nfrom low import * # custom functions, written by myself\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f flat file to import\" )\n stdout( \" -d delimiter (default: ', ' | allowed: any string, tab, space\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:p:d:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['file'] = value\n if key == '-d': args['delimiter'] = value\n \n if not args.has_key('file'):\n stderr( \"import file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"import file does not exist.\" )\n show_help()\n \n if not args.has_key('delimiter'): # or args.get('delimiter') not in [ \";\", \",\", \"tab\", \"space\" ]: \n args['delimiter'] = ', '\n elif args['delimiter'] == \"tab\": args['delimiter'] = \"\\t\"\n elif args['delimiter'] == \"space\": args['delimiter'] = \" \"\n\n return args\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n\n hash = defaultdict(list)\n fo = open( args.get('file') )\n for line in fo:\n line = line.rstrip()\n key, value = line.split(\"\\t\")\n hash[key].append(value)\n fo.close()\n \n for key, values in hash.iteritems():\n print key + \"\\t\" + string.join(values, args.get('delimiter'))\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5577822327613831, "alphanum_fraction": 0.5664662718772888, "avg_line_length": 34.22352981567383, "blob_id": "e7c16d7ff9e043ecb869fbcf8944dd6a67ffb07a", "content_id": "c2380a04ab527d9b69b25943d8e5d2b8eeafb590", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2994, "license_type": "permissive", "max_line_length": 187, "num_lines": 85, "path": "/python/blast/parse-blastout-xml.py", "repo_name": "haokui/bioinformatics", 
"src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys, getopt, string\nfrom Bio.Seq import Seq\nfrom Bio.Blast import NCBIXML\nfrom Bio.Alphabet import IUPAC\n\n#==============================================================================\ndef show_help():\n print \"\"\"%s parses BLASTX XML output to STDOUT\n \n Options:\n -f:\\tBLASTX output in XML format\n -n:\\tnumber of best hits to be parsed (default: 1)\n -e:\\tmaximum e-value to accept hits (default: 1e-5)\n\n\tWhat this program does:\n\tIt takes the best hit's start and endposition from BLAST, applies it to the sequence in your query (e.g. the CAP3-output),\n\tand translates to the left resp. right from the start resp. end of your CAP3-output, until a Start-orStopcodon appears.\n \"\"\" % sys.argv[0]\n\n sys.exit(1)\n\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n sys.stderr.write( \"no arguments provided.\\n\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:n:e:\" )\n except getopt.GetoptError:\n sys.stderr.write( \"invalid arguments provided.\\n\" )\n show_help()\n\n args = {}\n args['numhits'] = 1\n args['evalue'] = float('1e-5')\n for key, value in keys:\n if key == '-f': args['blastfile'] = value\n if key == '-n': args['numhits'] = int(value)\n if key == '-e': args['evalue'] = float(value)\n \n if not args.has_key('blastfile'):\n sys.stderr.write( \"blastx XML file argument missing.\\n\" )\n show_help()\n elif not os.path.exists( args.get('blastfile') ) or not os.path.isfile( args.get('blastfile') ):\n sys.stderr.write( \"blastx XML file does not exist.\\n\" )\n show_help()\n\n return args\n\n\n#==============================================================================\ndef main(args):\n #print \"Working...\"\n header = ['query', 'hit', 'frame', 'query_startpos', 'query_endpos', 'subject_startpos', 'subject_endpos', 'evalue', 'score']\n print '#', string.join(header, \"\\t\")\n XML = open( args.get('blastfile') )\n blast_records = NCBIXML.parse(XML)\n\n for i in blast_records:\n # print i.query\n count = 0\n while count < args.get('numhits'):\n count += 1\n hit = i.alignments.pop(0)\n hsp = hit.hsps[0]\n if hsp.expect > args.get('evalue'): break\n# print i.query, hit.title.split()[0], hsp.frame[0], hsp.query_start, hsp.query_start -1+ len(hsp.query)*3, hsp.sbjct_start, hsp.sbjct_start -1+ len(hsp.sbjct), hsp.expect, hsp.score\n print string.join([i.query, hit.title.split()[0], \n str(hsp.frame[0]), \n str(hsp.query_start),\n str(hsp.query_start -1+ len(hsp.query.replace('-', ''))*3), \n str(hsp.sbjct_start), \n str(hsp.sbjct_start -1+ len(hsp.sbjct)), \n str(hsp.expect),\n str(hsp.score)], \"\\t\")\n\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n" }, { "alpha_fraction": 0.4743706285953522, "alphanum_fraction": 0.47679707407951355, "avg_line_length": 30.701923370361328, "blob_id": "038879664b03ae31a009da4d6bcc79db152c9330", "content_id": "9baa876792076b28dfdb85da2608b4654ebfe41a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3297, "license_type": "permissive", "max_line_length": 83, "num_lines": 104, "path": "/python/kegg/kegg2xdom.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": 
"#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular expressions\nimport getopt\t\t\t\t\t# comand line argument handling\nfrom low import *\t\t\t# custom functions, written by myself\nimport anydbm\t\t\t\t\t# index databases (file hash)\nfrom Bio import SeqIO # biopython stuff, to parse fasta files for instance\n\n# =============================================================================\t\ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f kegg KO annotation file\" )\n stdout( \" \" )\n\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help()\t\n\n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f':\targs['file'] = value\n \n if not args.has_key('file'):\n stderr( \"kegg file missing.\" )\n show_help()\n if not file_exists( args.get('file') ):\n stderr( \"kegg file does not exist.\" )\n show_help()\n\n return args\n\n\n# =============================================================================\ndef strip_tags(value):\n \"Return the given HTML with all tags (+ KEGG tags) stripped.\"\n value = re.sub(r'<[^>]*?>', '', value)\n value = re.sub(r'\\[.*\\]', '', value)\n return value\n\n\ndef read_KOs( file ):\n\n def next_entry(fo):\n pathlist = []\n definition = \"\"\n line = fo.readline().rstrip()\n if line == '': \n return fo, None, None\n entry = re.match('^ENTRY\\s+(\\S+)', line).group(1)\n line = fo.readline().rstrip()\n line = fo.readline().rstrip()\n if re.match( '^DEFINITION\\s+(.*)$',line):\n definition = re.search( '^DEFINITION\\s+(.*)$', line ).group(1)\n line = fo.readline().rstrip()\n while line.startswith('CLASS') or line.startswith(' '):\n if re.search('\\[\\S+:\\S+\\]', line):\n pathlist.append( re.search('\\[(\\S+:\\S+)\\]',line).group(1) )\n line = fo.readline().rstrip()\n \n while line != '///':\n line = fo.readline().rstrip()\n\n if definition != \"\": entry += \"\\t\" + definition\n return fo, entry, pathlist\n \n fo = open( file )\n kohash = {}\n while 1:\n fo, id, pathlist = next_entry( fo )\n if id == None: break\n print \">%s\\n%s\" %(id, string.join(pathlist,\"\\t\"))\n \n fo.close()\n\n# =============================================================================\n# =============================================================================\ndef main( args ):\n \n kohash = read_KOs( args.get('file') )\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.4432739019393921, "alphanum_fraction": 0.44732576608657837, "avg_line_length": 31.473684310913086, "blob_id": "6e9fc105c4537b86ff2fd6bb1d18ef9e7ab0907a", "content_id": 
"7d15171abfc891ffdcbccf49cb8ab1562948e583", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2468, "license_type": "permissive", "max_line_length": 83, "num_lines": 76, "path": "/python/fasta/fasta2flat.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport getopt\t\t\t\t\t# comand line argument handling\nfrom low import *\t\t\t# custom functions, written by myself\nimport fasta\n\n# =============================================================================\t\ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f fasta file\" )\n stdout( \" \" )\n\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help()\t\n\n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f':\targs['fastafile'] = value\n \n if not args.has_key('fastafile'):\n stderr( \"fasta file missing.\" )\n show_help()\n if not file_exists( args.get('fastafile') ):\n stderr( \"fasta file does not exist.\" )\n show_help()\n \n return args\n\n# =============================================================================\ndef get_sequences(file):\n seqcount, alnlength = 0, 0\n text = ''\n fo = open(file)\n for line in fo:\n line = line.rstrip()\n if line.startswith(\">\"):\n id = line[1:]\n if id.count(\" \") > 0: id = id[:id.index(\" \")]\n text += \"\\n\" + id + \"\\n\"\n seqcount += 1\n else:\n text += line\n if seqcount == 1: alnlength += len(line)\n fo.close()\n return text, seqcount, alnlength\n\n# =============================================================================\n# =============================================================================\ndef main( args ):\n for gid, seq in fasta.get_sequence_hash(args['fastafile']).iteritems():\n print string.join([gid, seq], \"\\t\")\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.4976525902748108, "alphanum_fraction": 0.5015962719917297, "avg_line_length": 34.731544494628906, "blob_id": "169863d94a171bfa11c1edfd67609c69319e5416", "content_id": "6ffdab3a74ab72cc94ac145ac74b4cdae0177a0a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5325, "license_type": "permissive", "max_line_length": 102, "num_lines": 149, "path": "/python/generic/flat2sqlinject.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # 
comand line argument handling\nfrom low import * # custom functions, written by myself\nfrom collections import defaultdict\nimport glob\nimport gff3\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path> ...\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f flat file\" )\n stdout( \" -s separator of flat file (default: tab)\" )\n stdout( \" -a action [INSERT|UPDATE]\" )\n stdout( \" -t sql table name\" )\n stdout( \" \" )\n stdout( \" Field names are extracted from the header line (first line, must start with #).\" )\n stdout( \" NULL named fields are irgnored, the rest gets imported.\" )\n stdout( \" UPDATES are only possible with a given ID. Thus, the header must contain \" )\n stdout( \" a column named ID which will be used to generate an UPDATE ... WHERE id='ID'.\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:a:s:t:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {'separator':\"\\t\", 'action':\"INSERT\"}\n for key, value in keys:\n if key == '-f': args['flatfile'] = value\n if key == '-s': args['separator'] = value\n if key == '-a': args['action'] = value.upper()\n if key == '-t': args['table'] = value\n \n for key in ['flatfile', 'separator', 'action', 'table']:\n if key.endswith(\"file\"):\n if not args_file_exists(args, key): show_help()\n elif key.endswith(\"dir\"):\n if not args_dir_exists(args, key): show_help()\n elif not args.has_key(key): show_help()\n return args\n\n# =============================================================================\ndef get_blastout_hash(file):\n hash = defaultdict(int)\n fo = open(file)\n for line in fo:\n qid = line.split(\"\\t\")[0]\n hash[qid] = 1\n fo.close()\n return hash\n\n# =============================================================================\ndef gather_blast_output(bdir):\n hash = {}\n for filename in glob.glob(bdir + '/*.blastout'):\n s = os.path.split(filename)[1][:4]\n hash[s] = get_blastout_hash(filename)\n return hash\n\n# =============================================================================\ndef get_scaffolds(file):\n\n def add_feature(hash, gf):\n if not hash.has_key(gf.seqid): hash[gf.seqid] = {}\n hash[gf.seqid][gf.start] = gf.get_attributes()['ID']\n return hash\n\n hash = {}\n fo = open(file)\n for line in fo:\n if line.startswith(\"#\"): continue\n gf = gff3.GeneFeature(line)\n if gf.ftype != \"mRNA\": continue\n hash = add_feature(hash, gf)\n fo.close()\n\n outhash = {}\n for scaffold, h in hash.iteritems():\n outhash[scaffold] = [h[key] for key in sorted(h.iterkeys())]\n\n return outhash\n\n# =============================================================================\ndef gather_genes_on_scaffolds(gffdir):\n hash = {}\n for filename in glob.glob(gffdir + '/*.gff'):\n s = os.path.split(filename)[1][:4]\n hash[s] = get_scaffolds(filename)\n return hash\n\n# =============================================================================\ndef get_neighbors(pid, 
geneids):\n index = geneids.index(pid)\n left = geneids[max([index-3,0]):index]\n right = geneids[index+1:min([index+4,len(geneids)])]\n return (left, right)\n\n# =============================================================================\ndef escape4sql(string):\n if string.count(\"'\") == 0: return string\n return string.replace(\"'\", \"\\\\'\")\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n fo = open(args['flatfile'])\n action, table, fieldnames = args['action'], args['table'], \"\"\n print \"SET autocommit=0;\"\n for line in fo:\n if line.startswith(\"#\"):\n if fieldnames == \"\": fieldnames = [e.strip().upper() for e in line[1:].split(args['separator'])]\n else: continue\n else:\n values = line.strip().split(args['separator'])\n sql = \"%s \" % action\n if action == \"INSERT\": sql += \"INTO \"\n sql += \"`%s` SET \" % table\n for i in range(len(fieldnames)):\n if fieldnames[i] == \"NULL\": continue\n if fieldnames[i] == \"ID\" and action == \"UPDATE\": continue\n if not sql.endswith(\" \"): sql += \", \"\n sql += \"%s='%s'\" %(fieldnames[i], escape4sql(values[i]))\n if action == \"UPDATE\": sql += \" WHERE ID='%s'\" % values[fieldnames.index(\"ID\")]\n print sql + \";\"\n print \"COMMIT;\"\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.531439483165741, "alphanum_fraction": 0.5461361408233643, "avg_line_length": 36.1553840637207, "blob_id": "2194cc3948b3011f12597ccdb0d845532fbde275", "content_id": "2c0aa963c18d091daa896ffa9d093f5bc84d7050", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33477, "license_type": "permissive", "max_line_length": 294, "num_lines": 901, "path": "/python/swapsc/swapsee.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular expressions\nimport getopt\t\t\t\t\t# comand line argument handling\nimport copy # clone an object\nfrom low import *\t\t\t# custom functions, written by myself\n\nDEBUG = 1\n\n# =============================================================================\t\ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"reads SWAPSC output and offers evaluation and plotting abilities.\\n\" )\n stdout( \"usage: \" + sys.argv[0] + \" -c <path> -o <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f swapsc output file\" )\n stdout( \" -g group water and terrestrial branches\" )\n stdout( \" -r remove overlaps\" )\n stdout( \" -m modes to consider [default: \\\"PS,AdN,NS\\\"]\" )\n\n stdout( \" \" )\n\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the processed set of arguments \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help()\t\n\n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:grm:\" )\n except getopt.GetoptError:\n stderr( 
\"invalid arguments provided.\" )\n show_help()\n\n args = { 'rmoverlaps': 1, 'modes': ['PS','AdN','NS'] }\n for key, value in keys:\n if key == '-f': args['swapscout'] = value\n if key == '-g': args['group'] = 1\n if key == '-r': args['rmoverlaps'] = 1\n if key == '-m': args['modes'] = value.split(',')\n \n if not args.has_key('swapscout'):\n stderr( \"swapsc out file missing.\" )\n show_help()\n if not file_exists( args.get('swapscout') ):\n stderr( \"swapsc out file does not exist.\" )\n show_help()\n\n return args\n\n# =============================================================================\ndef resolve_overlap(con1, con2):\n priority = { 'PS':10, 'NS':10, 'AdN':8, 'HS':7, 'S':6, 'AdN + S':7 }\n # check if con2 is within con1\n if con1.mode == con2.mode:\n start = min([con1.start, con2.start])\n stop = max([con1.stop, con2.stop])\n con1.start = start\n con1.stop = stop\n return [con1]\n if con2.start > con1.start and con2.stop < con1.stop:\n if priority[ con2.mode ] < priority[ con1.mode ]: return [con1]\n elif priority[ con2.mode ] > priority[ con1.mode ]: \n # break up in three\n con3 = copy.copy(con1)\n con4 = copy.copy(con1)\n con3.stop = con2.start - 1\n con4.stsart = con2.stop + 1\n return [con3,con2,con4]\n else: return [con1,con2]\n else: # con2 just overlaps with con1\n if priority[ con1.mode ] > priority[ con2.mode ]:\n con2.start = 1 + con1.stop\n if con2.start >= con2.stop: return [con1]\n else: return [con1, con2]\n elif priority[ con1.mode ] < priority[ con2.mode ]:\n con1.stop = con2.start - 1\n if con1.stop <= con1.start: return [con2]\n else: return [con1, con2]\n else:\n overlapfrom, overlapto = con2.start, con1.stop\n con1.stop = overlapfrom -1\n con2.start = overlapto +1\n if con1.stop <= con1.start: return [con2]\n elif con2.start >= con2.stop: return [con1]\n else: return [con1,con2]\n\n# =============================================================================\n# =============================================================================\nclass Swapscout:\n \"\"\"\n this class can parse SWAPSC output and stores the information the following way:\n nseq: number of input sequences\n seqlength: length (nt) of the input MSA\n seqhash: seqnumber => nucleotidesequence\n nodehash: for the ancestral sequences, their seqnumber => array of child seqnumbers\n parameterhash: stores values for parameters estimated for the simulated data\n constrainthash: stores all signals reported by SWAPSC\n branch => region => { mode, p }\n summaryhash: summarizing stats at the end of the SWAPSC out such as percentage of codons\n for each mode of selection, and P(neutral sites)\n \"\"\"\n def __init__(self, args):\n self.nseq = 0\n self.seqlength = 0\n self.seqhash = {}\n self.nodehash = {}\n self.parameterhash = {}\n self.constrainthash = {}\n self.summaryhash = {}\n self.args = args\n\n def parse(self, file):\n fo = open(file)\n lines = fo.readlines()\n fo.close()\n # nseq + seqlength\n line = lines.pop(0)\n self.nseq = int(re.match('Number of sequences =\\s*(\\d+)', line).group(1))\n# if DEBUG: print \"number seq:\" , self.nseq\n line = lines.pop(0)\n self.seqlength = int(re.match('Length of alignment \\(nucleotides\\) =\\s*(\\d+)', line).group(1))\n\n\n# if DEBUG: print \"seq legnth:\" , self.seqlength\n # input sequences\n line = lines.pop(0)\n line = lines.pop(0)\n count = 1\n while not re.match('$', line):\n name = line.rstrip()\n seq = lines.pop(0).rstrip()\n self.seqhash[count] = { 'name': name, 'seq': seq }\n count += 1\n line = lines.pop(0)\n# if DEBUG: print 
\"seqhash:\", self.seqhash\n\n # tree and branches\n while not re.match('Branches:', line):\n line = lines.pop(0)\n line = lines.pop(0)\n while not re.match('$', line):\n if re.match('\\d+\\s+:\\s+\\d+\\.\\.\\.\\d+', line):\n first = int( re.match('(\\d+)', line).group(1) )\n second = int( re.search('\\s+(\\d+)\\.\\.\\.', line).group(1) )\n third = int( re.search('\\.\\.\\.(\\d+)', line).group(1) )\n self.nodehash[first] = [second, third]\n line = lines.pop(0)\n# if DEBUG: print \"nodehash:\", self.nodehash\n\n # ancestral sequences\n while not re.match('Ancestral sequences inferred by MP:', line):\n line = lines.pop(0)\n line = lines.pop(0)\n line = lines.pop(0)\n while re.match('node', line):\n line = line.rstrip()\n elements = line.split()\n count = int( re.search('node(\\d+):', elements[0]).group(1) )\n self.seqhash[count] = { 'seq': elements[1] } \n line = lines.pop(0)\n# if DEBUG: print \"seqhash:\", self.seqhash\n\n # parameter estimates\n while not re.match('Parameter estimates using simulated data:', line):\n line = lines.pop(0)\n line = lines.pop(0)\n line = lines.pop(0)\n while not re.match('Numbers of the species follow the input', line):\n if re.match('$', line): \n line = lines.pop(0)\n continue\n elements = line.split(';')\n for e in elements:\n key, value = re.search('(.*)\\s+=\\s+(\\S+)', e).groups()\n self.parameterhash[key] = value\n line = lines.pop(0)\n# if DEBUG: print \"parameters:\", self.parameterhash\n\n # selective constraints\n while not re.match('=================', line):\n line = lines.pop(0)\n line = lines.pop(0)\n branch = ''\n while not re.match('\\S+', line):\n if not re.search('\\S+', line): \n line = lines.pop(0)\n continue\n # branch definition\n if re.match(\"\\s+\\d+\\.\\.\\d+$\", line):\n branch = line.strip()\n self.constrainthash[branch] = {}\n else:\n col = line.rstrip().split()\n if string.join(col,'') != '-------':\n p = string.join(col[6:9])\n mode = col[9]\n if len(col) == 12:\n mode = string.join(col[9:])\n self.constrainthash[branch][col[0]] = { 'mode': mode, 'p':p }\n line = lines.pop(0)\n# if DEBUG: print \"constrainthash:\", self.constrainthash\n\n # summary\n while not re.match('Selective constraints', line):\n line = lines.pop(0)\n line = lines.pop(0)\n line = lines.pop(0)\n while re.search('\\S', line):\n line = line.rstrip()\n col = line.split()\n self.summaryhash[col[0]] = {'% codons': col[1], 'mean Ka': col[2], 'mean Ks': col[3], 'mean W': col[4]}\n line = lines.pop(0)\n\n line = lines.pop(0)\n self.summaryhash['P(neutral sites)'] = re.search('(\\S+)$', line).group(1)\n# if DEBUG: print \"summary:\", self.summaryhash\n\n def create_cluster(self):\n cluster = Cluster(self.args, self.nseq, self.seqlength)\n ids = self.seqhash.keys()\n ids.sort()\n for id in ids:\n hash = self.seqhash[id]\n node = Node(id)\n node.seq = hash['seq']\n if hash.has_key('name'): node.name = hash['name']\n if self.nodehash.has_key(id): node.children = self.nodehash[id]\n cluster.addNode(node)\n\n pnodes = self.nodehash.keys()\n pnodes.sort()\n i = 0\n while i < (len(pnodes)-1):\n pnode = int(pnodes[i])\n children = self.nodehash.get(pnode)\n for child in children:\n cild = int(child)\n cluster.branches['%s..%s' %(pnode,child)] = 1\n #sys.stderr.write(\"added branch: %s..%s\\n\" %(pnode,child))\n i += 1\n\n for branch, hash in self.constrainthash.iteritems():\n for region, details in hash.iteritems():\n start, stop = region.split('..')\n c = Constraint(branch, start, stop)\n c.mode = details['mode']\n c.p = details['p']\n cluster.addConstraint(c)\n\n return 
cluster\n\n# =============================================================================\nclass Node:\n def __init__(self, id):\n self.id = int(id)\n self.name = ''\n self.seq = ''\n self.children = []\n\n# =============================================================================\nclass Branch:\n def __init__(self, id, nodefrom, nodeto):\n self.id = id\n self.nodefrom = nodefrom\n self.nodeto = nodeto\n\n# =============================================================================\nclass Constraint:\n def __init__(self, branch, start, stop):\n self.branch = branch\n self.start = int(start)\n self.stop = int(stop)\n self.mode = ''\n self.p = ''\n\n def __cmp__(self, other):\n return cmp(self.start, other.start)\n\n def is_unsignificant(self):\n if self.p == 'P > 0.05': return 1\n else: return 0\n\n def overlaps_with(self, con, debug=0):\n #if self.start < con.start and self.stop > con.start: return 1\n #else: return 0\n if debug:\n sys.stderr.write(\"overlap?\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" %(self.mode,self.start,self.stop,con.mode,con.start,con.stop))\n if (self.start < con.start and self.stop < con.start) or (self.start > con.stop and self.stop > con.stop): \n if debug: sys.stderr.write(\"\\tno\\n\")\n return 0\n else: \n if debug: sys.stderr.write(\"\\tyes\\n\")\n return 1\n\n def to_s(self):\n return string.join([self.branch,'%s' % self.start, '%s' % self.stop,self.mode,self.p],\"\\t\")\n\n# =============================================================================\nclass Gap:\n def __init__(self,branch,start,stop):\n self.branch = branch\n self.start = start\n self.stop = stop\n \n def __cmp__(self,other):\n return cmp(self.start,other.start)\n\n def overlaps_with(self,other):\n if (other.start < self.start and other.stop < self.start) or (other.start > self.stop and other.stop > self.stop): return 0\n else: return 1\n\n# =============================================================================\n# =============================================================================\n# PANEL: width\n# TRACK: name\n# FEAT: range, label, color, glyph\n#\nclass Cluster:\n \"\"\" store one complete SWAPSC output \"\"\"\n def __init__(self, args, nseq, seqlength):\n self.args = args\n self.nseq = int(nseq)\n self.seqlength = int(seqlength)\n self.nodes = {}\n self.branches = {}\n self.constraints = {}\n self.gaps = {}\n\n def get_node_by_name(self,name):\n for node in self.nodes.values():\n if node.name == name: return node\n return None\n\n def addNode(self, node):\n if node.name == '' and node.children != []:\n node.name = '(%s, %s)' %( self.nodes.get(node.children[0]).name, self.nodes.get(node.children[1]).name)\n self.nodes[node.id] = node\n\n def addConstraint(self, c):\n if not self.constraints.has_key(c.branch): self.constraints[c.branch] = {}\n self.constraints[c.branch][c.start] = c\n\n def addGap(self,c):\n if not self.gaps.has_key(c.branch): self.gaps[c.branch] = {}\n self.gaps[c.branch][c.start] = c\n\n def reduce_to_significant_constraint(self):\n \"\"\" remove all with P > 0.05 or with unwanted modes \"\"\"\n duplicate = {}\n for branch, hash in self.constraints.iteritems():\n for start, con in hash.iteritems():\n if not con.is_unsignificant() and con.mode in self.args.get('modes'):\n if not duplicate.has_key(branch): duplicate[branch] = {}\n duplicate[branch][start] = con\n self.constraints = duplicate\n\n def remove_overlapping_constraints(self):\n \"\"\" \n - put together overlapping signals of identical mode\n - resolve overlaps according to priorities of different 
modes\n \"\"\"\n NRconstraints = {}\n for branch, hash in self.constraints.iteritems():\n sortedstarts = hash.keys()\n sortedstarts.sort()\n sortedcons = []\n for key in sortedstarts: sortedcons.append(hash[key])\n pos = 0\n exit = 0\n while not exit:\n sortedcons.sort()\n if pos+1 == len(sortedcons):\n exit = 1\n continue\n firstcon = sortedcons[pos]\n nextcon = sortedcons[pos+1]\n if firstcon.overlaps_with(nextcon):\n #print \"OVERLAP | 1: %s %s %s | 2: %s %s %s\" %(firstcon.mode,firstcon.start,firstcon.stop,nextcon.mode,nextcon.start,nextcon.stop)\n newcons = resolve_overlap(firstcon,nextcon)\n #tmp = \"\"\n #for n in newcons:\n # tmp += ' %s %s %s' %(n.mode, n.start, n.stop)\n #print \"resolved into:\", tmp\n sortedcons.pop(pos)\n sortedcons.pop(pos)\n newcons.sort()\n newcons.reverse()\n for c in newcons:\n sortedcons.insert(pos, c)\n pos = 0\n else:\n pos += 1\n self.constraints[branch] = {}\n for con in sortedcons:\n self.constraints[con.branch][con.start] = con\n\n def add_gaps(self):\n for branch in self.branches.keys():\n node1 = self.nodes.get( int( re.match('(\\d+)\\.\\.', branch).group(1) ) )\n node2 = self.nodes.get( int( re.search('\\.\\.(\\d+)$', branch).group(1) ) )\n # walk along the sequence, check if gap present in one of the sequences\n # if gap, create new or prolong already existing constraint\n gapstart, gapstop = None, None\n i = 0\n while i < self.seqlength:\n if node1.seq[i] == '-' or node2.seq[i] == '-':\n if not gapstart: gapstart = i\n gapstop = i\n else:\n if gapstart != None and gapstop != None:\n gap = Gap(branch, gapstart, gapstop)\n self.addGap(gap)\n gapstart, gapstop = None, None\n i += 1\n if gapstart != None and gapstop != None:\n gap = Gap(branch, gapstart, gapstop)\n self.addGap(gap)\n\n def get_ancestor_of(self,nodeid):\n for b in self.branches.keys():\n node1 = int( re.match(\"(\\d+)\\.\\.\\d+\",b).group(1) )\n node2 = int( re.match(\"\\d+\\.\\.(\\d+)\",b).group(1) )\n if nodeid == node2:\n return node1\n return None\n\n def group_branches(self):\n allbranches = self.branches.keys()\n# for branch in allbranches:\n# for g in self.gaps[branch].values():\n# sys.stderr.write(\"gap:\\t%s\\t%s\\t%s\" %(g.branch,g.start,g.stop) +\"\\n\")\n# sys.stderr.write(\"\\n\")\n \n # water branches to group\n waterbranches = []\n pooc = self.get_node_by_name(\"P.oceanica\")\n zoma = self.get_node_by_name(\"Z.marina\")\n waterbranches.append('%s..%s' %( self.get_ancestor_of(zoma.id), zoma.id))\n waterbranches.append('%s..%s' %( self.get_ancestor_of(pooc.id), pooc.id))\n waterbranches.append('%s..%s' %( self.get_ancestor_of(self.get_ancestor_of(pooc.id)), self.get_ancestor_of(pooc.id)))\n #sys.stderr.write(\"water branches to group:\\n\")\n #for b in waterbranches: sys.stderr.write(\" %s\\n\" % b)\n\n # terrestrial branches to group\n terrestrialbranches = []\n sorghum = self.get_node_by_name(\"S.bicolor\")\n oryza = self.get_node_by_name(\"O.sativa\")\n populus = self.get_node_by_name(\"P.trichocarpa\")\n arath = self.get_node_by_name(\"A.thaliana\")\n terrestrialbranches.append('%s..%s' %(self.get_ancestor_of(arath.id), arath.id))\n terrestrialbranches.append('%s..%s' %(self.get_ancestor_of(populus.id), populus.id))\n terrestrialbranches.append('%s..%s' %(self.get_ancestor_of(sorghum.id), sorghum.id))\n terrestrialbranches.append('%s..%s' %(self.get_ancestor_of(self.get_ancestor_of(sorghum.id)), self.get_ancestor_of(sorghum.id)))\n #sys.stderr.write(\"terrestrial branches to group:\\n\")\n #for b in terrestrialbranches: sys.stderr.write(\" %s\\n\" % b)\n\n groups = 
[waterbranches,terrestrialbranches]\n newconstraints = {}\n newgaps = {}\n newbranches = {}\n i = 0\n while i < len(groups):\n g = string.join(groups[i], ' + ')\n newbranches[g] = 1\n newconstraints[g] = {}\n newgaps[g] = {}\n for branch in groups[i]:\n if not self.constraints.has_key(branch) and not self.gaps.has_key(branch): \n #sys.stderr.write(\"branch %s is empty of cons and gaps\\n\" %(branch) )\n continue\n if self.constraints.has_key(branch) and not self.constraints[branch] == {}:\n cons = self.constraints[branch].values()\n for con in cons:\n con.branch = g\n add = 0\n if newconstraints[g].has_key(con.start):\n while newconstraints[g].has_key(con.start + add): add += 1\n newconstraints[g][con.start + add] = con\n if self.gaps.has_key(branch):\n gaps = self.gaps[branch].values()\n for gap in gaps:\n gap.branch = g\n add = 0\n if newgaps[g].has_key(gap.start):\n while newgaps[g].has_key(gap.start + add): add += 1\n newgaps[g][gap.start + add] = gap\n #print \"GROUP\", i, \"|\", g.replace('+',' + ')\n i += 1\n self.constraints = newconstraints\n self.branches = newbranches\n self.gaps = newgaps\n# for branch in self.branches.keys():\n# for g in self.gaps[branch].values():\n# sys.stderr.write(\"gap:\\t%s\\t%s\\t%s\" %(g.branch,g.start,g.stop) +\"\\n\")\n# sys.stderr.write(\"\\n\")\n \n def collapse_gaps(self):\n for branch in self.branches:\n gaps = self.gaps[branch].values()\n gaps.sort()\n # for g in gaps:\n # sys.stderr.write(\"gap:\\t%s\\t%s\\t%s\" %(g.branch,g.start,g.stop) +\"\\n\")\n pos, exit = 0, 0\n while pos < (len(gaps) -1):\n gaps.sort()\n firstgap = gaps[pos]\n nextgap = gaps[pos+1]\n if firstgap.overlaps_with(nextgap):\n #sys.stderr.write(string.join([branch,str(firstgap.start),str(firstgap.stop),str(nextgap.start),str(nextgap.stop)],\"\\t\"))\n # gap2 is within gap1 --> remove gap2\n if nextgap.start >= firstgap.start and nextgap.stop <= firstgap.stop: \n gaps.pop(pos+1)\n # sys.stderr.write(\" | within --> remove gap2\" +\"\\n\")\n # gap2 overlaps with gap1 on the right side --> prolong gap1 and remove gap2\n else:\n firstgap.stop = nextgap.stop\n gaps.pop(pos+1)\n # sys.stderr.write(\" | overlap --> merging into new gap %s - %s\" %(firstgap.start,firstgap.stop) +\"\\n\")\n pos -= 1\n if pos < 0: pos = 0\n else:\n pos += 1\n self.gaps[branch] = {}\n for gap in gaps:\n self.gaps[branch][gap.start] = gap\n \n def remove_signals_in_gaps(self):\n newcons = {}\n gaps = []\n constraints = []\n for branch in self.branches: \n gaps.extend( self.gaps[branch].values() )\n constraints.extend( self.constraints[branch].values() )\n newcons[branch] = {}\n \n for gap in gaps:\n pos = 0\n while pos < (len(constraints)):\n constraints.sort()\n con = constraints[pos]\n if gap.overlaps_with(con):\n #sys.stderr.write( string.join([\"con\",con.mode,str(con.start),str(con.stop),\"gap\",str(gap.start),str(gap.stop)],\"\\t\") + \"\\n\" )\n \n # con within gap --> remove con\n if gap.start <= con.start and gap.stop >= con.stop:\n constraints.pop(pos)\n #pos -= 1\n #if pos < 0: pos = 0\n else:\n # gap within con --> split con\n if gap.start > con.start and gap.stop < con.stop:\n newcon = Constraint(con.branch, gap.stop+1, con.stop)\n newcon.mode = con.mode\n newcon.p = con.p\n con.stop = gap.start - 1\n constraints[pos] = con\n constraints.insert(pos,newcon)\n\n elif gap.start <= con.start:\n con.start = gap.stop +1\n constraints[pos] = con\n elif gap.start >= con.start:\n con.stop = gap.start -1\n constraints[pos] = con\n else:\n #sys.stderr.write(\"no overlap: 
GAP\\t%s\\t%s\\tCON\\t%s\\t%s\\n\" % (gap.start, gap.stop, con.start,con.stop))\n pos += 1\n\n\n for con in constraints:\n# gap = Gap(\"\",1023,1900)\n# if not gap.overlaps_with(con):\n if newcons[con.branch].has_key(con.start):\n oldcon = newcons[con.branch][con.start]\n if con.stop <= oldcon.stop: continue\n newcons[con.branch][con.start] = con\n\n self.constraints = newcons\n \n def reduce_to_unique_constraints(self):\n # build \"consensus\"\n seq = {}\n for branch in self.branches.keys():\n seq[branch] = self.seqlength*[\"\"]\n for c in self.constraints[branch].values():\n for i in range(c.start-1,c.stop):\n if seq[branch][i] == \"\" or seq[branch][i] == c.mode: seq[branch][i] = c.mode\n else: seq[branch][i] = \"X\"\n #sys.stderr.write(branch + \" | \" + string.join(seq[branch], \" \") + \"\\n\\n\" )\n\n # if any seq has X, everyone gets an X at pos i\n for i in range(self.seqlength):\n ambig = 0\n for seqs in seq.values():\n if seqs[i] == 'X': \n ambig = 1\n break\n if ambig: \n for branch in seq.keys(): \n seq[branch][i] = 'X'\n\n # re-create constraints\n for branch in self.branches.keys():\n newconstraints = {}\n start, stop, mode = None, None, None\n for i in range(self.seqlength):\n s = seq[branch][i]\n if (s == \"\" or s == \"X\"):\n if start != None:\n c = Constraint(branch, start, stop)\n c.mode = mode\n newconstraints[c.start] = c\n start, stop, mode = None, None, None\n else: continue\n else:\n if start != None and s == mode:\n stop = i\n elif start != None and s != mode:\n c = Constraint(branch, start, stop)\n c.mode = mode\n newconstraints[c.start] = c\n start, stop, mode = i, i, s\n else:\n start, stop, mode = i, i, s\n if start != None and not newconstraints.has_key(start):\n c = Constraint(branch, start, stop)\n c.mode = mode\n newconstraints[c.start] = c\n self.constraints[branch] = newconstraints\n # TODO: ignore those parts that are ambiguous in one branch in the other branch as well\n\n # reduce to different signals\n branches = self.branches.keys()\n branch1 = branches[0]\n branch2 = branches[1]\n cons1 = self.constraints[branch1].values()\n cons2 = self.constraints[branch2].values()\n #sys.stderr.write(\"constraints in branch 1 (%s):\\n\" % branch1)\n #for con in cons1:\n # sys.stderr.write(\"\\t%s\\t%s\\t%s\\n\" %(con.mode,con.start,con.stop))\n #sys.stderr.write(\"constraints in branch 2 (%s):\\n\" % branch2)\n #for con in cons2:\n # sys.stderr.write(\"\\t%s\\t%s\\t%s\\n\" %(con.mode,con.start,con.stop))\n i = 0\n while i < (len(cons1)): \n j = 0\n while j < (len(cons2)):\n if i < 0: i = 0\n if j < 0: j = 0\n if len(cons1) == 0: break\n #sys.stderr.write(\"i: %s j: %s ai: %s aj: %s\\n\" %(i,j, len(cons1), len(cons2)))\n con2 = cons2[j]\n con1 = cons1[i]\n if not con1.overlaps_with(con2):\n j += 1\n continue\n else:\n if con1.mode != con2.mode: \n j += 1\n continue\n else:\n # con1 = con2\n if con1.start == con2.start and con1.stop == con2.stop:\n #sys.stderr.write(\" 1 == 2\\n\")\n cons1.pop(i)\n cons2.pop(j)\n\n # con2 within con1\n elif con2.start >= con1.start and con2.stop <= con1.stop:\n #sys.stderr.write(\" 2 within 1\\n\")\n cons2.pop(j)\n cons1.pop(i)\n newstop = con2.start -1\n newstart = con2.stop +1\n con2.branch = con1.branch\n con2.start = newstart\n con2.stop = con1.stop\n con1.stop = newstop\n cons1.append(con1)\n cons1.append(con2)\n\n # con1 within con2\n elif con1.start >= con2.start and con1.stop <= con2.stop:\n #sys.stderr.write(\" 1 within 2\\n\")\n cons2.pop(j)\n cons1.pop(i)\n newstart = con1.stop +1\n newstop = con1.start -1\n con1.branch 
= con2.branch\n con1.start = newstart\n con1.stop = con2.stop\n con2.stop = newstop\n cons2.append(con2)\n cons2.append(con1)\n\n # con1 left of con2\n elif con1.start <= con2.start:\n #sys.stderr.write(\" con1 --> con2\\n\")\n #sys.stderr.write(\" con1 init.: %s\\n\" % con1.to_s() )\n #sys.stderr.write(\" con2 init.: %s\\n\" % con2.to_s() )\n newstart = con1.stop +1\n newstop = con2.start -1\n con1.stop = newstop\n con2.start = newstart\n cons1[i] = con1\n cons2[j] = con2\n #sys.stderr.write(\" con1 modf.: %s\\n\" % con1.to_s() )\n #sys.stderr.write(\" con2 modf.: %s\\n\" % con2.to_s() )\n\n # con1 right of con1\n elif con1.start >= con2.start:\n #sys.stderr.write(\" con2 --> con1\\n\")\n #sys.stderr.write(\" con1 init.: %s\\n\" % con1.to_s() )\n #sys.stderr.write(\" con2 init.: %s\\n\" % con2.to_s() )\n\n newstart = con2.stop +1\n newstop = con1.start -1\n con2.stop = newstop\n con1.start = newstart\n cons1[i] = con1\n cons2[j] = con2\n #sys.stderr.write(\" con1 modf.: %s\\n\" % con1.to_s() )\n #sys.stderr.write(\" con2 modf.: %s\\n\" % con2.to_s() )\n\n\n i, j = -1, -1\n i += 1\n\n self.constraints = {}\n for con in cons1:\n if con.stop >= con.start:\n if not self.constraints.has_key(con.branch): self.constraints[con.branch] = {}\n self.constraints[con.branch][con.start] = con\n for con in cons2:\n if con.stop >= con.start:\n if not self.constraints.has_key(con.branch): self.constraints[con.branch] = {}\n self.constraints[con.branch][con.start] = con\n\n terrestrialcodons = 0\n aquaticcondons = 0\n aquatics = {'AdN':0, 'PS':0, 'NS':0}\n terrestrials = {'AdN':0, 'PS':0, 'NS':0}\n for branch in self.constraints.keys():\n for con in self.constraints[branch].values():\n add = int(round(1.0*(1 + con.stop - con.start)/3))\n if con.branch.count('+') == 2:\n aquaticcondons += add\n aquatics[con.mode] += add\n else:\n terrestrialcodons += add\n terrestrials[con.mode] += add\n # TODO: count gaps, then normalize codon count by the length of the region that was analyzable (non-gapped)\n gaps = []\n for branch in self.gaps.keys(): gaps.extend( self.gaps[branch].values() )\n tmpseq = [''] * self.seqlength\n for gap in gaps:\n for i in range(gap.start,gap.stop+1):\n tmpseq[i] = \"g\"\n ngaps = int(math.ceil(1.0*tmpseq.count(\"g\") / 3))\n npcods = int(math.ceil(1.0*tmpseq.count(\"\") / 3))\n\n cons = []\n for branch in self.constraints.keys(): cons.extend( self.constraints[branch].values() )\n tmpseq = [''] * self.seqlength\n for con in cons:\n for i in range(con.start,con.stop+1):\n tmpseq[i] = \"c\"\n ncods = int(math.ceil(1.0*tmpseq.count(\"c\") / 3))\n #print self.seqlength / 3, (aquaticcondons + terrestrialcodons) /3, ngaps, npcods, ncods\n\n # output: name, total_length, analyzable_codons, constrained_codons, percent_constrained_codons, aquatic:AdN/PS/NS, terrestrial:AdN/PS/NS\n print \"#\" + string.join(['ID', 'aln_length', 'ungapped_length', 'constr_codons', 'constr_percent', 'aqua_codons', 'aqua_AdN', 'aqua_PS', 'aqua_NS', 'terr_codons', 'terr_AdN', 'terr_PS', 'terr_NS'], \"\\t\")\n print \"%s\\t%s\\t%s\\t%s\\t%01.2f\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" %(self.args.get('swapscout'), self.seqlength/3, npcods, ncods, ncods*100.0/npcods, aquaticcondons, aquatics['AdN'], aquatics['PS'], aquatics['NS'], terrestrialcodons, terrestrials['AdN'], terrestrials['PS'], terrestrials['NS'])\n\n\n def debug(self):\n for branch, hash in self.constraints.iteritems():\n for start, con in hash.iteritems(): print con.to_s()\n\n def to_s(self):\n color = {}\n color['PS'] = \"0.2,0.7,0.1\"\n color['NS'] = 
\"1,0.2,0.2\"\n color['AdN'] = \"0,0.5,1\"\n color['HS'] = \"0,0,0.7\"\n color['S'] = \"0.5,0.5,0.5\"\n color['AdN + S'] = \"1,0.5,0\"\n color['gap'] = \"0.6,0.6,0.6\"\n\n flatfile = string.join([\"PANEL\", '%s' % self.seqlength],\"\\t\") + \"\\n\"\n # annotation\n annotationfilename = string.join(self.args.get('swapscout').split('.')[:3],'.') + '.ids.annotation'\n annotationline = 0\n if file_exists('./' + annotationfilename):\n annotationline = open(annotationfilename).readline().rstrip()\n if file_exists('../' + annotationfilename):\n annotationline = open('../' + annotationfilename).readline().rstrip()\n if annotationline:\n flatfile += string.join([\"TRACK\",\"gene description [TAIR8 annotation]\",\"true\"],\"\\t\") + \"\\n\"\n flatfile += string.join([\"FEATURE\",'%s..%s' %(1,self.seqlength -1),\"1,0.8,0.2\",annotationline.replace(\"\\t\",\" \")],\"\\t\") + \"\\n\"\n \n # constraints and gaps\n for branch in self.branches.keys():\n if branch.find('+') == -1:\n node1 = int( re.match('(\\d+)\\.\\.', branch).group(1) )\n node2 = int( re.search('\\.\\.(\\d+)$', branch).group(1) )\n branchname = self.nodes[node1].name + ' : ' + self.nodes[node2].name\n else:\n if branch.count('+') >= 3: branchname = \"terrestrial\"\n else: branchname = \"aquatic\"\n #branchname = branch\n flatfile += string.join([\"TRACK\",branchname,\"false\"],\"\\t\") + \"\\n\"\n\n #if not self.constraints.has_key(branch): continue\n if self.constraints.has_key(branch):\n hash = self.constraints[branch]\n for start, con in hash.iteritems():\n flatfile += string.join([\"FEATURE\",'%s..%s' %(con.start,con.stop),color[con.mode]],\"\\t\") + \"\\n\"\n\n if self.gaps.has_key(branch):\n hash = self.gaps[branch]\n for start, gap in hash.iteritems():\n flatfile += string.join([\"FEATURE\",'%s..%s' %(gap.start,gap.stop),color['gap']],\"\\t\") + \"\\n\"\n\n # legend\n legend = {\n 'PS': 'Positive selection',\n 'HS': 'Hot spots',\n 'S' : 'Saturation of synonymous sites',\n 'AdN': 'Acceleration of non-synonymous substitutions',\n 'NS': 'Negative selection',\n 'AdN + S': 'Acceleration of non-syn. substitutions + Saturation of syn. 
sites'\n }\n flatfile += string.join([\"TRACK\",\"Legend\",\"true\"],\"\\t\") + \"\\n\"\n legendstartpos = int(0.01 * self.seqlength)\n legendstoppos = int(0.99 * self.seqlength)\n legendrange = '%s..%s' % (legendstartpos,legendstoppos)\n flatfile += string.join([\"FEATURE\", legendrange, color['gap'], \"gap in at least one of the sequences\"],\"\\t\") + \"\\n\"\n for m in self.args.get('modes'):\n flatfile += string.join([\"FEATURE\", legendrange, color[m], '%s: %s' %(m,legend[m])],\"\\t\") + \"\\n\"\n \n return flatfile\n\n\n def plot(self):\n count = 1\n outfile = self.args.get('swapscout') + '.%s.png' % count\n while file_exists(outfile):\n count += 1\n outfile = self.args.get('swapscout') + '.%s.png' % count\n output = self.to_s()\n tmpfile = \".bio-graphics-plot.txt\"\n write_to_file(tmpfile,output)\n os.system(\"bio-graphics-plot.rb %s %s\" %(tmpfile,outfile) )\n\n \n# =============================================================================\n# =============================================================================\ndef main( args ):\n so = Swapscout(args)\n file = args.get('swapscout')\n so.parse( file )\n cluster = so.create_cluster()\n #print 10*\"=\", \"all signals\", 10*\"=\"\n #cluster.to_s()\n cluster.reduce_to_significant_constraint()\n #print 10*\"=\", \"only significant\", 10*\"=\"\n #print cluster.to_s()\n cluster.remove_overlapping_constraints()\n #print 10*\"=\", \"no overlaps\", 10*\"=\"\n #cluster.to_s()\n\n cluster.add_gaps()\n #print 10*\"=\", \"with gaps\", 10*\"=\"\n #cluster.to_s()\n\n cluster.plot()\n #print 10*\"=\", \"SWAPSEE\", 10*\"=\"\n if args.has_key('group'):\n cluster.group_branches()\n cluster.collapse_gaps()\n cluster.remove_signals_in_gaps()\n cluster.plot()\n cluster.reduce_to_unique_constraints()\n cluster.plot()\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.6302589178085327, "alphanum_fraction": 0.6399676203727722, "avg_line_length": 23.235294342041016, "blob_id": "d11fe9f316f81ffec841ce1ed332214bf146dab8", "content_id": "11b94fc07793911a599793c613bcd025b1a7ec54", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1236, "license_type": "permissive", "max_line_length": 95, "num_lines": 51, "path": "/python/fasta/fasta-length.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys\n\ndef usage():\n print >> sys.stderr, \"reports all fasta files with one or more sequences < or > n characters\"\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" folder \\\"<> n\\\"\"\n sys.exit(1)\n\n\ndef plausi():\n if len(sys.argv) != 3: usage()\n inFolder, inCutoff = sys.argv[1:3]\n inCut, inThreshold = inCutoff.split()\n inThreshold = int(inThreshold)\n return inFolder, inCut, inThreshold\n\n\ndef parse_fasta_file(file):\n lengthHash = {}\n fo = open(file)\n id = \"\"\n for line in fo:\n line = line.strip()\n if line.startswith(\">\"):\n id = line[1:]\n lengthHash[id] = 0\n else:\n lengthHash[id] += len(line)\n lengths = lengthHash.values()\n lengths.sort()\n return lengths[0]\n\n\ndef test_threshold(length, inCut, inThreshold):\n if inCut == \">\" and length > inThreshold: return 1\n if inCut == \"<\" and length < inThreshold: return 1\n return 0\n\n\ndef main():\n 
inFolder, inCut, inThreshold = plausi()\n for filename in os.listdir(inFolder):\n if not filename.endswith(\".fasta\"): continue\n minlength = parse_fasta_file( os.path.join(inFolder, filename) )\n report = test_threshold(minlength, inCut, inThreshold)\n if report: \n print os.path.split(filename)[1] + \"\\t\" + str(minlength)\n \n\n\nmain()\n" }, { "alpha_fraction": 0.5241765975952148, "alphanum_fraction": 0.5426302552223206, "avg_line_length": 33.80487823486328, "blob_id": "b1fb3d421715b121de1f3440690d27b9efedf9bf", "content_id": "80e2ca05f0a167c3799693ab9a7957fc9a1833f3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 4281, "license_type": "permissive", "max_line_length": 117, "num_lines": 123, "path": "/ruby/geneontology/termcloud-from-go-enrichment2.rb", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby -w\n# == Synopsis\n# creates input for wordle.net/advanced to create a term cloud based on the six\n# output files produced by the go-enrichment.py script\n#\n\nrequire 'optparse'\nrequire 'rubygems'\nrequire 'faster_csv'\n\n\n# =============================================================================\ndef get_opt\n  options = Hash.new\n  optparse = OptionParser.new do |opts|\n    opts.banner = \"Usage: #{$0} -d <dir>\"\n    options[:dir] = nil\n    options[:go2name] = nil\n    opts.on( '-d DIR', 'directory that contains *.ORA files produced with go-enrichment2.py'\n    ){|dir| options[:dir] = dir}\n    opts.on( '-g FILE', 'gene ontology id to name mapping file, tab delimited, to look up shortened term names'\n    ){|file| options[:go2name] = file}\n    opts.on( '-f', 'use FDR < 0.05 instead of p < 0.05 filter'\n    ){options[:filterfdr] = true}\n    opts.on( '-c FILE', 'color mapping between filename and color code (without #)'\n    ){|file| options[:colorfile] = file}\n  end\n  begin\n    optparse.parse!\n    mandatory = [:dir, :go2name]\n    missing = mandatory.select{|param| options[param].nil?}\n    if not missing.empty?\n      puts \"Missing options: #{missing.join(', ')}\"\n      puts optparse \n      exit\n    end\n  rescue OptionParser::InvalidOption, OptionParser::MissingArgument\n    puts $!.to_s\n    puts optparse\n    exit\n  end\n  return options\nend\n\n\n# modify to adjust colors\n# =============================================================================\ndef map_color(ontology, direction)\n  return '0065ab' if ontology == \"BP\" and direction == \"O\"\n  return '93002d' if ontology == \"MF\" and direction == \"O\"\n  return '1f9300' if ontology == \"CC\" and direction == \"O\"\n  return '4d4d4d' if ontology == \"BP\" and direction == \"U\"\n  return '4d4d4d' if ontology == \"MF\" and direction == \"U\"\n  return '4d4d4d' if ontology == \"CC\" and direction == \"U\"\nend\n\n# =============================================================================\ndef statusbar(progress, message=\"\", width=40)\n  progressbar = \"=\" * (progress*width).to_i\n  progressbar << \" \" while progressbar.length < width\n  STDERR.print \"\\r   0% #{progressbar} 100% \"\n  STDERR.print \"[#{message}]\" unless message.empty?\n  STDERR.print \"\\n\" if progress == 1.0 \nend\n\n\n# =============================================================================\ndef load_color_map(file)\n  str2color = Hash.new\n  f = File.open(file, \"r\")\n  while (line = f.gets)\n    line.chomp!\n    str, color = line.split(\"\\t\")[0,2]\n    str2color[str] = color\n  end\n  return str2color\nend\n\n# =============================================================================\ndef get_go2name(file)\n  go2name = Hash.new\n  f = 
File.open(file, \"r\")\n while (line = f.gets)\n line.chomp!\n id, name = line.split(\"\\t\")[0,2]\n go2name[id] = name\n end\n return go2name\nend\n\n# =============================================================================\ndef parse_ORA_file(file, go2name, filterfdr=nil, color=nil)\n fw = File.open(file + \".termcloud\", 'w')\n IO.foreach(file) do |line|\n direction, ontology, goid, p, fdr = line.split(\"\\t\")\n p, fdr = p.to_f, fdr.to_f\n next if p > 0.05\n next if filterfdr and fdr > 0.05\n p = '1e-200'.to_f if p == 0.0\n size = -1.0* Math.log(p)\n term = go2name[goid]\n STDERR.puts \"could not find name for GO term #{goid}\" unless term\n col = color ? color : map_color(ontology, direction)\n fw.puts [term, sprintf('%.2f', size), col].join(\":\")\n end\n fw.close\nend\n\n# =============================================================================\n# === M A I N =================================================================\n# =============================================================================\n\noptions = get_opt()\nabort(\"directory does not exist - aborting.\") unless File.exists?(options[:dir]) and File.directory?(options[:dir])\nabort(\"go2name mapping file does not exist - aborting.\") if options[:go2name] and not File.exists?(options[:go2name])\ngo2name = get_go2name(options[:go2name])\nstr2color = load_color_map(options[:colorfile]) if options[:colorfile]\nDir.glob(options[:dir] + '/*.ORA').each do |file|\n filename = File.basename(file)\n color = nil\n color = str2color[filename] if options[:colorfile]\n parse_ORA_file(file, go2name, options[:filterfdr], color)\nend\n" }, { "alpha_fraction": 0.6332622766494751, "alphanum_fraction": 0.6492537260055542, "avg_line_length": 18.957447052001953, "blob_id": "13439452f3e6b50e131827b77c9871c4b217a8c4", "content_id": "965770fdc3f4da9e0d1bd15f47a4f348cc4e2684", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 938, "license_type": "permissive", "max_line_length": 82, "num_lines": 47, "path": "/python/generic/intersection.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sets\nimport sys, os\n\ndef get_lines_in_hash(file):\n hash = {}\n fo = open(file)\n for line in fo: hash[line.strip()] = 1\n fo.close()\n return hash\n\ndef get_lines( file ):\n lines = []\n fo = open(file)\n for line in fo:\n line = line.rstrip()\n lines.append(line)\n\n return sets.Set(lines)\n\ndef terminate():\n print >> sys.stderr, \"provide at least two valid input files as input arguments\"\n sys.exit(1)\n\n\nif len(sys.argv[1:]) < 2: terminate()\nfor inputfile in sys.argv[1:]:\n if not os.path.isfile(inputfile): terminate()\n\nallhashes = []\nfor file in sys.argv[1:]:\n allhashes.append( get_lines_in_hash(file) )\n\nrefkeys = allhashes[0].keys()\nfor refkey in refkeys:\n found = 0\n for hash in allhashes:\n if hash.has_key(refkey): found += 1\n else: break\n if found == len(allhashes):\n print refkey\n\n#l1 = get_lines(sys.argv[1])\n#l2 = get_lines(sys.argv[2])\n#for e in l1.intersection(l2):\n# print e\n" }, { "alpha_fraction": 0.5591204166412354, "alphanum_fraction": 0.5656092166900635, "avg_line_length": 34.56410217285156, "blob_id": "17ee4ec5d1a06e818295061da68bc79e1d4c61fc", "content_id": "b8f51709cc35f46b965ce0dae3a9cb21e533a0ee", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2774, "license_type": "permissive", "max_line_length": 299, "num_lines": 78, 
"path": "/python/openreadingframe/ORFPREDICTORRR.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys,os,getopt\n\nOUTFILEPART2 = 'tmp.orf.part2.fasta'\n\n#==============================================================================\ndef usage():\n print \"\"\"Hello!\n \n Following options are possible:\n -i:\\tparsed BLASTX best hit definitions\n -j:\\tinput sequences in FASTA format\n -t:\\tminimum length for in silico predicted ORFs\n \"\"\"\n\n#==============================================================================\ndef main( XMLfile, CAP3, threshold ):\n # First, elongate BLAST-hits\n\n os.system(\"orf_prediction_part1.py -b \"+str(XMLfile)+\" -f \"+str(CAP3) )\n #print \"BLASTelongator has finished. Starting 2nd part...\"\n # It has written to temp and now comes Ina's script\n\n os.system(\"orf_prediction_part2.py\"+\" -t \"+str(threshold)+\" -f \" + OUTFILEPART2 )\n #print \"ORF-Prediction has finished. Removing temp-files..\"\n #os.system(\"cat BLASTelongatorHits.out SimulatedORFS.out > \"+str(outfile)) \n #os.system(\"rm BLASTelongatorHits.out\")\n os.system(\"rm \" + OUTFILEPART2)\n #os.system(\"rm SimulatedORFS.out\")\n #print \"Done. See you soon!\"\n\n\n\n#==============================================================================\n# MAIN ========================================================================\n#==============================================================================\ntry: \n if len(sys.argv) > 1:\n opts, args = getopt.getopt(sys.argv[1:],\"i:j:t:h\")\n else:\n usage()\n #print \"Hello! I take at least 3 arguments. I have the following options: -i defines the input XML-file which you want to use, -j defines the CAP3-outputfile, -t defines the threshold for in silico predicted proteins, -o defines the outfile, -h gives you more help! See you soon! You provided:\" \n sys.exit()\nexcept getopt.GetoptError, err:\n print \"Something went wrong - maybe this helps: \" + str(err)\n sys.exit()\n\nfor o, a in opts:\n if o == \"-h\":\n usage()\n sys.exit()\n elif o == \"-i\":\n if os.path.exists(a):\n if os.path.isfile(a):\n XMLfile = a\n elif os.path.isdir(a):\n print \"Specified XML-file is a directory!\"\n else:\n print \"Something is wrong with the XML-file, maybe it doesn't exist?\"\n elif o == \"-j\":\n if os.path.exists(a):\n if os.path.isfile(a):\n CAP3 = a\n elif os.path.isdir(a):\n print \"Specified CAP3-file is a directory!\"\n else:\n print \"Something is wrong with the CAP3-file, maybe it doesn't exist?\"\n elif o == \"-t\":\n threshold = a\n else:\n print \"Something went wrong ;_;. Maybe the file you specified doesn't exist?\"\n\nif len(opts) == 3:\n main( XMLfile, CAP3, threshold )\nelse:\n print len(opts)\n print \"Again, hello to you! You do not have the required amount of arguments given. Please specify them. For more, see -h! 
I AM THE PREDICTOR\"\n" }, { "alpha_fraction": 0.48523053526878357, "alphanum_fraction": 0.4873919188976288, "avg_line_length": 30.55681800842285, "blob_id": "3557176be51a370363b9de06e9b912eb3644c698", "content_id": "2102f6f76d78306d1a14678ec1ab19d48f9affde", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2776, "license_type": "permissive", "max_line_length": 82, "num_lines": 88, "path": "/python/fasta/stockholm-to-fasta.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular expressions\nimport getopt\t\t\t\t\t# comand line argument handling\nfrom low import *\t\t\t# custom functions, written by myself\n\n# =============================================================================\t\ndef show_help( ):\n\t\"\"\" displays the program parameter list and usage information \"\"\"\n\tstdout( \"usage: \" + sys.argv[0] + \" -s <path> -k \\\"regex\\\" -v \\\"regex\\\"\" )\n\tstdout( \" \" )\n\tstdout( \" option description\" )\n\tstdout( \" -h help (this text here)\" )\n\tstdout( \" -s stockholm file\" )\n\tstdout( \" -k regular expression for the key\" )\n\tstdout( \" -v regular expression for the value\" )\n\tstdout( \" \" )\n\tsys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n\t\"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\tif len ( sys.argv ) == 1:\n\t\tstderr( \"no arguments provided.\" )\n\t\tshow_help()\t\n\t\n\ttry: # check for the right arguments\n\t\tkeys, values = getopt.getopt( sys.argv[1:], \"hs:k:v:\" )\n\texcept getopt.GetoptError:\n\t\tstderr( \"invalid arguments provided.\" )\n\t\tshow_help()\n\t\n\targs = {}\n\tfor key, value in keys:\n\t\tif key == '-s': args['stockholm'] = value\n\t\tif key == '-k':\targs['keyregex'] = re.compile(value + '(.*)$' )\n\t\tif key == '-v':\targs['valueregex'] = re.compile(value + '(.*)$' )\n\t\t\t\t\n\tif not args.has_key('keyregex'):\n\t\tstderr( \"key regex missing.\" )\n\t\tshow_help()\n\t\t\n\tif not args.has_key('valueregex'):\n\t\tstderr( \"value regex missing.\" )\n\t\tshow_help()\n\n\tif not args.has_key('stockholm'):\n\t\tstderr( \"stockholm file missing.\" )\n\t\tshow_help()\n\tif not file_exists( args.get('stockholm') ):\n\t\tstderr( \"stockholm file does not exist.\" )\n\t\tshow_help()\n\t\t\n\treturn args\n\n\t\n# =============================================================================\n# =============================================================================\ndef main( args ):\n\n\tfo = open( args.get('stockholm') )\n\tkre = args.get('keyregex')\n\tvre = args.get('valueregex')\n\tkey, value = '', ''\n\tfor line in fo:\n\t\tif re.search( kre, line ):\n\t\t\tif key != '' and value != '':\n\t\t\t\tprint \">%s\" % key\n\t\t\t\tprint value\n\t\t\t\tkey, value = '', ''\n\t\t\tkey = re.search( kre, line ).group(1).strip()\n\t\tif re.search( vre, line ):\n\t\t\tvalue = re.search( vre, line ).group(1).strip()\n\tfo.close()\n\tif key != '' and value != '':\n\t\tprint \">%s\" % key\n\t\tprint value\n\t\n\t\n\t\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = 
handle_arguments( )\nmain( args )" }, { "alpha_fraction": 0.5082715153694153, "alphanum_fraction": 0.5191100835800171, "avg_line_length": 32.075469970703125, "blob_id": "f5994db0a4e1a4d9a1b69fba718028c6bb239b24", "content_id": "9613058c9f67a9b2ddf7b423cf90c9072e2d546a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1753, "license_type": "permissive", "max_line_length": 90, "num_lines": 53, "path": "/python/base/blastout.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "import re, string\n\n# =============================================================================\nclass BlastHit:\n    def __init__(self, line):\n        cols = line.split(\"\\t\")\n        self.qid, self.hid = cols.pop(0), cols.pop(0)\n        self.identity = float(cols.pop(0))\n        self.alnlen = int(cols.pop(0))\n        self.mismatch = int(cols.pop(0))\n        self.gap = int(cols.pop(0))\n        self.qstart = int(cols.pop(0))\n        self.qstop = int(cols.pop(0))\n        self.hstart = int(cols.pop(0))\n        self.hstop = int(cols.pop(0))\n        self.evalue = float(cols.pop(0))\n        self.score = float(cols.pop(0))\n    \n    def to_s(self):\n        out = []\n        out += [self.qid, self.hid, str(self.identity), str(self.alnlen)]\n        out += [str(self.mismatch), str(self.gap), str(self.qstart), str(self.qstop)]\n        out += [str(self.hstart), str(self.hstop), str(self.evalue), str(self.score)]\n        return string.join(out, \"\\t\")\n\n# =============================================================================\ndef get_query_hash(blastoutfile, evalue=10.0):\n    qh = {}\n    fo = open(blastoutfile)\n    for line in fo:\n        line = line.rstrip()\n        if len(line) == 0 or line.startswith('#') or not len(line.split(\"\\t\")) == 12: continue\n        blasthit = BlastHit(line)\n        if blasthit.evalue > evalue: continue\n        if not qh.has_key(blasthit.qid): qh[blasthit.qid] = []\n        qh[blasthit.qid].append(blasthit)\n    fo.close()\n    return qh\n\n# =============================================================================\ndef get_sequence_hash(fastafile):\n    seqhash = {}\n    key = \"\"\n    fo = open(fastafile)\n    for line in fo:\n        if line.startswith(\">\"):\n            gid = re.match(\">(\\S+)\", line).group(1)\n            key = gid\n            seqhash[key] = \"\"\n        else:\n            if key != \"\": seqhash[key] += line.strip()\n    fo.close()\n    return seqhash\n" }, { "alpha_fraction": 0.5742297172546387, "alphanum_fraction": 0.5798319578170776, "avg_line_length": 18.756755828857422, "blob_id": "6e0f55a047d8d408779de0fe136da360a0f2c749", "content_id": "5510e3ee11b0e847e0f1373559de451dd6a8908d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 731, "license_type": "permissive", "max_line_length": 81, "num_lines": 37, "path": "/python/fasta/fasta-length-per-sequence.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys\n\ndef usage():\n    print >> sys.stderr, \"reports for each fasta sequence the length in tab format\"\n    print >> sys.stderr, \"usage: \" + sys.argv[0] + \" fastafile\"\n    sys.exit(1)\n\n\ndef plausi():\n    if len(sys.argv) != 2: usage()\n    inFile = sys.argv[1]\n    return inFile\n\n\ndef parse_fasta_file(file):\n    lengthHash = {}\n    fo = open(file)\n    id = \"\"\n    for line in fo:\n        line = line.strip()\n        if line.startswith(\">\"):\n            id = line[1:]\n            if id.count(\" \") > 0: id = id[:id.index(\" \")]\n            lengthHash[id] = 0\n        else:\n            lengthHash[id] += len(line)\n    for id, length in lengthHash.iteritems():\n        print id + \"\\t\" + str(length)\n\n\ndef main():\n    inFile = plausi()\n    parse_fasta_file( 
inFile )\n\n\nmain()\n" }, { "alpha_fraction": 0.5953947305679321, "alphanum_fraction": 0.6074561476707458, "avg_line_length": 21.2439022064209, "blob_id": "0c4e3ae8e832e274eb68c9ecc1285ee97a9a670b", "content_id": "cfa28edeef77a3cf333dafd1aba049afdb6483c4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 912, "license_type": "permissive", "max_line_length": 67, "num_lines": 41, "path": "/ruby/pfam/length2hmmout.rb", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby -w\nHEADER = /^>(\\S+)\\s?.?/\nunless (ARGV.size == 2)\n\tputs \"Usage: #{$0} fasta hmmout [NOTE: will change input hmmout!]\"\n\texit\nend\nlengths\t= Hash.new\nseq \t\t= String.new\npid\t\t\t= nil \nf \t\t\t= File.open(ARGV[0], \"r\")\nc\t\t\t\t= 0\nwhile(line = f.gets)\n\tline.chomp!\n\tif (m = HEADER.match(line))\n\t\tlengths[pid] = seq.length.to_s unless (pid.nil?)\n\t\tpid = m[1]\n\t\tseq = String.new\n\t\tc += 1\n\t\tSTDERR.print \"\\r*** Reading fasta entries: #{c}... \"\n\t\tnext\n\tend\n\tseq += line\t\nend\nlengths[pid] = seq.length unless (pid.nil?)\nf.close\nSTDERR.puts \"done.\"\noldhmmout = Array.new\nIO.foreach(ARGV[1]) {|x| oldhmmout << x}\nf = File.open(ARGV[1], \"w\")\noldhmmout.each do |line|\n\tnext if (/^#.+/.match(line))\n\tfields = line.split\n\tunless (lengths.has_key?(fields[0]))\n\t\t\tputs \"*** NO LENGTH FOUND FOR >#{fields[0]}<\"\n\t\t\tpresent = false\n\t\t\tnext\n\tend\n\tline.chomp!\n\tf.puts lengths[fields[0]].to_s + \"\\t\" + line + \"\\n\"\nend\nf.close\n" }, { "alpha_fraction": 0.507228434085846, "alphanum_fraction": 0.5097067356109619, "avg_line_length": 33.08450698852539, "blob_id": "0a4abbe36f6e41ea7f8226277575347cfcf10b61", "content_id": "e786e43a2385ec128e0b5d864ba2dbeae53c1d86", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2421, "license_type": "permissive", "max_line_length": 84, "num_lines": 71, "path": "/python/misa/gc-content-from-misa.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nfrom low import * # custom functions, written by myself\nfrom collections import defaultdict\nfrom misa import MisaSSRspecies\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f all.misa out file\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['file'] = value\n \n if not args.has_key('file'):\n stderr( \"fasta file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"fasta 
file does not exist.\" )\n show_help()\n \n return args\n\n \n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n specieshash = {}\n fo = open(args['file'])\n for line in fo:\n m = MisaSSRspecies(line)\n if not specieshash.has_key(m.species): specieshash[m.species] = defaultdict(int)\n for char in ['A', 'T', 'G', 'C']:\n specieshash[m.species][char] += m.motif.count(char) * m.repeats\n\n speciesarray = specieshash.keys()\n speciesarray.sort()\n for species in speciesarray:\n total = sum(specieshash[species].values())\n gc = 1.0 * (specieshash[species]['G'] + specieshash[species]['C']) / total\n print species + \"\\t\" + str(gc)\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5346097350120544, "alphanum_fraction": 0.5764532685279846, "avg_line_length": 31.98285675048828, "blob_id": "d43e37dbe788eba240bc57181a1c6752000a974a", "content_id": "ec79c6ba56a3cfd76beb39e55c5e1e326266c91e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11543, "license_type": "permissive", "max_line_length": 92, "num_lines": 350, "path": "/python/blast/benchmark_blast.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t# low level handling, such as command line stuff\nfrom low import write_to_file\nimport string\t\t\t# string methods available\nimport re\t\t\t\t\t# regular expressions\nimport getopt\t\t\t# comand line argument handling\nimport math\t\t\t\t# match functions\nfrom low import *\t# custom functions, written by myself\nimport tempfile # generate tmp files\nfrom Bio import SeqIO # biopython stuff, to parse fasta files for instance\n\n# =============================================================================\t\ndef show_help( ):\n\t\"\"\" displays the program parameter list and usage information \"\"\"\n\tstdout( \"usage: \" + sys.argv[0] + \" -f <path> [-m <path> -c <path>] [-s <path>]\" )\n\tstdout( \" \" )\n\tstdout( \" option description\" )\n\tstdout( \" -h help (this text here)\" )\n\tstdout( \" -f path to the fasta file containing all contig and singleton sequences\" )\n\tstdout( \" \" )\n\t\n\tsys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n\t\"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\tif len ( sys.argv ) == 1:\n\t\tstderr( \"no arguments provided.\" )\n\t\tshow_help()\t\n\t\n\ttry: # check for the right arguments\n\t\tkeys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n\texcept getopt.GetoptError:\n\t\tstderr( \"invalid arguments provided.\" )\n\t\tshow_help()\n\t\t\n\tfile = ''\n\tfor key, value in keys:\n\t\tif key == '-f': file = value\n\t\t\t\n\tif file == '':\n\t\tstderr( \"sequence data file missing.\" )\n\t\tshow_help()\n\telif not file_exists( value ):\n\t\tstderr( \"invalid path in \" + key )\n\t\tshow_help()\n\t\t\n\tfile = get_global_path( file )\n\treturn file\n\n# =============================================================================\ndef get_sequences( fastafile, number ):\n\t\"\"\"\n\tgets the first <number> of sequences within the fasta file.\n\twrites it to a file, returns the filename of this 
file.\n\t\"\"\"\n\tfh, tmpfilename = tempfile.mkstemp(dir='.')\n\tfw = open( tmpfilename, 'w' )\n\thandle = open(fastafile)\n\tcount = 0\n\tfor seq_record in SeqIO.parse(handle, \"fasta\"):\n\t\tcount += 1\n\t\tif count > number: break\n\t\tfw.write( '>' + seq_record.id + '\\n' + seq_record.seq.tostring() + '\\n' )\n\thandle.close()\n\tfw.flush()\n\tfw.close()\n\treturn tmpfilename\n\ndef generate_datasets( file ):\n\t\n\tdatahash = {}\n\tfo = open( file )\n\tfor line in fo:\n\t\talphabet, counter, path = line.split()\n\t\tdatahash[ alphabet+'-'+counter ] = path\n\tfo.close()\n\t\n\tseqhash = {}\n\t# nucleotide\n\tseqhash[ 'nt-1-1' ] = get_sequences(datahash[ 'nt-1' ], 500)\n\tseqhash[ 'nt-2-1' ] = get_sequences(datahash[ 'nt-2' ], 50)\n\t#seqhash[ 'nt-1-2' ] = get_sequences(datahash[ 'nt-1' ], 100)\n\t#seqhash[ 'nt-2-2' ] = get_sequences(datahash[ 'nt-2' ], 250)\n\t#seqhash[ 'nt-1-3' ] = get_sequences(datahash[ 'nt-1' ], 165)\n\t#seqhash[ 'nt-2-3' ] = get_sequences(datahash[ 'nt-2' ], 165)\n\t# amino acid\n\t#seqhash[ 'aa-1-1' ] = get_sequences(datahash[ 'aa-1' ], 100)\n\t#seqhash[ 'aa-2-1' ] = get_sequences(datahash[ 'aa-2' ], 100)\n\t#seqhash[ 'aa-1-2' ] = get_sequences(datahash[ 'aa-1' ], 150)\n\t#seqhash[ 'aa-2-2' ] = get_sequences(datahash[ 'aa-2' ], 150)\n\t#seqhash[ 'aa-1-3' ] = get_sequences(datahash[ 'aa-1' ], 200)\n\t#seqhash[ 'aa-2-3' ] = get_sequences(datahash[ 'aa-2' ], 200)\n\t#seqhash[ 'aa-1-4' ] = get_sequences(datahash[ 'aa-1' ], 300)\n\t#seqhash[ 'aa-2-4' ] = get_sequences(datahash[ 'aa-2' ], 300)\n\t\n\tfor key, path in seqhash.iteritems():\n\t\tif key.startswith('nt'): t = 'n'\n\t\telse: t = 'p'\n\t\tos.system( \"xdformat -\" + t + \" \" + path + \" &> xdformat.log\")\n\t\tos.system( \"formatdb -i \" + path )\n\t\n\treturn seqhash \n\n# =============================================================================\ndef determine_blast_program( type1, type2 ):\n\t\"\"\"\n\t\"\"\"\n\tif type1 == 'nt' and type2 == 'nt':\n\t\treturn 'tblastx'\n\telif type1 == 'aa' and type2 == 'aa':\n\t\treturn 'blastp'\n\telif type1 == 'aa' and type2 == 'nt':\n\t\treturn 'tblastn'\n\telif type1 == 'nt' and type2 == 'aa':\n\t\treturn 'blastx'\n\telse:\n\t\treturn None\n\n# =============================================================================\ndef benchmark_blastall( type1, path1, type2, path2 ):\n\t\"\"\"\n\tdetermines the runtime of dataset1 blasted against dataset2.\n\tdetermines the type of blast to use depending on the file types (aa or nt).\n\t\"\"\"\n\tp = determine_blast_program( type1, type2 )\n\tstarttime = time.time()\n\tos.system( \"blastall -p \" + p + \" -d \" + path2 + \" -i \" + path1 + \" -o blastall.out\" )\n\truntime = time.time() - starttime\n\tprint \"benchmark blastall\", type1, \"vs\", type2, \"--\", runtime\n# =============================================================================\ndef benchmark_wublast( type1, path1, type2, path2 ):\n\t\"\"\"\n\tdetermines the runtime of dataset1 blasted against dataset2.\n\tdetermines the type of blast to use depending on the file types (aa or nt).\n\twublast syntax: <program> <database> <query> [options...]\n\t\"\"\"\n\tp = determine_blast_program( type1, type2 )\n\tstarttime = time.time()\n\tos.system( p + \" \" + path2 + \" \" + path1 + \" &> wublast.out\")\n\truntime = time.time() - starttime\n\tsin,sout = os.popen2(\"grep \\> -c \" + path1)\n\tsin.close()\n\ts1 = sout.read().replace('\\n','')\n\t\n\tsout.close()\n\tsin,sout = os.popen2(\"grep \\> -c \" + path2)\n\tsin.close()\n\ts2 = 
sout.read().replace('\\n','')\n\tsout.close()\n\t\n\tprint \"benchmark wublast\", s1, type1, \"vs\", s2, type2, \"--\", runtime\n\t\n\t\ndef xdformat( file, type ):\n\tos.system( \"xdformat -\" + type + \" \" + file + \" &> xdformat.log\")\n\n# =============================================================================\ndef bench_nt_vs_aa( seqhash ):\n\tprint \"benchmark nt vs aa\"\n\t\n\tricent = 'data/rice.nt'\n\triceaa = 'data/rice.aa'\n\tarathnt = 'data/arath.nt'\n\tarathaa = 'data/arath.aa'\n\t\n\trice_nt_100 = get_sequences(ricent, 100)\n\txdformat( rice_nt_100, 'n' )\n\tarath_nt_100 = get_sequences(arathnt, 100)\n\txdformat( arath_nt_100, 'n' )\n\trice_nt_300 = get_sequences(ricent, 300)\n\txdformat( rice_nt_300, 'n' )\n\tarath_nt_300 = get_sequences(arathnt, 300)\n\txdformat( arath_nt_300, 'n' )\n\trice_nt_500 = get_sequences(ricent, 500)\n\txdformat( rice_nt_500, 'n' )\n\tarath_nt_500 = get_sequences(arathnt, 500)\n\txdformat( arath_nt_500, 'n' )\n\t\n\trice_aa_100 = get_sequences(riceaa, 100)\n\txdformat( rice_aa_100, 'p' )\n\tarath_aa_100 = get_sequences(arathaa, 100)\n\txdformat( arath_aa_100, 'p' )\n\trice_aa_300 = get_sequences(riceaa, 300)\n\txdformat( rice_aa_300, 'p' )\n\tarath_aa_300 = get_sequences(arathaa, 300)\n\txdformat( arath_aa_300, 'p' )\n\trice_aa_500 = get_sequences(riceaa, 500)\n\txdformat( rice_aa_500, 'p' )\n\tarath_aa_500 = get_sequences(arathaa, 500)\n\txdformat( arath_aa_500, 'p' )\n\t\n\tprint \"---\"\n\tprint \"TBLASTX\"\n\tbenchmark_wublast( 'nt', rice_nt_100, 'nt', arath_nt_100 )\n\tbenchmark_wublast( 'nt', rice_nt_300, 'nt', arath_nt_300 )\n\tbenchmark_wublast( 'nt', rice_nt_500, 'nt', arath_nt_500 )\n\tprint \"---\"\n\tprint \"BLASTX\"\n\tbenchmark_wublast( 'nt', rice_nt_100, 'aa', arath_aa_100 )\n\tbenchmark_wublast( 'nt', rice_nt_300, 'aa', arath_aa_300 )\n\tbenchmark_wublast( 'nt', rice_nt_500, 'aa', arath_aa_500 )\n\tprint \"---\"\n\tprint \"TBLASTN\"\n\tbenchmark_wublast( 'aa', rice_aa_100, 'nt', arath_nt_100 )\n\tbenchmark_wublast( 'aa', rice_aa_300, 'nt', arath_nt_300 )\n\tbenchmark_wublast( 'aa', rice_aa_500, 'nt', arath_nt_500 )\n\tprint \"---\"\n\tprint \"BLASTP\"\n\tbenchmark_wublast( 'aa', rice_aa_100, 'aa', arath_aa_100 )\n\tbenchmark_wublast( 'aa', rice_aa_300, 'aa', arath_aa_300 )\n\tbenchmark_wublast( 'aa', rice_aa_500, 'aa', arath_aa_500 )\n\tprint \"---\"\n\t\n# =============================================================================\ndef bench_sizes( seqhash ):\n\tprint \"benchmark sizes\"\n\t\n\tricent = 'data/rice.nt'\n\triceaa = 'data/rice.aa'\n\tarathnt = 'data/arath.nt'\n\tarathaa = 'data/arath.aa'\n\t\n\tarath_aa_200 = get_sequences(arathaa, 200)\n\txdformat( arath_aa_200, 'p' )\n\t\n\trice_aa_10 = get_sequences(riceaa, 10)\n\txdformat( rice_aa_10, 'p' )\n\t\n\trice_aa_50 = get_sequences(riceaa, 50)\n\txdformat( rice_aa_50, 'p' )\n\t\n\trice_aa_200 = get_sequences(riceaa, 200)\n\txdformat( rice_aa_200, 'p' )\n\t\n\trice_aa_300 = get_sequences(riceaa, 300)\n\txdformat( rice_aa_300, 'p' )\n\t\n\trice_aa_500 = get_sequences(riceaa, 500)\n\txdformat( rice_aa_500, 'p' )\n\t\n\tprint \"---\"\n\t\n\tbenchmark_wublast( 'aa', rice_aa_10, 'aa', arath_aa_200 )\n\tbenchmark_wublast( 'aa', rice_aa_50, 'aa', arath_aa_200 )\n\tbenchmark_wublast( 'aa', rice_aa_200, 'aa', arath_aa_200 )\n\tbenchmark_wublast( 'aa', rice_aa_300, 'aa', arath_aa_200 )\n\tbenchmark_wublast( 'aa', rice_aa_500, 'aa', arath_aa_200 )\n\t\n\tprint \"---\"\n\t\n\tbenchmark_wublast( 'aa', arath_aa_200, 'aa', rice_aa_10 )\n\tbenchmark_wublast( 'aa', arath_aa_200, 
'aa', rice_aa_50 )\n\tbenchmark_wublast( 'aa', arath_aa_200, 'aa', rice_aa_200 )\n\tbenchmark_wublast( 'aa', arath_aa_200, 'aa', rice_aa_300 )\n\tbenchmark_wublast( 'aa', arath_aa_200, 'aa', rice_aa_500 )\n\t\n\tprint \"---\"\n\t\n\n# =============================================================================\ndef bench_single_vs_multiple_files( seqhash ):\n\t\n\tdef single_files( file ):\n\t\tcount = 0\n\t\tfilenames = {}\n\t\thandle = open(file)\n\t\tfor seq_record in SeqIO.parse(handle, \"fasta\") :\n\t\t\tfilenames[ file+str(count) ] = 1 \n\t\t\twrite_to_file( file+str(count), seq_record.id + '\\n' + seq_record.seq.tostring() + '\\n' )\n\t\t\tcount += 1\n\t\thandle.close()\n\t\treturn filenames\n\t\t\t\n\tprint \"benchmark query files\"\n\t\n\tricent = 'data/rice.nt'\n\triceaa = 'data/rice.aa'\n\tarathnt = 'data/arath.nt'\n\tarathaa = 'data/arath.aa'\n\t\n\trice_aa_10 = get_sequences(riceaa, 50)\n\trice_aa_50 = get_sequences(riceaa, 200)\n\trice_aa_100 = get_sequences(riceaa, 500)\n\tarath_aa_1000 = get_sequences(arathaa, 1000)\n\txdformat( arath_aa_1000, 'p' )\n\t\n\tprint \"---\"\n\t\n\t# split the files\n\tp = 'blastp'\n\n\t\n\tstarttime = time.time()\n\tfilenames = single_files( rice_aa_10 )\n\tfor file in filenames.keys():\n\t\tos.system( p + \" \" + arath_aa_1000 + \" \" + file + \" &> wublast.out\")\n\t\tsys.stdout.write('.')\n\truntime = time.time() - starttime\n\tsys.stdout.write('\\n')\n\tprint \"benchmark wublast\", str(len(filenames.keys())), \"--\", runtime\n\tfor file in filenames.keys(): os.unlink(file)\n\t\n\tstarttime = time.time()\n\tfilenames = single_files( rice_aa_50 )\n\tfor file in filenames.keys():\n\t\tos.system( p + \" \" + arath_aa_1000 + \" \" + file + \" &> wublast.out\")\n\t\tsys.stdout.write('.')\n\truntime = time.time() - starttime\n\tsys.stdout.write('\\n')\n\tprint \"benchmark wublast\", str(len(filenames.keys())), \"--\", runtime\n\tfor file in filenames.keys(): os.unlink(file)\n\t\n\tstarttime = time.time()\n\tfilenames = single_files( rice_aa_100 )\n\tfor file in filenames.keys():\n\t\tos.system( p + \" \" + arath_aa_1000 + \" \" + file + \" &> wublast.out\")\n\t\tsys.stdout.write('.')\n\truntime = time.time() - starttime\n\tsys.stdout.write('\\n')\n\tprint \"benchmark wublast\", str(len(filenames.keys())), \"--\", runtime\n\tfor file in filenames.keys(): os.unlink(file)\n\t\n\tprint \"---\"\n\tbenchmark_wublast( 'aa', rice_aa_10, 'aa', arath_aa_1000 )\n\tbenchmark_wublast( 'aa', rice_aa_50, 'aa', arath_aa_1000 )\n\tbenchmark_wublast( 'aa', rice_aa_100, 'aa', arath_aa_1000 )\n\n# =============================================================================\ndef remove_tmpfiles( seqhash ):\n\tfor key, value in seqhash.iteritems():\n\t\tos.system( \"rm \" + value + \"*\" )\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\ndef main():\n\t\"\"\"\n\t\"\"\"\n\tfile = handle_arguments() \n\tseqhash = generate_datasets( file )\n\t#bench_nt_vs_aa( seqhash )\n\tbench_sizes( seqhash )\n\t#bench_single_vs_multiple_files( seqhash )\n\tremove_tmpfiles( seqhash ) \n\n# =============================================================================\nmain()" }, { "alpha_fraction": 0.578125, "alphanum_fraction": 0.5796874761581421, "avg_line_length": 34.55555725097656, "blob_id": "c90dd8a14c76056d255ad5d8b357af56f864c1c1", "content_id": 
"4e0a91b3f624f1f5f6d9b0649d1c4bff84d5f69e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 640, "license_type": "permissive", "max_line_length": 85, "num_lines": 18, "path": "/python/base/muscle.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "import tempfile, os, fasta\n\n# =============================================================================\ndef align(sequences, ids, outfile=False):\n h, infile = tempfile.mkstemp()\n os.close(h)\n fw = open(infile, 'w')\n for i in range(len(sequences)): fw.write(\">\" + ids[i] + \"\\n\" + sequences[i] + \"\\n\")\n fw.close()\n h, outfile = tempfile.mkstemp()\n os.close(h)\n os.system(\"muscle -in %s -out %s -quiet 2> /dev/null\" %(infile, outfile))\n os.unlink(infile)\n aligned_sequences = []\n alnhash = fasta.get_sequence_hash(outfile)\n for gid in ids: aligned_sequences.append(alnhash[gid])\n os.unlink(outfile)\n return aligned_sequences\n" }, { "alpha_fraction": 0.5830497145652771, "alphanum_fraction": 0.5973451137542725, "avg_line_length": 30.255319595336914, "blob_id": "4498497bd05134b00ad47e85a06654f2c118298a", "content_id": "81b8818865340f4c759a86e9c5df7f8b53d1f272", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2938, "license_type": "permissive", "max_line_length": 118, "num_lines": 94, "path": "/python/orthomcl/add-blasthits-to-cluster.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys, string, anydbm\nfrom low import *\nfrom orthomcl import OrthoMCLCluster\n\n\n# =============================================================================\ndef usage():\n print >> sys.stderr, \"add significant BLAST hits (e.g. 
in-paralogs) to an existing orthomcl cluster.\\n\"\n print >> sys.stderr, \"usage: (1) \" + sys.argv[0] + \" noparalogs.orthomcl.out blastout.add.dbm\" \n print >> sys.stderr, \" or (2) \" + sys.argv[0] + \" noparalogs.orthomcl.out all.fasta all.gg all.blastout\" \n sys.exit(1)\n\n\ndef plausi():\n if len(sys.argv) != 3 and len(sys.argv) != 5: usage()\n return sys.argv[1:]\n\n\ndef read_gg(inGG):\n outHash, speciesArray = {}, []\n fo = open(inGG)\n for line in fo: \n line = line.rstrip()\n cols = line.split()\n species = str(cols[0])[:-1]\n if not species in speciesArray: speciesArray.append(species)\n for col in cols[1:]:\n outHash[col] = species\n fo.close()\n return outHash, speciesArray\n\n\ndef get_seq_lengths(file):\n lengthHash, id = {}, \"\"\n fo = open(file)\n for line in fo: \n line = line.strip()\n if line.startswith(\">\"):\n id = line[1:]\n if id.count(\" \") > 0: id = id[:id.index(\" \")]\n lengthHash[id] = 0 \n else: lengthHash[id] += len(line)\n return lengthHash\n\n\ndef main():\n args = plausi()\n in_orthomcl = args[0]\n EVALUE = float('1e-20')\n IDENTITY = 30.0\n if len(args) == 4:\n in_fasta, in_gg, in_blast = args[1:4]\n gene2species, speciesArray = read_gg(in_gg)\n gene2length = get_seq_lengths(in_fasta)\n dbmfile = in_blast + \".add.dbm\"\n dbm = anydbm.open(dbmfile, \"c\")\n fo = open(in_blast)\n for line in fo: \n line = line.rstrip()\n cols = line.split(\"\\t\")\n qid, hid, evalue, identity = cols[0], cols[1], float(cols[10]), float(cols[2])\n # ignore self-hits and between-species hits, check e-value threshold\n if qid == hid: continue\n if gene2species[qid] != gene2species[hid]: continue\n if evalue > EVALUE: continue\n if identity < IDENTITY: continue\n # check that blast alignment spans at least 75% of the longer sequence\n alnlength, qlength, hlength = int(cols[3]), gene2length[qid], gene2length[hid]\n lengthcutoff = 0.80 * max([qlength, hlength])\n if alnlength < lengthcutoff: continue\n if not dbm.has_key(qid): dbm[qid] = \"\"\n else: dbm[qid] += \" \"\n dbm[qid] += hid\n fo.close()\n dbm.close()\n else: dbmfile = args[1]\n dbm = anydbm.open(dbmfile)\n\n fo = open(in_orthomcl)\n for line in fo:\n o = OrthoMCLCluster(line.rstrip())\n oldsize = o.get_count()\n additions = []\n for geneid, species in o.get_gene_hash().iteritems():\n if not dbm.has_key(geneid): continue\n [additions.append([x, species]) for x in dbm[geneid].split()]\n\n for x, species in additions: o.add_gene(x,species)\n o.to_s()\n newsize = o.get_count()\n print >> sys.stderr, \"%s\\t%s\\t%s\" %(o.get_name(), oldsize, newsize)\n\nmain()\n" }, { "alpha_fraction": 0.4897400736808777, "alphanum_fraction": 0.4935704469680786, "avg_line_length": 31.061403274536133, "blob_id": "2c4170005e498ba04afe1c9039f4ff1870fb15b1", "content_id": "764590a6c4825a86ca942686b165e4085cb35ea0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3655, "license_type": "permissive", "max_line_length": 83, "num_lines": 114, "path": "/python/fasta/reduce_fasta_file.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nfrom low import * # custom functions, written by myself\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage 
information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path> -i <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f fast file which should be reduced\" )\n stdout( \" -i file with the IDs to keep\" )\n stdout( \" -v verbose: report statistics to STDERR, otherwise silent\" )\n stdout( \" \" )\n \n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hi:f:v\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n \n args = {}\n args['verbose'] = 0\n for key, value in keys:\n if key == '-f': args['in-fasta'] = value\n if key == '-i': args['in-ids'] = value\n if key == '-v': args['verbose'] = 1\n\n if not args.has_key('in-fasta'):\n stderr( \"in-fasta file missing.\" )\n show_help()\n if not args.has_key('in-ids'):\n stderr( \"in-ids file missing.\" )\n show_help()\n \n if not file_exists( args.get('in-fasta') ):\n stderr( \"in-fasta file does not exist.\" )\n show_help()\n if not file_exists( args.get('in-ids') ):\n stderr( \"in-ids file does not exist.\" )\n show_help()\n \n return args\n\n# =============================================================================\ndef get_ids_to_keep( args ):\n \"\"\"\n reads in the in-ids file and gathers all IDs to which\n the out fasta file will be reduced to.\n \"\"\"\n fo = open( args.get('in-ids'), 'r' )\n keepids = {}\n for line in fo:\n line = line.rstrip()\n keepids[ line.replace('>','') ] = 1\n fo.close()\n return keepids\n \n \n# =============================================================================\ndef reduce_fasta( args, keepids ):\n \"\"\"\n reads in in-fasta and creates out-fasta that only contains the records\n whose id is contained in the hash keepids.\n \"\"\"\n if args.get('verbose'):\n sys.stderr.write('\\tnumber of records to retain: %s ' % len(keepids) )\n retained = 0\n id, seq = \"\", \"\"\n fo = open( args.get('in-fasta') )\n for line in fo:\n line = line.rstrip()\n if len(line) == 0: continue\n if line[0] == \">\":\n if id != \"\" and seq != \"\" and keepids.has_key(id):\n print \">\" + id + \"\\n\" + seq\n retained += 1\n id, seq = \"\", \"\"\n checkid = line[1:].split()[0]\n if keepids.has_key(checkid): id = checkid\n\n else:\n if id != \"\":\n if seq != \"\": seq += \"\\n\"\n seq += line\n\n fo.close()\n if id != \"\" and seq != \"\" and keepids.has_key(id):\n print \">\" + id + \"\\n\" + seq\n retained += 1\n if args.get('vebose'):\n sys.stderr.write('| retained: %s | done.\\n' % retained )\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nkeepids = get_ids_to_keep( args )\nreduce_fasta( args, keepids )\n" }, { "alpha_fraction": 0.47622641921043396, "alphanum_fraction": 0.48867926001548767, "avg_line_length": 31.30487823486328, "blob_id": "c1db937cf11416b46bb99b7110d0ade24f3da4c0", "content_id": "86f25d702c97860336ab1a193cf7d2b932f4cadc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2650, 
"license_type": "permissive", "max_line_length": 83, "num_lines": 82, "path": "/python/generic/text2range.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nimport math # match functions\nfrom low import * # custom functions, written by myself\n\nREGEX = re.compile(\"(\\d+)$\")\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f text flat file to analyze\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['file'] = value\n \n if not args.has_key('file'):\n stderr( \"import file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"import file does not exist.\" )\n show_help()\n \n return args\n\n\n# =============================================================================\ndef is1higherthan( text1, text2, regex=REGEX ):\n\n def splittext( text, regex ):\n return regex.split( text )[0], int(regex.split( text )[1])\n\n id1, number1 = splittext( text1, regex )\n id2, number2 = splittext( text2, regex )\n if id1 != id2: return 0 \n if (number1 +1) == number2: return 1\n return 0\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n\n fo = open( args.get('file') )\n lines = fo.readlines()\n fo.close()\n\n started_at = \"\"\n\n for i in range(1,len(lines)):\n line0, line1 = lines[i-1], lines[i]\n if started_at == \"\": started_at = line0\n if i < (len(lines)-1) and is1higherthan( line0, line1 ): continue\n print string.join([started_at.rstrip(), line0.rstrip()], \"\\t\")\n started_at = \"\"\n \n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.45883092284202576, "alphanum_fraction": 0.4638592004776001, "avg_line_length": 30.81999969482422, "blob_id": "e3ea8820469206f592ad6c4c49c625a38505ed82", "content_id": "554e0d44d7df86d5ab7aa3c3f3b2ba68e75fec01", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3182, "license_type": "permissive", "max_line_length": 83, "num_lines": 100, "path": "/python/paml/get-paml-results.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular 
expressions\nimport getopt\t\t\t\t\t# comand line argument handling\nfrom low import *\t\t\t# custom functions, written by myself\nimport anydbm\n\n# =============================================================================\t\ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f nt alignment file\" )\n stdout( \" \" )\n\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help()\t\n\n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:t:p:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f':\targs['aln'] = value\n \n if not args.has_key('aln'):\n stderr( \"aln file missing.\" )\n show_help()\n if not file_exists( args.get('aln') ):\n stderr( \"aln file does not exist.\" )\n show_help()\n \n return args\n\n# =============================================================================\ndef get_aln_length_from_file( filename ):\n fo = open( filename )\n firstline = fo.readline()\n n, length = firstline.split()\n fo.close()\n return length\n\n# =============================================================================\ndef get_lnL_from_file( filename, model ):\n file = filename + '.paml.out.' + model\n np, lnL = None, None\n if not file_exists( file ): \n stderr( \"File does not exist: %s\" %file )\n return np, lnL\n\n fo = open( file )\n for line in fo:\n if line.startswith(\"lnL\"):\n #print filename, model, line\n np = re.match(\"lnL\\(.*\\s+np:\\s*(\\d+)\", line ).group(1)\n lnL = re.match(\"lnL\\(.*\\):\\s+([0-9.-]+)\", line ).group(1)\n break\n fo.close()\n return np, lnL\n\n# =============================================================================\n# =============================================================================\ndef main( args ):\n \n models = [\"M0\", \"M3K2\", \"M3K3\", \"M7\", \"M8\", \"Free\"]\n filename = args.get('aln')\n \n line = []\n line.append( filename )\n length = get_aln_length_from_file( filename )\n line.append( length )\n for M in models:\n np, lnL = get_lnL_from_file( filename, M )\n if np == None or lnL == None:\n stderr( \"%s: None returned for model %s (%s/%s)\" %( filename, M, np, lnL ) )\n sys.exit(1)\n line.append( M )\n line.append( np )\n line.append( lnL )\n print string.join(line,\"\\t\")\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.5767690539360046, "alphanum_fraction": 0.5861148238182068, "avg_line_length": 18.205127716064453, "blob_id": "30286f27ad9ad500d23dcbc3a9f0a6411bb7247e", "content_id": "87e9a1ebac2406c635a679a3990dae94a6a16b37", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 749, "license_type": "permissive", "max_line_length": 81, "num_lines": 39, "path": "/python/fasta/fasta-length-per-file.py", "repo_name": 
"haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys\n\ndef usage():\n print >> sys.stderr, \"reports for each fasta sequence the length in tab format\"\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" fastafile\"\n sys.exit(1)\n\n\ndef plausi():\n if len(sys.argv) != 2: usage()\n inFile = sys.argv[1]\n return inFile\n\n\ndef parse_fasta_file(file):\n lengthHash = {}\n fo = open(file)\n id = \"\"\n length = 0\n for line in fo:\n line = line.strip()\n if line.startswith(\">\"):\n continue\n else:\n length += len(line)\n fo.close()\n base = file\n if base.count(\".\") > 0: base = base[:base.index(\".\")]\n if base.count(\"_\") > 0: base = base[:base.index(\"_\")]\n print base + \"\\t\" + str(length)\n\n\ndef main():\n inFile = plausi()\n parse_fasta_file( inFile )\n\n\nmain()\n" }, { "alpha_fraction": 0.5583371520042419, "alphanum_fraction": 0.5686724781990051, "avg_line_length": 34.98347091674805, "blob_id": "433d86db4df75f6583f954e5fbae29c15bbf2198", "content_id": "b69365f7cd0e2b904a0746cdf033d02d615c4977", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 4354, "license_type": "permissive", "max_line_length": 117, "num_lines": 121, "path": "/ruby/geneontology/termtable-from-go-enrichment2.rb", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby -w\n# == Synopsis\n# creates input for wordle.net/advanced to create a term cloud based on the six\n# output files produced by the go-enrichment.py script\n#\n\nrequire 'optparse'\nrequire 'rubygems'\nrequire 'faster_csv'\n#require 'rsruby'\n\n\n# =============================================================================\ndef get_opt\n options = Hash.new\n optparse = OptionParser.new do |opts|\n opts.banner = \"Usage: #{$0} -d <dir>\"\n options[:dir] = nil\n options[:go2name] = nil\n opts.on( '-d DIR', 'directory that contains *.ORA files produced with go-enrichment2.py'\n ){|dir| options[:dir] = dir}\n opts.on( '-g FILE', 'gene ontology id to name mapping file, tab delimited, to look up shortened term names'\n ){|file| options[:go2name] = file}\n opts.on( '-m MIN', 'minimum number of species in which a term has to be found significant'\n ){|s| options[:min] = s.to_i}\n opts.on( '-f', 'use FDR < 0.05 instead of p < 0.05 filter'\n ){options[:filterfdr] = true}\n opts.on( '-o', 'evaluate over-representation' \n ){options[:over] = true} \n# opts.on( '-u', 'evaluate under-representation' \n# ){options[:under] = true} \n end\n begin\n optparse.parse!\n mandatory = [:dir, :go2name, :min]\n missing = mandatory.select{|param| options[param].nil?}\n if not missing.empty?\n puts \"Missing options: #{missing.join(', ')}\"\n puts optparse \n exit\n end\n rescue OptionParser::InvalidOption, OptionParser::MissingArgument\n puts $!.to_s\n puts optparse\n exit\n end\n return options\nend\n\n\n# =============================================================================\ndef statusbar(progress, message=\"\", width=40)\n progressbar = \"=\" * (progress*width).to_i\n progressbar << \" \" while progressbar.length < width\n STDERR.print \"\\r 0% #{progressbar} 100% \"\n STDERR.print \"[#{message}]\" unless message.empty?\n STDERR.print \"\\n\" if progress == 1.0 \nend\n\n# =============================================================================\ndef get_go2name(file)\n go2name = Hash.new\n f = File.open(file, \"r\")\n while (line = f.gets)\n line.chomp!\n id, name = line.split(\"\\t\")[0,2]\n go2name[id] = name\n end\n 
return go2name\nend\n\n# =============================================================================\ndef parse_ORA_file(file, filterfdr, doOver, doUnder)\n terms = Hash.new\n IO.foreach(file) do |line|\n direction, ontology, goid, p, fdr = line.split(\"\\t\")\n next if direction == 'O' and not doOver\n next if direction == 'U' and not doUnder\n p, fdr = p.to_f, fdr.to_f\n next if p > 0.05\n next if filterfdr and fdr > 0.05\n terms[goid] = filterfdr ? fdr : p\n end\n return terms\nend\n\n# =============================================================================\n# === M A I N =================================================================\n# =============================================================================\n\noptions = get_opt()\nabort(\"directory does not exist - aborting.\") unless File.exists?(options[:dir]) and File.directory?(options[:dir])\nabort(\"go2name mapping file does not exist - aborting.\") if options[:go2name] and not File.exists?(options[:go2name])\ngo2name = get_go2name(options[:go2name])\nenrichedHash = Hash.new\nDir.glob(options[:dir] + '/*.ORA').each do |file|\n key = File.basename(file).split('.').first\n enrichedHash[key] = parse_ORA_file(file, options[:filterfdr], options[:over], options[:under])\nend\n\nspecies = enrichedHash.keys\nSTDOUT.puts(([\"GO.ID\", \"GO.NAME\"] + species.collect{|s| s.upcase}).join(\"\\t\"))\nallterms = enrichedHash.values.collect{|v| v.keys}.inject{|union, array| union + array}.uniq\nallterms.each do |goid|\n occurrence = enrichedHash.select{|s, gohash| gohash.key? goid}.count\n next if occurrence < options[:min]\n STDOUT.print [goid, go2name[goid]].join(\"\\t\")\n out = species.collect{|s| (enrichedHash[s].key?(goid) ? Math.log(-1*Math.log(enrichedHash[s][goid])).to_s : \"0\")}\n STDOUT.puts \"\\t\" + out.join(\"\\t\")\nend\nSTDOUT.close\n\n#r = RSRuby.instance\n#r.assign('d', r.read.csv(options[:dir] + '/termtable.tab', :header => true, :sep => \"\\t\"))\n#r.assign('m', as.matrix(r.d[,3:species.count+2]))\n#r.rownames(r.d) = r.d[,2]\n#r.library('gplots')\n#r.library('RColorBrewer')\n#r.pdf(options[:dir] + '/termtable.pdf')\n#r.heatmap.2(r.m, :col => brewer.pal(3, \"Blues\"))\n#r.eval_R(\"dev.off()\")\n" }, { "alpha_fraction": 0.47435489296913147, "alphanum_fraction": 0.4769034683704376, "avg_line_length": 31.360824584960938, "blob_id": "47a1b5954d20b14295cf0a3e3bdbb569e6869904", "content_id": "2948144c6053baf257552eecae8016651d9388f7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3139, "license_type": "permissive", "max_line_length": 82, "num_lines": 97, "path": "/python/kegg/kegg-parser.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular expressions\nimport getopt\t\t\t\t\t# comand line argument handling\nfrom low import *\t\t\t# custom functions, written by myself\nimport anydbm\t\t\t\t\t# index databases (file hash)\nfrom Bio import SeqIO # biopython stuff, to parse fasta files for instance\n\n# =============================================================================\t\ndef show_help( ):\n\t\"\"\" displays the program parameter list and usage information \"\"\"\n\tstdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n\tstdout( \" \" )\n\tstdout( \" option description\" )\n\tstdout( \" -h help (this text here)\" )\n\tstdout( \" -f kegg html 
file\" )\n\tstdout( \" \" )\n\t\n\tsys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n\t\"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\tif len ( sys.argv ) == 1:\n\t\tstderr( \"no arguments provided.\" )\n\t\tshow_help()\t\n\t\n\ttry: # check for the right arguments\n\t\tkeys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n\texcept getopt.GetoptError:\n\t\tstderr( \"invalid arguments provided.\" )\n\t\tshow_help()\n\t\n\targs = {}\n\tfor key, value in keys:\n\t\tif key == '-f':\targs['file'] = value\n\t\t\t\t\n\tif not args.has_key('file'):\n\t\tstderr( \"kegg file missing.\" )\n\t\tshow_help()\n\tif not file_exists( args.get('file') ):\n\t\tstderr( \"kegg file does not exist.\" )\n\t\tshow_help()\n\t\t\n\treturn args\n\n\n# =============================================================================\ndef strip_tags(value):\n \"Return the given HTML with all tags (+ KEGG tags) stripped.\"\n value = re.sub(r'<[^>]*?>', '', value)\n value = re.sub(r'\\[.*\\]', '', value)\n return value\n\n# =============================================================================\n# =============================================================================\ndef main( args ):\n fo = open( args.get('file'), 'r' )\n statics = {}\n statics['entry'] = '^#ENTRY\\s+(\\S+)'\n statics['name'] = '^#NAME\\s+(\\S+)'\n statics['definition'] = '^#DEFINITION\\s+(.*)$'\n oldlevel = \"\"\n hier = []\n for line in fo:\n for name, regex in statics.iteritems():\n if re.search( regex, line ):\n print \"#%s\\t%s\" %(name, re.search( regex, line).group(1))\n\n if re.match( '[A-Z]\\s+', line ):\n currentlevel = line[0]\n #print currentlevel\n rest = re.match( '[A-Z]\\s+(.*)$', line ).group(1).strip()\n if not re.search( '\\S+', rest ): continue\n rest = re.match( '(\\S+)', rest ).group(1)\n if currentlevel > oldlevel:\n hier.append( strip_tags(rest) )\n elif currentlevel == oldlevel: \n print string.join( hier, '/' )\n hier.pop()\n hier.append( strip_tags(rest) )\n else:\n hier.pop()\n \n oldlevel = currentlevel\n\n fo.close()\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.5742297172546387, "alphanum_fraction": 0.5798319578170776, "avg_line_length": 22.799999237060547, "blob_id": "d07e0688ae61abdc88241d790285f9cd7b19bd08", "content_id": "f8ef5bea6d2f19b4c921d60d47c01d927bca0705", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 714, "license_type": "permissive", "max_line_length": 101, "num_lines": 30, "path": "/python/orthomcl/geneid2cluster.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys, string\nfrom low import *\nfrom orthomcl import OrthoMCLCluster\n\n\n# =============================================================================\ndef usage():\n print >> sys.stderr, \"prints a mapping between each gene id and its cluster from orthomcl output\\n\"\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" orthomcl.out\" \n sys.exit(1)\n\n\ndef plausi():\n if len(sys.argv) != 2: usage()\n inFile = sys.argv[1]\n return inFile\n\n\ndef main():\n inFile = plausi()\n fo = open(inFile)\n for line in fo:\n o = 
OrthoMCLCluster(line.rstrip())\n name = o.get_name()\n geneHash = o.get_gene_hash()\n for geneid, species in geneHash.iteritems(): print geneid + \"\\t\" + name\n\n\nmain()\n" }, { "alpha_fraction": 0.4970139265060425, "alphanum_fraction": 0.5029860734939575, "avg_line_length": 31.058509826660156, "blob_id": "38af3ad41fcf2131afb5d486f676e1f676b959b8", "content_id": "b47461aed8aec250e74d3935fc184b31dad55ea5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6028, "license_type": "permissive", "max_line_length": 122, "num_lines": 188, "path": "/python/fasta/import-fasta-sequence.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nimport math # match functions\nfrom low import * # custom functions, written by myself\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path> -t -a -n -m [-i -p]\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f fasta file to import\" )\n stdout( \" -t type: fasta, or xdom\" )\n stdout( \" -a action: \\\"insert\\\" or \\\"update\\\"\" )\n stdout( \" -n mysql table name\" )\n stdout( \" -m field names (comma separated), mapping to the fields to be parsed in the same order, leave out ID\" )\n stdout( \" -p prefix to put in fron of the key\" )\n stdout( \" -i record name to id file\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:n:t:m:i:p:a:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['file'] = value\n if key == '-a': args['action'] = value\n if key == '-t': args['type'] = value\n if key == '-n': args['table'] = value\n if key == '-m': args['fields'] = value\n if key == '-i': args['idfile'] = value\n if key == '-p': args['prefix'] = value\n \n if not args.has_key('file'):\n stderr( \"import file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"import file does not exist.\" )\n show_help()\n \n if args.has_key('idfile') and not file_exists( args.get('idfile') ):\n stderr( \"idfile does not exist.\" )\n show_help()\n \n\n if not args.has_key('type'):\n stderr( \"file format argument missing.\" )\n show_help()\n \n if not args.has_key('action'):\n stderr( \"action argument missing.\" )\n show_help()\n\n if not args.has_key('table'):\n stderr( \"table name missing.\" )\n show_help()\n \n if not args.has_key('fields'):\n stderr( \"field names missing.\" )\n show_help()\n\n return args\n\n\n# =============================================================================\ndef get_ids( args ):\n \"\"\" \n reads in the idfile and returns a hash, mapping record names to IDs\n \"\"\"\n idhash = {}\n fo = open( args.get('idfile') )\n for line in fo:\n 
recordname, recordid = line.split()\n idhash[ recordname ] = recordid\n return idhash\n \n\n# =============================================================================\ndef sql_out(action, table, fieldvaluelist):\n \"\"\" \n writes an sql insert or update statement to stdout\n \"\"\"\n count = 0\n\n if action == \"insert\":\n sys.stdout.write( \"INSERT INTO `\" + table + \"` SET \" )\n for kv in fieldvaluelist:\n if count > 0: sys.stdout.write(\", \")\n sys.stdout.write( kv[0] + \"='\" + kv[1] + \"'\" )\n count += 1\n\n elif action == \"update\":\n sys.stdout.write( \"UPDATE `\" + table + \"` SET \" )\n for kv in fieldvaluelist[1:]:\n if count > 0: sys.stdout.write(\", \")\n sys.stdout.write( kv[0] + \"='\" + kv[1] + \"'\" )\n count += 1\n sys.stdout.write( \" WHERE \" + fieldvaluelist[0][0] + \"='\" + fieldvaluelist[0][1] + \"'\" )\n\n sys.stdout.write(\";\\n\")\n\n# =============================================================================\ndef parse_fasta( args ):\n\n action = args.get('action')\n tablename = args.get('table')\n fields = args.get('fields').split(',')\n if not len(fields) == 2:\n stderr(\"expected 2 fields to parse fasta.\")\n sys.exit(3)\n \n fo = open( args.get('file'), 'r' )\n key, value = \"\", \"\"\n for line in fo:\n line = line.rstrip().rstrip(\"\\n\")\n if line.startswith( \">\" ):\n if key != \"\":\n sql_out(action, tablename, [ [fields[0], key], [fields[1], value] ])\n key, value = \"\", \"\"\n if args.has_key('prefix'): key = args.get('prefix') + line[1:]\n else: key = line[1:]\n else:\n value += line.strip()\n\n # flush the final record; the loop above only writes a record when the next header is seen\n if key != \"\":\n sql_out(action, tablename, [ [fields[0], key], [fields[1], value] ])\n\n fo.close()\n\n# =============================================================================\ndef parse_xdom( args ):\n \"\"\"\n \"\"\"\n action = args.get('action')\n tablename = args.get('table')\n fields = args.get('fields').split(',')\n if not len(fields) >= 2:\n stderr(\"expected 2 or more fields to parse xdom.\")\n sys.exit(3)\n \n fo = open( args.get('file'), 'r' )\n fieldvaluelist = []\n for line in fo:\n line = line.rstrip().rstrip(\"\\n\")\n\n if line.startswith( \">\" ):\n fieldvaluelist = [] # start a fresh record so the previous record's id cannot linger at index 0\n if args.has_key('prefix'): fieldvaluelist.append( [ fields[0], args.get('prefix') + line[1:] ] )\n else: fieldvaluelist.append( [ fields[0], line[1:] ] )\n\n else:\n values = line.split(\"\\t\")\n for i in range( len(fields[1:]) ):\n fieldvaluelist.append( [ fields[i+1], values[i] ] )\n sql_out(action, tablename, fieldvaluelist)\n fieldvaluelist = fieldvaluelist[:1]\n\n fo.close()\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n if args.has_key('idfile'): idhash = get_ids( args )\n fileformat = args.get('type')\n if fileformat.lower() == 'fasta':\n parse_fasta(args)\n elif fileformat.lower() == 'xdom':\n parse_xdom(args)\n else:\n stderr( \"invalid file format. 
only fasta or xdom allowed.\" )\n sys.exit(2)\n\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.46761512756347656, "alphanum_fraction": 0.4732309877872467, "avg_line_length": 30.05813980102539, "blob_id": "894ea283be62a5d49161cbc1a82868e65068d1e9", "content_id": "3b1eed34cbee98f535685271ce6a80f432dc7229", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2671, "license_type": "permissive", "max_line_length": 83, "num_lines": 86, "path": "/python/fasta/fasta-to-swapsc-input.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular expressions\nimport getopt\t\t\t\t\t# comand line argument handling\nfrom low import *\t\t\t# custom functions, written by myself\nimport anydbm\n\n# =============================================================================\t\ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f nt alignment file (fasta)\" )\n stdout( \" -m paml M0 out file\" )\n stdout( \" \" )\n\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help()\t\n\n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:m:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f':\targs['aln'] = value\n if key == '-m':\targs['m0'] = value\n \n if not args.has_key('aln'):\n stderr( \"aln file missing.\" )\n show_help()\n if not file_exists( args.get('aln') ):\n stderr( \"aln file does not exist.\" )\n show_help()\n \n if not args.has_key('m0'):\n stderr( \"M0 file missing.\" )\n show_help()\n if not file_exists( args.get('m0') ):\n stderr( \"M0 file does not exist.\" )\n show_help()\n\n return args\n\n# =============================================================================\n# =============================================================================\ndef main( args ):\n\n #sys.stderr.write(args.get('aln') + \"\\t\")\n #sys.stderr.flush()\n # create evolver control file based on the M0 out file\n fo = open( args.get('m0') )\n line = \"\"\n while not re.match(\"\\s+\\d+\\s+\\d+\\s*$\", line):\n line = fo.readline()\n numbers = line.split()\n nspecies, length = numbers[0:2] \n fo.close()\n\n fo = open( args.get('aln') )\n print \" \" + nspecies + \" \" + length + \"\\n\"\n for line in fo:\n line = line.rstrip()\n if line.startswith(\">\"): print line[1:]\n else: print line\n fo.close()\n\n \n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.6375510096549988, "alphanum_fraction": 0.6416326761245728, "avg_line_length": 
37.25, "blob_id": "d26cd0bdaa8ad90b64356723401cc4dcc71960d7", "content_id": "a47c0cfba61e97f32991e8da8d2585f80d84d275", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1225, "license_type": "permissive", "max_line_length": 113, "num_lines": 32, "path": "/python/base/orthomcl.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "import sys\n\nclass OrthoMCLCluster():\n def __init__(self, line):\n descr, genedefs = line.split(\"\\t\")\n genedefs = genedefs.split()\n self.name = descr[:descr.index('(')].lower()\n self.geneHash = {}\n self.speciesHash = {}\n for genedef in genedefs:\n geneid = genedef[:genedef.index('(')]\n species = genedef[genedef.index('(')+1:-1]\n self.geneHash[geneid] = species\n if self.speciesHash.has_key(species): self.speciesHash[species].append(geneid)\n else: self.speciesHash[species] = [geneid]\n\n def add_gene(self, geneid, species):\n if not self.geneHash.has_key(geneid):\n self.speciesHash[species].append(geneid)\n self.geneHash[geneid] = species\n def get_name(self): return self.name\n def get_count(self): return len(self.geneHash)\n def get_gene_hash(self): return self.geneHash\n def get_species_hash(self): return self.speciesHash\n def to_s(self):\n sys.stdout.write(self.name + \"(\" + str(len(self.geneHash)) + \" genes, \" + str(len(self.speciesHash)) + \")\\t\")\n first = 1\n for geneid, species in self.geneHash.iteritems():\n if first == 0: sys.stdout.write(\" \")\n first = 0\n sys.stdout.write(geneid + \"(\" + species + \")\")\n sys.stdout.write(\"\\n\")\n\n" }, { "alpha_fraction": 0.5114926099777222, "alphanum_fraction": 0.5336048603057861, "avg_line_length": 32.36893081665039, "blob_id": "51af2af151fa9ea08c6070988fd67a55c38f1195", "content_id": "5ab32b5133f3f5a84cad1905e804ad56bc628bdc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 3437, "license_type": "permissive", "max_line_length": 117, "num_lines": 103, "path": "/ruby/geneontology/termcloud-from-go-enrichment2-comp.rb", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby -w\n# == Synopsis\n# creates input for wordle.net/advanced to create a term cloud based on the six\n# output files produced by the go-enrichment.py script\n#\n\nrequire 'optparse'\nrequire 'rubygems'\nrequire 'faster_csv'\n\n\n# =============================================================================\ndef get_opt\n options = Hash.new\n optparse = OptionParser.new do |opts|\n opts.banner = \"Usage: #{$0} -d <dir>\"\n options[:dir] = nil\n options[:go2name] = nil\n opts.on( '-d DIR', 'directory that contains *.ORA files produced with go-enrichment2.py'\n ){|dir| options[:dir] = dir}\n opts.on( '-g FILE', 'gene ontology id to name mapping file, tab delimited, to look up shortened term names'\n ){|file| options[:go2name] = file}\n end\n begin\n optparse.parse!\n mandatory = [:dir, :go2name]\n missing = mandatory.select{|param| options[param].nil?}\n if not missing.empty?\n puts \"Missing options: #{missing.join(', ')}\"\n puts optparse \n exit\n end\n rescue OptionParser::InvalidOption, OptionParser::MissingArgument\n puts $!.to_s\n puts optparse\n exit\n end\n return options\nend\n\n\n# modify to adjust colors\n# =============================================================================\ndef map_color(species)\n return '004d84' if species == \"Acep\"\n return '5abbff' if species == \"Aech\"\n return 'b4b4b4' if species == \"Amel\"\n return 
'000000' if species == \"Hsal\"\n return '93002d' if species == \"Cflo\"\n return 'ff6d9a' if species == \"Lhum\"\n return '145e00' if species == \"Pbar\"\n return '71f84c' if species == \"Sinv\"\n return '803300' if species == \"Nvit\"\n return 'ff9955' if species == \"Dmel\"\nend\n\n# =============================================================================\ndef statusbar(progress, message=\"\", width=40)\n progressbar = \"=\" * (progress*width).to_i\n progressbar << \" \" while progressbar.length < width\n STDERR.print \"\\r 0% #{progressbar} 100% \"\n STDERR.print \"[#{message}]\" unless message.empty?\n STDERR.print \"\\n\" if progress == 1.0 \nend\n\n# =============================================================================\ndef get_go2name(file)\n go2name = Hash.new\n f = File.open(file, \"r\")\n while (line = f.gets)\n line.chomp!\n id, name = line.split(\"\\t\")[0,2]\n go2name[id] = name\n end\n return go2name\nend\n\n# =============================================================================\ndef parse_ORA_file(file, go2name)\n species = file.split(\".\")[-2]\n IO.foreach(file) do |line|\n direction, ontology, goid, p, fdr = line.split(\"\\t\")\n p, fdr = p.to_f, fdr.to_f\n next if fdr > 0.05\n size = -1.0* Math.log(p)\n term = go2name[goid]\n STDERR.puts \"no name found for #{goid}\" unless term\n color = map_color(species)\n STDOUT.puts [term, sprintf('%.2f', size), color].join(\":\")\n end\nend\n\n# =============================================================================\n# === M A I N =================================================================\n# =============================================================================\n\noptions = get_opt()\nabort(\"directory does not exist - aborting.\") unless File.exists?(options[:dir]) and File.directory?(options[:dir])\nabort(\"go2name mapping file does not exist - aborting.\") if options[:go2name] and not File.exists?(options[:go2name])\ngo2name = get_go2name(options[:go2name])\nDir.glob(options[:dir] + '/*.ORA').each do |file|\n parse_ORA_file(file, go2name)\nend\n" }, { "alpha_fraction": 0.5423728823661804, "alphanum_fraction": 0.5565393567085266, "avg_line_length": 30.875, "blob_id": "066b9531158813832c59a8a730b2db1413884b79", "content_id": "5b6be9c98e026a9bd86e0e1139384cfc6266eaae", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7906, "license_type": "permissive", "max_line_length": 165, "num_lines": 248, "path": "/python/misa/ortho-pairwise-intra-intergenic.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nimport hashlib\nfrom low import * # custom functions, written by myself\nfrom misa import MisaSSR\nimport newick\nfrom collections import defaultdict\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -a <path> -b <path> -o <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f combined misa output\" )\n stdout( \" -o pairwise ortholog intra/intergenic regions file\" )\n stdout( \" -t newick treew with branch lengths\" )\n stdout( \" \" )\n sys.exit(1)\n\n# 
=============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:o:t:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['misa'] = value\n if key == '-o': args['orth'] = value\n if key == '-t': args['tree'] = value\n \n if not args.has_key('misa'):\n stderr( \"misa file argument missing.\" )\n show_help()\n elif not file_exists( args.get('misa') ):\n stderr( \"misa file does not exist.\" )\n show_help()\n\n if not args.has_key('orth'):\n stderr( \"orth file argument missing.\" )\n show_help()\n elif not file_exists( args.get('orth') ):\n stderr( \"orth file does not exist.\" )\n show_help()\n \n return args\n\n\ndef get_distances(file):\n tree = open(file).readline().strip()\n ancestral_nodes = []\n leaves = {}\n while 1:\n # END OF TREE: semicolon\n if tree.startswith(\";\"): break\n\n # START INNER NODE\n if tree.startswith(\"(\"):\n tree = tree[1:]\n n = newick.Node()\n if len(ancestral_nodes) > 0: n.parent = ancestral_nodes[-1]\n ancestral_nodes.append(n)\n continue\n\n # END INNER NODE\n if tree.startswith(\")\"):\n tree = tree[1:]\n if re.match(\":(\\d+)\", tree):\n distance = re.match(\":(\\d+)\", tree).group(1)\n ancestral_nodes[-1].distance_to_parent = distance\n while re.match(\"[:\\d]+\", tree): tree = tree[1:]\n ancestral_nodes.pop(-1)\n continue\n\n # OUTER NODE SINGLE\n if re.match(\",([A-Za-z]+):(\\d+)\\)\", tree):\n els = re.match(\",([A-Za-z]+):(\\d+)\", tree).groups()\n n1 = newick.Node()\n n1.parent = ancestral_nodes[-1]\n n1.distance_to_parent = els[1]\n leaves[els[0]] = n1\n while not tree.startswith(\")\"): tree = tree[1:]\n continue\n\n # OUTER NODE DOUBLE\n if re.match(\"([A-Za-z]+):(\\d+),([A-Za-z]+):(\\d+)\", tree):\n els = re.match(\"([A-Za-z]+):(\\d+),([A-Za-z]+):(\\d+)\", tree).groups()\n n1 = newick.Node()\n n1.parent = ancestral_nodes[-1]\n n1.distance_to_parent = els[1]\n n1.distance_to_parent = els[1]\n n2 = newick.Node()\n n2.parent = ancestral_nodes[-1]\n n2.distance_to_parent = els[3]\n leaves[els[0]] = n1\n leaves[els[2]] = n2\n while not tree.startswith(\")\"): tree = tree[1:]\n continue\n\n # INTERNAL INNER NODE\n if tree.startswith(\",(\"):\n tree = tree[2:]\n n = newick.Node()\n if len(ancestral_nodes) > 0: n.parent = ancestral_nodes[-1]\n ancestral_nodes.append(n)\n continue\n if tree.startswith(\",\"):\n tree = tree[1:]\n continue\n\n distances = {}\n for species1, leafnode1 in leaves.iteritems():\n for species2, leafnode2 in leaves.iteritems():\n distances[species1 + \",\" + species2] = str(leafnode1.summed_distance_to(leafnode2))\n return distances\n\n\n\nclass LocationPair():\n def __init__(self, line):\n columns = line.rstrip().split(\"\\t\")\n self.species = columns[0:2]\n self.type = columns[2]\n self.locations = [{'chr': columns[3], 'start': int(columns[4]), 'stop': int(columns[5])}, {'chr': columns[6], 'start': int(columns[7]), 'stop': int(columns[8])}]\n\n\ndef get_orthologs(file):\n orthologs = []\n fo = open(file)\n for line in fo:\n if line.startswith(\"#\"): continue\n if len(line.rstrip()) == 0: continue\n orthologs.append(LocationPair(line))\n fo.close()\n return orthologs\n\n\ndef get_ssrs(file):\n hash = {}\n fo = open(file)\n for 
line in fo:\n if line.startswith(\"ID\\t\"): continue\n m = MisaSSR(line)\n hash[m.geneid + \"|\" + str(m.startpos)] = m\n fo.close()\n return hash\n\n\ndef hash(s):\n return hashlib.sha224(s).hexdigest()\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n \n ssrs = get_ssrs(args['misa'])\n orthologLocationPairs = get_orthologs(args['orth'])\n distances = get_distances(args['tree'])\n \n perfect, poly, shift, loss = defaultdict(int), defaultdict(int), defaultdict(int), defaultdict(int)\n for locpair in orthologLocationPairs:\n qspecies, ospecies = locpair.species[0], locpair.species[1]\n qchr, qstart, qstop = locpair.locations[0]['chr'], locpair.locations[0]['start'], locpair.locations[0]['stop']\n ochr, ostart, ostop = locpair.locations[1]['chr'], locpair.locations[1]['start'], locpair.locations[1]['stop']\n qssrs, ossrs = [], []\n for s in range(qstart, qstop):\n key = qspecies + \"|\" + qchr + \"|\" + str(s)\n if ssrs.has_key(key): qssrs.append(ssrs[key])\n for s in range(ostart, ostop):\n key = ospecies + \"|\" + ochr + \"|\" + str(s)\n if ssrs.has_key(key): ossrs.append(ssrs[key])\n key = [qspecies, ospecies]\n key.sort()\n key = string.join(key, \",\") + \"|\" + locpair.type\n\n # no SSRs in these both locations\n if len(qssrs) == 0 and len(ossrs) == 0: continue\n # no SSRs in either one of the two locations\n if len(qssrs) == 0:\n loss[key] += len(ossrs)\n continue\n if len(ossrs) == 0:\n loss[key] += len(qssrs)\n continue\n\n caught = {}\n # stage 1: perfect matches\n for m1 in qssrs: \n for m2 in ossrs:\n if caught.has_key(hash(m1.to_s())) or caught.has_key(hash(m2.to_s())): continue\n if m1.is_perfect_match_to(m2):\n perfect[key] += 1\n caught[hash(m1.to_s())] = 1\n caught[hash(m2.to_s())] = 1\n\n # stage 2: polymorphic matches (same motif, but different number of repeats)\n for m1 in qssrs: \n for m2 in ossrs:\n if caught.has_key(hash(m1.to_s())) or caught.has_key(hash(m2.to_s())): continue\n if m1.is_polymorphic_to(m2):\n poly[key] += 1\n caught[hash(m1.to_s())] = 1\n caught[hash(m2.to_s())] = 1\n\n # stage 3: shifted matches (motif is shifted [permuated])\n for m1 in qssrs: \n for m2 in ossrs:\n if caught.has_key(hash(m1.to_s())) or caught.has_key(hash(m2.to_s())): continue\n if m1.is_shifted_to(m2):\n shift[key] += 1\n caught[hash(m1.to_s())] = 1\n caught[hash(m2.to_s())] = 1\n\n mapped = len(caught) / 2\n loss[key] += len(qssrs) + len(ossrs) - mapped \n\n\n keys = perfect.keys()\n keys.sort()\n for key in keys:\n perfectcount = str(perfect[key])\n polycount = str(poly[key])\n shiftcount = str(shift[key])\n losscount = str(loss[key])\n speciespair = key[:key.index(\"|\")]\n time = str(distances[speciespair])\n print string.join([key, time, perfectcount, polycount, shiftcount, losscount], \"\\t\")\n\n\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.6073729395866394, "alphanum_fraction": 0.6256263256072998, "avg_line_length": 33.46913528442383, "blob_id": "3df020479d6db03a01b2a3f7cfe7c6864294ff4b", "content_id": "2b05c8b282d818ab723fcc8dbe7f06659d7d44cf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2794, "license_type": "permissive", "max_line_length": 137, "num_lines": 81, "path": 
"/python/base/misa.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "import string\n\nclass MisaSSRspecies():\n def __init__(self, line):\n self.feature = 0\n columns = line.rstrip().split(\"\\t\")\n self.species = columns.pop(0)\n self.geneid = columns.pop(0)\n self.ssrnr = int(columns.pop(0))\n self.type = columns.pop(0)\n self.pattern = columns.pop(0)\n self.length = int(columns.pop(0))\n self.startpos = int(columns.pop(0))\n self.endpos = int(columns.pop(0))\n if len(columns) > 0: self.feature = columns.pop(0)\n if self.type != \"c\" and self.type != \"c*\":\n self.motif = self.pattern[1:self.pattern.index(\")\")]\n if self.pattern.endswith(\"*\"): self.repeats = int(self.pattern[self.pattern.index(\")\")+1:-1])\n else: self.repeats = int(self.pattern[self.pattern.index(\")\")+1:])\n\n def to_s(self):\n array = [self.species, self.geneid, str(self.ssrnr), self.type, self.pattern, str(self.length), str(self.startpos), str(self.endpos)]\n return string.join(array, \"\\t\")\n\n def is_perfect_match_to(self, other):\n if self.pattern != other.pattern: return 0\n return 1\n\n def is_polymorphic_to(self, other):\n if self.motif != other.motif: return 0\n if self.repeats == other.repeats: return 0\n return 1\n\n def is_shifted_to(self, other):\n if self.motif == other.motif: return 0\n if self.type != other.type: return 0\n m = self.motif\n for i in range(len(self.motif)):\n m = m[1:] + m[0]\n if m == other.motif: return 1\n return 0\n\n\nclass MisaSSR():\n def __init__(self, line):\n self.feature = 0\n columns = line.rstrip().split(\"\\t\")\n self.geneid = columns.pop(0)\n self.ssrnr = int(columns.pop(0))\n self.type = columns.pop(0)\n self.pattern = columns.pop(0)\n self.length = int(columns.pop(0))\n self.startpos = int(columns.pop(0))\n self.endpos = int(columns.pop(0))\n if len(columns) > 0: self.feature = columns.pop(0)\n if self.type != \"c\" and self.type != \"c*\":\n self.motif = self.pattern[1:self.pattern.index(\")\")]\n if self.pattern.endswith(\"*\"): self.repeats = int(self.pattern[self.pattern.index(\")\")+1:-1])\n else: self.repeats = int(self.pattern[self.pattern.index(\")\")+1:])\n\n def to_s(self):\n array = [self.geneid, str(self.ssrnr), self.type, self.pattern, str(self.length), str(self.startpos), str(self.endpos)]\n return string.join(array, \"\\t\")\n\n def is_perfect_match_to(self, other):\n if self.pattern != other.pattern: return 0\n return 1\n\n def is_polymorphic_to(self, other):\n if self.motif != other.motif: return 0\n if self.repeats == other.repeats: return 0\n return 1\n\n def is_shifted_to(self, other):\n if self.motif == other.motif: return 0\n if self.type != other.type: return 0\n m = self.motif\n for i in range(len(self.motif)):\n m = m[1:] + m[0]\n if m == other.motif: return 1\n return 0\n\n\n" }, { "alpha_fraction": 0.5423596501350403, "alphanum_fraction": 0.5483368039131165, "avg_line_length": 28.159090042114258, "blob_id": "1d148ae3f9b4320eb0f47915b7f20f4cb259aefa", "content_id": "81a19ec174ab686025762103e03b20273cde738b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3848, "license_type": "permissive", "max_line_length": 82, "num_lines": 132, "path": "/python/openreadingframe/stats_predicted_orfs.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t# low level handling, such as command line stuff\nimport string\t\t\t# string methods available\nimport re\t\t\t\t\t# regular expressions\nimport 
getopt\t\t\t# comand line argument handling\nfrom low import *\t# custom functions, written by myself\nfrom Bio import SeqIO # biopython stuff, to parse fasta files for instance\nfrom rpy import r\nfrom pylab import *\n\n# =============================================================================\t\ndef show_help( ):\n\t\"\"\" displays the program parameter list and usage information \"\"\"\n\tstdout( \"usage: \" + sys.argv[0] + \" -f <path> -n <path>\" )\n\tstdout( \" \" )\n\tstdout( \" option description\" )\n\tstdout( \" -h help (this text here)\" )\n\tstdout( \" -f path to the predicted protein sequence fasta file\" )\n\tstdout( \" -n path to the nucleotide sequence fasta file\" )\n\tstdout( \" \" )\n\t\n\tsys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n\t\"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\tif len ( sys.argv ) == 1:\n\t\tstderr( \"no arguments provided.\" )\n\t\tshow_help()\t\n\t\n\ttry: # check for the right arguments\n\t\tkeys, values = getopt.getopt( sys.argv[1:], \"hf:n:\" )\n\texcept getopt.GetoptError:\n\t\tstderr( \"invalid arguments provided.\" )\n\t\tshow_help()\n\t\t\n\torffile, ntfile = '', ''\n\tfor key, value in keys:\n\t\tif key == '-f': orffile = value\n\t\tif key == '-n': ntfile = value\n\t\t\t\n\tif orffile == '':\n\t\tstderr( \"orf sequence data file missing.\" )\n\t\tshow_help()\n\telif not file_exists( orffile ):\n\t\tstderr( \"invalid path in orffile \" + orffile )\n\t\tshow_help()\n\t\t\n\tif ntfile == '':\n\t\tstderr( \"nucleotide sequence data file missing.\" )\n\t\tshow_help()\n\telif not file_exists( ntfile ):\n\t\tstderr( \"invalid path in ntfile \" + ntfile )\n\t\tshow_help()\n\t\t\n\tntfile = get_global_path( ntfile )\n\torffile = get_global_path( orffile )\n\treturn orffile, ntfile\n\n# =============================================================================\ndef orf_stats( orffile, ntfile ):\n\t\"\"\"\n\t\n\t\"\"\"\n\t\n\t# read in all nt sequences and store them in a hash\n\tnthash = {}\n\thandle = open( ntfile )\n\tfor seq_record in SeqIO.parse(handle, \"fasta\"):\n\t\tnthash[seq_record.id] = seq_record.seq.tostring()\n\thandle.close()\n\t#print \"read in %s nucleotide sequences.\" % len(nthash)\n\t\n\tstopcodons = [ 'TAG', 'TAA', 'TGA' ]\n\t\n\t# do stats on each predicted orf (= entry in orffile)\n\thandle = open( orffile )\n\taaseqlength = []\n\taaseqstop = []\n\tfw = open( get_basename( orffile ) + '.withstop', 'w' )\n\tfor seq_record in SeqIO.parse(handle, \"fasta\") :\n\t\torfinfo = seq_record.description.split() # id frame ntfrom ntto\n\t\tid = seq_record.id\n\t\taaseq = seq_record.seq.tostring()\n\t\tntseq = nthash[id]\n\t\t\n\t\tstop = 0\n\t\tpos = int(orfinfo[-2])-1\n\t\twhile (pos < int(orfinfo[-1])+3):\n\t\t\tcodon = ntseq[ pos : pos+3 ]\n\t\t\tif codon in stopcodons:\n\t\t\t\tstop = 1\n\t\t\t\tbreak\n\t\t\tpos += 3\n\t\t\n\t\tprint \"%s\\t%s\\t%s\" %( id, len(aaseq), stop )\n\t\taaseqlength.append( len(aaseq) )\n\t\taaseqstop.append( stop )\n\t\t\n\t\tif stop: fw.write( \">\" + id + \"\\n\" + aaseq + \"*\\n\" )\n\t\t\t\n\tfw.flush()\n\tfw.close()\n\thandle.close()\n\trc( 'figure', figsize=(12,5) )\n\t# all SNPs\n\n\tfigure()\n\tsubplot(121)\n\thist( aaseqlength, fc='grey' )\n\ttitle( get_basename(orffile) + ' sequence length (aa)' )\n\t\n\tsubplot(122)\n\thist( aaseqstop, bins=[0,1], fc='grey' )\n\ttitle( get_basename(orffile) + ' sequences containing a stop codon' )\n\t\n\tsavefig( get_basename(orffile) + 
'.pdf')\n\t\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\ndef main():\n\t\"\"\"\n\t\"\"\"\n\torffile, ntfile = handle_arguments() \n\torf_stats( orffile, ntfile )\n\n# =============================================================================\nmain()" }, { "alpha_fraction": 0.4825051426887512, "alphanum_fraction": 0.48632755875587463, "avg_line_length": 29.630630493164062, "blob_id": "4024f8862ad5d89bb468757a1bb2720d0d7f6c03", "content_id": "ce80a2535bb5104867cfcdec47ef37828480cd90", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3401, "license_type": "permissive", "max_line_length": 83, "num_lines": 111, "path": "/python/generic/addid2xdom.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # command line argument handling\nimport math # math functions\nfrom low import * # custom functions, written by myself\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path> -i -n\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f fasta file to import\" )\n stdout( \" -i id mapping file\" )\n stdout( \" -n column to look up the id for [0..n]\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:i:n:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['file'] = value\n if key == '-i': args['idfile'] = value\n if key == '-n': args['column'] = int(value)\n \n if not args.has_key('file'):\n stderr( \"import file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"import file does not exist.\" )\n show_help()\n \n if not args.has_key('idfile'):\n stderr( \"import id file argument missing.\" )\n show_help()\n elif not file_exists( args.get('idfile') ):\n stderr( \"import id file does not exist.\" )\n show_help()\n \n if not args.has_key('column'):\n stderr( \"column argument missing.\" )\n show_help()\n\n return args\n\n\n# =============================================================================\ndef get_idhash( args ):\n idhash = {}\n fo = open( args.get('idfile') )\n for line in fo:\n line = line.rstrip()\n if len(line.split(\"\\t\")) > 2:\n key = line.split(\"\\t\")[0]\n value = string.join(line.split(\"\\t\")[1:],\"\\t\")\n else:\n key, value = line.split(\"\\t\")\n idhash[ key ] = value\n fo.close()\n return idhash\n\n# 
=============================================================================\ndef main( args ):\n\n idhash = get_idhash( args )\n\n fo = open( args.get('file') )\n for line in fo:\n line = line.rstrip()\n if line.startswith(\">\"): \n id = line[1:]\n print line\n #asdf = 0\n else:\n columns = line.split(\"\\t\")\n if args.get('column') == 0:\n lookup = id\n else: \n lookup = columns[ args.get('column') -1 ]\n if not idhash.has_key( lookup ):\n stderr( \"lookup name not found in the id file: \" + lookup )\n continue\n #sys.exit(1)\n id = idhash.get( lookup )\n columns.append(id)\n print string.join( columns, \"\\t\" )\n fo.close()\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5194706320762634, "alphanum_fraction": 0.5387786030769348, "avg_line_length": 39.96444320678711, "blob_id": "1a8324bbafa152c654ec2bddbf7b05c09f02af41", "content_id": "c762f0e5a4e74249022fff849ab6d889b462e3ef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9219, "license_type": "permissive", "max_line_length": 149, "num_lines": 225, "path": "/python/paml/PAML_Ka_Ks.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys, re\nimport getopt\nfrom low import *\nfrom Bio.Seq import Seq,reverse_complement\nfrom Bio import Translate\nfrom Bio.Alphabet import IUPAC\n\nUnambiguousTranslator = Translate.unambiguous_dna_by_id[1]\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path> -o outfile\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f path to the paml.filelist file generated by another python script\" )\n stdout( \" -a path to the TAIR functional description file\" )\n stdout( \" -o summary outfile\" )\n stdout( \" \" )\n sys.exit(1)\n\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" checks given arguments and returns either an error mesasge\n or a hash of the correctly defined arguments \"\"\"\n \n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:o:a:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n \n args = {}\n \n for key, value in keys: # handle arguments\n if key == '-f': args[ 'filelist' ] = value\n if key == '-o': args[ 'outfile' ] = value\n if key == '-a': args[ 'annofile' ] = value\n \n if not args.has_key( 'filelist' ):\n stderr( \"filelist is missing.\" )\n show_help()\n elif not file_exists( args[ 'filelist' ] ):\n stderr( \"filelist does not exist: \" + value )\n show_help()\n else:\n args['filelist'] = get_global_path( args.get('filelist') )\n \n if not args.has_key( 'annofile' ):\n stderr( \"annotation file is missing.\" )\n show_help()\n elif not file_exists( args[ 'annofile' ] ):\n stderr( \"annotation file does not exist: \" + value )\n show_help()\n else:\n args['annofile'] = get_global_path( args.get('annofile') )\n \n if not args.has_key( 'outfile' ):\n stderr( \"outfile is missing.\" )\n show_help()\n else:\n args['outfile'] = get_global_path( args.get('outfile') )\n \n args['outdir'] = 
os.path.split(args['filelist'])[0]\n if not args['outdir'].endswith('/'): args['outdir'] += '/'\n \n return args\n\n\n# =============================================================================\ndef Ka_Ks_with_PAML( filelist, annofile, outdir, outfile ):\n \"\"\"\n uses the PAML package to analyze each MSA for synonymous and nonsynonymous\n substitutions. Therefore, control files are generated, and the program\n is executed for each file.\n \"\"\"\n def generate_codeml_ctl_file( filename, seqfile, treefile, outfile ):\n \n fw = open( filename, 'w' )\n fw.write( \"seqfile = \" + seqfile + \" * sequence data file name\\n\" )\n fw.write( \"treefile = \" + treefile + \" * tree structure file name\\n\" )\n fw.write( \"outfile = \" + outfile + \" * main result file\\n\" )\n \n fw.write( \"noisy = 0 * 0,1,2,3,9: how much rubbish on the screen\\n\" )\n fw.write( \"verbose = 0 * 1: detailed output, 0: concise output\\n\" )\n fw.write( \"runmode = 0 * 0: user tree; 1: semi-automatic; 2: automatic * 3: StepwiseAddition; (4,5):PerturbationNNI; -2: pairwise\\n\" )\n fw.write( \"seqtype = 1 * 1:codons; 2:AAs; 3:codons-->AAs\\n\" )\n fw.write( \"CodonFreq = 2 * 0:1/61 each, 1:F1X4, 2:F3X4, 3:codon table\\n\" )\n #fw.write( \"* ndata = \" + str(ndata) + \"\\n\" )\n fw.write( \"clock = 0 * 0:no clock, 1:clock; 2:local clock; 3:TipDate\\n\" )\n fw.write( \"aaDist = 0 * 0:equal, +:geometric; -:linear, 1-6:G1974,Miyata,c,p,v,a * 7:AAClasses\\n\" )\n fw.write( \"aaRatefile = jones.dat * only used for aa seqs with model=empirical(_F) * dayhoff.dat, jones.dat, wag.dat, mtmam.dat, or your own\\n\" )\n fw.write( \"model = 0 * models for codons:\\n\" )\n fw.write( \"* 0:one, 1:b, 2:2 or more dN/dS ratios for branches\\n\" )\n fw.write( \"* models for AAs or codon-translated AAs:\\n\" )\n fw.write( \"* 0:poisson, 1:proportional,2:Empirical,3:Empirical+F\\n\" )\n fw.write( \"* 6:FromCodon, 8:REVaa_0, 9:REVaa(nr=189)\\n\" )\n fw.write( \"NSsites = 0 * 0:one w;1:neutral;2:selection; 3:discrete;4:freqs;\\n\" )\n fw.write( \"* 5:gamma;6:2gamma;7:beta;8:beta&w;9:beta&gamma;\\n\" )\n fw.write( \"* 10:beta&gamma+1; 11:beta&normal>1; 12:0&2normal>1;\\n\" )\n fw.write( \"* 13:3normal>0\\n\" )\n fw.write( \"icode = 0 * 0:universal code; 1:mammalian mt; 2-11:see below\\n\" )\n fw.write( \"Mgene = 0 * 0:rates, 1:separate;\\n\" )\n fw.write( \"fix_kappa = 0 * 1: kappa fixed, 0: kappa to be estimated\\n\" )\n fw.write( \"kappa = 2 * initial or fixed kappa\\n\" )\n fw.write( \"fix_omega = 0 * 1: omega or omega_1 fixed, 0: estimate\\n\" )\n fw.write( \"omega = .4 * initial or fixed omega, for codons or codon-based AAs\\n\" )\n fw.write( \"fix_alpha = 1 * 0: estimate gamma shape parameter; 1: fix it at alpha\\n\" )\n fw.write( \"alpha = 0. * initial or fixed alpha, 0:infinity (constant rate)\\n\" )\n fw.write( \"Malpha = 0 * different alphas for genes\\n\" )\n fw.write( \"ncatG = 3 * # of categories in dG of NSsites models\\n\" )\n fw.write( \"fix_rho = 1 * 0: estimate rho; 1: fix it at rho\\n\" )\n fw.write( \"rho = 0. 
* initial or fixed rho, 0:no correlation\\n\" )\n fw.write( \"getSE = 0 * 0: don't want them, 1: want S.E.s of estimates\\n\" )\n fw.write( \"RateAncestor = 0 * (0,1,2): rates (alpha>0) or ancestral states (1 or 2)\\n\" )\n fw.write( \"Small_Diff = .5e-6\\n\" )\n fw.write( \"cleandata = 1 * remove sites with ambiguity data (1:yes, 0:no)?\\n\" )\n fw.write( \"* fix_blength = 0 * 0: ignore, -1: random, 1: initial, 2: fixed\\n\" )\n fw.write( \"method = 1 * 0: simultaneous; 1: one branch at a time\\n\" )\n fw.flush()\n fw.close()\n \n # ---------------------------------------------------------------------------\n def run_codeml( ctlfile ):\n ok = os.system( \"~/bin/paml4/bin/codeml \" + ctlfile + \" > codeml.log\" )\n if not (ok % 256 == 0):\n stderr( \"codeml exited with an error. aborting, please check log file.\" )\n sys.exit(3)\n \n # ---------------------------------------------------------------------------\n def parse_file( outfile ):\n hash = {'ns':None, 'ls':None, 'global_KaKs':None}\n fo = open( outfile )\n for line in fo:\n # get number of sequences and number of shared codons within the set\n if line.startswith('ns ='):\n hash['ns'] = int( re.match( 'ns =\\s*(\\S+)', line ).group(1) )\n hash['ls'] = int( re.search( 'ls =\\s*(\\S+)', line ).group(1) )\n # get global omega (dN/dS)\n if line.startswith('omega (dN/dS) ='):\n hash['global_KaKs'] = float( re.match( 'omega \\(dN/dS\\) =\\s*(\\S+)', line ).group(1) )\n fo.close()\n return hash\n \n # ---------------------------------------------------------------------------\n def prepare_annotation( annofile ):\n annohash = {}\n fo = open( annofile, 'r' )\n for line in fo:\n id, shortdescr = line.split(\"\\t\")[0:2]\n annohash[id.lower()] = shortdescr\n fo.close()\n return annohash\n \n # ---------------------------------------------------------------------------\n def get_annotation( seqfile, annohash, hash ):\n # get ids\n ids = []\n from Bio import SeqIO\n handle = open(seqfile)\n for seq_record in SeqIO.parse(handle, \"fasta\"):\n ids.append(seq_record.id.lower())\n handle.close()\n # get annotation and add to hash\n hash['descr'] = None\n for id in ids:\n if annohash.has_key( id ):\n hash['descr'] = annohash.get(id)\n break\n return hash\n \n # ---------------------------------------------------------------------------\n # ---------------------------------------------------------------------------\n # ---------------------------------------------------------------------------\n \n #infomsg( \"PAML4 | analyzing orthologous sets for dN/dS\" )\n cwd = os.getcwd()\n os.chdir(outdir)\n count = 0\n totalcount = 0\n fw = open( outfile, 'w' )\n fo = open( filelist, 'r' )\n annohash = prepare_annotation( annofile )\n for line in fo:\n if line.startswith('#'):\n totalcount = int(re.search( '\\[(\\d+)\\]', line ).group(1))\n continue\n file = line.split()\n # CODEML\n outfile = get_basename(file[0],0)+'.KaKs' \n #generate_codeml_ctl_file( 'codeml.ctl', file[4], file[3], outfile )\n #run_codeml( 'codeml.ctl' )\n # parse out file\n hash = parse_file( outfile )\n hash = get_annotation( get_basename(file[0])+'.fa', annohash, hash )\n fw.write( outfile + ' %s %s %s %s\\n' \n %( hash.get('ns'), hash.get('ls'), \n hash.get('global_KaKs'), hash.get('descr') ) )\n sysout(\"\\r percent sets PAMLed: %f\" %( 100.0*count/totalcount ))\n count += 1\n sysout(\"\\r percent sets PAMLed: %f\\n\" %( 100.0*count/totalcount ))\n fo.close()\n fw.flush()\n fw.close()\n os.chdir(cwd)\n\n# 
=============================================================================\ndef main():\n \n args = handle_arguments()\n \n # feed to PAML program\n Ka_Ks_with_PAML( args.get('filelist'), args.get('annofile'), args.get('outdir'), args.get('outfile') )\n \n \n### MAIN ######################################################################\n\nmain()\n " }, { "alpha_fraction": 0.5143777132034302, "alphanum_fraction": 0.5203588604927063, "avg_line_length": 33.5, "blob_id": "bff484fe56503bef20f57a995fdc213121324c5e", "content_id": "190592c79e645416bf39e979d95b72b6bbf12e8b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4347, "license_type": "permissive", "max_line_length": 100, "num_lines": 126, "path": "/python/misa/get-transcript-and-protein-per-droso-gene.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport getopt # comand line argument handling\nfrom collections import defaultdict\nfrom low import * # custom functions, written by myself\nfrom misa import MisaSSR\nimport pickle\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" -f <fasta>\"\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f all.translations.fasta\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['fasta'] = value\n \n if not args.has_key('fasta'):\n print >> sys.stderr, \"fasta file argument missing.\"\n show_help()\n elif not file_exists( args.get('fasta') ):\n print >> sys.stderr, \"fasta file does not exist.\"\n show_help()\n\n\n return args\n\n# =============================================================================\ndef get_ssrs(file):\n hash = defaultdict(list)\n fo = open(file)\n for line in fo: \n if line.startswith(\"ID\\t\"): continue\n m = MisaSSR(line)\n hash[m.geneid].append(m)\n fo.close()\n return hash\n\n# =============================================================================\ndef get_features(dir):\n storage = \".misa.gff.storage.tmp\"\n features = defaultdict(list)\n if not file_exists(storage):\n for filename in os.listdir(args['dir']):\n gzip = 0\n if not filename.endswith(\".gff\") and not filename.endswith(\".gff.gz\"): continue\n species = filename[:filename.index(\"-\")]\n filename = args['dir'] + filename\n if filename.endswith(\".gff.gz\"): gzip = 1\n if gzip: \n os.system(\"gunzip \" + filename)\n filename = filename[:-3]\n\n fo = open(filename)\n for line in fo: \n if line.startswith(\"#\") or len(line.rstrip()) == 0: continue\n columns = line.rstrip().split(\"\\t\")\n if len(columns) != 9: continue\n type = columns[2]\n if type != \"gene\" and type != \"exon\" and type != \"intron\": continue\n chr, start, stop, strand, descr = 
columns[0], columns[3], columns[4], columns[6], columns[8]\n key = string.join([species, chr], \"|\")\n features[key].append([type, int(start), int(stop)])\n fo.close()\n if gzip: os.system(\"gzip \" + filename)\n\n fw = open(storage, \"w\")\n for key, features in features.iteritems():\n for feat in features:\n fw.write(string.join([key, feat[0], str(feat[1]), str(feat[2])], \"\\t\") + \"\\n\")\n fw.close()\n\n else:\n fo = open(storage)\n for line in fo:\n columns = line.rstrip().split(\"\\t\")\n key = columns[0]\n feat = list(columns[1:4])\n features[key].append(feat)\n fo.close()\n return features\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n\n geneHash = {}\n fo = open(args['fasta'])\n for line in fo:\n if not line.startswith(\">\"): continue\n proteinid = re.search(\" ID=(\\S+);\", line).group(1)\n geneid, transcriptid = re.search(\" parent=(\\S+);\", line).group(1).split(\",\")[0:2]\n length = int(re.search(\" length=(\\d+)\", line).group(1))\n if not geneHash.has_key(geneid) or geneHash[geneid]['length'] < length: \n geneHash[geneid] = {'protein':proteinid, 'transcript':transcriptid, 'length':length}\n fo.close()\n\n for geneid, hash in geneHash.iteritems():\n print string.join([geneid, hash['transcript'], hash['protein']], \"\\t\")\n\n\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n" }, { "alpha_fraction": 0.49927008152008057, "alphanum_fraction": 0.5129926800727844, "avg_line_length": 33.2400016784668, "blob_id": "afca863ff2ea578b9f8e1e0792be8c35bf0b36b2", "content_id": "7a4283dc4f71db778c0d3ba0d9adbbf0740a7153", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3425, "license_type": "permissive", "max_line_length": 83, "num_lines": 100, "path": "/python/paml/plot-codeml-model-A-digest.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nimport math # match functions\nfrom low import * # custom functions, written by myself\nfrom collections import defaultdict\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f digested file with model A positional data of PS\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['file'] = value\n \n if not args.has_key('file'):\n stderr( \"import file argument missing.\" )\n 
show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"import file does not exist.\" )\n show_help()\n \n return args\n\n\n# =============================================================================\ndef get_genes_with_features( file ):\n hash = defaultdict(list)\n fo = open(file)\n for line in fo:\n filename, length, feature = line.rstrip().split(\"\\t\")[0:3]\n cluster = filename[:filename.index(\".\")]\n branch = filename[filename.index(\"tree.\")+5:filename.rindex(\".\")]\n pos, aa, prob = feature.split()[0:3]\n if prob.endswith(\"*\"): prob = prob[:prob.index(\"*\")]\n prob = float(prob)\n pos = int(pos)\n length = int(length)\n hash[cluster + \" \" + branch].append([length, pos, aa, prob])\n fo.close()\n return hash\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n\n hash = get_genes_with_features(args['file'])\n for key, featurearray in hash.iteritems():\n cluster, branch = key.split()\n length = int(featurearray[0][0])\n import matplotlib.pyplot as P\n x = [e+1 for e in range(length+1)]\n y1 = [0] * (length+1)\n y2 = [0] * (length+1)\n for feature in featurearray:\n length, pos, aa, prob = feature[0:4]\n if prob > 0.95: y1[pos] = prob\n else: y2[pos] = prob\n \n P.bar(x, y1, color='#000000', edgecolor='#000000')\n P.bar(x, y2, color='#bbbbbb', edgecolor='#bbbbbb')\n P.ylim(ymin=0, ymax=1)\n P.xlim(xmin=0, xmax=length)\n P.xlabel(\"position in the ungapped alignment [aa]\")\n P.ylabel(r'$P (\\omega > 1)$')\n P.title(cluster + \" (branch \" + branch + \")\")\n\n P.axhline(y=.95, xmin=0, xmax=length, linestyle=\":\", color=\"k\")\n P.savefig(cluster + \".\" + branch + \".png\", format=\"png\")\n P.close()\n\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.4698063135147095, "alphanum_fraction": 0.4770224094390869, "avg_line_length": 30.33333396911621, "blob_id": "e1ba0804639467a9bb9af17e8421c60797d704a6", "content_id": "b125a2f95d03a00c5348474b1de49fb6e93514c1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2633, "license_type": "permissive", "max_line_length": 100, "num_lines": 84, "path": "/python/fasta/concatenate-alignments.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nfrom low import * # custom functions, written by myself\nfrom collections import defaultdict\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path> -i -n\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -e file extension, e.g. 
\\\".muscle\\\"\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"he:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-e': args['ext'] = value\n \n if not args.has_key('ext'):\n stderr( \"ext argument missing.\" )\n show_help()\n \n return args\n\n \n# =============================================================================\ndef aln_is_conserved(file, min=0.85):\n popenout = os.popen(\"~/bin/t-coffee -other_pg seq_reformat -in %s -output sim | tail -n 1\" % file)\n out = popenout.read()\n popenout.close()\n identity = float(out.split()[-1])\n if identity > min: return 1\n else: return 0\n \n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n \n added = 0\n seqhash = defaultdict(str)\n ext = args['ext']\n for file in os.listdir('.'):\n if added == 1500: break\n if not file.endswith(ext): continue\n if not aln_is_conserved(file): continue\n fo = open(file)\n for line in fo:\n line = line.rstrip()\n if line.startswith(\">\"):\n id = line[1:]\n if id.count(\" \") > 0: id = id[:id.index(\" \")]\n else:\n seqhash[id] += line\n fo.close()\n added += 1\n for id, seq in seqhash.iteritems():\n print \">\" + id\n print seq\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5450921654701233, "alphanum_fraction": 0.5505729913711548, "avg_line_length": 37.10126495361328, "blob_id": "0d5285ea8784c7d09f27d811146b1508acb3660e", "content_id": "cb27794c3d3238a8b92f7f52fc9bdca946c8859f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6021, "license_type": "permissive", "max_line_length": 148, "num_lines": 158, "path": "/python/gff/splice-forms-from-gff.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport getopt # comand line argument handling\nfrom collections import defaultdict\nfrom low import * # custom functions, written by myself\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n print sys.stderr, \"parses a gff file to map mRNA to genes.\"\n print sys.stderr, \"depending on whether or not -p is given, it ouputs different results:\"\n print sys.stderr, \"-p absent: reports the transcripts IDs for a given gene ID. output: <geneid>tab<transcript1> <transcript2> <transcript3>...\"\n print sys.stderr, \"-p present: reports the longest transcripts ID for a given gene ID. 
output: <geneid>tab<transcript>\\n\"\n print sys.stderr, \"usage: \" + sys.argv[0] + \" -f <gff-file> -p <peptides.fasta> [-n]\"\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f gff file to parse\" )\n stdout( \" -p peptide fasta file from which to extract the longest sequence for a gene with splice variants\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:p:n\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {'name':0}\n for key, value in keys:\n if key == '-f': args['file'] = value\n if key == '-p': args['pep'] = value\n if key == '-n': args['name'] = 1\n \n if not args.has_key('file'):\n stderr( \"gff file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"gff file does not exist.\" )\n show_help()\n\n if args.has_key('pep') and not file_exists( args.get('pep') ):\n stderr( \"peptide fasta file does not exist.\" )\n show_help()\n \n return args\n\n\n# =============================================================================\ndef get_seq_lengths(file):\n lengthHash, id = {}, \"\"\n fo = open(file)\n for line in fo: \n line = line.strip()\n if line.startswith(\">\"):\n id = line[1:]\n if id.count(\" \") > 0: id = id[:id.index(\" \")]\n if id.count(\"\\t\") > 0: id = id[:id.index(\"\\t\")]\n lengthHash[id] = 0 \n else: lengthHash[id] += len(line)\n return lengthHash\n\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n\n def process_gff_line(line):\n columns = line.rstrip().split(\"\\t\")\n type, descrline = columns[2], columns[8]\n if type != \"gene\" and type != \"mRNA\" and type != \"CDS\": return\n currentdescr = {}\n for pair in descrline.split(\";\"): currentdescr[pair.split(\"=\")[0]] = pair.split(\"=\")[1]\n if type == \"CDS\":\n mRNAwithCDS[currentdescr[\"Parent\"]] = 1\n return\n if not currentdescr.has_key(\"ID\"):\n print >> sys.stderr, \"ERROR: ID tag missing! line: \\\"\" + line + \"\\\"\"\n return\n if type == \"mRNA\" and not currentdescr.has_key(\"Parent\"):\n print >> sys.stderr, \"ERROR: Parent association missing! 
line: \\\"\" + line + \"\\\"\"\n return\n if type == \"mRNA\": gene2transcripts[currentdescr[\"Parent\"]].append(currentdescr[\"ID\"])\n #if currentdescr.has_key(\"Alias\"): aliases[currentdescr[\"ID\"]] = currentdescr[\"Alias\"]\n\n# =============================================================================\n\n aliases = {}\n gene2transcripts = defaultdict(list)\n mRNAwithCDS = {}\n fo = open( args.get('file') )\n for line in fo:\n if line.startswith(\"#\"): continue\n process_gff_line(line)\n fo.close()\n\n if args.has_key('pep'):\n lengthHash = get_seq_lengths(args['pep'])\n for gene, associds in gene2transcripts.iteritems():\n galias = gene\n if aliases.has_key(gene): galias = aliases[gene]\n if len(associds) == 1:\n talias = associds[0]\n if aliases.has_key(talias): talias = aliases[talias]\n if not mRNAwithCDS.has_key(associds[0]): continue\n if not lengthHash.has_key(talias): talias = talias.split(\":\")[1]\n if not lengthHash.has_key(talias): talias = talias.split(\"-\")[0]\n if not lengthHash.has_key(talias): \n print >> sys.stderr, \"ERROR: could not find fasta sequence in peptide file with ID \\\"\" + talias + \"\\\"\"\n continue\n print galias + \"\\t\" + talias\n continue\n peptides = {}\n for associd in associds:\n talias = associd\n if aliases.has_key(talias): talias = aliases[talias]\n if not mRNAwithCDS.has_key(associd): continue\n if not lengthHash.has_key(talias): talias = talias.split(\":\")[1]\n if not lengthHash.has_key(talias): talias = talias.split(\"-\")[0]\n if not lengthHash.has_key(talias): \n print >> sys.stderr, \"ERROR: could not find fasta sequence in peptide file with ID \\\"\" + talias + \"\\\"\"\n continue\n peptides[talias] = lengthHash[talias]\n if len(peptides) > 0:\n best = sorted(peptides.iteritems(), key=lambda x: x[1] , reverse=True)[0][0]\n print galias + \"\\t\" + best\n \n\n\n else:\n for gene, associds in gene2transcripts.iteritems():\n galias = gene\n if aliases.has_key(gene): galias = aliases[gene]\n sys.stdout.write(galias + \"\\t\")\n for i in range(len(associds)):\n talias = associds[i]\n if aliases.has_key(talias): \n associds[i] = aliases[talias]\n sys.stdout.write(string.join(associds, \" \") + \"\\n\")\n\n\n\n\n \n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5141835808753967, "alphanum_fraction": 0.5187127590179443, "avg_line_length": 35.78947448730469, "blob_id": "3c1f3ee2813d28fafc3db176b1b5451d58ca5027", "content_id": "96d478141824671fcc5e78e2545775f0609e7274", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4195, "license_type": "permissive", "max_line_length": 108, "num_lines": 114, "path": "/python/generic/map.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nimport math # match functions\nfrom low import * # custom functions, written by myself\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f file to map to\" )\n stdout( \" -m file to 
map from\" )\n stdout( \" -a column to index in the map-to file\" )\n stdout( \" -d delimiter (default: tab | allowed: ; , tab space\" )\n stdout( \" -v verbose/debug mode\" )\n stdout( \" -s silent mode: no stderr msgs\" )\n stdout( \" -c conservative: output only entries where a mapping was found\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hvscf:m:d:a:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n args['silent'] = 0\n for key, value in keys:\n if key == '-f': args['mapto'] = value\n if key == '-m': args['mapfrom'] = value\n if key == '-d': args['delimiter'] = value\n if key == '-a': args['colto'] = int(value)\n if key == '-v': args['debug'] = 1\n if key == '-s': args['silent'] = 1\n if key == '-c': args['conserve'] = 1\n \n if not args.has_key('mapto'):\n stderr( \"map-to file argument missing.\" )\n show_help()\n elif not file_exists( args.get('mapto') ):\n stderr( \"map-to file does not exist.\" )\n show_help()\n\n if not args.has_key('mapfrom'):\n stderr( \"map-from file argument missing.\" )\n show_help()\n elif not file_exists( args.get('mapfrom') ):\n stderr( \"map-from file does not exist.\" )\n show_help()\n \n if not args.has_key('delimiter') or args.get('delimiter') not in [ \";\", \",\", \"tab\", \"space\" ]: \n args['delimiter'] = \"\\t\"\n else:\n if args['delimiter'] == \"tab\": args['delimiter'] = \"\\t\"\n elif args['delimiter'] == \"space\": args['delimiter'] = \" \"\n\n if not args.has_key('colto'): args['colto'] = 0\n if not args.has_key('debug'): args['debug'] = 0\n if not args.has_key('conserve'): args['conserve'] = 0\n\n return args\n\n\n# =============================================================================\ndef get_mapping(file, delimiter, col, debug=0):\n hash = {}\n fo = open( file )\n for line in fo:\n line = line.rstrip()\n #print \"delimiter:\" + delimiter\n col = line.split(delimiter)\n if hash.has_key(col[0]): print >> sys.stderr, \"*** WARNING: mapping ambiguous for entry\", col[0]\n hash[ col[0] ] = string.join(col[1:], delimiter)\n if debug: print col[0], \"=>\", string.join(col[1:], delimiter)\n fo.close()\n return hash\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n mapping = get_mapping( args.get('mapfrom'), args.get('delimiter'), 0, args.get('debug'))\n fo = open( args.get('mapto') )\n for line in fo:\n line = line.rstrip()\n col = line.split(args.get('delimiter'))\n key = col[args.get('colto')]\n if mapping.has_key(key):\n col.append( mapping.get(key) )\n else:\n if not args.get('silent'): stderr('skipping entry. 
key not found in mapping-from file: \\"%s\\"' % key )\n if mapping.has_key(key) or not args.get('conserve'): \n print string.join(col, args.get('delimiter'))\n\n fo.close()\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5035999417304993, "alphanum_fraction": 0.508075475692749, "avg_line_length": 34.43448257446289, "blob_id": "061868cd873577a581767857281440c062973", "content_id": "d2d45af6a68b6e4ed19dc113346f7141e97973", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5139, "license_type": "permissive", "max_line_length": 98, "num_lines": 145, "path": "/python/fasta/fasta-extract-fragment.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nfrom low import * # custom functions, written by myself\nfrom Bio.Seq import Seq\nfrom Bio.Alphabet import IUPAC\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f fasta file\" )\n stdout( \" -i seq ID\" )\n stdout( \" -u startpos (BLAST-like, starting at 1)\" )\n stdout( \" -v endpos (BLAST-like)\" )\n stdout( \" -x do not count gaps (BLAST-like)\" )\n stdout( \" -C return complement (antisense strand sequence instead of sense)\" )\n stdout( \" -R return reverse sequence\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:i:u:v:xCR\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {'countGaps':True, 'complement':False, 'reverse':False}\n for key, value in keys:\n if key == '-f': args['fastafile'] = value\n if key == '-i': args['seqid'] = value\n if key == '-u': args['startpos'] = int(value) -1\n if key == '-v': args['endpos'] = int(value)\n if key == '-x': args['countGaps'] = False\n if key == '-C': args['complement'] = True\n if key == '-R': args['reverse'] = True\n \n for key in ['fastafile', 'seqid']:\n if key.endswith(\"file\"):\n if not args_file_exists(args, key): show_help()\n elif key.endswith(\"dir\"):\n if not args_dir_exists(args, key): show_help()\n elif not args.has_key(key):\n print >> sys.stderr, \"missing argument\", key\n show_help()\n return args\n\n# =============================================================================\ndef statusbar(current, total, message=\"\", width=40):\n progress = 1.0*current/total\n if message != \"\": message = \"[\" + message + \"]\"\n progressbar = \"=\" * int(progress*width)\n while len(progressbar) < width: progressbar += \" \" \n sys.stderr.write(\"\\r 0% \" + progressbar + \" 100% \" + message)\n if progress == 1.0: sys.stderr.write(\"\\n\")\n \n# 
=============================================================================\ndef extract_sequence(args):\n fo = open(args['fastafile'])\n seqid = args['seqid']\n found = False\n for line in fo:\n line = line.rstrip()\n if line.startswith('>'):\n fid = line[1:].split()[0]\n if fid == seqid: found = True\n elif found: break\n if found: print line\n fo.close()\n \n# =============================================================================\ndef extract_fragment(args):\n fo = open(args['fastafile'])\n seqid = args['seqid']\n pos = 0\n found = False\n startpos = args.get('startpos', 0)\n endpos = args.get('endpos', False)\n seq = \"\"\n for line in fo:\n line = line.rstrip()\n if line.startswith('>'):\n fid = line[1:].split()[0]\n if fid == seqid: \n found = True\n out = '>' + fid\n if args.has_key('startpos') or args.has_key('endpos'): out += \" %s:%s\" %(startpos, endpos)\n if args['reverse']: out += \" reverse\"\n if args['complement']: out += \" complement\"\n print out\n elif found: break\n else: continue\n elif found: \n if not args['countGaps']: line = line.replace('-','')\n if not endpos == False and pos > endpos: break\n if pos < startpos and pos+len(line) < startpos: \n pos += len(line)\n continue\n if pos < startpos and pos+len(line) >= startpos: out = line[startpos-pos:]\n elif pos >= startpos: out = line\n if not endpos == False and endpos < pos+len(line): out = out[:endpos-pos]\n if not args['reverse'] and not args['complement']:\n print out\n else:\n seq += out\n pos += len(line)\n fo.close()\n if args['reverse'] or args['complement']:\n seq = Seq(seq, IUPAC.unambiguous_dna)\n if args['reverse'] and args['complement']:\n seq = seq.reverse_complement()\n elif args['complement']: seq = seq.complement()\n elif args['reverse']: seq = seq[::-1]\n seq = str(seq)\n print seq\n\n \n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n if args.has_key('startpos') or args.has_key('endpos'):\n extract_fragment(args)\n else:\n extract_sequence(args)\n \n \n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.4985795319080353, "alphanum_fraction": 0.5035511255264282, "avg_line_length": 35.43, "blob_id": "f874da4b96ed3c87a5abaf730fe4c6df0ac6c414", "content_id": "a2a10d79006aa5357ec1eb336e2a928792c300c9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4224, "license_type": "permissive", "max_line_length": 105, "num_lines": 115, "path": "/python/pfam/pfam-filter-output.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nimport math # match functions\nfrom low import * # custom functions, written by myself\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path> \" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f pfam_full file to parse\" )\n stdout( \" -d Pfam-A.hmm file 
[default: /global/databases/pfam/current/pfam_scan_db/Pfam-A.hmm\" )\n stdout( \" -c cutoff to apply (GA|TC|NC) [default: GA]\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:d:c:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {'hmmfile':'/global/databases/pfam/current/pfam_scan_db/Pfam-A.hmm', 'cut':'GA'}\n for key, value in keys:\n if key == '-f': args['annotfile'] = value\n if key == '-d': args['hmmfile'] = value\n if key == '-c': args['cut'] = value\n \n for key in ['annotfile', 'hmmfile']:\n if key.endswith(\"file\"):\n if not args_file_exists(args, key): show_help()\n elif key.endswith(\"dir\"):\n if not args_dir_exists(args, key): show_help()\n elif not args.has_key(key):\n print >> sys.stderr, \"missing argument\", key\n show_help()\n\n return args\n\n\n# =============================================================================\ndef get_regex( args ):\n idhash = {}\n idhash['name'] = re.compile('^#=GF ID\\s+(\\S+)')\n idhash['acc'] = re.compile('^#=GF AC\\s+(PF\\S+)')\n idhash['descr'] = re.compile('^#=GF DE\\s+(.*)$')\n idhash['comment'] = re.compile('^#=GF CC\\s+(.*)$')\n idhash['pftype'] = re.compile('^#=GF TP\\s+(\\S+)')\n idhash['terminate'] = re.compile('^\\\\\\\\$')\n return idhash\n\n# =============================================================================\nclass PfamEntry:\n def __init__(self):\n self.name = None\n self.acc = None\n self.descr = None\n self.ga = None\n self.tc = None\n self.nc = None\n \n\n# =============================================================================\ndef load_hmmfile(hmmfile):\n name2pfam = {}\n fo = open( hmmfile )\n entry = PfamEntry()\n for line in fo:\n line = line.rstrip()\n if line.startswith('//'):\n name2pfam[entry.name] = entry\n entry = PfamEntry()\n elif line.startswith(\"NAME\"): entry.name = line.split()[1]\n elif line.startswith(\"ACC\"): entry.acc = line.split()[1]\n elif line.startswith(\"DESC\"): entry.descr = line.split(' ', 1)[1]\n elif line.startswith(\"GA\"): entry.ga = float(line.split()[1])\n elif line.startswith(\"NC\"): entry.nc = float(line.split()[1])\n elif line.startswith(\"TC\"): entry.tc = float(line.split()[1])\n fo.close()\n return name2pfam\n \n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n\n name2entry = load_hmmfile(args['hmmfile'])\n fo = open(args['annotfile'])\n for line in fo:\n line = line.rstrip()\n if line.startswith(\"#\") or len(line) == 0: continue\n cols = line.split()\n name, score = cols[-9], float(cols[-4])\n if args['cut'] == 'GA' and score < name2entry[name].ga: continue\n elif args['cut'] == 'TC' and score < name2entry[name].tc: continue\n elif args['cut'] == 'NC' and score < name2entry[name].nc: continue\n print string.join(cols, \"\\t\")\n fo.close() \n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n" }, { "alpha_fraction": 0.48111438751220703, "alphanum_fraction": 
0.487543523311615, "avg_line_length": 31.46086883544922, "blob_id": "bdd99c7c85e449222d2ddf82c3b9c683a926744d", "content_id": "44012a8ad757620f45e56bcdf3af79ba9a42bb3d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3733, "license_type": "permissive", "max_line_length": 86, "num_lines": 115, "path": "/python/blast/cluster-paralogs.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular expressions\nimport getopt\t\t\t\t\t# comand line argument handling\nfrom low import *\t\t\t# custom functions, written by myself\nimport hashlib\n\n# =============================================================================\t\ndef show_help( ):\n\t\"\"\" displays the program parameter list and usage information \"\"\"\n\tstdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n\tstdout( \" \" )\n\tstdout( \" option description\" )\n\tstdout( \" -h help (this text here)\" )\n\tstdout( \" -f parsed blast.out, the first two columns being paralog pair IDs\" )\n\tstdout( \" \" )\n\tsys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n\t\"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\tif len ( sys.argv ) == 1:\n\t\tstderr( \"no arguments provided.\" )\n\t\tshow_help()\t\n\t\n\ttry: # check for the right arguments\n\t\tkeys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n\texcept getopt.GetoptError:\n\t\tstderr( \"invalid arguments provided.\" )\n\t\tshow_help()\n\t\n\targs = {}\n\tfor key, value in keys:\n\t\tif key == '-f': args['file'] = value\n\t\t\t\t\n\tif not args.has_key('file'):\n\t\tstderr( \"parsed blast file file missing.\" )\n\t\tshow_help()\n\tif not file_exists( args.get('file') ):\n\t\tstderr( \"parsed blast file file does not exist.\" )\n\t\tshow_help()\n\t\t\n\treturn args\n\n# =============================================================================\ndef get_all_pairs(ifile):\n    parhash = {}\n    fo = open(ifile)\n    for line in fo:\n      id1, id2 = line.strip().split(\"\\t\")[0:2]\n      if not parhash.has_key(id1): parhash[id1] = []\n      parhash[id1].append(id2)\n      if not parhash.has_key(id2): parhash[id2] = []\n      parhash[id2].append(id1)\n    fo.close()\n    return parhash\n\n# =============================================================================\ndef get_edges(ifile):\n  hash = {}\n  fo = open(ifile)\n  for line in fo:\n    ids = line.strip().split(\"\\t\")[0:2]\n    ids.sort()\n    hash[string.join(ids, \",\")] = 1\n  fo.close()\n  return hash\n\n# =============================================================================\ndef edges_in_cluster(clgids, edgehash):\n  edgecount = 0\n  for i in range(len(clgids)):\n    for j in range(len(clgids)):\n      if j <= i: continue\n      gids = [clgids[i], clgids[j]]\n      gids.sort()\n      key = string.join(gids, \",\")\n      if edgehash.has_key(key): edgecount += 1\n  return edgecount\n  \n# =============================================================================\n# =============================================================================\ndef main( args ):\n  nclusters = 0\n  parhash = get_all_pairs(args.get('file'))\n  nodes = parhash.keys()\n  edgehash = get_edges(args['file'])\n  while 
len(check) > 0:\n c = check.pop()\n if c in members: continue\n members.append(c)\n if parhash.has_key(c):\n check.extend(parhash[c])\n check = list(set(check))\n cid = hashlib.md5(string.join(members, '')).hexdigest()\n out = [cid, str(len(members)), str(edges_in_cluster(members, edgehash))] + members\n print string.join(out, \"\\t\")\n for m in members: \n if m in nodes: nodes.remove(m)\n print >> sys.stderr, \"clusters: %s\" % nclusters\n\t\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.6135895848274231, "alphanum_fraction": 0.6211393475532532, "avg_line_length": 41.85293960571289, "blob_id": "893b80bc4d6d877bccc0b8497a5d9f0eb7dde0a0", "content_id": "6278a2a4a966bac30d96cce4b2a68080597ef3ad", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1457, "license_type": "permissive", "max_line_length": 87, "num_lines": 34, "path": "/python/base/goterm.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "class GOTerm():\n def __init__(self, lines):\n self.id = \"\"\n self.name = \"\"\n self.namespace = \"\"\n self.definition = \"\"\n self.is_a = []\n self.alt_ids = []\n self.xrefs = []\n self.synonyms = []\n self.obsolete = 0\n\n for line in lines:\n line = line.strip()\n if line.startswith(\"id: \"): self.id = line[line.index(\":\")+2:]\n if line.startswith(\"name: \"): self.name = line[line.index(\":\")+2:]\n if line.startswith(\"namespace: \"): self.namespace = line[line.index(\":\")+2:]\n if line.startswith(\"def: \"): self.definition = line[line.index(\":\")+2:]\n if line.startswith(\"is_a: \"): self.is_a.append( line[line.index(\":\")+2:] )\n if line.startswith(\"is_obsolete: true\"): self.obsolete = 1\n if line.startswith(\"alt_id: \"): self.alt_ids.append( line[line.index(\":\")+2:] )\n if line.startswith(\"xref: \"): self.xrefs.append( line[line.index(\":\")+2:] )\n if line.startswith(\"synonym: \"): self.synonyms.append( line[line.index(\":\")+2:] )\n\n def get_id(self): return self.id\n def get_name(self): return self.name\n def get_namespace(self): return self.namespace\n def get_definition(self): return self.definition\n def get_is_a(self): return self.is_a\n def get_is_a_goids(self): return [e.split()[0] for e in self.is_a]\n def get_alt_ids(self): return self.alt_ids\n def get_xrefs(self): return self.xrefs\n def get_synonyms(self): return self.synonyms\n def get_is_obsolete(self): return self.obsolete\n" }, { "alpha_fraction": 0.5870881080627441, "alphanum_fraction": 0.595158040523529, "avg_line_length": 28.739999771118164, "blob_id": "b48c76472299d6173b6c5c2620ccdbbd232e73da", "content_id": "324ee60607572029ec1c663891fc0fae89bc06a0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2974, "license_type": "permissive", "max_line_length": 114, "num_lines": 100, "path": "/python/latex-bibtex/bibtex-number-of-coauthors.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys, re, string\n\nclass BibtexEntry:\n\n\n def __init__(self, lines):\n self.ATTRIBUTE_REGEX = re.compile(\"\\s{2}(\\S+)\\s{1}=\\s\\{(.*)\\}*$\")\n self.BIBTEXSTART_REGEX = re.compile(\"@([A-Z]+)\\{(\\S+),$\")\n self.key = \"\"\n self.bibtype = \"\"\n 
self.attributehash = {}\n while 1:\n if len(lines) == 0: break\n line = lines.pop(0)\n\n # end of entry\n if line.startswith(\"}\"): break\n\n # bibtex entry start line and key definition\n if self.BIBTEXSTART_REGEX.match(line):\n self.bibtype = self.BIBTEXSTART_REGEX.match(line).group(1)\n self.key = self.BIBTEXSTART_REGEX.match(line).group(2)\n continue\n\n # bibtex attribute start\n if self.ATTRIBUTE_REGEX.match(line):\n attr = self.ATTRIBUTE_REGEX.match(line).group(1)\n value = self.ATTRIBUTE_REGEX.match(line).group(2)\n self.attributehash[attr] = value\n else: self.attributehash[attr] += \" \" + line.strip()\n\n for attr, value in self.attributehash.iteritems():\n if value.endswith(\"}\"): self.attributehash[attr] = value[:-1]\n elif value.endswith(\"},\"): self.attributehash[attr] = value[:-2]\n\n def get_key(self): return self.key\n def get_first_author(self): return self.attributehash['author'].split(\" and \")[0]\n def get_attr(self, name):\n if self.attributehash.has_key(name): return self.attributehash[name]\n return \"\"\n\n def get_author_count(self, return_str=0): \n count = self.attributehash['author'].count(\" and \") +1\n if return_str: return \"%s\" % count\n else: return count\n\n def annotate(self):\n self.attributehash['annotate'] = \"(%s co-authors)\" % self.get_author_count()\n self.attributehash['note'] = \"(%s co-authors)\" % self.get_author_count()\n\n def to_s(self, escape_title=1, annotate=0):\n print \"@\" + self.bibtype + \"{\" + self.key + \",\"\n all_attrs = self.attributehash.keys()\n for i in range(len(all_attrs)):\n attr = all_attrs[i]\n if i == len(all_attrs)-1: comma = \"\"\n else: comma = \",\"\n if attr == \"title\" and escape_title: print \" \" + attr + \" = \\\"{\" + self.attributehash[attr] + \"}\\\"\" + comma\n else: print \" \" + attr + \" = {\" + self.attributehash[attr] + \"}\" + comma\n print \"}\"\n\n\n\n\n\ndef usage():\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" db.bib [n=max-coauthors]\"\n sys.exit(1)\n\n\ndef plausi():\n if len(sys.argv) != 3: usage()\n inFile, inAuthors = sys.argv[1:3]\n return inFile, int(inAuthors)\n\n\ndef main():\n inFile, inAuthors = plausi()\n fo = open(inFile)\n while 1:\n line = fo.readline().rstrip()\n if line.startswith(\"%\"): continue\n if line.startswith(\"@comment\"): break\n if line.startswith(\"@\"):\n lines = []\n lines.append(line)\n while 1:\n line = fo.readline().rstrip()\n lines.append(line)\n if line.startswith(\"}\"): break\n b = BibtexEntry(lines)\n count = b.get_author_count()\n if count > inAuthors: b.annotate()\n b.to_s()\n\n fo.close()\n\n\nmain()\n" }, { "alpha_fraction": 0.6263186931610107, "alphanum_fraction": 0.6449195146560669, "avg_line_length": 33.61538314819336, "blob_id": "3e66f902fe99dba2f3ea1f808bf9162cbe187e88", "content_id": "b325206a135f31b4498ae7564e201ffa0c4c244a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3602, "license_type": "permissive", "max_line_length": 170, "num_lines": 104, "path": "/python/base/sciroko.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "import string\n\nclass SSR():\n def __init__(self, line):\n columns = line.rstrip().split(\"\\t\")\n self.seqname = columns.pop(0)\n self.motif = columns.pop(0)\n self.motif_std = columns.pop(0)\n self.startpos = int(columns.pop(0))\n self.endpos = int(columns.pop(0))\n self.length = int(columns.pop(0))\n self.score = int(columns.pop(0))\n self.mismatches = int(columns.pop(0))\n self.mode = columns.pop(0)\n 
self.seq = columns.pop(0)\n\n def to_s(self):\n array = [self.seqname, self.motif, self.motif_std, str(self.startpos), str(self.endpos), str(self.length), str(self.score), str(self.mismatches), self.mode, self.seq]\n return string.join(array, \"\\t\")\n\n def is_perfect(self):\n if self.mismatches == 0: return 1\n return 0\n\n def motif_len(self): return len(self.motif_std)\n\n def compare_to(self, other):\n if self.organism == other.organism or self.chromosome != other.chromosome: return 0 # not comparable\n if self.motif_std != other.motif_std: return 99 # different motif, i.e. no similarity between the SSRs\n if self.is_perfect() and other.is_perfect():\n if self.seq == other.seq: return 1 # 100% perfect matches\n if self.length != other.length: return 11 # two perfect SSRs with polymorphic change\n\n if (self.is_perfect() and not other.is_perfect()) or (not self.is_perfect() and other.is_perfect()):\n if self.length == other.length: return 31\n\n if not self.is_perfect() and not other.is_perfect():\n if self.seq == other.seq: return 51\n\n\n def is_perfect_match_to(self, other):\n if self.motif_std != other.motif_std: return 0\n if self.length != other.length: return 0\n if self.mismatches != other.mismatches: return 0\n return 1\n\n def is_imperfect_match_to(self, other):\n if self.motif_std != other.motif_std: return 0\n if self.length != other.length: return 0\n return 1\n\n def is_polymorphic_to(self, other):\n if self.motif != other.motif: return 0\n if self.repeats == other.repeats: return 0\n return 1\n\n def is_shifted_to(self, other):\n if self.motif == other.motif: return 0\n if self.type != other.type: return 0\n m = self.motif\n for i in range(len(self.motif)):\n m = m[1:] + m[0]\n if m == other.motif: return 1\n return 0\n\n\nclass MisaSSR():\n def __init__(self, line):\n self.feature = 0\n columns = line.rstrip().split(\"\\t\")\n self.geneid = columns.pop(0)\n self.ssrnr = int(columns.pop(0))\n self.type = columns.pop(0)\n self.pattern = columns.pop(0)\n self.length = int(columns.pop(0))\n self.startpos = int(columns.pop(0))\n self.endpos = int(columns.pop(0))\n if len(columns) > 0: self.feature = columns.pop(0)\n if self.type != \"c\" and self.type != \"c*\":\n self.motif = self.pattern[1:self.pattern.index(\")\")]\n if self.pattern.endswith(\"*\"): self.repeats = int(self.pattern[self.pattern.index(\")\")+1:-1])\n else: self.repeats = int(self.pattern[self.pattern.index(\")\")+1:])\n\n def to_s(self):\n array = [self.geneid, str(self.ssrnr), self.type, self.pattern, str(self.length), str(self.startpos), str(self.endpos)]\n return string.join(array, \"\\t\")\n\n def is_perfect_match_to(self, other):\n if self.pattern != other.pattern: return 0\n return 1\n\n def is_polymorphic_to(self, other):\n if self.motif != other.motif: return 0\n if self.repeats == other.repeats: return 0\n return 1\n\n def is_shifted_to(self, other):\n if self.motif == other.motif: return 0\n if self.type != other.type: return 0\n m = self.motif\n for i in range(len(self.motif)):\n m = m[1:] + m[0]\n if m == other.motif: return 1\n return 0\n\n\n" }, { "alpha_fraction": 0.5172798037528992, "alphanum_fraction": 0.5245261788368225, "avg_line_length": 32.22222137451172, "blob_id": "3c3ebd49ffc8435d38a516571144313cedbba696", "content_id": "8cac8247a09ade74a8b8dea95228106cac2e5402", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1794, "license_type": "permissive", "max_line_length": 82, "num_lines": 54, "path": "/python/base/orthocluster.py", 
"repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "import sys\n\n# =============================================================================\ndef parse(clusterfile, poshash={}):\n nspecies = 0\n clusterhash = {}\n fo = open(clusterfile)\n while 1:\n line = fo.readline()\n if not line: break\n if line.startswith(\"No. of sequence\"): nspecies = int(line.split()[-1])\n if line.startswith(\"CL-\"):\n cols = line.split()\n sc = OrthoCluster(cols[0])\n ngenes = int(max(cols[1:nspecies+1]))\n fo.readline()\n for i in range(ngenes):\n cols = fo.readline().split()\n for j in range(nspecies):\n cols.pop(0)\n cols.pop(0)\n strand = cols.pop(0)\n scaffold = cols.pop(0)\n geneid = cols.pop(0)\n if poshash.has_key(geneid):\n startpos, endpos = poshash[geneid][1:3]\n else:\n print >> sys.stderr, \"geneid\", geneid, \"not found in poshash\"\n startpos, endpos = None, None\n sr = SyntenicRegion(geneid, scaffold, strand, startpos, endpos)\n sc.add_syntenic_region(sr, j)\n clusterhash[sc.id] = sc\n fo.close()\n return nspecies, clusterhash\n\n\n# =============================================================================\nclass SyntenicRegion():\n def __init__(self, geneid, scaffold, strand, startpos, endpos):\n self.geneid = geneid\n self.scaffold = scaffold\n self.strand = strand\n self.startpos = startpos\n self.endpos = endpos\n\n# =============================================================================\nclass OrthoCluster():\n def __init__(self, clusterid):\n self.id = clusterid\n self.syntenic_regions = {}\n\n def add_syntenic_region(self, sr, index):\n if not self.syntenic_regions.has_key(index): self.syntenic_regions[index] = []\n self.syntenic_regions[index].append(sr)\n" }, { "alpha_fraction": 0.6390358805656433, "alphanum_fraction": 0.6431511044502258, "avg_line_length": 23.299999237060547, "blob_id": "2d89a1fe2947fb8a985c4a2666538356ad071790", "content_id": "6258dc53a8822f98b077dcdeb61f4a4352d95d79", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1701, "license_type": "permissive", "max_line_length": 84, "num_lines": 70, "path": "/python/orthomcl/paralogs-per-cluster.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys\nimport string\n\n\ndef usage():\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" orthomcl.out\"\n sys.exit(1)\n\n\ndef plausi():\n if len(sys.argv) != 2: usage()\n inFile = sys.argv[1]\n return inFile\n\n\nclass OrthoCluster():\n def __init__(self, line):\n descr, genedefs = line.split(\"\\t\")\n genedefs = genedefs.split()\n self.name = descr[:descr.index('(')].lower()\n self.geneHash = {}\n self.speciesHash = {}\n for genedef in genedefs:\n geneid = genedef[:genedef.index('(')]\n species = genedef[genedef.index('(')+1:-1]\n self.geneHash[geneid] = species\n if self.speciesHash.has_key(species): self.speciesHash[species].append(geneid)\n else: self.speciesHash[species] = [geneid]\n\n def get_name(self): return self.name\n def get_count(self): return len(self.geneHash)\n def get_gene_hash(self): return self.geneHash\n def get_species_hash(self): return self.speciesHash\n\n\ndef get_species_from_first_line(inFile):\n fo = open(inFile)\n line = fo.readline()\n o = OrthoCluster(line.rstrip())\n fo.close()\n species = o.get_species_hash().keys()\n species.sort()\n return species\n\n\ndef parse_orthocml_out(inFile):\n speciesList = get_species_from_first_line(inFile)\n print >> sys.stdout, \"\\t\" + string.join(speciesList, 
\"\\t\")\n fo = open(inFile)\n for line in fo:\n o = OrthoCluster(line.rstrip())\n speciesHash = o.get_species_hash()\n sys.stdout.write(o.get_name())\n for s in speciesList:\n count = 0\n if speciesHash.has_key(s): count = len(speciesHash[s])\n sys.stdout.write(\"\\t%s\" % count)\n sys.stdout.write(\"\\n\")\n \n fo.close()\n\n\ndef main():\n inFile = plausi()\n parse_orthocml_out(inFile)\n\n\n\nmain()\n" }, { "alpha_fraction": 0.6140764951705933, "alphanum_fraction": 0.6258856654167175, "avg_line_length": 23.905881881713867, "blob_id": "c3ad7e29e17e896249b989f816231dd618cd5909", "content_id": "41c2e268c528d60a0ffbfab70732151a36bae578", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2117, "license_type": "permissive", "max_line_length": 84, "num_lines": 85, "path": "/python/orthomcl/tree-for-codeml.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys\nimport string\n\n\ndef usage():\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" orthomcl.out base.tree\"\n sys.exit(1)\n\n\ndef plausi():\n if len(sys.argv) != 3: usage()\n inOrtho, inTree = sys.argv[1:3]\n return inOrtho, inTree\n\n\nclass OrthoCluster():\n def __init__(self, line):\n descr, genedefs = line.split(\"\\t\")\n genedefs = genedefs.split()\n self.name = descr[:descr.index('(')].lower()\n self.geneHash = {}\n self.speciesHash = {}\n for genedef in genedefs:\n geneid = genedef[:genedef.index('(')]\n species = genedef[genedef.index('(')+1:-1] + \"1\"\n self.geneHash[geneid] = species\n if self.speciesHash.has_key(species): self.speciesHash[species].append(geneid)\n else: self.speciesHash[species] = [geneid]\n\n def get_name(self): return self.name\n def get_count(self): return len(self.geneHash)\n def get_gene_hash(self): return self.geneHash\n def get_species_hash(self): return self.speciesHash\n\n\ndef get_species_from_first_line(inFile):\n fo = open(inFile)\n line = fo.readline()\n o = OrthoCluster(line.rstrip())\n fo.close()\n species = o.get_species_hash().keys()\n species.sort()\n return species\n\n\ndef parse_orthocml_out(inFile, tree):\n fo = open(inFile)\n for line in fo:\n o = OrthoCluster(line.rstrip())\n speciesHash = o.get_species_hash()\n name = o.get_name()\n for species, genelist in speciesHash.iteritems():\n if len(genelist) > 1: break\n\n replacement = '(' + species[:-1] + '1 #1,' + species[:-1] + '2)'\n tree_repl_1 = tree.replace(species, replacement)\n replacement = '(' + species[:-1] + '1,' + species[:-1] + '2 #1)'\n tree_repl_2 = tree.replace(species, replacement)\n fw = open(name + \".tree.1\", \"w\")\n fw.write(tree_repl_1)\n fw.close()\n fw = open(name + \".tree.2\", \"w\")\n fw.write(tree_repl_2)\n fw.close()\n fo.close()\n\n\ndef read_tree_from_file(file):\n fo = open(file)\n tree = \"\"\n for line in fo:\n tree += line.strip()\n fo.close()\n return tree\n\n\ndef main():\n inOrtho, inTree = plausi()\n tree = read_tree_from_file(inTree)\n parse_orthocml_out(inOrtho, tree)\n\n\n\nmain()\n" }, { "alpha_fraction": 0.46151116490364075, "alphanum_fraction": 0.4693153500556946, "avg_line_length": 31.76744270324707, "blob_id": "28a84367d10cb489475b15416882eda1547bdd81", "content_id": "4677b6771443780bfbaa214c270a202611007372", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2819, "license_type": "permissive", "max_line_length": 86, "num_lines": 86, "path": "/python/fasta/index-fasta.py", "repo_name": 
"haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport getopt\t\t\t\t\t# comand line argument handling\nimport anydbm\t\t\t\t\t# index databases (file hash)\nfrom low import *\t\t\t# collection of generic self-defined functions\n\n\n# =============================================================================\t\ndef show_help( ):\n\t\"\"\" displays the program parameter list and usage information \"\"\"\n\tstdout( \"usage: \" + sys.argv[0] + \" -f <path> -o <path>\" )\n\tstdout( \" \" )\n\tstdout( \" option description\" )\n\tstdout( \" -h help (this text here)\" )\n\tstdout( \" -f input fasta file\" )\n\tstdout( \" -o output dbm file\" )\n\tstdout( \" \" )\n\tsys.exit(1)\n\t\n# =============================================================================\ndef handle_arguments():\n\t\"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\tif len ( sys.argv ) == 1:\n\t\tstderr( \"no arguments provided.\" )\n\t\tshow_help()\t\n\t\n\ttry: # check for the right arguments\n\t\tkeys, values = getopt.getopt( sys.argv[1:], \"hf:o:\" )\n\texcept getopt.GetoptError:\n\t\tstderr( \"invalid arguments provided.\" )\n\t\tshow_help()\n\t\n\targs = {}\n\tfor key, value in keys:\n\t\tif key == '-f': args['fasta'] = value\n\t\tif key == '-o':\targs['out'] = value\n\t\n\tif not args.has_key('fasta'):\n\t\tstderr( \"fasta file missing.\" )\n\t\tshow_help()\n\tif not file_exists( args.get('fasta') ):\n\t\tstderr( \"fasta file does not exist.\" )\n\t\tshow_help()\n\t\n\tif not args.has_key('out'):\n\t\tstderr( \"out file missing.\" )\n\t\tshow_help()\n\t\t\n\treturn args\n\n\t\n# =============================================================================\n# =============================================================================\ndef main( args ):\n DBM = anydbm.open( args.get('out'), 'c' )\n sout, serr = catch_bash_cmd_output( \"grep '>' -c %s\" %args.get('fasta') )\n total = int( sout )\n added = 0\n fo = open( args.get('fasta') )\n key, value = '', ''\n for line in fo:\n line = line.rstrip()\n if line.startswith('>'):\n if key != '' and value != '':\n #print key + \"\\t\" + value\n added += 1\n DBM[ key ] = value\n sys.stderr.write('\\r\\tindexing:\\t%s\\t%01.2f%%' %(added,100.0*added/total) )\n sys.stderr.flush()\n key, value = '', ''\n key = re.match(\">(\\S+)\", line).group(1)\n else:\n value += line.rstrip()\n fo.close()\n if key != '' and value != '':\n added += 1\n DBM[ key ] = value\n #print key + \"\\t\" + value\n DBM.close()\n sys.stderr.write('\\r\\tindexing:\\t%s\\t%01.2f%%\\ndone.\\n' %(added,100.0*added/total) )\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\nargs = handle_arguments( )\nmain( args )\n\n" }, { "alpha_fraction": 0.4615110456943512, "alphanum_fraction": 0.4668567478656769, "avg_line_length": 32.79518127441406, "blob_id": "8cb087b75583841a37757263a33ed5adac0edf72", "content_id": "d9671ea05f0b70833fd1b26cfd500b35900ad15c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2806, "license_type": "permissive", "max_line_length": 115, "num_lines": 83, "path": "/python/fasta/uniprot-dat-to-fasta.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": 
"#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nfrom low import * # custom functions, written by myself\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f uniprot dat file\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['datfile'] = value\n \n for key in ['datfile']:\n if key.endswith(\"file\"):\n if not args_file_exists(args, key): show_help()\n elif key.endswith(\"dir\"):\n if not args_dir_exists(args, key): show_help()\n return args\n\n# ============================================================================= \ndef parse_until_doubleslash(fo):\n hash, end = {}, False\n line = fo.readline().strip()\n while not line.startswith(\"//\"):\n if len(line) == 0:\n end = True\n break\n if len(line.split(\" \", 1)[0]) != 2:\n key = \"SEQ\"\n value = line.strip().replace(\" \", \"\")\n else:\n cols = [e.strip() for e in line.split(\" \", 1)]\n if len(cols) != 2: \n line = fo.readline().strip()\n continue\n key, value = [e.strip() for e in line.split(\" \", 1)]\n if not hash.has_key(key): hash[key] = \"\"\n if key != \"SEQ\" and len(hash[key]) > 0 and hash[key][-1] != \" \" and not value.startswith(\" \"): hash[key] += \" \"\n hash[key] += value\n line = fo.readline().strip()\n return hash, end\n \n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n fo = open(args['datfile'])\n while 1:\n hash, end = parse_until_doubleslash(fo)\n if end: break\n print \">\" + hash[\"ID\"].split()[0] + \" \" + hash[\"OC\"]\n print hash[\"SEQ\"]\n fo.close()\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5245274305343628, "alphanum_fraction": 0.5294779539108276, "avg_line_length": 32.92366409301758, "blob_id": "a5987dc35a6068b09aa7ef24651b571a0ec34f3f", "content_id": "e50abcccfb89da7fe1c9b956b6e13162107384d1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4444, "license_type": "permissive", "max_line_length": 98, "num_lines": 131, "path": "/python/swapsc/swapsc.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular expressions\nimport getopt\t\t\t\t\t# comand line argument 
handling\nfrom low import *\t\t\t# custom functions, written by myself\nimport tempfile\n\n# =============================================================================\t\ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -a nt alignment file\" )\n stdout( \" -s evolver simulation file (generated with swapsc-in)\" )\n stdout( \" -t tree file containing the newick tree (with numbers, not names, no spaces)\" )\n stdout( \" -p path to swapsc binary\" )\n stdout( \" \" )\n\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help()\t\n\n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"ha:s:p:t:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-a':\targs['aln'] = value\n if key == '-s':\targs['simul'] = value\n if key == '-p':\targs['swapsc'] = value\n if key == '-t':\targs['tree'] = value\n \n if not args.has_key('aln'):\n stderr( \"aln file missing.\" )\n show_help()\n if not file_exists( args.get('aln') ):\n stderr( \"aln file does not exist.\" )\n show_help()\n \n if not args.has_key('simul'):\n stderr( \"simulation file missing.\" )\n show_help()\n if not file_exists( args.get('simul') ):\n stderr( \"simulation file does not exist.\" )\n show_help()\n\n if not args.has_key('tree'):\n stderr( \"tree file missing.\" )\n show_help()\n if not file_exists( args.get('tree') ):\n stderr( \"tree file does not exist.\" )\n show_help()\n \n if not args.has_key('swapsc'):\n stderr(\"path to SWAPCS not specified\")\n show_help()\n\n if not file_exists( args.get('swapsc') ):\n stderr( \"SWAPSC binary not found.\" )\n show_help()\n\n args['workdir'] = os.path.split( args.get('swapsc') )[0] + '/'\n\n return args\n\n# =============================================================================\ndef get_sysout(command):\n proc = os.popen(command)\n out = proc.read().strip()\n proc.close()\n return out \n\n\n# =============================================================================\ndef generate_control_file(outFolder):\n path = outFolder\n if not path.endswith(\"/\"): path += \"/\"\n path += \"SWAPSC.ctl\"\n fw = open(path, \"w\")\n fw.write(\"\"\"data_file: aln *File with the alignment of sequences\n Tree_file: tree *File with the phylogenetic tree in Newick format\n Output_file: out *Name of the file with the output results\n Simulations: simul *Name of the file with the simulated alignments\n Model : 0 * 0 = Li 1993, 1 = Nei&Gojobori, 2 = Pamilo&Bianchi\n Window: 0 * 0 = inferred as in Fares et al. 
(2002), 1 = fixed\n Window_size: 3 * Size in codons of the window if fixed\\n\"\"\")\n fw.close()\n\n\n# =============================================================================\ndef main( args ):\n aln = args['aln']\n tree = args['tree']\n swapscbin = args['swapsc']\n simul = args['simul']\n outfile = aln + '.swapsc.out'\n\n tempdir = tempfile.mkdtemp(suffix='', prefix='tmp.swapsc.', dir='.')\n os.system(\"cp %s %s\" %(aln, tempdir + '/aln'))\n os.system(\"cp %s %s\" %(tree, tempdir + '/tree'))\n os.system(\"cp %s %s\" %(simul, tempdir + '/simul'))\n generate_control_file(tempdir)\n os.chdir(tempdir)\n os.system(swapscbin + '&> swapsc.log')\n os.chdir(\"..\")\n os.system(\"mv %s/out %s\" %(tempdir, outfile))\n # check if output complete\n lastline = get_sysout(\"tail -n 1 %s\" % outfile)\n if not lastline.startswith(\"P(neutral sites)\"):\n os.system(\"mv %s/log %s.log\" %(tempdir, outfile))\n os.system(\"rm -rf %s\" % tempdir)\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.494314581155777, "alphanum_fraction": 0.501263439655304, "avg_line_length": 32.31578826904297, "blob_id": "9755d84346e8cf743ca236a1d87bd8d27aef3011", "content_id": "5f03dc1b2c71d057721efabd35831f4339e651be", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3166, "license_type": "permissive", "max_line_length": 120, "num_lines": 95, "path": "/python/generic/flat-split-by-lines.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport getopt # comand line argument handling\nfrom low import *\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n print \"splits a flat file into chunks. Options: (1) N number of lines per chunk. 
(2) N number of chunks of equal size\"\n print \"usage: \" + sys.argv[0] + \" -f <file> [-i <chunks> -l <lines>]\"\n print \" \"\n print \" option description\"\n print \" -h help (this text here)\"\n print \" -f flat file to split\"\n print \" -l number of lines per chunk\"\n print \" -i number of equally sized chunks\"\n print \" \"\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n print >> sys.stderr, \"no arguments provided.\"\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:i:l:\" )\n except getopt.GetoptError:\n print >> sys.stderr, \"invalid arguments provided.\"\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['file'] = value\n if key == '-l': args['l'] = int(value)\n if key == '-i': args['i'] = int(value)\n \n if not args.has_key('file'):\n print >> sys.stderr, \"import file argument missing.\"\n show_help()\n elif not file_exists( args.get('file') ):\n print >> sys.stderr, \"import file does not exist.\"\n show_help()\n\n if not args.has_key('l') and not args.has_key('i'):\n print >> sys.stderr, \"l or i missing.\"\n show_help()\n \n return args\n\n\ndef get_number_of_lines(file):\n lines = 0\n fo = open(file)\n for line in fo: lines += 1\n return lines\n\n# =============================================================================\ndef get_lines_in(ifile):\n lc = 0\n fo = open(ifile)\n for line in fo: lc += 1\n fo.close()\n return lc\n \n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n\n totallines = get_lines_in(args.get('file'))\n linecount, filecount = 0, 1\n if args.has_key('i'): rotate = int(math.ceil( 1.0 * totallines / args.get('i') ))\n else: rotate = args.get('l')\n\n digits = len(str(math.ceil(1.0*totallines/rotate)))\n fw = open( args.get('file') + '.' + add_leading_zeroes(filecount, digits), 'w' )\n fo = open( args.get('file') )\n for line in fo:\n linecount += 1\n if ((linecount % rotate) == 1 and linecount > 1) or (rotate == 1 and linecount > 1):\n filecount += 1\n fw.close()\n fw = open( args.get('file') + '.' 
+ add_leading_zeroes(filecount, digits), 'w' )\n fw.write(line) \n fo.close()\n fw.close()\n \n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5315762162208557, "alphanum_fraction": 0.5485386252403259, "avg_line_length": 34.81308364868164, "blob_id": "d9e32258c5e0ff09d0bd85eb0fcecff999f0b62b", "content_id": "b40970abb5e0f084b994eac0c926d982024a9d9d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 3832, "license_type": "permissive", "max_line_length": 161, "num_lines": 107, "path": "/ruby/geneontology/termcloud-from-go-enrichment.rb", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby -w\n# == Synopsis\n# creates input for wordle.net/advanced to create a term cloud based on the six\n# output files produced by the go-enrichment.py script\n#\n\nrequire 'optparse'\nrequire 'rubygems'\nrequire 'faster_csv'\n\n\nFILES = ['topGO.over.Sig.CC.csv', 'topGO.over.Sig.BP.csv', 'topGO.over.Sig.MF.csv', 'topGO.under.Sig.CC.csv', 'topGO.under.Sig.BP.csv', 'topGO.under.Sig.MF.csv']\n\n# =============================================================================\ndef get_opt\n options = Hash.new\n optparse = OptionParser.new do |opts|\n opts.banner = \"Usage: #{$0} -d <dir>\"\n options[:dir] = nil\n options[:go2name] = nil\n opts.on( '-d DIR', 'directory that contains the topGO.over.Sig*.csv files'\n ){|dir| options[:dir] = dir}\n opts.on( '-g FILE', 'gene ontology id to name mapping file, tab delimited, to look up shortened term names'\n ){|file| options[:go2name] = file}\n end\n begin\n optparse.parse!\n mandatory = [:dir]\n missing = mandatory.select{|param| options[param].nil?}\n if not missing.empty?\n puts \"Missing options: #{missing.join(', ')}\"\n puts optparse \n exit\n end\n rescue OptionParser::InvalidOption, OptionParser::MissingArgument\n puts $!.to_s\n puts optparse\n exit\n end\n return options\nend\n\n\n# modify to adjust colors\n# =============================================================================\ndef map_color(ontology, direction)\n return '0065ab' if ontology == \"BP\" and direction == \"over\"\n return '93002d' if ontology == \"MF\" and direction == \"over\"\n return '1f9300' if ontology == \"CC\" and direction == \"over\"\n return '4d4d4d' if ontology == \"BP\" and direction == \"under\"\n return '4d4d4d' if ontology == \"MF\" and direction == \"under\"\n return '4d4d4d' if ontology == \"CC\" and direction == \"under\"\nend\n\n# =============================================================================\ndef statusbar(progress, message=\"\", width=40)\n progressbar = \"=\" * (progress*width).to_i\n progressbar << \" \" while progressbar.length < width\n STDERR.print \"\\r 0% #{progressbar} 100% \"\n STDERR.print \"[#{message}]\" unless message.empty?\n STDERR.print \"\\n\" if progress == 1.0 \nend\n\n# =============================================================================\ndef get_go2name(file)\n go2name = Hash.new\n f = File.open(file, \"r\")\n while (line = f.gets)\n line.chomp!\n id, name = line.split(\"\\t\")[0,2]\n go2name[id] = name\n end\n return go2name\nend\n\n# =============================================================================\ndef parse_topgo_file(file, go2name)\n direction, ontology = file.split('.')[1], file.split('.')[3]\n csvrows = FasterCSV.read(file, :quote_char => '\"', :col_sep => ',', :row_sep => :auto, :headers => true)\n 
csvrows.each do |row|\n pfilter = row[10].to_f\n next unless pfilter < 0.05\n size = -1.0* Math.log(pfilter)\n term = row[2]\n term = go2name[row[1]] if go2name and term[-3,3] == '...'\n color = map_color(ontology, direction)\n puts [term, size.to_s, color].join(\":\")\n end\nend\n\n# =============================================================================\n# === M A I N =================================================================\n# =============================================================================\n\noptions = get_opt()\nabort(\"directory does not exist - aborting.\") unless File.exists?(options[:dir]) and File.directory?(options[:dir])\nabort(\"go2name mapping file does not exist - aborting.\") if options[:go2name] and not File.exists?(options[:go2name])\ngo2name = nil\ngo2name = get_go2name(options[:go2name]) if options[:go2name]\nDir.chdir(options[:dir])\nFILES.each do |file|\n unless File.exists?(file)\n STDERR.puts \"could not find file #{file} in the given dir. skipping...\" unless File.exists?(file)\n next\n end\n parse_topgo_file(file, go2name)\nend\n" }, { "alpha_fraction": 0.5314064025878906, "alphanum_fraction": 0.5357661843299866, "avg_line_length": 31.946807861328125, "blob_id": "8d407237b5e6bf704edef6ed446d7267ff87b409", "content_id": "9260b6745e60bb9ed0ebcf395fc4a39428b7b35a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6193, "license_type": "permissive", "max_line_length": 98, "num_lines": 188, "path": "/python/blast/homology_blast.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t# low level handling, such as command line stuff\nimport string\t\t\t# string methods available\nimport re\t\t\t\t\t# regular expressions\nimport getopt\t\t\t# comand line argument handling\nfrom low import *\t# custom functions, written by myself\n\n# =============================================================================\t\ndef show_help( ):\n\t\"\"\" displays the program parameter list and usage information \"\"\"\n\tstdout( \"usage: \" + sys.argv[0] + \" -f <path> -e <1e-10> -i <70> -p <85> -o <path>\" )\n\tstdout( \" \" )\n\tstdout( \" option description\" )\n\tstdout( \" -h help (this text here)\" )\n\tstdout( \" -f in homolog source file\" )\n\tstdout( \" -e maximum evalue\" )\n\tstdout( \" -i minimum percent identity\" )\n\tstdout( \" -p minimum percent positives\" )\n\tstdout( \" -o out folder\" )\n\tstdout( \" \" )\n\t\n\tsys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n\t\"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\tif len ( sys.argv ) == 1:\n\t\tstderr( \"no arguments provided.\" )\n\t\tshow_help()\t\n\t\n\ttry: # check for the right arguments\n\t\tkeys, values = getopt.getopt( sys.argv[1:], \"hf:o:e:i:p:\" )\n\texcept getopt.GetoptError:\n\t\tstderr( \"invalid arguments provided.\" )\n\t\tshow_help()\n\t\n\targs = {}\n\tfor key, value in keys:\n\t\tif key == '-f': args['in'] = value\n\t\tif key == '-o':\targs['out'] = value\n\t\tif key == '-i':\targs['identities'] = value\n\t\tif key == '-p':\targs['positives'] = value\n\t\tif key == '-e':\targs['evalue'] = value\n\t\t\t\t\n\tif not args.has_key('in'):\n\t\tstderr( \"in file missing.\" )\n\t\tshow_help()\n\tif not file_exists( args.get('in') ):\n\t\tstderr( \"in file does not exist.\" )\n\t\tshow_help()\n\t\t\n\tif not args.has_key('out'):\n\t\tstderr( \"out 
folder missing.\" )\n\t\tshow_help()\n\t\n\tif not dir_exists( args.get('out') ):\n\t\tos.mkdir( args.get('out') )\n\t\n\tif not args['out'].endswith('/'): args['out'] += '/'\n\t\n\treturn args\n\n\n# =============================================================================\ndef get_src_files( file ):\n\treturn read_from_file( file ).splitlines()\n\n# =============================================================================\ndef blast( query, subject ):\n\tinfomsg( \"BLAST %s vs. %s\" %(query, subject) )\n\tblastout = \"blast-out.\" + get_basename(query).replace('red_','') + \"_\" + get_basename(subject)\n\t# formatdb\n\tif file_exists( subject) and not file_exists( subject + '.pin' ):\n\t\tos.system( \"formatdb -i %s\" %(subject) )\n\t# blast\n\tif not file_exists( blastout ):\n\t\tos.system( \"blastall -p blastp -i %s -d %s -o %s\" %( query, subject, blastout ) )\n\treturn blastout\n\t\n# =============================================================================\ndef parse_blastout( file, args ):\n\tparseout = \"parsed\" + os.path.splitext( file ) [1]\n\tprogrammcall = \"parse_blast_out2.py -f \" + file\n\tif args.has_key( 'evalue' ): programmcall += \" -e \" + args.get('evalue')\n\tif args.has_key( 'identities' ): programmcall += \" -i \" + args.get('identities')\n\tif args.has_key( 'positives' ): programmcall += \" -p \" + args.get('positives')\n\tif not file_exists( parseout ):\n\t\tos.system( programmcall + \" > \" + parseout )\n\thash = {}\n\tfo = open(parseout)\n\tfor line in fo:\n\t\thash[ line.split()[1] ] = 1\n\tfo.close()\n\treturn parseout, hash\n\n# =============================================================================\ndef get_orthologs( hashes, id, orthologlist ):\n\tif not hashes.has_key( id[0:2]): return orthologlist\n\tsearchhash = hashes.get(id[0:2])\n\tif not searchhash.has_key(id): return orthologlist\n\torths = searchhash.get(id)\n\tfor o in orths:\n\t\tif o in orthologlist: continue\n\t\torthologlist.append(o)\n\t\tget_orthologs( hashes, o, orthologlist )\n\t\t\n\treturn orthologlist\n\n# =============================================================================\ndef integrate_all_homologs( files, args ):\n\t# gather all data into a hash per pair\n\thashes = {}\n\tfor file in files:\n\t\thash = {}\n\t\tfo = open( file, 'r')\n\t\tfor line in fo:\n\t\t\tqid, sid = line.split()[0:2]\n\t\t\tif not hash.has_key( qid ): hash[ qid ] = [sid]\n\t\t\telse: hash[ qid ].append( sid )\n\t\tfo.close()\n\t\thashes[ qid[0:2] ] = hash\n\t\n\t# iterate all hashes, integrate homologous relationship into a single big hash\n\t# take the first hash (query ids) as the reference keyset\n\tfw = open( args.get('out') + 'homologous-clusters.txt', 'w' )\n\tHomologs = {}\n\tfor searchid in hashes.get('PO').keys():\n\t\torthologlist = get_orthologs( hashes, searchid, [] )\n\t\tHomologs[ searchid ] = orthologlist\n\t\tfw.write( str(len(orthologlist)+1)+' '+ searchid + ' ' + string.join(orthologlist, ' ') + '\\n' )\n\t\n\tfw.flush()\n\tfw.close()\n\treturn Homologs\n\t\t\t\n\n# =============================================================================\n# =============================================================================\ndef main( args ):\n\t\n\tsourcefiles = get_src_files( args.get('in') )\n\thashofhos = None\n\tparsedfiles = []\n\tfor i in range( len(sourcefiles)-1 ):\n\t\t\n\t\tqueryfile = sourcefiles[i]\n\t\tsubjectfile = sourcefiles[i+1]\n\t\t\n\t\tif hashofhos:\n\t\t\tidfile = 'keepids.tmp'\n\t\t\tfw = open( idfile, 'w' )\n\t\t\tfor id in 
hashofhos.keys():\tfw.write( id + '\\n')\t\t\t\n\t\t\tfw.flush()\n\t\t\tfw.close()\n\t\t\toutfile = 'red_' + get_basename(queryfile) + '.aa'\n\t\t\tos.system( 'reduce_fasta_file.py -f %s -i %s -o %s' %(queryfile,idfile,outfile) )\n\t\t\tqueryfile = outfile\n\t\t\n\t\tblastout = blast( queryfile, subjectfile )\n\t\tparsedfile, hashofhos = parse_blastout( blastout, args )\n\t\tparsedfiles.append( parsedfile )\n\t\tinfomsg( \"hits: %s\" %len(hashofhos) )\n\t\t\n\tHomologs = integrate_all_homologs( parsedfiles, args )\n\t\n\t# stats\n\tno = []\n\tfor sid, orthlist in Homologs.iteritems():\n\t\tn = len(orthlist) + 1\n#\t\tinfomsg( str(n) )\n\t\tno.append(n)\n\t\n\tfrom rpy import r\n\toutfile = 'hist_size_homol_sets.pdf'\n\ttitle = 'Size of Homologous Sets'\n\tx = 'number of homologs'\n\ty = 'frequency'\n\tr.pdf( outfile )\n\tr.hist(no, xlab=x, ylab=y, main=title, col='grey', breaks=max(no))\n\tr.dev_off()\n\t\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )" }, { "alpha_fraction": 0.42791128158569336, "alphanum_fraction": 0.464695006608963, "avg_line_length": 32.8125, "blob_id": "81e9a26182f2b83ce616a36ef76d4b645822dd23", "content_id": "b4ff44c8ae08200fefe6e1300625aefc51791ac2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5410, "license_type": "permissive", "max_line_length": 235, "num_lines": 160, "path": "/python/paml/paml-lrt-bic.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular expressions\nimport getopt\t\t\t\t\t# comand line argument handling\nfrom low import *\t\t\t# custom functions, written by myself\nimport math\n\n# =============================================================================\t\ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f nt alignment file\" )\n stdout( \" -s simple mode: only test between M0 and FreeRatio\" )\n stdout( \" \" )\n\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help()\t\n\n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:t:s\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f':\targs['file'] = value\n if key == '-s':\targs['simple'] = 1\n \n if not args.has_key('file'):\n stderr( \"file missing.\" )\n show_help()\n if not file_exists( args.get('file') ):\n stderr( \"file does not exist.\" )\n show_help()\n \n return args\n\n\n# =============================================================================\ndef LRT( lnL1, lnL2 ):\n \"\"\" \n The LRT statistic, or twice the log likelihood difference between the two compared models (2{Delta}{ell}), may be compared against 
a chi-square distribution with two degrees of freedom, with critical values of 5.99 and 9.21 at the 5% and 1% significance levels, respectively.\n  \"\"\"\n  lnL1 = float(lnL1)\n  lnL2 = float(lnL2)\n  return (2 * math.fabs( (lnL1 - lnL2) ))\n\n# =============================================================================\ndef BIC( np, lnL, length ):\n  \"\"\"\n  BIC = -2 ln( L ) + k ln(n),\n  n = number of observations = sample size = alignment length (nt)\n  k = number of parameters\n  ln( L ) = PAML lnL = log likelihood\n  \"\"\"\n  lnL = float(lnL)\n  np = int(np)\n  length = int(length)\n  return ( ((-2)* lnL) + (np * math.log(length)) )\n\n# =============================================================================\n# =============================================================================\ndef main( args ):\n  \n  fo = open( args.get('file'), 'r' )\n  for line in fo:\n    line = line.rstrip()\n    columns = line.split(\"\\t\")\n    name = columns.pop(0)\n    length = columns.pop(0)\n    MH = {}\n    while len( columns ) > 2:\n      hash = {}\n      Modelname = columns.pop(0)\n      np = int(columns.pop(0))\n      lnL = float(columns.pop(0))\n      MH[ Modelname ] = [ np, lnL, Modelname ]\n    \n    if args.has_key('simple'):\n      # Free vs. M0\n      lrt = LRT( MH.get(\"Free\")[1], MH.get(\"M0\")[1])\n      df = 2* ( int(MH.get(\"Free\")[0]) - int(MH.get(\"M0\")[0]) )\n      L = [name, str(lrt), str(df)]\n      print string.join( L, \"\\t\" )\n\n    else:\n      # M7 vs. M8\n      lnL_M7 = MH.get(\"M7\")[1]\n      lnL_M8 = MH.get(\"M8\")[1]\n      if lnL_M7 > lnL_M8: MH[ \"M7M8\" ] = MH.get(\"M7\")\n      else: MH[ \"M7M8\" ] = MH.get(\"M8\")\n\n      # M3K2 vs. M0\n      if MH.get(\"M3K2\")[1] > MH.get(\"M0\")[1]: MH[ \"M3K2M0\" ] = MH.get(\"M3K2\")\n      else: MH[ \"M3K2M0\" ] = MH.get(\"M0\")\n      \n      # M3K3 vs. M0\n      if MH.get(\"M3K3\")[1] > MH.get(\"M0\")[1]: MH[ \"M3K3M0\" ] = MH.get(\"M3K3\")\n      else: MH[ \"M3K3M0\" ] = MH.get(\"M0\")\n\n      # M3 vs. M0\n      if MH.get(\"M3K2\")[1] > MH.get(\"M0\")[1] and MH.get(\"M3K3\")[1] > MH.get(\"M0\")[1]:\n        # BIC\n        BIC_M3K2 = BIC( MH.get(\"M3K2\")[0], MH.get(\"M3K2\")[1], length )\n        BIC_M3K3 = BIC( MH.get(\"M3K3\")[0], MH.get(\"M3K3\")[1], length )\n        if BIC_M3K2 < BIC_M3K3: MH[ \"M3M0\" ] = MH.get(\"M3K2\")\n        else: MH[ \"M3M0\" ] = MH.get(\"M3K3\")\n      elif MH.get(\"M3K2\")[1] > MH.get(\"M0\")[1]: MH[ \"M3M0\" ] = MH.get(\"M3K2\")\n      elif MH.get(\"M3K3\")[1] > MH.get(\"M0\")[1]: MH[ \"M3M0\" ] = MH.get(\"M3K3\")\n      else: MH[ \"M3M0\" ] = MH.get(\"M0\")\n\n      # Free vs. M0\n      if MH.get(\"Free\")[1] > MH.get(\"M0\")[1]: MH[ \"FreeM0\" ] = MH.get(\"Free\")\n      else: MH[ \"FreeM0\" ] = MH.get(\"M0\")\n\n      # now compare winners of\n      # - M0 vs Free\n      # - M0 vs M3.2/3\n      # - M7 vs M8\n\n      # M3M0 vs. 
FreeM0\n HB = {}\n for M in [\"M3M0\", \"M7M8\", \"FreeM0\"]:\n HB[ MH.get(M)[2] ] = BIC( MH.get(M)[0], MH.get(M)[1], length )\n \n for name, array in MH.iteritems():\n print name, \"--\", array\n# print \"MH:\", MH\n print \"HB:\", HB\n \n sortedKeys = sort_by_value( HB )\n L = []\n for key in sortedKeys[0:1]:\n L.append( key )\n L.append( str(HB.get(key)) )\n print string.join( L, \"\\t\" )\n #BIC_M3M0 = BIC( MH.get(\"M3M0\")[0], MH.get(\"M3M0\")[1], length )\n #BIC_FreeM0 = BIC( MH.get(\"FreeM0\")[0], MH.get(\"FreeM0\")[1], length )\n #BIC_M7M8 = BIC( MH.get(\"M7M8\")[0], MH.get(\"M7M8\")[1], length )\n \n \n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.5171994566917419, "alphanum_fraction": 0.5322718024253845, "avg_line_length": 30.0415096282959, "blob_id": "6beca957e9baf7e4b517ffce49c826e722a4bf78", "content_id": "10d4ca7fa1b53c2d94deaa998a40fbd766ed39bd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8227, "license_type": "permissive", "max_line_length": 101, "num_lines": 265, "path": "/python/misa/orth-all-pairwise-ssr-comparison.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nimport hashlib\nfrom low import * # custom functions, written by myself\nfrom misa import MisaSSR\nimport newick\nfrom collections import defaultdict\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -a <path> -b <path> -o <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f combined misa output\" )\n stdout( \" -o clustered flybase dmel ortholog file\" )\n stdout( \" -t newick treew with branch lengths\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:o:t:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['misa'] = value\n if key == '-o': args['orth'] = value\n if key == '-t': args['tree'] = value\n \n if not args.has_key('misa'):\n stderr( \"misa file argument missing.\" )\n show_help()\n elif not file_exists( args.get('misa') ):\n stderr( \"misa file does not exist.\" )\n show_help()\n\n if not args.has_key('orth'):\n stderr( \"orth file argument missing.\" )\n show_help()\n elif not file_exists( args.get('orth') ):\n stderr( \"orth file does not exist.\" )\n show_help()\n \n return args\n\n\ndef get_distances(file):\n tree = open(file).readline().strip()\n ancestral_nodes = []\n leaves = {}\n while 1:\n # END OF TREE: semicolon\n if 
tree.startswith(\";\"): break\n\n # START INNER NODE\n if tree.startswith(\"(\"):\n tree = tree[1:]\n n = newick.Node()\n if len(ancestral_nodes) > 0: n.parent = ancestral_nodes[-1]\n ancestral_nodes.append(n)\n continue\n\n # END INNER NODE\n if tree.startswith(\")\"):\n tree = tree[1:]\n# print \"end of an\"\n if re.match(\":(\\d+)\", tree):\n distance = re.match(\":(\\d+)\", tree).group(1)\n ancestral_nodes[-1].distance_to_parent = distance\n# print \" ... distance:\", distance\n while re.match(\"[:\\d]+\", tree): tree = tree[1:]\n ancestral_nodes.pop(-1)\n continue\n\n # OUTER NODE SINGLE\n if re.match(\",([A-Za-z]+):(\\d+)\\)\", tree):\n els = re.match(\",([A-Za-z]+):(\\d+)\", tree).groups()\n n1 = newick.Node()\n n1.parent = ancestral_nodes[-1]\n n1.distance_to_parent = els[1]\n leaves[els[0]] = n1\n# print \"single node found:\", els[0], \"distance:\", els[1]\n while not tree.startswith(\")\"): tree = tree[1:]\n continue\n\n # OUTER NODE DOUBLE\n if re.match(\"([A-Za-z]+):(\\d+),([A-Za-z]+):(\\d+)\", tree):\n els = re.match(\"([A-Za-z]+):(\\d+),([A-Za-z]+):(\\d+)\", tree).groups()\n n1 = newick.Node()\n n1.parent = ancestral_nodes[-1]\n n1.distance_to_parent = els[1]\n n1.distance_to_parent = els[1]\n n2 = newick.Node()\n n2.parent = ancestral_nodes[-1]\n n2.distance_to_parent = els[3]\n leaves[els[0]] = n1\n leaves[els[2]] = n2\n# print \"double node found:\", els[0], els[2], \"distances:\", els[1], els[3]\n while not tree.startswith(\")\"): tree = tree[1:]\n continue\n\n # INTERNAL INNER NODE\n if tree.startswith(\",(\"):\n tree = tree[2:]\n n = newick.Node()\n if len(ancestral_nodes) > 0: n.parent = ancestral_nodes[-1]\n ancestral_nodes.append(n)\n continue\n\n if tree.startswith(\",\"):\n #ancestral_nodes[-1].parent = ancestral_nodes[-2]\n #ancestral_nodes.pop()\n tree = tree[1:]\n continue\n\n distances = {}\n for species1, leafnode1 in leaves.iteritems():\n for species2, leafnode2 in leaves.iteritems():\n distances[species1 + \",\" + species2] = str(leafnode1.summed_distance_to(leafnode2))\n return distances\n\n\n\n\n\n\ndef get_orthologs(file):\n orthologs = []\n fo = open(file)\n for line in fo:\n if line.startswith(\"#\"): continue\n if len(line.rstrip()) == 0: continue\n columns = line.rstrip().split(\"\\t\")\n orthologs.append(columns)\n fo.close()\n return orthologs\n\n\ndef get_ssrs(file):\n hash = defaultdict(list)\n fo = open(file)\n for line in fo:\n if line.startswith(\"ID\\t\"): continue\n m = MisaSSR(line)\n hash[m.geneid].append(m)\n fo.close()\n return hash\n\n\ndef hash(s):\n return hashlib.sha224(s).hexdigest()\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n \n ssrs = get_ssrs(args['misa'])\n orthologclusters = get_orthologs(args['orth'])\n distances = get_distances(args['tree'])\n \n perfect, poly, shift, loss = defaultdict(int), defaultdict(int), defaultdict(int), defaultdict(int)\n for cluster in orthologclusters:\n for i in range(len(cluster)):\n query = cluster[i]\n qid = query[:query.index(\"(\")]\n qspecies = query[query.index(\"(\")+1:-1]\n q_ssrs = ssrs[qid]\n for j in range(i+1,len(cluster)):\n ortholog = cluster[j]\n oid = ortholog[:ortholog.index(\"(\")]\n ospecies = ortholog[ortholog.index(\"(\")+1:-1]\n o_ssrs = ssrs[oid]\n key = [qspecies, ospecies]\n key.sort()\n key = string.join(key, \",\")\n if len(q_ssrs) == 0 and len(o_ssrs) 
== 0: \n break\n if len(q_ssrs) == 0:\n loss[key] += len(o_ssrs)\n break\n if len(o_ssrs) == 0:\n loss[key] += len(q_ssrs)\n break\n\n # stage 1: perfect matches\n caught = {}\n for m1 in q_ssrs: \n for m2 in o_ssrs:\n if caught.has_key(hash(m1.to_s())) or caught.has_key(hash(m2.to_s())): continue\n if m1.is_perfect_match_to(m2):\n# print \"\\nperfect match\"\n# print m1.to_s()\n# print m2.to_s()\n perfect[key] += 1\n caught[hash(m1.to_s())] = 1\n caught[hash(m2.to_s())] = 1\n\n # stage 2: polymorphic matches (same motif, but different number of repeats)\n for m1 in q_ssrs: \n for m2 in o_ssrs:\n if caught.has_key(hash(m1.to_s())) or caught.has_key(hash(m2.to_s())): continue\n if m1.is_polymorphic_to(m2):\n# print \"\\npolymorphic match\"\n# print m1.to_s()\n# print m2.to_s()\n poly[key] += 1\n caught[hash(m1.to_s())] = 1\n caught[hash(m2.to_s())] = 1\n\n # stage 3: shifted matches (motif is shifted [permuated])\n for m1 in q_ssrs: \n for m2 in o_ssrs:\n if caught.has_key(hash(m1.to_s())) or caught.has_key(hash(m2.to_s())): continue\n if m1.is_shifted_to(m2):\n# print \"\\nshifted match\"\n# print m1.to_s()\n# print m2.to_s()\n shift[key] += 1\n caught[hash(m1.to_s())] = 1\n caught[hash(m2.to_s())] = 1\n\n mapped = len(caught) / 2\n# print \"\\nUncaught between genes\", gid1, \"and\", gid2\n# for m1 in ssrs:\n# if caught.has_key(hash(m1.to_s())): continue\n# print spec1 + \"\\t\" + m1.to_s()\n#\n# for m2 in ossrs:\n# if caught.has_key(hash(m2.to_s())): continue\n# print spec2 + \"\\t\" + m2.to_s()\n loss[key] += len(q_ssrs) + len(o_ssrs) - mapped \n\n\n keys = perfect.keys()\n keys.sort()\n for key in keys:\n perfectcount = str(perfect[key])\n polycount = str(poly[key])\n shiftcount = str(shift[key])\n losscount = str(loss[key])\n time = str(distances[key])\n print string.join([key, time, perfectcount, polycount, shiftcount, losscount], \"\\t\")\n\n\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5057855248451233, "alphanum_fraction": 0.5101568698883057, "avg_line_length": 30.362903594970703, "blob_id": "03d402efea7edaffe5b04e95167279ff5e2e9fc8", "content_id": "88dfc4131f4af9b93962b6678def7a605178abc8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3889, "license_type": "permissive", "max_line_length": 83, "num_lines": 124, "path": "/python/fasta/get-cluster-sequences.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular expressions\nimport getopt\t\t\t\t\t# comand line argument handling\nfrom low import *\t\t\t# custom functions, written by myself\nimport anydbm\n\n# =============================================================================\t\ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f orth.file\" )\n stdout( \" -d dbm file (indexed fasta file)\" )\n stdout( \" -e file extension of the fasta files to create (e.g. 
'.aa')\" )\n stdout( \" \" )\n\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help()\t\n\n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:d:e:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f':\targs['file'] = value\n if key == '-d':\targs['dbm'] = value\n if key == '-e':\targs['ext'] = value\n \n if not args.has_key('file'):\n stderr( \"cluster file missing.\" )\n show_help()\n if not file_exists( args.get('file') ):\n stderr( \"cluster file does not exist.\" )\n show_help()\n \n if not args.has_key('dbm'):\n stderr( \"dbm file missing.\" )\n show_help()\n if not file_exists( args.get('dbm') ):\n stderr( \"dbm file does not exist.\" )\n show_help()\n\n if not args.has_key('ext'):\n args['ext'] = '.fasta'\n\n\n return args\n\n#\ndef get_shorthand( id ):\n if id.startswith(\"AT\"):\n return \"Arath\"\n elif id.startswith(\"Pooc_\"):\n return \"Pooc\"\n elif id.startswith(\"Zoma_\"):\n return \"Zoma\"\n elif id.startswith(\"LOC_\"):\n return \"Oryza\"\n elif id.startswith(\"IMGA|\"):\n return \"Medicago\"\n elif id.startswith(\"jgi|Phypa1_1|\"):\n return \"Physco\"\n elif id.startswith(\"jgi|Poptr1_1|\"):\n return \"Populus\"\n elif id.startswith(\"GSVIV\"):\n return \"Vitis\"\n elif id.startswith(\"AC\"):\n return \"Zea\"\n elif id.startswith(\"Sbi\"):\n return \"Sorghum\"\n else:\n stderr( \"did not find a suitable shorthand for this id: %s\" % id )\n\n# =============================================================================\n# =============================================================================\ndef main( args ):\n dbm = anydbm.open( args.get('dbm'), 'r' )\n fo = open( args.get('file') )\n counter = 0\n stdouttext, stderrtext = catch_bash_cmd_output(\"wc -l %s\" %args.get('file') )\n #print \" stdout: %s\" % stdouttext\n #print \" stderr: %s\" % stderrtext\n clusternum = int(stdouttext.split()[0])\n\n for line in fo:\n counter += 1\n line = line.rstrip()\n ids = line.split(\"\\t\")[1:]\n c = add_leading_zeroes(counter, len(str(clusternum)))\n filename1 = \"orth.cluster.%s%s\" % (c, args.get('ext'))\n filename2 = \"orth.cluster.%s%s\" % (c, '.ids')\n fw = open( filename1, \"w\" )\n fwid = open( filename2, \"w\" )\n for id in ids:\n if not dbm.has_key( id ):\n stderr( \"%s: id %s not available in the sequence hash\" % (filename1, id) )\n continue\n fwid.write(\"%s\\n\" % id )\n fw.write(\">%s\\n%s\\n\" %( get_shorthand(id), dbm.get(id) ) )\n fw.close()\n fwid.close()\n fo.close()\n dbm.close()\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.48782405257225037, "alphanum_fraction": 0.4893951416015625, "avg_line_length": 34.36111068725586, "blob_id": "0272b5a05b01f58cbdb8f24409138478a8b8d904", "content_id": "85ef7ca07754be8d991a017fb50c7040b49773d9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2546, "license_type": "permissive", "max_line_length": 104, "num_lines": 72, "path": 
"/python/latex-bibtex/latex-rename.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular expressions\nimport getopt\t\t\t\t\t# comand line argument handling\nfrom low import *\t\t\t# custom functions, written by myself\n\n# =============================================================================\t\ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"renames files so that they can be included in latex documents.\" )\n stdout( \"this means that all dots are removed except for the last one of the actual file extension.\" )\n stdout( \"dots are replaced by \\\"_\\\" by default.\" )\n stdout( \"usage: \" + sys.argv[0] + \" -f <path> [-r <x>]\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f fasta file\" )\n stdout( \" -r replace dot with this sign (default: \\\"_\\\")\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n\t\"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\tif len ( sys.argv ) == 1:\n\t\tstderr( \"no arguments provided.\" )\n\t\tshow_help()\t\n\t\n\ttry: # check for the right arguments\n\t\tkeys, values = getopt.getopt( sys.argv[1:], \"hf:r:\" )\n\texcept getopt.GetoptError:\n\t\tstderr( \"invalid arguments provided.\" )\n\t\tshow_help()\n\t\n\targs = {}\n\tfor key, value in keys:\n\t\tif key == '-f': args['file'] = value\n\t\tif key == '-r':\targs['r'] = str(value)\n\t\t\t\t\n\tif not args.has_key('file'):\n\t\tstderr( \"file missing.\" )\n\t\tshow_help()\n\tif not file_exists( args.get('file') ):\n\t\tstderr( \"file does not exist.\" )\n\t\tshow_help()\n\t\t\n\treturn args\n\n\t\n# =============================================================================\n# =============================================================================\ndef main( args ):\n oldfilename = args.get('file')\n path, filename = os.path.split(oldfilename)\n base, ext = os.path.splitext(filename)\n if args.has_key('r'): r = args.get('r')\n else: r = '_'\n base = base.replace('.',r)\n if path != \"\":\n newfilename = path + '/' + base + ext\n else:\n newfilename = base + ext\n os.system( \"mv %s %s\" %(oldfilename, newfilename) )\n\t\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.49272486567497253, "alphanum_fraction": 0.49669313430786133, "avg_line_length": 30.17525863647461, "blob_id": "b020141c66680d7be5123c2fb7a5ccdbc096cb06", "content_id": "4686fcc69c51c9c4f822649ee24c3dadec5485d5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3024, "license_type": "permissive", "max_line_length": 83, "num_lines": 97, "path": "/python/blast/remove-from-blastout.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument 
handling\nfrom low import * # custom functions, written by myself\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path> -i <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -b blastout file (-m 8)\" )\n stdout( \" -i file with the IDs to keep\" )\n stdout( \" \" )\n \n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hi:b:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n \n args = {}\n args['verbose'] = 0\n for key, value in keys:\n if key == '-b': args['in-blastout'] = value\n if key == '-i': args['in-ids'] = value\n\n if not args.has_key('in-blastout'):\n stderr( \"in-blastout file missing.\" )\n show_help()\n if not args.has_key('in-ids'):\n stderr( \"in-ids file missing.\" )\n show_help()\n \n if not file_exists( args.get('in-blastout') ):\n stderr( \"in-blastout file does not exist.\" )\n show_help()\n if not file_exists( args.get('in-ids') ):\n stderr( \"in-ids file does not exist.\" )\n show_help()\n \n return args\n\n# =============================================================================\ndef get_ids_to_remove( args ):\n \"\"\"\n reads in the in-ids file and gathers all IDs to which\n the out fasta file will be reduced to.\n \"\"\"\n fo = open( args.get('in-ids'), 'r' )\n ids = {}\n for line in fo:\n line = line.rstrip()\n ids[ line.replace('>','') ] = 1\n fo.close()\n return ids\n \n \n# =============================================================================\ndef reduce_blastout( args, rmids ):\n \"\"\"\n reads in in-fasta and creates out-fasta that only contains the records\n whose id is contained in the hash keepids.\n \"\"\"\n \n retained = 0\n fo = open( args.get('in-blastout') )\n for line in fo:\n line = line.rstrip()\n if len(line) == 0: continue\n hid, qid = line.split(\"\\t\")[0:2]\n if rmids.has_key(hid) or rmids.has_key(qid): continue\n print line\n retained += 1\n fo.close()\n \n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nrmids = get_ids_to_remove( args )\nreduce_blastout( args, rmids )\n" }, { "alpha_fraction": 0.47587719559669495, "alphanum_fraction": 0.47894737124443054, "avg_line_length": 31.098590850830078, "blob_id": "8a9d0ccd4f33f97b94cc714ec37e668ad93a2969", "content_id": "9f0222b9af0ab2dd99455d388a75766bf960de56", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2280, "license_type": "permissive", "max_line_length": 84, "num_lines": 71, "path": "/python/fasta/create-clusters.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument 
handling\nimport math # match functions\nfrom low import * # custom functions, written by myself\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f ortholog cluster flat file to import\" )\n stdout( \" -p prefix to put in front of the number\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:p:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n args['prefix'] = 'orth.cluster.'\n for key, value in keys:\n if key == '-f': args['file'] = value\n if key == '-p': args['prefix'] = value\n \n if not args.has_key('file'):\n stderr( \"import file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"import file does not exist.\" )\n show_help()\n \n return args\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n \n\n counter = 0\n fo = open( args.get('file') )\n\n for line in fo:\n counter += 1\n fw = open( args.get('prefix') + add_leading_zeroes( counter, 3 ) + '.ids', 'w' )\n ids = line.split()\n for id in ids: fw.write( id + \"\\n\" )\n fw.close()\n\n fo.close()\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.47805139422416687, "alphanum_fraction": 0.485813707113266, "avg_line_length": 30.644067764282227, "blob_id": "81076836c41de26b1c4ea272f0364ca62b6c31f7", "content_id": "9263505c2a0991e6dfd7429a4a0e70a8ea2d798f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3736, "license_type": "permissive", "max_line_length": 83, "num_lines": 118, "path": "/python/geneontology/goodness-of-fit.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nimport sqlite3\nimport glob\nimport newick\nimport pylab\nimport rpy2.robjects as rpy2\nimport numpy\nimport copy\nfrom low import * # custom functions, written by myself\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f CSV file with two follows (first x, second y)\" )\n stdout( \" -t type of fit [linear|log|exp]\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef 
handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:t:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {'fit': \"linear\"}\n for key, value in keys:\n if key == '-f': args['csvfile'] = value\n if key == '-t': args['fit'] = value\n \n if not args.has_key('csvfile'):\n stderr( \"csv file argument missing.\" )\n show_help()\n elif not file_exists( args.get('csvfile') ):\n stderr( \"csv file does not exist.\" )\n show_help()\n \n return args\n\n\n# =============================================================================\ndef statusbar(counter, total, message=\"\", width=50):\n fraction = 1.0*counter/total\n progressbar = int(width*fraction) * \"=\"\n while len(progressbar) < width: progressbar += \" \"\n sys.stderr.write(\"\\r 0% [\" + progressbar + \"] 100% \")\n if message != \"\": sys.stderr.write(\"| \" + message)\n if fraction == 1.0: sys.stderr.write(\"\\n\")\n\n# =============================================================================\ndef get_xy(csvfile):\n x, y = [], []\n fo = open(csvfile)\n for line in fo:\n col = line.rstrip().split(',')\n x.append(float(col[0]))\n y.append(float(col[1])) \n fo.close()\n return x, y\n \n\n# =============================================================================\ndef get_fit_params(x, y, fit):\n R = rpy2.r\n rpy2.globalEnv['x'] = rpy2.FloatVector(x)\n rpy2.globalEnv['y'] = rpy2.FloatVector(y)\n ymean = numpy.average(y)\n SStot, SSerr = [], []\n \n if fit == 'linear':\n lmfit = R.lm('y ~ x')\n elif fit == 'log':\n lmfit = R.lm('y ~ log(x)')\n elif fit == 'exp':\n lmfit = R.lm('y ~ exp(x)')\n a, b = lmfit[0][0], lmfit[0][1]\n \n for i in range(len(x)):\n SStot.append(numpy.square(y[i]-ymean))\n if fit == 'linear':\n SSerr.append(numpy.square(y[i]-(a+x[i]*b)))\n elif fit == 'log':\n SSerr.append(numpy.square(y[i]-(a+numpy.log(x[i])*b)))\n elif fit == 'exp':\n SSerr.append(numpy.square(y[i]-(a+numpy.exp(x[i])*b)))\n SStot, SSerr = sum(SStot), sum(SSerr)\n rsquared = 1 - (SSerr / SStot)\n return a, b, rsquared\n \n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n x, y = get_xy(args['csvfile'])\n a, b, rsquared = get_fit_params(x, y, args['fit'])\n print rsquared\n\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n\n" }, { "alpha_fraction": 0.5361478328704834, "alphanum_fraction": 0.5406776666641235, "avg_line_length": 35.30921173095703, "blob_id": "ffd576d67ee190fcd92dffd0d5228fd036bcd89d", "content_id": "e2ac1fa6fadaf30c27ab170e2b5bc7f866b4d050", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5519, "license_type": "permissive", "max_line_length": 193, "num_lines": 152, "path": "/python/misa/ssr-to-pfam.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport getopt # comand line argument handling\nfrom collections import defaultdict\nfrom low import * # 
custom functions, written by myself\nfrom misa import MisaSSR\nfrom pfam import PfamDomain\nimport rpy2.robjects as robjects\nR = robjects.r\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" -d <gff-folder>\"\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -p pfam annotation of proteins\" )\n stdout( \" -m misa file incl. protein in last column\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hm:p:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-m': args['misa'] = value\n if key == '-p': args['pfam'] = value\n \n if not args.has_key('misa'):\n print >> sys.stderr, \"misa file argument missing.\"\n show_help()\n elif not file_exists( args.get('misa') ):\n print >> sys.stderr, \"misa file does not exist.\"\n show_help()\n\n if not args.has_key('pfam'):\n print >> sys.stderr, \"pfam file argument missing.\"\n show_help()\n elif not file_exists( args.get('pfam') ):\n print >> sys.stderr, \"pfam file does not exist.\"\n show_help()\n\n return args\n\n\n# =============================================================================\ndef get_ssrs(file):\n hash = defaultdict(list)\n fo = open(file)\n for line in fo: \n if line.startswith(\"ID\\t\"): continue\n m = MisaSSR(line)\n hash[m.geneid].append(m)\n fo.close()\n print >> sys.stderr, \"read %s microsatellites\" % len(hash)\n return hash\n\n# =============================================================================\ndef get_pfam(file):\n hash = defaultdict(list)\n counthash = defaultdict(int)\n fo = open(file)\n for line in fo:\n pd = PfamDomain(line)\n hash[pd.get_attr('seq_id')].append(pd)\n counthash[pd.get_attr('hmm_acc') + '|' + pd.get_attr('hmm_name')] += 1\n fo.close()\n print >> sys.stderr, \"read %s pfam annotations\" % len(hash)\n return hash, counthash\n\n# =============================================================================\ndef sd(pylist):\n rvec = robjects.FloatVector(pylist)\n sd = R['sd'](rvec)[0]\n return sd\n\n# =============================================================================\ndef pnorm(value, mean, sd):\n p = R['pnorm'](value, mean=mean, sd=sd, lower=False)[0]\n return p\n\n# =============================================================================\ndef fisher_test(pylist):\n rcountsvec = robjects.IntVector(pylist)\n rmatrix = R['matrix'](rcountsvec,2,2)\n p = R['fisher.test'](rmatrix, alternative=\"greater\")[0][0]\n p = float(p)\n pylist.append(p)\n print >> sys.stderr, string.join([str(x) for x in pylist],\"\\t\")\n return p\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n SSRs = get_ssrs(args['misa']) \n PFAMs, pfamglobalcounts = get_pfam(args['pfam']) \n pfamhits = defaultdict(int)\n for sid, SSRs in 
SSRs.iteritems():\n for SSR in SSRs:\n for PFAM in PFAMs[SSR.feature]:\n if SSR.startpos < int(PFAM.get_attr('alignment_start')) and SSR.endpos < int(PFAM.get_attr('alignment_start')): continue\n if SSR.startpos > int(PFAM.get_attr('alignment_end')) and SSR.endpos > int(PFAM.get_attr('alignment_end')): continue\n pfamhits[PFAM.get_attr('hmm_acc') + '|' + PFAM.get_attr('hmm_name')] += 1\n\n totalssrsindomains = 0\n for pfam, count in pfamhits.iteritems(): totalssrsindomains += count\n totaldomains = 0\n for id, count in pfamglobalcounts.iteritems(): totaldomains += count\n\n ratios = {}\n totalssrcount = sum(pfamhits.values())\n totaldomaincount = sum(pfamglobalcounts.values())\n expectedfreq = 1.0*totalssrcount/totaldomaincount\n for id, count in pfamhits.iteritems():\n p = fisher_test([count, max([0,pfamglobalcounts[id] - count]), totalssrcount - count, totaldomaincount - max([0,pfamglobalcounts[id] - count])])\n ratios[id] = p\n\n pvalues = []\n ids = []\n for id, p in ratios.iteritems(): \n pvalues.append(p)\n ids.append(id)\n\n ps = robjects.FloatVector(pvalues)\n psadjusted = tuple(R['p.adjust'](ps, method=\"fdr\"))\n for i in range(len(psadjusted)):\n if psadjusted[i] < 0.05:\n print ids[i] + \"\\t\" + str(ratios[ids[i]]) + \"\\t\" + str(pvalues[i]) + \"\\t\" + str(psadjusted[i])\n\n# for id, p in testeddomains.iteritems():\n# if p < 0.05: print id + \"\\t\" + str(p)\n# print id + \"\\t\" + str(p) #+ \"\\t\" + str(pfamglobalcounts[id] - count) + \"\\t\" + str(totalssrsindomains - count) + \"\\t\" + str(totaldomains - totalssrsindomains - pfamglobalcounts[id] - count)\n\n\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n" }, { "alpha_fraction": 0.5147950649261475, "alphanum_fraction": 0.5351957082748413, "avg_line_length": 29.561798095703125, "blob_id": "26c2c280d32c90cf1d6aa19e95e1d3237b3e0d80", "content_id": "22b68736b417f168031803775add89211359abd0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5441, "license_type": "permissive", "max_line_length": 89, "num_lines": 178, "path": "/python/misa/orth-ssr-comparison.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nimport hashlib\nfrom low import * # custom functions, written by myself\nfrom misa import MisaSSR\nfrom collections import defaultdict\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -a <path> -b <path> -o <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -a misa output of species 1 (Dmel)\" )\n stdout( \" -b misa output of species 2 (Dxxx)\" )\n stdout( \" -o flybase dmel ortholog report file\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"ha:b:o:\" )\n except 
getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-a': args['misa1'] = value\n if key == '-b': args['misa2'] = value\n if key == '-o': args['orth'] = value\n \n if not args.has_key('misa1'):\n stderr( \"misa1 file argument missing.\" )\n show_help()\n elif not file_exists( args.get('misa1') ):\n stderr( \"misa1 file does not exist.\" )\n show_help()\n\n if not args.has_key('misa2'):\n stderr( \"misa2 file argument missing.\" )\n show_help()\n elif not file_exists( args.get('misa2') ):\n stderr( \"misa2 file does not exist.\" )\n show_help()\n \n if not args.has_key('orth'):\n stderr( \"orth file argument missing.\" )\n show_help()\n elif not file_exists( args.get('orth') ):\n stderr( \"orth file does not exist.\" )\n show_help()\n \n return args\n\n\ndef get_orthologs(file, spec2):\n spec2 = spec2.lower()\n orthologs = {}\n fo = open(file)\n for line in fo:\n if line.startswith(\"#\"): continue\n if len(line.rstrip()) == 0: continue\n columns = line.rstrip().split(\"\\t\")\n #print columns\n id1, id2, descr = columns[0], columns[5], columns[6]\n orthspecies = descr[:descr.index(\"\\\\\")].lower()\n if orthspecies != spec2: continue\n orthologs[id1] = id2\n fo.close()\n return orthologs\n\n\ndef get_ssrs(file):\n hash = defaultdict(list)\n fo = open(file)\n for line in fo:\n if line.startswith(\"ID\\t\"): continue\n m = MisaSSR(line)\n hash[m.geneid].append(m)\n fo.close()\n return hash\n\n\ndef hash(s):\n return hashlib.sha224(s).hexdigest()\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n \n spec1 = args['misa1'][:args['misa1'].index(\"-\")]\n spec2 = args['misa2'][:args['misa2'].index(\"-\")]\n orthohash = get_orthologs(args['orth'], spec2)\n ssrs1 = get_ssrs(args['misa1'])\n ssrs2 = get_ssrs(args['misa2'])\n\n perfect, poly, shift, loss = 0, 0, 0, 0\n total = 0\n for gid1, ssrs in ssrs1.iteritems():\n if not orthohash.has_key(gid1): continue\n total += len(ssrs)\n gid2 = orthohash[gid1]\n if not ssrs2.has_key(gid2): \n loss += len(ssrs)\n continue\n\n ossrs = ssrs2[gid2]\n\n # stage 1: perfect matches\n caught = {}\n for m1 in ssrs: \n for m2 in ossrs:\n if caught.has_key(hash(m1.to_s())) or caught.has_key(hash(m2.to_s())): continue\n if m1.is_perfect_match_to(m2):\n# print \"\\nperfect match\"\n# print m1.to_s()\n# print m2.to_s()\n perfect += 1\n caught[hash(m1.to_s())] = 1\n caught[hash(m2.to_s())] = 1\n\n # stage 2: polymorphic matches (same motif, but different number of repeats)\n for m1 in ssrs: \n for m2 in ossrs:\n if caught.has_key(hash(m1.to_s())) or caught.has_key(hash(m2.to_s())): continue\n if m1.is_polymorphic_to(m2):\n# print \"\\npolymorphic match\"\n# print m1.to_s()\n# print m2.to_s()\n poly += 1\n caught[hash(m1.to_s())] = 1\n caught[hash(m2.to_s())] = 1\n\n # stage 3: shifted matches (motif is shifted [permuated])\n for m1 in ssrs: \n for m2 in ossrs:\n if caught.has_key(hash(m1.to_s())) or caught.has_key(hash(m2.to_s())): continue\n if m1.is_shifted_to(m2):\n# print \"\\nshifted match\"\n# print m1.to_s()\n# print m2.to_s()\n shift += 1\n caught[hash(m1.to_s())] = 1\n caught[hash(m2.to_s())] = 1\n\n mapped = len(caught) / 2\n# print \"\\nUncaught between genes\", gid1, \"and\", gid2\n# for m1 in ssrs:\n# if caught.has_key(hash(m1.to_s())): continue\n# print spec1 + \"\\t\" 
+ m1.to_s()\n#\n# for m2 in ossrs:\n# if caught.has_key(hash(m2.to_s())): continue\n# print spec2 + \"\\t\" + m2.to_s()\n loss += len(ssrs) - mapped \n\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" %( spec1, spec2, perfect, poly, shift, loss, total )\n\n\n\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.4974048435688019, "alphanum_fraction": 0.49913495779037476, "avg_line_length": 33, "blob_id": "50a0509e2f15b77ce8bba8df165cac1b73f6a02d", "content_id": "da6f710e488b0728db33dd17fa3e7155b22d190f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3468, "license_type": "permissive", "max_line_length": 83, "num_lines": 102, "path": "/python/paml/codeml-parallel.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular expressions\nimport getopt\t\t\t\t\t# command line argument handling\nimport tempfile\nfrom low import *\t\t\t# custom functions, written by myself\n\n# =============================================================================\t\ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f nt alignment file\" )\n stdout( \" -t tree file (newick format)\" )\n stdout( \" -m models to run (comma separated)\" )\n stdout( \" -p path to PAML codeml\" )\n stdout( \" \" )\n\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help()\t\n\n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:t:m:p:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f':\targs['aln'] = value\n if key == '-t':\targs['tree'] = value\n if key == '-p':\targs['codeml'] = value\n if key == '-m':\targs['models'] = value.split(\",\")\n \n if not args.has_key('aln'):\n stderr( \"aln file missing.\" )\n show_help()\n if not file_exists( args.get('aln') ):\n stderr( \"aln file does not exist.\" )\n show_help()\n \n if not args.has_key('tree'):\n stderr( \"tree file missing.\" )\n show_help()\n if not file_exists( args.get('tree') ):\n stderr( \"tree file does not exist.\" )\n show_help()\n if not args.has_key('models'): \n stderr( \"no models to run.\" )\n show_help()\n\n if not file_exists( args.get('codeml') ):\n stderr( \"codeml binary not found.\" )\n show_help()\n args['pamlfolder'] = os.path.split(args.get('codeml'))[0] + '/'\n if not dir_exists( args.get('pamlfolder') ):\n stderr( \"paml folder does not exist.\" )\n show_help()\n\n return args\n\n# =============================================================================\n# =============================================================================\ndef main( args ):\n \n models = args['models']\n aln, tree = args['aln'], args['tree']\n codemlbin = args['codeml']\n ctlbase = args['pamlfolder'] + 'codeml.ctl.'\n for model in models:\n if 
tree.count(\".\"):\n ext = os.path.splitext(tree)[1]\n outfile = aln+\".codeml\"+ext+\".\"+model\n else:\n outfile = aln+\".codeml.\"+model\n tempdir = tempfile.mkdtemp(suffix=model, prefix='tmp.codeml.', dir='.')\n os.system(\"cp %s %s\" %(aln, tempdir + '/in-aln'))\n os.system(\"cp %s %s\" %(tree, tempdir + '/in-tree'))\n os.system(\"cp %s %s\" %(ctlbase + model, tempdir + '/codeml.ctl'))\n os.chdir(tempdir)\n os.system(codemlbin)\n os.chdir(\"..\")\n os.system(\"mv %s/out-codeml %s\" %(tempdir, outfile))\n os.system(\"rm -rf %s\" % tempdir)\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.49526387453079224, "alphanum_fraction": 0.5006765723228455, "avg_line_length": 34.60240936279297, "blob_id": "3f91c40ee4878685150c06b76f75fd2cede3ae64", "content_id": "e60ec73ee0b6dafea66e4f8aa14dbd5f86ea9ee6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2956, "license_type": "permissive", "max_line_length": 96, "num_lines": 83, "path": "/python/gff/droso-introns-exons.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport getopt # comand line argument handling\nfrom collections import defaultdict\nfrom low import * # custom functions, written by myself\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" -d <gff-folder>\"\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -d folder with gff files to parse\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hd:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-d': args['dir'] = value\n \n if not args.has_key('dir'):\n print >> sys.stderr, \"gff dir argument missing.\"\n show_help()\n elif not dir_exists( args.get('dir') ):\n print >> sys.stderr, \"gff dir does not exist.\"\n show_help()\n\n if not args['dir'].endswith(\"/\"): args['dir'] += '/'\n return args\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n\n def process_gff_line(line, species):\n if line.startswith(\"#\") or len(line.rstrip()) == 0: return\n columns = line.rstrip().split(\"\\t\")\n if len(columns) != 9: return\n type = columns[2]\n if type != \"exon\" and type != \"intron\": return\n chr, start, stop, strand, descr = columns[0], columns[3], columns[4], columns[6], columns[8]\n # id = re.search(\"ID=([^;]+);\", descr).group(1)\n 
sys.stdout.write(species + \"\\t\" + type + \"\\t\")\n print string.join([chr, start, stop], \"\\t\")\n\n# =============================================================================\n\n for filename in os.listdir(args['dir']):\n gzip = 0\n if not filename.endswith(\".gff\") and not filename.endswith(\".gff.gz\"): continue\n species = filename[:filename.index(\"-\")]\n filename = args['dir'] + filename\n if filename.endswith(\".gff.gz\"): gzip = 1\n if gzip: \n os.system(\"gunzip \" + filename)\n filename = filename[:-3]\n fo = open(filename)\n for line in fo: process_gff_line(line, species)\n fo.close()\n if gzip: os.system(\"gzip \" + filename)\n\n\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5016863346099854, "alphanum_fraction": 0.5132096409797668, "avg_line_length": 35.67010498046875, "blob_id": "00e03d99c37def484be7000acd4b63dea4c6f9f7", "content_id": "4bcfa05ad3bf063391a30732476ebd8911aff2d7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3558, "license_type": "permissive", "max_line_length": 127, "num_lines": 97, "path": "/python/fasta/assembly-stats.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # command line argument handling\nfrom low import * # custom functions, written by myself\nimport fasta\nimport numpy\nimport copy\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f fasta file (scaffolds/chromosomes)\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['fastafile'] = value\n \n for key in ['fastafile']:\n if key.endswith(\"file\"):\n if not args_file_exists(args, key): show_help()\n elif key.endswith(\"dir\"):\n if not args_dir_exists(args, key): show_help()\n elif not args.has_key(key):\n print >> sys.stderr, \"missing argument\", key\n show_help()\n return args\n\n# =============================================================================\ndef statusbar(current, total, message=\"\", width=40):\n progress = 1.0*current/total\n if message != \"\": message = \"[\" + message + \"]\"\n progressbar = \"=\" * int(progress*width)\n while len(progressbar) < width: progressbar += \" \" \n sys.stderr.write(\"\\r 0% \" + progressbar + \" 100% \" + message)\n if progress == 1.0: sys.stderr.write(\"\\n\")\n \n# =============================================================================\ndef get_n50_from_lengthhash(sid2length):\n sorted_lengths = sid2length.values()\n sorted_lengths.sort()\n total = 
sum(sid2length.values())\n i, runningsum, threshold = -1, 0, 0.5*total\n while runningsum < threshold:\n i += 1\n runningsum += sorted_lengths[i]\n return sorted_lengths[i] # the length at which the running sum crosses the 50% threshold\n\n# =============================================================================\ndef get_gaps_and_Ns(fastafile):\n gaps, n = 0, 0\n fo = open(fastafile)\n for line in fo:\n if line.startswith(\">\"): continue\n gaps += line.count(\"-\")\n n += line.count(\"N\")\n fo.close()\n return gaps, n\n \n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n sid2length = fasta.get_length_hash(args['fastafile'])\n count = len(sid2length)\n lmin, lmax = min(sid2length.values()), max(sid2length.values())\n mean, median = numpy.mean(sid2length.values()), numpy.median(sid2length.values())\n n50 = get_n50_from_lengthhash(sid2length)\n total = sum(sid2length.values())\n gaps, unresolved = get_gaps_and_Ns(args['fastafile'])\n print string.join([str(e) for e in [args['fastafile'], total, count, lmin, lmax, mean, median, n50, gaps, unresolved]], \"\\t\")\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.6362694501876831, "alphanum_fraction": 0.6450777053833008, "avg_line_length": 29.634920120239258, "blob_id": "2ce8263f01f1a24d2993a1fd22d508de97afc9d0", "content_id": "8565599d2d5720dcb08e44a07308d746e0e7ac71", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1930, "license_type": "permissive", "max_line_length": 133, "num_lines": 63, "path": "/python/orthomcl/map-orthomcl-clusters.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys, string\nfrom low import *\n\n\ndef usage():\n print >> sys.stderr, \"usage: \", sys.argv[0], \" <from> <to> [<orthomcl.out>]\"\n print >> sys.stderr, \"from/to: speciesname or \\\"cluster\\\"\"\n sys.exit(1)\n\n\ndef plausi():\n if len(sys.argv) < 3: usage()\n inTo = sys.argv[2].lower()\n inFrom = sys.argv[1].lower()\n if len(sys.argv) > 3:\n inTable = sys.argv[3]\n else:\n inTable = \"/home/low/workspace/back-to-the-sea-orf-cluster-verification/32-new-orthologs/3-orthomcl-v1.4/noparalogs_orthomcl.out\"\n if not os.path.exists(inTable) or not os.path.isfile(inTable) or not os.path.getsize(inTable) > 0: \n print >> sys.stderr, \"specified orthomcl table file does not exist, is not a file, or is empty\\n\"\n usage()\n return inFrom, inTo, inTable\n\n\nclass OrthoCluster():\n def __init__(self, line):\n descr, genedefs = line.split(\"\\t\")\n genedefs = genedefs.split()\n self.name = descr[:descr.index('(')].lower()\n self.geneHash = {}\n self.speciesHash = {}\n for genedef in genedefs:\n geneid = genedef[:genedef.index('(')]\n species = genedef[genedef.index('(')+1:-1].lower()\n self.geneHash[geneid] = species\n if self.speciesHash.has_key(species): self.speciesHash[species].append(geneid)\n else: self.speciesHash[species] = [geneid]\n\n def get_name(self): return self.name\n def get_count(self): return len(self.geneHash)\n def get_gene_hash(self): return self.geneHash\n def get_species_hash(self): return self.speciesHash\n \n\n\ndef main():\n inFrom, inTo, inTable = plausi()\n fo = open(inTable)\n for line in fo:\n o = OrthoCluster(line.rstrip())\n speciesHash = 
o.get_species_hash()\n name = o.get_name()\n mapfrom, mapto = \"\", \"\"\n if inFrom == \"cluster\": mapfrom = name\n else: mapfrom = speciesHash[inFrom][0]\n if inTo == \"cluster\": mapto = name\n else: mapto = speciesHash[inTo][0]\n print mapfrom + \"\\t\" + mapto\n fo.close()\n\n\nmain()\n" }, { "alpha_fraction": 0.4895554482936859, "alphanum_fraction": 0.49089449644088745, "avg_line_length": 33.96907043457031, "blob_id": "e5c351040b7b6047e9ca7aa54ffe033ee23c7237", "content_id": "790dc7a15f3e10652fa91d703d5d33b46dee027c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3734, "license_type": "permissive", "max_line_length": 154, "num_lines": 111, "path": "/python/fasta/seqlength.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # command line argument handling\nimport math # math functions\nfrom low import * # custom functions, written by myself\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f fasta file to import\" )\n stdout( \" -g map file, tab delimited, regex to name (one per line) to group sequences into distinct bins\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:g:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['file'] = value\n if key == '-g': args['group'] = value\n \n if not args.has_key('file'):\n stderr( \"import file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"import file does not exist.\" )\n show_help()\n \n return args\n\n\n# =============================================================================\ndef read_groups( file ):\n groups = {}\n fo = open( file )\n for line in fo:\n line = line.rstrip()\n regex, name = line.split(\"\\t\")\n groups[name] = re.compile(regex)\n fo.close()\n return groups\n\n# =============================================================================\ndef read_sequences( file, groups ):\n def add_entry( hash, groups, id, seq ):\n group = \"*all*\"\n for name, regex in groups.iteritems():\n if re.search(regex, id):\n group = name\n break\n if hash[group].has_key(id): sys.stderr.write(\"WARNING: overwriting entry with the same ID (%s) in group %s...\\n\" %(id, group))\n hash[group][id] = seq\n return hash\n\n\n hash = {}\n for name, regex in groups.iteritems(): hash[name] = {}\n if hash.has_key('*all*'): sys.stderr.write(\"WARNING: you used \\\"*all*\\\" as a group name. 
This name refers to all non-group-matching entries as well!\\n\")\n hash['*all*'] = {}\n\n id, seq = \"\", \"\"\n fo = open( file )\n for line in fo:\n line = line.rstrip()\n if line.startswith(\">\"):\n if id != \"\": add_entry( hash, groups, id, seq )\n id = line[1:]\n seq = \"\"\n else:\n seq += line\n if id != \"\": add_entry( hash, groups, id, seq )\n fo.close()\n return hash\n\n# =============================================================================\ndef eval_seq_lengths(hash):\n for group, seqhash in hash.iteritems():\n for id, seq in seqhash.iteritems():\n print string.join([group, id, str(len(seq))], \"\\t\")\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n\n groups = {}\n if args.has_key('group'): groups = read_groups( args.get('group') )\n seqhash = read_sequences( args.get('file'), groups )\n eval_seq_lengths(seqhash)\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.49239417910575867, "alphanum_fraction": 0.4970238208770752, "avg_line_length": 31.85869598388672, "blob_id": "211c82e697706a9cb5c54f3a869b19ef8799c8aa", "content_id": "bb44ccb5c19dd2e322a6debff8e054c1e0beac16", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3024, "license_type": "permissive", "max_line_length": 88, "num_lines": 92, "path": "/python/fasta/generate-fasta-aa-nt.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular expressions\nimport getopt\t\t\t\t\t# command line argument handling\nfrom low import *\t\t\t# custom functions, written by myself\nimport anydbm\t\t\t\t\t# index databases (file hash)\nfrom Bio import SeqIO # biopython stuff, to parse fasta files for instance\n\n# =============================================================================\t\ndef show_help( ):\n\t\"\"\" displays the program parameter list and usage information \"\"\"\n\tstdout( \"usage: \" + sys.argv[0] + \" -i <path> [-d <path>]\" )\n\tstdout( \" \" )\n\tstdout( \" option description\" )\n\tstdout( \" -h help (this text here)\" )\n\tstdout( \" -i ID file\" )\n\tstdout( \" -d directory to search for orthologs\" )\n\tstdout( \" \" )\n\t\n\tsys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help()\t\n\n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hi:d:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-d':\targs['dir'] = value\n if key == '-i':\targs['idfile'] = value\n \n if args.has_key('dir') and not dir_exists( args.get('dir') ):\n stderr( \"dir folder does not exist.\" )\n show_help()\n if not args.has_key('dir'): args['dir'] = './'\n if not args.get('dir').endswith('/'): args['dir'] = args.get('dir') + '/'\n \n if not args.has_key('idfile'):\n stderr( \"id file missing.\" 
)\n show_help()\n if not file_exists( args.get('idfile') ):\n stderr( \"id file does not exist.\" )\n show_help()\n\n return args\n\n \n# =============================================================================\n# =============================================================================\ndef main( args ):\n idlist = read_from_file( args.get('idfile') ).splitlines()\n dir = args.get('dir')\n\n hash = {}\n for id in idlist:\n popenout = os.popen(\"grep -l \\\"%s\\\" %s*\" %(id, dir))\n out = popenout.read()\n popenout.close()\n outlines = out.splitlines()\n\n hash[ id ] = outlines\n\n aafile = args.get('idfile') + '.aa'\n ntfile = args.get('idfile') + '.nt'\n for id,files in hash.iteritems():\n for file in files:\n if not file.endswith('.aa') and not file.endswith('.nt'): continue\n popenout = os.popen(\"grep -A 100 \\\"%s\\\" %s\" %(id, file))\n out = popenout.read()\n popenout.close()\n outlines = out.splitlines()\n outlines.pop(0)\n \n if file.endswith('.aa'): outfile = aafile\n else: outfile = ntfile\n \n os.system( \"echo \\\">%s\\\" >> %s\" %( id, outfile ) )\n for line in outlines:\n if not line.startswith(\">\"): os.system( \"echo \\\"%s\\\" >> %s\" %( line, outfile ) )\n else: break\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.49195751547813416, "alphanum_fraction": 0.4943854212760925
3, "avg_line_length": 31, "blob_id": "c2bb32b1e5118c48cee28f3cf3ede815ed7e3ba7", "content_id": "66a4d29c52dfe340e34c499d9210176ce9857e8a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3295, "license_type": "permissive", "max_line_length": 88, "num_lines": 97, "path": "/python/generic/grab-columns.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # command line argument handling\nimport math # math functions\nfrom low import * # custom functions, written by myself\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path> -r <regex> [-1]\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f tab delimited input file\" )\n stdout( \" -1 keep first column\" )\n stdout( \" -r regex for the column headers to keep\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:r:1\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n args['keepfirstcol'] = 0\n for key, value in keys:\n if key == '-f': args['file'] = value\n if key == '-r': args['regex'] = re.compile(value)\n if key == '-1': args['keepfirstcol'] = 1\n \n if not 
args.has_key('file'):\n stderr( \"import file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"import file does not exist.\" )\n show_help()\n\n if not args.has_key('regex'):\n stderr( \"regex argument missing.\" )\n show_help()\n \n return args\n\n\n# =============================================================================\ndef get_header( file ):\n fo = open(file)\n header = fo.readline().rstrip()\n fo.close()\n return header\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n headline = get_header( args.get('file') )\n columns = headline.split(\"\\t\")\n regex = args.get('regex')\n keepindices = []\n for i in range(len(columns)):\n if regex.search(columns[i]): \n keepindices.append(i)\n #sys.stderr.write(\"marked:\\t%02d\\t%s\\n\" % (i, columns[i]))\n elif i == 0 and args.get('keepfirstcol'): \n keepindices.append(i)\n #sys.stderr.write(\"marked:\\t%02d\\t%s\\n\" % (i, columns[i]))\n\n fo = open(args.get('file'))\n for line in fo:\n line = line.rstrip()\n columns = line.split(\"\\t\")\n out = []\n for i in keepindices: out.append( columns[i] )\n print string.join(out, \"\\t\")\n fo.close()\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5627232193946838, "alphanum_fraction": 0.5727678537368774, "avg_line_length": 38.29824447631836, "blob_id": "6b1e804c0556b04567082f25839672b1645be182", "content_id": "72ec069249b691fb33ebdbe7b3864b3f8533b16c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4480, "license_type": "permissive", "max_line_length": 225, "num_lines": 114, "path": "/python/fasta/get-all-possible-translations.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# for an assembled genome (contigs/scaffolds/chromosomes), get all possible\n# translations longer than a user-specified threshold.\n\nfrom Bio import SeqIO\nfrom Bio.Seq import reverse_complement, transcribe, back_transcribe, translate, Seq\nfrom Bio.Alphabet import IUPAC\nfrom low import *\nimport getopt, sys\nimport string\n\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:t:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {'minlength':30}\n for key, value in keys:\n if key == '-f': args['fasta'] = value\n if key == '-t': args['minlength'] = int(value)\n \n if not args.has_key('fasta'):\n stderr( \"fasta file argument missing.\" )\n show_help()\n elif not file_exists( args.get('fasta') ):\n stderr( \"fasta file does not exist.\" )\n show_help()\n\n return args\n\n# =============================================================================\ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path> [-t <n>]\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this 
text here)\" )\n stdout( \" -f fasta file of the nucleotide sequences from which to predict the translations\" )\n stdout( \" -t minimum length of a translation (amino acids) to be reported [default: 30]\" )\n stdout( \" \" )\n sys.exit(1)\n\n\ndef process_seq(header, seq):\n hits = 0\n id = header\n if id.count(\" \") > 0: id = id[:id.index(\" \")]\n seq = Seq(seq)\n # direction1 is the direction we originally have had, 2 is the antisense strand\n # then TRANSLATE ALL POSSIBLE ORFs, do not stop at STOP codons\n dna_sequence_direction1 = seq\n dna_sequence_direction2 = dna_sequence_direction1.reverse_complement()\n translations = {}\n translations['+1'] = translate(dna_sequence_direction1)\n translations['-1'] = translate(dna_sequence_direction2)\n translations['+2'] = translate(dna_sequence_direction1[1:])\n translations['-2'] = translate(dna_sequence_direction2[1:])\n translations['+3'] = translate(dna_sequence_direction1[2:])\n translations['-3'] = translate(dna_sequence_direction2[2:])\n # get all polypeptides between stops, filter out those shorter than minlength\n polypeptides = {}\n for frame, translation in translations.iteritems():\n peptides = translation.split('*')\n if int(frame) < 0: startpos = len(seq) +1 + int(frame)\n else: startpos = int(frame)\n #print >> sys.stderr, \"frame: %s | startpos: %s | scaffold length: %s\" %(frame, startpos, len(seq))\n #print >> sys.stderr, \"# peptides: %s | pep.length: %s | transformed length: %s | scaffold length: %s\" %(len(peptides), sum([len(pep) for pep in peptides]), (sum([len(pep) for pep in peptides])+len(peptides))*3, len(seq))\n for peptide in peptides:\n peptide += '*'\n if int(frame) < 0: stoppos = startpos +1 - (3*len(peptide))\n else: stoppos = startpos -1 + (3*len(peptide))\n polypeptides[str(startpos)+':'+str(stoppos)] = peptide.tostring()\n if int(frame) < 0: startpos = stoppos-1\n else: startpos = stoppos+1\n\n for key, pepseq in polypeptides.iteritems():\n if len(pepseq) < args['minlength']: continue\n startpos, stoppos = [int(e) for e in key.split(\":\")]\n hits += 1\n print \">%s[%s:%s]\" %( id, startpos, stoppos )\n print pepseq\n return hits\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n \n npep = 0\n fo = open(args['fasta'], 'r')\n id, seq = '', ''\n for line in fo:\n line = line.rstrip()\n if line.startswith(\">\"):\n if id != '' and seq != '': npep += process_seq(id, seq)\n id = line[1:]\n seq = ''\n sys.stderr.write(\"\\r\\tpeptides caught: %s \" % npep)\n else: seq += line.strip()\n if id != '' and seq != '': npep += process_seq(id, seq)\n sys.stderr.write(\"\\r\\tpeptides caught: %s \\n\" % npep)\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n" }, { "alpha_fraction": 0.5306222438812256, "alphanum_fraction": 0.5338069796562195, "avg_line_length": 29.91666603088379, "blob_id": "9c26ef05c4ea132b6d4ac086a396367c133ee4ef", "content_id": "c1793db4d59ef6084aa485013e3d142a77d8f178", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4082, "license_type": "permissive", "max_line_length": 95, "num_lines": 132, "path": "/python/geneontology/add-parental-go-terms.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, 
sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # command line argument handling\nfrom low import * # custom functions, written by myself\nfrom goterm import GOTerm\nfrom collections import defaultdict\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -a <path> -o <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -a go annot file (geneid <tab> goid)\" )\n stdout( \" -o go obo file\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"ha:o:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-a': args['annot'] = value\n if key == '-o': args['obo'] = value\n \n if not args.has_key('annot'):\n stderr( \"annot file argument missing.\" )\n show_help()\n elif not file_exists( args.get('annot') ):\n stderr( \"annot file does not exist.\" )\n show_help()\n \n if not args.has_key('obo'):\n stderr( \"obo file argument missing.\" )\n show_help()\n elif not file_exists( args.get('obo') ):\n stderr( \"obo file does not exist.\" )\n show_help()\n \n return args\n\n \n# =============================================================================\ndef read_obo( file ):\n hash = {}\n fo = open(file)\n while 1:\n line = fo.readline()\n if line.startswith(\"[Typedef]\"): break\n if not line.startswith(\"[Term]\"): continue\n obolines = []\n while 1:\n line = fo.readline()\n if line.count(\":\") == 0: break\n obolines.append(line)\n goterm = GOTerm(obolines)\n hash[goterm.get_id()] = goterm\n for alt_id in goterm.get_alt_ids():\n hash[alt_id] = goterm\n fo.close()\n print >> sys.stderr, \"goterms read from obo: %s\" % len(hash)\n return hash\n\n\ndef get_parents_of_goterms(file, gohash):\n\n def get_parents(goid):\n parents = gohash[goid].get_is_a_goids()\n return parents + [get_parents(p) for p in parents]\n\n def flatten(x):\n result = []\n for el in x:\n if hasattr(el, \"__iter__\") and not isinstance(el, basestring): result.extend(flatten(el))\n else: result.append(el)\n return result\n\n hash = {}\n fo = open(file)\n for line in fo:\n line = line.rstrip()\n geneid, gotermid = line.split(\"\\t\")[0:2]\n if not gotermid.startswith(\"GO:\"): continue\n if hash.has_key(gotermid): continue\n hash[gotermid] = []\n fo.close()\n allannotgotermids = hash.keys()\n for goid in allannotgotermids: hash[goid] = list(set(flatten(get_parents(goid))))\n return hash\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n \n print >> sys.stderr, \"reading obo file ...\"\n gohash = read_obo(args['obo'])\n print >> sys.stderr, \"gather parents for all annotated goterms ...\"\n goterm2parents = get_parents_of_goterms(args['annot'],gohash)\n\n print >> 
sys.stderr, \"producing output ...\"\n fo = open(args['annot'])\n for line in fo:\n line = line.rstrip()\n geneid, goid = line.split(\"\\t\")[0:2]\n if not goid.startswith(\"GO:\"): continue\n print geneid + \"\\t\" + goid\n for id in goterm2parents[goid]:\n print geneid + \"\\t\" + id\n fo.close()\n print >> sys.stderr, \"done.\"\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5122792720794678, "alphanum_fraction": 0.5189770460128784, "avg_line_length": 33.94326400756836, "blob_id": "b08455d074fbb181d434a49bde02a75842effa91", "content_id": "5deadef20b8fb8d445ea89852f551d92233334ba", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4927, "license_type": "permissive", "max_line_length": 170, "num_lines": 141, "path": "/python/misa/add-features-to-misa.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport getopt # comand line argument handling\nfrom collections import defaultdict\nfrom low import * # custom functions, written by myself\nfrom misa import MisaSSRspecies\nimport pickle\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" -d <gff-folder>\"\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -d folder with gff files to parse\" )\n stdout( \" -f misa file\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hd:f:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-d': args['dir'] = value\n if key == '-f': args['misa'] = value\n \n if not args.has_key('dir'):\n print >> sys.stderr, \"gff dir argument missing.\"\n show_help()\n elif not dir_exists( args.get('dir') ):\n print >> sys.stderr, \"gff dir does not exist.\"\n show_help()\n\n if not args.has_key('misa'):\n print >> sys.stderr, \"misa file argument missing.\"\n show_help()\n elif not file_exists( args.get('misa') ):\n print >> sys.stderr, \"misa file does not exist.\"\n show_help()\n\n\n if not args['dir'].endswith(\"/\"): args['dir'] += '/'\n return args\n\n# =============================================================================\ndef get_ssrs(file):\n hash = defaultdict(list)\n fo = open(file)\n for line in fo: \n if line.startswith(\"ID\\t\"): continue\n m = MisaSSRspecies(line)\n hash[m.species + '|' + m.geneid].append(m)\n fo.close()\n return hash\n\n# =============================================================================\ndef get_features(dir):\n storage = \".misa.gff.storage.tmp\"\n features = defaultdict(list)\n if not file_exists(storage):\n for filename in os.listdir(args['dir']):\n gzip = 0\n if not filename.endswith(\".gff\") and not filename.endswith(\".gff.gz\"): continue\n species = filename[:filename.index(\"-\")]\n filename 
= args['dir'] + filename\n if filename.endswith(\".gff.gz\"): gzip = 1\n if gzip: \n os.system(\"gunzip \" + filename)\n filename = filename[:-3]\n\n fo = open(filename)\n for line in fo: \n if line.startswith(\"#\") or len(line.rstrip()) == 0: continue\n columns = line.rstrip().split(\"\\t\")\n if len(columns) != 9: continue\n type = columns[2]\n if type != \"gene\" and type != \"exon\" and type != \"intron\": continue\n chr, start, stop, strand, descr = columns[0], columns[3], columns[4], columns[6], columns[8]\n key = string.join([species, chr], \"|\")\n features[key].append([type, int(start), int(stop)])\n fo.close()\n if gzip: os.system(\"gzip \" + filename)\n\n fw = open(storage, \"w\")\n for key, feats in features.iteritems():\n for feat in feats:\n fw.write(string.join([key, feat[0], str(feat[1]), str(feat[2])], \"\\t\") + \"\\n\")\n fw.close()\n\n else:\n fo = open(storage)\n for line in fo:\n columns = line.rstrip().split(\"\\t\")\n key = columns[0]\n feat = list(columns[1:4])\n features[key].append(feat)\n fo.close()\n return features\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n SSRs = get_ssrs(args['misa']) \n print >> sys.stderr, \"misa output loaded...\"\n Features = get_features(args['dir']) \n print >> sys.stderr, \"gff loaded...\"\n total = len(SSRs)\n count = 0\n for key, ssrs in SSRs.iteritems():\n sys.stderr.write(\"\\r STATUS: %s/%s (%.2f%%) CURRENT LOCATION: %s (%s SSRs, %s features)\" %(count, total, (100.0*count/total), key, len(ssrs), len(Features[key])))\n for ssr in ssrs:\n type = 0\n for feat in Features[key]:\n ftype, fstart, fstop = feat[0], int(feat[1]), int(feat[2])\n if ssr.startpos >= fstart and ssr.startpos <= fstop:\n if not type or type == \"gene\": type = ftype\n if type == \"gene\": type = \"UTR\"\n if type == 0: type = \"intergenic\"\n print ssr.to_s() + \"\\t\" + type\n count += 1\n sys.stderr.write(\"\\n\")\n\n\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n" }, { "alpha_fraction": 0.539500892162323, "alphanum_fraction": 0.5507073402404785, "avg_line_length": 35.744319915771484, "blob_id": "b8e0d734a9cc6d873465b43a6a481bc54ee23f58", "content_id": "e385bbaa1f3ded81c50b2138db2e3994048d1025", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 12582, "license_type": "permissive", "max_line_length": 152, "num_lines": 352, "path": "/ruby/swapsc/visualize-swapsc.rb", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n# generates a diagram of where in the sequence accelerated evolution / positive selection / negative selection took place\nrequire 'rubygems'\nrequire 'bio-graphics'\n\nDEBUG = false\n\n$categories = { \n \"NS\" => { :color => [1,0,0], :legend => \"Negative selection\", :stats => 0 },\n \"PS\" => { :color => [0.1,0.7,0.1], :legend => \"Positive selection\", :stats => 0 },\n# \"HS\" => { :color => [0,0,0.7], :legend => \"Hot spots\", :stats => 0 },\n# \"S\" => { :color => [0.5,0.5,0.5], :legend => \"Saturation of synonymous sites\", :stats => 0 },\n# \"AdN+S\" => { :color => [1,0.5,0], :legend => \"Acceleration of non-synonymous substitutions + Saturation of synonymous sites\", :stats => 0 },\n \"AdN\" => { :color => [0,0.7,1], :legend => 
\"Acceleration of non-synonymous substitutions\", :stats => 0 }\n}\n\n###############################################################################\nclass SwapscFeature\n\n attr_accessor :start, :stop, :category\n \n def initialize(start,stop,category)\n @start = start\n @stop = stop\n @category = category\n @added = false\n end\n\n def <=> other\n @start <=> other.start\n end \n\n def added?\n return @added\n end\n\n def add_to_track(track)\n track.add_feature( Bio::Feature.new(@category, '%s..%s' % [ @start, @stop ]), :colour => $categories[@category][:color] )\n $categories[@category][:stats] += (@stop - @start +1)\n $categories[@category][:branchstats] += (@stop - @start +1)\n @added = true\n end\nend\n###############################################################################\n\n###############################################################################\ndef correct_overlap( pf, cf )\n if cf.start <= pf.stop and cf.stop <= pf.stop # current included in previous\n if pf.category == 'NS'\n outerstop = +1 + pf.stop\n pf.stop = -1 + cf.start \n nf = SwapscFeature.new(-1 + cf.stop,outerstop,\"NS\")\n return [pf, nf]\n elsif pf.category == 'PS' and cf.category == 'AdN'\n return [pf]\n elsif pf.category == 'AdN' and cf.category == 'PS'\n outerstop = +1 + pf.stop\n pf.stop = -1 + cf.start\n nf = SwapscFeature.new(-1 + cf.stop,outerstop,\"AdN\")\n return [pf, cf, nf]\n end\n else # current overlaps to the right (but is longer)\n if pf.category == 'NS' or cf.category == 'NS'\n newstop = -1 + cf.start\n newstart = +1 + pf.stop\n pf.stop = newstop\n cf.start = newstart\n return [pf, cf]\n elsif pf.category == 'AdN' and cf.category == 'PS'\n pf.stop = -1 + cf.start\n return [pf, cf]\n elsif pf.category == 'PS' and cf.category == 'AdN'\n cf.start = +1 + pf.stop\n return [pf, cf]\n end\n end\n return [pf, cf]\nend\n###############################################################################\n\nif ARGV[0] and not File.exists?(ARGV[0])\n puts \"error: invalid path to file specified.\"\n ARGV[0] = nil\nend\n\nunless ARGV[0]\n puts \"generates a diagram of where in the sequence accelerated evolution / positive selection / negative selection took place\\n\"\n puts \"usage: visualize-swapsc.rb <swapsc.out> [-r]\"\n puts \"\\t-r\\tremove overlapping features\"\n puts \"will create <swapsc.out>.png\\n\"\n exit 1\nend\n\n$annotation = true\ntmpparts = ARGV[0].split('.')\nannofilename = tmpparts[0..2].join('.') + '.ids.annotation'\n#STDERR.puts \"checking file #{annofilename} ...\"\nif File.exists?(annofilename)\n STDERR.puts \"annotation file found: #{annofilename}\"\nelse\n annofilename = '../' + annofilename\n #STDERR.puts \"checking file #{annofilename} ...\"\n if File.exists?(annofilename)\n STDERR.puts \"annotation file found: #{annofilename}\"\n else\n STDERR.puts \"no annotation file found.\"\n $annotation = false\n end\nend\n\n$removeoverlaps = false\n$removeoverlaps = true if ARGV[1] == '-r'\n\n# === MAIN ====================================================================\n# =============================================================================\n\n\nglobals = Hash.new\nbranches = Hash.new\nbranchsequences = Hash.new\n\nf = File.open( ARGV[0], \"r\" )\n \n STDERR.print( ARGV[0] + \"\\t\" )\n line = f.gets.chomp\n line.scan(/^Number of sequences =\\s*(\\d+)/) { |match| globals.store(:n, match.to_s.to_i) }\n #puts \"Number of sequences: #{globals.fetch(:n)}\"\n line = f.gets.chomp\n line.scan(/^Length of alignment \\(nucleotides\\) =\\s*(\\d+)/) { |match| globals.store(:length, 
match.to_s.to_i) }\n #puts \"Length alignment: #{globals.fetch(:length)}\"\n\n line = f.gets.chomp\n 1.upto( globals.fetch(:n) ) do |i|\n line = f.gets.chomp\n branches.store(i, line)\n puts \"branchpoint #{i}: #{line}\" if DEBUG\n line = f.gets.chomp\n branchsequences.store(i, line)\n puts \" seq: #{line.slice(0,80)}...\" if DEBUG\n end\n \n # Branches:\n # ----------\n line = f.gets.chomp while line !~ /^Branches:/\n line = f.gets.chomp\n line = f.gets.chomp\n while line =~ /^\\d+\\s+:\\s+\\d+\\.{3}\\d+/\n h = Hash.new\n line.scan(/^(\\d+)/) { |match| h.store(:key, match.to_s.to_i) }\n line.scan(/^\\d+\\s+:\\s+(\\d+)\\.{3}\\d+/) { |match| h.store(:x, match.to_s.to_i) }\n line.scan(/^\\d+\\s+:\\s+\\d+\\.{3}(\\d+)/) { |match| h.store(:y, match.to_s.to_i) }\n #puts \"#{h.fetch(:x)} #{h.fetch(:y)}\"\n value = '(' + branches.fetch(h.fetch(:x)) + ',' + branches.fetch(h.fetch(:y)) + ')'\n puts \"branch #{:key} #{h.fetch(:key)}: #{value} (#{h.fetch(:x)},#{h.fetch(:y)})\" if DEBUG\n branches.store(h.fetch(:key), value)\n line = f.gets.chomp\n end\n\n # Ancestral sequences inferred by MP:\n # -----------------------------------\n line = f.gets.chomp while line !~ /^Ancestral sequences inferred by MP:/\n line = f.gets.chomp\n line = f.gets.chomp\n while line =~ /^node/\n descr, seq = line.split\n branch = (descr.match(/^node(\\d+):/)[1]).to_s.to_i\n branchsequences.store(branch, seq)\n #puts \"ancestral sequence #{branch} => #{seq.slice(0,80)}\"\n line = f.gets.chomp\n end\n\n if DEBUG\n branches.each do |key,value| \n puts \"branch #{key} => #{value}\"\n puts \" seq => #{branchsequences.fetch(key)}\" if branchsequences.key?(key)\n end\n end\n\n line = f.gets.chomp while line !~ /^mean w =/\n line.scan(/^mean w =\\s+(\\S+);\\s+/) { |match| globals.store(:omega, match.to_s) }\n #puts \"omega: #{globals.fetch(:omega)}\"\n \n \n panel = Bio::Graphics::Panel.new( globals.fetch(:length), :width => 800, :format => :png )\n # add annotation to diagram\n if $annotation\n annofile = File.new(annofilename, 'r')\n annotationtext = annofile.readline.chomp\n annotationtext = annotationtext.gsub('\"','')\n annofile.close\n STDERR.puts \"annotation line: #{annotationtext}\"\n track = panel.add_track('TAIR annotation', :label => true, :colour => [1.0,0.8,0.3])\n track.add_feature( Bio::Feature.new('tair8', '%s..%s' % [ 0, globals.fetch(:length) ]), :label => annotationtext )\n end\n\n # GET LIST OF SIGNALS\n line = f.gets.chomp while line !~ /^={20,}/\n\n while line !~ /^Proportion of codon sites under selective constraints/ # read all branches\n while line !~ /^Proportion of codon sites under selective constraints/ and line !~ /\\d+\\.{2}\\d+/ # read single branch\n # puts \"skipping line: #{line}\"\n line = f.gets.strip # while line !~ /\\d+\\.{2}\\d+/\n end\n if line =~ /\\d+\\.{2}\\d+/\n puts \"positions: #{line}\" if DEBUG\n bs = line.split('..')\n x = branches.fetch(bs[0].to_i)\n y = branches.fetch(bs[1].to_i)\n name = \"#{x} : #{y}\"\n track = panel.add_track(name, :label => false, :colour => [0,0,0])\n # display gaps\n if branchsequences.key?(bs[0].to_i) and branchsequences.key?(bs[1].to_i)\n i = 0\n seqx = branchsequences.fetch(bs[0].to_i)\n seqy = branchsequences.fetch(bs[1].to_i)\n puts \"seqx (#{bs[0]}): #{seqx}\" if DEBUG\n puts \"seqy (#{bs[1]}): #{seqy}\" if DEBUG\n start, stop = nil, nil\n while i < seqx.length do\n if (seqx[i,1] == '-' or seqy[i,1] == '-') \n if start.nil?\n start = i \n end\n stop = i\n\n else\n if not start.nil? 
and not stop.nil?\n track.add_feature( Bio::Feature.new(\"gap\", '%s..%s' % [ start, stop ]), :colour => [0.55,0.55,0.55] )\n puts \"added gap in #{name} between #{start} and #{stop}\" if DEBUG\n start, stop = nil, nil\n end\n end\n i += 1\n end\n else\n STDERR.puts \"no sequence pair found for #{name}\"\n end\n # /display gaps\n line = f.gets.chomp\n features = Hash.new\n while line.split.size >= 10\n columns = line.split\n significance = columns.slice(6,3).to_s\n unless significance == \"P>0.05\"\n positions = columns[0]\n category = columns.slice(9,columns.size).to_s\n if $categories.has_key?(category)\n puts \"add feature #{positions} \\\"#{category}\\\"\" if DEBUG\n features[category] = Hash.new unless features.has_key?(category)\n start, stop = positions.split('..')\n features[category][start.to_i] = stop.to_i \n end\n end\n line = f.gets.chomp\n end\n \n sortedfeatures = Array.new\n features.each do |category, positionhash| # iterate features\n next if positionhash.size == 0\n sortedpositions = positionhash.sort\n fsf = nil\n for e in sortedpositions # iterate positions\n nstart, nstop = e\n puts \"#{category} #{nstart} #{nstop}\" if DEBUG\n unless fsf\n fsf = SwapscFeature.new(nstart,nstop,category)\n next\n end\n if nstart <= fsf.stop\n fsf.stop = nstop\n else\n $removeoverlaps ? sortedfeatures << fsf : fsf.add_to_track(track) \n fsf = SwapscFeature.new(nstart,nstop,category)\n end\n end # /iterate positions\n \n ($removeoverlaps ? sortedfeatures << fsf : fsf.add_to_track(track)) unless fsf.added?\n\n end # /iterate features\n\n if $removeoverlaps \n sortedfeatures.sort!\n sortedfeatures.each_index do |index|\n unless index == 0\n currentfeature = sortedfeatures[index]\n prevfeature = sortedfeatures[(index -1)]\n if currentfeature.start < prevfeature.stop\n #puts sortedfeatures.inspect\n #STDOUT.puts \"\\n#{name}\\toverlap found:\\n\"\n #STDOUT.puts \"\\t%s\\tstart: %s\\tstop:\\t%s\" % [prevfeature.category, prevfeature.start, prevfeature.stop]\n #STDOUT.puts \"\\t%s\\tstart: %s\\tstop:\\t%s\" % [currentfeature.category, currentfeature.start, currentfeature.stop]\n resarray = correct_overlap(prevfeature,currentfeature)\n sortedfeatures[index -1] = nil\n sortedfeatures[index] = nil\n resarray.each {|r| sortedfeatures << r }\n sortedfeatures.compact!\n sortedfeatures.sort!\n retry\n end\n end\n end # /sortedfeatures.each\n sortedfeatures.compact!\n $categories[\"PS\"][:branchstats] = 0\n $categories[\"NS\"][:branchstats] = 0\n $categories[\"AdN\"][:branchstats] = 0\n sortedfeatures.each do |feat| \n feat.add_to_track(track) \n end\n printlist = Array.new\n printlist << ARGV[0]\n printlist << name\n $categories.each { |key,hash| printlist << $categories[key][:branchstats] }\n STDOUT.puts( printlist.join(\"\\t\") )\n end\n\n end # /read single branch \n\n\n end # /read all branches\n\n\n# exit 3\n\n\n # LEGEND + STATS\n track = panel.add_track(\"Legend\", :label => true)\n startpos = (globals.fetch(:length).to_f * 0.02).to_i\n endpos = (globals.fetch(:length).to_f * 0.98).to_i\n negative, positive = 0, 0\n #STDOUT.print \"#{ARGV[0]}\"\n $categories.each do |abbrv,hash|\n color = hash[:color]\n legend = hash[:legend]\n puts \"legend: #{abbrv} #{legend} #{color}\" if DEBUG\n stats = hash[:stats].to_f * 100 / (branches.size - globals.fetch(:n)) / globals.fetch(:length)\n if [\"S\",\"NS\"].include?(abbrv) \n negative += stats\n else\n positive += stats\n end\n stats = format(\"%.2f\",stats)\n #STDOUT.print \"\\t#{abbrv}\\t#{stats}\"\n track.add_feature( Bio::Feature.new(abbrv, '%s..%s' % [ 
startpos, endpos ]), :colour => color, :label => \"#{abbrv} (#{legend}): #{stats} %\" )\n end\n positive = format(\"%.2f\",positive)\n negative = format(\"%.2f\",negative)\n #STDOUT.print \"\\t#{positive}\\t#{negative}\\n\"\n\n panel.draw(ARGV[0] + \".gaps.png\")\n STDERR.print( \"done.\\n\" )\n\nf.close\n" }, { "alpha_fraction": 0.4551916718482971, "alphanum_fraction": 0.4711855947971344, "avg_line_length": 36.86538314819336, "blob_id": "d657ea3e44b6b511647542411f7ac79c80e449bd", "content_id": "ab0306d952fad836d3d7635cffd7244ba9ba11a1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3939, "license_type": "permissive", "max_line_length": 132, "num_lines": 104, "path": "/python/gff/overlapping-cds-from-gff.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # command line argument handling\nimport tempfile\nfrom low import * # custom functions, written by myself\nBEDBIN = \"~/bin/bedtools/BEDTools-Version-2.14.3/bin/intersectBed\"\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f gene feature file\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['gffile'] = value\n \n for key in ['gffile']:\n if key.endswith(\"file\"):\n if not args_file_exists(args, key): show_help()\n elif key.endswith(\"dir\"):\n if not args_dir_exists(args, key): show_help()\n elif not args.has_key(key):\n print >> sys.stderr, \"missing argument\", key\n show_help()\n return args\n\n# =============================================================================\ndef statusbar(current, total, message=\"\", width=40):\n progress = 1.0*current/total\n if message != \"\": message = \"[\" + message + \"]\"\n progressbar = \"=\" * int(progress*width)\n while len(progressbar) < width: progressbar += \" \" \n sys.stderr.write(\"\\r 0% \" + progressbar + \" 100% \" + message)\n if progress == 1.0: sys.stderr.write(\"\\n\")\n \n\n# =============================================================================\ndef find_overlapping_cds(gffile, overlap=20):\n fh, fn = tempfile.mkstemp()\n os.close(fh)\n cdsff, outfile = fn + \".gff\", fn + \".out\"\n if gffile.endswith(\".gz\"): readbin = \"zcat\"\n else: readbin = \"cat\"\n os.system(\"%s %s | awk -F \\\"\\t\\\" '$3 == \\\"cds\\\" || $3 == \\\"CDS\\\" {print $0}' > %s\" %(readbin, gffile, cdsff))\n os.system(\"cat %s | awk -F \\\"\\t\\\" 'BEGIN {OFS=\\\"\\t\\\"} $4 == 0 {$4 = 1}; {print}' > %s\" %(cdsff, cdsff+'.1'))\n os.system(\"cat %s | awk -F \\\"\\t\\\" '$4 < $5 {print $0}' > %s\" %(cdsff+'.1', cdsff))\n os.system(\"cat %s | awk -F \\\"\\t\\\" 
'BEGIN {OFS=\\\"\\t\\\"} $4 > $5 {print $1, $2, $3, $5, $4, $6, $7, $8, $9}' >> %s\" %(cdsff+'.1', cdsff))\n os.system(\"%s -a %s -b %s -wo | awk -F \\\"\\t\\\" '$7 != $16 && $19 >= %s {print $0}' > %s\" %(BEDBIN, cdsff, cdsff, overlap, outfile))\n os.unlink(fn)\n os.unlink(cdsff)\n os.unlink(cdsff+'.1')\n return outfile\n\n# =============================================================================\ndef find_parent(s):\n s = s[s.index(\"Parent=\")+7:]\n s = s[:s.index(\";\")]\n return s\n\n# =============================================================================\ndef parse_overlapping_gene_pairs(outfile):\n fo = open(outfile)\n for line in fo:\n cols = line.strip().split(\"\\t\")\n a1, a2 = cols[8], cols[17]\n p1, p2 = find_parent(a1), find_parent(a2)\n out = [p1, p2]\n out.sort()\n print string.join(out, \"\\t\")\n os.unlink(outfile)\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n outfile = find_overlapping_cds(args['gffile'])\n parse_overlapping_gene_pairs(outfile)\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.554190456867218, "alphanum_fraction": 0.5625274181365967, "avg_line_length": 35.37234115600586, "blob_id": "358d4e6a79d85c06dba5299e22e8c46b1f4162da", "content_id": "37ac5992c8ed3f618b301f272dc7aa3713113899", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6837, "license_type": "permissive", "max_line_length": 166, "num_lines": 188, "path": "/python/blast/parse_blast_annotate.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t# low level handling, such as command line stuff\nimport string\t\t\t# string methods available\nimport re\t\t\t\t\t# regular expressions\nimport getopt\t\t\t# command line argument handling\nfrom low import *\t# custom functions, written by myself\n\n# =============================================================================\t\ndef show_help( ):\n\t\"\"\" displays the program parameter list and usage information \"\"\"\n\tstdout( \"usage: \" + sys.argv[0] + \" -f <path> [-b <path> -o <path>]\" )\n\tstdout( \" \" )\n\tstdout( \" option description\" )\n\tstdout( \" -h help (this text here)\" )\n\tstdout( \" -f blast.out file to be parsed\" )\n\tstdout( \" -n number of best hits to parse\" )\n\tstdout( \" -e minimum evalue of a hit to be parsed\" )\n\tstdout( \" -i minimum identity (in %)\" )\n\tstdout( \" -l minimum length of a hit to be parsed\" )\n\tstdout( \" -d delimiter\" )\n\tstdout( \" default is the blast.out base name plus .best-hit\" )\n\tstdout( \" \" )\n\t\n\tsys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n\t\"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\tif len ( sys.argv ) == 1:\n\t\tstderr( \"no arguments provided.\" )\n\t\tshow_help()\t\n\t\n\ttry: # check for the right arguments\n\t\tkeys, values = getopt.getopt( sys.argv[1:], \"hb:f:n:e:l:i:p:d:\" )\n\texcept getopt.GetoptError:\n\t\tstderr( \"invalid arguments provided.\" )\n\t\tshow_help()\n\t\n\targs = {}\n\targs['delimiter'] = '\\t'\n\tfor key, value in keys:\n\t\tif key == '-f':\n\t\t\tif not file_exists( value ):\n\t\t\t\tstderr( 
\"invalid path in \" + key )\n\t\t\t\tshow_help()\n\t\t\telse:\n\t\t\t\targs['file'] = value\n\t\tif key == '-e':\targs['evalthresh'] = float(value)\n\t\tif key == '-l':\targs['minlength'] = int(value)\n\t\tif key == '-i':\targs['minident'] = int(value)\n\t\tif key == '-p':\targs['minpos'] = int(value)\n\t\tif key == '-n':\targs['numberofbesthits'] = int(value)\n\t\tif key == '-d':\targs['delimiter'] = value\n\t\t\t\t\n\tif not args.has_key('file'):\n\t\tstderr( \"blast out file missing.\" )\n\t\tshow_help()\n\t\n\treturn args\n\n# =============================================================================\ndef print_hit(args,hithash):\n\t\"\"\"\n\t\"\"\"\n\tif args.has_key('evalthresh'):\n\t\tif args.get('evalthresh') < float(hithash.get('evalue')):\treturn\n\tif args.has_key('minlength'):\n\t\tif args.get('minlength') > int(hithash.get('hitlength')): return\n\tif args.has_key('minident'):\n\t\tif args.get('minident') > int(hithash.get('identities')): return\n\tif args.has_key('minpos'):\n\t\tif args.get('minpos') > int(hithash.get('positives')): return\n\t\t\n\thithash['hitdescr'] = re.sub( '\\s{2,99}', ' ' , hithash['hitdescr'] )\n\t\t\n\tL = []\n\tL.append(hithash.get('query'))\n\tL.append(hithash.get('evalue'))\n\t#splits = hithash.get('hitdescr').split()\n\t#id = splits[0]\n\t#descr, species = string.join( splits[1:], ' ').split(' - ')\n\t#L.append( id )\n\t#L.append( descr )\n\t#L.append( species )\n\tL.append( hithash.get('hitdescr') )\n\t\n\tprint string.join(L, args.get('delimiter') )\n\t\n\n# =============================================================================\ndef parse_blast_out( args ):\n\t#print \"# blast.out file:\", args.get('file')\n\t#print \"# numberofbesthits:\", args.get('numberofbesthits')\n\t#print \"# max.evalue:\", args.get('evalthresh')\n\t#print \"# min.length:\", args.get('minlength')\n\t#print \"# fields: query, hitid, score, evalue, query_startpos, query_endpos, sbjct_startpos, sbjct_endpos, hitlength, length, identities, positives, frame_or_strand\"\n\t\n\thithash = {}\n\tcurrenthits = 0\n\tfh = open( args.get('file') )\n\tfor line in fh:\n\t\t# new hit entry\n\t\tif (line.startswith('Query=') or line.startswith('>')) and len(hithash) > 1:\n\t\t\tprint_hit(args,hithash)\n\t\t\tquery = hithash.get('query')\n\t\t\thithash.clear()\n\t\t\thithash['query'] = query\n\t\t\tcurrenthits += 1\n\t\t# query\n\t\tif line.startswith('Query='):\n\t\t\thithash['query'] = re.search('Query=\\s*(\\S+)',line).group(1)\n\t\t\tcurrenthits = 0\n\t\t# query with no hit\n\t\telif re.search('No hits found',line):\n\t\t\tif not args.has_key('evalthresh') and not args.has_key('minlength'):\n\t\t\t\tprint hithash.get('query') + args.get('delimiter') + \"no_hit_found\"\n\t\t\thithash.clear()\n\t\t\tcurrenthits = 0\n\t\t\tcontinue\n\t\t\n\t\tif args.has_key('numberofbesthits') and not args.get('numberofbesthits') > currenthits:\n\t\t\thithash.clear()\n\t\t\tcontinue\n\t\t\n\t\tif len(hithash) < 1: continue\n\t\t\n\t\t# hit id and descr\n\t\tif not hithash.has_key('hitdescr'):\n\t\t\tif line[:1] == '>':\n\t\t\t\thithash['hitdescr'] = line[1:].replace('\\n','')\n\t\t\t\thithash['hitid'] = re.match('(\\w)',hithash['hitdescr']).group(1)\n\t\telse:\n\t\t\tif not hithash.has_key('length'):\n\t\t\t\tif re.search( 'Length =', line):\n\t\t\t\t\thithash['hitdescr'] += line[:line.index('Length =')]\n\t\t\t\telse:\n\t\t\t\t\thithash['hitdescr'] += line.replace('\\n','')\n\t\t\n\t\t# subject length\n\t\tif re.search('Length =',line):\n\t\t\thithash['length'] = re.search('Length 
=\\s{0,9}(\\d+)',line).group(1)\n\t\t# hit length\n\t\tif re.search('Identities =',line):\n\t\t\thithash['hitlength'] = re.search('Identities =\\s{0,9}\\d+/(\\d+)',line).group(1)\n\t\t# identities\n\t\tif re.search('Identities =',line):\n\t\t\thithash['identities'] = re.search('Identities =\\s{0,9}\\d+/\\d+\\s+\\((\\d+)%\\)',line).group(1)\n\t\t# positives\n\t\tif re.search('Positives =',line):\n\t\t\thithash['positives'] = re.search('Positives =\\s{0,9}\\d+/\\d+\\s+\\((\\d+)%\\)',line).group(1)\n\t\t# gaps\n\t\tif re.search('Gaps =',line):\n\t\t\thithash['gaps'] = re.search('Gaps =\\s{0,9}\\d+/\\d+\\s+\\((\\d+)%\\)',line).group(1)\n\t\t# score\n\t\tif re.search('Score =',line):\n\t\t\thithash['score'] = re.search('Score =\\s{0,9}(\\S+)',line).group(1)\n\t\t# evalue\n\t\tif re.search('Expect[(2)]* =',line):\n\t\t\thithash['evalue'] = re.search('Expect[(2)]* =\\s{0,9}([0-9e.-]+)',line).group(1)\n\t\t\tif hithash['evalue'].count('e') > 0 and not re.match( '\\d', hithash['evalue'] ):\n\t\t\t\thithash['evalue'] = '1' + hithash['evalue']\n\t\t# frame\n\t\tif re.search('Frame =',line):\n\t\t\thithash['frame'] = re.search('Frame =\\s{0,9}(\\S+)',line).group(1)\n\t\t# strand (BLASTN)\n\t\tif re.search('Strand =',line):\n\t\t\thithash['strand'] = re.search('Strand =\\s{0,9}(.*)\\n',line).group(1)\n\t\t# get hit positions\n\t\tif re.search('Query:\\s*\\d+',line):\n\t\t\tif not hithash.has_key('query_startpos'): \n\t\t\t\thithash['query_startpos'] = re.search('Query:\\s*(\\d+)',line).group(1)\n\t\t\thithash['query_endpos'] = re.search('(\\d+)\\n',line).group(1)\n\t\tif re.search('Sbjct:\\s*\\d+',line):\n\t\t\tif not hithash.has_key('sbjct_startpos'): \n\t\t\t\thithash['sbjct_startpos'] = re.search('Sbjct:\\s*(\\d+)',line).group(1)\n\t\t\thithash['sbjct_endpos'] = re.search('(\\d+)\\n',line).group(1)\t\t\t\n\t\t\t\t\t\t\n\tif len(hithash) > 1: print_hit(args,hithash)\n\tfh.close()\n\t\t\n\t\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nparse_blast_out( args )" }, { "alpha_fraction": 0.4872051179409027, "alphanum_fraction": 0.5061975121498108, "avg_line_length": 31.47402572631836, "blob_id": "dfc311219c612790dfbae53c69ca72c612bf60c8", "content_id": "b3a5f1cf357b562fe6d33c9e1c28c7fc9d55066b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5002, "license_type": "permissive", "max_line_length": 106, "num_lines": 154, "path": "/python/pfam/pfamtable-from-pid-annotation.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # command line argument handling\nfrom low import * # custom functions, written by myself\nimport glob\nimport stats\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -d pfam dir\" )\n stdout( \" -e file extension (e.g. 
\\\"*.pfam\\\")\" )\n stdout( \" -m output matrix instead of table\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hd:e:m\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {'matrix': False}\n for key, value in keys:\n if key == '-d': args['pfamdir'] = value\n if key == '-e': args['extension'] = value\n if key == '-m': args['matrix'] = True\n \n for key in ['pfamdir', 'extension']:\n if key.endswith(\"file\"):\n if not args_file_exists(args, key): show_help()\n elif key.endswith(\"dir\"):\n if not args_dir_exists(args, key): show_help()\n elif not args.has_key(key):\n print >> sys.stderr, \"missing argument\", key\n show_help()\n return args\n\n \n# =============================================================================\ndef get_domain_counts(ifile):\n chash = {}\n fo = open(ifile)\n for line in fo:\n pid, did = line.strip().split(\"\\t\")\n if not chash.has_key(did): chash[did] = 0\n chash[did] += 1\n fo.close()\n return chash\n\n\n# =============================================================================\ndef gather_input(idir, ext):\n hash = {}\n for filename in glob.glob(idir + '/' + ext):\n species = os.path.split(filename)[1]\n species = species[:species.index(ext[1:])]\n hash[species] = filename\n return hash\n\n# =============================================================================\ndef output_table(species2domain2count):\n species = species2domain2count.keys()\n species.sort()\n alldids = {}\n for spec, chash in species2domain2count.iteritems():\n for did, count in chash.iteritems(): alldids[did] = 1\n \n print string.join([\"DID\"] + species, \"\\t\")\n for did in alldids.keys():\n out = did\n for spec in species: out += \"\\t\" + str(species2domain2count[spec].get(did,0))\n print out\n \n\n# =============================================================================\ndef score_pair(v1, v2, method=1):\n if method == 1:\n cor, p = stats.correlate(v1, v2)\n return cor\n elif method == 2:\n x1, x2 = [], []\n for i in range(len(v1)):\n if v1[i] != 0 or v2[i] != 0: x1.append(v1[i]), x2.append(v2[i])\n cor, p = stats.correlate(x1, x2)\n return cor\n elif method == 3:\n up = 0\n for i in range(len(v1)):\n if v1[i] != 0 and v2[i] != 0: up += 1\n return 1.0*up/len(v1)\n elif method == 4:\n up = 0\n for i in range(len(v1)):\n if v1[i] != 0 and v2[i] != 0: up += 1\n return up\n\n# =============================================================================\ndef output_matrix(species2domain2count):\n species = species2domain2count.keys()\n species.sort()\n alldids = {}\n for spec, chash in species2domain2count.iteritems():\n for did, count in chash.iteritems(): alldids[did] = 1\n \n print string.join([\"\"] + species, \",\")\n for sp1 in species:\n out = sp1\n for sp2 in species:\n v1, v2 = [], []\n for did in alldids.keys():\n v1.append(species2domain2count[sp1].get(did,0))\n v2.append(species2domain2count[sp2].get(did,0))\n sim = score_pair(v1, v2, method=4)\n out += \",\" + str(sim)\n print out\n \n \n\n# =============================================================================\n# === MAIN ====================================================================\n# 
=============================================================================\ndef main( args ):\n species2file = gather_input(args['pfamdir'], args['extension'])\n species2domain2count = {}\n for species, ifile in species2file.iteritems(): species2domain2count[species] = get_domain_counts(ifile)\n alldids = {}\n for species, chash in species2domain2count.iteritems():\n for did, count in chash.iteritems(): alldids[did] = 1\n \n if args['matrix']: output_matrix(species2domain2count)\n else: output_table(species2domain2count)\n \n \n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.43602362275123596, "alphanum_fraction": 0.44389763474464417, "avg_line_length": 29.696969985961914, "blob_id": "84b57c0427ce035e29b90ea995b4695763841a9e", "content_id": "724be5e0be361f206f1c8eff5e4b0d00c8248d6a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1016, "license_type": "permissive", "max_line_length": 79, "num_lines": 33, "path": "/python/base/stats.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "import numpy\nimport rpy2.robjects \nR = rpy2.robjects.r\n\n# =============================================================================\ndef correlate(x, y, method=\"pearson\"):\n \"\"\"\n performs a correlation between two vectors (assumed floats) and a given \n correlation method. returns cor.coefficient and p-value.\n \"\"\"\n xr = rpy2.robjects.FloatVector(x)\n yr = rpy2.robjects.FloatVector(y)\n res = R['cor.test'](xr, yr, method=method)\n #for i in range(len(res)):\n # k = res.names[i]\n # v = res[i]\n # print i, \"|\", k, \"=\", v\n p = res.subset('p.value')[0][0]\n cor = res.subset('estimate')[0][0]\n return cor, p\n \n# =============================================================================\ndef average(array):\n return numpy.average(array)\n \n# =============================================================================\ndef median(array):\n return numpy.median(array)\n \n\n# =============================================================================\ndef stdev(array):\n return numpy.std(array)\n \n" }, { "alpha_fraction": 0.492460697889328, "alphanum_fraction": 0.5001603960990906, "avg_line_length": 34.40909194946289, "blob_id": "8c1277704e9b7e22aa347d81155fb68582ad660e", "content_id": "856839ee5d2c77502c3b823fa3bee8c92963a42f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3117, "license_type": "permissive", "max_line_length": 123, "num_lines": 88, "path": "/python/generic/flat2matrix.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nimport math # match functions\nfrom low import * # custom functions, written by myself\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f flat file to import [tab delimited]\" )\n stdout( \" -a index of the first dimension key [default: 0]\" )\n stdout( \" -b index of the 
second dimension key [default: 1]\" )\n stdout( \" -v index of the value [default: 2]\" )\n stdout( \" -o order: comma-separated list of keys in which to output the matrix [default: alphabetically sorted]\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:a:b:v:o:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {'key1':0, 'key2':1, 'value':2}\n for key, value in keys:\n if key == '-f': args['file'] = value\n if key == '-a': args['key1'] = int(value)\n if key == '-b': args['key2'] = int(value)\n if key == '-v': args['value'] = int(value)\n if key == '-o': args['order'] = value.split(\",\")\n \n if not args.has_key('file'):\n stderr( \"import file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"import file does not exist.\" )\n show_help()\n \n\n return args\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n \n hash = {}\n keys = []\n fo = open( args.get('file') )\n for line in fo:\n col = line.strip().split(\"\\t\")\n key1, key2, value = col[args['key1']], col[args['key2']], col[args['value']]\n hash[key1 + '|||' + key2] = value\n if not key1 in keys: keys.append(key1)\n if not key2 in keys: keys.append(key2)\n fo.close()\n if args.has_key('order'): keys = args['order']\n else: keys.sort()\n\n print string.join(keys, \",\")\n for i in keys:\n sys.stdout.write(i)\n for j in keys:\n value = 'NA'\n if hash.has_key(i+'|||'+j): value = hash[i+'|||'+j]\n elif hash.has_key(j+'|||'+i): value = hash[j+'|||'+i]\n sys.stdout.write(\",\"+value)\n sys.stdout.write(\"\\n\")\n\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.480169415473938, "alphanum_fraction": 0.4824797809123993, "avg_line_length": 31.450000762939453, "blob_id": "50738a9a8226aabfaec495e9071d6db884ab33bb", "content_id": "acc18ddc21018327dbd8728f7674614c00ca4bc3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2597, "license_type": "permissive", "max_line_length": 93, "num_lines": 80, "path": "/python/geneontology/goid2name-from-obo.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nfrom low import * # custom functions, written by myself\nfrom goterm import GOTerm\nfrom collections import defaultdict\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f go obo file\" )\n stdout( \" \" )\n 
sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['obo'] = value\n \n if not args.has_key('obo'):\n stderr( \"obo file argument missing.\" )\n show_help()\n elif not file_exists( args.get('obo') ):\n stderr( \"obo file does not exist.\" )\n show_help()\n \n return args\n\n \n# =============================================================================\ndef read_obo( file ):\n hash = {}\n goterm = {}\n fo = open(file)\n for line in fo:\n line = line.rstrip()\n if line.startswith(\"[Term]\") or line.startswith(\"[Typedef]\"):\n if goterm.has_key('id') and goterm.has_key('name'): hash[goterm['id']] = goterm['name']\n goterm = {}\n elif line.startswith(\"id:\"):\n goterm['id'] = line.split()[1]\n elif line.startswith(\"name:\"):\n goterm['name'] = string.join(line.split()[1:], \" \")\n # flush the final term as well: no [Term] header follows it at end of file\n if goterm.has_key('id') and goterm.has_key('name'): hash[goterm['id']] = goterm['name']\n fo.close()\n print >> sys.stderr, \"goterms read from obo: %s\" % len(hash)\n return hash\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n \n gohash = read_obo(args['obo'])\n for goid, goname in gohash.iteritems():\n print goid + \"\\t\" + goname\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5105866193771362, "alphanum_fraction": 0.5154460072517395, "avg_line_length": 35, "blob_id": "5ebee0b863f843c14c5c41188c8dbef0d26afeb9", "content_id": "1ee7ef9fa2651bb324c912931b7cc929123d8e61", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2881, "license_type": "permissive", "max_line_length": 211, "num_lines": 80, "path": "/python/misa/import-into-sqlite3.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # command line argument handling\nimport sqlite3\nfrom low import * # custom functions, written by myself\nfrom misa import MisaSSRspecies\nfrom collections import defaultdict\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f misa output file with an additional first column = speciesname\" )\n stdout( \" -d db file\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right 
arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:d:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['file'] = value\n if key == '-d': args['db'] = value\n \n if not args.has_key('file'):\n stderr( \"misa file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"misa file does not exist.\" )\n show_help()\n \n return args\n\n# =============================================================================\ndef init_db(conn):\n conn.execute(\"CREATE TABLE IF NOT EXISTS ssrs(id INTEGER PRIMARY KEY ASC, species VARCHAR(4), chr VARCHAR(50), startpos INTEGER, endpos INTEGER, ssr_type VARCHAR(2), motif VARCHAR(20), repeats INTEGER)\")\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n\n conn = sqlite3.connect(args['db'])\n init_db(conn)\n \n fo = open(args['file'])\n for line in fo:\n if line.startswith(\"ID\\t\"): continue\n m = MisaSSRspecies(line)\n sql = \"INSERT INTO ssrs(species, chr, startpos, endpos, ssr_type, motif, repeats) VALUES (\\'%s\\', \\'%s\\', %s, %s, \\'%s\\', \\'%s\\', %s)\" %(m.species, m.geneid, m.startpos, m.endpos, m.type, m.motif, m.repeats)\n conn.execute(sql)\n res = conn.execute(\"SELECT COUNT(*) FROM ssrs\")\n entries = res.fetchall()[0][0]\n print \"done. entries added:\", entries\n conn.commit()\n conn.close()\n\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.45157334208488464, "alphanum_fraction": 0.45606866478919983, "avg_line_length": 27.78823471069336, "blob_id": "013259aa2368dcf310f7cb91275704173382044e", "content_id": "554d63491659049f425442e6b1b824e26444873a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2447, "license_type": "permissive", "max_line_length": 83, "num_lines": 85, "path": "/python/fasta/fasta-sort.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular expressions\nimport getopt\t\t\t\t\t# comand line argument handling\nfrom low import *\t\t\t# custom functions, written by myself\nimport anydbm\n\n# =============================================================================\t\ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f fasta file\" )\n stdout( \" \" )\n\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help()\t\n\n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:m:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f':\targs['aln'] = 
value\n \n if not args.has_key('aln'):\n stderr( \"fasta file missing.\" )\n show_help()\n if not file_exists( args.get('aln') ):\n stderr( \"fasta file does not exist.\" )\n show_help()\n \n return args\n\n# =============================================================================\n# =============================================================================\ndef main( args ):\n\n #sys.stderr.write(args.get('aln') + \"\\t\")\n #sys.stderr.flush()\n # create evolver control file based on the M0 out file\n\n hash = {}\n id = \"\"\n fo = open( args.get('aln') )\n for line in fo:\n line = line.rstrip()\n if line.startswith(\">\"): \n id = line[1:]\n hash[id] = \"\"\n else:\n hash[id] += line\n fo.close()\n\n sorted_keys = hash.keys()\n sorted_keys.sort()\n for id in sorted_keys:\n print \">\" + id\n seq = hash[id]\n i = 0\n while i < len(seq):\n end = min([i+60, len(seq)])\n print seq[i:end]\n i += 60\n\n \n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.4670981764793396, "alphanum_fraction": 0.47033441066741943, "avg_line_length": 26.264705657958984, "blob_id": "30245adbee63e19b68718f993aee357d5b17522b4", "content_id": "30aef0949341b3988ed9f8a5aeb357d5b17522b4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 927, "license_type": "permissive", "max_line_length": 79, "num_lines": 34, "path": "/python/base/fasta.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "import re\nimport gzip \n\n# =============================================================================\ndef get_sequence_hash(fastafile):\n seqhash = {}\n key = \"\"\n if fastafile.endswith('.gz'): fo = gzip.open(fastafile)\n else: fo = open(fastafile)\n for line in fo:\n if line.startswith(\">\"):\n gid = re.match(\">(\\S+)\", line).group(1)\n key = gid\n seqhash[key] = \"\"\n else:\n if key != \"\": seqhash[key] += line.strip()\n fo.close()\n return seqhash\n \n# =============================================================================\ndef get_length_hash(fastafile):\n lenhash = {}\n key = \"\"\n if fastafile.endswith('.gz'): fo = gzip.open(fastafile)\n else: fo = open(fastafile)\n for line in fo:\n if line.startswith(\">\"):\n gid = re.match(\">(\\S+)\", line).group(1)\n key = gid\n lenhash[key] = 0\n else:\n if key != \"\": lenhash[key] += len(line.strip())\n fo.close()\n return lenhash\n" }, { "alpha_fraction": 0.5708447098731995, "alphanum_fraction": 0.5708447098731995, "avg_line_length": 23.580644607543945, "blob_id": "130083161f859efc4dbf335e66d0f488555a57f9", "content_id": "93250623d98804970992b1efe9cf3586deb7b776", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 734, "license_type": "permissive", "max_line_length": 65, "num_lines": 31, "path": "/ruby/geneontology/go-eval.rb", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n=begin\n=end\n\nclass GOterm\n attr_accessor :id, :name, :namespace, :parents\n def initialize\n @parents = Array.new\n end\nend\n\ndef load_obo_definition(file)\n goterm = Hash.new\n obofile = File.open(file)\n # gets returns nil at EOF, so chomp on a separate line to avoid a NoMethodError\n while line = obofile.gets\n line = line.chomp\n if line =~ /^\\[Term\\]/\n g = GOterm.new\n elsif line =~ /^id:/\n g.id = 
line.scan(/^id:\\s+(GO:\\d+)/).first.first\n elsif line =~ /^name:/\n g.name = line.scan(/^name:\\s+(.*)$/).first.first\n elsif line =~ /^namespace:/\n g.namespace = line.scan(/^namespace:\\s+(\\S+)$/).first.first\n elsif line =~ /^is_a:/\n g.parents << line.scan(/^is_a:\\s+(GO:\\d+)/).first.first\n elsif line =~ /^$/\n goterm[g.id] = g\n end\n end\n return goterm\nend\n\n\n\n" }, { "alpha_fraction": 0.5502645373344421, "alphanum_fraction": 0.5590828657150269, "avg_line_length": 40.992591857910156, "blob_id": "e20ef0905debd942457a752ed3ba3658d5cc17f9", "content_id": "aa174e5f9fc7b7f3f00d7315e805689f0b76ff1d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5670, "license_type": "permissive", "max_line_length": 192, "num_lines": 135, "path": "/python/sciroko/sciroko-single-genome-stats.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nimport sqlite3\nimport glob\nfrom low import * # custom functions, written by myself\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f dir with fasta files on which SSR identification was done (*all-chromosome*.fasta)\" )\n stdout( \" -d sqlite3 database\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:d:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-d': args['db'] = value\n if key == '-f': args['fasta'] = value\n\n if not args.has_key('db'):\n stderr( \"db file argument missing.\" )\n show_help()\n elif not file_exists( args.get('db') ):\n stderr( \"db file does not exist.\" )\n show_help()\n \n if not args.has_key('fasta'):\n stderr( \"fasta dir argument missing.\" )\n show_help()\n elif not dir_exists( args.get('fasta') ):\n stderr( \"fasta dir does not exist.\" )\n show_help()\n \n return args\n\n \n# =============================================================================\ndef get_fasta_length(file):\n length = 0\n fo = open(file)\n for line in fo:\n line = line.rstrip()\n if line.startswith(\">\"): continue\n length += len(line.replace(\" \", ''))\n return length\n\n\n# =============================================================================\ndef get_length_hash(fastadir, species):\n print >> sys.stderr, \"getting genome sizes...\"\n fastahash = {}\n for spec in species:\n dest = fastadir + '/' + spec + '*all-chromosome*.fasta'\n files = glob.glob(dest)\n if len(files) == 0: \n sys.exit(\"no fasta file found for species %s (%s). aborting.\" %(spec, dest))\n elif len(files) > 1:\n sys.exit(\"more than 1 file found for species %s (%s). 
aborting.\" %(spec, dest))\n else:\n fastahash[spec] = get_fasta_length(files[0])\n return fastahash\n \n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n # TODO: swtich reading to the sqlite3 db\n # execute appropriate queries including the gene_features table\n conn = sqlite3.connect(args['db'])\n species = [str(e[0]) for e in conn.execute(\"SELECT DISTINCT species FROM ssrs\").fetchall()]\n lengthhash = get_length_hash(args['fasta'], species)\n\n sys.stdout.write(string.join([\"Species\", \"Length (bp)\", \"SSRs (bp)\", \"# SSRs\", \"SSR.coverage\", \"SSRs/kb\"], \"\\t\") + \"\\t\")\n sys.stdout.write(string.join([\"genomic.\" + str(e) for e in range(1,7)], \"\\t\") + \"\\t\")\n sys.stdout.write(string.join([\"exonic.\" + str(e) for e in range(1,7)], \"\\t\") + \"\\t\")\n sys.stdout.write(string.join([\"intronic.\" + str(e) for e in range(1,7)], \"\\t\") + \"\\t\")\n sys.stdout.write(string.join([\"intergenic.\" + str(e) for e in range(1,7)], \"\\t\") + \"\\t\")\n sys.stdout.write(\"\\n\")\n\n for spec in species:\n c = conn.execute(\"SELECT length FROM ssrs WHERE species='%s'\" % spec)\n ssrlength = 0\n rows = c.fetchmany()\n while rows:\n for l in [e[0] for e in rows]: ssrlength += l\n rows = c.fetchmany()\n ssrcount = conn.execute(\"SELECT COUNT(id) FROM ssrs WHERE species='%s'\" % spec).fetchall()[0][0]\n sys.stdout.write(spec + \"\\t\")\n sys.stdout.write(str(lengthhash[spec]) + \"\\t\")\n sys.stdout.write(str(ssrlength) + \"\\t\")\n sys.stdout.write(str(ssrcount) + \"\\t\")\n sys.stdout.write(str(1.0*ssrlength/lengthhash[spec]) + \"\\t\")\n sys.stdout.write(str(1.0*ssrcount/lengthhash[spec]*1000) + \"\\t\")\n ssrtypefreq = {}\n for row in conn.execute(\"SELECT LENGTH(motif),COUNT(id) FROM ssrs WHERE species='%s' GROUP BY LENGTH(motif)\" % spec).fetchall(): ssrtypefreq[str(row[0])] = row[1]\n for i in range(1,7): sys.stdout.write(str(ssrtypefreq[str(i)]) + \"\\t\")\n ssrtypefreq = {}\n for row in conn.execute(\"SELECT LENGTH(motif),COUNT(id) FROM ssrs WHERE species='%s' AND gene_feature='exon' GROUP BY LENGTH(motif)\" % spec).fetchall(): ssrtypefreq[str(row[0])] = row[1]\n for i in range(1,7): sys.stdout.write(str(ssrtypefreq[str(i)]) + \"\\t\")\n ssrtypefreq = {}\n for row in conn.execute(\"SELECT LENGTH(motif),COUNT(id) FROM ssrs WHERE species='%s' AND gene_feature='intron' GROUP BY LENGTH(motif)\" % spec).fetchall(): ssrtypefreq[str(row[0])] = row[1]\n for i in range(1,7): sys.stdout.write(str(ssrtypefreq[str(i)]) + \"\\t\")\n ssrtypefreq = {}\n for row in conn.execute(\"SELECT LENGTH(motif),COUNT(id) FROM ssrs WHERE species='%s' AND gene_feature IS NULL GROUP BY LENGTH(motif)\" % spec).fetchall(): ssrtypefreq[str(row[0])] = row[1]\n for i in range(1,7): sys.stdout.write(str(ssrtypefreq[str(i)]) + \"\\t\")\n sys.stdout.write(\"\\n\")\n\n conn.close()\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5555070638656616, "alphanum_fraction": 0.5613958835601807, "avg_line_length": 33.466163635253906, "blob_id": "cd4257daeba45ede1dbc317977f3c9c11b608cf9", "content_id": "d71a279b7ba75883570006c9241481e08be1936b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4585, "license_type": "permissive", 
"max_line_length": 147, "num_lines": 133, "path": "/python/fasta/rename-geneids.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nfrom collections import defaultdict\nfrom low import * # custom functions, written by myself\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -n <file> -p <file> [-i <string>]\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -n nucleotide fasta file (trancript, CDS, ...)\" )\n stdout( \" -p protein fasta file\" )\n stdout( \" -i string to prefix all unique IDs with\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hp:n:i:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n args['idpref'] = \"\"\n for key, value in keys:\n if key == '-p': args['pfasta'] = value\n if key == '-n': args['nfasta'] = value\n if key == '-i': args['idpref'] = value\n\n \n if not args.has_key('pfasta'):\n stderr( \"protein fasta file argument missing.\" )\n show_help()\n elif not file_exists( args.get('pfasta') ):\n stderr( \"protein fasta file does not exist.\" )\n show_help()\n\n if not args.has_key('nfasta'):\n stderr( \"nucleotide fasta file argument missing.\" )\n show_help()\n elif not file_exists( args.get('nfasta') ):\n stderr( \"nucleotide fasta file does not exist.\" )\n show_help()\n\n return args\n\n\ndef get_all_ids_from_fasta(file):\n ids = []\n fo = open(file)\n for line in fo:\n line = line.strip()\n if not line.startswith(\">\"): continue\n ids.append(line[1:])\n return ids\n\n\ndef get_unique_id_fragments(idArray):\n hash = defaultdict(int)\n part2id = {}\n for id in idArray:\n parts = id.split(\"|\")\n for part in parts: \n hash[part] += 1\n part2id[part] = id\n \n uniqHash = {}\n for part, count in hash.iteritems():\n if count == 1: uniqHash[part] = part2id[part]\n revUniqHash = defaultdict(list)\n \n for part, id in uniqHash.iteritems():\n revUniqHash[id].append(part)\n return uniqHash, revUniqHash\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n \n allPids = get_all_ids_from_fasta(args.get('pfasta'))\n allNids = get_all_ids_from_fasta(args.get('nfasta'))\n upPids, uHashPids = get_unique_id_fragments(allPids)\n upNids, uHashNids = get_unique_id_fragments(allNids)\n \n matchHash = {}\n matchPart = {}\n prefix = args.get('idpref')\n for pid, uparts in uHashPids.iteritems():\n debug = 0\n if pid == \"jgi|Araly1|878105|Al_scaffold_0002_2699\": debug = 1\n for upart in uparts:\n if debug: print >> sys.stderr, upart, pid\n if upNids.has_key(upart):\n nid = upNids[upart]\n if debug: print >> 
sys.stderr, \"match to\", nid\n if matchHash.has_key(pid):\n if debug: print >> sys.stderr, \"ERROR: matching Hash already contains an association with PID\", pid, \"=>\", matchHash[pid]\n continue\n if matchHash.has_key(nid):\n if debug: print >> sys.stderr, \"ERROR: matching Hash already contains an association with NID\", nid, \"=>\", matchHash[nid]\n continue\n matchHash[pid] = nid\n matchHash[nid] = pid\n matchPart[pid + \"$$$\" + nid] = upart\n print string.join([prefix+upart, pid, nid], \"\\t\")\n break\n if not matchHash.has_key(pid):\n print >> sys.stderr, \"no match for PID\", pid\n\n if len(matchPart) == len(allPids) and len(matchPart) == len(allNids):\n print >> sys.stderr, \"everything is well: we have found an association match between all protein and nucleotide IDs\"\n else:\n print >> sys.stderr, \"ERROR: unequal number of matches (%s) and protein/nucleotide IDs (%s/%s)\" %( len(matchPart), len(allPids), len(allNids) )\n\n \n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.4783044159412384, "alphanum_fraction": 0.48030707240104675, "avg_line_length": 30.536842346191406, "blob_id": "6df05c361a6762cbda6f01d0414644e4a3248551", "content_id": "048a1ea8c98ff64fc8f983a3e26ad42ee4aca4f8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2996, "license_type": "permissive", "max_line_length": 82, "num_lines": 95, "path": "/python/geneontology/go-from-blastout.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular expressions\nimport getopt\t\t\t\t\t# comand line argument handling\nfrom low import *\t\t\t# custom functions, written by myself\nimport anydbm\t\t\t\t\t# index databases (file hash)\nfrom Bio import SeqIO # biopython stuff, to parse fasta files for instance\n\n# =============================================================================\t\ndef show_help( ):\n\t\"\"\" displays the program parameter list and usage information \"\"\"\n\tstdout( \"usage: \" + sys.argv[0] + \" -c <path> -o <path>\" )\n\tstdout( \" \" )\n\tstdout( \" option description\" )\n\tstdout( \" -h help (this text here)\" )\n\tstdout( \" -f blast.out file\" )\n\tstdout( \" \" )\n\t\n\tsys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n\t\"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\tif len ( sys.argv ) == 1:\n\t\tstderr( \"no arguments provided.\" )\n\t\tshow_help()\t\n\t\n\ttry: # check for the right arguments\n\t\tkeys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n\texcept getopt.GetoptError:\n\t\tstderr( \"invalid arguments provided.\" )\n\t\tshow_help()\n\t\n\targs = {}\n\tfor key, value in keys:\n\t\tif key == '-f':\targs['file'] = value\n\t\t\t\t\n\tif not args.has_key('file'):\n\t\tstderr( \"blast.out file missing.\" )\n\t\tshow_help()\n\tif not file_exists( args.get('file') ):\n\t\tstderr( \"blast.out file does not exist.\" )\n\t\tshow_help()\n\t\t\n\treturn args\n\n\n\n# =============================================================================\ndef parse_descr( text ):\n hash = {}\n if not re.search(\"GO:\\d+.*evidence\", text): \n sys.stderr.write(\"return None.\\n\")\n return hash\n for match in re.finditer( 
'(GO:\\d+)\\s*\\\"([^\"]+)\\\"\\s*evidence', text ):\n id = match.group(1)\n description = match.group(2)\n hash[ id ] = description\n return hash\n \n\n# =============================================================================\n# =============================================================================\ndef main( args ):\n fo = open( args.get('file') )\n descr_index = None\n for line in fo:\n line = line.rstrip()\n cols = line.split(\"\\t\")\n if descr_index == None:\n for index, col in enumerate(cols):\n if re.search(\"GO:\\d+\", col):\n descr_index = index\n break\n descr = cols[ descr_index ]\n go_hash = parse_descr( descr )\n for goterm, godescr in go_hash.iteritems():\n L = []\n for index, col in enumerate(cols):\n if index == descr_index:\n L.append(goterm)\n L.append(godescr)\n else:\n L.append(col)\n print string.join(L,\"\\t\")\n fo.close()\n\t\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.5121668577194214, "alphanum_fraction": 0.5152568817138672, "avg_line_length": 34.94444274902344, "blob_id": "c4a1d4865465ce3fc900066701e7adc6b18ebb7f", "content_id": "712e36e3ccdeff157160ff8d85790fb3272a0606", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2589, "license_type": "permissive", "max_line_length": 113, "num_lines": 72, "path": "/python/generic/add-basename-as-first-col.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nfrom low import * # custom functions, written by myself\nfrom collections import defaultdict\nimport fileinput\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f input file (will be rewritten on the fly!) 
- basename is everything before the first dot\" )\n stdout( \" -l basename to lower case\" )\n stdout( \" -u basename to upper case\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:ul\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {'lower':False, 'upper':False}\n for key, value in keys:\n if key == '-f': args['file'] = value\n if key == '-l': args['lower'] = True\n if key == '-u': args['upper'] = True\n \n if not args.has_key('file'):\n stderr( \"fasta file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"fasta file does not exist.\" )\n show_help()\n\n if args['lower'] and args['upper']:\n stderr( \"cannot select both lower and upper.\" )\n show_help()\n \n return args\n\n \n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n filename = os.path.split(args['file'])[1]\n basename = filename\n while basename.count(\".\") > 0: basename = os.path.splitext(basename)[0]\n if args['lower']: basename = basename.lower()\n if args['upper']: basename = basename.upper()\n for line in fileinput.input(args['file'],inplace=1):\n print basename + \"\\t\" + line.rstrip()\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5132356882095337, "alphanum_fraction": 0.5194037556648254, "avg_line_length": 35.36448669433594, "blob_id": "73a1b785d93348f7f61bfc65ae34aeb642d49afe", "content_id": "094cfe3d51fb0c229485462da6980f4f782eb7a9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3891, "license_type": "permissive", "max_line_length": 143, "num_lines": 107, "path": "/python/misa/add-localization-to-misa.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport getopt # comand line argument handling\nfrom collections import defaultdict\nfrom low import * # custom functions, written by myself\nfrom misa import MisaSSRspecies\nimport pickle\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" -d <gff-folder>\"\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -g gff3 file\" )\n stdout( \" -f misa file\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hg:f:\" )\n except 
getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-g': args['gff'] = value\n if key == '-f': args['misa'] = value\n \n if not args.has_key('gff'):\n print >> sys.stderr, \"gff file argument missing.\"\n show_help()\n elif not file_exists( args.get('gff') ):\n print >> sys.stderr, \"gff file does not exist.\"\n show_help()\n\n if not args.has_key('misa'):\n print >> sys.stderr, \"misa file argument missing.\"\n show_help()\n elif not file_exists( args.get('misa') ):\n print >> sys.stderr, \"misa file does not exist.\"\n show_help()\n\n return args\n\n# =============================================================================\ndef get_ssrs(file):\n hash = defaultdict(list)\n fo = open(file)\n for line in fo: \n if line.startswith(\"ID\\t\"): continue\n m = MisaSSRspecies(line)\n hash[m.species + '|' + m.geneid].append(m)\n fo.close()\n return hash\n\n# =============================================================================\ndef get_features(filename):\n type2abbrv = { 'exon':'E', 'intron':'I', 'five_prime_UTR':'5', 'three_prime_UTR':'3' }\n features = {}\n fo = open(filename)\n for line in fo: \n if line.startswith(\"#\") or len(line.rstrip()) == 0: continue\n columns = line.rstrip().split(\"\\t\")\n if len(columns) != 9: continue\n type = columns[2]\n if type != \"sequence_assembly\" and type != \"exon\" and type != \"intron\" and type != \"five_prime_UTR\" and type != \"three_prime_UTR\": continue\n chr, start, stop, strand, descr = columns[0], columns[3], columns[4], columns[6], columns[8]\n if type == \"sequence_assembly\": features[chr] = [\"i\"] * int(stop)\n else:\n for i in range(int(start)-1, int(stop)-1): features[chr][i] = type2abbrv[type]\n fo.close()\n print >> sys.stderr, \"features of all %s scaffolds loaded.\" % len(features)\n return features\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n Features = get_features(args['gff']) \n fo = open(args['misa'])\n for line in fo:\n if line.startswith(\"ID\\t\"): continue\n line = line.rstrip()\n columns = line.split(\"\\t\")\n key = columns[1]\n start, stop = int(columns[6])-1, int(columns[7])-1\n fstart, fstop = Features[key][start], Features[key][stop]\n if fstart != fstop: print >> sys.stderr, \"SSR spans two different features: %s %s/%s\" %( key, fstart, fstop )\n print line + \"\\t\" + fstart\n fo.close()\n \n\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n" }, { "alpha_fraction": 0.5582236647605896, "alphanum_fraction": 0.5615131855010986, "avg_line_length": 38.225807189941406, "blob_id": "d84fdc8f6535e466d5125077893596968ff73719", "content_id": "ff79549fc5355dde5ea1332800a04474dd78605d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6080, "license_type": "permissive", "max_line_length": 226, "num_lines": 155, "path": "/python/geneontology/go-enrichment2.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys\nimport rpy\nimport string\nimport getopt # comand line argument handling\nfrom low import * # custom functions, written by myself\n\n\n\n# ============================================================================= \ndef 
show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -a <path> -t <path> -m <N> -n <namespaces>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -a annotation file in topGO table format\" )\n stdout( \" -t test ids file\" )\n stdout( \" -e use ELIM instead of WEIGHT algorithm\" )\n stdout( \" -n list of namespaces to test. default: \\\"BP,CC,MF\\\"\" )\n stdout( \" -m minimum number of genes per GO term for the GO term to be tested. default: 1\" )\n stdout( \" -o test for over-representation\" )\n stdout( \" -u test for under-representation\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"ha:t:n:m:oue\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {'namespaces':[\"BP\", \"CC\", \"MF\"], 'min':1, 'over':False, 'under':False, 'algorithm':'weight01'}\n for key, value in keys:\n if key == '-a': args['annot'] = value\n if key == '-t': args['testset'] = value\n if key == '-m': args['min'] = int(value)\n if key == '-n': args['namespaces'] = value.split(\",\")\n if key == '-o': args['over'] = True\n if key == '-u': args['under'] = True\n if key == '-e': args['algorithm'] = 'elim'\n \n if not args.has_key('annot'):\n stderr( \"annot file argument missing.\" )\n show_help()\n elif not file_exists( args.get('annot') ):\n stderr( \"annot file does not exist.\" )\n show_help()\n \n if not args.has_key('testset'):\n stderr( \"testset file argument missing.\" )\n show_help()\n elif not file_exists( args.get('testset') ):\n stderr( \"testset file does not exist.\" )\n show_help()\n\n return args\n\n\n# =============================================================================\ndef init_R(under=False):\n R = rpy.r\n R('sink(\"/dev/null\")')\n try:\n R('invisible(capture.output(library(\"topGO\")))')\n except:\n try: \n R.source(\"http://bioconductor.org/biocLite.R\")\n R.biocLite('topGO')\n R.library('topGO')\n except:\n print \"Problem importing R libraries.\"\n sys.exit()\n \n if under:\n R('if(!isGeneric(\"GOFisherTestUnder\")) setGeneric(\"GOFisherTestUnder\", function(object) standardGeneric(\"GOFisherTestUnder\"))')\n R('setMethod(\"GOFisherTestUnder\", \"classicCount\", function(object) { contMat <- contTable(object); if(all(contMat == 0)) p.value <- 1 else p.value <- fisher.test(contMat, alternative = \"less\")$p.value; return(p.value) })')\n return R\n\n# =============================================================================\ndef test_for_overrepresentation(R, args):\n significant = []\n tmp = R('GOmap = readMappings(file = \"' + args['annot'] + '\")')\n tmp = R('refset = names(GOmap)')\n tmp = R('testset = scan(file=\"' + args['testset'] + '\", what=character())')\n tmp = R('genes_of_interest = factor(as.integer(refset %in% testset))')\n tmp = R('names(genes_of_interest) <- refset')\n for ontology in args['namespaces']:\n tmp = R('tgData = new(\"topGOdata\", ontology = \"' + ontology + '\", allGenes = genes_of_interest, nodeSize = ' + str(args['min']) + ', annot = annFUN.gene2GO, gene2GO = GOmap)')\n pvalueHash = R('score(runTest(tgData, 
algorithm=\"%s\", statistic=\"fisher\"))' %(args['algorithm']))\n keys, pvalues = [], []\n for key, p in pvalueHash.iteritems():\n keys.append(key)\n pvalues.append(p)\n tmp = R.assign('pvalues',pvalues)\n padjusted = R('p.adjust(pvalues, method=\"fdr\")')\n for i in range(len(keys)):\n goterm = keys[i]\n p = pvalues[i]\n fdr = padjusted[i]\n if p > 0.05: continue\n significant.append([\"O\", ontology, goterm, str(p), str(fdr)])\n return significant\n\n# =============================================================================\ndef test_for_underrepresentation(R, args):\n significant = []\n tmp = R('GOmap = readMappings(file = \"' + args['annot'] + '\")')\n tmp = R('refset = names(GOmap)')\n tmp = R('testset = scan(file=\"' + args['testset'] + '\", what=character())')\n tmp = R('genes_of_interest = factor(as.integer(refset %in% testset))')\n tmp = R('names(genes_of_interest) <- refset')\n for ontology in args['namespaces']:\n tmp = R('tgData = new(\"topGOdata\", ontology = \"' + ontology + '\", allGenes = genes_of_interest, nodeSize = ' + str(args['min']) + ', annot = annFUN.gene2GO, gene2GO = GOmap)')\n tmp = R('test.stat <- new(\"weightCount\", testStatistic = GOFisherTestUnder, name =\"Fisher test underrepresentation\")')\n pvalueHash = R('score(getSigGroups(tgData, test.stat))')\n keys, pvalues = [], []\n for key, p in pvalueHash.iteritems():\n keys.append(key)\n pvalues.append(p)\n tmp = R.assign('pvalues',pvalues)\n padjusted = R('p.adjust(pvalues, method=\"fdr\")')\n for i in range(len(keys)):\n goterm = keys[i]\n p = pvalues[i]\n fdr = padjusted[i]\n if p > 0.05: continue\n significant.append([\"U\", ontology, goterm, str(p), str(fdr)])\n return significant\n\n\n# =============================================================================\ndef main(args):\n \n R = init_R(args['under'])\n fw = open(args['testset'] + \".ORA\", \"w\")\n if args['over']: \n results = test_for_overrepresentation(R, args)\n for r in results: fw.write(string.join(r, \"\\t\") + \"\\n\")\n if args['under']: \n results = test_for_underrepresentation(R, args)\n for r in results: fw.write(string.join(r, \"\\t\") + \"\\n\")\n fw.close()\n\n\n\nargs = handle_arguments()\nmain( args )\n" }, { "alpha_fraction": 0.5159345269203186, "alphanum_fraction": 0.5199540853500366, "avg_line_length": 32.480770111083984, "blob_id": "2e02f4058fe54ce395a326489e73764d644298f9", "content_id": "d0a511aff2de056582f96648fa85fbbc6841b04d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3483, "license_type": "permissive", "max_line_length": 83, "num_lines": 104, "path": "/python/geneontology/goflat2grouptable.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nimport math # match functions\nfrom low import * # custom functions, written by myself\nfrom collections import defaultdict\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path> -g <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f GO flat file to import [tab delimited]\" )\n stdout( \" -g gene_id to group_id table [tab delimited]\" )\n 
stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:g:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['file'] = value\n if key == '-g': args['group'] = value\n \n if not args.has_key('file'):\n stderr( \"import file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"import file does not exist.\" )\n show_help()\n \n if not args.has_key('group'):\n stderr( \"group file argument missing.\" )\n show_help()\n elif not file_exists( args.get('group') ):\n stderr( \"group file does not exist.\" )\n show_help()\n \n return args\n\n\n# =============================================================================\ndef get_gene2groups(file):\n hash = {}\n groups = {}\n fo = open(file)\n for line in fo:\n if line.startswith(\"#\"): continue\n if not len(line.split(\"\\t\")) == 2: continue\n geneid, group = line.rstrip().split(\"\\t\")\n if not hash.has_key(geneid): hash[geneid] = []\n hash[geneid].append(group)\n groups[group] = 1\n fo.close()\n return hash, groups.keys()\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n \n gene2groups, groups = get_gene2groups(args['group'])\n groups.sort()\n\n hash = {}\n fo = open( args.get('file') )\n for line in fo:\n line = line.strip()\n geneid, goterm = line.split(\"\\t\")\n if geneid.count(\" \") > 0: geneid = geneid[:geneid.index(\" \")]\n if not gene2groups.has_key(geneid): continue\n if not hash.has_key(goterm): hash[goterm] = defaultdict(int)\n for g in gene2groups[geneid]: hash[goterm][g] += 1\n fo.close()\n print string.join([\"GO.term\"] + groups, \"\\t\")\n for goterm, counthash in hash.iteritems():\n #print goterm\n #print hash[goterm]\n #for g in groups:\n # print groups, g, counthash[g]\n counts = [counthash[g] for g in groups]\n if sum(counts) < 5: continue\n counts = [str(c) for c in counts]\n print string.join([goterm] + counts, \"\\t\")\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5364431738853455, "alphanum_fraction": 0.5433030128479004, "avg_line_length": 30.187166213989258, "blob_id": "7a82ecb541ea0044b2417d2246e7ec2aec6380b5", "content_id": "57a1a147029f3d635fc37c86a8cf3785e21a2d42", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5831, "license_type": "permissive", "max_line_length": 133, "num_lines": 187, "path": "/python/geneontology/goterms-to-xdom.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t# low level handling, such as command line stuff\nimport string\t\t\t# string methods available\nimport re\t\t\t\t\t# regular expressions\nimport getopt\t\t\t# comand line argument handling\nimport anydbm\t\t\t\t\t# index databases (file hash)\nfrom low import *\t# custom functions, written by myself\nfrom 
Bio import SeqIO # biopython stuff, to parse fasta files for instance\n\nclass GOterm:\n\tdef __init__(self):\n\t\tself.goid = \"\"\n\t\tself.name = \"\"\n\t\tself.namespace = \"\"\n\t\tself.definition = \"\"\n\t\tself.synonym = \"\"\n\t\tself.is_a = []\n\t\tself.alt_id = []\n\t\t\n\tdef tostring(self):\n\t\tS = \"\"\n\t\tS += \"id: \" + self.goid\n\t\tS += \"\\tname: \" + self.name\n\t\tS += \"\\tnamespace: \" + self.namespace\n\t\tS += \"\\tdef: \" + self.definition\n\t\tS += \"\\tsynonym: \" + self.synonym\n\t\tS += \"\\talt_id: \" + string.join(self.alt_id,',')\n\t\tS += \"\\tis_a: \" + string.join(self.is_a,',')\n\t\treturn S\n\nclass OBOParser:\n\t\n\tdef __init__(self,file):\n\t\tself.file = file\n\t\tself.filehandle = open( file )\n\t\tself.alt_ids = {}\n\t\t\n\tdef next(self):\n\t\tngo = None\n\t\tfor line in self.filehandle:\n\t\t\tif line.startswith('[Term]'):\tngo = GOterm()\n\t\t\tif ngo == None: continue\n\t\t\tif line.startswith('id:'): ngo.goid = re.search( '^id:\\s+(.*)$', line ).group(1)\n\t\t\tif line.startswith('name:'): ngo.name = re.search( '^name:\\s+(.*)$', line ).group(1)\n\t\t\tif line.startswith('namespace:'): ngo.namespace = re.search( '^namespace:\\s+(.*)$', line ).group(1)\n\t\t\tif line.startswith('def:'): ngo.definition = re.search( '^def:\\s+\"(.*)\"', line ).group(1)\n\t\t\tif line.startswith('synonym:'): ngo.synonym = re.search( '^synonym:\\s+\"(.*)\"', line ).group(1)\n\t\t\tif line.startswith('alt_id:'): self.alt_ids[ re.search( '^alt_id:\\s+(.*)$', line ).group(1) ] = ngo.goid\n\t\t\tif line.startswith('is_a:'): ngo.is_a.append( re.search( '^is_a:\\s+(.*)$', line ).group(1) )\n\t\t\tif re.match( '$', line): break\n\t\treturn ngo\n\t\t\n\tdef get_alt_ids(self):\n\t\treturn self.alt_ids\n\t\n\tdef close(self):\n\t\tself.filehandle.close()\n\t\n\t\n\t\t\n\n# =============================================================================\t\ndef show_help( ):\n\t\"\"\" displays the program parameter list and usage information \"\"\"\n\tstdout( \"usage: \" + sys.argv[0] + \" -f <path> -o <path> [ -e ]\" )\n\tstdout( \" \" )\n\tstdout( \" option description\" )\n\tstdout( \" -h help (this text here)\" )\n\tstdout( \" -f parsed gene ontology blast file\" )\n\tstdout( \" -o OBO file\" )\n\tstdout( \" -e evalue threshold. 
default: 10.0\" )\n\tstdout( \" \" )\n\tsys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n\t\"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\tif len ( sys.argv ) == 1:\n\t\tstderr( \"no arguments provided.\" )\n\t\tshow_help()\t\n\t\n\ttry: # check for the right arguments\n\t\tkeys, values = getopt.getopt( sys.argv[1:], \"hf:o:e:\" )\n\texcept getopt.GetoptError:\n\t\tstderr( \"invalid arguments provided.\" )\n\t\tshow_help()\n\t\n\targs = {}\n\tfor key, value in keys:\n\t\tif key == '-f':\targs['file'] = value\n\t\tif key == '-o':\targs['OBO'] = value\n\t\tif key == '-e':\targs['evalue'] = float(value)\n\t\t\t\t\n\tif not args.has_key('file'):\n\t\tstderr( \"GO file missing.\" )\n\t\tshow_help()\n\tif not file_exists( args.get('file') ):\n\t\tstderr( \"GO file does not exist.\" )\n\t\tshow_help()\n\n\tif not args.has_key('OBO'):\n\t\tstderr( \"OBO file missing.\" )\n\t\tshow_help()\n\tif not file_exists( args.get('OBO') ):\n\t\tstderr( \"OBO file does not exist.\" )\n\t\tshow_help()\n\t\t\n\tif not args.has_key('evalue'):\n\t\targs['evalue'] = 10.0\n\t\t\n\treturn args\n\n\n# =============================================================================\ndef get_obo_hash( file ):\n\tobohash = {}\n\tOP = OBOParser( file )\n\tgotermcount = 0\n\twhile (1):\n\t\tgoterm = OP.next()\n\t\tif goterm == None: break\n\t\tobohash[ goterm.goid ] = goterm\n\t\tgotermcount += 1\n\t\tsys.stderr.write( \"\\r processing OBO file ... | goterms caught: %d\" %(gotermcount) )\n\t\t\t\n\talt_ids = OP.get_alt_ids()\n\tfor altid, goid in alt_ids.iteritems():\n\t\tobohash[ altid ] = obohash.get(goid)\n\t\n\treturn obohash\n\tOP.close()\n\t\n# =============================================================================\n# =============================================================================\ndef main( args ):\n\t\n\tobohash = get_obo_hash( args.get('OBO') )\n\t\n\toldquery = ''\n\tquerygoterms = {}\n\tregex = re.compile('GO:\\d+')\n\tentrycount = 0\n\tgotermcount = 0\n\tsout, serr = catch_bash_cmd_output(\"cat %s | wc -l\" %args.get('file'))\n\ttotalentries = int(sout)\n\t\n\tfo = open( args.get('file') )\n\tfor line in fo:\n\t\tline = line.replace('\\n','')\n\t\tcolumns = line.split('\\t')\n\t\tentrycount += 1\n\t\tsys.stderr.write( \"\\r entries processed: %01.2f%% | goterms caught: %d\" %( 100.0*entrycount/totalentries, gotermcount ))\n\t\t# not hit found: skip\n\t\tif columns[1] == 'no_hit_found': continue\n\t\t# else\n\t\tqid, hitid, evalue, descr = columns\n\t\tif oldquery != qid:\n\t\t\toldquery = qid\n\t\t\tquerygoterms.clear()\n\t\t\tprint \">%s\" %qid\n\t\tassoc_goterms = re.findall( regex, descr )\n\t\tfor e in assoc_goterms:\n\t\t\tif querygoterms.has_key(e): continue\n\t\t\tquerygoterms[e] = 1\n\t\t\tif float(evalue) < args.get('evalue'):\n\t\t\t\tList = [e]\n\t\t\t\tif obohash.has_key(e):\n\t\t\t\t\tList.append( obohash.get(e).namespace )\n\t\t\t\t\tList.append( obohash.get(e).name )\n\t\t\t\t\t#List.append( obohash.get(e).namespace )\n\t\t\t\telse:\n\t\t\t\t\tstderr( \"GO:id not found in the OBO hash: \\\"%s\\\"\" %e )\n\t\t\t\tList.append(evalue)\n\t\t\t\tList.append(hitid)\n\t\t\t\tprint string.join( List, '\\t' )\n\t\t\t\tgotermcount += 1\n\tfo.close()\n\tsys.stderr.write( \"\\r entries processed: %01.2f%% | goterms caught: %d\\n\" %( 100.0*entrycount/totalentries, gotermcount ))\n\t\n\t\n# =============================================================================\n# === MAIN 
====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )" }, { "alpha_fraction": 0.5310199856758118, "alphanum_fraction": 0.5345250368118286, "avg_line_length": 28.112245559692383, "blob_id": "b6dcfb5597af347db64996069e622e6b3aa4acd8", "content_id": "889582e418a15454d533185e889b682887ebe2c3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2853, "license_type": "permissive", "max_line_length": 82, "num_lines": 98, "path": "/python/blast/parse-best-blast-hit.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t# low level handling, such as command line stuff\nimport string\t\t\t# string methods available\nimport re\t\t\t\t\t# regular expressions\nimport getopt\t\t\t# comand line argument handling\nimport math\t\t\t\t# match functions\nfrom low import *\t# custom functions, written by myself\n\n\n\n# =============================================================================\t\ndef show_help( ):\n\t\"\"\" displays the program parameter list and usage information \"\"\"\n\tstdout( \"usage: \" + sys.argv[0] + \" -b <path> [-f <path>]\" )\n\tstdout( \" \" )\n\tstdout( \" option description\" )\n\tstdout( \" -h help (this text here)\" )\n\tstdout( \" -f path to the fasta file containing the record ids.\" )\n\tstdout( \" -b path to the blast.best-hit file of swiss-prot\" )\n\tstdout( \" \" )\n\t\n\tsys.exit(1)\n\n\n# =============================================================================\ndef handle_arguments():\n\t\"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\tif len ( sys.argv ) == 1:\n\t\tstderr( \"no arguments provided.\" )\n\t\tshow_help()\t\n\t\n\ttry: # check for the right arguments\n\t\tkeys, values = getopt.getopt( sys.argv[1:], \"hb:f:\" )\n\texcept getopt.GetoptError:\n\t\tstderr( \"invalid arguments provided.\" )\n\t\tshow_help()\n\t\t\n\tblastbesthitfile, recordfile = '', ''\n\tfor key, value in keys:\n\t\tif key == '-b':\n\t\t\tif not file_exists( value ):\n\t\t\t\tstderr( \"invalid path in \" + key )\n\t\t\t\tshow_help()\n\t\t\telse:\n\t\t\t\tblastbesthitfile = value\n\t\t\n\t\tif key == '-f':\n\t\t\tif not file_exists( value ):\n\t\t\t\tstderr( \"invalid path in \" + key )\n\t\t\t\tshow_help()\n\t\t\telse:\n\t\t\t\trecordfile = value\n\t\t\n\tif blastbesthitfile == '':\n\t\tstderr( \"blast.best-hit file missing.\" )\n\t\tshow_help()\n\telif not file_exists( blastbesthitfile ):\n\t\tstderr( \"blast.best-hit file does not exist.\" )\n\t\tshow_help()\n\t\t\n\tif recordfile == '':\n\t\tstderr( \"recordfile missing.\" )\n\t\tshow_help()\n\telif not file_exists( recordfile ):\n\t\tstderr( \"recordfile does not exist.\" )\n\t\tshow_help()\n\t\t\n\treturn blastbesthitfile, recordfile\n\n\n# =============================================================================\ndef parse_best_blast_hits( blastbesthitfile, recordfile ):\n\t\"\"\" \"\"\"\n\t\n\trecords = []\n\tfo = open( recordfile, 'r' )\n\tfor line in fo:\n\t\trecords.append(line.strip().replace('\\n',''))\n\t\n\tfo = open( blastbesthitfile, 'r' )\n\tfor line in fo:\n\t\tcolumns = line.split()\n\t\tif columns[0] in records:\n\t\t\tprint columns[0]\n\t\t\tprint \" hit :\", string.join(columns[10:], ' ')[1:]\n\t\t\tprint \" evalue:\", columns[4], \"\\n\"\n\t\t\n\tfo.close()\n\t\n\t\n\n# 
=============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nblastbesthitfile, recordfile = handle_arguments()\nparse_best_blast_hits( blastbesthitfile, recordfile )\n" }, { "alpha_fraction": 0.5842911601066589, "alphanum_fraction": 0.6005747318267822, "avg_line_length": 20.306121826171875, "blob_id": "c6b362d26f6dc9940873d06d5ce9574a3f90a01e", "content_id": "11158415a6e6285b2b2f17e737f9439f2b4f9b24", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 1044, "license_type": "permissive", "max_line_length": 70, "num_lines": 49, "path": "/ruby/generic/wordwrap.rb", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\nrequire 'ftools'\n\ndef exit_with_usage\n  STDOUT.print \"\"\" \n  #{$0} <file> [<line length>]\n  \n  this script inserts newlines in front of all words that would exceed\n  a given threshold for max line length.\n  default max length: 80\n\n  \"\"\"\n  exit(1)\nend\n\nexit_with_usage unless ARGV.length > 0\nexit_with_usage unless File.exists? ARGV[0]\nMAXLENGTH = (ARGV[1] || 80).to_i\n\nSTDERR.puts \"INPUT FILE:\\t%s\" % ARGV[0]\nSTDERR.puts \"MAXLENGTH:\\t%s\" % MAXLENGTH\n\nf = File.open(ARGV[0])\nwhile line = f.gets\n  # compare against the user-supplied threshold, not the hardcoded default of 80\n  if line.length < MAXLENGTH\n    STDOUT.print line\n  else\n    words = line.chomp.split\n    first = line[0..0]\n    pos = 0\n    newline = Array.new\n    words.each do |word|\n      newline << word\n      if newline.join(\" \").length > MAXLENGTH\n        newline[-1] = \"\\n\"\n        STDOUT.print newline.join(\" \")\n        if first == \"#\" or first == \"%\"\n          newline = [first, word]\n        else\n          newline = [word]\n        end\n      end\n    end\n    STDOUT.puts newline.join(\" \").chomp\n  end\nend\nf.close\n\nSTDERR.puts \"STATUS: \\tdone.\\n\"\n" }, { "alpha_fraction": 0.5302481055259705, "alphanum_fraction": 0.5475327372550964, "avg_line_length": 42.20481872558594, "blob_id": "6198d8bfdd62ee8887afa06a11854552734d4bb5", "content_id": "858e47580e00e843a7b8a76dccc73e1143691ff7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3587, "license_type": "permissive", "max_line_length": 203, "num_lines": 83, "path": "/python/misa/misa-global-stats.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys    # low level handling, such as command line stuff\nimport string      # string methods available\nimport re         # regular expressions\nimport getopt     # command line argument handling\nfrom low import *  # custom functions, written by myself\nfrom collections import defaultdict\nfrom misa import MisaSSRspecies\n\n\n# ============================================================================= \ndef show_help( ):\n  \"\"\" displays the program parameter list and usage information \"\"\"\n  stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n  stdout( \" \" )\n  stdout( \" option    description\" )\n  stdout( \" -h        help (this text here)\" )\n  stdout( \" -f        all.misa out file\" )\n  stdout( \" \" )\n  sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n  \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n  if len ( sys.argv ) == 1:\n    stderr( \"no arguments provided.\" )\n    show_help() \n  \n  try: # check for the right arguments\n    keys, values = getopt.getopt( sys.argv[1:], \"hf:\" 
)\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['file'] = value\n \n if not args.has_key('file'):\n stderr( \"fasta file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"fasta file does not exist.\" )\n show_help()\n \n return args\n\n \n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n specieshash = {}\n fo = open(args['file'])\n for line in fo:\n m = MisaSSRspecies(line)\n if not specieshash.has_key(m.species): specieshash[m.species] = {'gc':defaultdict(int), 'p1':0, 'p2':0, 'p3':0, 'p4':0, 'p5':0, 'p6':0, 'p1l':0, 'p2l':0, 'p3l':0, 'p4l':0, 'p5l':0, 'p6l':0, 'ssrl':0}\n # gc\n for char in ['A', 'T', 'G', 'C']:\n specieshash[m.species]['gc'][char] += m.motif.count(char) * m.repeats\n # count repeats and coverage\n specieshash[m.species][m.type] += 1\n specieshash[m.species][m.type + 'l'] += m.length\n specieshash[m.species]['ssrl'] += m.length\n\n speciesarray = specieshash.keys()\n speciesarray.sort()\n print \"#species\\tssr.gc\\tp1\\tp2\\tp3\\tp4\\tp5\\tp6\\tp1l\\tp2l\\tp3l\\tp4l\\tp5l\\tp6l\\tp1a\\tp2a\\tp3a\\tp4a\\tp5a\\tp6a\\tssrl\"\n for species in speciesarray:\n total = sum(specieshash[species]['gc'].values())\n gc = 1.0 * (specieshash[species]['gc']['G'] + specieshash[species]['gc']['C']) / total\n repeats = [specieshash[species]['p1'], specieshash[species]['p2'], specieshash[species]['p3'], specieshash[species]['p4'], specieshash[species]['p5'], specieshash[species]['p6']]\n repeats = [str(r) for r in repeats]\n coverage = [specieshash[species]['p1l'], specieshash[species]['p2l'], specieshash[species]['p3l'], specieshash[species]['p4l'], specieshash[species]['p5l'], specieshash[species]['p6l']]\n coverage = [str(c) for c in coverage]\n avglength = []\n for i in range(len(repeats)): avglength.append(str(float(coverage[i]) / float(repeats[i])))\n print species + \"\\t\" + str(gc) + \"\\t\" + string.join(repeats, \"\\t\") + \"\\t\" + string.join(coverage, \"\\t\") + \"\\t\" + string.join(avglength, \"\\t\") + \"\\t\" + str(specieshash[species]['ssrl'])\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.4750795364379883, "alphanum_fraction": 0.4885118305683136, "avg_line_length": 30.775279998779297, "blob_id": "2918ae17ab42893334e33726ea6e4b8af546fb88", "content_id": "8b31a532f2bda24e7da9541d0cf2d965faf586cb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2829, "license_type": "permissive", "max_line_length": 88, "num_lines": 89, "path": "/python/phylip/create-distance-matrix.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nfrom low import * # custom functions, written by myself\nfrom goterm import GOTerm\nfrom collections import defaultdict\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( 
\"usage: \" + sys.argv[0] + \" -f <path> -i -n\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f input file\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['file'] = value\n \n if not args.has_key('file'):\n stderr( \"input file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"input file does not exist.\" )\n show_help()\n \n return args\n\n \n# =============================================================================\ndef read_input(file):\n hash = {}\n speciesarray = []\n fo = open(file)\n for line in fo:\n line = line.rstrip()\n pair, rate = line.split(\"\\t\")\n rate = str(round(1-float(rate),4))\n while len(rate) < 6: rate += \"0\"\n hash[pair] = rate\n speciesarray.extend(pair.split(\",\"))\n fo.close()\n speciesarray = list(set(speciesarray))\n speciesarray.sort()\n return speciesarray, hash\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n speciesarray, hash = read_input(args['file'])\n print \"\\t\" + str(len(speciesarray)+1)\n print \"outgroup \" + \"0.0000\" + \"\\t\" + string.join([\"1.0000\"]*len(speciesarray), \"\\t\")\n for sp1 in speciesarray:\n line = sp1\n while len(line) < 10: line += \" \"\n line += \"1.0000\"\n for sp2 in speciesarray:\n key = [sp1,sp2]\n key.sort()\n key = string.join(key, \",\")\n if sp1 == sp2: line += \"\\t\" + \"0.0000\"\n else: line += \"\\t\" + hash[key]\n print line\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5160374641418457, "alphanum_fraction": 0.5248367786407471, "avg_line_length": 30.45535659790039, "blob_id": "2743a8208a00edccec807e76fce51c7d67bc8a08", "content_id": "c0ddb121fa0aadf5b19ef7b12e7350b03169a44a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3523, "license_type": "permissive", "max_line_length": 105, "num_lines": 112, "path": "/python/geneontology/go2slim.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys\nimport string\nimport getopt # comand line argument handling\nfrom low import * # custom functions, written by myself\nfrom tempfile import mkstemp\nfrom collections import defaultdict\n\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -a <path> -t <path> -m <N> -n <namespaces>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -a annotation file in blast2go annot format (geneid <tab> goid)\" )\n stdout( \" -g gene ontology obo 
file\" )\n stdout( \" -s go slim obo\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"ha:g:s:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n map2slim = os.system(\"which map2slim > /dev/null\")\n if not map2slim == 0: sys.exit(\"map2slim program not installed or in path. try 'sudo cpan GO::Parser'\")\n\n args = {}\n for key, value in keys:\n if key == '-a': args['annot'] = value\n if key == '-g': args['goobo'] = value\n if key == '-s': args['slimobo'] = value\n \n if not args.has_key('annot'):\n stderr( \"annot file argument missing.\" )\n show_help()\n elif not file_exists( args.get('annot') ):\n stderr( \"annot file does not exist.\" )\n show_help()\n \n if not args.has_key('goobo'):\n stderr( \"go obo file argument missing.\" )\n show_help()\n elif not file_exists( args.get('goobo') ):\n stderr( \"go obo file does not exist.\" )\n show_help()\n\n if not args.has_key('slimobo'):\n stderr( \"goslim obo file argument missing.\" )\n show_help()\n elif not file_exists( args.get('slimobo') ):\n stderr( \"goslim obo file does not exist.\" )\n show_help()\n\n return args\n\n# =============================================================================\ndef annot2fb(annot):\n gohash = {}\n fo = open(annot)\n fb = mkstemp('.fb', 'go2slim', '/tmp')[1]\n fw = open(fb, 'w')\n for line in fo:\n col = line.rstrip().split(\"\\t\")\n goid = col[1]\n if gohash.has_key(goid): continue\n gohash[goid] = 1\n fw.write(string.join([goid]*5 + [\"\"]*12, \"\\t\") + \"\\n\")\n fo.close()\n fw.close()\n return fb\n\n# =============================================================================\ndef annot2slim(annot, result):\n go2slim = defaultdict(list)\n fo = open(result)\n for line in fo:\n col = line.rstrip().split(\"\\t\")\n goid, slimid = col[0], col[4]\n go2slim[goid].append(slimid)\n fo.close()\n fo = open(annot)\n for line in fo:\n col = line.rstrip().split(\"\\t\")\n for slimid in go2slim[col[1]]:\n col[1] = slimid\n print string.join(col, \"\\t\")\n fo.close()\n\n# =============================================================================\ndef main(args):\n fb = annot2fb(args['annot'])\n result = mkstemp('.out', 'go2slim', '/tmp')[1]\n os.system(\"map2slim %s %s %s > %s\" %(args['slimobo'], args['goobo'], fb, result))\n annot2slim(args['annot'], result)\n \n \n# =============================================================================\nargs = handle_arguments()\nmain( args )\n" }, { "alpha_fraction": 0.5156591534614563, "alphanum_fraction": 0.5185724496841431, "avg_line_length": 31.122806549072266, "blob_id": "69e74bb8ecb218cf4865d432f63cd5e40e4fdbe6", "content_id": "6556277e4bc1befc20742d890eb99707bd7d75e7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5492, "license_type": "permissive", "max_line_length": 84, "num_lines": 171, "path": "/python/fasta/create_fasta_clusters.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular 
expressions\nimport getopt\t\t\t\t\t# comand line argument handling\nfrom low import *\t\t\t# custom functions, written by myself\nimport anydbm\t\t\t\t\t# index databases (file hash)\nfrom Bio import SeqIO # biopython stuff, to parse fasta files for instance\n\n# =============================================================================\t\ndef show_help( ):\n\t\"\"\" displays the program parameter list and usage information \"\"\"\n\tstdout( \"usage: \" + sys.argv[0] + \" -f <path> -s <path> -d <path> -o <path> [-i]\" )\n\tstdout( \" \" )\n\tstdout( \" option description\" )\n\tstdout( \" -h help (this text here)\" )\n\tstdout( \" -f in cluster file\" )\n\tstdout( \" -s swiss-prot database folder\" )\n\tstdout( \" -d genome datasets folder\" )\t\n\tstdout( \" -i reindex databases\" )\n\tstdout( \" -o out folder\" )\n\tstdout( \" \" )\n\t\n\tsys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n\t\"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\tif len ( sys.argv ) == 1:\n\t\tstderr( \"no arguments provided.\" )\n\t\tshow_help()\t\n\t\n\ttry: # check for the right arguments\n\t\tkeys, values = getopt.getopt( sys.argv[1:], \"hf:o:d:s:i\" )\n\texcept getopt.GetoptError:\n\t\tstderr( \"invalid arguments provided.\" )\n\t\tshow_help()\n\t\n\targs = {}\n\tfor key, value in keys:\n\t\tif key == '-f': args['in'] = value\n\t\tif key == '-o':\targs['out'] = value\n\t\tif key == '-d':\targs['datasets'] = value\n\t\tif key == '-s':\targs['swissprot'] = value\n\t\tif key == '-i':\targs['indexdb'] = 1\n\t\t\t\t\n\tif not args.has_key('in'):\n\t\tstderr( \"in file missing.\" )\n\t\tshow_help()\n\tif not file_exists( args.get('in') ):\n\t\tstderr( \"in file does not exist.\" )\n\t\tshow_help()\n\t\t\n\tif not args.has_key('datasets'):\n\t\tstderr( \"datasets folder missing.\" )\n\t\tshow_help()\n\tif not dir_exists( args.get('datasets') ):\n\t\tstderr( \"datasets folder does not exist.\" )\n\t\tshow_help()\n\t\t\n\tif not args.has_key('swissprot'):\n\t\tstderr( \"swissprot folder missing.\" )\n\t\tshow_help()\n\tif not dir_exists( args.get('swissprot') ):\n\t\tstderr( \"swissprot folder does not exist.\" )\n\t\tshow_help()\n\t\t\n\t\t\n\tif not args.has_key('out'):\n\t\tstderr( \"out folder missing.\" )\n\t\tshow_help()\n\t\n\tif not dir_exists( args.get('out') ):\n\t\tos.mkdir( args.get('out') )\n\t\n\tif not args['out'].endswith('/'): args['out'] += '/'\n\tif not args['swissprot'].endswith('/'): args['swissprot'] += '/'\n\tif not args['datasets'].endswith('/'): args['datasets'] += '/'\n\t\n\treturn args\n\n# =============================================================================\ndef get_cluster_ids( file ):\n\thash = {}\n\tcount = 0\n\tfo = open( file, 'r' )\n\tfor line in fo:\n\t\tcount += 1\n\t\thash[count] = line.split()\n\tfo.close()\n\treturn hash\n\n\n# =============================================================================\ndef get_fasta_( id, args, genomes ):\n\tif id[:2] in genomes:\n\t\tdb = args.get('datasets') + id[:2] + '.aa'\n\telse: db = args.get('swissprot') + 'uniprot_sprot.fasta'\n\tprint \"lookup | db:\", db, \"\\t->\", id\n\t\n\tif not file_exists( db + '.xpi' ):\n\t\tos.system( \"xdformat -p -I %s &> /dev/null\" % db )\n\t\t\n\tout = os.popen( \"xdget -p %s %s\" %( db, id ) )\n\tseq = out.read()\n\tout.close()\n\treturn seq\n\t\n\n# =============================================================================\ndef index_databases( dbs, args ):\n\text = 
os.path.splitext(dbs[0])[1]\n\tDBM_name = '/data/l_wiss01/database/all-fasta-records.dbm' + ext\n\tif file_exists(DBM_name) and not args.has_key('indexdb'):\n\t\treturn DBM_name\n\tprint \"creating DBM:\", DBM_name\n\tDBM = anydbm.open( DBM_name, 'c' )\n\tfor db in dbs:\n\t\tprint \"-> adding db:\", db\n\t\thandle = open(db)\n\t\tfor seq_record in SeqIO.parse(handle, \"fasta\"):\n\t\t\tDBM[ seq_record.id ] = seq_record.seq.tostring()\n\t\thandle.close()\n\tDBM.close()\n\tprint \"DONE. indexed database:\", DBM_name\n\treturn DBM_name\n\t\n# =============================================================================\n# =============================================================================\ndef main( args ):\n\tgenomes = ['PO', 'ZO', 'At', 'Os', 'Mt', 'Pt', 'Lj' ]\n\t# database names\n\taa_dbs = []\n\tfor g in genomes:\n\t\taa_dbs.append( args.get('datasets') + g + '.aa' )\n\tnt_dbs = []\n\tfor g in genomes:\n\t\tnt_dbs.append( args.get('datasets') + g + '.nt' )\n\t#dbs.append( args.get('swissprot') + + 'uniprot_sprot.fasta' )\n\t\n\t# index databases\n\taa_dbmname = index_databases( aa_dbs, args )\n\tnt_dbmname = index_databases( nt_dbs, args )\n\t\n\tclusterhash = get_cluster_ids( args.get('in') )\n\t\n\taa_db = anydbm.open(aa_dbmname, \"r\")\n\tnt_db = anydbm.open(nt_dbmname, \"r\")\n\t\n\tfor i, idlist in clusterhash.iteritems():\n\t\tfwaa = open( args.get('out') + 'cluster' + add_leading_zeroes(i,3) + '.aa', 'w' )\n\t\tfwnt = open( args.get('out') + 'cluster' + add_leading_zeroes(i,3) + '.nt', 'w' )\n\t\tfor id in idlist[1:]:\n\t\t\tif not aa_db.has_key(id) or not nt_db.has_key(id):\n\t\t\t\tstderr( \"cluster %s | id %s not in both datasets | skipped.\" %(i, id) )\n\t\t\t\tcontinue\n\t\t\tfwaa.write( \">\" + id + \"\\n\" + aa_db[ id ] + \"\\n\" )\n\t\t\tfwnt.write( \">\" + id + \"\\n\" + nt_db[ id ] + \"\\n\" )\n\t\tfwaa.flush()\n\t\tfwaa.close()\n\t\tfwnt.flush()\n\t\tfwnt.close()\n\t\t\n\t\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )" }, { "alpha_fraction": 0.6145833134651184, "alphanum_fraction": 0.6354166865348816, "avg_line_length": 15, "blob_id": "c6b362d26f6dc9940873d06d5ce9574a3f09d8df", "content_id": "11158415a6e6285b2b2f17e737f9439f2b4f9b24", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 288, "license_type": "permissive", "max_line_length": 27, "num_lines": 18, "path": "/python/generic/difference.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sets\nimport sys, os\n\ndef get_lines( file ):\n lines = []\n fo = open(file)\n for line in fo:\n line = line.rstrip()\n lines.append(line)\n\n return sets.Set(lines)\n\nl1 = get_lines(sys.argv[1])\nl2 = get_lines(sys.argv[2])\nfor e in l1.difference(l2):\n print e\n" }, { "alpha_fraction": 0.6522781848907471, "alphanum_fraction": 0.6587872505187988, "avg_line_length": 43.227272033691406, "blob_id": "336189eb10d4ec6dec4195ec6eb32de77ee441ae", "content_id": "c9b4c775a6dba2e9d88ec72a76bad0064db0e350", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2919, "license_type": "permissive", "max_line_length": 224, "num_lines": 66, "path": "/python/geneontology/go-enrichment.py", "repo_name": "haokui/bioinformatics", 
"src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys\nimport rpy\n\n\ndef usage():\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" universe-topGO.table testset.ids\"\n sys.exit(1)\n\n\ndef plausi():\n if len(sys.argv) != 3: usage()\n inUniverse, inTestset = sys.argv[1:3]\n return inUniverse, inTestset\n\n\ndef init_R():\n R = rpy.r\n try:\n R.library('topGO')\n except:\n try: \n R.source(\"http://bioconductor.org/biocLite.R\")\n R.biocLite('topGO')\n R.library('topGO')\n except:\n print \"Problem importing R libraries.\"\n sys.exit()\n\n R('if(!isGeneric(\"GOFisherTestUnder\")) setGeneric(\"GOFisherTestUnder\", function(object) standardGeneric(\"GOFisherTestUnder\"))')\n R('setMethod(\"GOFisherTestUnder\", \"classicCount\", function(object) { contMat <- contTable(object); if(all(contMat == 0)) p.value <- 1 else p.value <- fisher.test(contMat, alternative = \"less\")$p.value; return(p.value) })')\n return R\n\n\ndef main():\n inUniverse, inTestset = plausi()\n R = init_R()\n R('GOmap = readMappings(file = \"' + inUniverse + '\")')\n R('refset = names(GOmap)')\n R('testset = scan(file=\"' + inTestset + '\", what=character())')\n R('genes_of_interest = factor(as.integer(refset %in% testset))')\n R('names(genes_of_interest) <- refset')\n for ontology in [\"MF\", \"BP\", \"CC\"]:\n R('tgData = new(\"topGOdata\", ontology = \"' + ontology + '\", allGenes = genes_of_interest, annot = annFUN.gene2GO, gene2GO = GOmap)')\n R('fisherRes = runTest(tgData, algorithm=\"classic\", statistic=\"fisher\")')\n R('fisherResCor = p.adjust(score(fisherRes), method=\"fdr\")')\n R('weightRes = runTest(tgData, algorithm=\"weight01\", statistic=\"fisher\")')\n R('weightResCor = p.adjust(score(weightRes), method=\"fdr\")')\n R('allRes = GenTable(tgData, classic=fisherRes, weight=weightRes, orderBy=\"weight\", ranksOf=\"classic\", topNodes=150)')\n R('allRes$fisher.FDR = fisherResCor[allRes$GO.ID]')\n R('allRes$weight.FDR = weightResCor[allRes$GO.ID]')\n R('write.csv(allRes, \"topGO.over.Sig.' + ontology + '.csv\")')\n\n R('tgData = new(\"topGOdata\", ontology = \"' + ontology + '\", allGenes = genes_of_interest, annot = annFUN.gene2GO, gene2GO = GOmap)')\n R('test.stat <- new(\"classicCount\", testStatistic = GOFisherTestUnder, name =\"Fisher test underrepresentation\")')\n R('fisherRes <- getSigGroups(tgData, test.stat)')\n R('fisherResCor = p.adjust(score(fisherRes), method=\"fdr\")')\n R('test.stat <- new(\"weightCount\", testStatistic = GOFisherTestUnder, name =\"Fisher test underrepresentation\")')\n R('weightRes <- getSigGroups(tgData, test.stat)')\n R('weightResCor = p.adjust(score(weightRes), method=\"fdr\")')\n R('allRes = GenTable(tgData, classic=fisherRes, weight=weightRes, orderBy=\"weight\", ranksOf=\"classic\", topNodes=150)')\n R('allRes$fisher.FDR = fisherResCor[allRes$GO.ID]')\n R('allRes$weight.FDR = weightResCor[allRes$GO.ID]')\n R('write.csv(allRes, \"topGO.under.Sig.' 
+ ontology + '.csv\")')\n \nmain()\n" }, { "alpha_fraction": 0.4650489091873169, "alphanum_fraction": 0.4679464101791382, "avg_line_length": 31.48235321044922, "blob_id": "5688dec614dc34073112de1db26ede6a483589e5", "content_id": "2cfd40acde1b0f1615bf86725e8dfc4e70242f90", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2761, "license_type": "permissive", "max_line_length": 82, "num_lines": 85, "path": "/python/kegg/kegg-enzyme2ko.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular expressions\nimport getopt\t\t\t\t\t# comand line argument handling\nfrom low import *\t\t\t# custom functions, written by myself\nimport anydbm\t\t\t\t\t# index databases (file hash)\nfrom Bio import SeqIO # biopython stuff, to parse fasta files for instance\n\n# =============================================================================\t\ndef show_help( ):\n\t\"\"\" displays the program parameter list and usage information \"\"\"\n\tstdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n\tstdout( \" \" )\n\tstdout( \" option description\" )\n\tstdout( \" -h help (this text here)\" )\n\tstdout( \" -f kegg ko file file\" )\n\tstdout( \" \" )\n\t\n\tsys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n\t\"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\tif len ( sys.argv ) == 1:\n\t\tstderr( \"no arguments provided.\" )\n\t\tshow_help()\t\n\t\n\ttry: # check for the right arguments\n\t\tkeys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n\texcept getopt.GetoptError:\n\t\tstderr( \"invalid arguments provided.\" )\n\t\tshow_help()\n\t\n\targs = {}\n\tfor key, value in keys:\n\t\tif key == '-f':\targs['file'] = value\n\t\t\t\t\n\tif not args.has_key('file'):\n\t\tstderr( \"kegg file missing.\" )\n\t\tshow_help()\n\tif not file_exists( args.get('file') ):\n\t\tstderr( \"kegg file does not exist.\" )\n\t\tshow_help()\n\t\t\n\treturn args\n\n\n# =============================================================================\ndef strip_tags(value):\n \"Return the given HTML with all tags (+ KEGG tags) stripped.\"\n value = re.sub(r'<[^>]*?>', '', value)\n value = re.sub(r'\\[.*\\]', '', value)\n return value\n\n# =============================================================================\n# =============================================================================\ndef main( args ):\n fo = open( args.get('file'), 'r' )\n ko_regex = re.compile( \"^ENTRY\\s+(K\\S+)\" )\n enzyme_regex = re.compile( \"\\s+EC:\\s+([0-9.]+)\" )\n\n ko, enzyme = \"\", \"\"\n for line in fo:\n line = line.rstrip()\n if line.startswith(\"///\"): \n ko, enzyme = \"\", \"\"\n continue\n if ko == \"\":\n if re.search( ko_regex, line): ko = re.search( ko_regex, line ).group(1)\n else:\n if re.search( enzyme_regex, line):\n enzyme = re.search( enzyme_regex, line ).group(1)\n print \"%s\\t%s\" % ( ko, enzyme )\n\n fo.close()\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.4780130386352539, "alphanum_fraction": 
0.48534202575683594, "avg_line_length": 35.10293960571289, "blob_id": "c4c15b8e13aaf91be9b05dc38d21e1b408b2590d", "content_id": "fab0925e0213008cd0d7dfce0f09bdc4f0ed4ac1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2456, "license_type": "permissive", "max_line_length": 83, "num_lines": 68, "path": "/python/blast/blast-best-hit-per-query.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport getopt # comand line argument handling\nfrom low import * # custom functions, written by myself\nimport blastout\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f blastout file (-m 8)\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['blastoutfile'] = value\n \n for key in ['blastoutfile']:\n if key.endswith(\"file\"):\n if not args_file_exists(args, key): show_help()\n elif key.endswith(\"dir\"):\n if not args_dir_exists(args, key): show_help()\n elif not args.has_key(key):\n print >> sys.stderr, \"missing argument\", key\n show_help()\n return args\n\n# =============================================================================\ndef statusbar(current, total, message=\"\", width=40):\n progress = 1.0*current/total\n if message != \"\": message = \"[\" + message + \"]\"\n progressbar = \"=\" * int(progress*width)\n while len(progressbar) < width: progressbar += \" \" \n sys.stderr.write(\"\\r 0% \" + progressbar + \" 100% \" + message)\n if progress == 1.0: sys.stderr.write(\"\\n\")\n \n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n q2hits = blastout.get_query_hash(args['blastoutfile'])\n for qid, blasthits in q2hits.iteritems():\n print blasthits[0].to_s()\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.53481125831604, "alphanum_fraction": 0.5428728461265564, "avg_line_length": 37.4295768737793, "blob_id": "9ceb1cc8718d95bc103e7d404fde98e786aef913", "content_id": "dca539918ef71ebbdf8b63db639a4ab0e47a312e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5458, "license_type": "permissive", "max_line_length": 182, "num_lines": 142, "path": "/python/gff/intra-and-intergenic-orthologous-regions.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys 
# low level handling, such as command line stuff\nimport string # string methods available\nimport getopt # comand line argument handling\nfrom collections import defaultdict\nfrom low import * # custom functions, written by myself\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" -f <gff-file> -o <orth-file>\"\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f sorted all.parsed.gff file (species, chr, startpos)!!!\" )\n stdout( \" -o clustered flybase gene orthologs file\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:o:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['gff'] = value\n if key == '-o': args['orth'] = value\n \n if not args.has_key('gff'):\n print >> sys.stderr, \"gff file argument missing.\"\n show_help()\n elif not file_exists( args.get('gff') ):\n print >> sys.stderr, \"gff file does not exist.\"\n show_help()\n\n if not args.has_key('orth'):\n print >> sys.stderr, \"orth file argument missing.\"\n show_help()\n elif not file_exists( args.get('orth') ):\n print >> sys.stderr, \"orth file does not exist.\"\n show_help()\n\n return args\n\n# =============================================================================\nclass Gene:\n def __init__(self, line):\n cols = line.rstrip().split(\"\\t\")\n self.species, self.name, self.chr, self.start, self.stop, self.strand = cols[0:6]\n self.start, self.stop = int(self.start), int(self.stop)\n self.prev, self.next = 0, 0\n self.orthologs = []\n\n def __cmp__(self, other):\n return cmp(self.start, other.start)\n\n def is_orthologous_to(self, other):\n if self in other.orthologs and other in self.orthologs: return 1\n return 0\n\n\n# =============================================================================\ndef parse_gene_order(file):\n name2gene = {}\n fo = open(file)\n prevgene, prevspecies, prevchr = 0,0,0\n for line in fo:\n g = Gene(line)\n name2gene[g.name] = g\n if g.species == prevspecies and g.chr == prevchr:\n g.prev = prevgene\n prevgene.next = g\n prevgene, prevspecies, prevchr = g, g.species, g.chr\n fo.close()\n return name2gene\n\n# =============================================================================\ndef get_orthologs(file, name2gene):\n fo = open(file)\n for line in fo: \n if line.startswith(\"#\"): continue\n if len(line.rstrip()) == 0: continue\n columns = line.rstrip().split(\"\\t\")\n genenames = [e[:e.index(\"(\")] for e in columns]\n for gn in genenames:\n name2gene[gn].orthologs = [name2gene[x] for x in genenames]\n name2gene[gn].orthologs.remove(name2gene[gn])\n fo.close()\n return name2gene\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n\n def process_gff_line(line, species):\n if 
line.startswith(\"#\") or len(line.rstrip()) == 0: return\n columns = line.rstrip().split(\"\\t\")\n if len(columns) != 9: return\n type = columns[2]\n if type != \"gene\": return\n chr, start, stop, strand, descr = columns[0], columns[3], columns[4], columns[6], columns[8]\n id = re.search(\"ID=([^;]+);\", descr).group(1)\n sys.stdout.write(species + \"\\t\" + id + \"\\t\")\n print string.join([chr, start, stop, strand], \"\\t\")\n\n# =============================================================================\n \n name2gene = parse_gene_order(args['gff'])\n name2gene = get_orthologs(args['orth'], name2gene)\n caught = {}\n for qname, qgene in name2gene.iteritems():\n # continue if already caught, or no neighbor\n if caught.has_key(qname): continue\n if not qgene.next: continue\n ngene = qgene.next\n # now check all direct orthologs of the query gene and see whether their neighbor is orthologous to the query's neighbor\n for ogene in qgene.orthologs:\n if not ogene.next: continue\n ongene = ogene.next\n if ngene.is_orthologous_to(ongene):\n print string.join([qgene.species, ogene.species, \"intergenic\", qgene.chr, str(qgene.stop +1), str(ngene.start -1), ogene.chr, str(ogene.stop +1), str(ongene.start -1)], \"\\t\")\n print string.join([qgene.species, ogene.species, \"gene\", qgene.chr, str(qgene.start), str(qgene.stop), ogene.chr, str(ogene.start), str(ogene.stop)], \"\\t\")\n print string.join([ngene.species, ongene.species, \"gene\", ngene.chr, str(ngene.start), str(ngene.stop), ongene.chr, str(ongene.start), str(ongene.stop)], \"\\t\")\n caught[qgene.name] = 1\n caught[ngene.name] = 1\n caught[ogene.name] = 1\n caught[ongene.name] = 1\n \n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5656824111938477, "alphanum_fraction": 0.5796326398849487, "avg_line_length": 35.760684967041016, "blob_id": "db0eca60b6a328298f520299fa25f9162806db2e", "content_id": "8a6b911c53d2a1ec52a820febdf1248dadf168c4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4301, "license_type": "permissive", "max_line_length": 146, "num_lines": 117, "path": "/python/base/newick.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "import os, re, string\nimport needlemanwunsch\n\n# =============================================================================\ndef get_motif_array(ssrarray):\n if len(ssrarray) == 0: return ssrarray\n if type(ssrarray[0]) is unicode: return ssrarray\n return [e.std_motif + '/' + e.gene_feature for e in ssrarray]\n\n# =============================================================================\ndef get_consensus_from_aln(alns, removegaps=True):\n cons = []\n a1, a2 = alns[0], alns[1]\n for i in range(len(a1)):\n if a1[i] == '-' or a2[i] == '-':\n if not removegaps: cons.append(\"-\")\n else: cons.append(a1[i])\n return cons\n\n# =============================================================================\nclass Node():\n def __init__(self, name=\"\", dist=0, parent=0):\n self.name = name\n self.distance_to_parent = dist\n if parent: self.set_parent(parent)\n else: self.parent = parent\n self.properties = {}\n self.children = []\n #print \"init Node. 
name:\", name, \"dist\", dist, \"parent:\", parent\n\n def summed_distance_to(self, other):\n if self == other: return 0\n self_nodes = [self]\n while 1:\n if self_nodes[-1].parent == 0: break\n self_nodes.append(self_nodes[-1].parent)\n other_nodes = [other]\n while 1:\n if other_nodes[-1].parent == 0: break\n other_nodes.append(other_nodes[-1].parent)\n distance = 0\n for node in self_nodes:\n if node in other_nodes: break\n distance += 2* int(node.distance_to_parent)\n return distance\n\n def set_parent(self, node):\n self.parent = node\n node.properties = {}\n if not self in node.children: node.children.append(self)\n if len(node.children) == 2: \n node.name = '(' + string.join([c.name for c in node.children], \",\") + ')'\n node.children = sorted(node.children, key=lambda e: e.name)\n\n def ssrs(self):\n if not self.properties.has_key('ssrs') or (len(self.properties['ssrs']) == 0 and len(self.children) == 2):\n score, pointers, aln = needlemanwunsch.align(get_motif_array(self.children[0].ssrs()), get_motif_array(self.children[1].ssrs()), -1, 2, -10)\n #print \"D1\", get_motif_array(self.children[0].ssrs())\n #print \"D2\", get_motif_array(self.children[1].ssrs())\n #print \"DC\", aln\n self.properties['ssrs'] = get_consensus_from_aln(aln)\n #print \"+ 2 +\", len(self.properties['ssrs'])\n return self.properties['ssrs']\n\n\n# =============================================================================\nclass Tree():\n def __init__(self, file):\n self.name = os.path.split(file)[1]\n self.leaves = {}\n self.temp_ancestral_nodes = []\n self.ancestral_nodes = []\n self.build_from_file(file)\n\n def add_leave_node(self, node, leafname):\n self.leaves[leafname] = node\n\n def add_ancestral_node(self, node):\n self.temp_ancestral_nodes.append(node)\n\n def get_last_ancestral_node(self):\n return self.temp_ancestral_nodes[-1] \n\n def remove_last_ancestral_node(self):\n self.ancestral_nodes.append(self.temp_ancestral_nodes.pop(-1))\n if len(self.temp_ancestral_nodes) > 0: self.ancestral_nodes[-1].set_parent(self.get_last_ancestral_node())\n\n def get_root_node(self):\n parent = self.leaves.values()[0].parent\n while parent.parent: parent = parent.parent\n return parent\n\n def get_ancestral_node_of(self, node1, node2):\n lineage1 = [node1]\n while lineage1[-1].parent: lineage1.append(lineage1[-1].parent)\n anode = node2\n while 1:\n if not anode in lineage1: anode = anode.parent\n else: break\n return anode\n\n def build_from_file(self, file):\n tree = open(file).readline().strip()\n while not tree.startswith(\";\"):\n if tree.startswith(\"(\"): \n self.add_ancestral_node(Node())\n tree = tree[1:]\n elif tree.startswith(\"):\"):\n self.get_last_ancestral_node().distance_to_parent = re.match(\"\\):(\\d+)\", tree).group(1)\n tree = tree[2+len(str(self.get_last_ancestral_node().distance_to_parent)):]\n self.remove_last_ancestral_node()\n elif tree.startswith(\",\"): \n tree = tree[1:]\n elif re.match(\"([A-Za-z]+):(\\d+)\", tree):\n name, length = re.match(\"([A-Za-z]+):(\\d+)\", tree).groups()\n self.add_leave_node(Node(name, length, self.get_last_ancestral_node()), name)\n tree = tree[1+len(name)+len(str(length)):]\n" }, { "alpha_fraction": 0.6312848925590515, "alphanum_fraction": 0.6368715167045593, "avg_line_length": 26.305084228515625, "blob_id": "c2359e45cf7da4ef0ed8fed1b7e26f308c75cc46", "content_id": "d42d0c1b7655655706932b244980e5ee0aa57a8c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1611, "license_type": 
"permissive", "max_line_length": 101, "num_lines": 59, "path": "/python/orthomcl/table-of-gene-id-per-cluster.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys, string\nfrom low import *\n\n\ndef usage():\n print >> sys.stderr, \"usage: \", sys.argv[0], \" [<noparalogs.orthomcl.out>]\"\n sys.exit(1)\n\n\ndef plausi():\n if len(sys.argv) < 2: usage()\n inOrtho = sys.argv[1]\n if not os.path.exists(inOrtho) or not os.path.isfile(inOrtho) or not os.path.getsize(inOrtho) > 0: \n print >> sys.stderr, \"specified orthomcl file does not exist, is not a file, or is empty\\n\"\n usage()\n return inOrtho\n\n\nclass OrthoCluster():\n def __init__(self, line):\n descr, genedefs = line.split(\"\\t\")\n genedefs = genedefs.split()\n self.name = descr[:descr.index('(')].lower()\n self.geneHash = {}\n self.speciesHash = {}\n for genedef in genedefs:\n geneid = genedef[:genedef.index('(')]\n species = genedef[genedef.index('(')+1:-1].lower()\n self.geneHash[geneid] = species\n if self.speciesHash.has_key(species): self.speciesHash[species].append(geneid)\n else: self.speciesHash[species] = [geneid]\n\n def get_name(self): return self.name\n def get_count(self): return len(self.geneHash)\n def get_gene_hash(self): return self.geneHash\n def get_species_hash(self): return self.speciesHash\n \n\n\ndef main():\n inOrtho = plausi()\n fo = open(inOrtho)\n speciesCols = 0\n for line in fo:\n o = OrthoCluster(line.rstrip())\n SH = o.get_species_hash()\n if not speciesCols:\n speciesCols = SH.keys()\n speciesCols.sort()\n print \"OrthoMCL.ID\" + \"\\t\" + string.join(speciesCols, \"\\t\")\n\n name = o.get_name()\n print name + \"\\t\" + string.join( [SH[x][0] for x in speciesCols], \"\\t\")\n\n fo.close()\n\n\nmain()\n" }, { "alpha_fraction": 0.5095042586326599, "alphanum_fraction": 0.5139768719673157, "avg_line_length": 34.28947448730469, "blob_id": "27900494f5731bacb0d9f69da7148b0042cbf3a7", "content_id": "b8b25f6d5a6a718c28618ab0bff071e2343eedbb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2683, "license_type": "permissive", "max_line_length": 138, "num_lines": 76, "path": "/python/misa/split-compound-ssrs.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nfrom low import * # custom functions, written by myself\nfrom misa import MisaSSR\nfrom collections import defaultdict\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f misa outptu file\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in 
keys:\n if key == '-f': args['file'] = value\n \n if not args.has_key('file'):\n stderr( \"misa file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"misa file does not exist.\" )\n show_help()\n \n return args\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n \n fo = open(args['file'])\n for line in fo:\n if line.startswith(\"ID\\t\"): continue\n m = MisaSSR(line)\n if m.type != \"c\" and m.type != \"c*\": print m.to_s()\n else:\n startpos = m.startpos\n separatepatterns = re.findall(\"\\([ATGC]+\\)\\d+[*]{0,1}\",m.pattern)\n for separatepattern in separatepatterns:\n motif = separatepattern[1:separatepattern.index(\")\")]\n if separatepattern.endswith(\"*\"): repeats = int(separatepattern[separatepattern.index(\")\")+1:-1])\n else: repeats = int(separatepattern[separatepattern.index(\")\")+1:])\n length = len(motif)*repeats\n endpos = startpos + length -1\n print string.join([m.geneid, str(m.ssrnr), \"p\" + str(len(motif)), separatepattern, str(length), str(startpos), str(endpos)], \"\\t\")\n startpos = endpos+1\n\n\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5511295795440674, "alphanum_fraction": 0.57015460729599, "avg_line_length": 24.876922607421875, "blob_id": "35267cc4714488c509c1f9278763f93632212a09", "content_id": "2dde51d207067c82971c7776f00b774b886babf2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1682, "license_type": "permissive", "max_line_length": 79, "num_lines": 65, "path": "/python/generic/z-score-stats.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys, string\nfrom low import *\nfrom collections import defaultdict\nimport rpy2.robjects as robjects\nR = robjects.r\n\n\n# =============================================================================\ndef usage():\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" paralog-count.tab\" \n sys.exit(1)\n\n\ndef plausi():\n if len(sys.argv) != 2: usage()\n inCounts = sys.argv[1]\n return inCounts\n\n\ndef R_mean_and_sd(pylist):\n rcountsvec = robjects.IntVector(pylist)\n mean = R['mean'](rcountsvec)[0]\n sd = R['sd'](rcountsvec)[0]\n return mean, sd\n\n\ndef Zscore(x, mean, sd):\n if sd == 0: return 0\n return (1.0*x - mean)/sd\n\ndef main():\n inCounts = plausi()\n fo = open(inCounts)\n lines = fo.readlines()\n fo.close()\n header = lines.pop(0).rstrip().split(\"\\t\")\n speciesArray = header[1:]\n results = defaultdict(lambda: defaultdict(int))\n for line in lines:\n line = line.rstrip()\n columns = line.split(\"\\t\")\n cluster = columns[0]\n genecounts = columns[1:]\n mean, sd = R_mean_and_sd(genecounts)\n for i in range(len(genecounts)):\n gc, species = int(genecounts[i]), speciesArray[i]\n z = Zscore(gc, mean, sd)\n if abs(z) < 2: continue\n if z > 3: results[species]['Z > 3'] += 1\n elif z > 2: results[species]['Z > 2'] += 1\n elif z < -3: results[species]['Z < -3'] += 1\n elif z < -2: results[species]['Z < -2'] += 1\n \n speciesArray.sort()\n print \"\\t\" + string.join(speciesArray, \"\\t\")\n for zcat in ['Z > 3', 'Z > 2', 'Z < -3', 'Z < -2']:\n sys.stdout.write(zcat)\n for spec in speciesArray:\n count = 
str(results[spec][zcat])\n      sys.stdout.write(\"\\t\" + count)\n    sys.stdout.write(\"\\n\")\n\n\nmain()\n" }, { "alpha_fraction": 0.47743552923202515, "alphanum_fraction": 0.4795845150947571, "avg_line_length": 32.62650680541992, "blob_id": "2e4738cfde3c8a073f9a0915a2665ad884388bdc", "content_id": "544894dc75fd48d1b7d064c75a92b9f63ed7bc78", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2792, "license_type": "permissive", "max_line_length": 83, "num_lines": 83, "path": "/python/swapsc/swapsee-table-annotation.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport getopt\t\t\t\t\t# command line argument handling\nimport anydbm\t\t\t\t\t# index databases (file hash)\nfrom low import *\t\t\t# collection of generic self-defined functions\n\n\n# =============================================================================\t\ndef show_help( ):\n\t\"\"\" displays the program parameter list and usage information \"\"\"\n\tstdout( \"usage: \" + sys.argv[0] + \" -f <path> -e <string> [-i <n>]\" )\n\tstdout( \" \" )\n\tstdout( \" option description\" )\n\tstdout( \" -h help (this text here)\" )\n\tstdout( \" -f table file\" )\n\tstdout( \" -i table column index containing the lookup name [default: 0]\" )\n\tstdout( \" -c annotation file column to use [default: all]\" )\n\tstdout( \" -l annotation file line(s) to use [default: first]\" )\n\tstdout( \" -e file extension to look for (= lookupname.extension)\" )\n\tstdout( \" \" )\n\tsys.exit(1)\n\t\n# =============================================================================\ndef handle_arguments():\n  \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n  if len ( sys.argv ) == 1:\n  \tstderr( \"no arguments provided.\" )\n  \tshow_help()\t\n  \n  try: # check for the right arguments\n    keys, values = getopt.getopt( sys.argv[1:], \"hf:i:e:\" )\n  except getopt.GetoptError:\n  \tstderr( \"invalid arguments provided.\" )\n  \tshow_help()\n  \n  args = {}\n  for key, value in keys:\n    if key == '-f': args['file'] = value\n    if key == '-i':\targs['col'] = int( value )\n    if key == '-e':\targs['ext'] = value\n  \n  if not args.has_key('file'):\n  \tstderr( \"table file missing.\" )\n  \tshow_help()\n  if not file_exists( args.get('file') ):\n  \tstderr( \"table file does not exist.\" )\n  \tshow_help()\n  \t\n  if not args.has_key('col'):\n    args['col'] = 0\n  \n  if not args.has_key('ext'):\n    stderr( \"file extension missing.\" )\n    show_help()\n  \n  return args\n\n\t\n# =============================================================================\n# =============================================================================\ndef main( args ):\n  fo = open( args.get('file') )\n  for line in fo:\n    line = line.rstrip()\n    columns = line.split(\"\\t\")\n    lookup = columns[ args.get('col') ]\n    lookupfile = lookup + args['ext']\n    if file_exists( lookupfile):\n      ft = open( lookupfile )\n      lines = ft.readlines()\n      ft.close()\n      # append the first line of the annotation file to the table row\n      # (the documented default; note that the -c and -l selectors listed\n      # in the usage text are not parsed by the getopt string above)\n      if len(lines) > 0: line += \"\\t\" + lines[0].rstrip()\n    print line\n  fo.close()\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\nargs = handle_arguments( )\nmain( args )\n\n" }, { "alpha_fraction": 0.5171396136283875, "alphanum_fraction": 
0.5309875011444092, "avg_line_length": 38.32143020629883, "blob_id": "7d4919ec000bb8cef8388e5358a02e9d81b52a1e", "content_id": "c31e9f83cfa896ab0e9411be88a347ba48404f0b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4405, "license_type": "permissive", "max_line_length": 158, "num_lines": 112, "path": "/python/misa/misa-single-genome-stats.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nfrom low import * # custom functions, written by myself\nfrom misa import MisaSSRspecies\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f scaffold fasta file on which SSR identification was done\" )\n stdout( \" -m misa out file (with species name as 1st column and localization feature in last column)\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:m:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-m': args['misa'] = value\n if key == '-f': args['fasta'] = value\n\n if not args.has_key('misa'):\n stderr( \"misa file argument missing.\" )\n show_help()\n elif not file_exists( args.get('misa') ):\n stderr( \"misa file does not exist.\" )\n show_help()\n \n if not args.has_key('fasta'):\n stderr( \"fasta file argument missing.\" )\n show_help()\n elif not file_exists( args.get('fasta') ):\n stderr( \"fasta file does not exist.\" )\n show_help()\n \n return args\n\n \n# =============================================================================\ndef get_scaffold_lengths(file):\n lengths = {}\n fo = open(file)\n for line in fo:\n line = line.rstrip()\n if line.startswith(\">\"):\n id = line[1:]\n if id.count(\" \") > 0: id = id[:id.index(\" \")]\n lengths[id] = 0\n else: lengths[id] += len(line.replace(\" \", ''))\n return lengths\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n scaffold2length = get_scaffold_lengths(args['fasta'])\n scaffoldhash = {}\n fo = open(args['misa'])\n for line in fo:\n m = MisaSSRspecies(line)\n scaffold = m.geneid\n if not scaffoldhash.has_key(scaffold):\n scaffoldhash[scaffold] = {'ssrs':0, 'length':scaffold2length[scaffold], 'ssrlength':0, 'exonic':0, 'intronic':0, 'intergenic':0, \"5'UTR\":0, \"3'UTR\":0}\n scaffoldhash[scaffold]['ssrs'] += 1\n scaffoldhash[scaffold]['ssrlength'] += m.length\n if m.feature == 'E': scaffoldhash[scaffold]['exonic'] += 1\n elif m.feature == 'I': scaffoldhash[scaffold]['intronic'] += 1\n elif 
m.feature == '3': scaffoldhash[scaffold][\"3'UTR\"] += 1\n elif m.feature == '5': scaffoldhash[scaffold][\"5'UTR\"] += 1\n else: scaffoldhash[scaffold]['intergenic'] += 1\n fo.close()\n\n print string.join([\"Scaffold\", \"Length (kb)\", \"SSRs (bp)\", \"# SSRs\", \"SSR.coverage\", \"SSRs/kb\", \"exonic\", \"intronic\", \"3'UTR\", \"5'UTR\", \"intergenic\"], \"\\t\")\n for scaffold, length in scaffold2length.iteritems():\n if not scaffoldhash.has_key(scaffold):\n sys.stdout.write(scaffold + \"\\t\" + str(1.0*length/1000) + \"\\t\")\n sys.stdout.write(string.join([\"0\"]*9, \"\\t\") + \"\\n\")\n else:\n hash = scaffoldhash[scaffold]\n sys.stdout.write(scaffold + \"\\t\" + str(1.0*hash['length']/1000))\n sys.stdout.write(\"\\t\" + str(hash['ssrlength']))\n sys.stdout.write(\"\\t\" + str(hash['ssrs']))\n sys.stdout.write(\"\\t\" + \"%0.2f%%\" %(100.0*hash['ssrlength'] / hash['length']))\n sys.stdout.write(\"\\t\" + \"%0.2f\" %(1000.0*hash['ssrs'] / hash['length']))\n sys.stdout.write(\"\\t\" + str(hash['exonic']) + \"\\t\" + str(hash['intronic']))\n sys.stdout.write(\"\\t\" + str(hash[\"3'UTR\"]) + \"\\t\" + str(hash[\"5'UTR\"]))\n sys.stdout.write(\"\\t\" + str(hash['intergenic']) + \"\\n\")\n\n\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.6712730526924133, "alphanum_fraction": 0.6825795769691467, "avg_line_length": 29.227848052978516, "blob_id": "74987863b3e573250c5f0ca15d9d4add104874fe", "content_id": "4fc4c3118722351ad8dc5f60214bde8187276dcb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2388, "license_type": "permissive", "max_line_length": 82, "num_lines": 79, "path": "/python/orthomcl/build-orthomcl-like-output.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys\nfrom low import *\n\n# takes an input protein fasta file and an orthomcl.gg file\n# orthomcl.gg file format:\n# speciesname1: id1 id2 id3 id4 .... full genome\n# speciesname2: id1 id2 id3 id4 .... full genome\n#\n# with these infos, the goal is to get only one protein sequence per species\n# we use t-coffee to find the most similar protein sequence per species\n# to the whole cluster. so in case one species contributes several sequences \n# to a cluster, we choose the one species to keep which has the highest average \n# similarity to the rest of the cluster. if more than 1 sequence yield the highest\n# avgsim, we determine whether these protein sequences are (1) all identical, \n# or whether they are (2) slightly different. In case (1), we choose any sequence\n# randomly because it does not matter. In case (2), we sum up all pairwise\n# similarities for each candidate sequence, and keep only the one sequence\n# with the highest sum. 
If these are identical as well, we again choose randomly\n# (should happen very rarely).\n\n\n\ndef usage():\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" clustering.out orthomcl.gg\"\n sys.exit(1)\n\n\ndef plausi():\n if len(sys.argv) != 3: usage()\n inClustering, inGG = sys.argv[1:3]\n return inClustering, inGG\n\n\ndef get_number_of_species(inGG):\n count = 0\n fo = open(inGG)\n for line in fo: count += 1\n fo.close()\n return count\n\n\ndef read_gg(inGG):\n outHash = {}\n speciesArray = []\n fo = open(inGG)\n for line in fo:\n line = line.rstrip()\n cols = line.split()\n species = str(cols[0])[:-1]\n if not species in speciesArray: speciesArray.append(species)\n for col in cols[1:]:\n outHash[col] = species\n fo.close()\n return outHash, speciesArray\n\n\ndef main():\n inClustering, inGG = plausi()\n speciesHash, speciesArray = read_gg(inGG)\n \n fo = open(inClustering)\n for line in fo:\n if line.startswith(\"#\"): continue\n line = line.rstrip()\n cluster, count, geneids = line.split(\"\\t\")[0:3]\n geneids = geneids.split(\", \")\n currentSpecies = []\n for id in geneids: currentSpecies.append(speciesHash[id])\n speciesCount = len(set(currentSpecies))\n sys.stdout.write(\"%s(%s genes, %s taxa):\\t\" %(cluster, count, speciesCount)) \n for id in geneids: \n species = speciesHash[id]\n sys.stdout.write(id + \"(\" + species + \") \")\n sys.stdout.write(\"\\n\")\n fo.close()\n\n\nmain()\n" }, { "alpha_fraction": 0.6085433959960938, "alphanum_fraction": 0.6141456365585327, "avg_line_length": 38.66666793823242, "blob_id": "0efaa75ff48b0f48142b4d53e453230cf856d7ff", "content_id": "11d6c137cb6b0029ecccf1cafa3ce6f5f4e0cb5d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1428, "license_type": "permissive", "max_line_length": 219, "num_lines": 36, "path": "/python/base/pfam.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "def read_hmmout(ifile, evalue=10, matchreq=0.0):\n hash = {}\n fo = open(ifile)\n for line in fo:\n cols = line.strip().split()\n if len(cols) == 16: \n i = []\n i.append(line.index(\"\\t\"))\n if line.count(\" \") > 0: i.append(line.index(\" \"))\n line = line[min(i):]\n pd = PfamDomain(line)\n if float(pd.get_attr('E-value')) > evalue: continue\n if matchreq > 0 and ((float(pd.get_attr('alignment_end'))-float(pd.get_attr('alignment_start')))/float(pd.get_attr('hmm_length'))) < matchreq: continue\n #print pd.get_attr('seq_id'), pd.get_attr('hmm_name')\n #print pd.get_attr('seq_id')\n if not hash.has_key(pd.get_attr('seq_id')): hash[pd.get_attr('seq_id')] = []\n hash[pd.get_attr('seq_id')].append(pd)\n fo.close()\n return hash\n \n\nclass PfamDomain():\n def __init__(self, line):\n self.attributes = ['seq_id', 'alignment_start', 'alignment_end', 'envelope_start', 'envelope_end', 'hmm_acc', 'hmm_name', 'type', 'hmm_start', 'hmm_end', 'hmm_length', 'bit_score', 'E-value', 'significance', 'clan']\n line = line.strip()\n self.values = line.split()\n\n def get_attr(self, name):\n if not name in self.attributes: return \"\"\n return self.values[ self.attributes.index(name) ]\n \n def covers(self, position):\n position = int(position)\n if int(self.get_attr('alignment_start')) <= position and int(self.get_attr('alignment_end')) >= position:\n return True\n return False\n" }, { "alpha_fraction": 0.6086596846580505, "alphanum_fraction": 0.6194153428077698, "avg_line_length": 29.47058868408203, "blob_id": "57c71f705e663191f309671f5c7ae709c89210d4", "content_id": 
"2bbf6c751c9a302cd62cd94f618e5df9d9f46e36", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3626, "license_type": "permissive", "max_line_length": 129, "num_lines": 119, "path": "/python/openreadingframe/orf_prediction_part2.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# ORF prediction for the sequences without homologs\n\nfrom Bio import SeqIO\nfrom Bio.Seq import reverse_complement, transcribe, back_transcribe, translate\nfrom Bio.Alphabet import IUPAC\nimport getopt, sys\nimport string\n\n\n# getopt reads in my input file\ninput_file = \"\"\n\n#print \"All arguments: \", sys.argv\n \nshortOptions = 'hf:t:'\nlongOptions = ['help', 'filename=', 'threshold=']\n \n#==============================================================================\ndef usage():\n print \"\"\"\n %s \n -h\\t--help\\tdisplay help.\n -f\\t--filename\\tpath to FASTA input file in which to predict ORFs\n -t\\t--threshold\\tminimum length of predicted ORF (in amino acids) to be considered\n \"\"\" % sys.argv[0]\n\n\n#==============================================================================\ndef get_parameters():\n\n if len(sys.argv) == 1:\n usage()\n sys.exit()\n\n opts = []\n args = []\n try:\n opts, args = getopt.getopt(sys.argv[1:], shortOptions, longOptions)\n except getopt.GetoptError:\n print \"ERR: At least one option is not available!\"\n usage()\n sys.exit()\n \n for o, a in opts:\n if o == \"--help\" or o == \"-h\":\n print \"HELP\"\n usage()\n elif o == \"--filename\" or o == \"-f\":\n# print \"Filename:\", a\n input_file = a\n elif o == \"--threshold\" or o == \"-t\":\n threshold = int(a)\n \n for a in args:\n print \"Additional argument, no option: \", a \n #end of getopt stuff! 
now it becomes even more exciting!!!\n\n return input_file, threshold\n\n\ndef get_input_sequences(input_file):\n ids2seqs = {}\n for seq_record in SeqIO.parse(open(input_file), \"fasta\"):\n ids2seqs[seq_record.id] = seq_record.seq\n return ids2seqs\n\n\n############################################now the real programme starts########################################################\n\ninput_file, threshold = get_parameters()\nids2seqs = get_input_sequences(input_file)\n\n#header = ['id', 'frame', 'startpos', 'endpos', 'cds', 'protein', 'evidence']\n#print \"#\" + string.join(header, \"\\t\")\n\n# iterate input sequences\nfor key, dna_sequence_direction1 in ids2seqs.iteritems():\n # direction1 is the direction we originally have had, 2 is the antisense strand\n dna_sequence_direction2 = dna_sequence_direction1.reverse_complement()\n \n # TRANSLATE ALL POSSIBLE ORFs, do not stop at STOP codons\n translations = {}\n translations['1'] = translate(dna_sequence_direction1)\n translations['-1'] = translate(dna_sequence_direction2)\n translations['2'] = translate(dna_sequence_direction1[1:])\n translations['-2'] = translate(dna_sequence_direction2[1:])\n translations['3'] = translate(dna_sequence_direction1[2:])\n translations['-3'] = translate(dna_sequence_direction2[2:])\n\n polypeptides = {}\n for frame, translation in translations.iteritems():\n peptides = translation.split('*')\n startpos = 0\n for peptide in peptides:\n polypeptides[peptide.tostring()] = [frame, startpos]\n startpos += len(peptide)+1\n\n # get longest ORF with startpos and frame\n peptides = polypeptides.keys()\n peptides.sort(key=len)\n longestpeptide = peptides[-1]\n frame, startpos = polypeptides[longestpeptide]\n\n if len(longestpeptide) < threshold: continue\n\n start_nt = startpos *3\n stop_nt = start_nt + ((len(longestpeptide)+1)*3)\n if frame.startswith('-'):\n cds = dna_sequence_direction2.tostring()\n else:\n cds = dna_sequence_direction1.tostring()\n cds = cds[start_nt:stop_nt+1]\n\n if frame.startswith('-'):\n start_nt, stop_nt = stop_nt, start_nt\n \n outlist = [key, frame, str(start_nt), str(stop_nt), cds, longestpeptide, \"2\"]\n print string.join(outlist, \"\\t\")\n" }, { "alpha_fraction": 0.5133663415908813, "alphanum_fraction": 0.5198019742965698, "avg_line_length": 31.580644607543945, "blob_id": "7125b7487653e7a61932dca267bfac8b9d77463b", "content_id": "f3ed93219b53352b39049896981ee23345f3b5b7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2020, "license_type": "permissive", "max_line_length": 98, "num_lines": 62, "path": "/python/fasta/translatedprot_from_gb_to_fasta.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t# low level handling, such as command line stuff\nimport string\t\t\t# string methods available\nimport re\t\t\t\t\t# regular expressions\nfrom low import *\t# custom functions, written by myself\n\n\n# =============================================================================\ndef get_translatedfasta_from_gb( file ):\n\t\"\"\"\n\t\"\"\"\n\t\n\tdef write_output( source, hash ):\n\t\tL = [ \">\",hash.get('protein_id'),\"|\",hash.get('db_xref')]\n\t\tif hash.has_key('product'): L.append(\"|\"+hash.get('product'))\n\t\tL.append(\" (\"+source+\")\")\n\t\tprint string.join(L,'')\n\t\tprint hash.get('translation')\n\t\n\tfo = open(file)\n\t# read general infos\n\tsource = ''\n\tfor line in fo:\n\t\tif re.search('FEATURES',line): break\n\t\tif 
re.match('SOURCE',line):\n\t\t\tsource = re.search('SOURCE\\s+(.*)\\n',line).group(1)\n\t\t\n\t# read gene infos\n\thash = {}\n\thit = 0\n\tfor line in fo:\n\t\tif not re.match(' ',line):\n\t\t\tif len(hash) > 0:\twrite_output( source, hash )\n\t\t\thash = {} \n\t\t\thit = 0\n\t\tif re.match(' CDS',line): hit = 1\n\t\t\n\t\tif hit:\n\t\t\t# catch everything except translation sequence\n\t\t\tif re.search('/(\\S+)=\".*\"',line):\n\t\t\t\thash[re.search('/(\\S+)=\".*\"',line).group(1)] = re.search('/\\S+=\"(.*)\"',line).group(1)\n\t\t\t# catch translation sequence\n\t\t\tif re.search('/translation=',line):\n\t\t\t\thash['translation'] = re.search('/translation=\"(.*)\\n',line).group(1)\n\t\t\telif hash.has_key('translation'): hash['translation'] += re.search(\"([a-zA-Z]+)\",line).group(1)\n\tif len(hash) > 0: write_output( source, hash )\n\tfo.close()\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nif len( sys.argv ) == 1:\n\tprint \"no arguments provided. you need to specify the gb file(s) to parse.\"\n\tsys.exit(1)\n\nfor file in sys.argv[1:]:\n\tif not file_exists(file):\n\t\tprint \"gb file not found (or is a dir):\", file\n\t\tcontinue\n\tget_translatedfasta_from_gb( file ) " }, { "alpha_fraction": 0.536912739276886, "alphanum_fraction": 0.5450370907783508, "avg_line_length": 27.595958709716797, "blob_id": "6642906ba26493af64ceced397e58e506b374c10", "content_id": "c5e9e5344405fd19ba9044cff30713343e386c01", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 2831, "license_type": "permissive", "max_line_length": 130, "num_lines": 99, "path": "/ruby/swapsc/bio-graphics-plot.rb", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n# generates a diagram of where in the sequence accelerated evolution / positive selection / negative selection took place\nrequire 'rubygems'\nrequire 'bio-graphics'\n\nDEBUG = false\n\n\n\n###############################################################################\nclass SwapscFeature\n\n attr_accessor :start, :stop, :category\n \n def initialize(start,stop,category)\n @start = start\n @stop = stop\n @category = category\n @added = false\n end\n\n def <=> other\n @start <=> other.start\n end \n\n def added?\n return @added\n end\n\n def add_to_track(track)\n track.add_feature( Bio::Feature.new(@category, '%s..%s' % [ @start, @stop ]), :colour => $categories[@category][:color] )\n $categories[@category][:stats] += (@stop - @start +1)\n $categories[@category][:branchstats] += (@stop - @start +1)\n @added = true\n end\nend\n###############################################################################\n\n###############################################################################\n\nif ARGV[0] and not File.exists?(ARGV[0])\n puts \"error: invalid path to file specified.\"\n ARGV[0] = nil\nend\n\nunless ARGV[0] or ARGV[1]\n puts \"generates a diagram of where in the sequence accelerated evolution / positive selection / negative selection took place\\n\"\n puts \"expected format [tab-delimited]:\"\n puts \"PANEL length\"\n puts \"TRACK name label\"\n puts \"FEATURE range color [label]\"\n puts \"usage: visualize-swapsc.rb <flatfile> <outfile>\\n\"\n exit 1\nend\n\n# === MAIN ====================================================================\n# 
=============================================================================\n\n# 1. read flatfile and save the input\n# 2. process input, create the plot\npanel = nil\ntrack = nil\ntracks = Array.new\nfeatures = Array.new\n\nf = File.open( ARGV[0], \"r\" )\n#STDERR.print( ARGV[0] + \"\\t\" )\nwhile line = f.gets\n next if line == nil\n line.chomp!\n cols = line.split(\"\\t\")\n if cols[0] == \"PANEL\"\n panel = Bio::Graphics::Panel.new( cols[1].to_i, :width => 800, :format => :png )\n elsif cols[0] == \"TRACK\"\n i, name, label = line.split(\"\\t\")\n if label == \"true\"\n label = true\n else\n label = false\n end\n track = panel.add_track(name, :label => label)\n elsif cols[0] == \"FEATURE\"\n if line.split(\"\\t\").length == 4\n i, range, color, label = line.split(\"\\t\")\n color = color.split(',').collect{|c| c.to_f}\n track.add_feature( Bio::Feature.new(\"feature\", range), :colour => color, :label => label )\n else\n i, range, color = line.split(\"\\t\")\n color = color.split(',').collect{|c| c.to_f}\n track.add_feature( Bio::Feature.new(\"feature\", range), :colour => color )\n end\n else\n STDERR.puts \"unknown line descriptor \\\"#{cols[0]}\\\"\" unless cols[0].nil?\n end\nend\nf.close\npanel.draw(ARGV[1])\n\n#STDERR.puts \"done.\"\n" }, { "alpha_fraction": 0.5363309383392334, "alphanum_fraction": 0.5687050223350525, "avg_line_length": 38.140846252441406, "blob_id": "f399353e404efe0ed4e1299e6e4785da595d6636", "content_id": "3134acb802a1b2efa9c90765f144e2b261919342", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2780, "license_type": "permissive", "max_line_length": 145, "num_lines": 71, "path": "/python/base/needlemanwunsch.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "import sys\n\n# =============================================================================\ndef align(array1, array2, gap = -2, match = 1, mismatch = -1):\n \"\"\"Performs Needleman-Wunsch alignment of string1 and string2.\n Prints out the alignment and returns the array of scores and pointers(arrows).\n\n Example usage from an interactive shell:\n from NeedlemanWunsch import NW\n Scores, Pointers = NW('PELICAN','COELACANTH')\n\n This is modified from a Perl implementation in the book BLAST by Korf, et al.\n \"\"\"\n # initialize scoring and 'arrow' matrices to 0\n Scores = [[0 for x in range(len(array2)+1)] for y in range(len(array1)+1)]\n Pointers = [[0 for x in range(len(array2)+1)] for y in range(len(array1)+1)]\n\n # initialize borders\n # for pointers (arrows), use 2 for diagonal, -1 for horizontal, and 1 for vertical moves (an arbitrary system).\n # I have tried to consistently use i for rows (vertical positions) in the score and pointer tables, and j for columns (horizontal positions).\n for i in range(len(array1)+1):\n Scores[i][0] = gap*i\n Pointers[i][0] = 1 \n for j in range(len(array2)+1):\n Scores[0][j] = gap*j\n Pointers[0][j] = -1\n\n # fill with scores\n for i in range(1,len(array1)+1):\n for j in range(1,len(array2)+1):\n letter1 = array1[i-1]\n letter2 = array2[j-1]\n if letter1 == letter2: \n DiagonalScore = Scores[i-1][j-1] + match\n else: DiagonalScore = Scores[i-1][j-1] + mismatch\n HorizontalScore = Scores[i][j-1] + gap \n UpScore = Scores[i-1][j] + gap\n # TempScores is list of the three scores and their pointers\n TempScores = [[DiagonalScore,2],[HorizontalScore,-1],[UpScore,1]]\n # Now we keep the highest score, and the associated direction (pointer)\n Scores[i][j], Pointers[i][j] = 
max(TempScores)\n\n # backtrace from the last entry. \n [i,j] = [len(array1),len(array2)]\n align1 = []\n align2 = []\n while [i,j] != [0,0]:\n if Pointers[i][j] == 2:\n align1.append(array1[i-1])\n align2.append(array2[j-1])\n i = i - 1\n j = j - 1\n elif Pointers[i][j] == -1:\n align1.append('-')\n align2.append(array2[j-1])\n j = j - 1\n else:\n align1.append(array1[i-1])\n align2.append('-')\n i = i - 1\n\n # the alignments have been created backwards, so we need to reverse them:\n align1 = align1[::-1]\n align2 = align2[::-1]\n\n # print out alignment\n #print align1\n #print align2\n\n # in case you want to look at the scores and pointers, the function returns them\n return [Scores,Pointers, [align1, align2]]\n\n" }, { "alpha_fraction": 0.6210746765136719, "alphanum_fraction": 0.6294487118721008, "avg_line_length": 22.491804122924805, "blob_id": "887c7ec98d425429ce4a21c62501580bf3acb2b1", "content_id": "dd064d2840da9f8e1b0afa244efb7be330723ddd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1433, "license_type": "permissive", "max_line_length": 95, "num_lines": 61, "path": "/python/orthomcl/speciesids4orthomcl.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys, string\n\n\ndef usage():\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" folder with genomes (*.fasta or *.fasta.gz)\"\n sys.exit(1)\n\n\ndef plausi():\n if len(sys.argv) != 2: usage()\n inFolder = sys.argv[1]\n if not os.path.exists(inFolder) or not os.path.isdir(inFolder): \n print >> sys.stderr, \"specified input folder does not exist or is not a directory\\n\"\n usage()\n if not inFolder.endswith('/'): inFolder += '/'\n return inFolder\n\n\ndef iterate_folder(inFolder):\n inFiles = []\n for fname in os.listdir(inFolder):\n if not fname.endswith('.fasta') and not fname.endswith('.fasta.gz'): continue\n inFiles.append(inFolder + fname)\n return inFiles\n\n\ndef process_file(inFile):\n gzip = 0\n if inFile.endswith('.gz'): gzip = 1\n\n if gzip:\n ec = os.system('gunzip ' + inFile)\n inFile = os.path.splitext(inFile)[0]\n\n filename = os.path.split(inFile)[1]\n outName = os.path.splitext(filename)[0]\n\n sys.stdout.write(outName + \": \")\n\n ids = {}\n fo = open(inFile)\n for line in fo:\n if not line.startswith(\">\"): continue\n line = line.rstrip()\n id = line[1:]\n if id.count(\" \") > 0: id = id[:id.index(\" \")]\n ids[id] = 1\n\n sys.stdout.write( string.join(ids.keys(), \" \") )\n sys.stdout.write(\"\\n\")\n\n if gzip: ec = os.system('gzip ' + inFile)\n \n\ndef main():\n inFolder = plausi()\n inFiles = iterate_folder(inFolder)\n for inFile in inFiles: process_file(inFile)\n\nmain()\n" }, { "alpha_fraction": 0.4715249538421631, "alphanum_fraction": 0.47665315866470337, "avg_line_length": 31.78761100769043, "blob_id": "1a306b0fd0bdd20fd66cf34029aafea4c4d4003f", "content_id": "330e88c316a14bc065961fb6af97c6e691be19b8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3705, "license_type": "permissive", "max_line_length": 135, "num_lines": 113, "path": "/python/misa/ssr-to-amino-acid.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport getopt # comand line argument handling\nfrom collections import defaultdict\nfrom low import * # custom functions, written by myself\nfrom 
misa import MisaSSR\n\nAA = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" -d <gff-folder>\"\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -p protein fasta file\" )\n stdout( \" -m misa file incl. protein in last column\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hm:p:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-m': args['misa'] = value\n if key == '-p': args['protein'] = value\n \n if not args.has_key('misa'):\n print >> sys.stderr, \"misa file argument missing.\"\n show_help()\n elif not file_exists( args.get('misa') ):\n print >> sys.stderr, \"misa file does not exist.\"\n show_help()\n\n if not args.has_key('protein'):\n print >> sys.stderr, \"protein file argument missing.\"\n show_help()\n elif not file_exists( args.get('protein') ):\n print >> sys.stderr, \"protein file does not exist.\"\n show_help()\n\n return args\n\n\n# =============================================================================\ndef get_ssrs(file):\n hash = defaultdict(list)\n fo = open(file)\n for line in fo: \n if line.startswith(\"ID\\t\"): continue\n m = MisaSSR(line)\n hash[m.geneid].append(m)\n fo.close()\n print >> sys.stderr, \"read %s microsatellites\" % len(hash)\n return hash\n\n# =============================================================================\ndef get_protein(file):\n seqhash = defaultdict(str)\n id = \"\"\n fo = open(file)\n for line in fo:\n line = line.rstrip()\n if line.startswith(\">\"):\n id = line[1:]\n if id.count(\" \") > 0: id = id[:id.index(\" \")]\n else:\n seqhash[id] += line\n fo.close()\n print >> sys.stderr, \"read %s protein sequencess\" % len(seqhash)\n return seqhash\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n SSRs = get_ssrs(args['misa']) \n seqhash = get_protein(args['protein']) \n for sid, SSRs in SSRs.iteritems():\n for SSR in SSRs:\n prot = seqhash[SSR.feature]\n pstart, pend = SSR.startpos / 3, SSR.endpos / 3\n seq = prot[pstart:pend+1]\n indic = \"-\"\n for aa in AA:\n if seq.count(aa*4) > 0:\n indic = aa\n break\n if indic == \"-\":\n for aa1 in AA:\n for aa2 in AA:\n if seq.count((aa1+aa2)*3) > 0:\n indic = aa1+aa2\n break\n print SSR.to_s() + \"\\t\" + indic + \"\\t\" + seq\n\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n" }, { "alpha_fraction": 0.5148559808731079, "alphanum_fraction": 0.5193921327590942, "avg_line_length": 36.050418853759766, "blob_id": "2ea2c20920b4a6f418bc66aa60da7c9fcdba6d72", "content_id": "1e566ee094b6764452c696c6c11f7921ca000504", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4409, "license_type": "permissive", "max_line_length": 101, "num_lines": 119, "path": "/python/paml/paml-codeml.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular expressions\nimport getopt\t\t\t\t\t# comand line argument handling\nfrom low import *\t\t\t# custom functions, written by myself\nimport anydbm\n\n# =============================================================================\t\ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f nt alignment file\" )\n stdout( \" -t tree file (newick format)\" )\n stdout( \" -p path to PAML codeml\" )\n stdout( \" -n number of cpus to use\" )\n stdout( \" \" )\n\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help()\t\n\n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:t:p:n:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f':\targs['aln'] = value\n if key == '-t':\targs['tree'] = value\n if key == '-p':\targs['codeml'] = value\n if key == '-n':\targs['ncpu'] = int(value)\n \n if not args.has_key('aln'):\n stderr( \"aln file missing.\" )\n show_help()\n if not file_exists( args.get('aln') ):\n stderr( \"aln file does not exist.\" )\n show_help()\n \n if not args.has_key('tree'):\n stderr( \"tree file missing.\" )\n show_help()\n if not file_exists( args.get('tree') ):\n stderr( \"tree file does not exist.\" )\n show_help()\n\n if not args.has_key('codeml'):\n args['codeml'] = \"~/Results/Orthologs/PAML/codeml\"\n args['pamlfolder'] = os.path.split(args.get('codeml'))[0] + '/'\n\n if not file_exists( args.get('codeml') ):\n stderr( \"codeml binary not found.\" )\n show_help()\n if not dir_exists( args.get('pamlfolder') ):\n stderr( \"paml folder does not exist\" )\n show_help()\n\n return args\n\n# =============================================================================\n# =============================================================================\ndef main( args ):\n \n paml_orth_aln = args.get('pamlfolder')+'orth.aln'\n paml_orth_tree = args.get('pamlfolder')+'orth.tree'\n paml_orth_out = args.get('pamlfolder')+'orth.out'\n if file_exists(paml_orth_aln): os.unlink( paml_orth_aln )\n if file_exists(paml_orth_tree): os.unlink( paml_orth_tree )\n if file_exists(paml_orth_out): os.unlink( paml_orth_out )\n\n \n # copy all necessary files in the PAML folder\n #os.system( 'cp %s %s' % (args.get('aln'), paml_orth_aln) )\n os.system( 'sed s/^\\>//g %s > %s' %( args.get('aln'), paml_orth_aln ) )\n os.system( 'cp %s %s' % (args.get('tree'), paml_orth_tree) )\n \n # now run PAML with a given model (ctl file) and return the results of the run\n models = [\"M0\", \"Free\", \"M3K2\", \"M3K3\", \"M7\", \"M8\"]\n #models = [\"M3K2\", \"M3K3\", \"M7\", \"M8\", 
\"Free\"]\n #models = [\"M0\"]\n #models = [\"Free\"]\n sys.stderr.write('%s\\trunning PAML.codeml' % (os.path.split(args.get('aln'))[1]) )\n for M in models:\n sys.stderr.write('\\t' + M)\n sys.stderr.flush()\n while not file_exists(paml_orth_out) or not file_exists(args.get('pamlfolder')+'rst'):\n CWD = os.getcwd()\n os.chdir( args.get('pamlfolder') )\n #os.system( 'cp %s codeml.ctl' % ('codeml.ctl.'+M) )\n error = os.WEXITSTATUS(os.system( './codeml codeml.ctl.' + M + ' &> codeml.log'))\n os.chdir( CWD )\n \n if not error:\n os.system( 'mv %s %s' % (paml_orth_out, args.get('aln')+'.paml.out.'+M) )\n os.system( 'mv %s %s' % (args.get('pamlfolder')+'rst', args.get('aln')+'.paml.rst.'+M) )\n else:\n os.system( 'mv %s %s' % (args.get('pamlfolder')+'codeml.log', args.get('aln')+'.paml.err.'+M) )\n sys.stderr.write(' (!)')\n sys.stderr.flush()\n sys.stderr.write(\"\\n\")\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.5430978536605835, "alphanum_fraction": 0.5486066341400146, "avg_line_length": 30.804122924804688, "blob_id": "75ca286a05309e26da60b0a8d55ea5ebadaf86e6", "content_id": "d3f23b2780df77ebc036089a3de25f6877d053f0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 3086, "license_type": "permissive", "max_line_length": 165, "num_lines": 97, "path": "/ruby/pfam/hmmout_annotation.rb", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\nrequire 'optparse'\n\nPFAMFILE = \"/global/databases/pfam/current/pfam_scan_db/Pfam-A.hmm\"\n\nclass String\n def valid_float?\n # The double negation turns this into an actual boolean true - if you're \n # okay with \"truthy\" values (like 0.0), you can remove it.\n !!Float(self) rescue false\n end\nend\n\n\n# =============================================================================\ndef get_opt\n options = {}\n optparse = OptionParser.new do |opts|\n opts.banner = \"Usage: #{$0} -f <file> -c <value>\"\n opts.on( '-f FILE or DIR', 'single hmmout file (pfam_scan output with first column = protein length), or a directory where all *.hmmout files will be processed' \n ){|file| options[:hmmfile] = file}\n opts.on( '-c CUTOFF', '[evalueFloat|GA|TC|NC]'\n ){|v| options[:cutoff] = v}\n end \n begin\n optparse.parse!\n mandatory = [:hmmfile, :cutoff]\n missing = mandatory.select{|param| options[param].nil?}\n if not missing.empty?\n puts \"Missing options: #{missing.join(', ')}\"\n puts optparse \n exit\n end \n rescue OptionParser::InvalidOption, OptionParser::MissingArgument\n puts $!.to_s\n puts optparse\n exit\n end\n return options\nend\n\ndef get_cutoffs(file=PFAMFILE)\n cutoffHash = Hash.new\n capture = %w( NAME GA NC TC )\n @name = nil\n reader = File.open(file, 'r')\n while (line = reader.gets)\n entry = {} if line[0,6] == 'HMMER3'\n capture.each{|e| entry[e] = line.split[1] if line[0,e.length] == e }\n if line[0,2] == \"//\" \n if entry.length != capture.count\n STDERR.puts \"FATAL ERROR: not all required fields found for an entry: #{entry.inspect}\"\n next\n end\n cutoffHash[entry['NAME']] = entry\n end\n end\n return cutoffHash\nend\n\n\n# ==============================================================================\ndef filter_hmmout(file, cutoff)\n fw = File.open(file + \".\" + 
cutoff, 'w')\n f = File.open(file, 'r')\n if cutoff.valid_float? # e-value cutoff given\n e = cutoff.to_f\n f.each{|line| cols = line.chomp.split; fw.puts cols.join(\"\\t\") if cols[13].to_f < e}\n else \n e = cutoff if ['GA', 'TC', 'NC'].include?(cutoff)\n abort(\"invalid value given for cutoff method (#{cutoff}). allowed values are GA, NC, and TC.\") if e.nil?\n cutoffHash = get_cutoffs()\n puts \"--- cutoffHash: #{cutoffHash.count} ---\"\n f.each{|line| \n cols = line.chomp.split;\n name, bitscore = cols[7], cols[12].to_f\n puts name, cutoffHash[name]\n next unless name[0,6] == 'Pfam-B' or bitscore > cutoffHash[name][e].to_f\n fw.puts cols.join(\"\\t\") \n }\n end\n f.close\n fw.close\nend\n\n\n# ==============================================================================\n# =MAIN=========================================================================\n# ==============================================================================\n\noptions = get_opt()\nunless File.directory?(options[:hmmfile])\n filter_hmmout(options[:hmmfile], options[:cutoff])\nelse\n Dir.glob(options[:hmmfile] + '/*.hmmout').each{|hmmfile| filter_hmmout(hmmfile, options[:cutoff])}\nend\n\n" }, { "alpha_fraction": 0.477456271648407, "alphanum_fraction": 0.4831763207912445, "avg_line_length": 31.30434799194336, "blob_id": "22cdf148e345be894e7cd0e9877b9eabc4842441", "content_id": "bf4dcca2ce1c81b1ce3a56b4f94e2c3b598fc744", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2972, "license_type": "permissive", "max_line_length": 94, "num_lines": 92, "path": "/python/fasta/fastasplit.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular expressions\nimport getopt\t\t\t\t\t# comand line argument handling\nimport math\nfrom low import *\t\t\t# custom functions, written by myself\n\n# =============================================================================\t\ndef show_help( ):\n\t\"\"\" displays the program parameter list and usage information \"\"\"\n\tstdout( \"usage: \" + sys.argv[0] + \" -f <path> -n <x> -i <x>\" )\n\tstdout( \" \" )\n\tstdout( \" option description\" )\n\tstdout( \" -h help (this text here)\" )\n\tstdout( \" -f fasta file\" )\n\tstdout( \" -n size of each new fasta file (# seq)\" )\n\tstdout( \" -i number of fasta files to split into\" )\n\tstdout( \" \" )\n\tsys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n\t\"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\tif len ( sys.argv ) == 1:\n\t\tstderr( \"no arguments provided.\" )\n\t\tshow_help()\t\n\t\n\ttry: # check for the right arguments\n\t\tkeys, values = getopt.getopt( sys.argv[1:], \"hf:n:i:\" )\n\texcept getopt.GetoptError:\n\t\tstderr( \"invalid arguments provided.\" )\n\t\tshow_help()\n\t\n\targs = {}\n\tfor key, value in keys:\n\t\tif key == '-f': args['fasta'] = value\n\t\tif key == '-n':\targs['n'] = int(value)\n\t\tif key == '-i':\targs['i'] = int(value)\n\t\t\t\t\n\tif not args.has_key('n') and not args.has_key('i'):\n\t\tstderr( \"n or i missing.\" )\n\t\tshow_help()\n\n\tif not args.has_key('fasta'):\n\t\tstderr( \"fasta file missing.\" )\n\t\tshow_help()\n\tif not file_exists( args.get('fasta') ):\n\t\tstderr( \"fasta file does not exist.\" 
)\n\t\tshow_help()\n\t\t\n\treturn args\n\n\t\n# =============================================================================\n# =============================================================================\ndef main( args ):\n sout, serr = catch_bash_cmd_output( \"grep '>' -c %s\" % args.get('fasta') )\n total = int( sout )\n cut = total\n seqcount = 0\n filecount = 1\n\n if args.has_key('i'): cut = int(math.ceil( 1.0 * total / args.get('i') ))\n else: cut = args.get('n')\n\n\n fw = open( args.get('fasta') + '.' + add_leading_zeroes(filecount, 6), 'w' )\n handle = open(args.get('fasta'))\n for line in handle:\n\n if line[0] == \">\": \n seqcount += 1\n if ((seqcount % cut) == 1 and seqcount > 1) or (cut == 1 and seqcount > 1):\n filecount += 1\n fw.flush()\n fw.close()\n fw = open( args.get('fasta') + '.' + add_leading_zeroes(filecount, 6), 'w' )\n\n fw.write(line)\n\n fw.flush()\n fw.close()\n infomsg( \"total.seq.count: %s | split.count: %s | file.count: %s\" %(total, cut, filecount) )\n\t\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.5354772806167603, "alphanum_fraction": 0.5495150685310364, "avg_line_length": 28.0222225189209, "blob_id": "c6ca1bb530d1376178c89687a3c71627bcbf6dc0", "content_id": "b7a99ef1d3ece6cbdc9d75fc018fda154eb8ed45", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3918, "license_type": "permissive", "max_line_length": 147, "num_lines": 135, "path": "/python/orthomcl/orthomcl-blastparse.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys, string\nimport threading, time\nimport anydbm\nfrom low import *\n\n\nTHREADS = 8\nEVALUE = float(1e-5)\n\n\ndef usage():\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" all.blastout\"\n print >> sys.stderr, \" - all.blastout: blast out file generated by NCBI blast (concat all genomes in all one and blastp against itself, -m 8)\"\n sys.exit(1)\n\n\ndef plausi():\n if len(sys.argv) != 2: usage()\n inBlast = sys.argv[1]\n if not os.path.exists(inBlast) or not os.path.isfile(inBlast): \n print >> sys.stderr, \"specified blast input file does not exist or is not a file\\n\"\n usage()\n return inBlast\n\n\n# =============================================================================\nclass BlastHit():\n def __init__(self, line):\n cols = line.split(\"\\t\")\n self.queryid = cols.pop(0)\n self.subjectid = cols.pop(0)\n self.percentident = float(cols.pop(0))\n self.alnlength = int(cols.pop(0))\n self.mismatch = cols.pop(0)\n self.gap = cols.pop(0)\n self.querystart = cols.pop(0)\n self.queryend = cols.pop(0)\n self.subjectstart = cols.pop(0)\n self.subjectend = cols.pop(0)\n self.evalue = float(cols.pop(0))\n self.score = cols.pop(0)\n \n\n# =============================================================================\ndef orthomcl_parse(hit_lines):\n simspan = ''\n simspanid = 1\n evalues = []\n sum_identical = 0\n sum_length = 0\n for line in hit_lines:\n b = BlastHit(line)\n evalues.append(b.evalue)\n simspan += str(simspanid) + \":\" + b.querystart + \"-\" + b.queryend + \":\" + b.subjectstart + \"-\" + b.subjectend + \".\"\n simspanid += 1\n sum_identical += b.percentident * b.alnlength\n sum_length += b.alnlength\n\n if len(evalues) == 0 
or min(evalues) > EVALUE: return 0\n  evalue = str(min(evalues))\n  percentIdent = str(int(sum_identical / sum_length))\n  return string.join( [b.queryid, \"0\", b.subjectid, \"0\", evalue, percentIdent, simspan], \";\" )\n  \n\n# =============================================================================\nclass Output():\n  def __init__(self):\n    self.counter = 1\n  def write(self, outstring):\n    print \"%s;%s\" % (self.counter, outstring)\n    self.counter += 1\n\n# =============================================================================\nclass MyThread( threading.Thread ):\n  def __init__(self, jobs, Out):\n    # initialize the Thread base class; without this, start() would fail\n    threading.Thread.__init__(self)\n    self.jobs = jobs\n    self.out = Out\n\n  def run(self):\n    for lines in self.jobs:\n      outputstring = orthomcl_parse(lines)\n      if outputstring != 0: self.out.write(outputstring)\n\n# =============================================================================\ndef read_gg(file):\n  speciesHash = {}\n  fo = open(file)\n  for line in fo:\n    line = line.rstrip()\n    cols = line.split()\n    speciesname = cols.pop(0)\n    speciesname = speciesname[:speciesname.index(':')]\n    for id in cols: speciesHash[id] = speciesname\n  return speciesHash\n\n\ndef process_file(inBlast):\n  Out = Output()\n  q1, h1 = \"\", \"\"\n  done = 0\n  info( \"\\tcounting blast lines to parse...\" )\n  sout, serr = catch_bash_cmd_output( \"wc -l %s\" % inBlast )\n  totaljobs = int( sout.split()[0] )\n  fo = open(inBlast)\n  line = fo.readline()\n  while threading.activeCount() > 1 or line:\n    # check existing threads: still running?\n    # fill up all remaining slots\n    while threading.activeCount() <= THREADS and line:\n      # start new thread\n      jobs = []\n      while line and len(jobs) < 1000:\n        jobs.append([line])\n        q2,h2 = line.split(\"\\t\")[0:2]\n        line = fo.readline()\n        done += 1\n        while line and line.split(\"\\t\")[0:2] == [q2,h2]:\n          jobs[-1].append(line)\n          line = fo.readline()\n          done += 1\n\n      t = MyThread( jobs, Out )\n      t.run()\n\n    info( \"\\t[ jobs done: %s | remain: %s | threads active: %s ] \" % ( (totaljobs - done ), done, threading.activeCount() -1) ) \n    time.sleep(0.2)\n  fo.close()\n\n\ndef main():\n  inBlast = plausi()\n  process_file(inBlast)\n\nmain()\n" }, { "alpha_fraction": 0.5415470004081726, "alphanum_fraction": 0.5567613840103149, "avg_line_length": 32.32624053955078, "blob_id": "69d7aeed012febbdcb9fb61115ca70518ef9af82", "content_id": "078a0cc2fdbab4ccbe39a042781aa0b927b4e793", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9399, "license_type": "permissive", "max_line_length": 185, "num_lines": 282, "path": "/python/misa/ortho-pairwise-exon-intron.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys          # low level handling, such as command line stuff\nimport string           # string methods available\nimport re               # regular expressions\nimport getopt           # command line argument handling\nimport hashlib\nfrom low import *       # custom functions, written by myself\nfrom misa import MisaSSR\nimport newick\nfrom collections import defaultdict\nimport pickle\n\n\n# ============================================================================= \ndef show_help( ):\n  \"\"\" displays the program parameter list and usage information \"\"\"\n  stdout( \"usage: \" + sys.argv[0] + \" -f <path> -o <path> -t <path>\" )\n  stdout( \" \" )\n  stdout( \" option  description\" )\n  stdout( \" -h      help (this text here)\" )\n  stdout( \" -f      combined misa output\" )\n  stdout( \" -o      pairwise ortholog intra/intergenic regions file\" )\n  stdout( \" -t      newick tree 
with branch lengths\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:o:t:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['misa'] = value\n if key == '-o': args['orth'] = value\n if key == '-t': args['tree'] = value\n \n if not args.has_key('misa'):\n stderr( \"misa file argument missing.\" )\n show_help()\n elif not file_exists( args.get('misa') ):\n stderr( \"misa file does not exist.\" )\n show_help()\n\n if not args.has_key('orth'):\n stderr( \"orth file argument missing.\" )\n show_help()\n elif not file_exists( args.get('orth') ):\n stderr( \"orth file does not exist.\" )\n show_help()\n \n return args\n\n\ndef get_distances(file):\n tree = open(file).readline().strip()\n ancestral_nodes = []\n leaves = {}\n while 1:\n # END OF TREE: semicolon\n if tree.startswith(\";\"): break\n\n # START INNER NODE\n if tree.startswith(\"(\"):\n tree = tree[1:]\n n = newick.Node()\n if len(ancestral_nodes) > 0: n.parent = ancestral_nodes[-1]\n ancestral_nodes.append(n)\n continue\n\n # END INNER NODE\n if tree.startswith(\")\"):\n tree = tree[1:]\n if re.match(\":(\\d+)\", tree):\n distance = re.match(\":(\\d+)\", tree).group(1)\n ancestral_nodes[-1].distance_to_parent = distance\n while re.match(\"[:\\d]+\", tree): tree = tree[1:]\n ancestral_nodes.pop(-1)\n continue\n\n # OUTER NODE SINGLE\n if re.match(\",([A-Za-z]+):(\\d+)\\)\", tree):\n els = re.match(\",([A-Za-z]+):(\\d+)\", tree).groups()\n n1 = newick.Node()\n n1.parent = ancestral_nodes[-1]\n n1.distance_to_parent = els[1]\n leaves[els[0]] = n1\n while not tree.startswith(\")\"): tree = tree[1:]\n continue\n\n # OUTER NODE DOUBLE\n if re.match(\"([A-Za-z]+):(\\d+),([A-Za-z]+):(\\d+)\", tree):\n els = re.match(\"([A-Za-z]+):(\\d+),([A-Za-z]+):(\\d+)\", tree).groups()\n n1 = newick.Node()\n n1.parent = ancestral_nodes[-1]\n n1.distance_to_parent = els[1]\n n1.distance_to_parent = els[1]\n n2 = newick.Node()\n n2.parent = ancestral_nodes[-1]\n n2.distance_to_parent = els[3]\n leaves[els[0]] = n1\n leaves[els[2]] = n2\n while not tree.startswith(\")\"): tree = tree[1:]\n continue\n\n # INTERNAL INNER NODE\n if tree.startswith(\",(\"):\n tree = tree[2:]\n n = newick.Node()\n if len(ancestral_nodes) > 0: n.parent = ancestral_nodes[-1]\n ancestral_nodes.append(n)\n continue\n if tree.startswith(\",\"):\n tree = tree[1:]\n continue\n\n distances = {}\n for species1, leafnode1 in leaves.iteritems():\n for species2, leafnode2 in leaves.iteritems():\n distances[species1 + \",\" + species2] = str(leafnode1.summed_distance_to(leafnode2))\n return distances\n\n\n\nclass LocationPair():\n def __init__(self, line):\n columns = line.rstrip().split(\"\\t\")\n self.species = columns[0:2]\n self.type = columns[2]\n self.locations = [{'chr': columns[3], 'start': int(columns[4]), 'stop': int(columns[5])}, {'chr': columns[6], 'start': int(columns[7]), 'stop': int(columns[8])}]\n\n\ndef get_orthologs(file):\n orthologs = []\n fo = open(file)\n for line in fo:\n if line.startswith(\"#\"): continue\n if len(line.rstrip()) == 0: continue\n orthologs.append(LocationPair(line))\n fo.close()\n return 
orthologs\n\n\ndef get_ssrs(file):\n hash = {}\n fo = open(file)\n for line in fo:\n if line.startswith(\"ID\\t\"): continue\n m = MisaSSR(line)\n hash[m.geneid + \"|\" + str(m.startpos)] = m\n fo.close()\n return hash\n\n\ndef hash(s):\n return hashlib.sha224(s).hexdigest()\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n \n ssrs = get_ssrs(args['misa'])\n orthologLocationPairs = get_orthologs(args['orth'])\n distances = get_distances(args['tree'])\n\n perfect, poly, shift, loss = defaultdict(lambda: defaultdict(int)), defaultdict(lambda: defaultdict(int)), defaultdict(lambda: defaultdict(int)), defaultdict(lambda: defaultdict(int))\n for locpair in orthologLocationPairs:\n qspecies, ospecies = locpair.species[0], locpair.species[1]\n qchr, qstart, qstop = locpair.locations[0]['chr'], locpair.locations[0]['start'], locpair.locations[0]['stop']\n ochr, ostart, ostop = locpair.locations[1]['chr'], locpair.locations[1]['start'], locpair.locations[1]['stop']\n qssrs, ossrs = [], []\n for s in range(qstart, qstop):\n key = qspecies + \"|\" + qchr + \"|\" + str(s)\n if ssrs.has_key(key): qssrs.append(ssrs[key])\n for s in range(ostart, ostop):\n key = ospecies + \"|\" + ochr + \"|\" + str(s)\n if ssrs.has_key(key): ossrs.append(ssrs[key])\n key = [qspecies, ospecies]\n key.sort()\n key = string.join(key, \",\")\n\n # no SSRs in these both locations\n if len(qssrs) == 0 and len(ossrs) == 0: continue\n # no SSRs in either one of the two locations\n if len(qssrs) == 0:\n for ssr in ossrs: loss[key][ssr.feature] += 1\n continue\n if len(ossrs) == 0:\n for ssr in qssrs: loss[key][ssr.feature] += 1\n continue\n\n caught = {}\n # stage 1: perfect matches\n for m1 in qssrs: \n for m2 in ossrs:\n if caught.has_key(hash(m1.to_s())) or caught.has_key(hash(m2.to_s())): continue\n if m1.is_perfect_match_to(m2):\n if m1.feature == m2.feature: \n perfect[key][m1.feature] += 1\n if m1.feature != \"intergenic\": \n print m1.to_s()\n print m2.to_s()\n caught[hash(m1.to_s())] = 1\n caught[hash(m2.to_s())] = 1\n\n # stage 2: polymorphic matches (same motif, but different number of repeats)\n for m1 in qssrs: \n for m2 in ossrs:\n if caught.has_key(hash(m1.to_s())) or caught.has_key(hash(m2.to_s())): continue\n if m1.is_polymorphic_to(m2):\n if m1.feature == m2.feature: \n poly[key][m1.feature] += 1\n if m1.feature != \"intergenic\": \n print m1.to_s()\n print m2.to_s()\n caught[hash(m1.to_s())] = 1\n caught[hash(m2.to_s())] = 1\n\n # stage 3: shifted matches (motif is shifted [permuated])\n for m1 in qssrs: \n for m2 in ossrs:\n if caught.has_key(hash(m1.to_s())) or caught.has_key(hash(m2.to_s())): continue\n if m1.is_shifted_to(m2):\n if m1.feature == m2.feature: \n shift[key][m1.feature] += 1\n if m1.feature != \"intergenic\": \n print m1.to_s()\n print m2.to_s()\n caught[hash(m1.to_s())] = 1\n caught[hash(m2.to_s())] = 1\n\n for m1 in qssrs:\n if caught.has_key(hash(m1.to_s())): continue\n loss[key][m1.feature] += 1\n for m2 in ossrs:\n if caught.has_key(hash(m2.to_s())): continue\n loss[key][m2.feature] += 1 \n \n \n keys = list(set(perfect.keys() + poly.keys() + shift.keys() + loss.keys()))\n keys.sort()\n for key in keys:\n speciespair = key\n time = str(distances[speciespair])\n perfectcounts, polycounts, shiftedcounts, losscounts = [], [], [], []\n for feature in [\"exon\", \"intron\", 
\"intergenic\"]:\n count = \"0\"\n if loss[key].has_key(feature): count = str(loss[key][feature])\n losscounts.append(count)\n \n count = \"0\"\n if perfect[key].has_key(feature): count = str(perfect[key][feature])\n perfectcounts.append(count)\n\n count = \"0\"\n if poly[key].has_key(feature): count = str(poly[key][feature])\n polycounts.append(count)\n\n count = \"0\"\n if shift[key].has_key(feature): count = str(shift[key][feature])\n shiftedcounts.append(count)\n\n sys.stderr.write(string.join([key, speciespair, time], \"\\t\"))\n sys.stderr.write(\"\\t\" + string.join(perfectcounts, \"\\t\"))\n sys.stderr.write(\"\\t\" + string.join(polycounts, \"\\t\"))\n sys.stderr.write(\"\\t\" + string.join(shiftedcounts, \"\\t\"))\n sys.stderr.write(\"\\t\" + string.join(losscounts, \"\\t\") + \"\\n\")\n\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5479999780654907, "alphanum_fraction": 0.5573333501815796, "avg_line_length": 35.3636360168457, "blob_id": "527c33f4b3102563dc3aad82e0bafa42a17a6441", "content_id": "afda111d4e19bc2b358c5a28b14db247763cc38b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6000, "license_type": "permissive", "max_line_length": 102, "num_lines": 165, "path": "/python/gff/get-missing-exons.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# for all predicted peptides of a genome, compare its hmmout to the hmmout\n# of all possible translations. putting this onto the scaffolds will\n# show where genes might have been missed.\n\nfrom low import *\nimport getopt, sys\nimport string\nfrom gff3 import GeneFeature\n\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hg:p:a:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-g': args['gff'] = value\n if key == '-p': args['predicted'] = value\n if key == '-a': args['alltranslations'] = value\n \n if not args.has_key('predicted'):\n stderr( \"predicted hmmout file argument missing.\" )\n show_help()\n elif not file_exists( args.get('predicted') ):\n stderr( \"predicted hmmout file does not exist.\" )\n show_help()\n\n if not args.has_key('alltranslations'):\n stderr( \"alltranslations hmmout file argument missing.\" )\n show_help()\n elif not file_exists( args.get('alltranslations') ):\n stderr( \"alltranslations hmmout file does not exist.\" )\n show_help()\n\n if not args.has_key('gff'):\n stderr( \"gff file argument missing.\" )\n show_help()\n elif not file_exists( args.get('gff') ):\n stderr( \"gff file does not exist.\" )\n show_help()\n\n\n return args\n\n# =============================================================================\ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -g <path> -p <path> -a <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -g gff file of the genome under investigation\" )\n stdout( \" -p hmmout of the predicted 
peptides\" )\n stdout( \" -a hmmout of all translations\" )\n stdout( \" \" )\n sys.exit(1)\n\n\n# =============================================================================\ndef load_hmmout(file, gff=None):\n\n pid2domains = {}\n pid2scaffold = None\n if gff: pid2scaffold = load_gff(gff)\n fo = open(file, 'r')\n for line in fo:\n line = line.rstrip()\n columns = line.split()\n if len(columns) == 16: columns.pop(0) # remove first column (prot length)\n seqid, start, stop = columns[0], int(columns[3]), int(columns[4])\n\n if seqid.endswith(\"]\"):\n posinfo = seqid[seqid.rindex(\"[\")+1:-1]\n seqid = seqid[:seqid.rindex(\"[\")]\n extractstart, extractstop = [int(e) for e in posinfo.split(\":\")[0:2]]\n if seqid == \"scaffold_192\": print \"S:\", extractstart, extractstop, start, stop\n if extractstop < extractstart: # indicating negative frame / anti-strand\n start = extractstop - (3*start)\n stop = extractstop - (3*stop)\n #stop, start = start, stop\n else:\n start = extractstart + (3*start)\n stop = extractstart + (3*stop)\n if seqid == \"scaffold_192\" and start > 10000: print \"E:\", extractstart, extractstop, start, stop\n\n elif pid2scaffold and pid2scaffold.has_key(seqid):\n exons = pid2scaffold[seqid]\n startnt = start * 3\n stopnt = stop * 3\n sumlength = 0\n for (scaffold, estart, estop) in exons:\n seqid = scaffold\n if not pid2domains.has_key(seqid): pid2domains[seqid] = []\n sumlength += (estop - estart)\n if startnt > sumlength: \n #print scaffold, start, \"(\", startnt, \")\", \">\", sumlength\n continue\n if stopnt < sumlength - (estop - estart): \n #print scaffold, stop, \"(\", stopnt, \")\", \"<\", sumlength - (estop - estart)\n continue\n nstart = max([estop - (sumlength - startnt), estart])\n nstop = min([estop, estop - (sumlength - stopnt)])\n pid2domains[seqid].append( [nstart, nstop] )\n\n if pid2scaffold: continue # already added the stuff\n if not pid2domains.has_key(seqid): pid2domains[seqid] = []\n pid2domains[seqid].append( [start, stop] )\n fo.close()\n return pid2domains\n\n# =============================================================================\ndef load_gff(file):\n pid2scaffold = {}\n fo = open(file, 'r')\n for line in fo:\n if line.startswith(\"#\"): continue\n feat = GeneFeature(line)\n #print >> sys.stderr, \"feat type: %s\" % feat.type\n if not feat.type == \"exon\": continue\n pid = feat.get_attributes()['Parent']\n pid = pid[pid.index(\":\")+1:]\n if not pid2scaffold.has_key(pid): pid2scaffold[pid] = []\n seqid, start, stop = feat.seqid, feat.start, feat.stop\n pid2scaffold[pid].append([seqid, start, stop])\n fo.close()\n print >> sys.stderr, \"gff loaded with %s protein ids of relevance\" % len(pid2scaffold)\n return pid2scaffold\n\n# =============================================================================\ndef report_new(scaffold, hits):\n for (start, stop) in hits:\n print string.join([scaffold, str(start), str(stop)], \"\\t\")\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n predictedHash = load_hmmout(args['predicted'], args['gff'])\n alltranslationsHash = load_hmmout(args['alltranslations'])\n print >> sys.stderr, \"finished loading hmmouts.\"\n for scaffold, alltranshits in alltranslationsHash.iteritems():\n if not predictedHash.has_key(scaffold):\n report_new(scaffold, alltranshits)\n continue\n predhits = 
predictedHash[scaffold]\n print scaffold\n print alltranshits\n print \"\"\n print predhits\n sys.exit(99)\n\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n" }, { "alpha_fraction": 0.5129060745239258, "alphanum_fraction": 0.5206942558288574, "avg_line_length": 35.8278694152832, "blob_id": "a55b07b91d88cb7b0c690af9f7e0e7122bb84feb", "content_id": "7c30c22c9c00a1d2ca490c1d4ede36964cbbd146", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4494, "license_type": "permissive", "max_line_length": 154, "num_lines": 122, "path": "/python/signalp/signalp-report-hits.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nfrom low import * # custom functions, written by myself\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f signalp output file (short format)\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['signalpfile'] = value\n \n for key in ['signalpfile']:\n if key.endswith(\"file\"):\n if not args_file_exists(args, key): show_help()\n elif key.endswith(\"dir\"):\n if not args_dir_exists(args, key): show_help()\n elif not args.has_key(key):\n print >> sys.stderr, \"missing argument\", key\n show_help()\n return args\n\n# =============================================================================\ndef statusbar(current, total, message=\"\", width=40):\n progress = 1.0*current/total\n if message != \"\": message = \"[\" + message + \"]\"\n progressbar = \"=\" * int(progress*width)\n while len(progressbar) < width: progressbar += \" \" \n sys.stderr.write(\"\\r 0% \" + progressbar + \" 100% \" + message)\n if progress == 1.0: sys.stderr.write(\"\\n\")\n \n# =============================================================================\n# name Cmax pos ? Ymax pos ? Smax pos ? Smean ? D ? \t# name ! Cmax pos ? 
Sprob ?\nclass SignalpResult:\n def __init__(self, line):\n col = line.rstrip().split()\n self.gid = col.pop(0)\n self.NN_Cmax = float(col.pop(0))\n self.NN_Cmax_pos = int(col.pop(0))\n if col.pop(0) == 'Y': self.NN_Cmax_sign = True\n else: self.NN_Cmax_sign = False\n self.NN_Ymax = float(col.pop(0))\n self.NN_Ymax_pos = int(col.pop(0))\n if col.pop(0) == 'Y': self.NN_Ymax_sign = True\n else: self.NN_Ymax_sign = False\n self.NN_Smax = float(col.pop(0))\n self.NN_Smax_pos = int(col.pop(0))\n if col.pop(0) == 'Y': self.NN_Smax_sign = True\n else: self.NN_Smax_sign = False\n \n self.NN_Smean = float(col.pop(0))\n if col.pop(0) == 'Y': self.NN_Smean_sign = True\n else: self.NN_Smean_sign = False\n self.NN_D = float(col.pop(0))\n if col.pop(0) == 'Y': self.NN_D_sign = True\n else: self.NN_D_sign = False\n \n col.pop(0)\n self.HMM_res = col.pop(0)\n self.HMM_Cmax = float(col.pop(0))\n self.HMM_Cmax_pos = int(col.pop(0))\n if col.pop(0) == 'Y': self.HMM_Cmax_sign = True\n else: self.HMM_Cmax_sign = False\n self.HMM_Sprob = float(col.pop(0))\n if col.pop(0) == 'Y': self.HMM_Sprob_sign = True\n else: self.HMM_Sprob_sign = False\n \n def is_significant(self, NN=True, HMM=True):\n sign = True\n if not NN and not HMM:\n return [self.NN_Cmax_sign, self.NN_Ymax_sign, self.NN_Smax_sign, self.NN_Smean_sign, self.NN_D_sign, self.HMM_Cmax_sign, HMM_Sprob_sign].count(True)\n else:\n if NN:\n if not self.NN_Cmax_sign and not self.NN_Ymax_sign and not self.NN_Smax_sign and not self.NN_Smean_sign and not self.NN_D_sign: sign = False\n if HMM: \n if not self.HMM_Cmax_sign and not self.HMM_Sprob_sign: sign = False\n return sign\n \n \n \n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n fo = open(args[\"signalpfile\"])\n for line in fo:\n if line.startswith(\"#\"): continue\n #print line\n sr = SignalpResult(line)\n if sr.is_significant(True, True): print sr.gid\n fo.close()\n \n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5096774101257324, "alphanum_fraction": 0.5181935429573059, "avg_line_length": 36.61165237426758, "blob_id": "281d475e4ad064463e74247691e1df9365ea9917", "content_id": "666312684e2f546a83614478f1431b9a992448f1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3875, "license_type": "permissive", "max_line_length": 83, "num_lines": 103, "path": "/python/fasta/get-sequence-between-genes.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nfrom low import * # custom functions, written by myself\nimport gff3\nimport fasta\nfrom Bio.Seq import Seq\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -i surrounding genes (ID1,ID2)\" )\n stdout( \" -g gff file\" )\n stdout( \" -f fasta file\" )\n stdout( \" -r 
return reverse complement\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hi:g:f:r\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {'rv': False}\n for key, value in keys:\n if key == '-i': args['genes'] = value.split(',')\n if key == '-g': args['gffile'] = value\n if key == '-f': args['fastafile'] = value\n if key == '-r': args['rv'] = True\n \n for key in ['genes', 'gffile', 'fastafile']:\n if key.endswith(\"file\"):\n if not args_file_exists(args, key): show_help()\n elif key.endswith(\"dir\"):\n if not args_dir_exists(args, key): show_help()\n elif not args.has_key(key):\n print >> sys.stderr, \"missing argument\", key\n show_help()\n return args\n\n# =============================================================================\ndef statusbar(current, total, message=\"\", width=40):\n progress = 1.0*current/total\n if message != \"\": message = \"[\" + message + \"]\"\n progressbar = \"=\" * int(progress*width)\n while len(progressbar) < width: progressbar += \" \" \n sys.stderr.write(\"\\r 0% \" + progressbar + \" 100% \" + message)\n if progress == 1.0: sys.stderr.write(\"\\n\")\n \n\n# =============================================================================\ndef get_coordinates(gfhash, genes):\n positions = []\n for scaffold, gfs in gfhash.iteritems():\n for gf in gfs:\n if not gf.get_attributes().has_key('ID'): continue\n if gf.get_attributes()['ID'] in genes: positions += [gf.start, gf.stop]\n if len(positions) == 4: break\n if len(positions) == 4: break\n if min(positions[0:2]) < min(positions[2:4]):\n return scaffold, max(positions[0:2]), min(positions[2:4])\n else:\n return scaffold, max(positions[2:4]), min(positions[0:2])\n \n \n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n gfhash = gff3.get_gff_hash(args['gffile'])\n sys.stderr.write(\"gff loaded \")\n gid, startpos, endpos = get_coordinates(gfhash, args['genes'])\n sys.stderr.write(\"| coordinates identified \")\n if not args['rv']: print \">%s_%s:%s\" %(gid, startpos, endpos)\n else: print \">%s_%s:%s\" %(gid, endpos, startpos)\n \n seqhash = fasta.get_sequence_hash(args['fastafile'])\n sys.stderr.write(\"| fasta loaded \")\n seq = seqhash[gid][startpos-1:endpos]\n if args['rv']: seq = Seq(seq).reverse_complement().tostring()\n sys.stderr.write(\"| subsequence extracted \")\n print seq\n sys.stderr.write(\"\\n\")\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5341075658798218, "alphanum_fraction": 0.5402200222015381, "avg_line_length": 33.2259407043457, "blob_id": "6f96fa36e3e77be7c90e54ce663c59666571fc44", "content_id": "3b812cf3de1be5c0cd940213efb66eaa1b7a7ce3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8180, "license_type": "permissive", "max_line_length": 174, "num_lines": 239, "path": "/python/blast/parse_blast_out3.py", 
"repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nfrom low import * \n\nOUTPUTHASH = {\n 'q' : 'query',\n 'h' : 'hitid',\n 'd' : 'hitdescr',\n 'sl' : 'sbjct_length',\n 'ql' : 'query_length',\n 'e' : 'evalue',\n 's' : 'score',\n 'qs' : 'query_startpos',\n 'qe' : 'query_endpos',\n 'ss' : 'sbjct_startpos',\n 'se' : 'sbjct_endpos',\n 'hl' : 'hitlength',\n 'i' : 'identities',\n 'p' : 'positives',\n 'g' : 'gaps',\n 'frm' : 'frame',\n 'str' : 'strand', \n}\n\n\n# ============================================================================= \ndef show_help():\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path> [-e -n -i -p -l -o <string>] > out.file\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f blast.out file to be parsed\" )\n stdout( \" -n number of best hits to parse\" )\n stdout( \" -e minimum evalue of a hit to be parsed\" )\n stdout( \" -i minimum identity (in %)\" )\n stdout( \" -l minimum length of a hit to be parsed\" )\n stdout( \" -d delimiter that is used in the stdout to seperate the fields. default: space\" )\n stdout( \" also allowed: ';' and 'tab' and ',' and '|'\" )\n stdout( \" \" )\n stdout( \" -o output fields. default: \\\"q,h,s,e,qs,qe,ss,se,hl,sl,i,p\\\"\" )\n for k, v in OUTPUTHASH.iteritems(): stdout( \" %s:\\t%s\" %(k,v) )\n stdout( \" \" )\n \n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hb:f:n:e:l:i:p:o:d:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n \n args = {}\n for key, value in keys:\n if key == '-f':\n if not file_exists( value ):\n stderr( \"invalid path in \" + key )\n show_help()\n else:\n args['file'] = value\n if key == '-e': args['evalthresh'] = float(value)\n if key == '-l': args['minlength'] = int(value)\n if key == '-i': args['minident'] = int(value)\n if key == '-p': args['minpos'] = int(value)\n if key == '-n': args['numberofbesthits'] = int(value)\n if key == '-o': args['outformat'] = value\n if key == '-d': args['delimiter'] = value\n \n if not args.has_key('file'):\n stderr( \"blast out file missing.\" )\n show_help()\n \n if not args.has_key('outformat'):\n args['outformat'] = \"q,h,s,e,qs,qe,ss,se,hl,sl,i,p\"\n \n args['tooutput'] = []\n for key in args.get('outformat').split(','):\n if OUTPUTHASH.has_key(key):\n args['tooutput'].append( OUTPUTHASH.get(key) )\n else:\n stderr( 'OUTPUTHASH does not contain the key \"%s\"' %(key) )\n \n if not args.has_key('delimiter'):\n args['delimiter'] = \" \"\n \n return args\n\n# =============================================================================\ndef print_hit(args,hithash):\n \"\"\"\n returns counted (0,1)\n \"\"\"\n \n if args.has_key('evalthresh'):\n if args.get('evalthresh') < float(hithash.get('evalue')): return 0\n if args.has_key('minlength'):\n if args.get('minlength') > int(hithash.get('hitlength')): return 0\n if args.has_key('minident'):\n if 
args.get('minident') > int(hithash.get('identities')): return 0\n if args.has_key('minpos'):\n if args.get('minpos') > int(hithash.get('positives')): return 0\n \n L = [] \n for key in args.get('tooutput'):\n if hithash.has_key( key ):\n L.append(hithash.get( key ))\n else:\n stderr( 'hithash does not contain the key \"%s\"' %(key) )\n sys.exit(1)\n \n if args.get('delimiter') in [ 't', 'tab' ]:\n print string.join(L, '\\t')\n elif args.get('delimiter') in [ ';' ',' '|' ]:\n print string.join(L, args.get('delimiter'))\n else:\n print string.join(L, ' ')\n \n return 1\n\n\n# =============================================================================\nclass BlastOutput:\n def __init__(self, file):\n self.filehandle = open(file, 'r')\n\n def next_query(self):\n print \"# next_query\"\n self.querylines = []\n while 1:\n line = self.filehandle.readline()\n # find start\n if len(self.querylines) == 0 and not line.startswith(\"Query=\"): continue\n # break at end\n if line.startswith(\"BLAST\") or line.startswith(\" Database\"): break\n # else append\n self.querylines.append(line.rstrip())\n\n def next_hit(self):\n print \"# next_hit\"\n self.hitlines = []\n index = 0\n while 1:\n if index == len(self.querylines): break\n line = self.querylines[index]\n index += 1\n # find start\n if len(self.hitlines) == 0 and not line.startswith(\">\"): continue\n # break at end\n if len(self.hitlines) > 0 and line.startswith(\">\"): break\n # else append\n self.hitlines.append(line.rstrip())\n print line.rstrip()\n\n def next_hsp(self):\n print \"# next_hsp\"\n self.hsplines = []\n index = 0\n while 1:\n if index == len(self.hitlines): break\n line = self.hitlines[index]\n index += 1\n # find start\n if len(self.hsplines) == 0 and not line.startswith(\" Score =\"): continue\n # break at end\n if len(self.hsplines) > 0 and line.startswith(\" Score =\"): break\n # else append\n self.hsplines.append(line.rstrip())\n\n\n def parse_querylines(self):\n queryid = self.querylines[0].split()[1]\n return queryid\n\n def parse_hitlines(self):\n elements = string.join(self.hitlines, \" \")[1:].split()\n subject_id = elements[0]\n subject_length = elements[-1]\n subject_description = elements[1:-3]\n return subject_id, subject_description, subject_length\n\n def parse_hsplines(self):\n topdefs = string.join(self.hsplines[0:3], \" \")\n hsp_frame = None\n hsp_strand = None\n hsp_score = re.search('Score =\\S+(\\d+)', topdefs).group(1)\n hsp_evalue = re.search('Expect[\\(\\)0-9]* =\\S+([e\\.0-9-]+)', topdefs).group(1)\n hsp_identity = re.search('Identities =\\S+\\d+/\\d+\\S+\\((\\d+)%\\)', topdefs).group(1)\n hsp_positive = re.search('Positives =\\S+\\d+/\\d+\\S+\\((\\d+)%\\)', topdefs).group(1)\n if re.search('Frame =', topdefs): hsp_frame = re.search('Frame =\\S+([-+1-3]+)', topdefs).group(1)\n return hsp_score, hsp_evalue, hsp_identity, hsp_positive, hsp_frame, hsp_strand\n\n\n def parse(self):\n while 1:\n self.next_query()\n if not self.querylines[0].startswith(\"Query=\"): break\n while 1:\n self.next_hit()\n if not self.hitlines[0].startswith(\">\"): break\n while 1:\n self.next_hsp()\n if not self.hsplines[0].startswith(\" Score =\"): break\n queryid = self.parse_querylines()\n subject_id, subject_description, subject_length = self.parse_hitlines()\n hsp_score, hsp_evalue, hsp_identity, hsp_positive, hsp_frame, hsp_strand = self.parse_hsplines()\n print string.join([queryid, subject_id, hsp_score, hsp_evalue, hsp_frame], \"\\t\")\n \n\n self.filehandle.close()\n\n\n# 
=============================================================================\ndef parse_blast_out( args ):\n #print \"# blast.out file:\", args.get('file')\n #print \"# numberofbesthits:\", args.get('numberofbesthits')\n #print \"# max.evalue:\", args.get('evalthresh')\n #print \"# min.length:\", args.get('minlength')\n #print \"# fields: query, hitid, score, evalue, query_startpos, query_endpos, sbjct_startpos, sbjct_endpos, hitlength, subjct_length, identities, positives, frame_or_strand\"\n \n parser = BlastOutput( args.get('file') )\n parser.parse()\n \n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nparse_blast_out( args )\n" }, { "alpha_fraction": 0.4984115660190582, "alphanum_fraction": 0.5008824467658997, "avg_line_length": 33.96296310424805, "blob_id": "6594e3e538f604a266822108a6d1f06ad30546e6", "content_id": "e260130fcf0438db5bc67672f79ca74e15fd778a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2833, "license_type": "permissive", "max_line_length": 88, "num_lines": 81, "path": "/python/geneontology/goid2name-from-obo-xml.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nfrom low import * # custom functions, written by myself\nfrom collections import defaultdict\nfrom xml.dom import minidom\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f go term obo-xml file\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['obo'] = value\n \n if not args.has_key('obo'):\n stderr( \"obo file argument missing.\" )\n show_help()\n elif not file_exists( args.get('obo') ):\n stderr( \"obo file does not exist.\" )\n show_help()\n \n return args\n\n# =============================================================================\nclass GOTerm():\n\n def __init__(self, xml):\n self.id = xml.getElementsByTagName(\"id\")[0].firstChild.data\n self.name = xml.getElementsByTagName(\"name\")[0].firstChild.data\n self.namespace = xml.getElementsByTagName(\"namespace\")[0].firstChild.data\n self.alt_ids = [node.firstChild.data for node in xml.getElementsByTagName(\"alt_id\")]\n\n# =============================================================================\ndef read_obo( file ):\n hash = {}\n xmldoc = minidom.parse(file)\n for term in xmldoc.getElementsByTagName('term'):\n goterm = GOTerm(term)\n 
hash[goterm.id] = goterm\n for alt_id in goterm.alt_ids: \n if not hash.has_key(alt_id): hash[alt_id] = goterm\n print >> sys.stderr, \"goterms read from obo: %s\" % len(hash)\n return hash\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n \n gohash = read_obo(args['obo'])\n for goid, goterm in gohash.iteritems():\n print goid + \"\\t\" + goterm.name\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.475159227848053, "alphanum_fraction": 0.48025476932525635, "avg_line_length": 31.24657440185547, "blob_id": "fb3799d00f36e960a766b10f2285cb030aae26f1", "content_id": "14af146b7b0663c033c02611f4176d641884f1ed", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2355, "license_type": "permissive", "max_line_length": 83, "num_lines": 73, "path": "/python/fasta/gc-content-from-fasta.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nfrom low import * # custom functions, written by myself\nfrom goterm import GOTerm\nfrom collections import defaultdict\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f DNA fasta file\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['file'] = value\n \n if not args.has_key('file'):\n stderr( \"fasta file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"fasta file does not exist.\" )\n show_help()\n \n return args\n\n \n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n\n counts = {'A':0, 'T':0, 'G':0, 'C':0}\n fo = open(args['file'])\n for line in fo:\n if line.startswith(\">\"): continue\n line = line.rstrip().upper()\n for char in ['A', 'T', 'G', 'C']:\n counts[char] += line.count(char)\n\n total = sum(counts.values())\n gc = 1.0 * (counts['G'] + counts['C']) / total\n base = args['file']\n if base.count(\".\") > 0: base = base[:base.index(\".\")]\n if base.count(\"_\") > 0: base = base[:base.index(\"_\")]\n\n print base + \"\\t\" + str(gc)\n\n# 
=============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5297297239303589, "alphanum_fraction": 0.5387387275695801, "avg_line_length": 19.55555534362793, "blob_id": "a40e605b47ff102fa0217b1c9b50fe1b79df88d0", "content_id": "420140fff20c827dcb01aeac93a6ef0a3ffdfb01", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 555, "license_type": "permissive", "max_line_length": 79, "num_lines": 27, "path": "/python/orthomcl/cluster2arath.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys, string\nfrom low import *\nfrom orthomcl import OrthoMCLCluster\n\n\n# =============================================================================\ndef usage():\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" noparalogs.orthomcl.out\" \n sys.exit(1)\n\n\ndef plausi():\n if len(sys.argv) != 2: usage()\n inFile = sys.argv[1]\n return inFile\n\n\ndef main():\n inFile = plausi()\n fo = open(inFile)\n for line in fo:\n o = OrthoMCLCluster(line.rstrip())\n print o.get_name() + \"\\t\" + o.get_species_hash()['Arath'][0]\n\n\nmain()\n" }, { "alpha_fraction": 0.5193469524383545, "alphanum_fraction": 0.5454694032669067, "avg_line_length": 31.062828063964844, "blob_id": "c5338565693257003b108be2cac5cbb6e95f369e", "content_id": "dcc500c2eb9c6e0c4053dbaba356863be968ce59", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6125, "license_type": "permissive", "max_line_length": 113, "num_lines": 191, "path": "/python/misa/qc-orthologous-regions.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nimport hashlib\nfrom low import * # custom functions, written by myself\nfrom misa import MisaSSR\nimport newick\nfrom collections import defaultdict\nimport pickle\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path> \" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f orthologous region map\" )\n stdout( \" -g all.gff\" )\n stdout( \" -m gene2transcript2protein.map\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:g:m:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['file'] = value\n if key == '-g': args['gff'] = value\n if key == '-m': args['map'] = value\n \n if not args.has_key('file'):\n stderr( \"orth.map file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"orth.map file does not exist.\" )\n show_help()\n if not args.has_key('map'):\n stderr( \"gene2transcript2protein map file argument 
missing.\" )\n show_help()\n elif not file_exists( args.get('map') ):\n stderr( \"gene2transcript2protein map file does not exist.\" )\n show_help()\n\n if not args.has_key('gff'):\n stderr( \"gff file argument missing.\" )\n show_help()\n elif not file_exists( args.get('gff') ):\n stderr( \"gff file does not exist.\" )\n show_help()\n\n\n return args\n\n\n# =============================================================================\ndef coordinates_to_gene(file):\n hash = {}\n fo = open(file)\n for line in fo:\n cols = line.rstrip().split(\"\\t\")\n if not cols[3] == \"gene\": continue\n key = string.join([cols[0], cols[1], cols[4]], \"|\")\n value = [re.search(\"ID=([^;]+);\", cols[9]).group(1), cols[7]]\n hash[key] = value\n fo.close()\n return hash\n\n\ndef gene_to_transcript(file):\n hash = {}\n fo = open(file)\n for line in fo:\n gid, tid = line.rstrip().split(\"\\t\")[0:2]\n hash[gid] = tid\n fo.close()\n return hash\n\n\n\ndef get_gene_features(file):\n exons = defaultdict(list)\n introns = defaultdict(list)\n fo = open(file)\n for line in fo:\n line = line.rstrip()\n cols = line.split(\"\\t\")\n if cols[3] != \"exon\" and cols[3] != \"intron\": continue\n tid = re.search(\"Parent=([^;]+)\", cols[9]).group(1)\n start, stop = cols[4], cols[5]\n strand = cols[7]\n if cols[3] == \"exon\": exons[tid].append([start, stop, strand])\n else: introns[tid].append([start, stop, strand])\n# hash[cols[0] + \"|\" + cols[1] + \"|\" + cols[3]] = \n return exons, introns\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n \n gene2transcript = gene_to_transcript(args['map'])\n print >> sys.stderr, \"gene2transcript loaded.\"\n coord2gene = coordinates_to_gene(args['gff'])\n print >> sys.stderr, \"coord2gene loaded.\"\n exons, introns = get_gene_features(args['gff'])\n print >> sys.stderr, \"gene features loaded.\"\n\n fo = open(args['file'])\n for line in fo:\n if line.startswith(\"#\"): continue\n if len(line.split(\"\\t\")) < 9: continue\n line = line.rstrip()\n cols = line.split(\"\\t\")\n species1, species2 = cols[0:2]\n type = cols[2]\n chr1, chr2 = cols[3], cols[6]\n start1, start2 = cols[4], cols[7]\n stop1, stop2 = cols[5], cols[8]\n\n # remove regions with length=0 or where one region is significantly longer (150%)\n l1 = int(cols[5]) - int(cols[4])\n l2 = int(cols[8]) - int(cols[7])\n if l1 == 0 or l2 == 0: continue\n if float(max([l1,l2])) / float(min([l1,l2])) > 1.5 or (max([l1,l2]) - min([l1,l2])) > 5000: continue\n\n if type == \"gene\":\n key = string.join([species1, chr1, start1], \"|\")\n gid, strand1 = coord2gene[key]\n if not gene2transcript.has_key(gid): continue\n tid1 = gene2transcript[gid]\n exons1 = exons[tid1]\n introns1 = introns[tid1]\n\n key = string.join([species2, chr2, start2], \"|\")\n gid, strand2 = coord2gene[key]\n if not gene2transcript.has_key(gid): continue\n tid2 = gene2transcript[gid]\n exons2 = exons[tid2]\n introns2 = introns[tid2]\n\n if len(exons1) != len(exons2): continue\n if len(introns1) != len(introns2): continue\n\n cols.insert(6, strand1)\n cols.insert(10, strand2)\n\n # replace a gene by all its exons and introns\n for i in range(len(exons1)):\n ex1, ex2 = exons1[i], exons2[i]\n cols[2] = \"exon\"\n cols[4:7] = ex1\n cols[8:11] = ex2\n print string.join(cols, \"\\t\")\n for i in range(len(introns1)):\n in1, in2 = introns1[i], 
introns2[i]\n cols[2] = \"intron\"\n cols[4:7] = in1\n cols[8:11] = in2\n print string.join(cols, \"\\t\")\n continue\n \n key1 = string.join([species1, chr1, str(int(stop1) +1)], \"|\")\n key2 = string.join([species2, chr2, str(int(stop2) +1)], \"|\")\n gid, strand1 = coord2gene[key1]\n gid, strand2 = coord2gene[key2]\n cols.insert(6, strand1)\n cols.insert(10, strand2)\n print string.join(cols, \"\\t\")\n\n fo.close()\n# print \"exons equal:\", ee, \"exons unequal:\", ue, \"introns equal:\", ei, \"introns unqual:\", ui, \"both equal:\", be\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5161290168762207, "alphanum_fraction": 0.5258477926254272, "avg_line_length": 33.78417205810547, "blob_id": "aa7f233a9ccf866ec74f02baa3c820a95a51ca36", "content_id": "151890b3f35cfd9078f037801ec8ad44afa63424", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4836, "license_type": "permissive", "max_line_length": 123, "num_lines": 139, "path": "/python/pfam/pfam-domain-counts.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nfrom low import * # custom functions, written by myself\nfrom collections import defaultdict\nimport glob\nimport pfam\nimport stats\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -h <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h hmmout dir (*.hmmout)\" )\n stdout( \" -p protein ids dir (*.gids; optional, if given only domains in the specified proteins are considered)\" )\n stdout( \" -s list of species (csv)\" )\n stdout( \" -i pfam dids to ignore\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"h:p:s:i:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-h': args['hmmoutdir'] = value\n if key == '-i': args['ignorefile'] = value\n if key == '-p': args['pidsdir'] = value\n if key == '-s': args['species'] = value.split(',')\n \n for key in ['hmmoutdir', 'species']:\n if key.endswith(\"file\"):\n if not args_file_exists(args, key): show_help()\n elif key.endswith(\"dir\"):\n if not args_dir_exists(args, key): show_help()\n elif not args.has_key(key):\n print >> sys.stderr, \"missing argument\", key\n show_help()\n return args\n\n \n# =============================================================================\ndef get_pids(ifile):\n ohash = {}\n fo = open(ifile)\n for line in fo:\n gid = line.rstrip()\n ohash[gid] = 1\n fo.close()\n return ohash\n\n\n\n# =============================================================================\ndef did2count(hmmout, pids=False):\n pid2pfamdomains = pfam.read_hmmout(hmmout)\n if not pids: keys = 
pid2pfamdomains.keys()\n else: keys = pids\n did2count = {}\n for pid in keys:\n if not pid2pfamdomains.has_key(pid): continue\n for did in list(set([d.get_attr(\"hmm_name\") for d in pid2pfamdomains[pid]])):\n if not did2count.has_key(did): did2count[did] = 0\n did2count[did] += 1\n return did2count\n \n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n species2counts = {}\n for species in args['species']:\n hmmoutfiles = glob.glob(args['hmmoutdir'] + \"/\" + species + \"*.hmmout\")\n if not len(hmmoutfiles) == 1: sys.exit(\"ERROR: no single hmmout file found for species %s\" % species)\n if args.has_key('pidsfile'): \n pidsfiles = glob.glob(args['pidsdir'] + \"/\" + species + \"*.gids\")\n if not len(pidsfiles) == 1: sys.exit(\"ERROR: no single pids file found for species %s\" % species)\n pids = get_pids(pidsfiles[0]).keys()\n else: pids = False\n species2counts[species] = did2count(hmmoutfiles[0], pids)\n \n ignore = {}\n if args.has_key('ignorefile'): ignore = get_pids(args['ignorefile'])\n hash = {}\n for s, counts in species2counts.iteritems():\n for did in counts.keys(): \n if not ignore.has_key(did): hash[did] = 1\n \n fwt = open(\"pfam-table\", \"w\")\n fwt.write(string.join([\"DID\"] + args['species'], \"\\t\") + \"\\n\")\n for did in hash.keys():\n out = did\n for species in args['species']:\n count = 0\n if species2counts[species].has_key(did): count = species2counts[species][did]\n out += \"\\t\" + str(count)\n fwt.write(out + \"\\n\")\n fwt.close()\n \n fwm = open(\"pfam-matrix.csv\", \"w\")\n fwm.write(\",\" + string.join(args['species'], \",\") + \"\\n\")\n for i in range(len(args['species'])):\n s1 = args['species'][i]\n fwm.write(s1)\n for j in range(len(args['species'])):\n if i == j: \n fwm.write(\",1\")\n continue\n s2 = args['species'][j]\n v1, v2 = [], []\n for did in hash.keys():\n v1.append(species2counts[s1].get(did, 0))\n v2.append(species2counts[s2].get(did, 0))\n cor, p = stats.correlate(v1, v2)\n fwm.write(\",\" + str(cor))\n #print string.join([s1, s2, str(cor), str(p)], \"\\t\")#\n fwm.write(\"\\n\")\n fwm.close()\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.598324716091156, "alphanum_fraction": 0.6057989597320557, "avg_line_length": 36.12918472290039, "blob_id": "857a4f00d2843df8e0933969cb6c84b8053dbd98", "content_id": "87cec999748f54fda820a9768c8dbaa076ce5181", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7760, "license_type": "permissive", "max_line_length": 205, "num_lines": 209, "path": "/python/openreadingframe/orf_prediction_part1.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys, getopt, string, math\nfrom Bio.Seq import Seq\nfrom Bio.Blast import NCBIXML\nfrom Bio.Alphabet import IUPAC\n\nOUTFILEPART2 = 'tmp.orf.part2.fasta'\n\n#==============================================================================\ndef show_help():\n print \"\"\"%s uses parsed BLASTX output to determine ORF, cds, and \n putative protein sequence.\n \n Options:\n -f:\\tFASTA file with the input nucleotide sequences output in XML format\n -b:\\tparsed BLASTX output with the best hit for each input sequence\n \"\"\" % 
sys.argv[0]\n\n sys.exit(1)\n\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n sys.stderr.write( \"no arguments provided.\\n\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:b::\" )\n except getopt.GetoptError:\n sys.stderr.write( \"invalid arguments provided.\\n\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['fastafile'] = value\n if key == '-b': args['blastfile'] = value\n \n if not args.has_key('blastfile'):\n sys.stderr.write( \"parsed blastx best hit file argument missing.\\n\" )\n show_help()\n elif not os.path.exists( args.get('blastfile') ) or not os.path.isfile( args.get('blastfile') ):\n sys.stderr.write( \"parsed blastx best hit file does not exist.\\n\" )\n show_help()\n\n if not args.has_key('fastafile'):\n sys.stderr.write( \"input fasta file argument missing.\\n\" )\n show_help()\n elif not os.path.exists( args.get('fastafile') ) or not os.path.isfile( args.get('fastafile') ):\n sys.stderr.write( \"input fasta file does not exist.\\n\" )\n show_help()\n\n return args\n\n\n#==============================================================================\ndef get_blast_measures(blastfile):\n fo = open(blastfile)\n header = fo.readline()[1:].strip()\n # blaaa blast -m8 format, where to get frame from?\n fields = ['query', 'hit', 'ident', 'aln_length', 'mismatches', 'gaps', 'query_startpos', 'query_endpos', 'subject_startpos', 'subject_endpos', 'evalue', 'score']\n hash = {}\n\n for line in fo:\n if line.startswith(\"#\"): continue\n line = line.rstrip()\n elements = line.split(\"\\t\")\n if hash.has_key(elements[0]): continue\n hash[elements[0]] = {} \n for i in range(len(fields)):\n hash[elements[0]][fields[i]] = elements[i]\n # define frame\n if int(hash[elements[0]]['query_startpos']) > int(hash[elements[0]]['query_endpos']):\n startpos, endpos = hash[elements[0]]['query_endpos'], hash[elements[0]]['query_startpos']\n hash[elements[0]]['query_startpos'], hash[elements[0]]['query_endpos'] = startpos, endpos\n startpos = int(startpos)\n frame = -1\n else:\n startpos = int(hash[elements[0]]['query_startpos'])\n frame = 1\n hash[elements[0]]['frame'] = str(frame * (startpos % 3))\n fo.close()\n\n return hash\n\n#==============================================================================\ndef get_prototype_cds(prop, seq):\n if prop['frame'].startswith('-'):\n prototypeseq = Seq(seq, IUPAC.unambiguous_dna).reverse_complement().tostring()\n# print \"#++\", prop['query_endpos'], prop['query_startpos'], len(seq)\n orfstart = len(seq) - int(prop['query_endpos'])\n orfstop = len(seq) - int(prop['query_startpos'])\n else:\n prototypeseq = seq\n orfstart, orfstop = int(prop['query_startpos']) -1, int(prop['query_endpos'])\n\n return prototypeseq, orfstart, orfstop\n \n#==============================================================================\ndef process_sequence(id, seq, blasthash):\n if not blasthash.has_key(id):\n fw = open(OUTFILEPART2, 'a')\n fw.write(\">%s\\n%s\\n\" %(id, seq))\n fw.close()\n return\n\n prop = blasthash[id]\n # get prototype cds\n # it is based on the blast hit but as long as possible\n # we walk backwards at the start position to then get the\n # sequence to analyze (sense if frame > 0, antisense if frame < 0)\n # and the adjusted start and stop positions of the minimum (homolog) orf\n 
prototypeseq, orfstart, orfstop = get_prototype_cds(prop, seq)\n #print \"###\", id, orfstart, orfstop\n tmpstart = orfstart\n while tmpstart > 3: tmpstart -= 3\n prototypeprotein = Seq(prototypeseq[tmpstart:], IUPAC.unambiguous_dna).translate().tostring()\n\n # determine start of ORF\n protstart = (orfstart - tmpstart) / 3\n\n #print >> sys.stderr, \"####\", 'id:', id, 'fr:', prop['frame'], 'qs:', prop['query_startpos'], 'qe:', prop['query_endpos'], 'O:', orfstart, 'P:', protstart, 'l:', len(prototypeprotein)# , prototypeprotein\n\n currentaminoacid = prototypeprotein[protstart]\n while protstart > 0 and currentaminoacid != 'M' and currentaminoacid != '*':\n protstart -= 1\n orfstart -= 3\n currentaminoacid = prototypeprotein[protstart]\n #print \"#..\", orfstart, currentaminoacid\n if currentaminoacid == '*': \n protstart += 1\n orfstart += 3\n #print \"#..\", orfstart, prototypeprotein[protstart]\n\n # deterine end of ORF\n protstop = (orfstop - tmpstart) / 3\n if protstop >= len(prototypeprotein):\n protstop = len(prototypeprotein)-1\n #print \"####\", id, orfstart, orfstop, len(prototypeseq), protstop, len(prototypeprotein)\n currentaminoacid = prototypeprotein[protstop]\n while protstop < len(prototypeprotein) -1 and currentaminoacid != '*':\n protstop += 1\n orfstop += 3\n currentaminoacid = prototypeprotein[protstop]\n\n #cds = prototypeseq[orfstart:orfstop+1]\n if currentaminoacid == '*': protstop -= 1\n #protein = Seq(cds, IUPAC.ambiguous_dna).translate(to_stop=True).tostring()\n protein = prototypeprotein[protstart:protstop+1]\n cds = prototypeseq[orfstart:orfstart+(len(protein)*3)]\n \n #cds = prototypeseq[orfstart:]\n #seqtotranslate = prototypeseq[orfstart:]\n #protein = Seq(seqtotranslate, IUPAC.ambiguous_dna).translate(to_stop=False).tostring()\n #orfstop = orfstart + len(protein)*3\n #print \"####\", orfstart, orfstop, len(cds), len(protein)*3, protein\n #if protein.count('*') > 0:\n # protein = protein[:protein.index('*')+1]\n # orfstop = orfstart + len(protein)*3\n # protein = protein[:-1]\n\n #print \"####\", orfstart, orfstop, len(seqtotranslate), len(protein)*3, protein\n\n #currentaminoacid = prototypeprotein[protstart]\n #cds = prototypeseq[orfstart:orfstop]\n # backtranslate positions for negative frames\n if prop['frame'].startswith('-'):\n startpos = len(prototypeseq) - orfstart\n endpos = len(prototypeseq) - orfstop\n else:\n startpos, endpos = orfstart, orfstop\n\n print string.join([id, prop['frame'], str(startpos), str(endpos), cds, protein, \"1\"], \"\\t\")\n if len(protein) < 50:\n sys.stderr.write(\"protein shorter than 50aa!! check record %s (len=%s, %s)\\n\" %(id, len(protein), prop['frame'] ))\n\n if len(cds) < int(prop['aln_length']):\n print >> sys.stderr, \"WARNING: cds shorter than NCBI blast hit. 
Check entry %s\" % id\n# print \"#####\", len(cds), len(protein)*3, \"\\n\"\n\n\n#==============================================================================\ndef main(args):\n blasthash = get_blast_measures( args.get('blastfile') )\n #print blasthash\n header = ['record_name', 'frame', 'startpos', 'endpos', 'cds', 'protein', 'evidence']\n print '#', string.join(header, \"\\t\")\n\n id, seq = \"\", \"\"\n fo = open( args.get('fastafile') )\n for line in fo:\n line = line.strip()\n if line.startswith('>'):\n if id != \"\": \n process_sequence(id, seq, blasthash)\n \n id = line.split()[0][1:]\n seq = \"\"\n else: seq += line\n\n fo.close()\n if id != \"\": \n process_sequence(id, seq, blasthash)\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n" }, { "alpha_fraction": 0.5712460279464722, "alphanum_fraction": 0.5795527100563049, "avg_line_length": 27.981481552124023, "blob_id": "113c0e931f099111fa01139ebec6ba104d017019", "content_id": "1df26b3a9ee6dea3ec72e0f2aa8a0f7f4ad83269", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1565, "license_type": "permissive", "max_line_length": 154, "num_lines": 54, "path": "/python/base/gff3.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "import sys, string\n\ndef get_gff_hash(gffile):\n hash = {}\n fo = open(gffile)\n for line in fo:\n gf = GeneFeature(line)\n if not hash.has_key(gf.seqid): hash[gf.seqid] = []\n hash[gf.seqid].append(gf)\n fo.close()\n return hash\n \n\nclass GeneFeature():\n def __init__(self, line):\n columns = line.rstrip().split(\"\\t\")\n if not len(columns) == 9:\n print >> sys.stderr, \"GFF3 with incorrect number of columns. 
Expected: 9 | Observed: %s\" % len(columns)\n print >> sys.stderr, \"\\\"%s\\\"\" % line\n sys.exit(1)\n self.seqid = columns.pop(0)\n self.source = columns.pop(0)\n self.ftype = columns.pop(0)\n self.start = int(columns.pop(0))\n self.stop = int(columns.pop(0))\n self.score = columns.pop(0)\n self.strand = columns.pop(0)\n self.phase = columns.pop(0)\n self.attributes = columns.pop(0)\n\n def get_attributes(self):\n hash = {}\n for e in self.attributes.split(\";\"):\n if e == '': continue\n k, v = e.split(\"=\")\n hash[k] = v\n return hash\n \n def set_attribute(self, key, value):\n hash = {}\n for e in self.attributes.split(\";\"):\n if e == '': continue\n k, v = e.split(\"=\")\n hash[k] = v\n if hash.has_key(key):\n hash[key] = value\n self.attributes = \"\"\n for k, v in hash.iteritems(): self.attributes += \"%s=%s;\" %(k, v)\n else:\n self.attributes += \"%s=%s;\" %(key, value)\n \n\n def to_string(self):\n return string.join([self.seqid, self.source, self.ftype, str(self.start), str(self.stop), self.score, self.strand, self.phase, self.attributes], \"\\t\")\n" }, { "alpha_fraction": 0.4901021718978882, "alphanum_fraction": 0.4936143159866333, "avg_line_length": 31.957895278930664, "blob_id": "9e084f8149bfe2c918c0dcf9ad24a0e03355e78e", "content_id": "6467c0c1b86d4fc90e76d85ea4368b12d0dd467b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3132, "license_type": "permissive", "max_line_length": 99, "num_lines": 95, "path": "/python/fasta/rename-fasta-sequences.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nimport math # match functions\nfrom low import * # custom functions, written by myself\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f fasta file to import\" )\n stdout( \" -m tab delimited file that maps a regex to the replacement name, one per line\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:m:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['file'] = value\n if key == '-m': args['mapping'] = value\n \n if not args.has_key('file'):\n stderr( \"fasta file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"fasta file does not exist.\" )\n show_help()\n \n if not args.has_key('mapping'):\n stderr( \"mapping file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"mapping file does not exist.\" )\n show_help()\n \n return args\n\n\n# =============================================================================\ndef get_mapping(mfile):\n hash = {}\n fo = 
open( mfile, \"r\" )\n for line in fo:\n line = line.rstrip()\n if len(line) == 0: break\n if len(line.split(\"\\t\")) != 2: continue\n regex, replacement = line.split(\"\\t\")\n hash[re.compile(regex)] = replacement\n fo.close()\n return hash\n\n# =============================================================================\ndef apply_replacement(idline, maphash):\n id = idline[1:].split()[0]\n for regex, replacement in maphash.iteritems():\n if re.search(regex, idline[1:]):\n idline = '>' + re.sub(regex, replacement, idline[1:], count=1)\n break\n return idline\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n\n maphash = get_mapping( args.get('mapping') )\n \n fo = open( args.get('file') )\n for line in fo:\n line = line.rstrip()\n if line.startswith(\">\"): line = apply_replacement(line, maphash)\n print line\n fo.close()\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5047732591629028, "alphanum_fraction": 0.5122315287590027, "avg_line_length": 30.61320686340332, "blob_id": "160d9420f012099865fb588763409b3309f90a31", "content_id": "a07a2fd3854b3ad7afcbc53a62a10ee89f70d4fc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3352, "license_type": "permissive", "max_line_length": 83, "num_lines": 106, "path": "/python/generic/search-replace.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nfrom tempfile import mkstemp\nfrom low import * # custom functions, written by myself\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path> -i\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f fasta file to import\" )\n stdout( \" -m mapping: searchstring tab replacestring, one per line\" )\n stdout( \" -i do in file, otherwise replace output to STDOUT\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:im:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n args['withinfile'] = 0\n for key, value in keys:\n if key == '-f': args['infile'] = value\n if key == '-m': args['mapfile'] = value\n if key == '-i': args['withinfile'] = 1\n \n if not args.has_key('infile'):\n stderr( \"input file argument missing.\" )\n show_help()\n elif not file_exists( args.get('infile') ):\n stderr( \"input file does not exist.\" )\n show_help()\n\n if not args.has_key('mapfile'):\n stderr( \"map file argument missing.\" )\n show_help()\n elif not file_exists( 
args.get('mapfile') ):\n stderr( \"map file does not exist.\" )\n show_help()\n \n return args\n\n\ndef get_map(mapfile):\n hash = {}\n fo = open(mapfile)\n for line in fo:\n line = line.strip()\n search, replace = line.split(\"\\t\")[0:2]\n hash[search] = replace\n fo.close()\n return hash\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n\n mapHash = get_map(args.get('mapfile'))\n if args.get('withinfile'):\n tmpfile = mkstemp(\".tmp\", \"sr\")[1]\n outstream = open( tmpfile, \"w\" )\n else:\n outstream = sys.stdout\n\n sout, serr = catch_bash_cmd_output( \"wc -l %s\" % args.get('infile') )\n total = int( sout.split()[0] )\n count = 0\n\n fo = open( args.get('infile') )\n for line in fo:\n line = line.rstrip()\n for s,r in mapHash.iteritems(): \n if not s in line: continue\n line = line.replace(s,r)\n print >> outstream, line\n count += 1\n progress = int(50.0 * count / total) * \"#\"\n progress += (50 - len(progress)) * \" \"\n info(\" 0% \" + progress + \" 100% \")\n fo.close()\n if args.get('withinfile'):\n outstream.close()\n os.system(\"mv %s %s\" %( tmpfile, args.get('infile') ))\n info(\" 0% \" + progress + \" 100% \\n\")\n\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5159657597541809, "alphanum_fraction": 0.5232346653938293, "avg_line_length": 33.8597297668457, "blob_id": "e7658f95d65edf4802c8b54b8b331c36da04e5a6", "content_id": "3e04c2b6f0145136ca8639b68873d9d8cf2f6f2f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7704, "license_type": "permissive", "max_line_length": 143, "num_lines": 221, "path": "/python/swapsc/evolve4swapsc.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular expressions\nimport getopt\t\t\t\t\t# comand line argument handling\nfrom low import *\t\t\t# custom functions, written by myself\nimport anydbm\n\n# =============================================================================\t\ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f nt alignment file\" )\n stdout( \" -m paml M0 out file\" )\n stdout( \" -p path to PAML evolver\" )\n stdout( \" \" )\n\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help()\t\n\n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:m:p:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f':\targs['aln'] = value\n if key == '-m':\targs['m0'] = value\n if key == '-p':\targs['evolver'] = value\n \n if not args.has_key('aln'):\n stderr( \"aln file missing.\" )\n show_help()\n if 
not file_exists( args.get('aln') ):\n stderr( \"aln file does not exist.\" )\n show_help()\n \n if not args.has_key('m0'):\n stderr( \"M0 file missing.\" )\n show_help()\n if not file_exists( args.get('m0') ):\n stderr( \"M0 file does not exist.\" )\n show_help()\n\n if not args.has_key('evolver'):\n stderr(\"path to PAML evolver not specified\")\n show_help()\n\n if not file_exists( args.get('evolver') ):\n stderr( \"evolver binary not found.\" )\n show_help()\n\n return args\n\n# =============================================================================\ndef generate_evolver_infile( file, settings ):\n setnames = ['mode', 'random seed number', '# sequences', '# sites', '# replicates', 'tree length', 'newick tree', 'omega', 'kappa', 'codons']\n for name in setnames:\n if not settings.has_key(name):\n stderr( 'key %s missing in settings hash' %(name) )\n fw = open( file, 'w' )\n line = [settings.get('mode'), \"* mode\", \"\\n\"]\n fw.write(string.join(line, ' '))\n line = [settings.get('random seed number'), \"* random seed number\", \"\\n\"]\n fw.write(string.join(line, ' '))\n line = [settings.get('# sequences'), settings.get('# sites'), settings.get('# replicates'), \"* #seq #sites #replicates\", \"\\n\"]\n fw.write(string.join(line, ' '))\n fw.write(\"\\n\")\n line = [settings.get('tree length'), \"* tree length\", \"\\n\"]\n fw.write(string.join(line, ' '))\n line = [settings.get('newick tree'), \"\\n\"]\n fw.write(string.join(line, ' '))\n fw.write(\"\\n\")\n line = [settings.get('omega'), \"* omega\", \"\\n\"]\n fw.write(string.join(line, ' '))\n line = [settings.get('kappa'), \"* kappa\", \"\\n\"]\n fw.write(string.join(line, ' '))\n fw.write(\"\\n\")\n fw.write(settings.get('codons'))\n fw.write(\"\\n\")\n fw.write('// end of file.')\n fw.flush()\n fw.close()\n\n# =============================================================================\ndef generate_tree_file( file, settings ):\n tree = settings.get('tree for file')\n# pos = tree.index('),(')+1\n# if tree.index('1') > pos:\n# part1 = tree[1:pos]\n# part2 = tree[pos+1:-1]\n# tree = '(' + part2 + ',' + part1 + ')' + \"\\n\"\n\n# while re.search('\\),\\d+\\)', tree):\n# print \"rearranging tree: %s\" % tree\n# match = re.search('\\),(\\d+)\\)', tree)\n# xpos = match.start(1) -1\n# node = match.group(1)\n# print \"pos = %s, node = %s\" %(xpos,node)\n# pos = xpos\n# count = 0\n# while 1:\n# pos -= 1\n# if tree[pos] == ')': \n# count += 1\n# continue\n# if tree[pos] == '(':\n# count -= 1\n# if count == 0:\n# tree = tree[:pos] + node + ',' + tree[pos:xpos-1] + ')' + tree[xpos+2:] + \"\\n\"\n# break\n \n tree = tree.rstrip()\n #beginbrackets = len(re.search(\"^(\\(+)\", tree).group(1))\n #endbrackets = len(re.search(\"(\\)+)$\", tree).group(1))\n #if beginbrackets > endbrackets:\n # xpos = tree.rindex(\"),(\") +1\n # tree = '(' + tree[xpos+1:-1] + ',' + tree[1:xpos] + ')'\n\n fw = open( file, \"w\" )\n fw.write( tree )\n fw.close()\n #print \"final tree: %s\" % tree\n\n# =============================================================================\n# =============================================================================\ndef main( args ):\n\n #sys.stderr.write(args.get('aln') + \"\\t\")\n #sys.stderr.flush()\n # create evolver control file based on the M0 out file\n TARGET = args.get('aln')+'.evolver.out'\n if os.path.exists(TARGET) and os.path.isfile(TARGET) and os.path.getsize(TARGET) > 0: return\n\n settings = {'mode':'0', '# replicates':'1000'}\n fo = open( args.get('m0') )\n line = \"\"\n while not 
line.startswith('Time used:'):\n line = fo.readline()\n if line.startswith('seed used ='):\n settings['random seed number'] = re.match('seed used =\\s*(\\d+)', line).group(1)\n #line = fo.readline()\n while not re.match(\"\\s+\\d+\\s+\\d+\\s*$\", line): \n line = fo.readline()\n numbers = line.split()\n settings['# sequences'], settings['# sites'] = numbers[0:2] \n settings['# sites'] = str( (int(settings['# sites'])/3) )\n continue\n if line.startswith('Codon frequencies under model, for use in evolver'):\n line = fo.readline()\n settings['codons'] = ''\n while re.search(\"\\S+\", line):\n settings['codons'] += line\n line = fo.readline()\n continue\n if line.startswith('tree length ='):\n settings['tree length'] = re.match('tree length =\\s+(\\S+)', line).group(1)\n line = fo.readline()\n line = fo.readline()\n settings['newick tree'] = re.match('(.*)$', line).group(1)\n continue\n if line.startswith('TREE # 1:'):\n settings['tree for file'] = re.search('(\\(.*\\));', line).group(1).replace(' ','')\n if line.startswith('kappa'):\n settings['kappa'] = re.match('kappa\\s+\\(ts/tv\\)\\s+=\\s+(\\S+)', line).group(1)\n continue\n if line.startswith('omega'):\n settings['omega'] = re.match('omega\\s+\\(dN/dS\\)\\s+=\\s+(\\S+)', line).group(1)\n break\n fo.close()\n\n tmpFolder = \".\" + args.get('aln')\n if not os.path.exists( tmpFolder ): os.mkdir( tmpFolder )\n os.chdir( tmpFolder )\n \n evolver_in = 'evolver.in'\n generate_evolver_infile(evolver_in, settings)\n treefile = args.get('aln') + '.swapsc.tree'\n generate_tree_file(treefile, settings)\n #sys.exit(1)\n\n #sys.stderr.write('> evolver.in'+ \"\\t\")\n #sys.stderr.flush()\n\n # run evolver, save output\n os.system( '%s 6 %s &> evolver.log' % (args.get('evolver'), evolver_in) )\n #sys.stderr.write('> evolver.out'+ \"\\t\")\n #sys.stderr.flush()\n evolver_out = \"mc.paml\"\n if file_exists('evolver.out'): os.unlink('evolver.out')\n if file_exists('ancestral.txt'): os.unlink('ancestral.txt')\n if file_exists('evolver.log'): os.unlink('evolver.log')\n if file_exists('evolver.in'): os.unlink('evolver.in')\n if file_exists(evolver_out):\n os.system('mv %s %s' %(evolver_out, \"../\"+TARGET) )\n #sys.stderr.write('> success'+ \"\\n\")\n #else:\n #sys.stderr.write('> ERROR'+ \"\\n\")\n os.chdir(\"..\")\n os.system(\" rm -rf \" + tmpFolder )\n\n\n \n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.573803186416626, "alphanum_fraction": 0.5917553305625916, "avg_line_length": 24.066667556762695, "blob_id": "0f3e4c29e116d9e271180c6a9f483357213c091e", "content_id": "7df552e4d712ebbab5bf97d9d5ca62746502add0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1504, "license_type": "permissive", "max_line_length": 91, "num_lines": 60, "path": "/python/paml/parse_codeml.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys, re\n\nMODELS = [\"M0\", \"M7\", \"M8\", \"Free\", \"M1a\", \"M2a\", \"MT1\", \"MT2\", \"MT3\", \"MT4\", \"MT5\", \"MT6\"]\n\n\ndef usage():\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" folder\"\n sys.exit(1)\n\n\ndef plausi():\n if len(sys.argv) != 2: usage()\n inFolder = sys.argv[1]\n return inFolder\n\n\ndef get_all_base_files(inFolder):\n 
fileHash = {}\n for file in os.listdir(inFolder):\n filename = os.path.split(file)[1]\n basename = filename\n while basename.count('.') > 0: basename = os.path.splitext(basename)[0]\n fileHash[basename] = 1\n return fileHash.keys()\n\n\ndef parse_all_from_basefile(file):\n filesToParse = []\n for m in MODELS: filesToParse.append(file + \".codeml.\" + m)\n for f in filesToParse:\n if not os.path.exists(f) or not os.path.isfile(f): \n print >> sys.stderr, \"bad stuff happening with file\", file, \"/\", f\n return\n\n modelHash = {}\n for f in filesToParse:\n fo = open(f)\n for line in fo:\n if line.startswith(\"lnL(\"):\n np = re.match(\"lnL.*\\s+np:\\s*(\\d+)\", line ).group(1)\n lnL = re.match(\"lnL\\(.*\\):\\s+([0-9.-]+)\", line ).group(1)\n break\n modelHash[ os.path.splitext(f)[1][1:] ] = [lnL, np]\n fo.close()\n\n sys.stdout.write(file)\n for m in MODELS:\n sys.stdout.write(\"\\t\" + m + \":\" + modelHash[m][0] + \",\" + modelHash[m][1])\n sys.stdout.write(\"\\n\")\n\n\ndef main():\n inFolder = plausi()\n basefiles = get_all_base_files(inFolder)\n for basefile in basefiles:\n parse_all_from_basefile(basefile)\n\n\nmain()\n" }, { "alpha_fraction": 0.4686054289340973, "alphanum_fraction": 0.47058823704719543, "avg_line_length": 31.52688217163086, "blob_id": "5e68065d8e92e1dea3d27a95e4d0bf500cb46901", "content_id": "4e254be1b6c09abf55ff0e6f475cd7eb174a3b8f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3026, "license_type": "permissive", "max_line_length": 83, "num_lines": 93, "path": "/python/generic/add_to_xdom.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport getopt\t\t\t\t\t# comand line argument handling\nimport anydbm\t\t\t\t\t# index databases (file hash)\nfrom low import *\t\t\t# collection of generic self-defined functions\n\n\n# =============================================================================\t\ndef show_help( ):\n\t\"\"\" displays the program parameter list and usage information \"\"\"\n\tstdout( \"usage: \" + sys.argv[0] + \" -f <path> -o <path>\" )\n\tstdout( \" \" )\n\tstdout( \" option description\" )\n\tstdout( \" -h help (this text here)\" )\n\tstdout( \" -f xdom file\" )\n\tstdout( \" -i indexed ndb file\" )\n\tstdout( \" -n column to look up [0..n]\" )\n\tstdout( \" \" )\n\tsys.exit(1)\n\t\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n \tstderr( \"no arguments provided.\" )\n \tshow_help()\t\n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:i:n:\" )\n except getopt.GetoptError:\n \tstderr( \"invalid arguments provided.\" )\n \tshow_help()\n \n args = {}\n for key, value in keys:\n if key == '-f': args['xdom'] = value\n if key == '-i':\targs['dbm'] = value\n if key == '-n':\targs['column'] = int(value)\n \n if not args.has_key('xdom'):\n \tstderr( \"xdom file missing.\" )\n \tshow_help()\n if not file_exists( args.get('xdom') ):\n \tstderr( \"xdom file does not exist.\" )\n \tshow_help()\n \t\n if not args.has_key('dbm'):\n \tstderr( \"dbm file missing.\" )\n \tshow_help()\n if not file_exists( args.get('dbm') ):\n \tstderr( \"dbm file does not exist.\" )\n \tshow_help()\n \n if not args.has_key('column'):\n stderr( \"column index missing.\" )\n 
show_help()\n \n return args\n\n\t\n# =============================================================================\n# =============================================================================\ndef main( args ):\n DBM = anydbm.open( args.get('dbm'), 'r' )\n fo = open( args.get('xdom') )\n n = args.get('column')\n key, value = '', ''\n for line in fo:\n line = line.rstrip()\n if line.endswith('\\n'): line = line.replace('\\n','')\n if line.startswith('>'):\n print line\n \t\t#if key != '' and value != '':\n \t\t#\tsys.stdout.write( \">%s\\n%s\" %(key,value) )\n \t\t#\tkey, value = '', ''\n \t\t#key = line[1:].rstrip()\n else:\n value = line.rstrip()\n pid = value.split()[ n ]\n if not DBM.has_key( pid ):\n \tprint \"DBM does not contain the following key:\", pid\n else: value += \"\\t\" + DBM.get(pid)\n print value \n fo.close()\n #if key != '' and value != '':\n #\tsys.stdout.write( \">%s\\n%s\" %(key,value) )\n DBM.close()\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\nargs = handle_arguments( )\nmain( args )\n\n" }, { "alpha_fraction": 0.48249927163124084, "alphanum_fraction": 0.48597049713134766, "avg_line_length": 30.135135650634766, "blob_id": "f4e5ecc3143c3fc7f218487845eca76c875686bf", "content_id": "5e35e84f53d3e3a15defa7e297a64b17594360fc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3457, "license_type": "permissive", "max_line_length": 83, "num_lines": 111, "path": "/python/misa/exonic-ssrs-to-genes.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nfrom low import * # custom functions, written by myself\nfrom misa import MisaSSR\nfrom collections import defaultdict\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -e <path> -g <path> \")\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -e all ssrs in exons\" )\n stdout( \" -g parsed gff for all drosophilas\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hg:e:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-g': args['gff'] = value\n if key == '-e': args['ssr'] = value\n \n if not args.has_key('gff'):\n stderr( \"parsed gff argument missing.\" )\n show_help()\n elif not file_exists( args.get('gff') ):\n stderr( \"parsed gff does not exist.\" )\n show_help()\n \n if not args.has_key('ssr'):\n stderr( \"ssr file argument missing.\" )\n show_help()\n elif not file_exists( args.get('ssr') ):\n stderr( \"ssr file does not exist.\" )\n show_help()\n \n return args\n\n# 
=============================================================================\nclass Gene():\n def __init__(self, line):\n cols = line.rstrip().split(\"\\t\")\n self.species = cols.pop(0)\n self.id = cols.pop(0)\n self.chr = cols.pop(0)\n self.start = cols.pop(0)\n self.stop = cols.pop(0)\n self.strand = cols.pop(0)\n self.loc = self.species + \"|\" + self.chr\n\n def pos_in_gene(self, pos):\n if int(pos) >= int(self.start) and int(pos) <= int(self.stop): return 1\n else: return 0\n\n \n# =============================================================================\ndef get_gene_features(file):\n hash = defaultdict(list)\n fo = open(file)\n for line in fo:\n g = Gene(line)\n hash[g.loc].append(g)\n fo.close()\n return hash\n\n# =============================================================================\ndef get_ssrs(file):\n hash = defaultdict(list)\n fo = open(file)\n for line in fo: \n if line.startswith(\"ID\\t\"): continue\n m = MisaSSR(line)\n hash[m.geneid].append(m)\n fo.close()\n return hash\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n \n locssrs = get_ssrs(args['ssr'])\n locgenes = get_gene_features(args['gff'])\n for loc, genes in locgenes.iteritems():\n for gene in genes:\n for ssr in locssrs[loc]:\n if gene.pos_in_gene(ssr.startpos): print gene.id\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.4617816209793091, "alphanum_fraction": 0.4669540226459503, "avg_line_length": 34.51020431518555, "blob_id": "4a2bc3768140e9bfe272bad8bc1e40e3406c4a64", "content_id": "c7605431740abb2cc73179b6318d58af8c766a4e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3480, "license_type": "permissive", "max_line_length": 166, "num_lines": 98, "path": "/python/swapsc/parse-swapsc.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular expressions\nimport getopt\t\t\t\t\t# comand line argument handling\nfrom low import *\t\t\t# custom functions, written by myself\n\n# =============================================================================\t\ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f fasta file\" )\n stdout( \" -b branches of interest comma-separated\" )\n stdout( \" \" )\n\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help()\t\n\n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:b:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f':\targs['file'] = value\n if key == '-b':\targs['branches'] = value.split(\",\")\n \n if not 
args.has_key('file'):\n stderr( \"swapsc out file missing.\" )\n show_help()\n if not file_exists( args.get('file') ):\n stderr( \"swapsc out file does not exist.\" )\n show_help()\n \n return args\n\n# =============================================================================\ndef get_contraints(file):\n contraints = {}\n interest = 0\n fo = open(args['file'])\n for line in fo:\n if line.startswith(\"Proportion of codon sites under selective constraints\"): break\n if not interest and not line.startswith(\"=============================================================================================================\"): continue\n if line.startswith(\"=============================================================================================================\"): \n interest = 1\n continue\n line = line.rstrip()\n columns = line.split()\n if len(columns) == 1:\n currentbranch = columns[0]\n contraints[currentbranch] = {}\n else:\n if len(columns) < 9: continue\n if columns[7] == \">\": continue # not significant signal\n contraints[currentbranch][columns[0]] = columns[9]\n for line in fo:\n line = line.rstrip()\n columns = line.split()\n if len(columns) != 5 or columns[0] != \"S\": continue\n S = columns[1]\n fo.close()\n return contraints, S\n\n# =============================================================================\n# =============================================================================\ndef main( args ):\n\n constraintshash, S = get_contraints(args['file'])\n outarray = [args['file'], S]\n for branch in args['branches']:\n if not constraintshash.has_key(branch): \n outarray.append(\"0\")\n continue\n PS = \"0\"\n for coord, type in constraintshash[branch].iteritems():\n if type == \"PS\": PS = \"1\"\n outarray.append(PS)\n print string.join(outarray, \"\\t\")\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.5916786193847656, "alphanum_fraction": 0.6008608341217041, "avg_line_length": 28.533897399902344, "blob_id": "af3d2944ae5345faaab87a8e64857edbc4506e1d", "content_id": "f86a4a9ecf322e4f8a460737dd3f6770c45907e8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3485, "license_type": "permissive", "max_line_length": 105, "num_lines": 118, "path": "/python/orthomcl/build-clusters.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys, string, re\nimport anydbm # index databases (file hash)\nfrom collections import defaultdict\n\nFASTAID_REGEX = re.compile(\">(\\S+)\")\n\n\ndef usage():\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" orthomcl.out all.fasta\"\n sys.exit(1)\n\n\ndef plausi():\n if len(sys.argv) != 3: usage()\n inFasta = sys.argv[2]\n inOrtho = sys.argv[1]\n if not os.path.exists(inFasta) or not os.path.isfile(inFasta) or not os.path.getsize(inFasta) > 0: \n print >> sys.stderr, \"specified input fasta file does not exist, is not a file, or is empty\\n\"\n usage()\n if not os.path.exists(inOrtho) or not os.path.isfile(inOrtho) or not os.path.getsize(inOrtho) > 0: \n print >> sys.stderr, \"specified input orthomcl.out file does not exist, is not a file, or is empty\\n\"\n usage()\n return inOrtho, inFasta\n\n\ndef cache_genomes(file, recreate=0):\n outdbm = file + 
\".dbm\"\n if os.path.exists(outdbm) and os.path.getsize(outdbm) > 0 and not recreate: return outdbm\n DBM = anydbm.open( outdbm, 'c' )\n fo = open(file)\n key = \"\"\n for line in fo:\n line = line.strip()\n if line.startswith(\">\"): \n key = re.match(FASTAID_REGEX, line).group(1)\n DBM[key] = \"\"\n else:\n DBM[key] += line\n DBM.close()\n return outdbm\n\n\nclass OrthoCluster():\n def __init__(self, line):\n descr, genedefs = line.split(\"\\t\")\n genedefs = genedefs.split()\n self.name = descr[:descr.index('(')].lower()\n self.geneHash = {}\n self.speciesHash = {}\n for genedef in genedefs:\n geneid = genedef[:genedef.index('(')]\n species = genedef[genedef.index('(')+1:-1]\n self.geneHash[geneid] = species\n if self.speciesHash.has_key(species): self.speciesHash[species].append(geneid)\n else: self.speciesHash[species] = [geneid]\n\n def get_name(self): return self.name\n def get_count(self): return len(self.geneHash)\n def get_gene_hash(self): return self.geneHash\n def get_species_hash(self): return self.speciesHash\n \n\n\ndef main():\n inOrtho, inFasta = plausi()\n info(\" caching genomes ...\")\n dbm = cache_genomes(inFasta)\n info(\" done.\")\n sout, serr = catch_bash_cmd_output( \"wc -l %s\" % inOrtho )\n total = int( sout.split()[0] )\n count = 0\n fo = open(inOrtho)\n for line in fo:\n o = OrthoCluster(line.rstrip())\n geneHash = o.get_gene_hash()\n name = o.get_name()\n idfile = name + \".ids\"\n fastafile = name + \".fasta\"\n ufastafile = name + \".ufasta\"\n fw = open(idfile, 'w')\n for id, species in geneHash.iteritems(): fw.write(id + \"\\n\")\n fw.close()\n \n fu = open(ufastafile, 'w')\n fw = open(fastafile, 'w')\n seqHash = anydbm.open(dbm, \"r\")\n collectedSpecies = defaultdict(int)\n for geneid, species in geneHash.iteritems():\n if not seqHash.has_key(geneid):\n print \"ID\", geneid, \"not found\"\n continue\n sequence = seqHash[geneid]\n collectedSpecies[species] += 1\n count = collectedSpecies[species] \n fw.write(\">\" + geneid + \"\\n\")\n fu.write(\">\" + species + \"-\" + str(count) +\"\\n\")\n i = 0\n width = 60\n while i < len(sequence):\n frac = sequence[i:min([len(sequence),i+width])]\n fw.write(frac + \"\\n\")\n fu.write(frac + \"\\n\")\n i += width\n\n seqHash.close()\n fw.close()\n fu.close()\n count += 1\n progress = int(50.0 * count / total) * \"#\"\n progress += (50 - len(progress)) * \" \"\n info(\" 0% \" + progress + \" 100% \")\n\n info(\" 0% \" + progress + \" 100% \\n\")\n \n\n\nmain()\n" }, { "alpha_fraction": 0.5037654042243958, "alphanum_fraction": 0.5170645713806152, "avg_line_length": 34.460227966308594, "blob_id": "f26a9bcb46a0ee9ef3c9c1b489b01d6c9ae69e1d", "content_id": "9e190ce30c23251fb1898c1997057e72d021b02f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6241, "license_type": "permissive", "max_line_length": 116, "num_lines": 176, "path": "/python/blast/paralogs-from-selfblast.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t\t\t# low level handling, such as command line stuff\nimport string\t\t\t\t\t# string methods available\nimport re\t\t\t\t\t\t\t# regular expressions\nimport getopt\t\t\t\t\t# comand line argument handling\nimport tempfile\nimport hashlib\nimport fasta\nfrom low import *\t\t\t# custom functions, written by myself\n\n# =============================================================================\t\ndef show_help( ):\n\t\"\"\" displays the program parameter list and usage 
information \"\"\"\n\tstdout( \"usage: \" + sys.argv[0] + \" -f <path> -b <path>\" )\n\tstdout( \" \" )\n\tstdout( \" option description\" )\n\tstdout( \" -h help (this text here)\" )\n\tstdout( \" -f fasta file (input for blast)\" )\n\tstdout( \" -b blast.out in tab format (-m 8)\" )\n\tstdout( \" -l filter local (based on blast alignment)\" )\n\tstdout( \" -g filter global (using muscle)\" )\n\tstdout( \" -q quiet: no stderr status messages\" )\n\tstdout( \" -i add percent identity as a column\" )\n\tstdout( \" \" )\n\tsys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n\t\"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\tif len ( sys.argv ) == 1:\n\t\tstderr( \"no arguments provided.\" )\n\t\tshow_help()\n\n\ttry: # check for the right arguments\n\t\tkeys, values = getopt.getopt( sys.argv[1:], \"hf:b:qgli\" )\n\texcept getopt.GetoptError:\n\t\tstderr( \"invalid arguments provided.\" )\n\t\tshow_help()\n\n\targs = {'quiet':False, 'global':False, 'local':False, 'identity':False}\n\tfor key, value in keys:\n\t\tif key == '-f': args['fasta'] = value\n\t\tif key == '-b': args['blastout'] = value\n\t\tif key == '-q': args['quiet'] = True\n\t\tif key == '-l': args['local'] = True\n\t\tif key == '-g': args['global'] = True\n\t\tif key == '-i': args['identity'] = True\n\n\tif not args.has_key('blastout'):\n\t\tstderr( \"blastout file missing.\" )\n\t\tshow_help()\n\tif not file_exists( args.get('blastout') ):\n\t\tstderr( \"blastout file does not exist.\" )\n\t\tshow_help()\n\n\tif not args.has_key('fasta'):\n\t\tstderr( \"fasta file missing.\" )\n\t\tshow_help()\n\tif not file_exists( args.get('fasta') ):\n\t\tstderr( \"fasta file does not exist.\" )\n\t\tshow_help()\n\treturn args\n\n# =============================================================================\ndef statusbar(current, total, message=\"\", width=40):\n  progress = 1.0*current/total\n  if message != \"\": message = \"[\" + message + \"]\"\n  progressbar = \"=\" * int(progress*width)\n  while len(progressbar) < width: progressbar += \" \"\n  sys.stderr.write(\"\\r 0% \" + progressbar + \" 100% \" + message)\n  if progress == 1.0: sys.stderr.write(\"\\n\")\n\n\n# =============================================================================\nclass ParalogPair:\n  def __init__(self, id1, id2):\n    a = [id1, id2]\n    a.sort()\n    self.ids = a\n    self.key = string.join(a, \",\")\n    self.pep = []\n    self.name = hashlib.md5(string.join(self.ids, \",\")).hexdigest()\n\n  def align_pep(self, overwrite=False):\n    # write both peptides to a temp fasta file, align them with muscle, and read the alignment back in\n    pf, pepfile = tempfile.mkstemp(\".pep\")\n    af, alnfile = tempfile.mkstemp(\".aln\")\n    os.close(pf)\n    os.close(af)\n    pf = open(pepfile, 'w')\n    for i in range(len(self.pep)):\n      pf.write(\">\" + self.ids[i] + \"\\n\")\n      pf.write(self.pep[i] + \"\\n\")\n    pf.close()\n    os.system(\"muscle -in %s -out %s -quiet -maxiters 2 2> /dev/null\" %(pepfile, alnfile))\n    os.unlink(pepfile)\n    self.aln = []\n    af = open(alnfile)\n    for line in af:\n      if line.startswith(\">\"): self.aln.append(\"\")\n      else: self.aln[-1] += line.strip()\n    af.close()\n    os.unlink(alnfile)\n    # count aligned residue pairs and identical pairs\n    self.alnlen = len(self.aln[0])\n    self.alnres = 0\n    identity = 0\n    for i in range(self.alnlen):\n      x = self.aln[0][i]\n      y = self.aln[1][i]\n      if x != '-' and y != '-':\n        self.alnres += 1\n        if x == y: identity += 1\n    self.identity = 100.0 * identity / self.alnlen\n\n\n# =============================================================================\ndef 
prefetch_sequences(pepfile):\n return fasta.get_sequence_hash(pepfile)\n\n# =============================================================================\ndef get_seq_lengths(fastafile):\n lenhash = {}\n seqhash = fasta.get_sequence_hash(fastafile)\n for gid, seq in seqhash.iteritems(): lenhash[gid] = len(seq)\n return lenhash\n\n# =============================================================================\ndef get_total_line_count(ifile):\n total = 0\n fo = open(args['blastout'])\n for line in fo: total += 1\n fo.close()\n return total\n\n# =============================================================================\n# =============================================================================\ndef main( args ):\n pephash, lenhash = prefetch_sequences(args['fasta']), get_seq_lengths(args['fasta'])\n current, total = 0, get_total_line_count(args['blastout'])\n fo = open(args['blastout'])\n for line in fo:\n current += 1\n if not args['quiet']: statusbar(current, total, \"processing blastout\")\n line = line.strip()\n if line.startswith(\"#\") or len(line) == 0: continue\n (sid1, sid2, identity, alnlen, mismatch, gap, start1, stop1, start2, stop2, evalue, bitscore) = line.split(\"\\t\")\n if sid1 == sid2: continue\n if float(evalue) > float('1e-10'): continue\n if float(identity) < 30: continue\n \n pp = ParalogPair(sid1, sid2)\n# print >> sys.stderr, sid1 + \"\\t\" + sid2\n if args['local']:\n length1, length2 = lenhash[sid1], lenhash[sid2]\n if length1 < 100 or length2 < 100: continue\n if int(alnlen) < 80 or (float(alnlen) / max([length1, length2])) < 0.70: continue\n #if alnlen < 100: continue\n pp.identity = float(identity)\n pp.alnres = float(bitscore)\n \n if args['global']:\n for pid in pp.ids: pp.pep.append(pephash[pid])\n pp.align_pep()\n if pp.alnres < 100 or pp.identity < 40: continue\n\n sys.stdout.write(string.join(pp.ids, \"\\t\"))\n if args['identity']: print \"\\t\" + str(pp.identity)\n else: print \"\"\n \n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.5144413113594055, "alphanum_fraction": 0.5193926095962524, "avg_line_length": 30.5625, "blob_id": "4a3ab7c75fd0285dd152a545824d81932ab44c75", "content_id": "0398e51fec2469323a81f234c24af77e0e331dcf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6059, "license_type": "permissive", "max_line_length": 100, "num_lines": 192, "path": "/python/blast/cluster_sequences.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys \t\t# low level handling, such as command line stuff\nimport string\t\t\t# string methods available\nimport re\t\t\t\t\t# regular expressions\nimport getopt\t\t\t# comand line argument handling\nfrom low import *\t# custom functions, written by myself\n\n# =============================================================================\t\ndef show_help( ):\n\t\"\"\" displays the program parameter list and usage information \"\"\"\n\tstdout( \"usage: \" + sys.argv[0] + \" -f <path> -e <1e-n> -i <75> -p <90> -o <path>\" )\n\tstdout( \" \" )\n\tstdout( \" option description\" )\n\tstdout( \" -h help (this text here)\" )\n\tstdout( \" -f in sequence file\" )\n\tstdout( \" -e maximum evalue\" )\n\tstdout( \" -i minimum percent 
identity\" )\n\tstdout( \" -p minimum percent positives\" )\n\tstdout( \" -o out folder\" )\n\tstdout( \" \" )\n\t\n\tsys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n\t\"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\tif len ( sys.argv ) == 1:\n\t\tstderr( \"no arguments provided.\" )\n\t\tshow_help()\t\n\t\n\ttry: # check for the right arguments\n\t\tkeys, values = getopt.getopt( sys.argv[1:], \"hf:o:e:i:p:\" )\n\texcept getopt.GetoptError:\n\t\tstderr( \"invalid arguments provided.\" )\n\t\tshow_help()\n\t\n\targs = {}\n\tfor key, value in keys:\n\t\tif key == '-f': args['in'] = value\n\t\tif key == '-o':\targs['out'] = value\n\t\tif key == '-i':\targs['identities'] = value\n\t\tif key == '-p':\targs['positives'] = value\n\t\tif key == '-e':\targs['evalue'] = value\n\t\t\t\t\n\tif not args.has_key('in'):\n\t\tstderr( \"in file missing.\" )\n\t\tshow_help()\n\tif not file_exists( args.get('in') ):\n\t\tstderr( \"in file does not exist.\" )\n\t\tshow_help()\n\t\t\n\tif not args.has_key('out'):\n\t\tstderr( \"out folder missing.\" )\n\t\tshow_help()\n\t\n\tif not dir_exists( args.get('out') ):\n\t\tos.mkdir( args.get('out') )\n\t\n\tif not args['out'].endswith('/'): args['out'] += '/'\n\t\n\treturn args\n\n# =============================================================================\ndef blast( query, subject, overwrite=0 ):\n\t#infomsg( \"BLAST %s vs. %s\" %(query, subject) )\n\tblastout = \"blast-out.\" + get_basename(query).replace('red_','') + \"_\" + get_basename(subject)\n\t# formatdb\n\t#os.system( \"formatdb -i %s\" %(subject) )\n\t# blast\n\tif not file_exists( blastout ) or overwrite:\n\t\tos.system( \"blastall -p blastp -i %s -d %s -o %s\" %( query, subject, blastout ) )\n\t#print \"blastall -p blastp -i %s -d %s -o %s\" %( query, subject, blastout )\n\treturn blastout\n\t\n# =============================================================================\ndef parse_blastout( file, args ):\n\tparseout = \"parsed\" + os.path.splitext( file ) [1]\n\tprogrammcall = \"parse_blast_out2.py -f \" + file\n\tif args.has_key( 'evalue' ): programmcall += \" -e \" + args.get('evalue')\n\tif args.has_key( 'identities' ): programmcall += \" -i \" + args.get('identities')\n\tif args.has_key( 'positives' ): programmcall += \" -p \" + args.get('positives')\n\t#if not file_exists( parseout ):\n\tos.system( programmcall + \" > \" + parseout )\n\n\t# make list non-redundant: remove self hits\n\tnonred_out = args.get('out') + 'blast-out.parsed-nr'\n\tfo = open(parseout)\n\tfw = open( nonred_out, 'w' )\n\tfor line in fo:\n\t\tqid, hid = line.split()[0:2]\n\t\tif qid == hid: continue\n\t\tfw.write( line )\n\tfw.flush()\n\tfw.close()\n\tfo.close()\n\treturn nonred_out\n\n\n# =============================================================================\ndef assemble_clusters( parsedfile, args ):\n\thash = {}\n\tfo = open( parsedfile, 'r' )\n\tfor line in fo:\n\t\tqid, hid = line.split()[0:2]\n\t\t#print qid, \"\\t\", hid\n\t\tif not hash.has_key(qid) and not hash.has_key(hid):\n\t\t\thash[ qid ] = [hid]\n\t\telse:\n\t\t\tif qid == hid: \n\t\t\t\t#print \"skipped\"\n\t\t\t\tcontinue\n\t\t\tif hash.has_key(qid) and not hid in hash.get(qid):\n\t\t\t\thash[ qid ].append( hid )\n\t\t\telif hash.has_key(hid) and not qid in hash.get(hid):\t\n\t\t\t\thash[ hid ].append( qid )\n\t\t\t#else:\n\t\t\t#\tprint \"skipped\"\n\t\t#else:\n\t\t#\tstderr( \"strange hash behavior! 
%s %s\" %( qid, hid ) )\n\tfo.close()\n\tclusterout = args.get('out') + 'clusters.ids'\n\tfw = open( clusterout , 'w' )\n\tfor id, listofids in hash.iteritems():\n\t\tfw.write( id + ' ' + string.join( listofids, ' ' ) + '\\n' )\n\tfw.flush()\n\tfw.close()\n\treturn clusterout\n\t\n\n# =============================================================================\ndef annotate_clusters( clusterout, args ):\n\t\n\tdef get_sequence( id ):\n\t\tseqfile = args.get('out')+'seqfile.tmp'\n\t\tos.system( \"xdget -p ~/workspace/EST/rsd/datasets/aa/%s %s > %s\" %( id[0:2]+'.aa', id, seqfile ) )\n\t\treturn seqfile\n\t\n\tdef blast_sp( seqfile ):\n\t\tout = blast( seqfile, '~/workspace/EST/blast-databases/swissprot-all', 1 )\n\t\treturn out\n\t\n\tdef get_descr( blastout ):\n\t\tdescr = ''\n\t\tfo = open( blastout, 'r' )\n\t\tfor line in fo:\n\t\t\t#print line\n\t\t\tif descr == '':\n\t\t\t\tif not line[:1] == '>': continue\n\t\t\t\telse:\tdescr += line[1:].replace('\\n','')\n\t\t\telse:\n\t\t\t\tif re.search( 'Length =', line):\n\t\t\t\t\tdescr += line[:line.index('Length =')]\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tdescr += line.replace('\\n','')\n\t\tfo.close()\n\t\tdescr = re.sub( '\\s{2,99}', ' ' , descr )\n\t\treturn descr\n\t\n\tfo = open( clusterout, 'r' )\n\tfor line in fo:\n\t\tids = line.split()\n\t\tdescr = []\n\t\tfor id in ids:\n\t\t\tseq = get_sequence( id )\n\t\t\tblastout = blast_sp( seq )\n\t\t\tdescr.append( get_descr( blastout ) )\n\t\tprint \"cluster:\", string.join(ids, ' ')\n\t\tfor d in descr:\n\t\t\tprint \"descr:\", d\n\t\tprint \" -\" *50\n\t\t#sys.exit(1212)\n\t\n\tfo.close()\n\n# =============================================================================\n# =============================================================================\ndef main( args ):\n\t\n\tblastout = blast( args.get('in'), args.get('in') )\n\tparsedfile = parse_blastout( blastout, args )\n\tclusterout = assemble_clusters( parsedfile, args )\n\tannotate_clusters( clusterout, args )\n\t\n\t\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )" }, { "alpha_fraction": 0.5669804215431213, "alphanum_fraction": 0.5850832462310791, "avg_line_length": 24.574073791503906, "blob_id": "6afe1c52319abf9abeaa9e6eeb3768af7b1caada", "content_id": "c4ee191c35f47b60dcc3d9003a8f8bedd1b162d7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1381, "license_type": "permissive", "max_line_length": 122, "num_lines": 54, "path": "/python/paml/parse_codeml-modelA.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys, re\n\ndef usage():\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" folder (files end with *.MAalt)\"\n sys.exit(1)\n\n\ndef plausi():\n if len(sys.argv) != 2: usage()\n inFolder = sys.argv[1]\n return inFolder\n\n\ndef parse_from_file(inFile):\n basename = os.path.split(inFile)[1]\n fo = open(inFile)\n line = fo.readline().rstrip()\n while 1:\n if line.startswith(\"ns =\"): \n print >> sys.stderr, inFile\n length = re.search(\"ls =\\s+(\\d+)\", line).group(1)\n\n if not line.startswith(\"Bayes Empirical Bayes (BEB) analysis (Yang, Wong & Nielsen 2005. Mol. Biol. Evol. 
22:1107-1118)\"):\n      line = fo.readline().rstrip()\n    else:\n      line = fo.readline().rstrip() # Positive sites for foreground lineages Prob(w>1):\n      line = fo.readline().rstrip()\n      if re.match(\"^$\", line):\n        sites = 0\n      else:\n        sites = 0\n        while not re.match(\"^$\", line):\n          if line.endswith(\"*\"):\n            sites += 1\n            print basename + \"\\t\" + length + \"\\t\" + line\n          line = fo.readline().rstrip()\n      break\n  print >> sys.stderr, basename + \"\\t\" + str(sites)\n  fo.close()\n\n\ndef parse_all_files(inFolder):\n  for filename in os.listdir(inFolder):\n    if not filename.endswith(\".MAalt\"): continue\n    parse_from_file(inFolder + \"/\" + filename)\n\n\ndef main():\n  inFolder = plausi()\n  parse_all_files(inFolder)\n\n\nmain()\n" }, { "alpha_fraction": 0.6224783658981323, "alphanum_fraction": 0.6282420754432678, "avg_line_length": 16.523809432983398, "blob_id": "082a6a3ff4827c25c542dc069dca15306d54c9f3", "content_id": "e3da51f6dda3f8992e25f0af745f9455a3c48dd4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 347, "license_type": "permissive", "max_line_length": 30, "num_lines": 21, "path": "/python/generic/subtract.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sets\nimport sys, os\n\ndef get_lines( file ):\n  lines = []\n  fo = open(file)\n  for line in fo:\n    line = line.rstrip()\n    lines.append(line)\n\n  return sets.Set(lines)\n\nref = get_lines(sys.argv[1])\nfor filename in sys.argv[2:]:\n  l = get_lines(filename)\n  for e in l:\n    if e in ref: ref.remove(e)\n\nfor e in ref: print e\n" }, { "alpha_fraction": 0.48209065198898315, "alphanum_fraction": 0.4842836260795593, "avg_line_length": 32.35365676879883, "blob_id": "346ec4e893e3cb925a05d76ba8b798593de3a735", "content_id": "a7121bf365a898910be047c8dcbf9d2210dd8fa2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2736, "license_type": "permissive", "max_line_length": 97, "num_lines": 82, "path": "/python/generic/flat2xdom.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys      # low level handling, such as command line stuff\nimport string       # string methods available\nimport re           # regular expressions\nimport getopt       # command line argument handling\nimport math         # math functions\nfrom low import *   # custom functions, written by myself\n\n# ============================================================================= \ndef show_help( ):\n  \"\"\" displays the program parameter list and usage information \"\"\"\n  stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n  stdout( \" \" )\n  stdout( \" option description\" )\n  stdout( \" -h help (this text here)\" )\n  stdout( \" -f fasta file to import\" )\n  stdout( \" -p prefix to put in front of the key\" )\n  stdout( \" -d delimiter (default: space | allowed: ; , tab, space)\" )\n  stdout( \" \" )\n  sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n  \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n  if len ( sys.argv ) == 1:\n    stderr( \"no arguments provided.\" )\n    show_help() \n  \n  try: # check for the right arguments\n    keys, values = getopt.getopt( sys.argv[1:], \"hf:p:d:\" )\n  except getopt.GetoptError:\n    stderr( \"invalid arguments provided.\" )\n    show_help()\n\n  args = {}\n  for key, value in keys:\n    if key == '-f': args['file'] = value\n    if key == '-p': args['prefix'] 
= value\n if key == '-d': args['delimiter'] = value\n \n if not args.has_key('file'):\n stderr( \"import file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"import file does not exist.\" )\n show_help()\n \n if not args.has_key('delimiter') or args.get('delimiter') not in [ \";\", \",\", \"tab\", \"space\" ]: \n args['delimiter'] = 'space'\n\n return args\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n\n fo = open( args.get('file') )\n oldid = \"\"\n for line in fo:\n line = line.rstrip()\n if args.get('delimiter') == \"tab\":\n columns = line.split(\"\\t\")\n elif args.get('delimiter') == \"space\":\n columns = line.split()\n else:\n columns = line.split( args.get('delimiter') )\n id = columns[0]\n if id != oldid:\n oldid = id\n if args.has_key('prefix'):\n print \">\" + args.get('prefix') + id\n else:\n print \">\" + id\n print string.join( columns[1:], \"\\t\" )\n fo.close()\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5091423988342285, "alphanum_fraction": 0.5173834562301636, "avg_line_length": 37.05882263183594, "blob_id": "7d3277fac6874dbbe8d200b429ab8a400030d03a", "content_id": "701151653ec05e98269a98f5915a97c418e23ead", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3883, "license_type": "permissive", "max_line_length": 107, "num_lines": 102, "path": "/python/pfam/pfam-pid2arrangement.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nfrom low import * # custom functions, written by myself\nimport pfam\nimport copy\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f hmmout file\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['hmmoutfile'] = value\n \n for key in ['hmmoutfile']:\n if key.endswith(\"file\"):\n if not args_file_exists(args, key): show_help()\n elif key.endswith(\"dir\"):\n if not args_dir_exists(args, key): show_help()\n elif not args.has_key(key):\n print >> sys.stderr, \"missing argument\", key\n show_help()\n return args\n\n# =============================================================================\ndef statusbar(current, total, message=\"\", width=40):\n progress = 1.0*current/total\n if message != 
\"\": message = \"[\" + message + \"]\"\n progressbar = \"=\" * int(progress*width)\n while len(progressbar) < width: progressbar += \" \" \n sys.stderr.write(\"\\r 0% \" + progressbar + \" 100% \" + message)\n if progress == 1.0: sys.stderr.write(\"\\n\")\n\n# =============================================================================\ndef get_dom_coverage(domains):\n endpos = max([int(d.get_attr('alignment_end')) for d in domains])\n cov = [0] * endpos\n for d in domains:\n for i in range(int(d.get_attr('alignment_start')), int(d.get_attr('alignment_end'))):\n cov[i] += 1\n return cov\n \n# =============================================================================\ndef domains2arrangement(domains):\n if len(domains) == 1: return domains[0].get_attr('hmm_name')\n domains.sort(cmp=lambda x,y: cmp(int(x.get_attr('alignment_start')), int(y.get_attr('alignment_start'))))\n domainCoverage = get_dom_coverage(domains)\n while max(copy.copy(domainCoverage)) > 1:\n pos = domainCoverage.index(max(copy.copy(domainCoverage)))\n resolveDomains = []\n for d in domains:\n if d.covers(pos): resolveDomains.append(d)\n resolveDomains.sort(cmp=lambda x,y: cmp(float(x.get_attr('E-value')), float(y.get_attr('E-value'))))\n domains.remove(resolveDomains[-1])\n domainCoverage = get_dom_coverage(domains)\n domarray = [d.get_attr('hmm_name') for d in domains]\n if len(domarray) > 1:\n i = 0\n while i < len(domarray)-1:\n if domarray[i] == domarray[i+1]: domarray.pop(i+1)\n else: i += 1\n return string.join(domarray, ';') \n \n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n pid2domains = pfam.read_hmmout(args['hmmoutfile'])\n for pid, domains in pid2domains.iteritems():\n sys.stderr.write(\"\\r\" + pid + \" \"*20)\n arr = domains2arrangement(domains)\n print pid + \"\\t\" + arr \n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.5404064059257507, "alphanum_fraction": 0.5474952459335327, "avg_line_length": 38.542057037353516, "blob_id": "58f4df31ae494e10e442f9255875136ebf7612f7", "content_id": "054b6ef10611c882a1e219db4c2093c60a29f34d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4232, "license_type": "permissive", "max_line_length": 148, "num_lines": 107, "path": "/python/gff/plot-genomic-region.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nfrom low import * # custom functions, written by myself\nimport gff3\n\nfrom reportlab.lib import colors\nfrom reportlab.lib.units import cm\nfrom Bio.Graphics import GenomeDiagram\nfrom Bio import SeqIO\nfrom Bio.SeqFeature import SeqFeature, FeatureLocation\n\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path>\" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -i surrounding genes (ID1,ID2)\" )\n stdout( \" -g gff file\" 
)\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hi:g:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-i': args['genes'] = value.split(',')\n if key == '-g': args['gffile'] = value\n \n for key in ['genes', 'gffile']:\n if key.endswith(\"file\"):\n if not args_file_exists(args, key): show_help()\n elif key.endswith(\"dir\"):\n if not args_dir_exists(args, key): show_help()\n elif not args.has_key(key):\n print >> sys.stderr, \"missing argument\", key\n show_help()\n return args\n\n# =============================================================================\ndef statusbar(current, total, message=\"\", width=40):\n progress = 1.0*current/total\n if message != \"\": message = \"[\" + message + \"]\"\n progressbar = \"=\" * int(progress*width)\n while len(progressbar) < width: progressbar += \" \" \n sys.stderr.write(\"\\r 0% \" + progressbar + \" 100% \" + message)\n if progress == 1.0: sys.stderr.write(\"\\n\")\n \n\n# =============================================================================\ndef get_coordinates_for_diagram(gfhash, genes):\n positions = []\n for scaffold, gfs in gfhash.iteritems():\n for gf in gfs:\n if gf.ftype != 'mRNA' or not gf.get_attributes().has_key('ID'): continue\n if gf.get_attributes()['ID'] in genes: \n positions += [gf.start, gf.stop]\n print gf.start, gf.stop\n if len(positions) == 4: return scaffold, min(positions), max(positions)\n \n \n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n gfhash = gff3.get_gff_hash(args['gffile'])\n gid, Diagstart, Diagstop = get_coordinates_for_diagram(gfhash, args['genes'])\n print gid, Diagstart, Diagstop\n \n gd_diagram = GenomeDiagram.Diagram(gid)\n gd_track_for_features = gd_diagram.new_track(1, name=\"Annotated Genes\")\n gd_feature_set = gd_track_for_features.new_set()\n \n for gf in sorted(gfhash[gid], key=lambda x: x.start):\n if gf.ftype != 'mRNA': continue\n if gf.start > gf.stop: gf.start, gf.stop = gf.stop, gf.start\n if gf.stop < Diagstart or gf.start > Diagstop: continue\n f = SeqFeature(FeatureLocation(max([gf.start, Diagstart]), min([gf.stop, Diagstop])), strand=int(gf.strand+'1'), type=gf.get_attributes()['ID'])\n gd_feature_set.add_feature(f, label=True, label_size=10, label_angle=0, sigil=\"ARROW\")\n print gf.get_attributes()['ID'], gf.start, gf.stop\n \n gd_diagram.draw(start=Diagstart, end=Diagstop, format='linear', fragments=1, pagesize=(100*cm, 4*cm))\n outfile = os.path.split(args['gffile'])[1] + \"_\"+ string.join(args['genes'], \"_\") + '.pdf'\n gd_diagram.write(outfile, \"PDF\")\n print outfile\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n\n" }, { "alpha_fraction": 0.7657067775726318, "alphanum_fraction": 0.7657067775726318, "avg_line_length": 68.36363983154297, "blob_id": "36c0da9ef36fb5e1538013ae769a231875121fb4", "content_id": 
"58dd1c39616d210e46c7141bb7d2ff1bbbb8490e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 764, "license_type": "permissive", "max_line_length": 318, "num_lines": 11, "path": "/README.md", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "This repository contains a broad, but probably incomplete, collection of scripts I developed for my bioinformatics analyses.\n\n\nOrganization of files\n---------------------\nThus far, *python* and *ruby* scripts are included in separate folders. I have further created subfolders for scripts that relate to specific programs/databases (e.g. Pfam, GeneOntology) or file formats (e.g. fasta, gff). The *base* folder includes sources that may be imported and therefore required by other scripts.\n\n\nDocumentation\n-------------\nDocumentation is almost non-existant, and if present in scripts, it may be outdated. However, naming of the scripts themsolves as well as their subroutines should make it easy to reconstruct how/when/where each of the programs can be applied.\n\n" }, { "alpha_fraction": 0.5070521831512451, "alphanum_fraction": 0.5112835168838501, "avg_line_length": 33.585365295410156, "blob_id": "de953e12414a89c661852569a6dbe77d23499aef", "content_id": "69072efea2bb4f37775fa4508fc18ebd33c0845c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4254, "license_type": "permissive", "max_line_length": 105, "num_lines": 123, "path": "/python/pfam/pfam-mapping.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys # low level handling, such as command line stuff\nimport string # string methods available\nimport re # regular expressions\nimport getopt # comand line argument handling\nimport math # match functions\nfrom low import * # custom functions, written by myself\n\n# ============================================================================= \ndef show_help( ):\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path> \" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f pfam_full file to parse\" )\n stdout( \" \" )\n sys.exit(1)\n\n# =============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n \n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['file'] = value\n \n if not args.has_key('file'):\n stderr( \"import file argument missing.\" )\n show_help()\n elif not file_exists( args.get('file') ):\n stderr( \"import file does not exist.\" )\n show_help()\n \n return args\n\n\n# =============================================================================\ndef get_regex( args ):\n idhash = {}\n idhash['name'] = re.compile('^#=GF ID\\s+(\\S+)')\n idhash['acc'] = re.compile('^#=GF AC\\s+(PF\\S+)')\n idhash['descr'] = re.compile('^#=GF DE\\s+(.*)$')\n idhash['comment'] = re.compile('^#=GF CC\\s+(.*)$')\n idhash['pftype'] = re.compile('^#=GF TP\\s+(\\S+)')\n idhash['terminate'] = re.compile('^\\\\\\\\$')\n return 
idhash\n\n# =============================================================================\nclass PfamEntry:\n def __init__(self):\n self.name = \" \"\n self.acc = \" \"\n self.descr = \" \"\n self.comment = \" \"\n self.pftype = \" \"\n\n def set_name(self, name):\n self.name = name\n\n def set_acc(self, acc):\n self.acc = acc\n\n def set_descr(self, descr):\n if self.descr == \" \": self.descr = descr\n else: self.descr += \" \" + descr\n\n def set_comment(self, comment):\n if self.comment == \" \": self.comment = comment\n else: self.comment += \" \" + comment\n\n def set_pftype(self, pftype):\n self.pftype = pftype\n \n def to_string(self):\n return string.join([self.name, self.acc, self.pftype, self.descr, self.comment], \"\\t\")\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\ndef main( args ):\n\n regex = get_regex( args )\n info(\"getting total number of entries...\")\n sout, serr = catch_bash_cmd_output( \"grep '^//' -c %s\" %args.get('file') )\n total = int( sout )\n info(\"total number of entries: %s\\n\" %total)\n count = 0\n infomsg(\"entries found: %s\" %total )\n print \"#\" + string.join(['abbrv', 'pfamid', 'pfamtype', 'description', 'comment'], \"\\t\")\n \n fo = open( args.get('file') )\n entry = PfamEntry()\n for line in fo:\n line = line.rstrip()\n if line.startswith('//'):\n count += 1\n print entry.to_string()\n info( \" status:\\t%01.2f%%\\t%s/%s\" %( 100.0*count/total, count, total ) )\n entry = PfamEntry()\n elif not line.startswith('#=GF'): continue\n elif re.match(regex['name'], line): entry.set_name( re.match(regex['name'], line).group(1) )\n elif re.match(regex['acc'], line): entry.set_acc( re.match(regex['acc'], line).group(1) )\n elif re.match(regex['pftype'], line): entry.set_pftype( re.match(regex['pftype'], line).group(1) )\n elif re.match(regex['descr'], line): entry.set_descr( re.match(regex['descr'], line).group(1) )\n elif re.match(regex['comment'], line): entry.set_comment( re.match(regex['comment'], line).group(1) )\n fo.close()\n info(\"\\ndone.\\n\")\n\n# =============================================================================\nargs = handle_arguments()\nmain( args )\n" }, { "alpha_fraction": 0.48062244057655334, "alphanum_fraction": 0.4844392240047455, "avg_line_length": 31.132076263427734, "blob_id": "7027fca227b60153adc197ae83add0dd2ed8a05f", "content_id": "f72d19fb07ebb13290b184d52af272ca902172d8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3406, "license_type": "permissive", "max_line_length": 231, "num_lines": 106, "path": "/python/generic/parallel-processes.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport sys, os\nimport math, time\nimport threading\nimport getopt # comand line argument handling\nfrom low import *\n\n# =============================================================================\ndef show_help():\n \"\"\" displays the program parameter list and usage information \"\"\"\n stdout( \"usage: \" + sys.argv[0] + \" -f <path> -n \" )\n stdout( \" \" )\n stdout( \" option description\" )\n stdout( \" -h help (this text here)\" )\n stdout( \" -f file that contains all system calls, one per line\" )\n stdout( \" -n number of processes to be run in parallel\" )\n stdout( \" \" )\n sys.exit(1)\n\n\n# 
=============================================================================\ndef handle_arguments():\n \"\"\" verifies the presence of all necessary arguments and returns the data dir \"\"\"\n if len ( sys.argv ) == 1:\n stderr( \"no arguments provided.\" )\n show_help() \n\n try: # check for the right arguments\n keys, values = getopt.getopt( sys.argv[1:], \"hf:n:\" )\n except getopt.GetoptError:\n stderr( \"invalid arguments provided.\" )\n show_help()\n\n args = {}\n for key, value in keys:\n if key == '-f': args['file'] = value\n if key == '-n': args['ncpu'] = int(value)\n \n if not args.has_key('file'):\n stderr( \"process file missing.\" )\n show_help()\n if not file_exists( args.get('file') ):\n stderr( \"process file does not exist.\" )\n show_help()\n\n if not args.has_key('ncpu'):\n stderr( \"number of CPUs to use is missing.\" )\n show_help()\n\n return args\n\n\n\n# =============================================================================\ndef get_jobs( file ):\n jobs = []\n fo = open( file )\n for line in fo:\n if not line.startswith(\"#\"): jobs.append(line.rstrip())\n fo.close()\n return jobs\n\n# =============================================================================\nclass MyThread( threading.Thread ):\n \n def set_command(self, command):\n self.command = command\n\n def run(self):\n os.system(self.command)\n\n\n# =============================================================================\n# =============================================================================\ndef main( args ):\n \n jobs = get_jobs( args.get('file') )\n totaljobs = len(jobs)\n infomsg( \"Collected %s jobs queued | will distribute them among %s CPUs\" %(len(jobs), args.get('ncpu')) )\n \n start_time = time.time()\n while threading.activeCount() > 1 or len(jobs) > 0:\n # check existing threads: still running?\n # fill up all remaining slots\n elapsed = time.time() - start_time\n while threading.activeCount() <= args.get('ncpu') and len(jobs) > 0:\n # start new thread\n cmd = jobs.pop(0)\n t = MyThread()\n t.set_command( cmd )\n t.start()\n\n remain = elapsed / (totaljobs - len(jobs) + (threading.activeCount() -1)) * len(jobs)\n info( \"\\telapsed: %s\\tremaining: %s\\t[ jobs done: %s | remain: %s | active: %s ] \" % (humanize_time(elapsed), humanize_time(remain), totaljobs - len(jobs) - (threading.activeCount() -1), len(jobs), threading.activeCount() -1) )\n time.sleep(0.2)\n \n info( \"\\n\" )\n infomsg( \"DONE.\" )\n\n\n# =============================================================================\n# === MAIN ====================================================================\n# =============================================================================\n\nargs = handle_arguments( )\nmain( args )\n" }, { "alpha_fraction": 0.6059070229530334, "alphanum_fraction": 0.6218329071998596, "avg_line_length": 29.973094940185547, "blob_id": "07a199cad2858a8835c972472ee7f42799a00ddf", "content_id": "8788ed5bd547face93409df80bfa9379b0076bbb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6907, "license_type": "permissive", "max_line_length": 155, "num_lines": 223, "path": "/python/orthomcl/remove-paralogs.py", "repo_name": "haokui/bioinformatics", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os, sys\nimport warnings\nwarnings.filterwarnings('ignore', category=DeprecationWarning)\n\n# takes an input protein fasta file and an orthomcl.gg file\n# orthomcl.gg file format:\n# speciesname1: id1 id2 id3 id4 .... 
full genome\n# speciesname2: id1 id2 id3 id4 .... full genome\n#\n# with these infos, the goal is to get only one protein sequence per species\n# we use t-coffee to find the most similar protein sequence per species\n# to the whole cluster. so in case one species contributes several sequences \n# to a cluster, we choose the one species to keep which has the highest average \n# similarity to the rest of the cluster. if more than 1 sequence yield the highest\n# avgsim, we determine whether these protein sequences are (1) all identical, \n# or whether they are (2) slightly different. In case (1), we choose any sequence\n# randomly because it does not matter. In case (2), we sum up all pairwise\n# similarities for each candidate sequence, and keep only the one sequence\n# with the highest sum. If these are identical as well, we again choose randomly\n# (should happen very rarely).\n\n\n\ndef usage():\n print >> sys.stderr, \"usage: \" + sys.argv[0] + \" fasta-folder all-proteins.fasta orthomcl.gg\"\n sys.exit(1)\n\n\ndef plausi():\n if len(sys.argv) != 4: usage()\n inFolder, allProteins, inGG = sys.argv[1:5]\n if not inFolder.endswith(\"/\"): inFolder += '/'\n return inFolder, allProteins, inGG\n\n\ndef get_number_of_species(inGG):\n count = 0\n fo = open(inGG)\n for line in fo: count += 1\n fo.close()\n return count\n\n\ndef read_gg(inGG):\n outHash = {}\n speciesArray = []\n fo = open(inGG)\n for line in fo:\n line = line.rstrip()\n cols = line.split()\n species = str(cols[0])[:-1]\n if not species in speciesArray: speciesArray.append(species)\n for col in cols[1:]:\n outHash[col] = species\n fo.close()\n return outHash, speciesArray\n\n\ndef parse_sim_out(inSeqs,inSim):\n allIDs = []\n fo = open(inSeqs)\n for line in fo:\n if line.startswith(\">\"): \n line = line.rstrip()\n allIDs.append(line[1:])\n fo.close()\n\n def translate_id(id, aI=allIDs):\n if not id in aI:\n for E in aI: \n e = E\n e = e.replace(\"#\",\"_\")\n e = e.replace(\":\",\"_\")\n if id == e: id = E\n return id\n\n outHash = {}\n fo = open( inSim )\n for line in fo:\n line = line.rstrip()\n cols = line.split()\n if line.startswith(\"AVG\"):\n id, avgsim = translate_id(cols[2]), float(cols[4])\n outHash[id].insert(0, avgsim)\n elif line.startswith(\"TOP\") or line.startswith(\"BOT\"):\n id1, id2, sim = translate_id(cols[4]), translate_id(cols[5]), float(cols[6])\n outHash[id1 + \"$$$\" + id2] = sim\n if outHash.has_key(id1): outHash[id1].append(sim)\n else: outHash[id1] = [sim]\n return outHash\n\n\ndef tcoffee_similarity(inSeqs, inSpeciesHash):\n out = inSeqs + \".tcoffee.sim\"\n if not os.path.exists(out) or not os.path.getsize(out) > 0:\n syscall = '~/bin/t-coffee -other_pg seq_reformat -in %s -output sim 1> %s' %(inSeqs, out)\n ec = os.system(syscall)\n if not ec == 0 or not os.path.isfile(out) or os.path.getsize(out) == 0:\n print \"T-COFFEE did not run smoothly. 
check manually: \" + syscall\n sys.exit(3)\n \n simHash = parse_sim_out(inSeqs,out)\n specHash = {}\n for id, sim in simHash.iteritems():\n if id.count('$$$') > 0: continue\n if not inSpeciesHash.has_key(id):\n id = id.replace(\"___\",\"_#_\")\n species = inSpeciesHash[id]\n if not specHash.has_key(species) or specHash[species][0] < sim[0]:\n specHash[species] = [sim[0], id]\n elif specHash.has_key(species) and specHash[species][0] == sim[0]: specHash[species].append(id)\n\n\n idsToKeep = []\n for species, array in specHash.iteritems():\n avgsim = array[0]\n ids = array[1:]\n while len(ids) > 1:\n id1, id2 = ids[0], ids[1]\n sim = simHash[id1 + '$$$' + id2]\n if sim == 100.00: ids.pop(1)\n else: \n simid1 = sum(simHash[id1])\n simid2 = sum(simHash[id2])\n if simid1 == simid2:\n print \"WARNING %s: identical avgsims but no identity between sequences %s [%s] / %s [%s] (%s | %s)\" %(out, id1, simid1, id2, simid2, sim, avgsim)\n ids.pop(1)\n elif simid1 > simid2: ids.pop(1)\n elif simid1 < simid2: ids.pop(0)\n idsToKeep.append(ids[0])\n \n return idsToKeep\n\n\ndef cache_genomes(file, recreate=0):\n outdbm = file + \".dbm\"\n if os.path.exists(outdbm) and os.path.getsize(outdbm) > 0 and not recreate: return outdbm\n DBM = anydbm.open( outdbm, 'c' )\n fo = open(file)\n key = \"\"\n for line in fo: \n line = line.strip()\n if line.startswith(\">\"): \n key = line[1:]\n if key.count(\" \") > 0: key = key[:key.index(\" \")]\n DBM[key] = \"\"\n else:\n DBM[key] += line\n DBM.close()\n return outdbm\n\n \n\ndef slim_cluster(inProteins, seqHash, speciesHash, speciesArray):\n outdir = \"paralog-free-clusters/\"\n if not os.path.exists(outdir): os.mkdir(outdir)\n sout, serr = catch_bash_cmd_output( \"grep '>' -c %s\" %(inProteins) )\n nGenes = int( sout )\n outbase = os.path.split(inProteins)[1]\n outFasta = outdir + os.path.splitext(outbase)[0] + \".fasta\"\n outUfasta = outdir + os.path.splitext(outbase)[0] + \".ufasta\"\n \n if nGenes > len(speciesArray):\n idsToKeep = tcoffee_similarity(inProteins, speciesHash)\n else:\n idsToKeep = []\n fo = open(inProteins)\n for line in fo:\n if line.startswith(\">\"): \n line = line.rstrip()\n idsToKeep.append(line[1:])\n fo.close()\n\n outstring = os.path.splitext(outbase)[0] + \"(\" + str(len(speciesArray)) + \" genes, \" + str(len(speciesArray)) + \" taxa):\\t\"\n fwi = open(outFasta, 'w')\n fwu = open(outUfasta, 'w')\n for id in idsToKeep: \n fwi.write(\">\" + id + \"\\n\")\n fwu.write(\">\" + speciesHash[id] + \"\\n\")\n sequence = seqHash[id]\n i = 0\n width = 60\n while i < len(sequence):\n frac = sequence[i:min([len(sequence),i+width])]\n fwi.write(frac + \"\\n\")\n fwu.write(frac + \"\\n\")\n i += width\n outstring += id + \"(\" + speciesHash[id] + \") \"\n fwi.close()\n fwu.close()\n return outstring\n \n\ndef get_all_fasta_files(inFolder):\n files = []\n for inFile in os.listdir(inFolder):\n if not inFile.endswith(\".fasta\"): continue\n files.append(inFolder + inFile)\n return files\n\n\ndef main():\n inFolder, allProteins, inGG = plausi()\n inFiles = get_all_fasta_files(inFolder)\n speciesHash, speciesArray = read_gg(inGG)\n dbm = cache_genomes(allProteins)\n seqHash = anydbm.open(dbm, \"r\")\n total, count = len(inFiles), 0\n fw = open(\"noparalogs.orthomcl.out\", \"w\")\n for inFile in inFiles:\n outstring = slim_cluster(inFile, seqHash, speciesHash, speciesArray)\n fw.write( outstring + \"\\n\" )\n count += 1\n progress = int(50.0 * count / total) * \"#\"\n progress += (50 - len(progress)) * \" \"\n info(\" 0% \" + progress + \" 100% \")\n 
info(\" 0% \" + progress + \" 100% \\n\")\n seqHash.close()\n fw.close()\n\n\nmain()\n" } ]
152
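The pfam-pid2arrangement.py record above resolves overlapping Pfam hits by repeatedly discarding the worst-scoring (highest E-value) domain at the most deeply covered position, then collapsing immediate repeats of the same domain name. Below is a minimal Python 3 sketch of that idea; it uses plain (start, end, evalue, name) tuples instead of the repo's pfam domain objects, so the tuple layout and the sample hits are illustrative assumptions, not the repo's API.

# domains: list of (start, end, evalue, name) with half-open coordinates
def resolve_overlaps(domains):
    doms = sorted(domains, key=lambda d: d[0])
    while True:
        # build the coverage profile over the sequence
        cov = [0] * max(d[1] for d in doms)
        for s, e, _, _ in doms:
            for i in range(s, e):
                cov[i] += 1
        deepest = max(cov)
        if deepest <= 1:
            break
        pos = cov.index(deepest)
        # drop the weakest (highest E-value) domain covering that position
        clashing = [d for d in doms if d[0] <= pos < d[1]]
        doms.remove(max(clashing, key=lambda d: d[2]))
    # collapse immediate repeats of the same domain name
    names = [d[3] for d in doms]
    arrangement = [n for i, n in enumerate(names) if i == 0 or n != names[i - 1]]
    return ";".join(arrangement)

print(resolve_overlaps([(0, 50, 1e-20, "PF1"), (40, 90, 1e-3, "PF2"), (60, 120, 1e-15, "PF1")]))
# -> PF1 (the weaker PF2 hit is dropped, then the repeated PF1 collapses)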
tylerkurahashi/study
https://github.com/tylerkurahashi/study
6a5ed93337b2f8e89215c8d0fab25f7d7c0b41fa
61431a91865c57f224878fd74e47402b347a01f7
b6080b6086e98b985f129f177824ac14f4a2e841
refs/heads/main
2023-08-05T03:43:49.333723
2021-09-21T03:07:59
2021-09-21T03:07:59
348,392,101
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6527472734451294, "alphanum_fraction": 0.6615384817123413, "avg_line_length": 21.549999237060547, "blob_id": "f9ff0e53fc543078c3068a31d80ffb669b587a9a", "content_id": "256e3e5198ec3a2929fc83d58fa31bb6425f133b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 455, "license_type": "no_license", "max_line_length": 63, "num_lines": 20, "path": "/python/tornado/app.py", "repo_name": "tylerkurahashi/study", "src_encoding": "UTF-8", "text": "import tornado.ioloop\nimport tornado.web\nfrom tornado.web import RequestHandler\n\nfrom tornado.options import define, options, parse_command_line\n\n\nclass MainHandler(RequestHandler):\n def get(self):\n self.write({'message':'Hello World'})\n\ndef make_app():\n return tornado.web.Application([\n (r\"/\", MainHandler),\n ])\n\nif __name__ == \"__main__\":\n app = make_app()\n app.listen(8888)\n tornado.ioloop.IOLoop.current().start()\n " }, { "alpha_fraction": 0.460317462682724, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 10.8125, "blob_id": "ec28a5efcf895889fb16bad190faea7dab0fc86a", "content_id": "aa06a9659f265e7226bf8c9680507d3db97648a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 189, "license_type": "no_license", "max_line_length": 27, "num_lines": 16, "path": "/atCoder/abc218/a.py", "repo_name": "tylerkurahashi/study", "src_encoding": "UTF-8", "text": "import sys\n\n# stdin\nl = []\nfor e in sys.stdin:\n if e == '\\n':\n break\n else:\n l.append(e.strip('\\n'))\n\nn, p = int(l[0]), l[1]\n\nif p[n - 1] == 'o':\n print('Yes')\nelse:\n print('No')\n" }, { "alpha_fraction": 0.8297872543334961, "alphanum_fraction": 0.8404255509376526, "avg_line_length": 22.75, "blob_id": "33644e85a0e8428c032dff74570c5fed9985bbee", "content_id": "287d2920e937218b978038f25152a142ac858a0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 94, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/python/enum/enum_import.py", "repo_name": "tylerkurahashi/study", "src_encoding": "UTF-8", "text": "from enum_class import BookPrice\n\nprint(BookPrice.nodejs.value)\nprint(BookPrice.python3.value)" }, { "alpha_fraction": 0.6098360419273376, "alphanum_fraction": 0.6098360419273376, "avg_line_length": 20.85714340209961, "blob_id": "96a12f3f65d662ae8c1be440aef4167e61986ffa", "content_id": "b83442a6a134fda066fbc7508fd72babd0a1c0df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 305, "license_type": "no_license", "max_line_length": 65, "num_lines": 14, "path": "/python/Python3/Chapter_4/lambda.py", "repo_name": "tylerkurahashi/study", "src_encoding": "UTF-8", "text": "animals = ['lion', 'rabbit', 'dog', 'cat']\n\n# Without lambda\ndef print_word(words, func):\n for word in words:\n print(func(word))\n \ndef emphasize(word):\n return f\"{word}!!!\"\n\nprint_word(animals, emphasize)\n\n# With lambda\nprint_word(animals, lambda word: word.capitalize() + \"!!!!!!!!!\")" }, { "alpha_fraction": 0.46592843532562256, "alphanum_fraction": 0.47103917598724365, "avg_line_length": 16.264705657958984, "blob_id": "1264ba2b130392730b7f67c2905becfcdf0f1fa4", "content_id": "02c7848e055a04f8cdbd165603d615846a9dac30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1174, "license_type": "no_license", "max_line_length": 67, "num_lines": 68, "path": 
"/atCoder/abc218/c.py", "repo_name": "tylerkurahashi/study", "src_encoding": "UTF-8", "text": "def rot(S):\n return list(zip(*S[::-1]))\n\n\ndef find_left_top(S):\n \n for i in range(N):\n print(f'[{i}]', S[i])\n for j in range(N):\n print(S[i][j])\n if S[i][j] == '#':\n return i, j\n\n\ndef is_same(S, T):\n print('S')\n print(S)\n Si, Sj = find_left_top(S)\n print('T')\n print(T)\n Ti, Tj = find_left_top(T)\n\n print('Si, Sj',(Si, Sj))\n print('Ti, Tj',(Ti, Tj))\n\n offset_i = Ti - Si\n offset_j = Tj - Sj\n\n for i in range(N):\n for j in range(N):\n ii = i + offset_i\n jj = j + offset_j\n\n if 0 <= ii < N and 0 <= jj < N:\n if S[i][j] != T[ii][jj]:\n return False\n\n else:\n print('in the else condition')\n print((i,j))\n print(S)\n print(T)\n if S[i][j] == '#':\n print('it is false')\n return False\n\n return True\n\n\nN = int(input())\nS = [input() for _ in range(N)]\nT = [input() for _ in range(N)]\n\ncntS = sum(1 for i in range(N) for j in range(N) if S[i][j] == '#')\ncntT = sum(1 for i in range(N) for j in range(N) if T[i][j] == '#')\n\nif cntS != cntT:\n print(\"No\")\n exit()\n\nfor _ in range(4):\n if is_same(S, T):\n print(\"Yes\")\n exit()\n\n S = rot(S)\n\nprint(\"No\")\n" }, { "alpha_fraction": 0.5734265446662903, "alphanum_fraction": 0.5944055914878845, "avg_line_length": 23, "blob_id": "1ade22739a3f7ec80083eb8900f8a7ec2638c285", "content_id": "90fcaad73779086ca81da508eec94bf0ae3624bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 143, "license_type": "no_license", "max_line_length": 32, "num_lines": 6, "path": "/python/class_test/child.py", "repo_name": "tylerkurahashi/study", "src_encoding": "UTF-8", "text": "from parent import Bank\n\nclass ChildBank(Bank):\n def __init__(self, money):\n super().__init__(money)\n self.money = money + 500" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 30, "blob_id": "dda0df1e2854918e41979935a52e8d4c494ca7aa", "content_id": "0774c13dd1188ca0159fc6c94b3e6d361cee13b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 30, "license_type": "no_license", "max_line_length": 30, "num_lines": 1, "path": "/JavaScript/bundler/src/message.js", "repo_name": "tylerkurahashi/study", "src_encoding": "UTF-8", "text": "module.exports = \"Hi, There.\";" }, { "alpha_fraction": 0.5038610100746155, "alphanum_fraction": 0.5328185558319092, "avg_line_length": 18.148147583007812, "blob_id": "2675e5940d5c41cfc114e3227480400a5b46d7e0", "content_id": "b66d21791ec96fd49270c3978587b91950749b45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 518, "license_type": "no_license", "max_line_length": 59, "num_lines": 27, "path": "/nodejs/Chapter2/2-2-1.js", "repo_name": "tylerkurahashi/study", "src_encoding": "UTF-8", "text": "// setTimeout(\n// () => console.log('1 sec has passed.'),1000\n// )\n\n// console.log('setTimeout() was executed.')\n\n// ---\n\n// const array1 = [0,1,2,3]\n// const array2 = array1.map((ele) => {\n// console.log(`Converting ${ele}.`)\n// return ele*10\n// })\n\n// console.log('Conversion of array has completed.',array2)\n\n// ---\n\n// fs = require('fs')\n\n// fs.readdir('.', (err, files) => {\n// console.log('Result of fs.readdir()')\n// console.log('err', err)\n// console.log('files', files)\n// })\n\n// ---\n\n" }, { "alpha_fraction": 0.5499390959739685, "alphanum_fraction": 0.585261881351471, 
"avg_line_length": 15.595959663391113, "blob_id": "eb70267635ee040afb50f7c90abad81818e5d512", "content_id": "3330708d99136c565c073739d6998e0dc25013ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1642, "license_type": "no_license", "max_line_length": 61, "num_lines": 99, "path": "/python/Python3/Chapter_4/Questions.py", "repo_name": "tylerkurahashi/study", "src_encoding": "UTF-8", "text": "# 4-1\nguess_me = 7\nif guess_me == 7:\n print('Just right')\nelif guess_me < 7:\n print('Too low')\nelse:\n print('Too high')\n\n# 4-2\nguess_me = 7\nstart = 6\nwhile guess_me >= start:\n if guess_me == start:\n print(\"found it!\")\n break\n elif guess_me < start:\n print(\"oops!\")\n break\n else:\n print(\"Too low\")\n\n start += 1\n\n# 4-3\nnumbers = [3,2,1,0]\nfor num in numbers:\n print(num)\n\n# 4-4\nevens = [num for num in range(10) if num%2 == 0]\nprint(evens)\n\n# 4-5\nsquares = {num: num**2 for num in range(10)}\nprint(squares)\n\n# 4-6\nodd_set = {num for num in range(10) if num % 2 == 1}\nprint(odd_set)\n\n# 4-7\ngen = (f\"Got {num}\" for num in range(10))\nprint(gen)\nfor out in gen:\n print(out)\n\n# since data in the generator is consumed, output is none\nprint(\"2nd\")\nfor out in gen:\n print(out)\n\n# 4-8\ndef good():\n return ['Harry','Ron','Harmione']\n\nprint(good())\n\n# 4-9\ndef get_odds():\n for num in range(10):\n if num % 2 == 1:\n yield num\n\nodds = [num for num in get_odds()]\nprint(odds[3])\n\n# 4-10\ndef decorator(func):\n def new_func(*args, **kwargs):\n print('start')\n result = func(*args, **kwargs)\n print('end')\n return result\n return new_func\n\n@decorator\ndef add(a,b):\n print(a+b)\n return a+b\n\nadd(1,2)\n\n# 4-11\nclass OopsException(Exception):\n pass\n\nword = \"haha\"\n# raise OopsException(word)\n\n# 4-12\ntitles = ['Creature of Habit', 'Crewl Fate']\nplots = ['A nun turns into a monster', 'A haunted yarn shop']\nresult = list(zip(titles, plots))\ndictionary = {}\nfor key, value in result:\n dictionary[key] = value\n\nprint(dictionary)" }, { "alpha_fraction": 0.6734007000923157, "alphanum_fraction": 0.6734007000923157, "avg_line_length": 26.090909957885742, "blob_id": "904e0b19e3c703ff055d4496ecdafc69b77a58ec", "content_id": "71b49af8394d61e0c2f465e7955f4144b1e6c748", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 297, "license_type": "no_license", "max_line_length": 62, "num_lines": 11, "path": "/python/Python3/Chapter_4/closure.py", "repo_name": "tylerkurahashi/study", "src_encoding": "UTF-8", "text": "# Defining a function which returns a function\ndef knight(saying):\n # Inner function memorizes the variables in the locale.\n def inner():\n announce = \"We are the knights who say: '%s'\" % saying\n return announce\n return inner\n\nfunc = knight('Tyler')\nprint(func)\nprint(func())" }, { "alpha_fraction": 0.6675257682800293, "alphanum_fraction": 0.6804123520851135, "avg_line_length": 18.450000762939453, "blob_id": "87760da7beacc788a66a8264e19028a3a409f6fa", "content_id": "00f48696eb97a51b0a4fc9edeadf44942276d0e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 388, "license_type": "no_license", "max_line_length": 51, "num_lines": 20, "path": "/python/asyncio/asyncio_test.py", "repo_name": "tylerkurahashi/study", "src_encoding": "UTF-8", "text": "import asyncio\nimport random\n\n# Example of Coroutine function\nasync def main(): \n print(\"Hello ...\")\n await 
asyncio.sleep(1)\n print('... World.')\n\nasyncio.run(main())\n\n# Example of awaiting and printing Coroutine object\nasync def return_randint():\n return random.randint(0,9)\n\nasync def main_2():\n # return_randint()\n print(await return_randint())\n\nasyncio.run(main_2())" }, { "alpha_fraction": 0.5721784830093384, "alphanum_fraction": 0.5767716765403748, "avg_line_length": 30.12244987487793, "blob_id": "99a291fa3c2f7871a511bfd2057e96d9369117e9", "content_id": "b95664154d7b088f34c66bdbac51bbc9b4475045", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1524, "license_type": "no_license", "max_line_length": 76, "num_lines": 49, "path": "/python/image_splitter/image_splitter.py", "repo_name": "tylerkurahashi/study", "src_encoding": "UTF-8", "text": "import os\nimport glob\nimport argparse\nimport random\nimport cv2\nfrom PIL import Image\nimport numpy as np\nimport xml.etree.ElementTree as ET\n#-----------------------------------\n# Purpose\n# - Augment dataset by cropping images with random ratio \n# - Also generate xml files according to the original image\n# Requirement \n# - Image\n# - xml file \n#-----------------------------------\n\n# img is a numpy array\nclass Split:\n def __init__(self, img, split_px) -> None:\n self.img = img\n self.height, self.width, self.dimension = self.img.shape\n self.splitted_img = None\n self.split_px = split_px\n self.column = self.width // self.split_px + 1\n self.row = self.height // self.split_px + 1\n self.column_stride = self.width / (self.column - 1) - (split_px / 2)\n self.row_stride = self.height / (self.row - 1) - (split_px / 2)\n if split_px <= self.width and split_px <= self.height: \n self.next_range = [split_px, split_px, self.dimension] \n \n def next_range(self):\n return self.next_range\n\n def cut_image(self):\n return \n\n def move_range(self):\n return\n\nif __name__ == '__main__': \n parser = argparse.ArgumentParser() \n parser.add_argument('--dataset') \n args = parser.parse_args()\n uc = args.dataset \n xml_path = '/home/tyler/Desktop/datasets/' + uc + '/' \n img_path = '/home/tyler/Desktop/datasets/' + uc + '/'\n for f in glob.glob(xml_path + ''):\n img = np.array(Image.open(f))" }, { "alpha_fraction": 0.7818182110786438, "alphanum_fraction": 0.7818182110786438, "avg_line_length": 54, "blob_id": "a2546558de3355ffba466cf0010967caaf3e6803", "content_id": "28c1808469e3c9d4fc8b9d2cd227c2ef5ee001c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 55, "license_type": "no_license", "max_line_length": 54, "num_lines": 1, "path": "/TypeScript/ts-projects/ts-redux/README.md", "repo_name": "tylerkurahashi/study", "src_encoding": "UTF-8", "text": "## Just a practice project related to Udemy resources.\n" }, { "alpha_fraction": 0.5879120826721191, "alphanum_fraction": 0.593406617641449, "avg_line_length": 12, "blob_id": "97355884b70ca752afbcd4a4f93bc36ea2b48d30", "content_id": "fc5eb8bff42fb7ba7be1269638524370d1c2ad60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 182, "license_type": "no_license", "max_line_length": 28, "num_lines": 14, "path": "/atCoder/abc218/b.py", "repo_name": "tylerkurahashi/study", "src_encoding": "UTF-8", "text": "import sys\nimport string\n\nasc = string.ascii_lowercase\n\nl = sys.stdin.readline()\nl = l.strip('\\n').split(' ')\n\nans = ''\nfor e in l:\n e = int(e) - 1\n ans += str(asc[e])\n\nprint(ans)\n" }, { "alpha_fraction": 
0.6834862232208252, "alphanum_fraction": 0.6926605701446533, "avg_line_length": 32.230770111083984, "blob_id": "7768fc2656680354fd7feca6c6de2b0d4ecfab77", "content_id": "ccaf88867c28223bcdecf1b0a5c7bc0cc20636ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 436, "license_type": "no_license", "max_line_length": 79, "num_lines": 13, "path": "/python/SQLAlchemy/alchemy.py", "repo_name": "tylerkurahashi/study", "src_encoding": "UTF-8", "text": "from sqlalchemy import create_engine\nfrom sqlalchemy import text\nimport pymysql\n\nengine = create_engine(\"mysql+pymysql://root:mysql@localhost:3306/TESTALCHEMY\")\n# engine.connect()\n\nwith engine.connect() as connection:\n result = connection.execute(text(\"select user,user_id,age from myTable\"))\n for user,user_id,age in result:\n print(\"user_id is:\", user_id)\n print(\"user is:\", user)\n print(\"age is:\", age)\n " }, { "alpha_fraction": 0.5349794030189514, "alphanum_fraction": 0.5349794030189514, "avg_line_length": 20.727272033691406, "blob_id": "d8ce73dfcee5d2bd5975d378edc0e1b28e859199", "content_id": "c58293103f71ba9ba6d2a0c441819f332a6add8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 243, "license_type": "no_license", "max_line_length": 30, "num_lines": 11, "path": "/python/class_test/parent.py", "repo_name": "tylerkurahashi/study", "src_encoding": "UTF-8", "text": "class Bank():\n def __init__(self, money):\n self.money = money\n\n def deposit(self, price):\n self.money += price\n print(self.money)\n\n def draw(self, price):\n self.money -= price\n print(self.money)\n " }, { "alpha_fraction": 0.7105262875556946, "alphanum_fraction": 0.7269737124443054, "avg_line_length": 24.16666603088379, "blob_id": "6c8e7a674cb916fba88776ed1abbbec866c3ea17", "content_id": "e7fa0f68fa91e92a8031a3d366debac8237f508c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 304, "license_type": "no_license", "max_line_length": 75, "num_lines": 12, "path": "/python/Python3/Chapter_4/generator.py", "repo_name": "tylerkurahashi/study", "src_encoding": "UTF-8", "text": "# Defining a generator\nnumber_thing = (number for number in range(1,6))\n\n# Properly prints out the numbers\nprint(\"1st\")\nfor number in number_thing:\n print(number)\n\n# Numbers are not stored anymore since it was consumed in the 1st for loop.\nprint(\"2nd\")\nfor number in number_thing:\n print(number)\n\n\n" }, { "alpha_fraction": 0.7325227856636047, "alphanum_fraction": 0.7325227856636047, "avg_line_length": 17.33333396911621, "blob_id": "b0313e1b1fc6b20944531c1b3aeeaad0c7cc9a6e", "content_id": "f68b908e0365bb192d3ce6df442fe80d63a188b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 329, "license_type": "no_license", "max_line_length": 35, "num_lines": 18, "path": "/python/flask/app.py", "repo_name": "tylerkurahashi/study", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom pymongo import MongoClient\n\napp = Flask(__name__)\nclient = MongoClient()\nprint(client)\n\nfinace_db = client.finance\nprint(finace_db)\nsum_collection = finace_db.sum\nprint(sum_collection)\n\nhistory = sum_collection.find_one()\nprint(history)\n\[email protected]('/')\ndef hello():\n return str(history['Income'])" }, { "alpha_fraction": 0.658823549747467, "alphanum_fraction": 0.7882353067398071, "avg_line_length": 16.200000762939453, "blob_id": 
"d4413eed655d0a67ac31103faff6d5e18360dce5", "content_id": "766f565db0466c27bfa0ba7ec2fc6c7e20a2f1a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 85, "license_type": "no_license", "max_line_length": 27, "num_lines": 5, "path": "/python/class_test/class_use.py", "repo_name": "tylerkurahashi/study", "src_encoding": "UTF-8", "text": "from child import ChildBank\n\nbank = ChildBank(1000)\nbank.deposit(1000)\nbank.draw(500)" }, { "alpha_fraction": 0.6413662433624268, "alphanum_fraction": 0.6451612710952759, "avg_line_length": 23, "blob_id": "bd09017ffd8135e920f2ece5f1cd0eaed1cdfd15", "content_id": "83eb4115b7e26f494fff8cb3477ef7ca1e73d7b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 527, "license_type": "no_license", "max_line_length": 72, "num_lines": 22, "path": "/python/Python3/Chapter_4/decorator.py", "repo_name": "tylerkurahashi/study", "src_encoding": "UTF-8", "text": "'''\nDecorator is a function which takes another function as an argument and \nwhich returns a function.\n'''\n\n# Defining decorator\ndef return_document(func):\n def documented(*args, **kwargs):\n print('Running function:', func.__name__)\n print(\"Positional argument:\", args)\n print('Keyword argument:', kwargs)\n result = func(*args, **kwargs)\n print('Result:', result)\n return result\n return documented\n\n@return_document\ndef add_int(a,b):\n return a+b\n\nnum = add_int(3,5)\nprint(num)" }, { "alpha_fraction": 0.5853658318519592, "alphanum_fraction": 0.6951219439506531, "avg_line_length": 15.600000381469727, "blob_id": "0e18cb518172763db6561c9c487c1e13cd6d82c7", "content_id": "a88970d0d8aa1c833266b69d694569c5bcd8ac0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 82, "license_type": "no_license", "max_line_length": 22, "num_lines": 5, "path": "/python/enum/enum_class.py", "repo_name": "tylerkurahashi/study", "src_encoding": "UTF-8", "text": "from enum import Enum\n\nclass BookPrice(Enum):\n nodejs = 4070\n python3 = 3000" } ]
21
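The decorator examples in the study record above (decorator.py and Questions.py) work, but the wrapped function loses its original __name__ and docstring. A short sketch of the usual functools.wraps fix follows; this helper is not part of the original repo.

import functools

def logged(func):
    @functools.wraps(func)  # preserves func.__name__ and func.__doc__
    def wrapper(*args, **kwargs):
        print('Running function:', func.__name__)
        return func(*args, **kwargs)
    return wrapper

@logged
def add_int(a, b):
    """Add two integers."""
    return a + b

print(add_int(3, 5))     # prints "Running function: add_int", then 8
print(add_int.__name__)  # 'add_int' (would be 'wrapper' without functools.wraps)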
MilenaValarezo28/Ejercicios-2do-Parcial-POO-MilenaV
https://github.com/MilenaValarezo28/Ejercicios-2do-Parcial-POO-MilenaV
67931fdaecee6a71996a5295f0426a692d588491
a8bc2799c7af4ed0d5f8c7b68773a9ab6fcc32ec
012788c79a2308eccb2bf6022bb366d5ed022f6d
refs/heads/main
2023-07-28T10:56:03.051295
2021-09-13T16:17:56
2021-09-13T16:17:56
405,676,561
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5947368144989014, "alphanum_fraction": 0.6188365817070007, "avg_line_length": 29.6842098236084, "blob_id": "97b699cd3ea558cb4371d68f4727f6dff317e248", "content_id": "9acabe3846328f5deee99f031e5534ec277ca527", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3610, "license_type": "no_license", "max_line_length": 92, "num_lines": 114, "path": "/Ejercicios 2do Parcial POO Milena Valarezo/Diagrama de Facturación.py", "repo_name": "MilenaValarezo28/Ejercicios-2do-Parcial-POO-MilenaV", "src_encoding": "UTF-8", "text": "from _typeshed import Self\r\nfrom datetime import date\r\n\r\nclass Empresa:\r\n def __init__(self, nom=\"El mas Barato\", ruc=\"0999999999\", telf=\"042971234\", dir=\"Milagro\"):\r\n self.nombre= nom\r\n self.ruc= ruc\r\n self.telefono= telf\r\n self.direccion= dir\r\n \r\n def Mostrar_Empresa(self):\r\n print(\"Empresa: {:17} Ruc:{}\".format(self.nombre,self.ruc))\r\n\r\nfrom abc import ABC, abstractmethod\r\nclass Cliente(ABC):\r\n def __init__(self, nom, ced, telf):\r\n self.nombre= nom\r\n self.cedula= ced\r\n self.telefono= telf\r\n \r\n def mostrarCliente(self):\r\n print(self.nombre,self.cedula,self.telefono)\r\n \r\nclass Cliente_Corporativo(Cliente):\r\n def __init__(self, nom, ced, telf,contrato):\r\n super().__init__(self, nom, ced, telf)\r\n self._contrato= contrato\r\n\r\n @property\r\n def contrato(self): #getter: obtener el valor del atributo privado\r\n return self._contrato\r\n @contrato.setter\r\n def contrato(self,value): #setter: asigna un valor al atributo privado\r\n if value:\r\n self._contrato= value\r\n else:\r\n self._contrato= \"Sin contrato\"\r\n \r\n def mostrarCliente(self):\r\n print(self.nombre,self._contrato)\r\n\r\nclass ClientePersonal(Self):\r\n def __init__(self, nom, ced, telf, promocion=True):\r\n super().__init__(nom, ced, telf)\r\n self._promocion= promocion\r\n\r\n @property\r\n def promocion(self): #getter: obtener el valor del atributo\r\n return self._promocion\r\n @promocion.setter\r\n def promocion(self,value): #getter: obtener el valor de\r\n self._promocion= value\r\n\r\n def mostrarCliente(self):\r\n print(self.nombre,self.promocion)\r\n\r\nclass Articulo:\r\n secuencia= 0\r\n iva = 0.12\r\n def __init__(self,des,pre,sto):\r\n Articulo.secuencia += 1\r\n self.codigo= Articulo.secuencia\r\n self.descripcion= des\r\n self.precio= pre\r\n self.stock= sto\r\n def mostrarArticulo(self):\r\n print(self.codigo, self.descripcion)\r\n\r\nclass DetVenta:\r\n linea= 0\r\n def __init__(self, articulo, cantidad):\r\n DetVenta.linea += 1\r\n self.lineaDetalle= DetVenta.linea\r\n self.articulo= articulo\r\n self.precio= articulo.precio\r\n self.cantidad= cantidad\r\n\r\nclass CabVenta:\r\n def __init__(self,fac,empresa,fecha, cliente,tot=0):\r\n self.empresa= empresa\r\n self.factura= fac\r\n self.fecha= fecha\r\n self.cliente= cliente\r\n self.total= tot\r\n self.detalleVen= []\r\n\r\n def agregarDetalle(self, articulo, cantidad):\r\n detalle = DetVenta(articulo,cantidad)\r\n self.total += detalle.precio * detalle.cantidad\r\n self.detalleVen.append(detalle)\r\n \r\n def mostrarVenta(self,empNombre,empRuc):\r\n print(\"Empresa: {:17}Ruc:{}\".format(empNombre,empRuc))\r\n print(\"Factura#: {:13}Fecha: {}\".format(self.factura,self.cliente.mostrarCliente()))\r\n print(\"Linea Articulo , Precio Cantidad Subtotal\")\r\n for det in self.detalleVen:\r\n print(\"{:5} {:15} {} {:6} {:7}\".format(det.linea,))\r\n print(\"Total 
Venta:{:26}\".format(self.total))\r\n\r\nempresa= Empresa()\r\ncli1 = ClientePersonal(\"Daniel\",\"0992214888\",\"099214847\",False)\r\nprint(cli1.getCedula())\r\nart1=Articulo(\"Aceite\",3,100)\r\nart2= Articulo(\"Coca Cola\",1,200)\r\ntoday= date.today()\r\nfecha= date(2021,8,15)\r\nventa= CabVenta('F0001',date.today(),cli1)\r\nventa.agregarDetalle(art1,3)\r\nventa.agregarDetalle(art1,2)\r\nventa.mostrarVenta(empresa.nombre,empresa.ruc)\r\n\r\nclass InterfaceSistemaPago(ABC):\r\n @abstractmethod\r\n def pago(self):" }, { "alpha_fraction": 0.5558823347091675, "alphanum_fraction": 0.576764702796936, "avg_line_length": 26.58823585510254, "blob_id": "fc0d656e273908c0533cdb1ab37f9f62772da9d5", "content_id": "9ecfcdb5953d25219fe997f977f3fb00fc168b57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3400, "license_type": "no_license", "max_line_length": 113, "num_lines": 119, "path": "/Ejercicios 2do Parcial POO Milena Valarezo/Diagrama Empresa.py", "repo_name": "MilenaValarezo28/Ejercicios-2do-Parcial-POO-MilenaV", "src_encoding": "UTF-8", "text": "from datetime import date\r\n\r\n\r\nclass Empresa:\r\n def __init__(self, nom='MORTADELA', ruc='0000000', tel='000000', dir=''):\r\n self.nombre = nom\r\n self.ruc = ruc\r\n self.tel = tel\r\n self.dir = dir\r\n\r\n def mostrarEmpresa(self):\r\n print('Empresa: {:17} Ruc:{}'.format(self.nombre, self.ruc))\r\n\r\n\r\nclass Cliente:\r\n def __init__(self, nom, ced, tel):\r\n self.nombre = nom\r\n self.cedula = ced\r\n self.telefono = tel\r\n\r\n def mostrarCliente(self):\r\n print(self.nombre, self.cedula, self.telefono)\r\n\r\n\r\nclass ClienteCorporativo(Cliente):\r\n def __init__(self, nom, ced, tel, contrato):\r\n super().__init__(nom, ced, tel)\r\n self.__contrato = contrato\r\n\r\n @property\r\n def contrato(self):\r\n return self.__contrato\r\n\r\n @contrato.setter\r\n def contrato(self, value):\r\n if value:\r\n self.__contrato = value\r\n else:\r\n self.__contrato = 'Sin contrato'\r\n\r\n def mostrarCliente(self):\r\n print(self.nombre, self.__contrato)\r\n\r\n\r\nclass ClientePersonal(Cliente):\r\n def __init__(self, nom, ced, tel, promocion=True):\r\n super().__init__(nom, ced, tel)\r\n self.__promocion = promocion\r\n\r\n @property\r\n def promocion(self):\r\n if self.__promocion == True:\r\n return '10% descuento'\r\n else:\r\n return 'No hay promocion'\r\n\r\n def mostrarCliente(self):\r\n print(self.nombre, self.promocion)\r\n\r\n\r\nclass Articulo:\r\n secuencia = 0\r\n iva = 0.12\r\n\r\n def __init__(self, des, pre, sto):\r\n Articulo.secuencia += 1\r\n self.codigo = Articulo.secuencia\r\n self.descripcion = des\r\n self.precio = pre\r\n self.stock = sto\r\n\r\n def mostrararticulo(self):\r\n print(self.codigo, self.descripcion)\r\n\r\n\r\nclass DetVenta:\r\n linea = 0\r\n\r\n def __init__(self, articulo, cantidad):\r\n DetVenta.linea += 1\r\n self.lineadetalle = DetVenta.linea\r\n self.articulo = articulo\r\n self.cantidad = cantidad\r\n self.precio = articulo.precio\r\n\r\n\r\nclass CabVenta:\r\n def __init__(self, fac, fecha, cliente, tot=0):\r\n self.factura = fac\r\n self.fecha = fecha\r\n self.cliente = cliente\r\n self.total = tot\r\n self.detalleVen = []\r\n\r\n def agregardetalle(self, articulo, cantidad):\r\n detalle = DetVenta(articulo, cantidad)\r\n self.total += detalle.precio * detalle.cantidad\r\n self.detalleVen.append(detalle)\r\n\r\n def mostrarventa(self, empNombre, empRuc):\r\n print('Empresa: {:15} Ruc:{}'.format(empNombre, empRuc))\r\n print('Factura#: 
{:15}Fecha:{}'.format(self.factura, self.fecha))\r\n self.cliente.mostrarCliente()\r\n print('Linea Articulo Precio Cantidad Subtotal')\r\n for det in self.detalleVen:\r\n print('{:5} {:15} {} {:6} {:7}'.format(det.linea, det.articulo.descripcion, det.precio, det.cantidad,\r\n det.precio * det.cantidad))\r\n print('total venta : {:25}'.format(self.total))\r\n\r\n\r\nempresa = Empresa()\r\ncli1 = ClientePersonal('Erick', 1250277215, 00000, False)\r\nart1 = Articulo('Aceite', 2, 100)\r\nart2 = Articulo('Coca Cola', 3, 100)\r\ntoday = date.today()\r\nventa = CabVenta('F0001', today, cli1)\r\nventa.agregardetalle(art1, 3)\r\nventa.agregardetalle(art2, 2)\r\nventa.mostrarventa(empresa.nombre, empresa.ruc)" } ]
2
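Both billing exercises above funnel attribute access through Python's @property descriptor so a setter can validate a value before storing it. A minimal standalone sketch of that pattern follows; the Account class and its names are hypothetical and not taken from the exercises.

class Account:
    def __init__(self, balance=0):
        self._balance = balance  # conventionally private backing field

    @property
    def balance(self):  # getter: every read goes through here
        return self._balance

    @balance.setter
    def balance(self, value):  # setter: validate before assigning
        if value < 0:
            raise ValueError('balance cannot be negative')
        self._balance = value

acc = Account(100)
acc.balance = 250   # calls the setter
print(acc.balance)  # calls the getter -> 250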
Wentao795/face_torch
https://github.com/Wentao795/face_torch
03dabb3d1d1f0839b2368b4b7de919ed01c9b034
35e016cd3c0e9ba9632b7fc6c5c3da6a56266d73
01d62cc2f2bbe33110de6db3a57d41a6f8fcc630
refs/heads/master
2021-10-22T22:56:37.289412
2019-03-13T09:55:10
2019-03-13T09:55:10
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.640564501285553, "alphanum_fraction": 0.6477587223052979, "avg_line_length": 37.4361686706543, "blob_id": "3fdf9a2fb81bea678954a35fd90ad1f6950bcdcb", "content_id": "d3125d4495cd6dc61e366549ef205b25ca4daacd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3614, "license_type": "no_license", "max_line_length": 127, "num_lines": 94, "path": "/train_softmax.py", "repo_name": "Wentao795/face_torch", "src_encoding": "UTF-8", "text": "from config import config\nfrom model.model import MobileFaceNet,Am_softmax,Arcface,Softmax\nfrom torch.nn import DataParallel\nfrom dataset.dataloder import Train_DATA\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom symbols.utils import Metric\nimport os\nfrom torch.autograd import Variable\nimport torchvision\nimport torch\nimport numpy as np\n\ndef main():\n #model pack\n model = MobileFaceNet(config.embedding_size)\n model = DataParallel(model,device_ids=config.gpu_id)\n if config.loss_type == 0:\n loss_cess = Softmax()\n elif config.loss_type == 1:\n loss_cess = Arcface(config.embedding_size,config.num_classe,config.margin_s,config.margin_m)\n else:\n loss_cess = Am_softmax(config.embedding_size,config.num_classe)\n loss_cess = DataParallel(loss_cess,device_ids=config.gpu_id)\n\n train_data = Train_DATA(config.train_data)\n train_loader = DataLoader(train_data,batch_size=config.batch_size,shuffle=True,num_workers=config.num_work,pin_memory=True)\n\n criterion = nn.CrossEntropyLoss()\n criterion = DataParallel(criterion,device_ids=config.gpu_id)\n optimizer = optim.SGD(model.parameters(),lr=config.lr,momentum=config.momentum,weight_decay=config.weight_decay)\n optimizer = DataParallel(optimizer,device_ids=config.gpu_id)\n scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=20,gamma=0.1)\n\n train_loss = Metric()\n train_acc = Metric()\n\n best_precision1 = 0\n start_epoch = 0\n fold = 0\n\n if config.resume:\n checkpoint = torch.load(config.model_path)\n start_epoch = checkpoint[\"epoch\"]\n fold = checkpoint[\"fold\"]\n best_precision1 = checkpoint[\"best_precision1\"]\n model.load_state_dict(checkpoint[\"state_dict\"])\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n print(\"input data ,start run ,this is %d epoch \"%(start_epoch))\n\n if not os.path.exists(config.model_output):\n os.makedirs(config.model_output)\n\n for epoch in range(start_epoch,config.end_epoch):\n scheduler.step(epoch)\n for iter,(input,target) in enumerate(train_loader):\n model.train()\n input = Variable(input)\n target = Variable(torch.from_numpy(np.array(target)).long())\n input = DataParallel(input,device_ids=config.gpu_id)\n target = DataParallel(input,device_ids=config.gpu_id)\n\n optimizer.zero_grad()\n embeddings = model(input)\n output = loss_cess(embeddings,target)\n loss = criterion(output,target)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n output = output.data.cpu().numpy()\n output = np.argmax(output,axis=1)\n label = target.data.cpu().numpy()\n acc = np.mean((label==output).astype(int))\n train_loss.updata(loss.data.cpu().numpy(),input.size(0))\n train_acc.updata(acc,input.size(0))\n\n if iter%20 ==0:\n print(\"Add valyue loss:%.3f acc:%.3f\"%(train_loss.avg,train_acc.avg))\n\n is_best = train_acc.avg >best_precision1\n best_precision1 = max(train_acc.avg,best_precision1)\n model_savename = config.model_output+'/'+'epoch%d'%epoch+'_checkpoint.pth.tar'\n torch.save({\n \"epoch\":epoch+1,\n 
\"model_name\":config.model_name,\n \"state_dict\":model.state_dict(),\n \"best_precision1\":best_precision1,\n \"optimizer\":optimizer.state_dict(),\n \"fold\":fold,\n \"train_loss\":train_loss.avg\n },model_savename)\n\n" }, { "alpha_fraction": 0.5144429206848145, "alphanum_fraction": 0.5337001085281372, "avg_line_length": 32.09090805053711, "blob_id": "16ff0dba1d04c1d3ec120e23f9175afcb02cf96e", "content_id": "703737dc045ce0bdfd7aa318dca34e1005a789c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 727, "license_type": "no_license", "max_line_length": 54, "num_lines": 22, "path": "/dataset/dataloder.py", "repo_name": "Wentao795/face_torch", "src_encoding": "UTF-8", "text": "from PIL import Image\nfrom torchvision import transforms as T\nclass Train_DATA(object):\n def __init__(self,root):\n imgs = []\n file = open(root,'r')\n for i in file.readlines():\n temp = i.replace('\\n','').split('\\t')\n imgs.append(temp)\n self.imgs = imgs\n self.transforms = T.Compose([\n T.RandomHorizontalFlip(),\n T.ToTensor(),\n T.Normalize([0.5,0.5,0.5],[0.5,0.5,0.5])])\n def __getitem__(self, index):\n img_path = self.imgs[index][0]\n label = int(self.imgs[index][1])\n data = Image.open(img_path)\n data = self.transforms(data)\n return data,label\n def __len__(self):\n return len(self.imgs)" }, { "alpha_fraction": 0.5682593584060669, "alphanum_fraction": 0.57337886095047, "avg_line_length": 29.894737243652344, "blob_id": "3fa40b4ebf19df9b31052eabce5a17dc423ead5a", "content_id": "07dda3de416901bc0884e27fafe0e10fe3a74391", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 586, "license_type": "no_license", "max_line_length": 69, "num_lines": 19, "path": "/dataset/path.py", "repo_name": "Wentao795/face_torch", "src_encoding": "UTF-8", "text": "from config import config\nimport os\ndef main():\n srcfloder = config.train_path\n outFile = open(config.train_path.split('/')[-1],'w')\n childFolders = os.listdir(srcfloder)\n num = 0\n for childfloder in childFolders:\n secondfile = srcfloder + '/' + childfloder\n allFiles = os.listdir(secondfile)\n for fileline in allFiles:\n print(num)\n imgfile = secondfile + '/' + fileline +'\\t'+str(num)+'\\n'\n outFile.write(imgfile)\n outFile.flush()\n num += 1\n outFile.close()\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.5375974178314209, "alphanum_fraction": 0.5797246098518372, "avg_line_length": 37.32986068725586, "blob_id": "4d228b7c988a983eb8f69f91a7edb943f78b830f", "content_id": "c369025c40942928bbbea79f460095753442838c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11038, "license_type": "no_license", "max_line_length": 161, "num_lines": 288, "path": "/model/model.py", "repo_name": "Wentao795/face_torch", "src_encoding": "UTF-8", "text": "from torch.nn import Linear,Conv2d,BatchNorm1d,BatchNorm2d,PReLU,ReLU,Sigmoid,Dropout2d,Dropout,AvgPool2d,MaxPool2d,AdaptiveAvgPool2d,Sequential,Module,Parameter\nimport torch.nn.functional as F\nimport torch\nimport math\nimport pdb\nfrom collections import namedtuple\nclass Flatten(Module):\n def forward(self,input):\n return input.view(input.size(0),-1)\n\ndef l2_norm(input,axis=1):\n #axis grap row so axis = 0 replace cel,kuahang.\n norm = torch.norm(input,2,axis,True)#(x,l2 norm,kualieqiu,baozhiweidububian)\n output = torch.div(input,norm)#a / |a|\n return output\n\n\n##input attetion??\nclass SEModule(Module):\n def 
__init__(self,channels,reduction):\n super(SEModule,self).__init__()\n self.avg_pool = AdaptiveAvgPool2d(1)\n self.fc1 = Conv2d(channels,channels // reduction,kernel_size=1,padding=0,bias=False)\n self.relu = ReLU(inplace=True)\n self.fc2 = Conv2d(channels // reduction,channels,kernel_size=1,padding=0,bias=False)\n self.sigmoid = Sigmoid()\n\n def forward(self, x):\n module_input = x\n x = self.avg_pool(x)\n x = self.fc1(x)\n x = self.relu(x)\n x = self.fc2(x)\n x = self.sigmoid(x)\n return module_input * x\n\nclass bottleneck_IR(Module):\n def __init__(self,in_channel,depth,stride):\n super(bottleneck_IR,self).__init__()\n if in_channel == depth:\n self.shorcut_layer = MaxPool2d(1,stride)\n else:\n self.shorcut_layer = Sequential(\n Conv2d(in_channel,depth,(1,1),stride,bias=False),\n BatchNorm2d(depth)\n )\n self.res_layer = Sequential(\n BatchNorm2d(in_channel),\n Conv2d(in_channel,depth,(3,3),(1,1),1,bias=False),\n PReLU(depth),\n Conv2d(depth,depth,(3,3),stride,1,bias=False),\n BatchNorm2d(depth)\n )\n\n def forward(self, x):\n shortcut = self.shorcut_layer(x)\n res = self.res_layer(x)\n return res + shortcut\n\nclass bottleneck_IR_SE(Module):\n def __init__(self,in_channel,depth,stride):\n super(bottleneck_IR_SE,self).__init__()\n if in_channel == depth:\n self.shortcut_layer = MaxPool2d(1,stride)\n else:\n self.shortcut_layer = Sequential(\n Conv2d(in_channel,depth,(1,1),stride,bias=False),\n BatchNorm2d(depth)\n )\n self.res_layer = Sequential(\n BatchNorm2d(in_channel),\n Conv2d(in_channel,depth,(3,3),(1,1),1,bias=False),\n PReLU(depth),\n Conv2d(depth,depth,(3,3),stride,1,bias=False),\n BatchNorm2d(depth),\n SEModule(depth,16)\n )\n\n def forward(self, x):\n shortcut = self.shortcut_layer(x)\n res = self.res_layer(x)\n return res + shortcut\n\nclass Bottleneck(namedtuple('Block',['in_channel','depth','stride'])):\n \"\"\"\"\"\"\n\ndef get_block(in_channel,depth,num_units,stride = 2):\n return [Bottleneck(in_channel,depth,stride)] + [Bottleneck(depth,depth,1) for i in range(num_units - 1)]\n\ndef get_blocks(num_layers):\n if num_layers == 50:\n blocks = [\n get_block(in_channel=64,depth=64,num_units=3),\n get_block(in_channel=64, depth=128, num_units=4),\n get_block(in_channel=128, depth=256, num_units=14),\n get_block(in_channel=256, depth=512, num_units=3),\n ]\n elif num_layers == 100:\n blocks = [\n get_block(in_channel=64, depth=64, num_units=3),\n get_block(in_channel=64, depth=128, num_units=13),\n get_block(in_channel=128, depth=256, num_units=30),\n get_block(in_channel=256, depth=512, num_units=3),\n ]\n elif num_layers == 152:\n blocks = [\n get_block(in_channel=64, depth=64, num_units=3),\n get_block(in_channel=64, depth=128, num_units=8),\n get_block(in_channel=128, depth=256, num_units=36),\n get_block(in_channel=256, depth=512, num_units=3),\n ]\n return blocks\n\nclass Backbone(Module):\n def __init__(self,num_layers,drop_ration,mode='ir'):\n super(Backbone,self).__init__()\n assert num_layers in [50,100,152]\n assert mode in ['ir','ir_se']\n blocks = get_blocks(num_layers)\n if mode == 'ir':\n unit_module = bottleneck_IR\n elif mode == 'ir_se':\n unit_module = bottleneck_IR_SE\n self.input_layer = Sequential(\n Conv2d(3,64,(3,3),1,1,bias=False),\n BatchNorm2d(64),\n PReLU(64)\n )\n self.output_layer = Sequential(\n BatchNorm2d(512),\n Dropout(drop_ration),\n Flatten(),\n Linear(512*7*7,512),\n BatchNorm1d(512)\n )\n\n modules = []\n for block in blocks:\n for bottleneck in block:\n modules.append(unit_module(bottleneck.in_channel,\n bottleneck.depth,\n 
bottleneck.stride))\n self.body = Sequential(*modules)\n\n def forward(self, x):\n x = self.input_layer(x)\n x = self.body(x)\n x = self.output_layer(x)\n return l2_norm(x)\n\nclass Conv_block(Module):\n def __init__(self,in_c,out_c,kernel=(1,1),stride=(1,1),padding=(1,1),groups=1):\n super(Conv_block,self).__init__()\n self.conv = Conv2d(in_c,out_channels=out_c,kernel_size=kernel,groups=groups,stride=stride,padding=padding,bias=False)\n self.bn = BatchNorm2d(out_c)\n self.prelu = PReLU(out_c)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.prelu(x)\n return x\n\nclass Linear_block(Module):\n def __init__(self,in_c,out_c,kernel=(1,1),stride=(1,1),padding=(0,0),groups=1):\n super(Linear_block,self).__init__()\n self.conv = Conv2d(in_c,out_channels=out_c,kernel_size=kernel,groups=groups,stride=stride,padding=padding,bias=False)\n self.bn = BatchNorm2d(out_c)\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n return x\nclass Depth_Wise(Module):\n def __init__(self,in_c,out_c,residual=False,kernel=(3,3),stride=(2,2),padding=(1,1),groups=1):\n super(Depth_Wise,self).__init__()\n self.conv = Conv_block(in_c,out_c=groups,kernel=(1,1),padding=(0,0),stride=(1,1))\n self.conv_dw = Conv_block(groups,groups,groups=groups,kernel=kernel,padding=padding,stride=stride)\n self.project = Linear_block(groups,out_c,kernel=(1,1),padding=(0,0),stride=(1,1))\n self.residual = residual\n def forward(self, x):\n if self.residual:\n short_cut = x\n x = self.conv(x)\n x = self.conv_dw(x)\n x = self.project(x)\n if self.residual:\n output = short_cut + x\n else:\n output = x\n return output\n\nclass Residual(Module):\n def __init__(self,c,num_block,groups,kernel=(3,3),stride=(1,1),padding=(1,1)):\n super(Residual,self).__init__()\n modules = []\n for _ in range(num_block):\n modules.append(Depth_Wise(c,c,residual=True,kernel=kernel,padding=padding,stride=stride,groups=groups))\n self.model = Sequential(*modules)\n def forward(self, x):\n return self.model(x)\n\nclass MobileFaceNet(Module):\n def __init__(self,embedding_size):\n super(MobileFaceNet,self).__init__()\n self.conv1 = Conv_block(3,64,kernel=(3,3),stride=(2,2),padding=(1,1))\n self.conv2_dw = Conv_block(64,64,kernel=(3,3),stride=(1,1),padding=(1,1),groups=64)\n self.conv_23 = Depth_Wise(64,64,kernel=(3,3),stride=(2,2),padding=(1,1),groups=128)\n self.conv_3 = Residual(64,num_block=4,groups=128,kernel=(3,3),stride=(1,1),padding=(1,1))\n self.conv_34 = Depth_Wise(64,128,kernel=(3,3),stride=(2,2),padding=(1,1),groups=256)\n self.conv_4 = Residual(128,num_block=6,groups=256,kernel=(3,3),stride=(1,1),padding=(1,1))\n self.conv_45 = Depth_Wise(128,128,kernel=(3,3),stride=(2,2),padding=(1,1),groups=512)\n self.conv_5 = Residual(128,num_block=2,groups=256,kernel=(3,3),stride=(2,2),padding=(1,1))\n self.conv_6_sep = Conv_block(128,512,kernel=(1,1),stride=(1,1),padding=(0,0))\n self.conv_6_dw = Linear_block(512,512,groups=512,kernel=(7,7),stride=(1,1),padding=(0,0))\n self.conv_6_flatten = Flatten()\n self.linear = Linear(512,embedding_size,bias=False)\n self.bn = BatchNorm1d(embedding_size)\n def forward(self, x):\n out = self.conv1(x)\n out = self.conv2_dw(out)\n out = self.conv_23(out)\n out = self.conv_3(out)\n out = self.conv_34(out)\n out = self.conv_4(out)\n out = self.conv_45(out)\n out = self.conv_5(out)\n out = self.conv_6_sep(out)\n out = self.conv_6_dw(out)\n out = self.conv_6_flatten(out)\n out = self.linear(out)\n out = self.bn(out)\n return l2_norm(out)\n\nclass Arcface(Module):\n def 
__init__(self,embedding_size=512,classnum=51332,s=64,m=0.5):\n super(Arcface,self).__init__()\n self.classnum = classnum\n self.kernel = Parameter(torch.Tensor(embedding_size,classnum))\n self.kernel.data.uniform_(-1,1).renorm_(2,1,1e-5).mul_(1e5)\n self.m = m\n self.s = s\n self.cos_m = math.cos(m)\n self.sin_m = math.sin(m)\n self.mm = self.sin_m*m\n self.threshold = math.cos(math.pi - m)\n def forward(self, embedding,label):\n nB = len(embedding)\n kernel_norm = l2_norm(self.kernel,axis=0)\n cos_theta = torch.mm(embedding,kernel_norm)\n cos_theta = cos_theta.clamp(-1,1)\n cos_theta_2 = torch.pow(cos_theta,2)\n sin_theta_2 = 1 - cos_theta_2\n sin_theta = torch.sqrt(sin_theta_2)\n cos_theta_m = (cos_theta*self.cos_m-sin_theta*self.sin_m)\n cond_v = cos_theta - self.threshold\n cond_mask = cond_v <= 0\n keep_val = (cos_theta - self.mm)\n cos_theta_m[cond_mask] = keep_val[cond_mask]\n output = cos_theta*1.0\n idx_ = torch.arange(0,nB,dtype=torch.long)\n output[idx_,label] = cos_theta_m[idx_,label]\n output *= self.s\n return output\n\nclass Am_softmax(Module):\n def __init__(self,embedding_size=512,classnum=51332):\n super(Am_softmax,self).__init__()\n self.classnum = classnum\n self.kernel = Parameter(torch.Tensor(embedding_size,classnum))\n self.kernel.data.uniform_(-1,1).renorm_(2,1,1e-5).mul_(1e5)\n self.m = 0.35\n self.s = 30\n def forward(self, embbedings,label):\n kernel_norm = l2_norm(self.kernel,axis=0)\n cos_theta = torch.mm(embbedings,kernel_norm)\n cos_theta = cos_theta.clamp(-1,1)\n phi = cos_theta - self.m\n lable = label.view(-1,1)\n index = cos_theta.data *0.0\n index.scatter_(1,label.data.view(-1,1),1)\n index = index.byte()\n output = cos_theta * 1.0\n output[index] = phi[index]\n output *=self.s\n return output\n\nclass Softmax(Module):\n pass" }, { "alpha_fraction": 0.5009862184524536, "alphanum_fraction": 0.5759368538856506, "avg_line_length": 22.090909957885742, "blob_id": "54faeea2b66644b26b0171993bed27b52c6329f0", "content_id": "3bff6ef45de03e5192a307cc0cc42ef40fb87059", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 507, "license_type": "no_license", "max_line_length": 55, "num_lines": 22, "path": "/config.py", "repo_name": "Wentao795/face_torch", "src_encoding": "UTF-8", "text": "import os\nclass Defaultconfig(object):\n train_path = ''\n train_data = './dataset/'+train_path.split('/')[-1]\n embedding_size = 128\n loss_type = 0 # 0 softmaxe 1 arcface 3 am_softmax\n num_classe = 180000\n margin_s = 64\n margin_m = 0.5\n gpu_id = [0,1,2,3]\n lr = 0.1\n momentum = 0.9\n weight_decay = 5e-4\n batch_size = 512\n num_work = 128\n resume = 0\n model_path = ''\n model_output = ''\n end_epoch = 100\n model_name = 'face_mobile'\n\nconfig = Defaultconfig()" } ]
5
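The Arcface module in model.py above applies an additive angular margin to the true-class logit. A minimal NumPy sketch of that margin step, written for illustration only; the shapes and random inputs are stand-ins, not values from the repo:

import numpy as np

def arcface_logits(embeddings, weights, labels, s=64.0, m=0.5):
    # L2-normalize embeddings (rows) and class weights (columns)
    emb = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)
    w = weights / np.linalg.norm(weights, axis=0, keepdims=True)
    cos_theta = np.clip(emb @ w, -1.0, 1.0)
    # cos(theta + m) via the angle-addition identity, as in Arcface.forward
    sin_theta = np.sqrt(1.0 - cos_theta ** 2)
    cos_theta_m = cos_theta * np.cos(m) - sin_theta * np.sin(m)
    logits = cos_theta.copy()
    rows = np.arange(len(labels))
    logits[rows, labels] = cos_theta_m[rows, labels]  # margin only on the true class
    return s * logits  # scale by s before softmax / cross-entropy

# illustrative call: 4 embeddings of size 8, 10 classes
print(arcface_logits(np.random.randn(4, 8), np.random.randn(8, 10),
                     np.array([0, 3, 7, 1])).shape)  # (4, 10)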
hu-guanwei/personas
https://github.com/hu-guanwei/personas
6cf29315c0a03f2bd62e95568761ec4383df8eab
fb04b46ba50fb0a0bd4c9b7e9d5eb449cad228fd
9f56a8e202e546c2fbce96347d8ab79af194eda1
refs/heads/master
2020-04-06T11:44:48.844836
2019-04-30T04:43:49
2019-04-30T04:43:49
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6385542154312134, "alphanum_fraction": 0.6385542154312134, "avg_line_length": 6.2727274894714355, "blob_id": "6a0e1ce9b7e066b396579e7255332e5de71d907e", "content_id": "bb20bf9654135e49f107052c370cc1c666cdaf2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 103, "license_type": "no_license", "max_line_length": 21, "num_lines": 11, "path": "/readme.md", "repo_name": "hu-guanwei/personas", "src_encoding": "UTF-8", "text": "\n\n用户词向量相似性度量\n\n![simi](png/simi.png)\n\n\n\nfrequent word cloud\n\n\n\n![top](png/top.png)\n\n" }, { "alpha_fraction": 0.5473372936248779, "alphanum_fraction": 0.5554733872413635, "avg_line_length": 29.727272033691406, "blob_id": "cb8c902f4366f76ec3aff812cf59dd40af4acebe", "content_id": "a6d6e1d07e48d14fe8f57028f935450adc00e7c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1352, "license_type": "no_license", "max_line_length": 93, "num_lines": 44, "path": "/utils/lsh.py", "repo_name": "hu-guanwei/personas", "src_encoding": "UTF-8", "text": "from collections import defaultdict\nfrom itertools import combinations\nfrom numba import jit, njit\nimport numpy as np\n\ndef prob_hit(s, r, b):\n return 1 - (1 - s ** r) ** b\n\nclass localSensitiveHash(object):\n \n def __init__(self, X):\n self._X = X\n self._n_rows, self._n_cols = X.shape\n \n @jit\n def random_proejction(self, r, b):\n v = np.random.uniform(-1, 1, (self._n_cols, r * b))\n return np.sign((self._X @ v).T)\n \n @jit\n def bucket_bands(self, signature_matrix, P, r, b):\n res = []\n for i in range(b):\n r_a = np.random.randint(low=1, high=P, size=(1, r))\n r_b = np.random.randint(low=1, high=P, size=(r, 1))\n res.append(((r_a @ (signature_matrix[i * r: (i + 1) * r, :] + r_b)) % P).ravel())\n return np.array(res).astype(int)\n\n\n @jit\n def inverted_index(self, bucket_bands):\n inverted_index = defaultdict(set)\n for key in range(bucket_bands.shape[1]):\n for value in set(bucket_bands[:, key]):\n inverted_index[value].add(key)\n return inverted_index\n\n @jit\n def find_pairs(self, inverted_index):\n pairs = set()\n for key in inverted_index.keys():\n users = sorted(inverted_index[key])\n pairs.update(tuple(combinations(users, 2)))\n return pairs\n" }, { "alpha_fraction": 0.6099939346313477, "alphanum_fraction": 0.6538695693016052, "avg_line_length": 15.06862735748291, "blob_id": "f6a353247e3b92855b26b78198581b6d0d8a94c0", "content_id": "887d8bbf268f5cd5f92db1e32684fb637563af80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1641, "license_type": "no_license", "max_line_length": 100, "num_lines": 102, "path": "/utils/find_similar.py", "repo_name": "hu-guanwei/personas", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport faiss\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\n\nword2vec_df = pd.read_csv('word2vec.csv', header=None)\nword2vec_df.head()\n\n\n# In[3]:\n\n\n# mean imputation\nmean_vec = np.mean(word2vec_df.dropna(axis=0).values, axis=0)\nword2vec_df.iloc[32618, :] = mean_vec\nword2vec_df.iloc[87646, :] = mean_vec\n\n# C-style contiguous\nword2vec_arr = np.ascontiguousarray(word2vec_df.values.astype(np.float32))\n\n# l2 normalization\nfaiss.normalize_L2(word2vec_arr)\n\n\n# In[4]:\n\n\ndef random_pairs(X, n_sample):\n # sample index pairs\n random_id = np.random.randint(low=0, high=len(X), size=(n_sample, 2))\n return random_id\n\ndef 
cosine_similarity(x1, x2):\n    return np.dot(x1, x2) / (np.linalg.norm(x1) * np.linalg.norm(x2))\n\n\n# In[5]:\n\n\n# distance distribution\npairs = random_pairs(word2vec_arr, 5000)\nsim_sample = [cosine_similarity(word2vec_arr[id1, :], word2vec_arr[id2, :]) for (id1, id2) in pairs]\nplt.hist(sim_sample, bins=int(np.sqrt(len(sim_sample))))\nplt.show()\n\n\n# In[6]:\n\n\nindex = faiss.IndexFlatIP(word2vec_arr.shape[1])\nprint(index.is_trained)\nindex.add(word2vec_arr)\nprint(index.ntotal)\n\n\n# In[7]:\n\n\nk = 20\nxq = word2vec_arr\nD, I = index.search(xq, k)\n\n\n# In[8]:\n\n\nprint(I[:5])\nprint('...')\nprint(I[-5:])\n\n\n# In[9]:\n\n\nprint(D[:5])\nprint('...')\nprint(D[-5:])\n\n\n# In[10]:\n\n\n# filtered distance distribution\nplt.hist(D[:, 1:].ravel(), bins=int(np.sqrt(len(D[:, 1:].ravel()))))\nplt.show()\n\n\n# In[11]:\n\n\nplt.hist(sim_sample, bins=int(np.sqrt(len(sim_sample))), density=True)\nplt.hist(D[:, 1:].ravel(), bins=int(np.sqrt(len(D[:, 1:].ravel()))), density=True)\nplt.show()\n\n" } ]
3
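The banding scheme in utils/lsh.py above hashes r-row bands of the signature matrix into b independent bucket tables, and the file's own prob_hit function gives the chance that a pair whose signatures agree on a fraction s of rows collides in at least one band. A quick standalone look at that S-curve; the r and b values here are illustrative, not tuned for the repo's data:

def prob_hit(s, r, b):
    # probability of at least one band collision: 1 - (1 - s^r)^b
    return 1 - (1 - s ** r) ** b

for s in (0.2, 0.5, 0.8, 0.9):
    print(s, round(prob_hit(s, r=4, b=16), 3))
# low-similarity pairs almost never collide while high-similarity pairs
# almost always do, which is what makes the buckets a cheap candidate filter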
Amrutha-Veedhi/Python
https://github.com/Amrutha-Veedhi/Python
26656ca37656ac34191c292f9ba5435bbb6ddd4f
a6ce8d6187207c0f5527b9109b4c29c4ac717d76
7de8dd5610f415597e6b8cc53eb523aef5f970dd
refs/heads/master
2020-03-29T02:16:53.007314
2018-10-31T09:03:25
2018-10-31T09:03:25
149,428,162
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5972762703895569, "alphanum_fraction": 0.6011673212051392, "avg_line_length": 20.375, "blob_id": "357bc99d75d92813720d96d00a318c7cfc1c9352", "content_id": "c60507709ee40aa3336788a7ffa6b5aee5e83948", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 514, "license_type": "no_license", "max_line_length": 58, "num_lines": 24, "path": "/readingErrorLogsInFile.py", "repo_name": "Amrutha-Veedhi/Python", "src_encoding": "UTF-8", "text": "# python 3.6\n# This program will print line(s) which contains error log\n\nimport os\nimport re\n\nmyFile = 'F:\\\\My Python Scripts\\\\error.txt'\n\ndef RegEx(file):\n openFile = open(myFile, 'r') \n myResList = []\n myList = openFile.readlines()\n myReg = re.compile(r'error')\n for i in (myList):\n result = myReg.search(i)\n if result != None:\n myResList.append(i)\n final = '\\n'.join(myResList)\n return final\n\nprint()\nprint('Results:')\nprint(RegEx(myFile))\nprint()\n\n" }, { "alpha_fraction": 0.4117647111415863, "alphanum_fraction": 0.4388125240802765, "avg_line_length": 46.79999923706055, "blob_id": "0dba5b62538d9d977f08de888b7b15f320950496", "content_id": "e146e3aa53c775fe66b8fe0b6521dd469c99eeb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9131, "license_type": "no_license", "max_line_length": 291, "num_lines": 190, "path": "/specialNumbers.py", "repo_name": "Amrutha-Veedhi/Python", "src_encoding": "UTF-8", "text": "#python 3.6\n\"\"\"\nThe final project consists of multiple portions. The first three parts consists of writing functions and the final part consists of sorting a list of numbers using those functions. They are as follows:\n\nPart 1\nA palindrome is a number which reads the same forward and backwards.\nTo illustrate:\n10101,131,1331,1225221,2 are examples of palindromes\n122,100,19 are examples of numbers which are not palindromes.\nWrite a function that given a number determines if the number is a palindrome or not. Your function should take in a number and return “yes” or “no” depending if it’s a palindrome or not.\n\nPart 2\nWrite a function that given a number determines if the number is near prime. To understand what is near prime let us first understand what a prime number is.\nA number is prime is it is only divisible by 1 and itself.\n2,5,13,23 are examples of prime numbers.\n4,22,100,60 are examples of numbers which are not prime numbers.\nConsider a number such as 49. It is not a prime number because it is divisible by 7. However, it is not divisible by anything else apart from 1, 7 and 49.\nWe call a number near prime if it is divisible by 1, itself and at most 2 other numbers. To illustrate:\n49 is near prime as only 1,7 and 49 divide it.\n22 is near prime as only 1,2,11,22 divide it.\n4 is near prime as only 1,2,4 divide it.\n60 is not near prime as 1,2,3,5,6,10,12,20,30,60 divide it.\n17 is near prime as only 1,17 divide it\n20 is not near prime as 1,2,4,5,10,20 divide it.\nWrite a function which determines if a number is near prime or not.\nYour function should take in a number and return “yes” or “no” depending if it’s a near prime or not.\n\nPart 3\nWe call a number an up number if the digits of the number increase (or stay the same) as we read the number. To illustrate:\n123 is an up number\n2577 is an up number\n598 is not an up number\nSimilarly, in a down number the digits of the number decrease (or stay the same). 
To illustrate\n321 is a down number\n775 is a down number\n123 is not a down number.\nFinally combining the two we define an updown number. In an updown number the digits initially increase (or stay the same) and then they decrease (or stay the same). Once they have started to decrease they cannot increase again.\nTo illustrate\n123431 is an updown number\n4577852 is an updown number\n123758 is not an updown number (because the digits increased and then decreased but then increased again)\n4789089 is not an updown number.\nWe consider a number “nice” if it is either an up, a down or an updown number.\nWrite a function which given a number determines if it is a nice number. Your function should take in a number and return “yes” if it is an up, a down or an updown number. Your function should return “no” otherwise. That is to say if the number is nice it should return yes and no otherwise.\n\nPart 4.\nYou will be given a list of numbers. Each number is assigned a score. Its score is determined as follows:\nA number’s base score is the value of the number itself.\nIf the number is a palindrome its score is doubled.\nIf the number is a prime its score is doubled.\nIf the number is a nice number its score is tripled.\nConsider how to determine the score of a number 89.\nIt is not a palindrome but it is nice and near prime:\nBase score: 89\nScore after palindrome check: 89 (remains same as not a palindrome)\nScore after prime check: 178 (doubled as 89 is prime)\nScore after nice check: 534 (tripled as 89 is nice)\nConsider the number 101:\nIt is a palindrome, it is prime but it is not a nice number:\nBase score: 101\nScore after palindrome check: 202 (doubled as it’s a palindrome)\nScore after prime check: 404 (doubled as 101 is a prime)\nScore after nice check: 404 (not tripled as 101 is not nice)\nGiven a list of numbers entered by the user output the list of numbers in sorted order (ascending) of their scores.\n\n\"\"\"\ndef isPalindrome(num):\n    try:\n        if isinstance(num,int):\n            if num == int(str(num)[::-1]):\n                ptr = \"Yes\"\n            else:\n                ptr = \"No\"\n            return ptr\n    except:\n        print(\"{} is not a number\".format(num))\ndef nearPrime(num):\n    try:\n        l=[]\n        if isinstance(num,int):\n            for i in range(1,num):\n                if num%i==0:\n                    l.append(i)\n            count=len(l)\n            # divisors found here exclude num itself, so a near prime has\n            # 1 plus at most 2 other divisors, i.e. count <= 3\n            if count <=3:\n                ptr = \"Yes\"\n            else:\n                ptr = \"No\"\n            return ptr\n    except:\n        print(\"{} is not a number\".format(num))\n    \ndef isNiceNumber(num):\n    try:\n        l=[]\n        l2=[]\n        l3=[]\n        l5=[]\n        if isinstance(num,int): \n            s=str(num)\n            for ss in s:\n                l.append(ss)\n            l1=list(map(int, l))\n            \n            for i in range(len(l1)-1):\n                if l1[i]<=l1[i+1]:\n                    l2.append(\"Yes\")\n                else:\n                    l2.append(\"No\")\n            \n            if all(x==\"Yes\" for x in l2):\n                ptr = \"Yes\"\n                \n            else:\n                ptr = \"No\"\n            if ptr == \"Yes\":\n                return ptr\n            \n            else:\n                for i in range(len(l1)-1):\n                    if l1[i]>=l1[i+1]:\n                        l3.append(\"Yes\")\n                    else:\n                        l3.append(\"No\")\n                \n                if all(x==\"Yes\" for x in l3):\n                    ptr = \"Yes\"\n                else:\n                    ptr = \"No\"\n                if ptr == \"Yes\":\n                    return ptr\n                else:\n                    ind = l2.index(\"No\")\n                    \n                    for i in range(ind,len(l1)-1):\n                        if l1[i]>=l1[i+1]:\n                            l5.append(\"Yes\")\n                        else:\n                            l5.append(\"No\")\n                    \n                    if all(x==\"Yes\" for x in l5):\n                        ptr = \"Yes\"\n                    \n                    else:\n                        ptr = \"No\"\n                    return ptr\n    except:\n        print(\"{} is not a number\".format(num))\n\ndef score(li):\n    try:\n        if (isinstance(li,list) and all(isinstance(x, int) for x in li)):\n            l1=[]\n            for x in li:\n                s = x  # base score is the number itself\n                if isPalindrome(x)==\"Yes\":\n                    s = 2*s\n                if nearPrime(x)==\"Yes\":\n                    s = 2*s\n                if isNiceNumber(x)==\"Yes\":\n                    s = 3*s\n                l1.append(s)\n            l1=sorted(l1)\n            return l1\n    except:\n        print(\"{} is not a list of numbers\".format(li))\n    \n" } ]
2
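A self-contained recheck of the Part 4 scoring rules against the worked examples in the docstring above (89 scores 534 and 101 scores 404). The three predicates below are compact re-implementations written for this check, not the repo's own functions:

def is_palindrome(n):
    return str(n) == str(n)[::-1]

def is_near_prime(n):
    # 1, n itself, and at most 2 other divisors: at most 4 divisors in total
    return sum(1 for d in range(1, n + 1) if n % d == 0) <= 4

def is_nice(n):
    # up, down, or updown: non-decreasing up to the first maximal digit,
    # non-increasing afterwards
    ds = [int(c) for c in str(n)]
    peak = ds.index(max(ds))
    return (ds[:peak + 1] == sorted(ds[:peak + 1])
            and ds[peak:] == sorted(ds[peak:], reverse=True))

def score(n):
    s = n
    if is_palindrome(n):
        s *= 2
    if is_near_prime(n):
        s *= 2
    if is_nice(n):
        s *= 3
    return s

assert score(89) == 534 and score(101) == 404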
tjacek/hsne
https://github.com/tjacek/hsne
dee33f307938f7ff284a056159143d7357c21ff7
e4bdb2986e117bb05fa39711e62e8fa8885c7bd8
d1a1e012b0eda9aa4a73e19e0c6c773ffcc408a0
refs/heads/master
2020-03-20T01:31:53.758298
2018-06-30T22:11:35
2018-06-30T22:11:35
137,079,131
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.6794049143791199, "alphanum_fraction": 0.6852391958236694, "avg_line_length": 33.98979568481445, "blob_id": "1affbe81542a32feab9334832542772fed353d93", "content_id": "bee0c2274d969080b0646888589ef89837df348c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3428, "license_type": "no_license", "max_line_length": 79, "num_lines": 98, "path": "/hsne.py", "repo_name": "tjacek/hsne", "src_encoding": "UTF-8", "text": "import os,time\nimport utils,knn,markov,tsne,plot\nimport numpy as np\nfrom scipy import sparse\nfrom knn import NNGraph\nfrom sklearn.datasets import fetch_mldata\n\n#def make_dataset(dataset_name=\"MNIST original\",out_path=\"mnist_d/imgs\"):\n# dataset=utils.downsample_dataset(dataset_name)\n# utils.save_as_img(dataset,out_path)\n\n#def make_graph(dataset_name=\"mnist_d/imgs\",out_path=\"mnist_d/nn_graph\",k=100):\n# dataset=utils.read_as_dataset(dataset_name)\n# print(\"dataset loaded\")\n# knn.save_nn_graph(dataset,out_path)\n\ndef prepare_hsne(graph_path='mnist_d/nn_graph',\n scale_path='mnist_d/scale1'):\n os.mkdir(scale_path)\n trans=scale_path+ \"/trans.txt\"\n states=scale_path+ \"/states.txt\"\n nn_graph=knn.read_nn_graph(graph_path)\n print(\"nn graph loaded\")\n t0=time.time()\n mc=markov.make_eff_markov_chain(nn_graph)\n print(\"markov chain constructed %d\" % (time.time()-t0))\n mc.save(trans,states)\n\ndef hsne(dataset_name=\"MNIST original\",\n scale_path='mnist_d/scale1',\n weights_in=None):\n\n landmarks,sparse_pairs=load_hsne(scale_path)\n \n W=get_weights(weights_in,sparse_pairs)\n T,W_next=tsne.compute_t(landmarks,sparse_pairs,W)\n t_embd=time.time()\n embd=tsne.create_embedding(T)\n print(\"embeding created %d\" % (time.time() - t_embd))\n mnist = fetch_mldata(dataset_name)\n plot.plot_embedding(embd,mnist.target,landmarks,title=\"beta_threshold=1.5\")\n save_hsne(T,embd,W_next,scale_path)\n\ndef load_hsne(scale_path):\n landmark_file= scale_path+\"/landmarks.txt\"\n print(landmark_file)\n landmarks=utils.read_ints(landmark_file)\n print(\"landmarks loaded\")\n influence_file=scale_path+\"/influence.txt\"\n sparse_pairs=utils.read_pairs(influence_file)\n print(\"pairs loaded %d\" % len(sparse_pairs))\n return landmarks,sparse_pairs\n\ndef save_hsne(T,embd,W_next,scale_path):\n t_file=scale_path+\"/T.txt\"\n weights_out=scale_path+\"/W.txt\"\n utils.save_object(T,t_file)\n utils.save_array(W_next,weights_out)\n embd_file=scale_path+\"/embd\"\n utils.save_object(embd,embd_file)\n\ndef next_iter(in_scale=\"mnist_d/scale1\",out_scale=\"mnist_d/scale2\" ):\n# os.mkdir(out_scale)\n landmarks,trans=load_iter(in_scale)\n\n trans=markov.to_cum_matrix(trans) \n states_str=\",\".join([ str(l) for l in landmarks])\n save_iter(trans,states_str,out_scale)\n\ndef load_iter(in_scale):\n landmark_file=in_scale+\"/landmarks.txt\"\n landmarks=utils.read_ints(landmark_file)\n print(\"landmarks loaded\")\n t_file=in_scale+\"/T.txt\"\n trans=utils.read_object(t_file)#np.loadtxt(t_file,delimiter=',')\n print(\"trans matrix loaded\")\n return landmarks,trans\n\ndef save_iter(trans,states_str,out_scale):\n trans_file=out_scale+\"/trans.txt\"\n utils.save_array(trans,trans_file)\n states_file=out_scale+\"/states.txt\"\n utils.save_str(states_str,states_file) \n\ndef get_weights(weights_in,sparse_pairs):\n if(weights_in is None):\n n_points=len(sparse_pairs)\n W=sparse.dok_matrix(np.ones((n_points,1)),dtype=np.float32)\n else:\n weights_file=weights_in+\"/W.txt\"\n 
W=np.loadtxt(weights_file,delimiter=',')\n W=np.expand_dims(W,axis=1)\n W=sparse.dok_matrix(W)\n return W\n\n#prepare_hsne(graph_path='mnist_pca/graph',scale_path='mnist_pca/scale1')\n#hsne(scale_path=\"mnist_pca/scale1\",weights_in=None)#\"mnist/scale1\")\nnext_iter(in_scale=\"mnist_pca/scale1\",out_scale=\"mnist_pca/scale2\")" }, { "alpha_fraction": 0.6087388396263123, "alphanum_fraction": 0.6160211563110352, "avg_line_length": 31.84782600402832, "blob_id": "20cf1f37843133503cca299fe55265e2285cbcb2", "content_id": "dcc9ce34fd705abd1920b5e56b83c1ae6399eb82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3021, "license_type": "no_license", "max_line_length": 69, "num_lines": 92, "path": "/utils.py", "repo_name": "tjacek/hsne", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\nimport pickle,os,re\n#from sets import Set \nfrom scipy.sparse import dok_matrix\nimport sklearn.datasets.base\nfrom sklearn.datasets import fetch_mldata\n\ndef downsample_dataset(dataset_name,factor=10):\n dataset=fetch_mldata(dataset_name)\n examples=[ example_i\n for i,example_i in enumerate(dataset.data)\n if((i % factor) ==0)]\n target = [ example_i\n for i,example_i in enumerate(dataset.target)\n if((i % factor) ==0)]\n return sklearn.datasets.base.Bunch(data=examples, target=target)\n\ndef save_as_img(dataset,out_path,new_shape=(28,28),selected=None):\n if(not selected is None):\n selected=Set(selected)\n def save_helper(i,img_i): \n img_i=np.reshape(img_i,new_shape)\n cat_i=str(int(dataset.target[i]))\n name_i=out_path+'/'+str(i)+'_'+ cat_i +'.png'\n cv2.imwrite(name_i,img_i)\n print(name_i)\n for i,img_i in enumerate(dataset.data):\n if((selected is None) or (i in selected)):\n save_helper(i,img_i)\n\ndef read_as_dataset(in_path):\n def read_helper(filename_i):\n img_path_i=in_path+'/'+filename_i \n img_i=cv2.imread(img_path_i,0)\n print(img_i.shape)\n img_i=img_i.flatten()\n cat_i=int(extract_int(filename_i)[1])\n return img_i,cat_i\n imgs=[ read_helper(filename_i)\n for filename_i in os.listdir(in_path)]\n data=np.array([ img_i[0] for img_i in imgs])\n target=np.array([ img_i[1] for img_i in imgs])\n return sklearn.datasets.base.Bunch(data=data, target=target)\n\ndef extract_int(str_i):\n return re.findall('\\d+', str_i ) \n \ndef read_ints(filename):\n with open(filename) as f:\n raw_ints = f.readlines()\n return [ int(raw_i) for raw_i in raw_ints] \n\ndef save_str(txt,out_path):\n text_file = open(out_path, \"w\")\n text_file.write(txt)\n text_file.close()\n\ndef save_array(arr,out_path,prec='%.4e'):\n np.savetxt(out_path, arr, fmt=prec, delimiter=',', newline='\\n')\n\ndef save_object(nn,path):\n file_object = open(path,'wb')\n pickle.dump(nn,file_object)\n file_object.close()\n\ndef read_object(path):\n file_object = open(path,'rb')\n obj=pickle.load(file_object) \n file_object.close()\n return obj\n\ndef to_sparse_matrix(sparse_pairs,n_states,n_landmarks):\n infl_matrix=dok_matrix((n_states, n_landmarks), dtype=np.float32)\n for i,pairs_i in enumerate(sparse_pairs):\n for j,value_j in pairs_i:\n infl_matrix[i,j]=value_j\n return infl_matrix \n\ndef read_pairs(filename):\n with open(filename) as f:\n lines = f.readlines()\n def parse_pair(pair):\n key,value=pair.split(\",\")\n return int(key),float(value)\n def parse_line(line):\n pairs=line.split(\")(\")\n pairs[0]=pairs[0].replace(\"(\",\"\")\n pairs[-1]=pairs[-1].replace(\")\",\"\")\n return [ parse_pair(pair_i) for pair_i in pairs]\n sparse_pairs=[parse_line(line_i) for 
line_i in lines]\n return sparse_pairs" }, { "alpha_fraction": 0.6207672357559204, "alphanum_fraction": 0.6291390657424927, "avg_line_length": 28.426469802856445, "blob_id": "b46aca5f785ecb7787699c767d7f23a730ee84e7", "content_id": "5c8ad6e57ea82518b44f90f3e575a08c0859841a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 8003, "license_type": "no_license", "max_line_length": 104, "num_lines": 272, "path": "/landmarks.cpp", "repo_name": "tjacek/hsne", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <fstream>\n#include <string>\n#include <sstream>\n#include <vector>\n#include <random>\n#include <set>\n#include <map>\nusing namespace std;\n\nstd::random_device rd; //Will be used to obtain a seed for the random number engine\nstd::mt19937 gen(rd()); //Standard mersenne_twister_engine seeded with rd()\nstd::uniform_real_distribution<> dis(0.0, 1.0);\n\nclass MarkovChain{\n public:\t\n\tMarkovChain( vector<vector<int>> states,vector<vector<double>> trans);//,bool raw_states);\n//\t~MarkovChain();\n vector<vector<int> > states;\n vector<vector<double> > trans;\n int n_states;\n int n_dims;\n bool raw_states;\n int next_state(int current_state);\n vector<int> find_landmarks(int beta,int theta,double threshold_factor);\n vector<map<int,int>> compute_influence(vector<int> landmarks,int beta);\n vector<int> get_landmark_indexs(vector<int> landmarks);\n};\n\nvoid save_landmarks(const char* filename,vector<int> landmarks);\nvoid save_influence(const char* filename,vector<map<int,int>> influence);\nvector<vector<int> > to_int(vector<vector<string> > raw_strings);\nvector<vector<double> > to_double(vector<vector<string> > raw_strings);\nvector<vector<string> > read_file(const char* filename);\nvector<string> split(const string &s, char delim);\n\nMarkovChain::MarkovChain( vector<vector<int> > states,vector<vector<double>> trans){//,bool raw_states){\n this->states=states;\n this->trans=trans;\n this->n_states= trans.size();\n this->n_dims= trans[0].size();\n this->raw_states= (states.size()==1);\n cout << \"N_states\" << this->n_states << endl;\n};\n\nvector<int> MarkovChain::find_landmarks(int beta,int theta,double threshold_factor){\n \tint histogram[this->n_states];\n \tfor(int i=0;i<this->n_states;i++){\n histogram[i]=0; \t\t\n \t}\n\n \tfor(int state=0;state<this->n_states;state++){\n if( (state % 100) == 0){\n cout << state << endl;\n }\n for(int i=0;i<beta;i++){\n \tint current_state=state;\n for(int j=0;j<theta;j++){\n \t int raw_state=this->next_state(current_state);\n if(this->raw_states){\n current_state=raw_state;\n }else{\n current_state=this->states[current_state][raw_state];\n }\t\n \t }\n \t histogram[current_state]+=1;\t\n \t }\t\n \t}\n\n int landmark_threshold=threshold_factor*beta; \n \tvector<int> landmarks;\n cout << landmark_threshold << \" AAAA\" << endl;\n \tint outliners=0;\n for(int i=0;i<this->n_states;i++){\n if( histogram[i] > landmark_threshold){\n cout << histogram[i] << endl;\n //if(this->raw_states){\n // landmarks.push_back(this->states[0][i] );\n //}else{\n landmarks.push_back(i);\n //}\n }\n\t if(histogram[i]==0){\n outliners++;\n }\n \t}\n cout << \"Number of landmarks \" << landmarks.size() << endl;\n cout << \"Number of outliners \" << outliners << endl;\n \treturn landmarks;\n }\n\nvector<map<int,int>> MarkovChain::compute_influence(vector<int> landmarks,int beta){\n vector<map<int,int>> influence;\n set<int> landmark_set(landmarks.begin(), landmarks.end());\n map<int,int> landmark_dict;\n 
for(int l=0;l<landmarks.size();l++){\n landmark_dict[landmarks[l]]=l;\n }\n for(int i=0; i<this->n_states;i++){\n if( (i% 500)==0){\n cout << i <<endl;\n }\n map<int,int> histogram;\n for (int j=0; j<beta; j++){\n int current_state = i;\n set<int>::iterator result;\n result=landmark_set.find(current_state); \n while(result==landmark_set.end()){\n int raw_state=this->next_state(current_state);\n if(this->raw_states){\n cout << \"raw state\" << raw_state << endl;\n current_state=raw_state; \n cout << \"current state\" << raw_state << endl;\n\n }else{\n current_state=this->states[current_state][raw_state]; \n }\n result=landmark_set.find(current_state); \n }\n int landmark_index=landmark_dict[current_state];\n \n if(histogram.count(landmark_index) == 1){\n histogram[landmark_index]+=1;\n }else{\n histogram[landmark_index]=1;\n }\n }\n influence.push_back(histogram);\n }\n return influence;\n}\n\nvector<int> MarkovChain::get_landmark_indexs(vector<int> landmarks){\n if(!this->raw_states){\n return landmarks;\n }\n vector<int> landmark_indexes;\n for(int i=0;i<landmarks.size();i++){\n int index=this->states[0][landmarks[i]];\n landmark_indexes.push_back(index);\n }\n return landmark_indexes;\n}\n\nint MarkovChain::next_state(int current_state){\n double rand_real=dis(gen);\n for(int i=0;i<this->n_dims;i++){\n if(rand_real < this->trans[current_state][i]){\n \treturn i;\n }\n }\n return (this->n_dims-1);\n}\n\nvoid save_landmarks(const char* filename,vector<int> landmarks){\n ofstream myfile;\n myfile.open(filename);\n for(int i=0;i<landmarks.size();i++){\n myfile << landmarks[i] <<\"\\n\";\n }\n myfile.close();\n}\n\nvoid save_influence(const char* filename,vector<map<int,int>> influence){\n ofstream myfile;\n myfile.open(filename);\n int n_states=influence.size();\n for(int i=0;i<n_states;i++){\n map<int,int> histogram=influence[i];\n for (map<int,int>::iterator it=histogram.begin(); it!=histogram.end(); ++it){\n myfile << \"(\" << it->first << \",\" << it->second << \")\";\n\n }\n myfile <<\"\\n\";\n }\n myfile.close();\n}\n\nvector<vector<int> > to_int(vector<vector<string> > raw_strings){\n vector<vector<int> > result;\n int n_samples=raw_strings.size();\n int dim=raw_strings[0].size();\n std::string::size_type sz;\n for (int i=0;i<n_samples;i++){\n vector<int> sample;\n for(int j=0;j<dim;j++){\n// cout << raw_strings[i][j] << endl;\t\n int value_ij= std::stoi(raw_strings[i][j],&sz);\n// cout << value_ij << endl;\n sample.push_back(value_ij);\n\n }\n result.push_back(sample);\t\n }\n return result;\n}\n\nvector<vector<double> > to_double(vector<vector<string> > raw_strings){\n vector<vector<double> > result;\n int n_samples=raw_strings.size();\n int dim=raw_strings[0].size();\n std::string::size_type sz;\n for (int i=0;i<n_samples;i++){\n vector<double> sample;\n for(int j=0;j<dim;j++){\n double value_ij= std::stod(raw_strings[i][j],&sz);\n sample.push_back(value_ij);\n\n }\n result.push_back(sample);\t\n }\n return result;\n}\n\nvector<vector<string> > read_file(const char* filename){\n ifstream infile(filename);\n string line;\n vector<vector<string> > result;\n while (std::getline(infile, line)) {\n vector<string> splited= split(line,',');\n result.push_back(splited);\n }\n return result;\n}\n\nvector<string> split(const string &s, char delim) {\n stringstream ss(s);\n string item;\n vector<std::string> elems;\n while (std::getline(ss, item, delim)) {\n elems.push_back(item);\n }\n return elems;\n}\n\nint is_norm(vector<vector<double>> trans){\n int value=0;\n for(int 
i=0;i<trans.size()-1;i++){\n        double prob=0;\n        for(int j=0; j<trans[i].size()-1;j++){\n            double current=trans[i][j];\n            double next=trans[i][j+1];\n            prob+= (next - current);\n            if(prob>1.0){\n                value++;\n            }\n        }\n        //cout << prob << endl;\n    }\n    return value; \n}\n\nint main () {\n    int beta=100;\n    int theta=50;\n    float threshold=1.5;\n    const char* trans_path=\"mnist_pca/scale2/trans.txt\";\n    const char* states_path=\"mnist_pca/scale2/states.txt\";\n    const char* landmarks_path=\"mnist_pca/scale2/landmarks.txt\";\n    const char* influence_path=\"mnist_pca/scale2/influence.txt\";\n    vector<vector<string>> raw_trans=read_file(trans_path);\n    vector<vector<double>> trans=to_double(raw_trans);\n    cout <<\"TRANS MATRIX CORRECTNESS \" << is_norm(trans) <<endl;\n    vector<vector<string>> raw_states=read_file(states_path);\n    vector<vector<int>> states=to_int(raw_states);\n    MarkovChain mc(states,trans);\n    vector<int> landmarks=mc.find_landmarks(beta,theta,threshold);\n    save_landmarks(landmarks_path,mc.get_landmark_indexs(landmarks));\n    cout << \"landmarks saved\" << endl;\n    vector<map<int,int>> influence=mc.compute_influence(landmarks,beta); \n    save_influence(influence_path,influence);\n}" }, { "alpha_fraction": 0.6701298952102661, "alphanum_fraction": 0.6805194616317749, "avg_line_length": 28.64102554321289, "blob_id": "e94e449eb948a50a74cefcd0be27aaf3c2827b01", "content_id": "7b937a057e9675fe39b855a7df819169544f824f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1155, "license_type": "no_license", "max_line_length": 83, "num_lines": 39, "path": "/knn.py", "repo_name": "tjacek/hsne", "src_encoding": "UTF-8", "text": "from sklearn.datasets import fetch_mldata\nfrom sklearn.neighbors import LSHForest,NearestNeighbors\nimport time\nimport markov,utils\n\nclass NNGraph(object):\n    def __init__(self,names,distances,target):\n        self.names=names\n        self.distances=distances\n        self.target=target\n\n    def __len__(self):\n        return len(self.names)\n    \n    def __getitem__(self,i):\n        return self.names[i],self.distances[i]\n\ndef make_nn_graph(dataset,k=100):\n    nbrs = NearestNeighbors(n_neighbors=k, algorithm='ball_tree').fit(dataset.data)\n#    nbrs=LSHForest(n_estimators=20, n_candidates=200,n_neighbors=k).fit(X)\n    distances, indices = nbrs.kneighbors(dataset.data)\n    print(indices.shape)\n    return NNGraph(indices,distances,dataset.target)\n\ndef read_nn_graph(in_path):\n    t0=time.time()\n    nn_graph=utils.read_object(in_path)\n    print(time.time()-t0)\n    return nn_graph\n\ndef save_nn_graph(data,out_path):\n    t0=time.time()\n    nn_graph=make_nn_graph(data)\n    print(time.time()-t0)\n    utils.save_object(nn_graph,out_path)\n\nif __name__ == \"__main__\": \n    dataset=fetch_mldata(\"MNIST original\")\n    save_nn_graph(dataset,\"mnist/graph\")" }, { "alpha_fraction": 0.7361111044883728, "alphanum_fraction": 0.7361111044883728, "avg_line_length": 13.600000381469727, "blob_id": "22f94584ebf860a207c56540002cd8c7e4f5095f", "content_id": "d9cc8ab8bcd1a44a0570a9622f5eca6dd1db7d2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 72, "license_type": "no_license", "max_line_length": 48, "num_lines": 5, "path": "/markov.c", "repo_name": "tjacek/hsne", "src_encoding": "UTF-8", "text": "#include <Python.h>\n\n/* module-level CPython functions take (self, args) and must return a PyObject* */\nstatic PyObject* find_landmarks(PyObject* self, PyObject* args){\n    Py_RETURN_NONE;  /* stub: landmark search not implemented yet */\n}" }, { "alpha_fraction": 0.6610565781593323, "alphanum_fraction": 0.6718092560768127, "avg_line_length": 30.940298080444336, "blob_id": "f47b433d7de061b37bd37545eaa50fecba2a0292", "content_id": 
"714737f1262617b76ac21b729b0f1811e8d050d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2139, "license_type": "no_license", "max_line_length": 105, "num_lines": 67, "path": "/tsne.py", "repo_name": "tjacek/hsne", "src_encoding": "UTF-8", "text": "from sklearn.datasets import fetch_mldata\nimport time\nimport knn,markov\nfrom knn import NNGraph\nimport utils\nfrom sklearn.manifold import TSNE\nimport numpy as np\nimport plot\n\ndef compute_t(landmarks,sparse_pairs,W): \n infl_matrix=make_influence_matrix(landmarks,sparse_pairs)\n t_comp=time.time()\n T=markov.get_prob_matrix(infl_matrix,W)\n print(\"T matrix computed %d\" % (time.time() - t_comp))\n print(T.shape)\n def norm_helper(row):\n row/=sum(row)\n return row\n T=np.array([norm_helper(t_i) for t_i in T])\n print(\"norm %d\" % check_norm(T))\n W_next=(W.transpose()*infl_matrix).todense()\n print(\"W_next\"+str(type(W_next)))\n print(W_next.shape)\n # W_next=np.expand_dims(W_next,axis=0)\n # print(W_next.shape)\n return T,W_next\n\ndef make_influence_matrix(landmarks,sparse_pairs):\n n_landmarks=len(landmarks)\n print(\"Number of landmarks %d\" % n_landmarks)\n t_sparse=time.time()\n n_states=len(sparse_pairs)\n infl_matrix=utils.to_sparse_matrix(sparse_pairs,n_states,n_landmarks)\n print(\"sparse matrix created %d\" % ( time.time()- t_sparse))\n norm_const=infl_matrix[0].sum()\n infl_matrix/=norm_const\n print(\"Norm const %d\" % norm_const)\n return infl_matrix\n\ndef check_norm(T):\n s=np.sum(T,axis=1)\n for s_i in s:\n if( (1.0-s_i)>0.01 ):\n return False\n return sum(s) \n\ndef create_embedding(trans):\n\n P=trans.T +trans\n norm_const=2.0 * float(trans.shape[0])\n P/=norm_const\n embd=TSNE(n_components=2,perplexity=20).fit_transform(P) \n return embd\n\ndef select_landmarks(dataset,in_file='landmarks.txt',out_file='landmarks'):\n landmarks=utils.read_ints(in_file)\t\n utils.save_as_img(dataset.data,dataset.target,out_path=out_file,new_shape=(28,28),selected=landmarks)\n\ndef compute_influence(graph_path,landmark_file):\n nn_graph=knn.read_nn_graph(graph_path)\n print(\"nn graph loaded\")\n mc=markov.make_eff_markov_chain(nn_graph)\n print(\"markov chain built\")\n landmarks=utils.read_ints(landmark_file)\n t0=time.time()\n markov.compute_influence(mc,landmarks,beta=100)\n print(\"Time %d\" % (time.time() - t0))" }, { "alpha_fraction": 0.6685367226600647, "alphanum_fraction": 0.675549328327179, "avg_line_length": 35.27118682861328, "blob_id": "8c8ef3ef996e10398d193fc46a369f9ffe36e733", "content_id": "f5bb4ba3e26f7fce12f44a07ecd61a06910c5d0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2139, "license_type": "no_license", "max_line_length": 100, "num_lines": 59, "path": "/reconstruct.py", "repo_name": "tjacek/hsne", "src_encoding": "UTF-8", "text": "import numpy as np \nimport tsne,utils,plot\nfrom sklearn.datasets import fetch_mldata\n\ndef reconstruct(matrix_path,embd_path,out_path):\n embd =utils.read_object(embd_path)\n infl=utils.read_object(matrix_path)\n n_points=infl.shape[0]\n n_embd=embd.shape[0]\n def recon_helper(i):\n print(i)\n embd_weighted=np.array([ infl[i,j]*embd[j] for j in range(n_embd)])\n rec_i=np.sum(embd_weighted,axis=0)\n print(rec_i.shape)\n return rec_i\n reconstruction=np.array([recon_helper(i) for i in range(n_points)])\n utils.save_object(reconstruction,out_path)\n\ndef make_embd(scale_path=\"mnist/scale1\"):\n in_file=scale_path+\"/T.txt\"\n out_file=scale_path+\"/emd\"\n 
trans=utils.read_object(in_file)\n T=tsne.create_embedding(trans)\n utils.save_object(T,out_file)\n\ndef show_embd(scale_path=\"mnist/scale1\",dataset_name=\"MNIST original\",threshold=1.5,embd_path=None):\n if(embd_path is None):\n in_file=scale_path+\"/emd\"\t\n else:\n in_file=embd_path\t\n X=utils.read_object(in_file)\n landmark_file= scale_path+\"/landmarks.txt\"\n landmarks=utils.read_ints(landmark_file)\n mnist = fetch_mldata(dataset_name)\n title=\"beta_threshold=\"+str(threshold)\n plot.plot_embedding(X,mnist.target,landmarks,title=title)\n\ndef rec_matrix(scale_paths,out_path):\n def inf_matrix(scale_i):\n landmarks=utils.read_ints(scale_i +\"/landmarks.txt\") \n sparse_pairs=utils.read_pairs(scale_i +\"/influence.txt\") \n return tsne.make_influence_matrix(landmarks,sparse_pairs)\n\n infl_matrixs=[inf_matrix(scale_i)\n for scale_i in scale_paths]\n rec_matrix=infl_matrixs[0]\n for infl_i in infl_matrixs[1:]:\n rec_matrix=rec_matrix*infl_i\t\n utils.save_object(rec_matrix.todense(),out_path)\n #print(rec_matrix.shape)\n #for infl_i in infl_matrixs:\n # print(infl_i.shape)\n\n#make_embd(scale_path=\"/mnist/scale1\")\n#show_embd()\nscales=[\"mnist/scale1\",\"mnist/scale2\",\"mnist/scale3\"]\nrec_matrix(scales,\"mnist/rec_\")\n#reconstruct(\"mnist/rec\",\"mnist/scale2/emd\",out_path=\"mnist/embd\")\n#show_embd(scale_path=\"mnist/scale1\",embd_path=\"mnist/embd\")" }, { "alpha_fraction": 0.6947705149650574, "alphanum_fraction": 0.7033084034919739, "avg_line_length": 31.34482765197754, "blob_id": "b899ba22e5e4462943d6c7ae8064b3c5cf7c6106", "content_id": "de5fc2af35ef26d82d5e76aedaad2513f2ffc270", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 937, "license_type": "no_license", "max_line_length": 64, "num_lines": 29, "path": "/preproc.py", "repo_name": "tjacek/hsne", "src_encoding": "UTF-8", "text": "import time\nimport numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn.datasets import fetch_mldata\nfrom sklearn.datasets.base import Bunch\nimport knn\n\ndef pca_preproc(dataset_name=\"MNIST original\"):\n dataset=fetch_mldata(dataset_name)\n n_dim=dataset.data.shape[1]\n transform=PCA(n_components=n_dim)\n t0=time.time()\n transformed=transform.fit_transform(dataset.data)\n print(\"PCA transform %d\" % (time.time()-t0))\n n_feats=find_suff_size(transform.explained_variance_ratio_ )\n reduced=transformed[:,:n_feats]\n return Bunch(data=reduced,target=dataset.target)\n\ndef find_suff_size(expl_variance,threshold=0.95):\n var=0.0\n for i,var_i in enumerate(expl_variance):\n var+=var_i\n if(var>=threshold):\n \treturn i\n return len(list(expl_variance))\n\nif __name__ == \"__main__\": \n dataset=pca_preproc(dataset_name=\"MNIST original\")\n knn.save_nn_graph(dataset,\"mnist_pca/graph\")" }, { "alpha_fraction": 0.5970684885978699, "alphanum_fraction": 0.6039485335350037, "avg_line_length": 30.847618103027344, "blob_id": "13a599c21d7bc5486f287f4f86b0a4255ba8c194", "content_id": "4d7d73265c2e31dfad03904736382d6f222ad7bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3343, "license_type": "no_license", "max_line_length": 69, "num_lines": 105, "path": "/markov.py", "repo_name": "tjacek/hsne", "src_encoding": "UTF-8", "text": "import numpy as np\nimport knn,utils\n#from sets import Set\nimport random\n\nclass EffMarkovChain(object):\n def __init__(self, trans,states):\n self.trans=trans\n self.states=states\n self.n_states=trans.shape[0]\n 
self.k=trans.shape[1]\n\n    def get_states(self):\n        return range(self.n_states)\n\n    def __call__(self,beta,theta,start_state):\n        result=np.zeros((beta,),dtype=int)\n        for s in range(beta):\n            current_state=start_state \n            for t in range(theta):\n                i=self.next_state(current_state)\n                current_state=self.states[current_state][i]  # advance the walk\n            result[s]=current_state  # record where the walk ended\n        return result\n\n    def next_state(self,state_i):\n        r=random.random()\n        for j in range(self.k):\n            if(r<self.trans[state_i][j]):\n                return j\n        return self.k\n\n    def seek_landmark(self,start,landmarks):\n        current_state=start\n        while(not (current_state in landmarks)):\n            j=self.next_state(current_state)\n            current_state=self.states[current_state][j]\n        return current_state\n\n    def save(self,trans_file='trans.txt',states_file='states.txt'):\n        utils.save_array(self.trans,trans_file) \n        utils.save_array(self.states,states_file,prec='%i') \n        \ndef make_eff_markov_chain(nn_graph):\n    trans=[]\n    states=[]\n    for i in range(len(nn_graph)):\n        names_i,distances_i=nn_graph[i]\n        sigma_i=np.min(distances_i[distances_i!=0]) \n        dist_i=np.exp(-distances_i/sigma_i)  # similarity kernel: nearer neighbours get higher probability\n        dist_i/=np.sum(dist_i)\n        dist_i=np.cumsum(dist_i)\n        trans.append(dist_i)\n        states.append(names_i)\n    return EffMarkovChain(np.array(trans),np.array(states))\n\ndef find_landmarks(markov_chain,beta=100,theta=50,beta_theshold=3.0):\n    states=markov_chain.get_states()\n    hist=np.zeros((len(states),))\n    for state_i in states:\n        if( state_i % 10 ==0):\n            print(state_i)\n        end_states=markov_chain(beta,theta,state_i)\n        for end_state_i in end_states:\n            hist[end_state_i]+=1\n    treshold=beta_theshold*beta\n    landmarks=[ i \n                for i,hist_i in enumerate(hist)\n                if(hist_i>treshold)]\n    return landmarks\n\ndef compute_influence(markov_chain,landmarks,beta=50): \n    n_states=len(markov_chain.get_states())\n    n_landmarks=len(landmarks)\n    infl_matrix=np.zeros((n_states,n_landmarks),dtype=float)\n    landmark_dict={ landmark_i:i \n                    for i,landmark_i in enumerate(landmarks)}\n    landmarks=set(landmarks)\n    for state_i in range(n_states):\n        print(state_i)\n        for j in range(beta):\n            end_state=markov_chain.seek_landmark(state_i,landmarks)\n            landmark_index=landmark_dict[end_state]\n            infl_matrix[state_i][landmark_index]+=1.0\n    infl_matrix/=float(beta)\n    return infl_matrix\n\ndef get_prob_matrix(infl_matrix,W):\n    weighted_infl=infl_matrix.multiply(W)\n    sp=weighted_infl.transpose()*infl_matrix\n    T=sp.toarray()\n    print(T.shape)\n    return T\n\ndef to_cum_matrix(matrix):\n    const=np.sum(matrix,axis=1)\n    const=1.0/const\n    print(\"T\")\n    n_dist=const.shape[0]\n    prob=np.array([const[i]* row_i \n                   for i,row_i in enumerate(matrix)])\n    prob=np.cumsum(prob,axis=1)\n    return prob\n    \nif __name__ == \"__main__\": \n    # the old make_markov_chain entry point was replaced by make_eff_markov_chain,\n    # which takes an already-loaded NNGraph (see knn.py) rather than a file path\n    pass" }, { "alpha_fraction": 0.5433647036552429, "alphanum_fraction": 0.5621734857559204, "avg_line_length": 27.147058486938477, "blob_id": "a218dfdc936ac200c11c97c89200837cf0210b42", "content_id": "930d7c2f6b75e6dbd513e3b4d0617d80a81b7345", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 957, "license_type": "no_license", "max_line_length": 58, "num_lines": 34, "path": "/plot.py", "repo_name": "tjacek/hsne", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nfrom matplotlib import offsetbox\nimport numpy as np\n\ndef plot_embedding(X,cats,landmarks,title=None):\n#    print(landmarks)\n    n_points=X.shape[0]\n    fraction=get_fraction(n_points)\n    print(\"Fraction %d\" % fraction)\n    y = [cats[l] \n        for l in landmarks]\n    print(\"Unique categories\")\n    print(np.unique(y))\n    x_min, x_max = np.min(X, 0), 
np.max(X, 0)\n X = (X - x_min) / (x_max - x_min)\n\n plt.figure()\n ax = plt.subplot(111)\n for i in range(n_points):\n if( (i%fraction) == 0):\n plt.text(X[i, 0], X[i, 1], str(y[i]),\n color=plt.cm.Set3( float(y[i]) / 10.),\n fontdict={'weight': 'bold', 'size': 9})\n\n plt.xticks([]), plt.yticks([])\n if title is not None:\n plt.title(title)\n plt.show()\n\ndef get_fraction(n_points,max_points=3000):\n if(n_points>max_points):\n return int(n_points/max_points)\n else:\n return 1\n" } ]
10
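Both markov.py and landmarks.cpp above store each transition row as cumulative probabilities and pick the next state by scanning until a uniform draw is exceeded. That sampling step can also be vectorized; a small standalone sketch with an illustrative transition row, not the repo's data:

import numpy as np

rng = np.random.default_rng(0)
probs = np.array([0.1, 0.2, 0.4, 0.3])        # one illustrative transition row
cum = np.cumsum(probs)                        # what trans[state] holds in the repo
draws = rng.random(100_000)
next_states = np.searchsorted(cum, draws)     # first index whose cum value exceeds the draw
print(np.bincount(next_states) / len(draws))  # approaches [0.1, 0.2, 0.4, 0.3]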
Valerij92/telebot
https://github.com/Valerij92/telebot
43ed790da3c236bb61e6c432bc88ace92e65a1ba
5b15c3e20804f0e8eb85bf3635eea8475441fdc4
9c7b2515ebf1239084d0df65d9b3753179a5d128
refs/heads/master
2022-04-14T03:02:56.796123
2020-04-12T07:44:30
2020-04-12T07:44:30
254,853,258
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7878788113594055, "alphanum_fraction": 0.7878788113594055, "avg_line_length": 15.5, "blob_id": "26885f787ff79d42d236ccb9ae1465c7652b274a", "content_id": "5bf2464cd1a704f927d8e3033974f0678f6ed5d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 33, "license_type": "no_license", "max_line_length": 22, "num_lines": 2, "path": "/README.md", "repo_name": "Valerij92/telebot", "src_encoding": "UTF-8", "text": "# telebot\nsimle bot for telegram\n" }, { "alpha_fraction": 0.6783784031867981, "alphanum_fraction": 0.7189189195632935, "avg_line_length": 29, "blob_id": "1b4cf9647e9dffdab32653fc5e1a8567e30c49c8", "content_id": "93ced7088c945faf9ee8571222d5fafaddf9732a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 388, "license_type": "no_license", "max_line_length": 111, "num_lines": 12, "path": "/tel bot.py", "repo_name": "Valerij92/telebot", "src_encoding": "UTF-8", "text": "import telebot\r\n\r\nbot = telebot.TeleBot('758839187:AAEgXFt5DfKnUrUuQg74t3kSdN1CFt1lBjE')\r\n\r\n\r\[email protected]_handler(commands=['start'])\r\ndef start(message):\r\n send_mess = f\"<b>Здарова {message.from_user.first_name} {message.from_user.last_name}</b>!\\nЧто тебе надо?\"\r\n bot.send_message(message.chat.id.send_mess, parse_mode='html')\r\n\r\n\r\nbot.polling(none_stop= True)" }, { "alpha_fraction": 0.5257731676101685, "alphanum_fraction": 0.6288659572601318, "avg_line_length": 31, "blob_id": "a3c1b46743ebddefece3b6d32b9f10516c27d462", "content_id": "755e6c76ad738d47e3c31e4d76903cffbf91293b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 97, "license_type": "no_license", "max_line_length": 46, "num_lines": 3, "path": "/config.py", "repo_name": "Valerij92/telebot", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n# this token from my other telebot (see on gh)\r\ntoken ='758839187:AAEgXFt5DfKnUrUuQg74t3kSdN1CFt1lBjE'" } ]
3
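tel bot.py above hardcodes the same token string that config.py already defines; a common cleanup is to keep the secret in config.py only and import it from there. A sketch under that assumption, where config refers to the repo's own config.py:

import telebot
from config import token

bot = telebot.TeleBot(token)  # the token now lives in a single place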
witonskyzc/26-TkinterAndMQTT-201930
https://github.com/witonskyzc/26-TkinterAndMQTT-201930
3b21e5854fcf70cded33731bece42debabd2cb3b
a6f395b800852fc52b8d8e5c9a2035403de00d7c
448581377080fb3df85934f5744beddeec23a9e7
refs/heads/master
2020-05-20T14:45:21.454255
2019-05-08T15:41:19
2019-05-08T15:41:19
185,629,255
0
0
null
2019-05-08T15:06:29
2019-05-08T14:46:22
2019-05-08T14:46:20
null
[ { "alpha_fraction": 0.6438653469085693, "alphanum_fraction": 0.6568946838378906, "avg_line_length": 23.891891479492188, "blob_id": "0cb8765201251702293d03096cd995558c76f3a0", "content_id": "92b42005c63ef6480024a193a4f5adc6ebe40c56", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 921, "license_type": "permissive", "max_line_length": 79, "num_lines": 37, "path": "/src/m2_tkinter_as_mqtt_sender.py", "repo_name": "witonskyzc/26-TkinterAndMQTT-201930", "src_encoding": "UTF-8", "text": "\"\"\"\nUsing a fake robot as the receiver of messages.\n\"\"\"\n\n# TODO: 1. In mqtt_remote_method_calls, set LEGO_NUMBER at line 131\n# to YOUR robot's number.\n\n# TODO: 2. Copy your Tkinter/ttk ROBOT gui code from the previous session (m6).\n# Then modify it so that pressing a button sends a message to a teammate\n# of the form:\n# (for Forward)\n# [\"forward\", X, y]\n# where X and Y are from the entry box.\n#\n# Implement and test.\n\n\"\"\" A simple example of using MQTT for SENDING messages. \"\"\"\n\nimport mqtt_remote_method_calls as com\nimport time\n\n\ndef main():\n name1 = input(\"Enter one name (subscriber): \")\n name2 = input(\"Enter another name (publisher): \")\n\n mqtt_client = com.MqttClient()\n mqtt_client.connect(name1, name2)\n time.sleep(1) # Time to allow the MQTT setup.\n print()\n\n while True:\n s = input(\"Enter a message: \")\n mqtt_client.send_message(\"say_it\", [name2, s])\n\n\nmain()\n" } ]
1
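TODO 2 in the file above asks for a Tkinter GUI whose buttons publish messages of the form ["forward", X, Y]. A sketch of that wiring; the only MQTT call assumed is the send_message(method_name, arguments_list) form already used in main(), and the widget names are illustrative:

import tkinter as tk

def add_forward_controls(root, mqtt_client):
    # entry boxes for the X and Y values to send
    x_entry = tk.Entry(root)
    y_entry = tk.Entry(root)
    x_entry.grid(row=0, column=0)
    y_entry.grid(row=0, column=1)

    def on_forward():
        # publish ["forward", X, Y] with the values from the entry boxes
        mqtt_client.send_message("forward",
                                 [int(x_entry.get()), int(y_entry.get())])

    tk.Button(root, text="Forward", command=on_forward).grid(row=1, column=0,
                                                             columnspan=2)
    return x_entry, y_entry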
healthinnovation/innovar
https://github.com/healthinnovation/innovar
76b313f6c295f4001ab60d10ccf94c1856fb61c7
45a2b3cd36bac06fedb93b2a9b7e1432c66021ba
1afa5017b24a0964f80ec49e772bc260daadfe7d
refs/heads/master
2023-09-04T04:56:15.083794
2023-08-28T15:44:34
2023-08-28T15:44:34
296,215,442
5
5
NOASSERTION
2020-09-17T04:04:55
2022-11-07T21:43:00
2022-11-07T21:54:16
R
[ { "alpha_fraction": 0.6906474828720093, "alphanum_fraction": 0.6924460530281067, "avg_line_length": 31.764705657958984, "blob_id": "4f7568a6348598cc14153d43f5050d131fde67bf", "content_id": "9a97748837240e06bef7a0b881e3e74eb3fd520c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 556, "license_type": "permissive", "max_line_length": 115, "num_lines": 17, "path": "/py/export.py", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "from pymongo import MongoClient\nfrom pathlib import Path\nimport csv\n\ndef export(filepath, dbname, user, password):\n infile = Path(filepath)\n filename = infile.stem\n data = {}\n with open(infile) as f:\n comma = csv.DictReader(f)\n data = [row for row in comma]\n uri = f\"mongodb+srv://{user}:{password}@cluster0.isyog.mongodb.net/myFirstDatabase?retryWrites=true&w=majority\"\n client = MongoClient(uri)\n db = client[dbname]\n collection = db[filename]\n status = collection.insert_many(data)\n return status.acknowledged" }, { "alpha_fraction": 0.6189951300621033, "alphanum_fraction": 0.620758056640625, "avg_line_length": 46.27083206176758, "blob_id": "39fd842ff1995be8871850ff4f8ef249c2c044b5", "content_id": "37a875ae67f2ca60591b2467a9d63ea546b41479", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 4541, "license_type": "permissive", "max_line_length": 233, "num_lines": 96, "path": "/R/gen_admin_div.R", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "#' Create categories for adiministrative divisions (Peru)\n#'\n#' @description Function 'gen_admin_div' returns the natural, geographical, or\n#' administrative division of Peru that includes the region, province or district\n#' provided to the function.\n#'\n#' @param x x is a list/vector with one of the 25 regions/departments names of\n#' Peru (first-level administrative subdivisions).\n#' @param method defines the type of natural, geographical, or administrative\n#' division that should be returned. Possible values are:\n#'\n#' - Traditional: Coastal, Andes Mountains, Amazon Jungle, or Lima & Callao\n#' - TC: (Lima no incluye Huarua/Cañete)\\href{https://andina.pe/agencia/noticia-tc-crea-cinco-macro-regiones-para-facilitar-acceso-a-justicia-constitucional-604658.aspx}{Administratives Macro Regions of the Constitutional Tribunal}\n#' - Cardinal: North, South, Amazon Jungle, or Lima & Callao\n#' - Trad_Cardinal: Coastal and Andes Mountains are divided into North and South (e.g. 
South Andes Mountains)\n#'\n#' @return Converts characters where it detects regions of Peru with the resulting divisions specified.\n#' @importFrom dplyr mutate\n#' @examples\n#' library(innovar)\n#' library(dplyr)\n#' df_dep <- data.frame(\n#' Region = c(\"LIMA\", \"CALLAO\", \"CAJAMARCA\", \"AMAZONAS\",\n#' \"SAN MARTIN\", \"HUANUCO\", \"PASCO\", \"JUNIN\", \"CUSCO\", \"PUNO\", \"APURIMAC\",\n#' \"AYACUCHO\", \"HUANCAVELICA\", \"TUMBES\", \"PIURA\", \"LAMBAYEQUE\",\n#' \"LA LIBERTAD\", \"ANCASH\", \"ICA\", \"AREQUIPA\", \"TACNA\", \"MOQUEGUA\",\n#' \"LORETO\", \"UCAYALI\", \"MADRE DE DIOS\"), stringsAsFactors = FALSE\n#' )\n#'\n#' df_dep %>%\n#' mutate(\n#' Reg_traditional = gen_admin_div(Region, method = \"Traditional\"),\n#' Reg_TC = gen_admin_div(Region, method = \"TC\"),\n#' Reg_Cardinal = gen_admin_div(Region, method = \"Cardinal\"),\n#' Reg_Trad_Cardinal = gen_admin_div(Region, method = \"Trad_Cardinal\")\n#' )\n#'\n#' @export gen_admin_div\n#'\ngen_admin_div <- function(x,\n method=\"Traditional\") {\n reg_mountain_north <- c(\"CAJAMARCA\",\"AMAZONAS\",\"SAN MARTIN\",\"HUANUCO\",\"PASCO\")\n reg_mountain_south <- c(\"JUNIN\",\"CUSCO\",\"PUNO\",\"APURIMAC\",\"AYACUCHO\",\"HUANCAVELICA\")\n reg_coastal_north <- c(\"TUMBES\",\"PIURA\",\"LAMBAYEQUE\",\"LA LIBERTAD\",\"ANCASH\")\n reg_coastal_south <- c(\"ICA\",\"AREQUIPA\",\"TACNA\",\"MOQUEGUA\")\n\n reg_south <- c(\"JUNIN\",\"HUANCAVELICA\",\"ICA\",\"AYACUCHO\",\"APURIMAC\",\n \"AREQUIPA\",\"MOQUEGUA\",\"TACNA\",\"PUNO\",\"CUSCO\")\n reg_north <- c(\"TUMBES\",\"PIURA\",\"LAMBAYEQUE\",\"LA LIBERTAD\",\"ANCASH\",\n \"CAJAMARCA\",\"AMAZONAS\",\"SAN MARTIN\",\"HUANUCO\",\"PASCO\")\n reg_jungle <- c(\"LORETO\",\"UCAYALI\",\"MADRE DE DIOS\")\n\n reg_tc_north <- c(\"ANCASH\",\"CAJAMARCA\",\"LA LIBERTAD\",\n \"LAMBAYEQUE\",\"PIURA\",\"TUMBES\")\n reg_tc_center <- c(\"APURIMAC\",\"AYACUCHO\",\"HUANCAVELICA\",\n \"HUANUCO\",\"JUNIN\",\"PASCO\",\"ICA\")\n reg_tc_south <- c(\"AREQUIPA\",\"CUSCO\",\"MADRE DE DIOS\",\n \"MOQUEGUA\",\"PUNO\",\"TACNA\")\n reg_tc_east <- c(\"AMAZONAS\",\"LORETO\",\"SAN MARTIN\",\"UCAYALI\")\n\n x <- allpautils_names(x)\n x <- toupper(x)\n\n if(method ==\"Traditional\") {\n\n x <- replace(x, x %in% reg_jungle, \"AMAZON JUNGLE\")\n x <- replace(x, x %in% reg_mountain_north | x %in% reg_mountain_south, \"ANDES MOUNTAINS\")\n x <- replace(x, x %in% reg_coastal_north | x %in% reg_coastal_south, \"COASTAL\")\n x <- replace(x, x == \"CALLAO\" | x == \"LIMA\", \"LIMA & CALLAO\")\n\n } else if(method == \"TC\") {\n\n x <- replace(x, x %in% reg_tc_north, \"NORTH\")\n x <- replace(x, x %in% reg_tc_south, \"SOUTH\")\n x <- replace(x, x %in% reg_tc_center, \"CENTER\")\n x <- replace(x, x %in% reg_tc_east, \"EAST\")\n x <- replace(x, x == \"CALLAO\" | x == \"LIMA\", \"LIMA & CALLAO\")\n # Deberian ser solo los distritos de Lima, Callao, más Huarua/Cañete. 
Por ahora todo LIMA will do just fine\n } else if(method == \"Cardinal\") {\n\n x <- replace(x, x %in% reg_north,\"NORTH\")\n x <- replace(x, x %in% reg_south ,\"SOUTH\")\n x <- replace(x, x %in% reg_jungle,\"AMAZON JUNGLE\")\n x <- replace(x, x ==\"CALLAO\" | x ==\"LIMA\",\"LIMA & CALLAO\")\n\n } else if(method == \"Trad_Cardinal\") {\n\n x <- replace(x, x %in% reg_jungle, \"AMAZON JUNGLE\")\n x <- replace(x, x %in% reg_mountain_south, \"SOUTH ANDES MOUNTAINS\")\n x <- replace(x, x %in% reg_mountain_north, \"NORTH ANDES MOUNTAINS\")\n x <- replace(x, x %in% reg_coastal_north, \"NORTH COAST\")\n x <- replace(x, x %in% reg_coastal_south, \"SOUTH COAST\")\n x <- replace(x, x == \"CALLAO\" | x == \"LIMA\", \"LIMA & CALLAO\")\n }\n return(x)\n}\n" }, { "alpha_fraction": 0.5578138828277588, "alphanum_fraction": 0.6562782526016235, "avg_line_length": 24.744186401367188, "blob_id": "10a9871c17d3b9f588746b2f9ba90f7ad30257bd", "content_id": "41c3643b2cf8621aee8db169081249e641e1323a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2269, "license_type": "permissive", "max_line_length": 240, "num_lines": 86, "path": "/NEWS.md", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "---\ntitle: \"NEWS\"\noutput:\n html_document:\n toc: true\n toc_float:\n collapsed: false\n smooth_scroll: false\n toc_depth: 2\nvignette: >\n %\\VignetteIndexEntry{NEWS}\n %\\VignetteEncoding{UTF-8}\n---\n\n# Innovar 0.1.2 📦\n\n 🔵 New color palettes\n\n 🔵 Fixed the get_def functions, thanks @zackarno\n \n 🔵 New function use_xaringan by @brianmsm\n\n# Innovar 0.1.0 📦\n\n🔵 **25 functions** available for health analysis and spatial modelling 🌎.\n\n<table class=\"default\">\n <tr>\n <td>Categories</td>\n <td># N° of functions</td>\n </tr>\n <tr>\n <td>Climate</td>\n <td>2</td>\n </tr>\n <tr>\n <td>Environment</td>\n <td>6</td>\n </tr>\n <tr>\n <td>Human intervation</td>\n <td>6</td>\n </tr>\n <tr>\n <td>Census data</td>\n <td>1</td>\n </tr>\n <tr>\n <td>Graphics</td>\n <td>4</td>\n </tr>\n <tr>\n <td>Administrative limits</td>\n <td>1</td>\n </tr>\n <tr>\n <td>Vulnerability</td>\n <td>1</td>\n </tr>\n <tr>\n <td>Utils</td>\n <td>4</td>\n </tr>\n</table>\n\n🔵 **9 colors palettes** 🎨 customized for innovar.\n\n- `ccvi` ,`npr` , `blmbrg`, `ecomst` , `ctp` , `jama`, `mlobo`, `btran`, `nasa`\n\n🔵 Creation of **2 dataset** 🗂 :\n\n- `Peru`, `migrationraw`\n\n🔵 Implement of <img src=\"https://user-images.githubusercontent.com/23284899/160271942-73aac747-d44b-4074-b30e-1821fa69ccf1.png\" width=\"25px\" align=\"center\"> GitHub Actions to test package on multiple Operative System and deployment webpage.\n\n🔵 <img src=\"https://user-images.githubusercontent.com/23284899/160271942-73aac747-d44b-4074-b30e-1821fa69ccf1.png\" width=\"25px\" align=\"center\"> GitHub Actions for citation.\n\n🔵 <img src=\"https://user-images.githubusercontent.com/23284899/160659630-6623bd8b-1ca4-449b-beb7-551722f78e9e.png\" width=\"25px\" align=\"center\"> **1** theme customized in xaringan\n\n🔵 <img src=\"https://user-images.githubusercontent.com/23284899/160663777-5255eabc-d470-4905-8efb-660d312f8368.png\" width=\"25px\" align=\"center\"> **1** customized template for an rstudio project\n\n🔵 Add new shields and logo for package.\n\n🔵 New vignette.\n\n🔵 Incorporation of <img src=\"https://user-images.githubusercontent.com/23284899/160272004-ecbc3777-ec56-432e-8c0d-1743f6a8ae51.png\" width=\"25px\" align=\"center\"> **MIT licence** .\n" }, { "alpha_fraction": 
0.6092607378959656, "alphanum_fraction": 0.63363116979599, "avg_line_length": 25.12244987487793, "blob_id": "8442beade4159d95d04ae220d99528bf3dc26595", "content_id": "92fe06cd68ba7b587c9649922e6c190efc929658", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1232, "license_type": "permissive", "max_line_length": 107, "num_lines": 49, "path": "/R/get_ghsl.R", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "#' Extract Global Human Settlement Layer data\n#'\n#' A function that extracts Global Human Settlement Layer data from \\bold{1975} to \\bold{2030} every 5 years.\n#'\n#' @param region the region, an sf object.\n#' @param scale A nominal scale in meters of the projection to work in.\n#' @return a tibble object with the new variable in km².\n#'\n#' @importFrom sf st_transform st_simplify\n#' @importFrom rgee sf_as_ee\n#' @importFrom dplyr select filter contains\n#' @importFrom stringr str_replace\n#'\n#' @examples\n#' \\dontrun{\n#'\n#' library(tidyverse)\n#' library(rgee)\n#' library(innovar)\n#' library(sf)\n#' ee_Initialize()\n#'\n#' # 1. Reading an sf object\n#' data(\"Peru\")\n#' region <- Peru\n#' region_ee <- pol_as_ee(region, id = 'distr' ,simplify = 1000)\n#'\n#' # 2. Extracting GHSL information\n#' data <- region_ee %>% get_ghsl()\n#' }\n#' @export\n\nget_ghsl <- function(region, scale = 100) {\n # The base image collection\n img_base <- ee$ImageCollection(\"users/ambarja/ghsl\")$\n toBands()\n\n ghsl_area <- img_base$multiply(ee$Image$pixelArea())$\n divide(1000000)\n\n data <- ee_sum(\n x = ghsl_area,\n y = region,\n scale = scale\n )\n names(data) <- str_replace(names(data),'X',replacement = 'ghsl') %>%\n gsub(\"_b1\",\"\",.)\n\n return(data)\n}\n" }, { "alpha_fraction": 0.48075658082962036, "alphanum_fraction": 0.48680195212364197, "avg_line_length": 33.0264778137207, "blob_id": "ee5c9307d2cc92b56d452b03dfb9d444a4ce4da", "content_id": "0e0fec1df5bf60267446e5e427982a5fdc6e9420", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 16707, "license_type": "permissive", "max_line_length": 92, "num_lines": 491, "path": "/R/vulnerability_index.R", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "#' Vulnerability Index\n#'\n#' @description Ranks the observations by different methods (percentile\n#' rank by default), optionally taking into account the domains that group\n#' the indicators.\n#'\n#' @param data A dataframe containing the variables that will be analyzed.\n#'\n#' @param direct A vector indicating the name of the variables that contribute\n#' directly to the vulnerability index.\n#'\n#' @param inverse A vector indicating the name of the variables that contribute\n#' inversely to the vulnerability index.\n#'\n#' @param table A dataframe indicating the variables \"Codename\", \"Direction\",\n#' \"Domain\" and \"Geographic_scale\" as a minimum. If this argument is specified,\n#' it is no longer necessary to specify the direct and inverse arguments.\n#'\n#' @param weighted A logical value indicating whether the \"Domain\" information is\n#' used for the index calculation.\n#'\n#' @param domains A vector indicating the names of the domains grouping the variables\n#' of interest. If the argument \"table\" containing the variable \"Domain\" is used,\n#' only \"default\" is indicated in this argument.\n#'\n#' @param level A vector indicating the level of analysis. 
It currently supports 3\n#' values: \"departamental\", \"provincial\", and \"distrital\".\n#'\n#' @param ordered A logical value indicating whether the cases analyzed will be\n#' ordered from highest to lowest.\n#'\n#' @param method A vector indicating the analysis method used for the index.\n#' Currently \"percent_rank\" and \"pca\" are supported.\n#'\n#' @param complete If TRUE, the results table will show all the variables that\n#' the dataframe has in addition to the computations performed.\n#'\n#' @param na.rm A logical value indicating whether cases with missing values are\n#' eliminated.\n#'\n#' @return A \\code{\\link[tibble]{tibble}} containing the variables analyzed by the function.\n#' @seealso \\code{\\link[dplyr]{percent_rank}}, \\code{\\link[psych]{principal}}\n#'\n#' @import dplyr\n#' @importFrom purrr map map2 reduce\n#' @importFrom magrittr use_series\n#' @importFrom stringr str_sub str_to_lower str_replace\n#' @importFrom rlang .data\n#' @importFrom tidyr nest unnest drop_na separate\n#' @importFrom psych principal\n#' @importFrom methods new\n#' @importFrom stats cor\n#' @export\n#'\n#' @examples\n#' \\dontrun{\n#' Sigma_Dom1 <- matrix(rep(c(1, runif(4, 0.65, 0.9)), 4),\n#' 4, 4, byrow = TRUE)\n#'\n#' Sigma_Dom2 <- matrix(rep(c(1, runif(4, 0.65, 0.9)), 4),\n#' 4, 4, byrow = TRUE)\n#'\n#' Dom1 <- as.data.frame(MASS::mvrnorm(100, rep(0, 4), Sigma_Dom1))\n#' Dom2 <- as.data.frame(MASS::mvrnorm(100, rep(0, 4), Sigma_Dom2))\n#' colnames(Dom2) <- paste0(\"V\", 5:8)\n#'\n#' df_example <- data.frame(distr = paste0(\"distr\", 1:100))\n#' df_example <- cbind(df_example, Dom1, Dom2)\n#'\n#' table_var <- data.frame(\n#' Codename = paste0(\"V\", 1:8),\n#' Direction = \"Direct\",\n#' Domain = c(rep(\"Dom1\", 4), rep(\"Dom2\", 4)),\n#' Geographic_scale = \"Distrital\"\n#' )\n#'\n#' vulnerability_index(df_example, table = table_var,\n#' level = \"distrital\", na.rm = TRUE,\n#' method = \"pca\")\n#'\n#'\n#' vulnerability_index(df_example, table = table_var,\n#' level = \"distrital\", na.rm = TRUE,\n#' method = \"percent_rank\")\n#' }\n\nvulnerability_index <- function(data, direct = NULL, inverse = NULL, table = NULL,\n weighted = TRUE, domains = 'default', level = NULL,\n ordered = TRUE, method = \"percent_rank\", complete = TRUE,\n na.rm = FALSE) {\n # Checking\n if (is.null(table) & (is.null(direct) | is.null(inverse))) {\n stop(paste(\"It is necessary that direct and inverse contain information\",\n \"or that the table argument is specified.\"))\n }\n\n if (!is.null(table) & (!is.null(direct) | !is.null(inverse))) {\n stop(paste(\"It is only necessary to specify direct/inverse or table.\",\n \"Not all 3 arguments at the same time.\"))\n }\n\n if(is.null(level)) {\n stop(paste(\"Now it is necessary to indicate the level of analysis to\",\n \"be performed. 
Three values are allowed: distrital, provincial\",\n \"departamental\"))\n } else if (!level %in% c(\"distrital\", \"provincial\", \"departamental\")) {\n stop(paste(\"Only 3 values are allowed: distrital, provincial, departamental.\"))\n }\n\n ## Options\n\n indexoptions <- list()\n indexoptions$direct <- direct\n indexoptions$inverse <- inverse\n indexoptions$table <- table\n indexoptions$weighted <- weighted\n indexoptions$domains <- domains\n indexoptions$level <- level\n indexoptions$ordered <- ordered\n indexoptions$method <- method\n indexoptions$complete <- complete\n indexoptions$na.rm <- na.rm\n\n # Get data about variable's direction\n # Temp only table argument is supported\n\n if(level == \"distrital\") {\n table <- table %>%\n filter(.data$Geographic_scale == \"Distrital\")\n } else if (level == \"provincial\") {\n table <- table %>%\n filter(.data$Geographic_scale == \"Provincial\")\n }\n\n if(!is.null(table)) {\n direct <- table %>%\n filter(.data$Direction == \"Direct\") %>%\n use_series(Codename)\n\n inverse <- table %>%\n filter(.data$Direction == \"Inverse\") %>%\n use_series(Codename)\n }\n\n if (weighted & domains == 'default') {\n n_domains <- length(table(table$Domain))\n names_domains <- str_sub(str_to_lower(sort(unique(table$Domain))), 1, 6)\n # names_domains <- paste0(\"rank_\", names_domains)\n # if (method == \"percent_rank\") {\n # names_domains <- paste0(\"rank_\", names_domains)\n # } else if (method == \"pca\") {\n # names_domains <- paste0(\"pca_\", names_domains)\n # }\n vars_domains <- table %>%\n group_nest(.data$Domain) %>%\n use_series(data) %>%\n map(~ .x$Codename)\n vars_vulnerability <- table$Codename\n }\n\n data <- data %>%\n select(starts_with(\"dep\"),\n starts_with(\"prov\"),\n starts_with(\"distr\"),\n {{ vars_vulnerability }})\n\n # Function body\n\n if (method == \"percent_rank\") {\n\n names_domains <- paste0(\"rank_\", names_domains)\n\n if (weighted) {\n df_rank <- data %>%\n mutate(\n across({{ direct }}, percent_rank, .names = \"rank_{.col}\"),\n across({{ inverse }}, ~ percent_rank(desc(.)), .names = \"rank_{.col}\")\n ) %>%\n rowwise()\n\n for (i in 1:n_domains) {\n name_domain_C <- names_domains[i]\n var_domain <- vars_domains[[i]]\n var_domain <- paste0(\"rank_\", var_domain)\n\n df_rank <- df_rank %>%\n mutate(\n {{ name_domain_C }} := sum(c_across({{ var_domain }}), na.rm = na.rm)\n )\n }\n\n rank_vars_vulnerability <- paste0(\"rank_\", vars_vulnerability)\n\n df_rank <- df_rank %>%\n ungroup() %>%\n mutate(\n across( {{names_domains}}, percent_rank)\n ) %>%\n rowwise() %>%\n mutate(\n sumr = sum(c_across({{ names_domains }}), na.rm = na.rm)\n ) %>%\n ungroup() %>%\n mutate(\n Rank_T = percent_rank(.data$sumr) # Global percentile ranking\n ) %>%\n select(-c(\n #{{ vars_vulnerability }},\n {{ rank_vars_vulnerability }},\n .data$sumr\n )) %>%\n relocate(\n starts_with(\"dep\"),\n starts_with(\"prov\"),\n starts_with(\"distr\"),\n {{ names_domains }},\n .data$Rank_T\n )\n\n } else {\n df_rank <- data %>%\n mutate(\n across({{ direct }}, percent_rank),\n across({{ inverse }}, ~ percent_rank(desc(.)))\n ) %>%\n rowwise() %>%\n mutate(\n sumr = sum(c_across(c({{ direct }}, {{ inverse }})), na.rm = na.rm)\n ) %>%\n ungroup() %>%\n mutate(\n Rank_T = percent_rank(.data$sumr) # Global percentile ranking\n ) %>%\n select(-c(.data$sumr))\n # select(dep:distr, {{ direct }}, {{ inverse }}, Rank_T)\n }\n\n } else if (method == \"pca\") {\n\n names_domains_data <- paste0(\"data_\", names_domains)\n names_domains_pca <- paste0(\"pca_\", 
names_domains)\n names_domains_aug <- paste0(\"fitted_\", names_domains)\n # names_domains_loadings <- paste0(\"tidy_\", names_domains)\n # names_domains_explained <- paste0(\"explained_\", names_domains)\n # names_domains_fit <- paste0(\"fit_\", names_domains)\n\n df_rank <- data %>%\n nest(data = everything())\n\n df_rank_list <- list()\n df_rank_list_g <- list()\n\n for (i in 1:n_domains) {\n name_domain_data <- names_domains_data[i]\n name_domain_C_pca <- names_domains_pca[i]\n name_domain_aug <- names_domains_aug[i]\n # name_domains_loadings <- names_domains_loadings[i]\n # name_domains_explained <- names_domains_explained[i]\n # name_domains_fit <- names_domains_fit[i]\n\n var_domain <- vars_domains[[i]]\n\n df_rank_list_g[[i]] <- df_rank %>%\n mutate(\n {{ name_domain_data }} := map(data,\n ~ .x %>%\n select(starts_with(\"dep\"),\n starts_with(\"prov\"),\n starts_with(\"distr\"),\n {{ var_domain }}) %>%\n drop_na()),\n {{ name_domain_C_pca }} := map(eval(parse(text = name_domain_data)),\n ~ .x %>%\n select({{ var_domain }}) %>%\n mutate(\n across(\n .cols = c({{ var_domain }}),\n .fns = ~ scale(.)[,1]\n )\n ) %>%\n principal(r = .,\n nfactors = 1)),\n {{ name_domain_aug }} := map2(eval(parse(text = name_domain_C_pca)),\n eval(parse(text = name_domain_data)),\n ~ .y %>%\n bind_cols(\n .x$scores %>%\n as_tibble()\n ) %>%\n rename({{ name_domain_C_pca }} := .data$PC1)),\n loadings = map(eval(parse(text = name_domain_C_pca)),\n ~ .x$loadings %>%\n unclass() %>%\n as_tibble(rownames = \"Indicators\") %>%\n mutate(\n h2 = .x$communality,\n u2 = .x$uniquenesses,\n com = .x$complexity\n )),\n explained = map(eval(parse(text = name_domain_C_pca)),\n ~ .x$Vaccounted %>%\n as_tibble(rownames = \"Variance\")),\n fit = map2(eval(parse(text = name_domain_C_pca)),\n eval(parse(text = name_domain_data)),\n ~ .y %>%\n select({{ var_domain }}) %>%\n fit_measures(.x, .))\n )\n\n df_rank_list[[i]] <- df_rank_list_g[[i]] %>%\n select({{ name_domain_aug }}) %>%\n unnest(cols = {{ name_domain_aug }})\n }\n\n df_rank <- df_rank_list %>%\n reduce(full_join) %>%\n drop_na()\n\n df_rank_g <- df_rank %>%\n nest(data = everything()) %>%\n mutate(\n pca = map(data,\n ~ .x %>%\n select({{ names_domains_pca }}) %>%\n principal(r = .,\n nfactors = 1)),\n fitted = map2(.data$pca, .data$data,\n ~ .y %>%\n bind_cols(\n .x$scores %>%\n as_tibble()\n ) %>%\n rename(fit_g := .data$PC1)),\n loadings = map(.data$pca,\n ~ .x$loadings %>%\n unclass() %>%\n as_tibble(rownames = \"Indicators\") %>%\n mutate(\n h2 = .x$communality,\n u2 = .x$uniquenesses,\n com = .x$complexity\n )),\n explained = map(.data$pca,\n ~ .x$Vaccounted %>%\n as_tibble(rownames = \"Variance\")),\n fit = map2(.data$pca, .data$data,\n ~ .y %>%\n select({{ names_domains_pca }}) %>%\n fit_measures(.x, .))\n )\n\n df_rank <- df_rank_g %>%\n select(.data$fitted) %>%\n unnest(cols = .data$fitted)\n\n # Detect direction of domains\n direction_loadings <- df_rank_g$loadings[[1]] %>%\n mutate(\n Direction = ifelse(.data$PC1 >= 0,\n \"Direct\",\n \"Inverse\")\n )\n\n direct_domains_loading <- direction_loadings %>%\n filter(.data$Direction == \"Direct\") %>%\n pull(.data$Indicators)\n\n names_direct_domains_loading <- str_replace(direct_domains_loading,\n \"pca_\", \"rank_\")\n\n inverse_domains_loading <- direction_loadings %>%\n filter(.data$Direction == \"Inverse\") %>%\n pull(.data$Indicators)\n\n names_inverse_domains_loading <- str_replace(inverse_domains_loading,\n \"pca_\", \"rank_\")\n\n #######################################################\n\n 
names_domains_rank <- paste0(\"rank_\", names_domains)\n\n names_domains_pca_rank <- data.frame(string = c(names_domains_pca,\n names_domains_rank)) %>%\n separate(.data$string, c(\"type\", \"variable\"),\n remove = FALSE) %>%\n arrange(.data$variable) %>%\n pull(.data$string)\n\n df_rank <- df_rank %>%\n mutate(\n Rank_T = percent_rank(.data$fit_g),\n across({{ direct_domains_loading }}, percent_rank,\n .names = \"{names_direct_domains_loading}\"),\n across({{ inverse_domains_loading }}, ~ percent_rank(desc(.)),\n .names = \"{names_inverse_domains_loading}\"),\n ) %>%\n relocate(starts_with(\"dep\"),\n starts_with(\"prov\"),\n starts_with(\"distr\"),\n {{ names_domains_pca_rank }},\n .data$fit_g, .data$Rank_T)\n }\n\n if (ordered) {\n df_rank <- df_rank %>%\n arrange(desc(.data$Rank_T))\n }\n\n if (complete == FALSE) {\n df_rank <- df_rank %>%\n select(-c({{ vars_vulnerability }}))\n }\n\n if (method == \"percent_rank\") {\n\n indexcalc <- new(\"indexcalc\",\n Options = indexoptions, # list\n Data = df_rank # S4 class\n )\n\n } else if (method == \"pca\") {\n\n Specificfit <- list()\n\n for (i in 1:n_domains) {\n names_domain <- names_domains[[i]]\n Specificfit[[names_domain]] <- new(\"indexFit\",\n Fits = df_rank_list_g[[i]]$fit[[1]],\n Explained = df_rank_list_g[[i]]$explained[[1]],\n Loadings = df_rank_list_g[[i]]$loadings[[1]]\n )\n }\n\n indexfits <- new(\"indexFits\",\n Specific = Specificfit,\n Fits = df_rank_g$fit[[1]],\n Explained = df_rank_g$explained[[1]],\n Loadings = df_rank_g$loadings[[1]]\n )\n\n indexcalc <- new(\"indexcalc\",\n Options = indexoptions, # list\n Data = df_rank, # S4 class\n Fit = indexfits # S4 class\n )\n }\n\n return(indexcalc)\n\n}\n\n\nfit_measures <- function(pca, data) {\n bartlett <- data %>%\n cor() %>%\n psych::cortest.bartlett(., nrow(data))\n\n bartlett <- tibble(\n Chisq = bartlett$chisq,\n df = bartlett$df,\n p_value = bartlett$p.value\n )\n\n KMO <- data %>%\n cor() %>%\n psych::KMO()\n\n KMO <- tibble(\n Indicators = \"Overall\",\n KMO = KMO$MSA\n ) %>%\n bind_rows(\n KMO$MSAi %>%\n as_tibble(rownames = \"Indicators\") %>%\n rename(KMO = .data$value)\n )\n\n Chi <- tibble(\n Chisq = pca$chi,\n p_value = pca$EPVAL\n )\n\n Fit <- list(\n bartlett = bartlett,\n KMO = KMO,\n Chi = Chi,\n RMSR = pca$rms,\n Fit_off = pca$fit.off\n )\n\n return(Fit)\n}\n" }, { "alpha_fraction": 0.6797752976417542, "alphanum_fraction": 0.6797752976417542, "avg_line_length": 29.95652198791504, "blob_id": "bb8b6d56f48acabbf998982971afbe67ed6b26eb", "content_id": "5475d4c67b6760eb32ae876b9947d928eb0badb0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1424, "license_type": "permissive", "max_line_length": 150, "num_lines": 46, "path": "/R/export.R", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "#' Export CSV file to MongoDB\n#'\n#' This is a Python wrapper of the \\code{insert_many()} method of package pymongo to insert a list of documents as a collection in a database in MongoDB.\n#'\n#' @param filepath CSV file path.\n#' @param dbname Database name.\n#' @param user User name.\n#' @param password Password.\n#' @param interpreter Path of the Python interpreter (optional)\n#'\n#' @return Confirmation message if exporting was successful. 
Else an error.\n#' @importFrom reticulate use_python source_python\n#'\n#' @examples\n#' \\dontrun{\n#' library(innovar)\n#' data(\"cars\")\n#' write.csv(cars, \"cars.csv\", row.names = FALSE)\n#' dbname = \"<database-name>\"\n#' user <- Sys.getenv(\"USER\")\n#' password <- Sys.getenv(\"PASSWORD\")\n#' export(\"cars.csv\", dbname, user, password)\n#' }\n#' @export\nexport <- function(filepath, dbname, user, password, interpreter = NULL) {\n if (is.null(filepath)) stop(\"File path (filepath) is required.\")\n\n if (is.null(dbname)) stop(\"Database name (dbname) is required.\")\n\n if (is.null(user)) stop(\"User (user) is required.\")\n\n if (is.null(password)) stop(\"Password (password) is required.\")\n\n if (!is.null(interpreter)) use_python(interpreter, required = TRUE)\n\n # Load the Python export() into its own environment so it does not\n # shadow or recurse into this R wrapper of the same name\n py <- new.env()\n source_python(\"./py/export.py\", envir = py)\n\n arg <- list(filepath, dbname, user, password)\n status <- do.call(py$export, arg)\n\n if (status) {\n print(\"Collection was exported successfully.\")\n } else {\n print(\"Error: Exporting failed.\")\n }\n}\n" }, { "alpha_fraction": 0.7085278034210205, "alphanum_fraction": 0.7318625450134277, "avg_line_length": 49.1489372253418, "blob_id": "f29b91fd4093851fc17a8c952a128455d2d8f06b", "content_id": "d55d74f1721ad6170f492bfd40c22945afecc9fc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 2360, "license_type": "permissive", "max_line_length": 203, "num_lines": 47, "path": "/R/data.R", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "#' Peru names and spatial\n#'\n#' A dataset containing the names of the 1874 districts, 196 provinces and 25 departments of Peru according to INEI - 2017; it additionally includes the administrative level-3 limits in a spatial vector format.\n#'\n#' @format A dataset with 1874 rows and 9 variables:\n#'\n#' \\describe{\n#' \\item{\\bold{ubigeo}}{Code of six numbers for each district}\n#' \\item{\\bold{dep.code}}{Code of each department}\n#' \\item{\\bold{dep}}{Name of each department}\n#' \\item{\\bold{prov.code}}{Code of each province}\n#' \\item{\\bold{prov}}{Name of each province}\n#' \\item{\\bold{distr.code}}{Code of each district}\n#' \\item{\\bold{distr}}{Name of each district}\n#' \\item{\\bold{capital}}{Name of the district capital}\n#' \\item{\\bold{geometry}}{Polygon geometry of each district}\n#' }\n#' @usage data(Peru)\n#' @source \\url{https://www.inei.gob.pe/}\n\"Peru\"\n\n#' Raw migration data from the Peru census of 2017\n#'\n#' The Institute of Statistics and Informatics (INEI, by its acronym in Spanish)\n#' of Peru carried out its last census in 2017. This census data can be\n#' queried in the \\href{https://censos2017.inei.gob.pe/redatam/}{2017 Census REDATAM platform}.\n#'\n#' This data set is obtained by reading the downloaded report generated by querying\n#' the question \\emph{Distrito o país donde vivía hace 5 años} (District or country\n#' where you used to live 5 years ago) at a district level without setting column names\n#' \\code{readxl::read_excel(<report-filepath>, col_names = FALSE)}. The downloaded report\n#' is an Excel file containing metadata about the query and a list for every district\n#' with all the districts or countries given as an answer for this question and\n#' their frequencies (number of people living in district A that used to live in\n#' district B 5 years ago). 
Thus, we have 5-year migration information for all\n#' the 1874 districts in Peru, which has to be processed to be in a tidy\n#' origin-destination format.\n#'\n#' The function \\code{\\link{get_od_data}} in this package aids in the processing\n#' of this kind of report from the Peru census of 2017.\n#'\n#' @format An untidy data frame which resulted from reading an Excel file without\n#' formatting or setting column names, and containing the information about migration\n#' described above.\n#' @usage data(migration17raw)\n#' @source \\url{https://censos2017.inei.gob.pe/redatam/}\n#' @seealso \\code{\\link{get_od_data}}\n\"migration17raw\"\n" }, { "alpha_fraction": 0.6383561491966248, "alphanum_fraction": 0.6479452252388, "avg_line_length": 35.70391082763672, "blob_id": "0eae50bfeb335f133692d760a9e9f9745452f8b5", "content_id": "b5d79e7d9f4ec50b503f12d64ed4d297cc446e3e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 6576, "license_type": "permissive", "max_line_length": 95, "num_lines": 179, "path": "/R/get_od_data.R", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "#' Get census-like data in an origin-destination format\n#'\n#' Process a data frame resulting from reading a raw query on the\n#' Peru census of 2017 data containing origin and destination locations, and\n#' return it in a tidy long format or in a (sparse) matrix form.\n#'\n#' @param x A data frame resulting from reading a raw query on the\n#' Peru census of 2017 data containing origin and destination locations.\n#' More information on the Details section.\n#' @param wide logical. Should the output be in wide format? If 'TRUE', a matrix\n#' is returned with the origins in rows and the destinations in columns.\n#' Defaults to 'FALSE'.\n#' @param sparse logical. When the output is in wide format (i.e. \\code{wide = TRUE}),\n#' should a sparse matrix be returned? Defaults to 'TRUE'.\n#'\n#' @return Data frame (or matrix) with origin-destination format.\n#' @export\n#' @import dplyr\n#' @importFrom zoo na.locf\n#' @importFrom stringr str_remove_all str_trim str_replace\n#' @importFrom tidyr separate pivot_wider\n#' @importFrom stringi stri_trans_general\n#' @importFrom Matrix Matrix\n#'\n#' @details\n#' The Institute of Statistics and Informatics (INEI, by its acronym in Spanish)\n#' of Peru carried out its last census in 2017. This census data can be\n#' queried in the \\href{https://censos2017.inei.gob.pe/redatam/}{2017 Census REDATAM platform}.\n#' A query result can be downloaded as an Excel workbook (.xlsx).\n#'\n#' The \\code{get_od_data} function aids to process the raw query results on the\n#' 2017 census data involving an origin and a destination location (district\n#' or province level). For example, when querying the question \\emph{Distrito o país\n#' donde vivía hace 5 años} (District or country where you used to live 5 years\n#' ago) at a district level, one gets for every district a list with all the districts\n#' or countries given as an answer for this question and their frequencies (number\n#' of people living in district A that used to live in district B 5 years ago).\n#'\n#' The raw report obtained by querying the question \\emph{Distrito o país\n#' donde vivía hace 5 años} is provided in this package as an example dataset\n#' under the name \\code{\\link{migration17raw}}. 
In the examples section below, we\n#' show how to use the \\code{get_od_data} function to process this raw dataset to\n#' get different types of outputs to analyze its origin-destination information.\n#'\n#' @seealso \\code{\\link{migration17raw}}\n#' @examples\n#' \\dontrun{\n#' data(\"migration17raw\")\n#'\n#' # Check that this raw data set is the result of reading an Excel file without\n#' # column names\n#' head(migration17raw)\n#'\n#' # Origin-destination data in long format (data frame)\n#' od_long <- get_od_data(migration17raw)\n#' head(od_long)\n#'\n#' # Origin-destination data in wide format (sparse matrix)\n#' od_wide_sparse <- get_od_data(migration17raw, wide = TRUE)\n#' od_wide_sparse[1:5, 1:5]\n#' print(object.size(od_wide_sparse), units = \"auto\") # This is lighter\n#'\n#' # Origin-destination data in wide format (regular matrix)\n#' od_wide <- get_od_data(migration17raw, wide = TRUE, sparse = FALSE)\n#' od_wide[1:5, 1:5]\n#' print(object.size(od_wide), units = \"auto\") # This is heavier\n#' }\nget_od_data <- function(x, wide = FALSE, sparse = TRUE) {\n dat_raw <- x[, -1]\n last_row <- which(dat_raw[, 1] == \"RESUMEN\") - 1\n dat_raw <- dat_raw[1:last_row, 1:2]\n\n # Filter empty rows\n dat <- dat_raw[rowSums(is.na(dat_raw)) != ncol(dat_raw), ]\n colnames(dat) <- c(\"origin\", \"cases\")\n\n # Create destination location column\n dat$nchar <- nchar(dat$cases)\n dat$destination <- ifelse(dat$nchar >= 10, as.character(dat$cases), NA)\n dat$destination <- na.locf(dat$destination)\n dat$nchar <- NULL\n\n # Create destination location code column\n dat$ubigeo_des <- ifelse(\n grepl(\"AREA # \", dat$origin) == TRUE,\n str_remove_all(dat$origin, \"AREA # \"),\n NA\n )\n dat$ubigeo_des <- na.locf(dat$ubigeo_des)\n\n dat <- dat[\n !(\n dat$origin == \"Total\" | dat$origin == \"No Aplica :\" |\n grepl(\"especificado\", dat$origin) == TRUE |\n grepl(\"Continente\", dat$origin) == TRUE\n ),\n ]\n\n del <- which(grepl(\"AREA # \", dat$origin) == TRUE)\n del <- sort(c(del, del + 1))\n\n dat <- dat[-del, ]\n dat <- dat[!(grepl(\"[0-9]\", dat$origin) == TRUE), ]\n\n # Format string columns\n\n od_raw <-\n dat %>%\n separate(\n origin, into = c(\"dept_ori\", \"prov_ori\", \"distr_ori\"), sep = \",\",\n fill = \"right\"\n ) %>%\n separate(\n destination, into = c(\"dept_des\", \"prov_des\", \"distr_des\"), sep = \",\",\n fill = \"right\"\n ) %>%\n mutate(\n dept_ori = str_remove_all(dept_ori, \"Provincia Constitucional del \"),\n dept_des = str_remove_all(dept_des, \"Prov. Constitucional del \")\n ) %>%\n mutate(\n distr_ori = ifelse(dept_ori == \"Callao\", prov_ori, distr_ori),\n prov_ori = ifelse(dept_ori == \"Callao\", \"Callao\", prov_ori),\n distr_des = ifelse(dept_des == \"Callao\", prov_des, distr_des),\n prov_des = ifelse(dept_des == \"Callao\", \"Callao\", prov_des)\n ) %>%\n mutate_at(vars(distr_ori, distr_des), ~ str_remove_all(., \"distrito: \")) %>%\n mutate_at(\n vars(distr_ori, prov_ori, dept_ori, distr_des, prov_des, dept_des),\n ~ str_trim(toupper(stri_trans_general(. 
, id = \"Latin-ASCII\")))\n ) %>%\n mutate_at(\n vars(distr_ori, prov_ori, dept_ori, distr_des, prov_des, dept_des),\n ~ str_remove_all(., \"['~]\")\n ) %>%\n mutate_at(\n vars(distr_ori, prov_ori, dept_ori, distr_des, prov_des, dept_des),\n ~ str_replace(., \"[_-]\", \" \")\n ) %>%\n mutate(cases = as.numeric(cases))\n\n # Create data frame with code and names of the locations\n\n distr_ubigeo <-\n unique(od_raw[, c(\"dept_des\", \"prov_des\", \"distr_des\", \"ubigeo_des\")])\n colnames(distr_ubigeo) <- c(\"dept_ori\", \"prov_ori\", \"distr_ori\", \"ubigeo_ori\")\n\n # Merge to obtain the origin location codes\n od <- merge(od_raw, distr_ubigeo, by = c(\"dept_ori\", \"prov_ori\", \"distr_ori\"))\n\n # Arrange columns\n od <- od[, c(\n \"ubigeo_ori\", \"ubigeo_des\", \"cases\", \"dept_ori\", \"prov_ori\",\n \"distr_ori\", \"dept_des\", \"prov_des\", \"distr_des\"\n )]\n\n # Arrange rows\n od <- arrange(od, ubigeo_ori, ubigeo_des)\n\n if (wide) {\n od_wide <-\n od %>%\n select(ubigeo_ori, ubigeo_des, cases) %>%\n pivot_wider(names_from = ubigeo_des, values_from = cases, values_fill = 0)\n\n od_wide_matrix <- as.matrix(od_wide[, -1])\n rownames(od_wide_matrix) <- od_wide$ubigeo_ori\n\n if (sparse) {\n od_wide_matrix_sparse <- Matrix(od_wide_matrix, sparse = TRUE)\n od_wide_matrix_sparse\n } else {\n od_wide_matrix\n }\n\n } else {\n od\n }\n}\n" }, { "alpha_fraction": 0.6086519360542297, "alphanum_fraction": 0.6247484683990479, "avg_line_length": 23.850000381469727, "blob_id": "c60ba635d64da2ea6616fbc33a16d912ad0be6ff", "content_id": "1215c9e1b25099a2435f52d79de6aa904d6a3b68", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 994, "license_type": "permissive", "max_line_length": 74, "num_lines": 40, "path": "/R/vector_ee.R", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "#' Polygon to earth engine\n#'\n#' A function that transform polygon to featurecollection.\n#'\n#' @param x polygon sf object.\n#' @param id the main id name in sf object.\n#' @param simplify number to simplify the sf polygon keeping its topology\n#' @return feature collection\n#' @importFrom dplyr select\n#' @importFrom rgee sf_as_ee\n#' @import sf\n#' @examples\n#' \\dontrun{\n#' library(tidyverse)\n#' library(rgee)\n#' library(innovar)\n#' library(sf)\n#' ee_Initialize()\n#' # 1. 
Reading an sf object\n#' data(\"Peru\")\n#' region <- Peru %>% filter(reg == 'LIMA')\n#' region_ee <- pol_as_ee(region , id = 'distr' , simplify = 1000)\n#' class(region_ee)\n#' }\n#' @export\n\npol_as_ee <- function(x , id, simplify = 500){\n\n id_names <- which(colnames(x) %in% c(id))\n sf_object <- x %>%\n select(names(x)[id_names]) %>%\n st_transform(crs = 3857) %>%\n st_simplify(preserveTopology = TRUE,\n dTolerance = simplify) %>%\n st_transform(crs = 4326) %>%\n sf_as_ee()\n\n return(sf_object)\n\n}\n" }, { "alpha_fraction": 0.48883432149887085, "alphanum_fraction": 0.5049690008163452, "avg_line_length": 19.759708404541016, "blob_id": "a6b2497cd06e893a60bf0f6a662f8ffb7e9cd050", "content_id": "4beea08de5d9f46c6cfe083e65a5d4c057e21234", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 8557, "license_type": "permissive", "max_line_length": 141, "num_lines": 412, "path": "/R/get_etp.R", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "#' Extract evapotranspiration data of Modis\n#'\n#' A function that extracts an ETP time series from MODIS by \\bold{month} (2001-01-01T00:00:00Z - 2022-04-23T00:00:00).\n#'\n#' @param to,from character strings giving the start and end dates.\n#' @param band name of band.\n#' @param region the region, an sf object.\n#' @param fun function used for the zonal statistic (count, kurtosis, max, mean, median, min, mode, percentile, std, sum, variance).\n#' @param scale A nominal scale in meters of the projection to work in.\n#'\n#' @details Name of some bands.\n#' \\itemize{\n#' \\item \\bold{ET (kg/m²):} Total evapotranspiration.\n#' \\item \\bold{LE (J/m²):} Average latent heat flux.\n#' \\item \\bold{PET (kg/m²):} Total potential evapotranspiration.\n#' \\item \\bold{PLE (J/m²):} Average potential latent heat flux.\n#' \\item \\bold{ET_QC:} Evapotranspiration quality control flags\n#' }\n#'\n#' @return a tibble object with the new variables.\n#'\n#' @importFrom sf st_transform st_simplify\n#' @importFrom rgee sf_as_ee\n#' @importFrom dplyr select filter contains\n#' @importFrom purrr is_empty\n#'\n#' @examples\n#' \\dontrun{\n#'\n#' library(tidyverse)\n#' library(rgee)\n#' library(innovar)\n#' library(sf)\n#' ee_Initialize()\n#'\n#' # 1. Reading an sf object\n#' data(\"Peru\")\n#' region <- Peru\n#' region_ee <- pol_as_ee(region, id = \"distr\", simplify = 1000)\n#'\n#' # 2. 
Extracting climate information\n#' data <- region_ee %>%\n#' get_etp(from = \"2001-02-01\", to = \"2003-12-31\", band = \"ET\", fun = \"max\")\n#' }\n#' @export\n\n\nget_etp <- function(from, to, band, region, fun = \"count\", scale = 1000) {\n\n # Conditions about the times\n start_year <- substr(from, 1, 4) %>% as.numeric()\n end_year <- substr(to, 1, 4) %>% as.numeric()\n # Multiplying factor for each band\n\n multiply_factor <- c(\n ET = 0.1, LE = 0.0001, PET = 0.1, PLE = 0.0001, ET_QC = 1\n )\n\n # Error message\n\n if (start_year <= 2000 | end_year >= 2022) {\n stop(\"No data available for the requested period\")\n }\n\n # The main functions\n collection <- ee$ImageCollection(\"MODIS/006/MOD16A2\")\n # filter quality\n bitwiseExtract <- function(value, fromBit, toBit = fromBit) {\n maskSize <- ee$Number(1)$add(toBit)$subtract(fromBit)\n mask <- ee$Number(1)$leftShift(maskSize)$subtract(1)\n final <- value$rightShift(fromBit)$bitwiseAnd(mask)\n return(final)\n }\n\n filteApply <- function(image) {\n qa <- image$select(\"ET_QC\")\n etp <- image$select(c(band))\n # build filter\n filter1 <- bitwiseExtract(qa, 3, 4)\n # build mask\n mask <- filter1$neq(2)\n # apply mask\n return(etp$updateMask(mask))\n }\n\n # date of dataset\n months <- ee$List$sequence(1, 12)\n years <- ee$List$sequence(start_year, end_year)\n\n modis <- ee$\n ImageCollection$\n fromImages(years$map(\n ee_utils_pyfunc(function(y) {\n months$map(ee_utils_pyfunc(\n function(m) {\n collection$\n filter(ee$Filter$calendarRange(y, y, \"year\"))$\n filter(ee$Filter$calendarRange(m, m, \"month\"))$\n map(filteApply)$\n max()$\n set(\"year\", y)$\n set(\"month\", m)\n }\n ))\n })\n )$flatten())\n\n\n im_base <- modis$\n filter(ee$Filter$inList(\"month\", c(1:12)))\n\n if (start_year == end_year) {\n new_base <- im_base$\n filter(\n ee$Filter$inList(\n \"year\",\n list(\n c(\n start_year:end_year\n )\n )\n )\n )$toBands()$\n multiply(\n multiply_factor[[band]]\n )\n } else {\n new_base <- im_base$\n filter(\n ee$Filter$inList(\n \"year\",\n c(\n start_year:end_year\n )\n )\n )$\n toBands()$\n multiply(\n multiply_factor[[band]]\n )\n }\n\n\n # Dispatch to the requested zonal statistic and relabel the\n # monthly bands as <band><YYYY-MM>\n stat_funs <- list(\n count = ee_count, kurtosis = ee_kurstosis, max = ee_max,\n mean = ee_mean, median = ee_median, min = ee_min, mode = ee_mode,\n percentile = ee_percentile, std = ee_std, sum = ee_sum,\n variance = ee_variance\n )\n\n if (!fun %in% names(stat_funs)) {\n stop(\"`fun` must be one of: \", paste(names(stat_funs), collapse = \", \"))\n }\n\n 
data <- stat_funs[[fun]](\n new_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(data),\n suffix = band\n )\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1, 7\n )\n\n names(data)[id_names] <- sprintf(\"%s%s\", band, names_id)\n return(data)\n}\n" }, { "alpha_fraction": 0.662598729133606, "alphanum_fraction": 0.6690595746040344, "avg_line_length": 36.64864730834961, "blob_id": "41320937e97d915b98cbfa5ed0614283fba40d1d", "content_id": "f32c9e3f0a94443eb9ed0d37aa114e4e32594d37", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1393, "license_type": "permissive", "max_line_length": 135, "num_lines": 37, "path": "/R/read_batch.R", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "#' Utility for dataset batch importing in multiple formats\n#'\n#' Reference: https://www.gerkelab.com/blog/2018/09/import-directory-csv-purrr-readr/\n#'\n#' @param data_dir The directory path that contains the files that will be imported\n#' @param ext extension \"*.ext\" of the files that will be imported. Default \"csv\"\n#' @param fun the import function that will be passed to `map` to import datasets. 
Must be able to read files with the `ext` extension\n#' @param env Whether the file will be imported to .GlobalEnv. If `FALSE` a list containing all the datasets will be generated\n#' @param ... Arguments passed to the import function\n#'\n#' Import multiple files\n#'\n#' @examples\n#'\\dontrun{\n#' read_batch(data_dir = data_dir)\n#' read_batch(data_dir = data_dir, ext = \"dta\", fun = haven::read_dta)\n#' read_batch(data_dir = data_dir, ext = \"dta\", fun = haven::read_dta, env = F)\n#'}\n#' @importFrom purrr map\n#' @importFrom fs dir_ls\n#' @importFrom readr read_csv\n#' @export\nread_batch <- function(data_dir, ext = \"csv\", fun = readr::read_csv, env = TRUE, ...) {\n\n file <- dir_ls(data_dir, regexp = paste0(\"\\\\.\", ext, \"$\"))\n\n data_objects <- file %>%\n map(fun, ...)\n\n names(data_objects) <- sub('\\\\..*$', '', basename(file))\n\n if (isTRUE(env)) {\n list2env(data_objects, envir = .GlobalEnv)\n } else {\n assign(\"data_list\", data_objects, envir = as.environment(-1))\n }\n\n}\n" }, { "alpha_fraction": 0.5438465476036072, "alphanum_fraction": 0.5976582169532776, "avg_line_length": 49.55555725097656, "blob_id": "aede2c078f2232fd1e4c11d7033147984d13e66e", "content_id": "babd043e76a5fb24453c371fb3b613c59e6a7b4e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 4030, "license_type": "permissive", "max_line_length": 374, "num_lines": 81, "path": "/files/README.Rmd", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "---\ntitle: \"README\"\noutput: html_document\n---\n\n```{r}\nlibrary(sf)\nlibrary(dplyr)\nlibrary(rmapshaper)\nlibrary(ggplot2)\n```\n\n# District names of Peru\n\n1874 districts, 196 provinces, 25 regions.\n\nThe names have been modified so that they do not contain accented letters (e.g. \"ñ\") or symbols (e.g. \"_\").\n\n# Shapefiles and geometries of Peru\n\nAs of 03/01/2021, Peru has 1874 districts. The information should come from MINAM; however, at the time of writing the files are not accessible: https://geoservidor.minam.gob.pe/recursos/intercambio-de-datos/\n\nThe official shapefiles available are:\n\n- HDX: https://data.humdata.org/dataset/limites-de-peru contains 1873/1874 districts (as of 03/01/2021), missing the geometry of Pangoa, Satipo, Junin.\n\n- GADMTools package: `gadm_sp_loadCountries(\"PER\", level=3, basefile=\"./\")` contains 1815/1874 districts (as of 03/01/2021).\n\n- raster package: `getData()` contains 1815/1874 districts (as of 03/01/2021).\n\n- ArcGIS: https://www.arcgis.com/home/item.html?id=3c3831605626406586799b6b799cbc7c\n\n- GEOGPSPerú: https://www.geogpsperu.com/2014/03/base-de-datos-peru-shapefile-shp-minam.html contains 1873/1874 districts (as of 03/01/2021), missing the geometry of Pangoa, Satipo, Junin.\n\n- GEOGPSPerú 2: https://www.geogpsperu.com/2020/07/poblacion-por-distrito-2020-shapefile.html contains 1875/1874 districts (as of 03/01/2021), having as an \"extra\" the district of Santa María de Huachipa, which is a populated center that is part of Lurigancho-Huachipa. 
http://www.congreso.gob.pe/Docs/comisiones2017/Comision_de_Descentralizacioni/files/pd_pl_1317_-_huachpa_-_1605.pdf \n\nThe file \"shp_PER_adm3.Rdata\", accessible from the InnovaLab library/package, has 1874 districts generated from the GEOGPSPerú 2 data.\n\nFor space reasons, the file only keeps 10% of the original polygons (reducing the file size from ~60mb to ~1mb).\n\n```{r}\nshp <- st_read(\"../WIP/Poblacion_Estimada_2020_por_distrito_GEO_GPS_PERU_Juan_Pablo_Suyo_Pomalia_931381206\", \n stringsAsFactors = F) %>% dplyr::select(reg.code=CCDD,reg=NOMBDEP,\n prov.code=CCPP,prov=NOMBPROV,\n distr.code=CCDI,distr=NOMBDIST,\n capital=CAPITAL,ubigeo=UBIGEO,\n geometry)%>% \n # st_transform(4326) %>% \n ms_simplify(keep=0.1,keep_shapes=T) %>%\n dplyr::mutate(ubigeo = replace(ubigeo,ubigeo==\"150144\",\"150118\"))%>%\n dplyr::group_by(ubigeo) %>% \n dplyr::summarize(geometry = st_union(geometry)) %>% ungroup() %>%\n left_join(st_read(\"../WIP/Poblacion_Estimada_2020_por_distrito_GEO_GPS_PERU_Juan_Pablo_Suyo_Pomalia_931381206\", \n stringsAsFactors = F)%>% as.data.frame(.) %>% dplyr::select(reg.code=CCDD,reg=NOMBDEP,\n prov.code=CCPP,prov=NOMBPROV,\n distr.code=CCDI,distr=NOMBDIST,\n capital=CAPITAL,ubigeo=UBIGEO))\n\n\nshp <- shp %>%\n mutate(distr =gsub(\"_\", \" \",\n gsub(\"-\", \" \", iconv(distr,\n from=\"UTF-8\",\n to=\"ASCII//TRANSLIT\"), \n fixed=TRUE),\n fixed=TRUE),\n prov = iconv(prov,\n from=\"UTF-8\",\n to=\"ASCII//TRANSLIT\"),\n reg = iconv(reg,\n from=\"UTF-8\",\n to=\"ASCII//TRANSLIT\"),\n distr = ifelse(distr==\"HUAYA\",\"HUALLA\",\n ifelse(distr==\"MAZAMARI PANGOA\",\"MAZAMARI\",\n ifelse(distr==\"LARAOS\" & prov==\"HUAROCHIRI\",\"SAN PEDRO DE LARAOS\",distr))),\n distr=gsub(\"?\", \"\",distr, \n fixed=TRUE),\n distr= replace(distr,distr==\"HUAYLLO\" & prov==\"AYMARAES\",\"IHUAYLLO\"))\n\nsaveRDS(shp,\"./shp_PER_adm3.Rdata\") \n```\n" }, { "alpha_fraction": 0.5328057408332825, "alphanum_fraction": 0.5916517972946167, "avg_line_length": 28.118959426879883, "blob_id": "d646d11b13f2a4425f3e94a6b39225370528e317", "content_id": "c5d77fdbfae2a2427310ee6eb70266d22028a5f1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 7834, "license_type": "permissive", "max_line_length": 87, "num_lines": 269, "path": "/R/scale_innova.R", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "# Palettes as vectors of colors\n\nccvi <- c(\n `ccvi1` = \"#005378\", `ccvi2` = \"#006FA0\", `ccvi3` = \"#5C99B3\",\n `ccvi4` = \"#9BCFC8\", `ccvi5` = \"#F2DDBD\"\n)\n\nnpr <- c(\n `npr1` = \"#03595C\", `npr2` = \"#58C3B0\", `npr3` = \"#E7E2BF\",\n `npr4` = \"#EFA255\", `npr5` = \"#DC4326\"\n)\n\nblmbrg <- c(\n `blmbrg1` = \"#C397DC\", `blmbrg2` = \"#9DFAF0\", `blmbrg3` = \"#85D8FC\",\n `blmbrg4` = \"#F0F0F0\"\n)\n\necomst <- c(\n `ecomst1` = \"#EC251D\", `ecomst2` = \"#F25941\", `ecomst3` = \"#F58A6A\",\n `ecomst4` = \"#F8B797\", `ecomst5` = \"#FED1BE\", `ecomst6` = \"#CFCDC6\"\n)\n\nctp <- c(\n `ctp1` = \"#1C0B07\", `ctp2` = \"#632718\", `ctp3` = \"#D04D39\",\n `ctp4` = \"#ED9991\", `ctp5` = \"#F9DDDC\"\n)\n\njama <- c(\n `jama1` = \"#0B4E60\", `jama2` = \"#186179\", `jama3` = \"#2E7998\",\n `jama4` = \"#2C9BB4\", `jama5` = \"#39A69F\", `jama6` = \"#7DC9C1\",\n `jama7` = \"#DAE2BC\", `jama8` = \"#FEBE82\", `jama9` = \"#F69354\",\n `jama10` = \"#8C3A33\"\n)\n\nmlobo <- c(\n `mlobo1` = \"#191E34\", `mlobo2` = \"#094B56\", `mlobo3` = \"#00787C\",\n `mlobo4` = \"#3DA48F\", `mlobo5` = 
\"#FEECD5\"\n)\n\nbtran <- c(\n `btran1` = \"#7F1D6E\", `btran2` = \"#DA2D7A\", `btran3` = \"#E3506E\",\n `btran4` = \"#F68671\", `btran5` = \"#FEDCA0\"\n)\n\nnasa <- c(\n `nasa1` = \"#070510\", `nasa2` = \"#240F4F\", `nasa3` = \"#501278\",\n `nasa4` = \"#942A7F\", `nasa5` = \"#C33C72\", `nasa6` = \"#F7705E\",\n `nasa7` = \"#F5E6B2\", `nasa8` = \"#B2CFD3\", `nasa9` = \"#87BEEB\",\n `nasa10` = \"#6BB4F8\"\n)\n\npolitico <- c(\n `strongly` = \"#F1BBAC\",`supporting` = \"#F8D2B0\",`mixed` = \"#DBDADA\",\n `nosupporting` = \"#C67399\",`opposing` = \"#9C4482\"\n)\n\nmortality <- c(\n `veryhigh` =\"#6E0900\",`high` = \"#FF4D00\",`middle` =\"#7BDBD5\",\n `low` = \"#006495\",`verylow` = \"#4C194A\"\n )\n\n# The 08 best colour palettes for 2020\n\ngreen <- c(\n `veryhigh` =\"#bc6c25\",`high` = \"#dda15e\",`middle` =\"#fefae0\",\n `low` = \"#283618\",`verylow` = \"#606c38\"\n)\n\ngolden <- c(\n `veryhigh` =\"#772e25\",`high` = \"#c44536\",`middle` =\"#edddd4\",\n `low` = \"#197278\",`verylow` = \"#283d3b\"\n)\n\ndark_green <- c(\n `veryhigh` =\"#2f3e46\",`high` = \"#354f52\",`middle` =\"#52796f\",\n `low` = \"#84a98c\",`verylow` = \"#cad2c5\"\n )\n\nblue_fall <- c(\n `veryhigh` =\"#eaac8b\",`high` = \"#e56b6f\",`middle` =\"#b56576\",\n `low` = \"#6d597a\",`verylow` = \"#355070\"\n)\n\nvermilion <- c(\n `veryhigh` =\"#ffba08\",`high` = \"#f48c06\",`middle` =\"#dc2f02\",\n `low` = \"#6a040f\",`verylow` = \"#03071e\"\n)\n\nwheat <- c(\n `veryhigh` =\"#432534\",`high` = \"#c44900\",`middle` =\"#efd6ac\",\n `low` = \"#183a37\",`verylow` = \"#04151f\"\n)\n\npeach <- c(\n `veryhigh` =\"#041f1e\",`high` = \"#1e2d2f\",`middle` =\"#c57b57\",\n `low` = \"#f1ab86\",`verylow` = \"#f7dba7\"\n)\n\npersian <- c(\n `veryhigh` =\"#495867\",`high` = \"#c18c5d\",`middle` =\"#ce796b\",\n `low` = \"#e7ad99\",`verylow` = \"#ecc8af\"\n)\n\n# List of palettes\n\ninnova_palettes <- list(\n `ccvi` = ccvi, `npr` = npr, `blmbrg` = blmbrg, `ecomst` = ecomst, `ctp` = ctp,\n `jama` = jama, `mlobo` = mlobo, `btran` = btran, `nasa` = nasa,\n `politico` = politico, `mortality` = mortality,\n `green` = green,`golden` = golden,`dark_green` = dark_green,\n `blue_fall` = blue_fall,`vermilion` = vermilion,\n `wheat` = wheat,`peach` = peach,`persian` = persian\n)\n\n#' Return function to interpolate a lis color palette\n#'\n#' @param palette Character name of palette in lis_palettes\n#' @param reverse Boolean indicating whether the palette should be reversed\n#' @param ... Additional arguments to pass to colorRampPalette()\n#' @examples\n#'\\dontrun{\n#' library(innovar)\n#' library(scales)\n#' pal <- innova_pal(\"ccvi\")(9)\n#' show_col(pal)\n#'}\n#'\n#' @importFrom grDevices colorRampPalette\n#' @export innova_pal\n\ninnova_pal <- function(palette = \"ccvi\", reverse = FALSE, ...) {\n pal <- innova_palettes[[palette]]\n if (reverse) pal <- rev(pal)\n colorRampPalette(pal,...)\n}\n\n#' Color scale constructor for lis colors\n#'\n#' @param palette Character name of palette in lis_palettes\n#' @param discrete Boolean indicating whether color aesthetic is discrete or not\n#' @param reverse Boolean indicating whether the palette should be reversed\n#' @param ... 
Additional arguments passed to discrete_scale() or\n#' scale_color_gradientn(), used respectively when discrete is TRUE or FALSE\n#' @examples\n#'\\dontrun{\n#' library(ggplot2)\n#' library(innovar)\n#' # Default discrete palette\n#' ggplot(iris, aes(Sepal.Width, Sepal.Length, color = Species)) +\n#' geom_point(size = 4) +\n#' scale_color_innova()\n#'\n#' # Default continuous palette\n#' ggplot(iris, aes(Sepal.Width, Sepal.Length, color = Sepal.Length)) +\n#' geom_point(size = 4, alpha = .6) +\n#' scale_color_innova(discrete = FALSE)\n#'}\n#'\n#' @importFrom ggplot2 discrete_scale scale_color_gradientn\n#' @export scale_color_innova\n\nscale_color_innova <- function(palette = \"ccvi\", discrete = TRUE, reverse = FALSE,\n ...) {\n pal <- innova_pal(palette = palette, reverse = reverse)\n if (discrete) {\n discrete_scale(\"colour\", paste0(\"lis_\", palette), palette = pal, ...)\n }\n else {\n scale_color_gradientn(colours = pal(256), ...)\n }\n}\n\n#' Fill scale constructor for innova colors\n#'\n#' @param palette Character name of palette in innova_palettes\n#' @param discrete Boolean indicating whether color aesthetic is discrete or not\n#' @param reverse Boolean indicating whether the palette should be reversed\n#' @param ... Additional arguments passed to discrete_scale() or\n#' scale_fill_gradientn(), used respectively when discrete is TRUE or FALSE\n#' @examples\n#'\\dontrun{\n#' library(ggplot2)\n#' library(innovar)\n#' ggplot(mpg, aes(manufacturer, fill = manufacturer)) +\n#' geom_bar() +\n#' theme(axis.text.x = element_text(angle = 45, hjust = 1)) +\n#' scale_fill_innova()\n#'}\n#'\n#' @importFrom ggplot2 discrete_scale scale_fill_gradientn\n#' @export scale_fill_innova\n\nscale_fill_innova <- function(palette = \"ccvi\", discrete = TRUE, reverse = FALSE,\n ...) {\n pal <- innova_pal(palette = palette, reverse = reverse)\n\n if (discrete) {\n discrete_scale(\"fill\", paste0(\"lis_\", palette), palette = pal, ...)\n } else {\n scale_fill_gradientn(colours = pal(256), ...)\n }\n}\n\n\n#' Show a set of InnovaLab color palettes\n#' @param name Character name of palette in innova_palettes\n#' @param rev Boolean indicating whether the palette should be reversed\n#' @param n Integer, number of colors\n#' @param ... Additional arguments passed to seecol()\n#'\n#' @details Name of color palette available\n#' \\itemize{\n#' \\item \\bold{ccvi}\n#' \\item \\bold{npr}\n#' \\item \\bold{blmbrg}\n#' \\item \\bold{ctp}\n#' \\item \\bold{jama}\n#' \\item \\bold{mlobo}\n#' \\item \\bold{btran}\n#' \\item \\bold{nasa}\n#' \\item \\bold{politico}\n#' \\item \\bold{mortality}\n#' }\n#' @examples\n#' \\dontrun{\n#' library(innovar)\n#' # show_pal(name = \"nasa\",rev = TRUE, n = 5)\n#' show_pal()\n#' }\n#'@export show_pal\n\nshow_pal <- function(name = \"all\",n = 5,rev = TRUE,...){\n if (!requireNamespace(\"unikn\", quietly = TRUE)) stop(\"Package 'unikn' is required.\")\n if(sum(unique(name %in% names(innova_palettes))) == 1) {\n list_names <- innova_palettes[name]\n range_color <- sapply(X = list_names,FUN = function(x){list(x[1:n])})\n\n if(rev == 1){\n list_panel <- rev(range_color) %>%\n map(.f = ~.) 
%>%\n unikn::seecol(\n pal_names = names(list_names),\n title = \"Name of specific innovar colour palettes\"\n ,...\n )\n }else{\n list_panel <- range_color %>%\n map(.f = ~.) %>%\n unikn::seecol(\n pal_names = names(list_names),\n title = \"Name of specific innovar colour palettes\"\n ,...\n )\n }\n\n } else if (name == \"all\"){\n list_names <- names(innova_palettes)\n list_panel <- list_names %>%\n map(.f = ~innova_pal(.,reverse = rev)(n=n)) %>%\n unikn::seecol(\n pal_names = list_names,\n title = \"Name of all innovar colour palettes\",\n ...\n )\n } else {\n stop(\"Color palette is incorrect, please use show_pal() and choose a color\")\n }\n\n}\n\n" }, { "alpha_fraction": 0.6020202040672302, "alphanum_fraction": 0.6083333492279053, "avg_line_length": 25.052631378173828, "blob_id": "d82aa7d852beb8287bee17e657b0720911893482", "content_id": "9aa39eefa34c5da9e618936b566766df2f3511aa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 3960, "license_type": "permissive", "max_line_length": 71, "num_lines": 152, "path": "/R/create_templates.R", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "#' Create a Data Analysis Project\n#'\n#' Create a skeleton for a data analysis project in 2\n#' formats: \"report\", report-centric (Rmd's); \"production\",\n#' modularised (separate scripts and Rmd's). Use the\n#' `create_dap()` function for this.\n#'\n#' @param dir Directory for data analysis project.\n#' @param project_type The type of project is indicated.\n#' The options are \"report\" or \"production\".\n#' @param git_activate Configure the project so that it\n#' can use git.\n#' @param renv_activate Configure the project so that it\n#' can use renv.\n#'\n#' @note The `dir` parameter is required for the creation\n#' of the project. By default a project of type report is configured.\n#'\n#' @examples\n#' \\dontrun{\n#' library(innovar)\n#' create_dap(\"Epi_project\", \"report\")\n#' }\n#' @export\n\ncreate_dap <- function(dir,\n project_type = \"report\",\n git_activate = FALSE,\n renv_activate = FALSE) {\n # Check if it's called from console\n if (missing(dir) & interactive()) {\n dir <- getwd()\n } else {\n dir.create(dir, recursive = TRUE, showWarnings = FALSE)\n }\n\n fs::dir_create(fs::path(dir, \"00_legacy\"))\n fs::dir_create(fs::path(dir, \"01_data/processed\"))\n fs::dir_create(fs::path(dir, \"01_data/raw\"))\n fs::dir_create(fs::path(dir, \"02_output/plots\"))\n fs::dir_create(fs::path(dir, \"02_output/tables\"))\n fs::dir_create(fs::path(dir, \"02_output/reports\"))\n fs::dir_create(fs::path(dir, \"03_functions\"))\n\n if (project_type == \"production\") {\n fs::dir_create(fs::path(dir, \"04_analysis/scripts\"))\n fs::dir_create(fs::path(dir, \"04_analysis/notebooks\"))\n }\n\n if (git_activate) {\n gert::git_init(fs::path(dir))\n }\n\n if (renv_activate) {\n renv::init(fs::path(dir), restart = FALSE)\n path_innovar <- fs::path_package(\"innovar\")\n message_install <- paste0(\n \"\\nIf you want to install innovar package, use:\\n\",\n \"renv::use('\", path_innovar, \"')\\n\\n\",\n \"Or if you want the latest version available:\\n\",\n \"renv::use('healthinnovation/innovar')\"\n )\n\n writeLines(\n paste0(\n \"source('renv/activate.R')\\n\\n\",\n \"message(cat(\\\"\", message_install, \"\\\"))\"\n ),\n \".Rprofile\",\n useBytes = TRUE\n )\n }\n}\n\ndata_analysis_project <- function(dir, ...) 
{\n  params <- list(...)\n  create_dap(\n    dir, params$project_type,\n    params$git_activate,\n    params$renv_activate\n  )\n}\n\n#' Use Innovar Xaringan Template\n#'\n#' This function populates the working directory with\n#' files corresponding to the xaringan template of the\n#' package.\n#'\n#' @param file Name of the `.Rmd` file to create.\n#'\n#' @examples\n#' \\dontrun{\n#' library(innovar)\n#' use_xaringan(\"class_01.Rmd\")\n#' }\n#' @export\n\nuse_xaringan <- function(file) {\n  if (missing(file)) {\n    if (interactive()) {\n      file <- readline(\"Enter a name for the file .Rmd: \")\n    } else {\n      stop(\"file argument must be specified\", call. = FALSE)\n    }\n  }\n\n  if (tolower(fs::path_ext(file)) != \"rmd\") {\n    file <- fs::path_ext_set(fs::path_ext_remove(file), \"Rmd\")\n  }\n\n  rmarkdown::draft(file,\n    template = \"innovar-xaringan\",\n    package = \"innovar\",\n    edit = FALSE\n  )\n}\n\n\n#' Use Innovar Rmarkdown Flatly\n#'\n#' This function populates the working directory with\n#' files corresponding to the Innovar Rmarkdown Flatly template.\n#'\n#' @param file Name of the `.Rmd` file to create.\n#'\n#' @examples\n#' \\dontrun{\n#' library(innovar)\n#' use_rmd_flatly(\"report_01.Rmd\")\n#' }\n#' @export\n\nuse_rmd_flatly <- function(file) {\n  if (missing(file)) {\n    if (interactive()) {\n      file <- readline(\"Enter a name for the file .Rmd: \")\n    } else {\n      stop(\"file argument must be specified\", call. = FALSE)\n    }\n  }\n\n  if (tolower(fs::path_ext(file)) != \"rmd\") {\n    file <- fs::path_ext_set(fs::path_ext_remove(file), \"Rmd\")\n  }\n\n  rmarkdown::draft(file,\n    template = \"innovar-rmd-flatly\",\n    package = \"innovar\",\n    edit = FALSE\n  )\n}\n" }, { "alpha_fraction": 0.5230914354324341, "alphanum_fraction": 0.5607917308807373, "avg_line_length": 22.577777862548828, "blob_id": "744e82cf74ac53afaf45e4f68de730cc9effa0cd", "content_id": "ff1ba32fec24c0e4d8a2385e63c46751c557ba99", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 2122, "license_type": "permissive", "max_line_length": 122, "num_lines": 90, "path": "/R/get_urban.R", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "#' Extract urban area data of MODIS Landcover\n#'\n#' A function that extracts a time series of the urban area of MODIS Landcover (2001-01-01T00:00:00Z - 2020-01-01T00:00:00).\n#'\n#' @param to,from String objects giving the final and starting dates.\n#' @param region An Earth Engine feature or feature collection.\n#' @param scale A nominal scale in meters of the projection to work in.\n#'\n#' @return a tibble object with the new variable in km2\n#' @export\n#' @importFrom sf st_transform st_simplify\n#' @importFrom rgee sf_as_ee\n#' @importFrom dplyr select filter\n#' @examples\n#' \\dontrun{\n#'\n#' library(innovar)\n#' library(rgee)\n#' library(sf)\n#' ee_Initialize()\n#' data(\"Peru\")\n#' region <- Peru\n#' region_ee <- pol_as_ee(region, id = 'distr', simplify = 1000)\n#' data <- get_urban(from = '2008-01-01', to = '2010-01-01', region = region_ee)\n#'\n#' }\n# Function for extracting urban areas\n\nget_urban <- function(from, to, region, scale = 1000) {\n\n  # Conditions about the times\n  start_year <- substr(from, 1, 4) %>% as.numeric()\n  end_year <- substr(to, 1, 4) %>% as.numeric()\n\n  if (start_year == end_year) {\n    year <- unique(\n      c(start_year:end_year)\n    ) %>%\n      list()\n\n    year_list <- ee$List(year)\n  } else {\n    year <- unique(\n      c(start_year:end_year)\n    )\n    year_list <- ee$List(year)\n  }\n\n  # Message of error\n  if (end_year < 2001 | start_year > 2019) {\n    print(sprintf(\"Requested dates fall outside the years (2001-2019) for which there
exist data of urban area\"))\n }\n\n list_urban <-\n year_list$\n map(\n ee_utils_pyfunc(\n function(x) {\n ee$ImageCollection(\"MODIS/006/MCD12Q1\")$\n select(c('LC_Type2'))$\n filter(\n ee$Filter$calendarRange(\n x,\n x,\n \"year\")\n )$\n map(function(img) img$eq(list(13)))$\n mean()$\n multiply(\n ee$Image$pixelArea())$\n divide(100000)$\n rename('urban')\n }\n )\n )\n\n urban_img <- ee$ImageCollection$\n fromImages(list_urban)$\n toBands()$\n clip(region)\n\n data <-\n ee_sum(\n x = urban_img,\n y = region,\n scale = scale\n )\n\n return(data)\n}\n" }, { "alpha_fraction": 0.6410812139511108, "alphanum_fraction": 0.6564781069755554, "avg_line_length": 17.458948135375977, "blob_id": "6295cb075a2da14cea45e18d967f2843578578d3", "content_id": "9eebeff62284122a20824afd7d2f35b4b6eda6ec", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 8770, "license_type": "permissive", "max_line_length": 143, "num_lines": 475, "path": "/inst/rmarkdown/templates/innovar-xaringan/skeleton/skeleton.Rmd", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "---\ntitle: \"Plantilla Innovar Xaringan\"\nsubtitle: \"\"\nauthor:\n - \"Laboratorio de Innovación en Salud\"\ndate: '`r Sys.Date()`'\noutput:\n xaringan::moon_reader:\n css: [xaringan-lis.css, custom.css]\n nature:\n slideNumberFormat: \"%current%/%total%\"\n highlightStyle: idea\n highlightLines: true\n ratio: 16:9\n countIncrementalSlides: true\n seal: false\n---\n\n```{r setup, include=FALSE}\noptions(htmltools.dir.version = FALSE)\nknitr::opts_chunk$set(\n fig.width = 9, fig.height = 3.5, fig.retina = 3,\n out.width = \"100%\",\n cache = FALSE,\n echo = TRUE,\n message = FALSE,\n warning = FALSE,\n hiline = TRUE\n)\n\nlibrary(xaringanthemer)\nlibrary(magrittr)\nlibrary(metathis)\n```\n\n```{r xaringan-themer, include=FALSE, warning=FALSE}\nstyle_duo_accent(\n primary_color = \"#2f4871\",\n secondary_color = \"#2e91af\",\n inverse_header_color = \"#FFFFFF\",\n header_color = \"#106075\",\n background_color = \"#e9ebee\",\n header_font_google = google_font(\"Oswald\", \"700\", \"700i\"),\n text_font_google = google_font(\n \"Spartan\", \"400\", \"400i\",\n \"700\", \"700i\"\n ),\n code_font_google = google_font(\n \"Fira Code\", \"400\",\n \"700\"\n ),\n outfile = \"xaringan-lis.css\"\n)\n```\n\n```{r xaringan-tile-view, echo=FALSE}\nxaringanExtra::use_tile_view()\n```\n\n```{r broadcast, echo=FALSE}\nxaringanExtra::use_broadcast()\n```\n\n```{r xaringan-scribble, echo=FALSE}\nxaringanExtra::use_scribble()\n```\n\n```{r xaringan-panelset, echo=FALSE}\nxaringanExtra::use_panelset()\n```\n\n```{r xaringanExtra-clipboard, echo=FALSE}\nhtmltools::tagList(\n xaringanExtra::use_clipboard(\n button_text = \"<i class=\\\"fa fa-clipboard\\\"></i>\",\n success_text = \"<i class=\\\"fa fa-check\\\" style=\\\"color: #90BE6D\\\"></i>\",\n error_text = \"<i class=\\\"fa fa-times-circle\\\" style=\\\"color: #F94144\\\"></i>\"\n ),\n rmarkdown::html_dependency_font_awesome()\n)\n```\n\n```{r xaringan-logo, echo=FALSE}\nxaringanExtra::use_logo(\n image_url = \"img/InnovaLab_logo_blue.png\",\n width = \"100px\",\n height = \"116px\"\n)\n```\n\n```{r xaringan-extra-styles, echo=FALSE}\nxaringanExtra::use_extra_styles(\n hover_code_line = TRUE,\n mute_unhighlighted_code = TRUE\n)\n```\n\n```{r xaringanExtra, echo = FALSE}\nxaringanExtra::use_progress_bar(\n color = \"#0051BA\",\n location = \"bottom\"\n)\n```\n\n```{r metathis, echo=FALSE}\nmeta() %>%\n meta_general(\n description = 
\"Innovar theme of R package xaringan\",\n robots = \"index,follow\",\n generator = \"xaringan and remark.js\"\n ) %>%\n meta_viewport(\n orientation = \"landscape\"\n ) %>%\n meta_name(\"github-repo\" = \"healthinnovation/innovar-xaringan\") %>%\n meta_social(\n title = \"Plantilla Innovar Xaringan\",\n url = \"https://healthinnovation.github.io/innovar-xaringan/\",\n image = \"https://raw.githubusercontent.com/healthinnovation/innovar-xaringan/main/img/cover-plantilla.png\",\n image_alt = \"Innovar theme of R package xaringan\",\n og_type = \"website\",\n og_author = \"Laboratorio de Innovación en Salud\",\n twitter_card_type = \"summary_large_image\",\n twitter_creator = \"@innovalab_imt\"\n )\n```\n\n\n<br>\n<br>\n\n# `r rmarkdown::metadata$title`\n\n## `r rmarkdown::metadata$subtitle`\n\n## `r rmarkdown::metadata$author`\n\n### `r Sys.Date()`\n\n<br>\n\n[`r fontawesome::fa(name = \"github\")` @healthinnovation](https://github.com/healthinnovation)\n[`r fontawesome::fa(name = \"twitter\")` @innovalab_imt](https://twitter.com/innovalab_imt)\n[`r fontawesome::fa(name = \"link\")` innovalab.info](https://www.innovalab.info/)\n\n\n---\n\n## Typography\n\nText can be **bold**, _italic_, ~~strikethrough~~, or `inline code`.\n\n[Link to another slide](#colors).\n\n### Lorem Ipsum\n\nDolor imperdiet nostra sapien scelerisque praesent curae metus facilisis dignissim tortor.\nLacinia neque mollis nascetur neque urna velit bibendum.\nHimenaeos suspendisse leo varius mus risus sagittis aliquet venenatis duis nec.\n\n- Dolor cubilia nostra nunc sodales\n\n- Consectetur aliquet mauris blandit\n\n- Ipsum dis nec porttitor urna sed\n\n---\nname: colors\n\n## Colors\n\n.left-column[\nText color\n\n[Link Color](#3)\n\n**Bold Color**\n\n_Italic Color_\n\n`Inline Code`\n]\n\n.right-column[\nLorem ipsum dolor sit amet, [consectetur adipiscing elit (link)](#3),\nsed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\nErat nam at lectus urna.\nPellentesque elit ullamcorper **dignissim cras tincidunt (bold)** lobortis feugiat.\n_Eros donec ac odio tempor_ orci dapibus ultrices.\nId porta nibh venenatis cras sed felis eget velit aliquet.\nAliquam id diam maecenas ultricies mi.\nEnim sit amet\n`code_color(\"inline\")`\nvenenatis urna cursus eget nunc scelerisque viverra.\n]\n\n---\n\n# Big Topic or Inverse Slides `#`\n\n## Slide Headings `##`\n\n### Sub-slide Headings `###`\n\n#### Bold Call-Out `####`\n\nThis is a normal paragraph text. 
Only use header levels 1-4.\n\n##### Possible, but not recommended `#####`\n\n###### Definitely don't use h6 `######`\n\n---\n\n# Left-Column Headings\n\n.left-column[\n## First\n\n## Second\n\n## Third\n]\n\n.right-column[\nDolor quis aptent mus a dictum ultricies egestas.\n\nAmet egestas neque tempor fermentum proin massa!\n\nDolor elementum fermentum pharetra lectus arcu pulvinar.\n]\n\n---\nclass: inverse center middle\n\n# Topic Changing Interstitial\n\n--\n\n```\nclass: inverse center middle\n```\n\n---\nlayout: true\n\n## Blocks\n\n---\n\n### Blockquote\n\n> This is a blockquote following a header.\n>\n> When something is important enough, you do it even if the odds are not in your favor.\n\n---\n\n### Code Blocks\n\n#### R Code\n\n```{r eval=FALSE}\nggplot(gapminder) + #<<\n aes(x = gdpPercap, y = lifeExp, size = pop, color = country) +\n geom_point() +\n facet_wrap(~year)\n```\n\n#### JavaScript\n\n```js\nvar fun = function lang(l) {\n dateformat.i18n = require('./lang/' + l)\n return true;\n}\n```\n\n---\n\n### More R Code\n\n```{r eval=FALSE}\ndplyr::starwars %>% dplyr::slice_sample(n = 4)\n```\n\n---\n\n```{r message=TRUE, eval=requireNamespace(\"cli\", quietly = TRUE)}\ncli::cli_alert_success(\"It worked!\")\n```\n\n--\n\n```{r message=TRUE}\nmessage(\"Just a friendly message\")\n```\n\n--\n\n```{r warning=TRUE}\nwarning(\"This could be bad...\")\n```\n\n--\n\n```{r error=TRUE}\nstop(\"I hope you're sitting down for this\")\n```\n\n\n---\nlayout: true\n\n## Tables\n\n---\nexclude: `r if (requireNamespace(\"tibble\", quietly=TRUE)) \"false\" else \"true\"`\n\n```{r eval=requireNamespace(\"tibble\", quietly=TRUE)}\ntibble::as_tibble(mtcars)\n```\n\n---\n\n```{r}\nknitr::kable(head(mtcars), format = \"html\")\n```\n\n---\nexclude: `r if (requireNamespace(\"DT\", quietly=TRUE)) \"false\" else \"true\"`\n\n```{r eval=requireNamespace(\"DT\", quietly=TRUE)}\nDT::datatable(head(mtcars), fillContainer = FALSE, options = list(pageLength = 4))\n```\n\n---\nlayout: true\n\n## Lists\n\n---\n\n.pull-left[\n#### Here is an unordered list:\n\n* Item foo\n* Item bar\n* Item baz\n* Item zip\n]\n\n.pull-right[\n\n#### And an ordered list:\n\n1. Item one\n1. Item two\n1. Item three\n1. Item four\n]\n\n---\n\n### And a nested list:\n\n- level 1 item\n - level 2 item\n - level 2 item\n - level 3 item\n - level 3 item\n- level 1 item\n - level 2 item\n - level 2 item\n - level 2 item\n- level 1 item\n - level 2 item\n - level 2 item\n- level 1 item\n\n---\n\n### Nesting an ol in ul in an ol\n\n- level 1 item (ul)\n 1. level 2 item (ol)\n 1. level 2 item (ol)\n - level 3 item (ul)\n - level 3 item (ul)\n- level 1 item (ul)\n 1. level 2 item (ol)\n 1. level 2 item (ol)\n - level 3 item (ul)\n - level 3 item (ul)\n 1. level 4 item (ol)\n 1. 
level 4 item (ol)\n - level 3 item (ul)\n - level 3 item (ul)\n\n---\nlayout: true\n\n## Plots\n\n---\n\n```{r plot-example, eval=requireNamespace(\"ggplot2\", quietly=TRUE)}\nlibrary(ggplot2)\n(g <- ggplot(mpg) +\n aes(hwy, cty, color = class) +\n geom_point())\n```\n\n---\n\n```{r plot-example-themed, eval=requireNamespace(\"ggplot2\", quietly=TRUE)}\ng + xaringanthemer::theme_xaringan(text_font_size = 16, title_font_size = 18) +\n ggtitle(\"A Plot About Cars\")\n```\n\n---\nlayout: false\n\n## Square image\n\n<center><img src=\"https://octodex.github.com/images/labtocat.png\" height=\"400px\" /></center>\n\n.footnote[GitHub Octocat]\n\n---\n\n### Wide image\n\n![](https://docs.github.com/assets/cb-23923/images/help/repository/branching.png)\n\n.footnote[Wide images scale to 100% slide width]\n\n---\n\n## Two images\n\n.pull-left[\n![](https://octodex.github.com/images/motherhubbertocat.png)\n]\n\n.pull-right[\n![](https://octodex.github.com/images/dinotocat.png)\n]\n\n---\n\n### Definition lists can be used with HTML syntax.\n\n<dl>\n<dt>Name</dt>\n<dd>Godzilla</dd>\n<dt>Born</dt>\n<dd>1952</dd>\n<dt>Birthplace</dt>\n<dd>Japan</dd>\n<dt>Color</dt>\n<dd>Green</dd>\n</dl>\n\n---\nclass: center, middle\n\n# Thanks!\n\nSlides created via the R packages:\n\n[**xaringan**](https://github.com/yihui/xaringan)<br>\n[gadenbuie/xaringanthemer](https://github.com/gadenbuie/xaringanthemer)\n\nThe chakra comes from [remark.js](https://remarkjs.com), [**knitr**](http://yihui.name/knitr), and [R Markdown](https://rmarkdown.rstudio.com).\n" }, { "alpha_fraction": 0.5119360685348511, "alphanum_fraction": 0.5318659543991089, "avg_line_length": 22.536083221435547, "blob_id": "85b71cb0637ca063d1611e5067eee97a388c422b", "content_id": "68dfa75932de6699261ee76cde314dacc4680adf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 9136, "license_type": "permissive", "max_line_length": 172, "num_lines": 388, "path": "/R/get_no2.R", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "#' Extract Nitrogen Dioxide data of Sentinel5\n#'\n#' A function that extract a time series of nitrogen dioxide (2018-07-10T10:05:44Z - 2022-05-15T00:00:00).\n#'\n#' @param to,from it's a string object,starting and final date.\n#' @param band name of band.\n#' @param region is a feature or feature collection.\n#' @param fun function for extract statistic zonal (count, kurtosis, max, mean, median, min, mode, percentile, std, sum, variance, first).\n#' @param scale A nominal scale in meters of the projection to work in.\n#'\n#' @details Name of some bands.\n#' \\itemize{\n#' \\item \\bold{NO2_column_number_density (mol/m²):} Total vertical column of NO2 (ratio of the slant column density of NO2 and the total air mass factor).\n#' \\item \\bold{tropospheric_NO2_column_number_density (mol/m²):} tropospheric vertical column of NO2.\n#' \\item \\bold{stratospheric_NO2_column_number_density (mol/m²):} stratospheric vertical column of NO2\n#' \\item \\bold{NO2_slant_column_number_density (mol/m²):} NO2 slant column density\n#' \\item \\bold{tropopause_pressure (Pa):} topopause pressure\n#' \\item \\bold{absorbing_aerosol_index:} \tAerosol index (at wavelengths 354/388, i.e. the OMI pair) from the AER_AI level 2 product. See Level 2 Algorithms - Aerosol Index.\n#' \\item \\bold{cloud_fraction:} Effective cloud fraction. 
See the Sentinel 5P L2 Input/Output Data Definition Spec, p.220.\n#' \item \bold{sensor_altitude (m):} Altitude of the satellite with respect to the geodetic sub-satellite point (WGS84).\n#' \item \bold{sensor_azimuth_angle (degrees):} Azimuth angle of the satellite at the ground pixel location (WGS84); angle measured East-of-North.\n#' \item \bold{sensor_zenith_angle (degrees):} Zenith angle of the satellite at the ground pixel location (WGS84); angle measured away from the vertical.\n#' \item \bold{solar_azimuth_angle (degrees):} Azimuth angle of the Sun at the ground pixel location (WGS84); angle measured East-of-North.\n#' \item \bold{solar_zenith_angle (degrees):} Zenith angle of the Sun at the ground pixel location (WGS84); angle measured away from the vertical.\n#' }\n#'\n#' @return a tibble object with the new variables.\n#' @importFrom sf st_transform st_simplify\n#' @importFrom rgee sf_as_ee\n#' @importFrom dplyr select filter contains\n#' @importFrom purrr is_empty\n#'\n#' @examples\n#' \dontrun{\n#'\n#' library(tidyverse)\n#' library(rgee)\n#' library(innovar)\n#' library(sf)\n#' ee_Initialize()\n#'\n#' # 1. Reading a sf object\n#' data(\"Peru\")\n#' region <- Peru\n#' region_ee <- pol_as_ee(region, id = 'distr', simplify = 1000)\n#' # 2. Extracting climate information\n#' data <- region_ee %>% get_no2(\n#'   from = \"2019-02-01\", to = \"2019-12-31\",\n#'   band = \"NO2_column_number_density\", fun = \"max\")\n#' }\n#' @export\n\nget_no2 <- function(from, to, band, region, fun = \"max\", scale = 1000) {\n\n  # Conditions about the times\n  start_year <- substr(from, 1, 4) %>% as.numeric()\n  end_year <- substr(to, 1, 4) %>% as.numeric()\n\n  # Message of error\n\n  if (start_year < 2018) {\n    from <- \"2018-07-01\"\n    start_year <- substr(from, 1, 4) %>% as.numeric()\n    print(sprintf(\"No data exist before 2018-07; 'from' was reset to 2018-07-01\"))\n  }\n\n  # Dataset\n  collection <- ee$ImageCollection(\"COPERNICUS/S5P/NRTI/L3_NO2\")$\n    select(c(band))\n\n  # date of dataset\n  months <- ee$List$sequence(1, 12)\n  years <- ee$List$sequence(start_year, end_year)\n\n  s5p <- ee$\n    ImageCollection$\n    fromImages(years$map(\n      ee_utils_pyfunc(function(y) {\n        months$map(ee_utils_pyfunc(\n          function(m) {\n            collection$\n              filter(ee$Filter$calendarRange(y, y, \"year\"))$\n              filter(ee$Filter$calendarRange(m, m, \"month\"))$\n              max()$\n              rename(\"NO2\")$\n              set(\"year\", y)$\n              set(\"month\", m)\n          }\n        ))\n      })\n    )$flatten())\n\n  im_base <- s5p$\n    filter(ee$Filter$inList(\"month\", c(1:12)))\n\n  if (start_year == end_year) {\n    new_base <- im_base$\n      filter(\n        ee$Filter$inList(\n          \"year\",\n          list(\n            c(\n              start_year:end_year\n            )\n          )\n        )\n      )$toBands()\n  } else {\n    new_base <- im_base$\n      filter(\n        ee$Filter$inList(\n          \"year\",\n          c(\n            start_year:end_year\n          )\n        )\n      )$\n      toBands()\n  }\n\n  # The main functions\n  if (fun == \"count\") {\n    img_count <- ee_count(\n      new_base,\n      region,\n      scale = scale\n    )\n    id_names <- which(\n      endsWith(\n        names(img_count),\n        suffix = \"NO2\"\n      )\n    )\n\n    names_id <- substr(\n      seq(\n        as.Date(from),\n        as.Date(to),\n        by = '1 month'\n      ),\n      1, 7\n    )\n\n    names(img_count)[id_names] <- sprintf(\"%s%s\", \"NO2_\", names_id)\n    return(img_count)\n  } else if (fun == \"kurtosis\") {\n    img_kurtosis <- ee_kurstosis(\n      new_base,\n      region,\n      scale = scale\n    )\n    id_names <- which(\n      endsWith(\n        names(img_kurtosis),\n        suffix = \"NO2\"\n      )\n    )\n\n    names_id <- substr(\n      seq(\n        as.Date(from),\n        as.Date(to),\n        by = '1 month'\n      ),\n      1, 7\n    )\n\n    names(img_kurtosis)[id_names] <- sprintf(\"%s%s\", \"NO2_\", 
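# (added note) yields monthly column names like \"NO2_2019-02\"\n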
names_id)\n return(img_kurtosis)\n } else if (fun == \"max\") {\n img_max <- ee_max(\n new_base,\n region,\n scale = scale\n )\n id_names <- which(\n endsWith(\n names(img_max),\n suffix = \"NO2\"\n )\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1, 7\n )\n\n names(img_max)[id_names] <- sprintf(\"%s%s\", \"NO2_\", names_id)\n return(img_max)\n } else if (fun == \"mean\") {\n img_mean <- ee_mean(\n new_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_mean),\n suffix = \"NO2\"\n )\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1, 7\n )\n\n names(img_mean)[id_names] <- sprintf(\"%s%s\", \"NO2_\", names_id)\n return(img_mean)\n } else if (fun == \"median\") {\n img_median <- ee_median(\n new_base,\n region,\n scale = scale\n )\n id_names <- which(\n endsWith(\n names(img_median),\n suffix = \"NO2\"\n )\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1, 7\n )\n\n names(img_median)[id_names] <- sprintf(\"%s%s\", \"NO2_\", names_id)\n return(img_median)\n } else if (fun == \"min\") {\n img_min <- ee_min(\n new_base,\n region,\n scale = scale\n )\n id_names <- which(\n endsWith(\n names(img_min),\n suffix = \"NO2\"\n )\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1, 7\n )\n\n names(img_min)[id_names] <- sprintf(\"%s%s\", \"NO2_\", names_id)\n return(img_min)\n } else if (fun == \"mode\") {\n img_mode <- ee_mode(\n new_base,\n region,\n scale = scale\n )\n id_names <- which(\n endsWith(\n names(img_mode),\n suffix = \"NO2\"\n )\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1, 7\n )\n\n names(img_mode)[id_names] <- sprintf(\"%s%s\", \"NO2_\", names_id)\n return(img_mode)\n } else if (fun == \"percentile\") {\n img_percentile <- ee_percentile(\n new_base,\n region,\n scale = scale\n )\n id_names <- which(\n endsWith(\n names(img_percentile),\n suffix = \"NO2\"\n )\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1, 7\n )\n\n names(img_percentile)[id_names] <- sprintf(\"%s%s\", \"NO2_\", names_id)\n return(img_percentile)\n } else if (fun == \"std\") {\n img_std <- ee_std(\n new_base,\n region,\n scale = scale\n )\n id_names <- which(\n endsWith(\n names(img_std),\n suffix = \"NO2\"\n )\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1, 7\n )\n\n names(img_std)[id_names] <- sprintf(\"%s%s\", \"NO2_\", names_id)\n return(img_std)\n } else if (fun == \"sum\") {\n img_sum <- ee_sum(\n new_base,\n region,\n scale = scale\n )\n id_names <- which(\n endsWith(\n names(img_sum),\n suffix = \"NO2\"\n )\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1, 7\n )\n\n names(img_sum)[id_names] <- sprintf(\"%s%s\", \"NO2_\", names_id)\n return(img_sum)\n } else if (fun == \"variance\") {\n img_variance <- ee_variance(\n new_base,\n region,\n scale = scale\n )\n id_names <- which(\n endsWith(\n names(img_variance),\n suffix = \"NO2\"\n )\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1, 7\n )\n\n names(img_variance)[id_names] <- sprintf(\"%s%s\", \"NO2_\", names_id)\n return(img_variance)\n }\n}\n" }, { "alpha_fraction": 0.4850861728191376, "alphanum_fraction": 0.5028382539749146, "avg_line_length": 20.388521194458008, "blob_id": "4311ae221c85361ae993f9540d98149b172c3951", "content_id": 
"da259ba8fead5f2ff886007a099944d6dcc26cd9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 19380, "license_type": "permissive", "max_line_length": 138, "num_lines": 906, "path": "/R/get_fldas.R", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "#' Extract climate data of Famine Early Warning Systems Network (FEWS NET) Land Data Assimilation System ()\n#'\n#' A function that extract a time series of climate variables (1982-01-01T00:00:00Z–2022-04-01T00:00:00).\n#'\n#' @param to,from it's a string object,starting and final date.\n#' @param by two types of increment of the sequence by \\bold{month} and \\bold{year}.\n#' @param band name of band.\n#' @param region is a feature or feature collection.\n#' @param fun function for extract statistic zonal (count, kurtosis, max, mean, median, min, mode, percentile, std, sum, variance, first).\n#' @param scale A nominal scale in meters of the projection to work in.\n#'\n#' @details Name of some bands.\n#' \\itemize{\n#' \\item \\bold{Evap_tavg (kg m-2 s-1):} Evapotranspiration.\n#' \\item \\bold{LWdown_f_tavg (W m-2):} Downward longwave radiation flux .\n#' \\item \\bold{Lwnet_tavg (W m-2):} Net longwave radiation flux.\n#' \\item \\bold{Psurf_f_tavg (Pa):} Surface pressure.\n#' \\item \\bold{Qair_f_tavg (kg kg-1):} Specific humidity.\n#' \\item \\bold{Qg_tavg (W m-2):} Soil heat flux.\n#' \\item \\bold{Qh_tavg (W m-2):} Sensible heat net flux.\n#' \\item \\bold{Qle_tavg (W m-2):} Latent heat net flux.\n#' \\item \\bold{Qs_tavg (kg m-2 s-1):} Storm surface runoff.\n#' \\item \\bold{Qsb_tavg (kg m-2 s-1):} Baseflow-groundwater runoff.\n#' \\item \\bold{RadT_tavg (K):} Surface radiative temperature.\n#' \\item \\bold{Rainf_f_tavg (kg m-2 s-1):} Total precipitation rate.\n#' \\item \\bold{SnowCover_inst :} Snow cover fraction.\n#' \\item \\bold{SnowDepth_inst (m):} Snow depth.\n#' \\item \\bold{Snowf_tavg (kg m-2 s-1):} Snowfall rate.\n#' \\item \\bold{SoilMoi00_10cm_tavg (m^3 m-3):}Soil moisture (0 - 10 cm underground).\n#' \\item \\bold{SoilMoi10_40cm_tavg (m^3 m-3):} Soil moisture (10 - 40 cm underground).\n#' \\item \\bold{SoilMoi100_200cm_tavg (m^3 m-3):} Soil moisture (100 - 200 cm underground).\n#' \\item \\bold{SoilMoi40_100cm_tavg (m^3 m-3):} Soil moisture (40 - 100 cm underground).\n#' \\item \\bold{SoilTemp00_10cm_tavg (K):} Soil temperature (0 - 10 cm underground).\n#' \\item \\bold{SoilTemp10_40cm_tavg\t(K):} Soil temperature (10 - 40 cm underground).\n#' \\item \\bold{SoilTemp100_200cm_tavg (K):} Soil temperature (100 - 200 cm underground).\n#' \\item \\bold{SoilTemp40_100cm_tavg (K):} Soil temperature (40 - 100 cm underground).\n#' \\item \\bold{SWdown_f_tavg (W m-2):} Surface downward shortwave radiation.\n#' \\item \\bold{SWE_inst (kg m-2):} Snow water equivalent.\n#' \\item \\bold{Swnet_tavg\t(W m-2):} Net shortwave radiation flux.\n#' \\item \\bold{Tair_f_tavg (K):} Near surface air temperature.\n#' \\item \\bold{Wind_f_tavg (m s-1):} Near surface wind speed.\n#'\n#'\n#'\n#' }\n#'\n#' @return a tibble object with the new variables.\n#' @importFrom sf st_transform st_simplify\n#' @importFrom rgee sf_as_ee\n#' @importFrom dplyr select filter contains\n#' @importFrom purrr is_empty\n#'\n#' @examples\n#' \\dontrun{\n#'\n#' library(tidyverse)\n#' library(rgee)\n#' library(innovar)\n#' library(sf)\n#' ee_Initialize()\n#'\n#' # 1. 
Reading a sf object\n#' data(\"Peru\")\n#' region <- Peru\n#' region_ee <- pol_as_ee(region, id = 'distr', simplify = 1000)\n#' # 2. Extracting climate information\n#' data <- region_ee %>% get_fldas(\n#'   from = \"2001-02-01\", to = \"2002-12-31\",\n#'   by = \"month\", band = \"Qair_f_tavg\", fun = \"mean\")\n#' }\n#' @export\n\nget_fldas <- function(from, to, by, band, region, fun = \"mean\", scale = 1000) {\n\n  # Conditions about the times\n\n  start_year <- substr(from, 1, 4) %>% as.numeric()\n  end_year <- substr(to, 1, 4) %>% as.numeric()\n\n  if (start_year == end_year) {\n    year <- unique(\n      c(start_year:end_year)\n    ) %>%\n      list()\n\n    year_list <- ee$List(year)\n  } else {\n    year <- unique(\n      c(start_year:end_year)\n    )\n    year_list <- ee$List(year)\n  }\n\n  # Message of error\n\n  if (end_year > 2023 | start_year < 1982) {\n    print(sprintf(\"No data exist for the requested period\"))\n  }\n\n  # The main functions\n  if (by == \"month\" & fun == \"count\") {\n\n    img_base <- ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n      select(c(band))$\n      filterDate(from, to)$\n      toBands()\n\n    img_count <- ee_count(\n      img_base,\n      region,\n      scale = scale\n    )\n\n    id_names <- which(\n      endsWith(\n        names(img_count),\n        suffix = band)\n    )\n\n    names_id <- substr(\n      seq(\n        as.Date(from),\n        as.Date(to),\n        by = '1 month'\n      ),\n      1,7\n    )\n\n    names(img_count)[id_names] <- sprintf('%s%s',band,names_id)\n    return(img_count)\n\n  } else if (by == \"month\" & fun == \"kurtosis\") {\n    img_base <- ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n      select(c(band))$\n      filterDate(from, to)$\n      toBands()\n\n    img_kurtosis <- ee_kurstosis(\n      img_base,\n      region,\n      scale = scale\n    )\n\n    id_names <- which(\n      endsWith(\n        names(img_kurtosis),\n        suffix = band)\n    )\n\n    names_id <- substr(\n      seq(\n        as.Date(from),\n        as.Date(to),\n        by = '1 month'\n      ),\n      1,7\n    )\n\n    names(img_kurtosis)[id_names] <- sprintf('%s%s',band,names_id)\n    return(img_kurtosis)\n\n  } else if (by == \"month\" & fun == \"max\") {\n    img_base <- ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n      select(c(band))$\n      filterDate(from, to)$\n      toBands()\n\n    img_max <- ee_max(\n      img_base,\n      region,\n      scale = scale\n    )\n\n    id_names <- which(\n      endsWith(\n        names(img_max),\n        suffix = band)\n    )\n\n    names_id <- substr(\n      seq(\n        as.Date(from),\n        as.Date(to),\n        by = '1 month'\n      ),\n      1,7\n    )\n\n    names(img_max)[id_names] <- sprintf('%s%s',band,names_id)\n    return(img_max)\n\n  } else if (by == \"month\" & fun == \"mean\") {\n    img_base <- ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n      select(c(band))$\n      filterDate(from, to)$\n      toBands()\n\n    img_mean <- ee_mean(\n      img_base,\n      region,\n      scale = scale\n    )\n\n    id_names <- which(\n      endsWith(\n        names(img_mean),\n        suffix = band)\n    )\n\n    names_id <- substr(\n      seq(\n        as.Date(from),\n        as.Date(to),\n        by = '1 month'\n      ),\n      1,7\n    )\n\n    names(img_mean)[id_names] <- sprintf('%s%s',band,names_id)\n    return(img_mean)\n\n  } else if (by == \"month\" & fun == \"median\") {\n    img_base <- ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n      select(c(band))$\n      filterDate(from, to)$\n      toBands()\n\n    img_median <- ee_median(\n      img_base,\n      region,\n      scale = scale\n    )\n\n    id_names <- which(\n      endsWith(\n        names(img_median),\n        suffix = band)\n    )\n\n    names_id <- substr(\n      seq(\n        as.Date(from),\n        as.Date(to),\n        by = '1 month'\n      ),\n      1,7\n    )\n\n    names(img_median)[id_names] <- sprintf('%s%s',band,names_id)\n    return(img_median)\n\n  } else if (by == \"month\" & fun == \"min\") {\n    img_base <- ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n      select(c(band))$\n
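# NOTE (added): filterDate() treats 'to' as exclusive, so 'to' should be the last day of the final month or later\n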
filterDate(from, to)$\n toBands()\n\n img_min <- ee_min(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_min),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1,7\n )\n\n names(img_min)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_min)\n\n } else if (by == \"month\" & fun == \"mode\") {\n img_base <- ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n select(c(band))$\n filterDate(from, to)$\n toBands()\n\n img_mode <- ee_mode(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_mode),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1,7\n )\n\n names(img_mode)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_mode)\n\n } else if (by == \"month\" & fun == \"percentile\") {\n img_base <- ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n select(c(band))$\n filterDate(from, to)$\n toBands()\n\n img_percentile <- ee_percentile(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_percentile),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1,7\n )\n\n names(img_percentile)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_percentile)\n\n } else if (by == \"month\" & fun == \"std\") {\n img_base <- ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n select(c(band))$\n filterDate(from, to)$\n toBands()\n\n img_std <- ee_std(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_std),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1,7\n )\n\n names(img_std)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_std)\n\n } else if (by == \"month\" & fun == \"sum\") {\n img_base <- ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n select(c(band))$\n filterDate(from, to)$\n toBands()\n\n img_sum <- ee_sum(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_sum),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1,7\n )\n\n names(img_sum)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_sum)\n\n } else if (by == \"month\" & fun == \"variance\") {\n img_base <- ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n select(c(band))$\n filterDate(from, to)$\n toBands()\n\n img_variance <- ee_variance(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_variance),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1,7\n )\n\n names(img_variance)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_variance)\n }\n\n if (by == \"year\" & fun == \"count\") {\n list_img <- year_list$\n map(\n ee_utils_pyfunc(function(x) {\n ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, \"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()\n\n img_count <- ee_count(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_count),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year',\n ),\n 1,4\n )\n\n names(img_count)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_count)\n\n\n } else if 
(by == \"year\" & fun == \"kurtosis\") {\n list_img <- year_list$\n map(\n ee_utils_pyfunc(function(x) {\n ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, \"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()\n\n img_kurtosis <- ee_kurstosis(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_kurtosis),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year'\n ),\n 1,4\n )\n\n names(img_kurtosis)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_kurtosis)\n\n } else if (by == \"year\" & fun == \"max\") {\n list_img <- year_list$\n map(\n ee_utils_pyfunc(function(x) {\n ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, \"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()\n\n img_max <- ee_max(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_max),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year'\n ),\n 1,4\n )\n\n names(img_max)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_max)\n\n } else if (by == \"year\" & fun == \"mean\") {\n list_img <- year_list$\n map(\n ee_utils_pyfunc(function(x) {\n ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, \"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()\n\n img_mean <- ee_mean(\n img_base,\n region,\n scale = scale)\n\n id_names <- which(\n endsWith(\n names(img_mean),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year'\n ),\n 1,4\n )\n\n names(img_mean)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_mean)\n\n } else if (by == \"year\" & fun == \"median\") {\n list_img <- year_list$\n map(\n ee_utils_pyfunc(function(x) {\n ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, \"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()\n\n img_median <- ee_median(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_median),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year'\n ),\n 1,4\n )\n\n names(img_median)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_median)\n\n } else if (by == \"year\" & fun == \"min\") {\n list_img <- year_list$\n map(\n ee_utils_pyfunc(function(x) {\n ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, \"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()\n\n img_min <- ee_min(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_min),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year'\n ),\n 1,4\n )\n\n names(img_min)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_min)\n\n } else if (by == \"year\" & fun == \"mode\") {\n list_img <- year_list$\n map(\n ee_utils_pyfunc(function(x) {\n ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, 
\"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()\n\n img_mode <- ee_mode(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_mode),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year'\n ),\n 1,4\n )\n\n names(img_mode)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_mode)\n\n } else if (by == \"year\" & fun == \"percentile\") {\n list_img <- year_list$\n map(\n ee_utils_pyfunc(function(x) {\n ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, \"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()\n\n img_percentile <- ee_percentile(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_percentile),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year'\n ),\n 1,4\n )\n\n names(img_percentile)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_percentile)\n\n } else if (by == \"year\" & fun == \"std\") {\n list_img <- year_list$\n map(\n ee_utils_pyfunc(function(x) {\n ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, \"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()\n\n img_std <- ee_std(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_std),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year'\n ),\n 1,4\n )\n\n names(img_std)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_std)\n\n } else if (by == \"year\" & fun == \"sum\") {\n list_img <- year_list$\n map(\n ee_utils_pyfunc(function(x) {\n ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, \"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()\n\n img_sum <- ee_sum(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_sum),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year'\n ),\n 1,4\n )\n\n names(img_sum)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_sum)\n\n } else if (by == \"year\" & fun == \"variance\") {\n list_img <- year_list$\n map(\n ee_utils_pyfunc(function(x) {\n ee$ImageCollection(\"NASA/FLDAS/NOAH01/C/GL/M/V001\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, \"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()\n\n img_variance <- ee_variance(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_variance),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year'\n ),\n 1,4\n )\n\n names(img_variance)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_variance)\n }\n}\n" }, { "alpha_fraction": 0.5634620189666748, "alphanum_fraction": 0.6076204776763916, "avg_line_length": 18.426469802856445, "blob_id": "efe88a3e2294ebbac10c3a51b778013279f4090d", "content_id": "f1fa77dbce2b264255451d6c3ba0a6b1e359957a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": true, "language": "RMarkdown", "length_bytes": 4201, "license_type": "permissive", "max_line_length": 160, 
"num_lines": 204, "path": "/vignettes/vignette.Rmd", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "---\ntitle: \"Introduction to innovar\"\noutput:\n html_document:\n toc: true\n toc_float:\n collapsed: false\n smooth_scroll: false\n toc_depth: 2\nvignette: >\n %\\VignetteIndexEntry{1. Introduction}\n %\\VignetteEncoding{UTF-8}\n %\\VignetteEngine{knitr::rmarkdown} \n---\n\n```{r setup, include = FALSE}\nknitr::opts_chunk$set(\n collapse = TRUE,\n comment = \"#>\"\n)\n```\n\nPackage of functions of the Laboratory of Innovation in Health (InnovaLab) of the Institute of Tropical Medicine \"Alexander von Humboldt\", Universidad Peruana Cayetano Heredia.\n\n### 🟣 1. Requeriments\n\n```{r ,eval=FALSE, warning=FALSE,message=FALSE}\nlibrary(innovar)\nlibrary(sf)\nlibrary(rgee)\nlibrary(tidyverse)\nlibrary(viridis)\nlibrary(cowplot)\nlibrary(mapview)\nee_Initialize() # edit\n```\n\n```\n── rgee 1.1.2.9000 ──────────────────────── earthengine-api 0.1.297 ──\n ✓ user: not_defined\n ✓ Initializing Google Earth Engine: DONE!\n ✓ Earth Engine account: users/antonybarja8\n──────────────────────────────────────────────────────────────────────\n```\n\n### 🟣 2. Vector layer reading of Peru\n\n```{r echo=FALSE, message=FALSE,warning=FALSE}\nlibrary(innovar)\nlibrary(mapview)\nlibrary(tidyverse)\n```\n\n```{r ,message=F,warning=F,eval = TRUE}\ndata(\"Peru\")\nperu.region <- Peru %>%\n dplyr::group_by(dep) %>%\n summarise()\n\nglimpse(peru.region)\n```\n\n### 🟣 3. Exploration Peru map\n\n```{r,out.width=\"100%\"}\nmapview(peru.region,legend = NULL)\n```\n\n### 🟣 4. Transformation of sf object to a feature collection\n\n```{r ,message=F,warning=F,eval=FALSE}\nperu.ee <- peru.region %>%\n pol_as_ee(\n id = c(\"dep\"),\n simplify = 100\n )\n```\n\n### 🟣 5. Processing data with innovar\n\n```{r ,message=F,warning=F,eval=FALSE}\nperu.ndvi <- peru.ee %>%\n get_vegetation(\n from = \"2018-01-01\",\n to = \"2019-12-31\",\n band = \"NDVI\",\n fun = \"mean\")\n\nperu.def <- peru.ee %>%\n get_def(\n from = \"2018-02-01\",\n to = \"2019-12-31\"\n )\n\nperu.pr <- peru.ee %>%\n get_climate(\n from = \"2018-02-01\",\n to = \"2019-12-31\",\n by = \"month\",\n band = \"pr\",\n fun = \"mean\"\n )\n```\n\n```\n[1] \"Extracting information [0/25]...\"\n[1] \"Extracting information [0/25]...\"\n[1] \"Extracting information [0/25]...\"\n```\n\n### 🟣 6. Processing data for mapping\n\n```{r,echo=FALSE,message=FALSE,warning=FALSE}\nperu.def <- read_rds(\"data\")[[1]]\nperu.ndvi <- read_rds(\"data\")[[2]]\nperu.pr <- read_rds(\"data\")[[3]]\n```\n\n```{r ,message=F,warning=F}\nperu.ndvi.sf <- inner_join(peru.region,peru.ndvi,\"dep\")\nperu.pr.sf <- inner_join(peru.region,peru.pr ,\"dep\")\nperu.def.sf <- inner_join(peru.region,peru.def,\"dep\")\n```\n\n### 🟣 7. Exploration data\n\n```{r}\n# peru.ndvi.sf\nglimpse(peru.ndvi.sf)\n# peru.pr.sf\nglimpse(peru.pr.sf)\n# peru.def.sf\nglimpse(peru.def.sf)\n```\n\n### 🟣 8. Exploration peru.ndvi.sf map\n\n```{r, out.width=\"100%\"}\nmapview(\n peru.ndvi.sf,\n zcol=\"NDVI2018-01\",\n layer.name = \"NDVI-2018-01\"\n )\n```\n\n### 🟣 9. Exploration peru.pr.sf map\n\n```{r, out.width=\"100%\"}\nmapview(\n peru.pr.sf,\n zcol=\"pr2018-02\",\n layer.name = \"pr-2018-02\"\n )\n```\n\n### 🟣 10. Exploration peru.def.sf map\n\n```{r, out.width=\"100%\"}\nmapview(\n peru.def.sf,\n zcol=\"Adef_2018\",\n layer.name = \"def-2018\"\n )\n```\n\n### 🟣 11. 
Mapping climate variables with the innovar theme\n\n```{r,message=F,warning=F,fig.height=10,fig.width=15}\npr.plot <- peru.pr.sf %>%\n ggplot() +\n geom_sf(aes(fill=`pr2019-01`)) +\n scale_fill_innova(discrete = FALSE,name=\"Precipitation\") +\n theme_bw()\n```\n\n```{r,message=F,warning=F,fig.height=10,fig.width=15}\nndvi.plot<- peru.ndvi.sf %>%\n ggplot() +\n geom_sf(aes(fill=`NDVI2019-01`)) +\n scale_fill_innova(discrete = FALSE,name=\"NDVI\") +\n theme_bw()\n```\n\n```{r, message=F,warning=F,fig.height=10,fig.width=15}\ndef.plot <- peru.def.sf %>%\n ggplot() +\n geom_sf(aes(fill=Adef_2019)) +\n scale_fill_innova(discrete = FALSE,name=\"Deforestation\") +\n theme_bw()\n```\n\n### 🟣 12. Final plot\n\n```{r,fig.width=7,fig.height=5}\npr.plot\n```\n\n```{r,fig.width=7,fig.height=5}\nndvi.plot\n```\n\n```{r,fig.width=7,fig.height=5}\ndef.plot\n```\n" }, { "alpha_fraction": 0.4015544056892395, "alphanum_fraction": 0.4015544056892395, "avg_line_length": 19.3157901763916, "blob_id": "d6d1832a9ae94aa8194da387f122b588ff36087b", "content_id": "c7be9f06bc40bc039cee91b40439a2403f94ecad", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 772, "license_type": "permissive", "max_line_length": 32, "num_lines": 38, "path": "/R/class.R", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "# Class definitions\n\nsetClass(\"indexFit\",\n slots = c(\n Fits = \"list\",\n Explained = \"tbl_df\",\n Loadings = \"tbl_df\"\n ),\n prototype = list(\n Fits = list(),\n Explained = NA_real_,\n Loadings = NA_real_\n )\n)\n\nsetClass(\"indexFits\",\n slots = c(\n Specific = \"list\",\n Fits = \"list\",\n Explained = \"tbl_df\",\n Loadings = \"tbl_df\"\n ),\n prototype = list(\n Specific = list(),\n Fits = list(),\n Explained = NA_real_,\n Loadings = NA_real_\n )\n)\n\n\nsetClass(\"indexcalc\",\n slots = c(\n Options = \"list\",\n Data = \"tbl_df\",\n Fit = \"indexFits\"\n )\n)\n" }, { "alpha_fraction": 0.5579487085342407, "alphanum_fraction": 0.5600000023841858, "avg_line_length": 32.620689392089844, "blob_id": "11f6a7bb1a796384b8a086e188fc5896af14ff4d", "content_id": "2c457e773fe7752c877ae137f78fbd441eb9b4e0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 976, "license_type": "permissive", "max_line_length": 97, "num_lines": 29, "path": "/R/allpautils_names.R", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "#' Remove accents and hyphens in character vector\n#'\n#' @description This function will allow you to remove accents, hyphens and\n#' any strange characters that come from the UTF-8 encoding, with the intention\n#' of working with standardized names or texts.Review:\n#' https://cran.r-project.org/web/packages/MazamaSpatialUtils/vignettes/MazamaSpatialUtils.html\n#'\n#' @param string Characters vector\n#'\n#' @examples\n#' library(innovar)\n#' string <- c(\"Perú\", \"Estados Unidos\")\n#' allpautils_names(string)\n#'\n#' @export allpautils_names\nallpautils_names <- function(string) {\n\n string <- gsub(pattern = \"_\",\n replacement = \" \",\n x = gsub(pattern = \"-\",\n replacement = \" \",\n x = iconv(string,\n from=\"UTF-8\",\n to=\"ASCII//TRANSLIT\"),\n fixed=TRUE),\n fixed=TRUE)\n\n return(string)\n}\n" }, { "alpha_fraction": 0.7457017302513123, "alphanum_fraction": 0.7552978992462158, "avg_line_length": 64.81578826904297, "blob_id": "bae545e95bb5c578c19833e202ba176693d9ee34", "content_id": "224154fb554568dda56a72f16d1f0eed0f541243", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2512, "license_type": "permissive", "max_line_length": 722, "num_lines": 38, "path": "/README.md", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "# **Health Innovation Lab** <img src=\"man/figures/logo.png\" align=\"right\" width=\"35%\">\n\n<!-- badges: start -->\n\n[![Lifecycle:experimental](https://img.shields.io/badge/lifecycle-experimental-orange.svg)](https://www.tidyverse.org/lifecycle/#experimental)\n![contributions welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)\n[![Project Status: Active – The project has reached a stable, usable state and is being actively developed.](https://www.repostatus.org/badges/latest/active.svg)](https://www.repostatus.org/#active)\n[![R-CMD-check](https://github.com/healthinnovation/innovar/actions/workflows/R-CMD-check.yaml/badge.svg)](https://github.com/healthinnovation/innovar/actions/workflows/R-CMD-check.yaml)\n<!-- badges: end -->\n\n## 🔵 **Who we are**\n\nWe are a multidisciplinary team focused on **designing and evaluating innovative and accessible technologies to improve people's health**. We promote the **use of technologies and open data** in order to lower implementation barriers, reproducibility and increase the impact of innovation proposals.\n\nOur main lines of research include the development of diagnostic and detection systems, development of tools for environmental parameter detection, data science and big data applied to tropical diseases, and geospatial analysis of mobility and migration patterns.\n\n## 🔵 **Package installation**\n\nYou can install the first version of innovar from\n[GitHub](https://github.com/):\n\nThe following code just copy and paste into the R console:\n\n```\nif(!require(\"remotes\")) install.packages(\"remotes\")\nremotes::install_github(\"healthinnovation/[email protected]\")\n```\n\n```\nlibrary(innovar)\n```\n\n# 🔵 **Our social networks**\n\n<p align=\"left\">\n <a href = \"https://www.facebook.com/imt.innovalab\">\n <img src=\"https://img.shields.io/badge/Facebook-1877F2?style=for-the-badge&logo=facebook&logoColor=white\"></a> <a href=\"https://twitter.com/innovalab_imt\"><img src=\"https://img.shields.io/badge/Twitter-1DA1F2?style=for-the-badge&logo=twitter&logoColor=white\"></a> <a href=\"https://www.instagram.com/innovalab_imt/\"><img src=\"https://img.shields.io/badge/Instagram-E4405F?style=for-the-badge&logo=instagram&logoColor=white\"></a> <a href=\"https://www.innovalab.info/\"><img src=\"https://img.shields.io/badge/Innovalab_web-000?style=for-the-badge&logo=wix&logoColor=white\"></a> <a href=\"https://linktr.ee/innov_lab\"><img src=\"https://img.shields.io/badge/linktree-39E09B?style=for-the-badge&logo=linktree&logoColor=whit\"></a>\n</p>\n" }, { "alpha_fraction": 0.4730694890022278, "alphanum_fraction": 0.48617762327194214, "avg_line_length": 22.99395179748535, "blob_id": "ec7731c2d8353dfea46aa1ed892afe1022d60682", "content_id": "6f232a40633cda1e4eb0e2c4ab4f27bef4fa0730", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 11901, "license_type": "permissive", "max_line_length": 69, "num_lines": 496, "path": "/R/method.R", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "ee_count <- function(x, y, by = 1000,scale = 1000) {\n y_len <- y$size()$getInfo()\n\n for (i in seq(1, y_len, by)) {\n index <- i - 1\n print(sprintf(\"Extracting 
information [%s/%s]...\", index, y_len))\n\n    ee_value_layer <- ee$FeatureCollection(y) %>%\n      ee$FeatureCollection$toList(by, index) %>%\n      ee$FeatureCollection()\n\n    if (i == 1) {\n      dataset <- ee_extract(\n        x = x,\n        fun = ee$Reducer$count(),\n        y = ee_value_layer,\n        scale = scale,\n        sf = T\n      ) %>%\n        st_set_geometry(NULL) %>%\n        as_tibble() %>%\n        mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n    } else {\n      db_local <- ee_extract(\n        x = x,\n        y = ee_value_layer,\n        fun = ee$Reducer$count(),\n        scale = scale,\n        sf = T\n      ) %>%\n        st_set_geometry(NULL) %>%\n        as_tibble() %>%\n        mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n      dataset <- rbind(dataset, db_local)\n    }\n  }\n  return(dataset)\n}\n\n\nee_kurstosis <- function(x, y, by = 1000, scale = 1000) {\n  y_len <- y$size()$getInfo()\n\n  for (i in seq(1, y_len, by)) {\n    index <- i - 1\n    print(sprintf(\"Extracting information [%s/%s]...\", index, y_len))\n\n    ee_value_layer <- ee$FeatureCollection(y) %>%\n      ee$FeatureCollection$toList(by, index) %>%\n      ee$FeatureCollection()\n\n    if (i == 1) {\n      dataset <- ee_extract(\n        x = x,\n        fun = ee$Reducer$kurtosis(),\n        y = ee_value_layer,\n        scale = scale,\n        sf = T\n      ) %>%\n        st_set_geometry(NULL) %>%\n        as_tibble() %>%\n        mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n    } else {\n      db_local <- ee_extract(\n        x = x,\n        y = ee_value_layer,\n        fun = ee$Reducer$kurtosis(),\n        scale = scale,\n        sf = T\n      ) %>%\n        st_set_geometry(NULL) %>%\n        as_tibble() %>%\n        mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n      dataset <- rbind(dataset, db_local)\n    }\n  }\n  return(dataset)\n}\n\n\nee_max <- function(x, y, by = 1000, scale = 1000) {\n  y_len <- y$size()$getInfo()\n\n  for (i in seq(1, y_len, by)) {\n    index <- i - 1\n    print(sprintf(\"Extracting information [%s/%s]...\", index, y_len))\n\n    ee_value_layer <- ee$FeatureCollection(y) %>%\n      ee$FeatureCollection$toList(by, index) %>%\n      ee$FeatureCollection()\n\n    if (i == 1) {\n      dataset <- ee_extract(\n        x = x,\n        fun = ee$Reducer$max(),\n        y = ee_value_layer,\n        scale = scale,\n        sf = T\n      ) %>%\n        st_set_geometry(NULL) %>%\n        as_tibble() %>%\n        mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n    } else {\n      db_local <- ee_extract(\n        x = x,\n        y = ee_value_layer,\n        fun = ee$Reducer$max(),\n        scale = scale,\n        sf = T\n      ) %>%\n        st_set_geometry(NULL) %>%\n        as_tibble() %>%\n        mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n      dataset <- rbind(dataset, db_local)\n    }\n  }\n  return(dataset)\n}\n\n\nee_mean <- function(x, y, by = 1000, scale = 1000) {\n  y_len <- y$size()$getInfo()\n\n  for (i in seq(1, y_len, by)) {\n    index <- i - 1\n    print(sprintf(\"Extracting information [%s/%s]...\", index, y_len))\n\n    ee_value_layer <- ee$FeatureCollection(y) %>%\n      ee$FeatureCollection$toList(by, index) %>%\n      ee$FeatureCollection()\n\n    if (i == 1) {\n      dataset <- ee_extract(\n        x = x,\n        fun = ee$Reducer$mean(),\n        y = ee_value_layer,\n        scale = scale,\n        sf = T\n      ) %>%\n        st_set_geometry(NULL) %>%\n        as_tibble() %>%\n        mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n    } else {\n      db_local <- ee_extract(\n        x = x,\n        y = ee_value_layer,\n        fun = ee$Reducer$mean(),\n        scale = scale,\n        sf = T\n      ) %>%\n        st_set_geometry(NULL) %>%\n        as_tibble() %>%\n        mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n      dataset <- rbind(dataset, db_local)\n    }\n  }\n  return(dataset)\n}\n\nee_median <- function(x, y, by = 1000, scale = 1000) {\n  y_len <- y$size()$getInfo()\n\n  for (i in seq(1, y_len, by)) {\n    index <- i - 1\n    print(sprintf(\"Extracting information [%s/%s]...\", index, y_len))\n\n    ee_value_layer <- ee$FeatureCollection(y) 
%>%\n ee$FeatureCollection$toList(by, index) %>%\n ee$FeatureCollection()\n\n if (i == 1) {\n dataset <- ee_extract(\n x = x,\n fun = ee$Reducer$median(),\n y = ee_value_layer,\n scale = scale,\n sf = T\n ) %>%\n st_set_geometry(NULL) %>%\n as_tibble() %>%\n mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n } else {\n db_local <- ee_extract(\n x = x,\n y = ee_value_layer,\n fun = ee$Reducer$median(),\n scale = scale,\n sf = T\n ) %>%\n st_set_geometry(NULL) %>%\n as_tibble() %>%\n mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n dataset <- rbind(dataset, db_local)\n }\n }\n return(dataset)\n}\n\n\nee_min <- function(x, y, by = 1000,scale = 1000) {\n y_len <- y$size()$getInfo()\n\n for (i in seq(1, y_len, by)) {\n index <- i - 1\n print(sprintf(\"Extracting information [%s/%s]...\", index, y_len))\n\n ee_value_layer <- ee$FeatureCollection(y) %>%\n ee$FeatureCollection$toList(by, index) %>%\n ee$FeatureCollection()\n\n if (i == 1) {\n dataset <- ee_extract(\n x = x,\n fun = ee$Reducer$min(),\n y = ee_value_layer,\n scale = scale,\n sf = T\n ) %>%\n st_set_geometry(NULL) %>%\n as_tibble() %>%\n mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n } else {\n db_local <- ee_extract(\n x = x,\n y = ee_value_layer,\n fun = ee$Reducer$min(),\n scale = scale,\n sf = T\n ) %>%\n st_set_geometry(NULL) %>%\n as_tibble() %>%\n mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n dataset <- rbind(dataset, db_local)\n }\n }\n return(dataset)\n}\n\n\nee_mode <- function(x, y, by = 1000,scale = 1000) {\n y_len <- y$size()$getInfo()\n\n for (i in seq(1, y_len, by)) {\n index <- i - 1\n print(sprintf(\"Extracting information [%s/%s]...\", index, y_len))\n\n ee_value_layer <- ee$FeatureCollection(y) %>%\n ee$FeatureCollection$toList(by, index) %>%\n ee$FeatureCollection()\n\n if (i == 1) {\n dataset <- ee_extract(\n x = x,\n fun = ee$Reducer$mode(),\n y = ee_value_layer,\n scale = scale,\n sf = T\n ) %>%\n st_set_geometry(NULL) %>%\n as_tibble() %>%\n mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n } else {\n db_local <- ee_extract(\n x = x,\n y = ee_value_layer,\n fun = ee$Reducer$mode(),\n scale = scale,\n sf = T\n ) %>%\n st_set_geometry(NULL) %>%\n as_tibble() %>%\n mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n dataset <- rbind(dataset, db_local)\n }\n }\n return(dataset)\n}\n\n\nee_percentile <- function(x, y, by = 1000,scale = 1000) {\n y_len <- y$size()$getInfo()\n\n for (i in seq(1, y_len, by)) {\n index <- i - 1\n print(sprintf(\"Extracting information [%s/%s]...\", index, y_len))\n\n ee_value_layer <- ee$FeatureCollection(y) %>%\n ee$FeatureCollection$toList(by, index) %>%\n ee$FeatureCollection()\n\n if (i == 1) {\n dataset <- ee_extract(\n x = x,\n fun = ee$Reducer$percentile(),\n y = ee_value_layer,\n scale = scale,\n sf = T\n ) %>%\n st_set_geometry(NULL) %>%\n as_tibble() %>%\n mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n } else {\n db_local <- ee_extract(\n x = x,\n y = ee_value_layer,\n fun = ee$Reducer$percentile(),\n scale = scale,\n sf = T\n ) %>%\n st_set_geometry(NULL) %>%\n as_tibble() %>%\n mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n dataset <- rbind(dataset, db_local)\n }\n }\n return(dataset)\n}\n\nee_std <- function(x, y, by = 1000,scale = 1000) {\n y_len <- y$size()$getInfo()\n\n for (i in seq(1, y_len, by)) {\n index <- i - 1\n print(sprintf(\"Extracting information [%s/%s]...\", index, y_len))\n\n ee_value_layer <- ee$FeatureCollection(y) %>%\n ee$FeatureCollection$toList(by, index) %>%\n ee$FeatureCollection()\n\n if (i 
== 1) {\n dataset <- ee_extract(\n x = x,\n fun = ee$Reducer$stdDev(),\n y = ee_value_layer,\n scale = scale,\n sf = T\n ) %>%\n st_set_geometry(NULL) %>%\n as_tibble() %>%\n mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n } else {\n db_local <- ee_extract(\n x = x,\n y = ee_value_layer,\n fun = ee$Reducer$stdDev(),\n scale = scale,\n sf = T\n ) %>%\n st_set_geometry(NULL) %>%\n as_tibble() %>%\n mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n dataset <- rbind(dataset, db_local)\n }\n }\n return(dataset)\n}\n\nee_sum <- function(x, y, by = 1000,scale = 1000) {\n y_len <- y$size()$getInfo()\n\n for (i in seq(1, y_len, by)) {\n index <- i - 1\n print(sprintf(\"Extracting information [%s/%s]...\", index, y_len))\n\n ee_value_layer <- ee$FeatureCollection(y) %>%\n ee$FeatureCollection$toList(by, index) %>%\n ee$FeatureCollection()\n\n if (i == 1) {\n dataset <- ee_extract(\n x = x,\n fun = ee$Reducer$sum(),\n y = ee_value_layer,\n scale = scale,\n sf = T\n ) %>%\n st_set_geometry(NULL) %>%\n as_tibble() %>%\n mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n } else {\n db_local <- ee_extract(\n x = x,\n y = ee_value_layer,\n fun = ee$Reducer$sum(),\n scale = scale,\n sf = T\n ) %>%\n st_set_geometry(NULL) %>%\n as_tibble() %>%\n mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n dataset <- rbind(dataset, db_local)\n }\n }\n return(dataset)\n}\n\nee_variance <- function(x, y, by = 1000,scale = 1000) {\n y_len <- y$size()$getInfo()\n\n for (i in seq(1, y_len, by)) {\n index <- i - 1\n print(sprintf(\"Extracting information [%s/%s]...\", index, y_len))\n\n ee_value_layer <- ee$FeatureCollection(y) %>%\n ee$FeatureCollection$toList(by, index) %>%\n ee$FeatureCollection()\n\n if (i == 1) {\n dataset <- ee_extract(\n x = x,\n fun = ee$Reducer$variance(),\n y = ee_value_layer,\n scale = scale,\n sf = T\n ) %>%\n st_set_geometry(NULL) %>%\n as_tibble() %>%\n mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n } else {\n db_local <- ee_extract(\n x = x,\n y = ee_value_layer,\n fun = ee$Reducer$variance(),\n scale = scale,\n sf = T\n ) %>%\n st_set_geometry(NULL) %>%\n as_tibble() %>%\n mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n dataset <- rbind(dataset, db_local)\n }\n }\n return(dataset)\n}\n\n\nee_first <- function(x, y, by = 1000,scale = 1000) {\n y_len <- y$size()$getInfo()\n\n for (i in seq(1, y_len, by)) {\n index <- i - 1\n print(sprintf(\"Extracting information [%s/%s]...\", index, y_len))\n\n ee_value_layer <- ee$FeatureCollection(y) %>%\n ee$FeatureCollection$toList(by, index) %>%\n ee$FeatureCollection()\n\n if (i == 1) {\n dataset <- ee_extract(\n x = x,\n fun = ee$Reducer$first(),\n y = ee_value_layer,\n scale = scale,\n sf = T\n ) %>%\n st_set_geometry(NULL) %>%\n as_tibble() %>%\n mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n } else {\n db_local <- ee_extract(\n x = x,\n y = ee_value_layer,\n fun = ee$Reducer$first(),\n scale = scale,\n sf = T\n ) %>%\n st_set_geometry(NULL) %>%\n as_tibble() %>%\n mutate_if(is.numeric, funs(ifelse(is.na(.), 0, .)))\n\n dataset <- rbind(dataset, db_local)\n }\n }\n return(dataset)\n}\n" }, { "alpha_fraction": 0.7081632614135742, "alphanum_fraction": 0.719897985458374, "avg_line_length": 29.153846740722656, "blob_id": "42c82f645007c74a3e0317ec8a06f36b0b098699", "content_id": "314ec3462f67a29188d80a6de7e9476be4d810be", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 1960, "license_type": "permissive", "max_line_length": 204, 
"num_lines": 65, "path": "/inst/rmarkdown/templates/innovar-rmd-flatly/skeleton/skeleton.Rmd", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "---\ntitle: \"Innovar Rmd Flatly\"\nauthor:\n - name: Innovar 1\n email: [email protected]\n affiliation: Innovar, UPCH\n - name: Innovar 2\n email: [email protected]\n affiliation: Innovar, UPCH\ndate: \"`r Sys.Date()`\"\noutput: \n html_document: \n toc: yes\n toc_depth: 3\n toc_float: yes\n number_sections: yes\n code_folding: show\n code_download: yes\n theme: \n bootswatch: flatly\n highlight: kate\n highlight_downlit: true\n---\n\n```{r setup, include=FALSE}\nknitr::opts_chunk$set(\n echo = TRUE,\n dpi = 300,\n fig.align = 'center'\n # message = FALSE,\n # warning = FALSE,\n # error = TRUE\n)\n\n# The following code allows the rmd document chunks to be evaluated\n# and executed by setting the working directory to the folder where\n# the document is located. This can be useful when you have `Chunk\n# Output in Console` set or you are working with a different\n# configuration/IDE than usual (rstudio for example). If you wish to\n# handle this setting differently, you can comment out the line below\n# or simply delete it.\nsetwd(dirname(rstudioapi::getActiveDocumentContext()$path))\n```\n\n> **This template is based on [Rstudio Rmarkdown Template](https://github.com/rstudio/rstudio/blob/d152bb422d6d2af9edfaee66c42b4e9630caaaf7/src/cpp/session/resources/templates/document.Rmd).**\n\n# R Markdown\n\nThis is an R Markdown document. Markdown is a simple formatting syntax for authoring HTML, PDF, and MS Word documents. For more details on using R Markdown see <http://rmarkdown.rstudio.com>.\n\nWhen you click the **Knit** button a document will be generated that includes both content as well as the output of any embedded R code chunks within the document. You can embed an R code chunk like this:\n\n```{r cars}\nsummary(cars)\n```\n\n# Including Plots\n\nYou can also embed plots, for example:\n\n```{r pressure, echo=FALSE}\nplot(pressure)\n```\n\nNote that the `echo = FALSE` parameter was added to the code chunk to prevent printing of the R code that generated the plot.\n" }, { "alpha_fraction": 0.5039485096931458, "alphanum_fraction": 0.5175489783287048, "avg_line_length": 18.877906799316406, "blob_id": "fab806962282e74bb4cf607de93e6a342aba529e", "content_id": "395955aceadeee6e0bb45e69b90837b8aef1343a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 6838, "license_type": "permissive", "max_line_length": 138, "num_lines": 344, "path": "/R/get_pop.R", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "#' Extract population data of WorldPop\n#'\n#' A function that extract a time series of the number of population by \\bold{year} (2000-01-01T00:00:00Z - 2021-01-01T00:00:00).\n#'\n#' @param to,from it's a string object,starting and final date.\n#' @param region it's a feature collection.\n#' @param fun function for extract statistic zonal (\\bold{count, kurtosis, max, mean, median, min, mode, percentile, std, sum, variance}).\n#' @param scale A nominal scale in meters of the projection to work in.\n#' @return a tibble with the new variables.\n#'\n#' @importFrom sf st_transform st_simplify\n#' @importFrom rgee sf_as_ee\n#' @importFrom dplyr select filter contains\n#' @importFrom purrr is_empty\n#'\n#' @examples\n#' \\dontrun{\n#'\n#' library(tidyverse)\n#' library(rgee)\n#' library(innovar)\n#' library(sf)\n#' ee_Initialize()\n#'\n#' # 1. 
Reading an sf object\n#' data(\"Peru\")\n#' region <- Peru\n#' region_ee <- pol_as_ee(region, id = 'distr', simplify = 1000)\n#' # 2. Extracting population data\n#' data <- get_pop(\n#' from = \"2001-01-01\", to = \"2003-01-01\",\n#' region = region_ee, fun = \"max\", scale = 100)\n#' }\n#' @export\n\nget_pop <- function(from, to, region, fun = \"count\", scale = 100) {\n\n # Conditions about the times\n start_year <- substr(from, 1, 4) %>% as.numeric()\n end_year <- substr(to, 1, 4) %>% as.numeric()\n\n if(start_year == end_year){\n year <- unique(\n c(start_year:end_year)\n ) %>%\n list()\n\n year_list <- ee$List(year)\n } else {\n year <- unique(\n c(start_year:end_year)\n )\n year_list <- ee$List(year)\n }\n\n # Stop when the requested range falls outside the dataset\n if (start_year < 2000 | end_year > 2021) {\n stop(\"WorldPop data is only available for the years 2000-2021\")\n }\n\n # The base image collection\n list_img <- year_list$\n map(\n ee_utils_pyfunc(\n function(x) {\n ee$ImageCollection(\"WorldPop/GP/100m/pop\")$\n select(c('population'))$\n filter(ee$Filter$calendarRange(x, x, \"year\"))$\n mosaic()$\n rename('pop')\n }\n )\n )\n\n img_by_year <- ee$ImageCollection$\n fromImages(list_img)$\n toBands()\n\n # Conditions\n\n if (fun == \"count\") {\n img_count <- ee_count(\n img_by_year,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_count), suffix = 'pop')\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n length.out = length(id_names)\n ),\n 1,4\n )\n\n names(img_count)[id_names] <- sprintf('pop%s',names_id)\n return(img_count)\n\n } else if (fun == \"kurtosis\") {\n img_kurtosis <- ee_kurstosis(\n img_by_year,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_kurtosis), suffix = 'pop')\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n length.out = length(id_names)\n ),\n 1,4\n )\n\n names(img_kurtosis)[id_names] <- sprintf('pop%s',names_id)\n return(img_kurtosis)\n\n } else if (fun == \"max\") {\n img_max <- ee_max(\n img_by_year,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_max), suffix = 'pop')\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n length.out = length(id_names)\n ),\n 1,4\n )\n\n names(img_max)[id_names] <- sprintf('pop%s',names_id)\n return(img_max)\n\n } else if (fun == \"mean\") {\n img_mean <- ee_mean(\n img_by_year,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_mean), suffix = 'pop')\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n length.out = length(id_names)\n ),\n 1,4\n )\n\n names(img_mean)[id_names] <- sprintf('pop%s',names_id)\n return(img_mean)\n\n } else if (fun == \"median\") {\n img_median <- ee_median(\n img_by_year,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_median), suffix = 'pop')\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n length.out = length(id_names)\n ),\n 1,4\n )\n\n names(img_median)[id_names] <- sprintf('pop%s',names_id)\n return(img_median)\n\n } else if (fun == \"min\") {\n img_min <- ee_min(\n img_by_year,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_min), suffix = 'pop')\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n length.out = length(id_names)\n ),\n 1,4\n )\n\n names(img_min)[id_names] <- sprintf('pop%s',names_id)\n return(img_min)\n\n } else if (fun == \"mode\") {\n img_mode <- ee_mode(\n img_by_year,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_mode), suffix = 
'pop')\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n length.out = length(id_names)\n ),\n 1,4\n )\n\n names(img_mode)[id_names] <- sprintf('pop%s',names_id)\n return(img_mode)\n\n } else if (fun == \"percentile\") {\n img_percentile <- ee_percentile(\n img_by_year,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_percentile), suffix = 'pop')\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n length.out = length(id_names)\n ),\n 1,4\n )\n\n names(img_percentile)[id_names] <- sprintf('pop%s',names_id)\n return(img_percentile)\n\n } else if (fun == \"std\") {\n img_std <- ee_std(\n img_by_year,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_std), suffix = 'pop')\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n length.out = length(id_names)\n ),\n 1,4\n )\n\n names(img_std)[id_names] <- sprintf('pop%s',names_id)\n return(img_std)\n\n } else if (fun == \"sum\") {\n img_sum <- ee_sum(\n img_by_year,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_sum), suffix = 'pop')\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n length.out = length(id_names)\n ),\n 1,4\n )\n\n names(img_sum)[id_names] <- sprintf('pop%s',names_id)\n return(img_sum)\n\n } else if (fun == \"variance\") {\n img_variance <- ee_variance(\n img_by_year,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_variance), suffix = 'pop')\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n length.out = length(id_names)\n ),\n 1,4\n )\n\n names(img_variance)[id_names] <- sprintf('pop%s',names_id)\n return(img_variance)\n }\n}\n" }, { "alpha_fraction": 0.49460986256599426, "alphanum_fraction": 0.5030800700187683, "avg_line_length": 20.173913955688477, "blob_id": "46fd1b5d859c9eb1317ac7239fe7b8ca01893a71", "content_id": "1625475c584e4d883adb98cb479a21dcd3ea7c38", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 19483, "license_type": "permissive", "max_line_length": 138, "num_lines": 920, "path": "/R/get_climate.R", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "#' Extract climate data of TerraClimate\n#'\n#' A function that extract a time series of climate variables (1958-01-01T00:00:00Z - 2021-12-01T00:00:00).\n#'\n#' @param to,from it's a string object,starting and final date.\n#' @param by two types of increment of the sequence by \\bold{month} and \\bold{year}.\n#' @param band name of band.\n#' @param region is a feature or feature collection.\n#' @param fun function for extract statistic zonal (count, kurtosis, max, mean, median, min, mode, percentile, std, sum, variance, first).\n#' @param scale A nominal scale in meters of the projection to work in.\n#'\n#' @details Name of some bands.\n#' \\itemize{\n#' \\item \\bold{aet (mm):} Actual evapotranspiration, derived using a one-dimensional soil water balance model.\n#' \\item \\bold{def (mm):} Climate water deficit, derived using a one-dimensional soil water balance model.\n#' \\item \\bold{pdsi :} Palmer Drought Severity Index.\n#' \\item \\bold{pet(mm):} Reference evapotranspiration (ASCE Penman-Montieth).\n#' \\item \\bold{pr (mm):} Precipitation accumulation.\n#' \\item \\bold{ro (mm):} Runoff, derived using a one-dimensional soil water balance model.\n#' \\item \\bold{soil (mm):} Soil moisture, derived using a one-dimensional soil water balance model.\n#' \\item \\bold{srad (W/m²):} Downward 
surface shortwave radiation.\n#' \item \bold{swe (mm):} Snow water equivalent, derived using a one-dimensional soil water balance model.\n#' \item \bold{tmmn (°C):} Minimum temperature.\n#' \item \bold{tmmx (°C):} Maximum temperature.\n#' \item \bold{vap (kPa):} Vapor pressure.\n#' \item \bold{vpd (kPa):} Vapor pressure deficit.\n#' \item \bold{vs (m/s):} Wind-speed at 10m.\n#' }\n#'\n#' @return a tibble object with the new variables.\n#' @importFrom sf st_transform st_simplify\n#' @importFrom rgee sf_as_ee\n#' @importFrom dplyr select filter contains\n#' @importFrom purrr is_empty\n#'\n#' @examples\n#' \dontrun{\n#'\n#' library(tidyverse)\n#' library(rgee)\n#' library(innovar)\n#' library(sf)\n#' ee_Initialize()\n#'\n#' # 1. Reading an sf object\n#' data(\"Peru\")\n#' region <- Peru\n#' region_ee <- pol_as_ee(region, id = 'distr', simplify = 1000)\n#' # 2. Extracting climate information\n#' data <- get_climate(\n#' from = \"2001-02-01\", to = \"2002-12-31\",\n#' by = \"month\", band = \"tmmx\", fun = \"max\",\n#' region = region_ee)\n#' }\n#' @export\n\nget_climate <- function(from, to, by, band, region, fun = \"count\", scale = 1000) {\n\n # Conditions about the times\n\n start_year <- substr(from, 1, 4) %>% as.numeric()\n end_year <- substr(to, 1, 4) %>% as.numeric()\n\n if(start_year == end_year){\n year <- unique(\n c(start_year:end_year)\n ) %>%\n list()\n\n year_list <- ee$List(year)\n } else {\n year <- unique(\n c(start_year:end_year)\n )\n year_list <- ee$List(year)\n }\n\n # Scaling factors for each band\n\n multiply_factor <- c(\n aet = 0.1, def = 0.1, pdsi = 0.01, pet = 0.1,\n pr = 1, ro = 1, soil = 0.1, srad = 0.1, swe = 1,\n tmmn = 0.1, tmmx = 0.1, vap = 0.001, vpd = 0.01,\n vs = 0.01\n )\n\n # Stop when the requested range falls outside the dataset\n\n if (end_year > 2021 | start_year < 1958) {\n stop(\"TerraClimate data is only available for 1958-2021\")\n }\n\n # The main functions\n if (by == \"month\" & fun == \"count\") {\n\n img_base <- ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filterDate(from, to)$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_count <- ee_count(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_count),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1,7\n )\n\n names(img_count)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_count)\n\n } else if (by == \"month\" & fun == \"kurtosis\") {\n img_base <- ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filterDate(from, to)$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_kurtosis <- ee_kurstosis(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_kurtosis),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1,7\n )\n\n names(img_kurtosis)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_kurtosis)\n\n } else if (by == \"month\" & fun == \"max\") {\n img_base <- ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filterDate(from, to)$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_max <- ee_max(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_max),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1,7\n )\n\n names(img_max)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_max)\n\n } else if (by == \"month\" & fun == \"mean\") {\n img_base <- 
ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filterDate(from, to)$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_mean <- ee_mean(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_mean),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1,7\n )\n\n names(img_mean)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_mean)\n\n } else if (by == \"month\" & fun == \"median\") {\n img_base <- ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filterDate(from, to)$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_median <- ee_median(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_median),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1,7\n )\n\n names(img_median)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_median)\n\n } else if (by == \"month\" & fun == \"min\") {\n img_base <- ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filterDate(from, to)$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_min <- ee_min(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_min),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1,7\n )\n\n names(img_min)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_min)\n\n } else if (by == \"month\" & fun == \"mode\") {\n img_base <- ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filterDate(from, to)$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_mode <- ee_mode(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_mode),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1,7\n )\n\n names(img_mode)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_mode)\n\n } else if (by == \"month\" & fun == \"percentile\") {\n img_base <- ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filterDate(from, to)$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_percentile <- ee_percentile(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_percentile),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1,7\n )\n\n names(img_percentile)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_percentile)\n\n } else if (by == \"month\" & fun == \"std\") {\n img_base <- ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filterDate(from, to)$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_std <- ee_std(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_std),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1,7\n )\n\n names(img_std)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_std)\n\n } else if (by == \"month\" & fun == \"sum\") {\n img_base <- ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filterDate(from, to)$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_sum <- ee_sum(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_sum),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = 
'1 month'\n ),\n 1,7\n )\n\n names(img_sum)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_sum)\n\n } else if (by == \"month\" & fun == \"variance\") {\n img_base <- ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filterDate(from, to)$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_variance <- ee_variance(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_variance),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 month'\n ),\n 1,7\n )\n\n names(img_variance)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_variance)\n }\n\n if (by == \"year\" & fun == \"count\") {\n list_img <- year_list$\n map(\n ee_utils_pyfunc(function(x) {\n ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, \"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_count <- ee_count(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_count),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year',\n ),\n 1,4\n )\n\n names(img_count)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_count)\n\n\n } else if (by == \"year\" & fun == \"kurtosis\") {\n list_img <- year_list$\n map(\n ee_utils_pyfunc(function(x) {\n ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, \"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_kurtosis <- ee_kurstosis(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_kurtosis),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year'\n ),\n 1,4\n )\n\n names(img_kurtosis)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_kurtosis)\n\n } else if (by == \"year\" & fun == \"max\") {\n list_img <- year_list$\n map(\n ee_utils_pyfunc(function(x) {\n ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, \"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_max <- ee_max(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_max),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year'\n ),\n 1,4\n )\n\n names(img_max)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_max)\n\n } else if (by == \"year\" & fun == \"mean\") {\n list_img <- year_list$\n map(\n ee_utils_pyfunc(function(x) {\n ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, \"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_mean <- ee_mean(\n img_base,\n region,\n scale = scale)\n\n id_names <- which(\n endsWith(\n names(img_mean),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year'\n ),\n 1,4\n )\n\n names(img_mean)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_mean)\n\n } else if (by == \"year\" & fun == \"median\") {\n list_img <- year_list$\n map(\n 
ee_utils_pyfunc(function(x) {\n ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, \"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_median <- ee_median(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_median),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year'\n ),\n 1,4\n )\n\n names(img_median)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_median)\n\n } else if (by == \"year\" & fun == \"min\") {\n list_img <- year_list$\n map(\n ee_utils_pyfunc(function(x) {\n ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, \"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_min <- ee_min(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_min),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year'\n ),\n 1,4\n )\n\n names(img_min)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_min)\n\n } else if (by == \"year\" & fun == \"mode\") {\n list_img <- year_list$\n map(\n ee_utils_pyfunc(function(x) {\n ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, \"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_mode <- ee_mode(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_mode),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year'\n ),\n 1,4\n )\n\n names(img_mode)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_mode)\n\n } else if (by == \"year\" & fun == \"percentile\") {\n list_img <- year_list$\n map(\n ee_utils_pyfunc(function(x) {\n ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, \"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_percentile <- ee_percentile(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_percentile),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year'\n ),\n 1,4\n )\n\n names(img_percentile)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_percentile)\n\n } else if (by == \"year\" & fun == \"std\") {\n list_img <- year_list$\n map(\n ee_utils_pyfunc(function(x) {\n ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, \"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_std <- ee_std(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_std),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year'\n ),\n 1,4\n )\n\n names(img_std)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_std)\n\n } else if (by == \"year\" & fun == \"sum\") {\n list_img <- year_list$\n map(\n ee_utils_pyfunc(function(x) {\n 
ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, \"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_sum <- ee_sum(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_sum),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year'\n ),\n 1,4\n )\n\n names(img_sum)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_sum)\n\n } else if (by == \"year\" & fun == \"variance\") {\n list_img <- year_list$\n map(\n ee_utils_pyfunc(function(x) {\n ee$ImageCollection(\"IDAHO_EPSCOR/TERRACLIMATE\")$\n select(c(band))$\n filter(\n ee$Filter$calendarRange(x, x, \"year\")\n )$\n sum() }\n )\n )\n\n img_base <- ee$ImageCollection$\n fromImages(\n list_img\n )$\n toBands()$\n multiply(multiply_factor[[band]])\n\n img_variance <- ee_variance(\n img_base,\n region,\n scale = scale\n )\n\n id_names <- which(\n endsWith(\n names(img_variance),\n suffix = band)\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n by = '1 year'\n ),\n 1,4\n )\n\n names(img_variance)[id_names] <- sprintf('%s%s',band,names_id)\n return(img_variance)\n }\n}\n" }, { "alpha_fraction": 0.5802537202835083, "alphanum_fraction": 0.6150027513504028, "avg_line_length": 21.382715225219727, "blob_id": "b17e955719bc3d8df8dea835e718aa40c72a983d", "content_id": "edfab9b7f4849e512182456a361427e6d1c01434", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1813, "license_type": "permissive", "max_line_length": 79, "num_lines": 81, "path": "/R/get_def.R", "repo_name": "healthinnovation/innovar", "src_encoding": "UTF-8", "text": "#' Extract deforestation area data from Hansen\n#'\n#' A function that extract deforestation area data of the year \\bold{2000-2021}\n#'\n#' @param to,from it's a string object,starting and final date.\n#' @param region region and object sf.\n#' @return a tibble object with the new variable in km2\n#' @param scale A nominal scale in meters of the projection to work in.\n#'\n#' @importFrom sf st_transform st_simplify\n#' @importFrom rgee sf_as_ee\n#' @importFrom dplyr select filter contains\n#'\n#' @examples\n#' \\dontrun{\n#'\n#' library(tidyverse)\n#' library(rgee)\n#' library(innovar)\n#' library(sf)\n#' ee_Initialize()\n#'\n#' # 1. Reading a sf object\n#' data(\"Peru\")\n#' region <- Peru\n#' region_ee <- pol_as_ee(region, id = 'distr' ,simplify = 1000)\n#'\n#' # 2. 
Extracting deforestation area data\n#' data <- get_def(\n#' from = '2001-01-01',\n#' to = '2005-12-31',\n#' region = region_ee,\n#' scale = 30\n#' )\n#' }\n#' @export\n\nget_def <- function(from, to, region, scale = 100) {\n\n # Conditions about the times\n start_year <- substr(from, 1, 4) %>% as.numeric()\n end_year <- substr(to, 1, 4) %>% as.numeric()\n\n # loss condition\n rango <- c(0:21)\n names(rango) <- 2000:2021\n anio <- rango[c(as.character(start_year:end_year))]\n\n # The base image collection\n img_base <- ee$Image(\"UMD/hansen/global_forest_change_2021_v1_9\")$\n select(c('lossyear'))$\n eq(anio)\n\n def_area <- img_base$multiply(ee$Image$pixelArea())$\n divide(1e6)\n\n data <- ee_sum(\n x = def_area,\n y = region,\n scale = scale\n )\n\n id_names <- which(\n startsWith(\n names(data),\n prefix = 'const')\n )\n\n names_id <- substr(\n seq(\n as.Date(from),\n as.Date(to),\n length.out = length(id_names)\n ),\n 1,4\n )\n\n names(data)[id_names] <- sprintf('%s%s','Adef_',names_id)\n return(data)\n}\n" }\n]
27
jakirkham/anaconda-project
https://github.com/jakirkham/anaconda-project
079906669dda8ed968a308c9609613e074f28ab6
48845aec41b6dc441593fb2d66b2ef843e995a13
b1f07ac25933b4e16473d0565db7237d20330727
refs/heads/master
2021-01-19T08:45:33.660790
2017-04-05T19:13:53
2017-04-05T19:13:53
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6317819952964783, "alphanum_fraction": 0.6364780068397522, "avg_line_length": 40.262977600097656, "blob_id": "5321b9b5c16a38ca5c14a2120f973e4567b30d85", "content_id": "341242d6a2fa30baa613778c5f1575596d254faf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11926, "license_type": "no_license", "max_line_length": 111, "num_lines": 289, "path": "/anaconda_project/internal/test/test_default_conda_manager.py", "repo_name": "jakirkham/anaconda-project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# ----------------------------------------------------------------------------\n# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n# ----------------------------------------------------------------------------\nfrom __future__ import absolute_import, print_function\n\nimport codecs\nimport json\nimport os\nimport platform\nimport pytest\nimport time\n\nfrom anaconda_project.env_spec import EnvSpec\nfrom anaconda_project.conda_manager import CondaManagerError\nfrom anaconda_project.version import version\n\nfrom anaconda_project.internal.default_conda_manager import DefaultCondaManager\nimport anaconda_project.internal.pip_api as pip_api\n\nfrom anaconda_project.internal.test.tmpfile_utils import with_directory_contents\nfrom anaconda_project.internal.test.test_conda_api import monkeypatch_conda_not_to_use_links\n\nif platform.system() == 'Windows':\n PYTHON_BINARY = \"python.exe\"\n IPYTHON_BINARY = \"Scripts\\ipython.exe\"\n FLAKE8_BINARY = \"Scripts\\\\flake8.exe\"\nelse:\n PYTHON_BINARY = \"bin/python\"\n IPYTHON_BINARY = \"bin/ipython\"\n FLAKE8_BINARY = \"bin/flake8\"\n\ntest_spec = EnvSpec(name='myenv', conda_packages=['ipython'], pip_packages=['flake8'], channels=[])\n\n\ndef test_conda_create_and_install_and_remove(monkeypatch):\n monkeypatch_conda_not_to_use_links(monkeypatch)\n\n spec = test_spec\n assert spec.conda_packages == ('ipython', )\n assert spec.pip_packages == ('flake8', )\n\n spec_with_phony_pip_package = EnvSpec(name='myenv',\n conda_packages=['ipython'],\n pip_packages=['flake8', 'nope_not_a_thing'],\n channels=[])\n assert spec_with_phony_pip_package.conda_packages == ('ipython', )\n assert spec_with_phony_pip_package.pip_packages == ('flake8', 'nope_not_a_thing')\n assert spec_with_phony_pip_package.pip_package_names_set == set(('flake8', 'nope_not_a_thing'))\n\n # package url is supposed to be on a nonexistent port, if it\n # causes a problem we need to mock\n spec_with_bad_url_pip_package = EnvSpec(name='myenv',\n conda_packages=['ipython'],\n pip_packages=['flake8', 'https://127.0.0.1:24729/nope#egg=phony'],\n channels=[])\n assert spec_with_bad_url_pip_package.conda_packages == ('ipython', )\n assert spec_with_bad_url_pip_package.pip_packages == ('flake8', 'https://127.0.0.1:24729/nope#egg=phony')\n assert spec_with_bad_url_pip_package.pip_package_names_set == set(('flake8', 'phony'))\n\n def do_test(dirname):\n envdir = os.path.join(dirname, spec.name)\n\n manager = DefaultCondaManager()\n\n assert not os.path.isdir(envdir)\n assert not os.path.exists(os.path.join(envdir, IPYTHON_BINARY))\n assert not os.path.exists(os.path.join(envdir, FLAKE8_BINARY))\n assert not manager._timestamp_file_up_to_date(envdir, spec)\n\n deviations = manager.find_environment_deviations(envdir, spec)\n\n assert deviations.missing_packages == ('ipython', )\n assert deviations.missing_pip_packages == 
('flake8', )\n\n manager.fix_environment_deviations(envdir, spec, deviations)\n\n assert os.path.isdir(envdir)\n assert os.path.isdir(os.path.join(envdir, \"conda-meta\"))\n assert os.path.exists(os.path.join(envdir, IPYTHON_BINARY))\n assert os.path.exists(os.path.join(envdir, FLAKE8_BINARY))\n\n assert manager._timestamp_file_up_to_date(envdir, spec)\n assert not manager._timestamp_file_up_to_date(envdir, spec_with_phony_pip_package)\n\n # test bad pip package throws error\n deviations = manager.find_environment_deviations(envdir, spec_with_phony_pip_package)\n\n assert deviations.missing_packages == ()\n assert deviations.missing_pip_packages == ('nope_not_a_thing', )\n\n with pytest.raises(CondaManagerError) as excinfo:\n manager.fix_environment_deviations(envdir, spec_with_phony_pip_package, deviations)\n assert 'Failed to install missing pip packages' in str(excinfo.value)\n assert not manager._timestamp_file_up_to_date(envdir, spec_with_phony_pip_package)\n\n # test bad url package throws error\n deviations = manager.find_environment_deviations(envdir, spec_with_bad_url_pip_package)\n\n assert deviations.missing_packages == ()\n assert deviations.missing_pip_packages == ('phony', )\n\n with pytest.raises(CondaManagerError) as excinfo:\n manager.fix_environment_deviations(envdir, spec_with_bad_url_pip_package, deviations)\n assert 'Failed to install missing pip packages' in str(excinfo.value)\n assert not manager._timestamp_file_up_to_date(envdir, spec_with_bad_url_pip_package)\n\n # test that we can remove a package\n assert manager._timestamp_file_up_to_date(envdir, spec)\n manager.remove_packages(prefix=envdir, packages=['ipython'])\n assert not os.path.exists(os.path.join(envdir, IPYTHON_BINARY))\n assert not manager._timestamp_file_up_to_date(envdir, spec)\n\n # test for error removing\n with pytest.raises(CondaManagerError) as excinfo:\n manager.remove_packages(prefix=envdir, packages=['ipython'])\n # different versions of conda word this differently\n assert 'no packages found to remove' in str(excinfo.value) or 'Package not found' in str(excinfo.value)\n assert not manager._timestamp_file_up_to_date(envdir, spec)\n\n # test failure to exec pip\n def mock_call_pip(*args, **kwargs):\n raise pip_api.PipError(\"pip fail\")\n\n monkeypatch.setattr('anaconda_project.internal.pip_api._call_pip', mock_call_pip)\n\n with pytest.raises(CondaManagerError) as excinfo:\n deviations = manager.find_environment_deviations(envdir, spec)\n assert 'pip failed while listing' in str(excinfo.value)\n\n with_directory_contents(dict(), do_test)\n\n\ndef test_timestamp_file_works(monkeypatch):\n monkeypatch_conda_not_to_use_links(monkeypatch)\n\n spec = test_spec\n\n def do_test(dirname):\n envdir = os.path.join(dirname, spec.name)\n\n manager = DefaultCondaManager()\n\n def print_timestamps(when):\n newest_in_prefix = 0\n for d in manager._timestamp_comparison_directories(envdir):\n try:\n t = os.path.getmtime(d)\n if t > newest_in_prefix:\n newest_in_prefix = t\n except Exception:\n pass\n timestamp_file = 0\n try:\n timestamp_file = os.path.getmtime(manager._timestamp_file(envdir, spec))\n except Exception:\n pass\n print(\"%s: timestamp file %d prefix %d\" % (when, timestamp_file, newest_in_prefix))\n\n print_timestamps(\"before env creation\")\n\n assert not os.path.isdir(envdir)\n assert not os.path.exists(os.path.join(envdir, IPYTHON_BINARY))\n assert not os.path.exists(os.path.join(envdir, FLAKE8_BINARY))\n assert not manager._timestamp_file_up_to_date(envdir, spec)\n\n deviations = 
manager.find_environment_deviations(envdir, spec)\n\n assert deviations.missing_packages == ('ipython', )\n assert deviations.missing_pip_packages == ('flake8', )\n assert not deviations.ok\n\n manager.fix_environment_deviations(envdir, spec, deviations)\n\n print_timestamps(\"after fixing deviations\")\n\n assert os.path.isdir(envdir)\n assert os.path.isdir(os.path.join(envdir, \"conda-meta\"))\n assert os.path.exists(os.path.join(envdir, IPYTHON_BINARY))\n assert os.path.exists(os.path.join(envdir, FLAKE8_BINARY))\n\n assert manager._timestamp_file_up_to_date(envdir, spec)\n\n called = []\n from anaconda_project.internal.pip_api import _call_pip as real_call_pip\n from anaconda_project.internal.conda_api import _call_conda as real_call_conda\n\n def traced_call_pip(*args, **kwargs):\n called.append((\"pip\", args, kwargs))\n return real_call_pip(*args, **kwargs)\n\n monkeypatch.setattr('anaconda_project.internal.pip_api._call_pip', traced_call_pip)\n\n def traced_call_conda(*args, **kwargs):\n called.append((\"conda\", args, kwargs))\n return real_call_conda(*args, **kwargs)\n\n monkeypatch.setattr('anaconda_project.internal.conda_api._call_conda', traced_call_conda)\n\n deviations = manager.find_environment_deviations(envdir, spec)\n\n assert [] == called\n\n assert deviations.missing_packages == ()\n assert deviations.missing_pip_packages == ()\n assert deviations.ok\n\n assert manager._timestamp_file_up_to_date(envdir, spec)\n\n # now modify conda-meta and check that we DO call the package managers\n time.sleep(1.1) # be sure we are in a new second\n conda_meta_dir = os.path.join(envdir, \"conda-meta\")\n print(\"conda-meta original timestamp: %d\" % os.path.getmtime(conda_meta_dir))\n inside_conda_meta = os.path.join(conda_meta_dir, \"thing.txt\")\n with codecs.open(inside_conda_meta, 'w', encoding='utf-8') as f:\n f.write(u\"This file should change the mtime on conda-meta\\n\")\n print(\"file inside conda-meta %d and conda-meta itself %d\" % (os.path.getmtime(inside_conda_meta),\n os.path.getmtime(conda_meta_dir)))\n os.remove(inside_conda_meta)\n\n print_timestamps(\"after touching conda-meta\")\n\n assert not manager._timestamp_file_up_to_date(envdir, spec)\n\n deviations = manager.find_environment_deviations(envdir, spec)\n\n assert len(called) == 2\n\n assert deviations.missing_packages == ()\n assert deviations.missing_pip_packages == ()\n # deviations should not be ok (due to timestamp)\n assert not deviations.ok\n\n assert not manager._timestamp_file_up_to_date(envdir, spec)\n\n # we want to be sure we update the timestamp file even though\n # there wasn't any actual work to do\n manager.fix_environment_deviations(envdir, spec, deviations)\n\n print_timestamps(\"after fixing deviations 2\")\n\n assert manager._timestamp_file_up_to_date(envdir, spec)\n\n with_directory_contents(dict(), do_test)\n\n\ndef test_timestamp_file_ignores_failed_write(monkeypatch):\n monkeypatch_conda_not_to_use_links(monkeypatch)\n\n spec = test_spec\n\n def do_test(dirname):\n from codecs import open as real_open\n\n envdir = os.path.join(dirname, spec.name)\n\n manager = DefaultCondaManager()\n\n counts = dict(calls=0)\n\n def mock_open(*args, **kwargs):\n counts['calls'] += 1\n if counts['calls'] == 1:\n raise IOError(\"did not open\")\n else:\n return real_open(*args, **kwargs)\n\n monkeypatch.setattr('codecs.open', mock_open)\n\n # this should NOT throw but also should not write the\n # timestamp file (we ignore errors)\n filename = manager._timestamp_file(envdir, spec)\n assert 
filename.startswith(envdir)\n assert not os.path.exists(filename)\n manager._write_timestamp_file(envdir, spec)\n assert not os.path.exists(filename)\n # the second time we really write it (this is to prove we\n # are looking at the right filename)\n manager._write_timestamp_file(envdir, spec)\n assert os.path.exists(filename)\n\n # check on the file contents\n with real_open(filename, 'r', encoding='utf-8') as f:\n content = json.loads(f.read())\n assert dict(anaconda_project_version=version) == content\n\n with_directory_contents(dict(), do_test)\n" }, { "alpha_fraction": 0.7184620499610901, "alphanum_fraction": 0.7229107022285461, "avg_line_length": 35.160919189453125, "blob_id": "a0d50367b87cf541593e1e6e637425a704d9d533", "content_id": "45185d112179fa15833501feb06f7314ae97ef2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3147, "license_type": "no_license", "max_line_length": 306, "num_lines": 87, "path": "/docs/source/getting-started.rst", "repo_name": "jakirkham/anaconda-project", "src_encoding": "UTF-8", "text": "\n===============\nGetting started\n===============\n\nThis getting started guide walks you through using Anaconda Project for the first time. \n\n**After completing this guide, you will be able to:**\n\n* Create a project containing a Bokeh app\n* Package and share the project\n* Run the project with a single command\n\nThis guide is for all platforms: Windows, macOS and Linux.\n\nIf you have not yet installed and started Anaconda Project, follow the :doc:`Install instructions <install>`.\n\n\nCreate a project containing a Bokeh app\n=======================================\n\nWe'll create a project directory called ``clustering_app``. At the command prompt, switch to a directory ``clustering_app`` and initialize the project::\n\n $ mkdir clustering_app\n $ cd clustering_app\n $ anaconda-project init\n Project configuration is in /User/Anaconda/My Anaconda Projects/clustering_app/anaconda-project.yml\n\nInside your ``clustering_app`` project directory, create and save a new file named ``main.py`` with the `Bokeh clustering example <https://raw.githubusercontent.com/bokeh/bokeh/master/examples/app/clustering/main.py>`_. (You may also wish to learn more about `Bokeh <http://bokeh.pydata.org/en/latest/>`_.)\n\nWe need to add the packages that the Bokeh clustering demo depends on: Bokeh, pandas, scikit-learn and NumPy. Open the ``anaconda-project.yml`` file and edit the packages section with:\n\n.. code-block:: yaml\n\n packages:\n - python=3.5\n - bokeh=0.12.4\n - numpy=1.12.0\n - scikit-learn=0.18.1\n\nTo tell ``anaconda-project`` about the Bokeh app, be sure you are in the directory \"clustering_app\" and type::\n\n anaconda-project add-command plot .\n\nWhen prompted, type ``B`` for Bokeh app. The command line session looks like::\n\n Is `plot` a (B)okeh app, (N)otebook, or (C)ommand line? B\n Added a command 'plot' to the project. 
Run it with `anaconda-project run plot`.\n\nNow, you can run your project with::\n\n anaconda-project run\n\nA browser window opens, displaying the clustering app.\n\nShare your project\n==================\n\nTo share this project with a colleague, first we archive it by typing::\n\n anaconda-project archive clustering.zip\n\nSend that file to the colleague.\n\nIf your colleague has Anaconda Project too, they can unzip the file and type ``anaconda-project run`` (for example), and Anaconda Project will download the data, install needed packages, and run the command.\n\nYou can also share projects by uploading them to Anaconda Cloud, using the following command::\n\n anaconda-project upload\n\nNOTE: You need a free Anaconda Cloud account to upload projects to Anaconda Cloud.\n\nRun your project\n================\n\nAnyone that downloads your project can now have it running locally with only one command, without having to worry about the setup::\n\n anaconda-project run\n\n\nNext steps\n==========\n\nThere's much more that Anaconda Project can do.\n\n * Learn more with the :doc:`Anaconda Project tutorial <tutorial>`, including how to download data with your project and how to configure your project with environment variables.\n\n * Read details on :doc:`the anaconda-project.yml format <reference>`.\n" }, { "alpha_fraction": 0.7661290168762207, "alphanum_fraction": 0.7677419185638428, "avg_line_length": 50.58333206176758, "blob_id": "e00d61147c9f34e9d5c5ab937441ccd3a831dbb0", "content_id": "4d2fc91e57061620d01a8ba42261c33e00044866", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 620, "license_type": "no_license", "max_line_length": 214, "num_lines": 12, "path": "/docs/source/concepts.rst", "repo_name": "jakirkham/anaconda-project", "src_encoding": "UTF-8", "text": "\n========\nConcepts\n========\n\nProject\n=======\n\nA project is a folder that contains a ``anaconda-project.yml`` configuration file together with scripts, notebooks, and other files. A project is usually compressed into a ``.tar.bz2`` file for sharing and storing.\n\nData scientists use projects to encapsulate data science projects and make them easily portable. The configuration file can include: commands, variables, services, downloads, packages, channels and env_specs.\n\nAnaconda Project automates setup steps, so that data scientists can just type ``anaconda-project run`` and have their project deployment Just Work.\n" }, { "alpha_fraction": 0.7090504765510559, "alphanum_fraction": 0.7182414531707764, "avg_line_length": 43.8320198059082, "blob_id": "d9a73ef3c66c1bdd908885a4d3098954856206f8", "content_id": "6797cd9ef26f3195665cce250fdaecb4400df03e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 17082, "license_type": "no_license", "max_line_length": 386, "num_lines": 381, "path": "/docs/source/tutorial.rst", "repo_name": "jakirkham/anaconda-project", "src_encoding": "UTF-8", "text": "\n========\nTutorial\n========\n\nIn this tutorial, you will create a project containing a Bokeh application, then package it up as a zip file and \"send\" it to an imaginary colleague. 
Your colleague will then be able to unpack it and run it with a single command.\n\n**In this tutorial you will learn how to:**\n\n* Create an empty project\n* Get some data to work with\n* Create a command to run\n* Add required packages\n* Configure your project with environment variables\n* Add custom variables\n* Add an encrypted custom variable\n* Create a Bokeh app\n* Clean and reproduce\n* Zip it up for a colleague\n* Run project\n\nThis tutorial is for all platforms: Windows, macOS and Linux. You do not need any prior knowledge of Bokeh or experience with Bokeh for this tutorial.\n\nIf you have not yet installed and started Anaconda Project, do so following the :doc:`Install instructions <install>`.\n\n\nCreate an empty project\n=======================\n\nWe'll create a project directory called ``iris``. At the command\nprompt, switch to a directory you'd like to contain the ``iris``\nproject. To create the ``iris`` project directory, type this::\n\n anaconda-project init --directory iris\n\nIt will ask you whether to create the ``iris`` directory. Type \"y\"\nto confirm. Your command line session will look something like\nthis::\n\n $ cd /home/alice/mystuff\n $ anaconda-project init --directory iris\n Create directory '/home/alice/mystuff/iris'? y\n Project configuration is in /home/alice/mystuff/iris/anaconda-project.yml\n\nOptional: You can use your editor now to look through the file ``iris/anaconda-project.yml``. We won't edit ``anaconda-project.yml`` manually in this tutorial, but you will see later that the commands we use in this tutorial will modify it.\n\nBefore continuing, change into your new ``iris`` directory::\n\n cd iris\n\nGet some data to work with\n==========================\n\nOften data sets are too large to keep locally, so you may want to download them on demand. We'll use a small data set about iris flowers to show how download on demand works.\n\nChange into your new ``iris`` project directory, then copy and paste in this code::\n\n anaconda-project add-download IRIS_CSV https://raw.githubusercontent.com/bokeh/bokeh/f9aa6a8caae8c7c12efd32be95ec7b0216f62203/bokeh/sampledata/iris.csv\n\nAfter clicking \"enter\" ``anaconda-project`` downloads the data file. You will see a new file ``iris.csv`` in your iris directory. Now if you look at ``anaconda-project.yml``, you'll see a new entry in the ``downloads:`` section.\n\nHere's what the command line session looks like::\n\n $ cd /home/alice/mystuff/iris\n $ anaconda-project add-download IRIS_CSV https://raw.githubusercontent.com/bokeh/bokeh/f9aa6a8caae8c7c12efd32be95ec7b0216f62203/bokeh/sampledata/iris.csv\n File downloaded to /home/alice/mystuff/iris/iris.csv\n Added https://raw.githubusercontent.com/bokeh/bokeh/f9aa6a8caae8c7c12efd32be95ec7b0216f62203/bokeh/sampledata/iris.csv to the project file.\n\nTIP: The name ``IRIS_CSV`` shown on the second line is the name of\nan environment variable. We'll get to those in a moment.\n\nCreate a command to run\n=======================\n\nA project should contain some sort of code, right? Let's make a \"hello world\". Create a file ``hello.py`` with these contents::\n\n print(\"hello\")\n\nNow you could run ``hello.py`` with the command ``python hello.py``. But that won't do any ``anaconda-project`` magic. To be sure things get set up, add ``hello.py`` as a project command, like this::\n\n anaconda-project add-command hello \"python hello.py\"\n\nIt will ask you what kind of command it is; choose ``C`` for command line. 
The command line session looks like::\n\n $ anaconda-project add-command hello \"python hello.py\"\n Is `hello` a (B)okeh app, (N)otebook, or (C)ommand line? C\n Added a command 'hello' to the project. Run it with `anaconda-project run hello`.\n\nNow try ``anaconda-project run hello``. There will be a short delay as the new dedicated project is created, and then it will print \"hello\".\n\nNOTE: Since you have only one command, you could also just use the command ``anaconda-project run``.\n\nRun the command again. It will run much faster the second time because the dedicated project is already created.\n\nIn your ``iris`` directory, you will now see an ``envs`` subdirectory. By default every project has its own packages in its own sandbox to ensure that projects do not interfere with one another.\n\nNow if you look at ``anaconda-project.yml`` in your text editor you will see the ``hello`` command in the ``commands:`` section.\n\nYou can also list all the commands in your project by typing\n``anaconda-project list-commands``::\n\n $ anaconda-project list-commands\n Commands for project: /home/alice/mystuff/iris\n\n Name Description\n ==== ===========\n hello python hello.py\n\nAdd required packages\n=====================\n\nIn the next steps, we'll need to use some packages that aren't in our ``iris/envs/default`` environment yet: Bokeh and pandas.\n\nIn your ``iris`` directory, type::\n\n anaconda-project add-packages bokeh=0.12 pandas\n\nThe command line session will look something like::\n\n $ anaconda-project add-packages bokeh=0.12 pandas\n conda install: Using Anaconda Cloud api site https://api.anaconda.org\n Using Conda environment /home/alice/mystuff/iris/envs/default.\n Added packages to project file: bokeh=0.12, pandas.\n\nIf you look at ``anaconda-project.yml`` now, you'll see Bokeh and pandas listed under the ``packages:`` section. Since the packages have now been installed in your project's environment, you will also see files such as ``envs/YOUR-PATH-TO/bokeh``.\n\nConfigure your project with environment variables\n=================================================\n\nYou may have wondered about that string ``IRIS_CSV`` when you first looked in your ``anaconda-project.yml`` file. That's the environment variable that tells your program the location of the file ``iris.csv``. There are also some other environment variables that ``anaconda-project`` sets automatically, such as ``PROJECT_DIR`` which locates your project directory.\n\nYou can grab these variables from within your scripts with Python's ``os.getenv`` function.\n\nLet's make a script that prints out our data. In your text editor, name the script ``showdata.py`` and paste in the following code::\n\n import os\n import pandas as pd\n\n project_dir = os.getenv(\"PROJECT_DIR\")\n env = os.getenv(\"CONDA_DEFAULT_ENV\")\n iris_csv = os.getenv(\"IRIS_CSV\")\n\n flowers = pd.read_csv(iris_csv)\n\n print(flowers)\n print(\"My project directory is {} and my conda environment is {}\".format(project_dir, env))\n\nSave and close the editor. 
If you tried to run your new script now with ``python showdata.py`` it probably wouldn't work, because Pandas might not be installed yet and the environment variables wouldn't be set.\n\nTell ``anaconda-project`` how to run your new script by adding a new command called showdata::\n\n anaconda-project add-command showdata \"python showdata.py\"\n\n(When prompted, choose \"C\" for \"command line\".)\n\nNow run that new command at the command prompt::\n\n anaconda-project run showdata\n\nYou will see the data print out, and then the sentence saying \"My project directory is... and my conda environment is...\".\n\nGood work so far!\n\nAdd custom variables\n====================\n\nLet's say your new command needs a database password, or has another tunable parameter. You can require (or just allow) users to configure these before the command runs.\n\nNOTE: Encrypted variables such as passwords are treated differently from plain variables. Encrypted variable values are kept in the system keychain, while plain variable values are kept in the file ``anaconda-project-local.yml``.\n\nLet's try out a plain unencrypted variable first.\n\nType the command::\n\n anaconda-project add-variable COLUMN_TO_SHOW\n\nIn ``anaconda-project.yml`` you now have a variable named ``COLUMN_TO_SHOW`` in the ``variables:`` section, and ``anaconda-project list-variables`` lists ``COLUMN_TO_SHOW``.\n\nNow modify your script ``showdata.py`` to use this new variable::\n\n import os\n import pandas as pd\n\n project_dir = os.getenv(\"PROJECT_DIR\")\n env = os.getenv(\"CONDA_DEFAULT_ENV\")\n iris_csv = os.getenv(\"IRIS_CSV\")\n column_to_show = os.getenv(\"COLUMN_TO_SHOW\")\n\n flowers = pd.read_csv(iris_csv)\n\n print(\"Showing column {}\".format(column_to_show))\n print(flowers[column_to_show])\n print(\"My project directory is {} and my conda environment is {}\".format(project_dir, env))\n\nBecause there's no value yet for ``COLUMN_TO_SHOW``, it will be mandatory for users to provide one. Try this command::\n\n anaconda-project run showdata\n\nThe first time you run this, you will see a prompt asking you to type in a column name. If you enter a column at the prompt (try \"sepal_length\"), it will be saved in ``anaconda-project-local.yml``. Next time you run it, you won't be prompted for a value.\n\nTo change the value in ``anaconda-project-local.yml``, use::\n\n anaconda-project set-variable COLUMN_TO_SHOW=petal_length\n\n``anaconda-project-local.yml`` is local to this user and machine, while\n``anaconda-project.yml`` is shared across all users of a project.\n\nYou can also set a default value for a variable in ``anaconda-project.yml``; if you do this, users are not prompted for a value, but they can override the default if they want to. Set a default value like this::\n\n anaconda-project add-variable --default=sepal_width COLUMN_TO_SHOW\n\nNow you will see the default in ``anaconda-project.yml``.\n\nIf you've set the variable in ``anaconda-project-local.yml``, the default will be ignored. You can unset your local override with::\n\n anaconda-project unset-variable COLUMN_TO_SHOW\n\nThe default will then be used when you ``anaconda-project run showdata``.\n\nNOTE: ``unset-variable`` removes the variable value, but keeps the requirement that ``COLUMN_TO_SHOW`` must be set. 
``remove-variable`` removes the variable requirement from ``anaconda-project.yml`` so that the project will no longer require a ``COLUMN_TO_SHOW`` value in order to run.\n\nAdd an encrypted custom variable\n================================\n\nIt's good practice to use variables for passwords and secrets in particular. This way, every user of the project can input their own password, and it will be kept in their system keychain.\n\nAny variable ending in ``_PASSWORD``, ``_SECRET``, or ``_SECRET_KEY`` is encrypted by default.\n\nTo create an encrypted custom variable, type::\n\n anaconda-project add-variable DB_PASSWORD\n\nIn ``anaconda-project.yml`` you now have a ``DB_PASSWORD`` in the ``variables:`` section, and ``anaconda-project list-variables`` lists ``DB_PASSWORD``.\n\nFrom here, things work just like the ``COLUMN_TO_SHOW`` example above, except that the value of ``DB_PASSWORD`` is saved in the system keychain rather than in ``anaconda-project-local.yml``.\n\nTry for example::\n\n anaconda-project run showdata\n\nThis will prompt you for a value the first time, and then save it in the keychain and use it from there on the second run. You can also use ``anaconda-project set-variable DB_PASSWORD=whatever``, ``anaconda-project unset-variable DB_PASSWORD``, and so on.\n\nBecause this Iris example does not need a database password, we'll now remove it. Type::\n\n anaconda-project remove-variable DB_PASSWORD\n\nCreate a Bokeh app\n==================\n\nLet's plot that flower data!\n\nInside your ``iris`` project directory, create a new directory ``iris_plot``, and in it save a new file named ``main.py`` with these contents::\n\n import os\n import pandas as pd\n from bokeh.plotting import Figure\n from bokeh.io import curdoc\n\n iris_csv = os.getenv(\"IRIS_CSV\")\n\n flowers = pd.read_csv(iris_csv)\n\n colormap = {'setosa': 'red', 'versicolor': 'green', 'virginica': 'blue'}\n colors = [colormap[x] for x in flowers['species']]\n\n p = Figure(title = \"Iris Morphology\")\n p.xaxis.axis_label = 'Petal Length'\n p.yaxis.axis_label = 'Petal Width'\n\n p.circle(flowers[\"petal_length\"], flowers[\"petal_width\"],\n color=colors, fill_alpha=0.2, size=10)\n\n curdoc().title = \"Iris Example\"\n curdoc().add_root(p)\n\nYou now have a file ``iris_plot/main.py`` inside the project. The ``iris_plot`` directory is a simple Bokeh app. (If you aren't familiar with Bokeh you can learn more from the `Bokeh documentation <http://bokeh.pydata.org/en/latest/>`_.)\n\nTo tell ``anaconda-project`` about the Bokeh app, be sure you are in the directory \"iris\" and type::\n\n anaconda-project add-command plot iris_plot\n\nWhen prompted, type ``B`` for Bokeh app. The command line session looks like::\n\n $ anaconda-project add-command plot iris_plot\n Is `plot` a (B)okeh app, (N)otebook, or (C)ommand line? B\n Added a command 'plot' to the project. Run it with `anaconda-project run plot`.\n\nNOTE: We use the app directory path, not the script path ``iris_plot/main.py``, to refer to a Bokeh app. Bokeh looks for the file ``main.py`` by convention.\n\nTo see your Bokeh plot, run this command::\n\n anaconda-project run plot --show\n\n``--show`` gets passed to the ``bokeh serve`` command, and tells Bokeh to open a browser window. Other options for ``bokeh serve`` can be appended to the ``anaconda-project run`` command line as well, if you like.\n\nA browser window opens, displaying the Iris plot. 
Success!\n\nClean and reproduce\n===================\n\nYou've left a trail of breadcrumbs in ``anaconda-project.yml`` describing\nhow to reproduce your project. Look around in your ``iris``\ndirectory and you'll see you have ``envs/default`` and\n``iris.csv``, which you didn't create manually. Let's get rid of\nthe unnecessary stuff.\n\nType::\n\n anaconda-project clean\n\n``iris.csv`` and ``envs/default`` are now gone.\n\nRun one of your commands again, and they'll come back. Type::\n\n anaconda-project run showdata\n\nNow ``iris.csv`` and ``envs/default`` are back as they were before.\n\nYou can also redo the setup steps without running a command. Clean again::\n\n anaconda-project clean\n\n``iris.csv`` and ``envs/default`` are gone again. Then re-prepare the project::\n\n anaconda-project prepare\n\nNow ``iris.csv`` and ``envs/default`` are back again, this time without running a command.\n\nZip it up for a colleague\n=========================\n\nTo share this project with a colleague, you likely want to put it in a zip file. You won't want to include ``envs/default``, because conda environments are large and don't work if moved between machines. If ``iris.csv`` were a larger file, you might not want to include that either. The ``anaconda-project archive`` command automatically omits the files it can reproduce automatically.\n\nType::\n\n anaconda-project archive iris.zip\n\nYou will now have a file ``iris.zip``. If you list the files in the zip, you'll see that the automatically-generated ones are not included::\n\n $ unzip -l iris.zip\n Archive: iris.zip\n Length Date Time Name\n --------- ---------- ----- ----\n 16 06-10-2016 10:04 iris/hello.py\n 281 06-10-2016 10:22 iris/showdata.py\n 222 06-10-2016 09:46 iris/.projectignore\n 4927 06-10-2016 10:31 iris/anaconda-project.yml\n 557 06-10-2016 10:33 iris/iris_plot/main.py\n --------- -------\n 6003 5 files\n\nNOTE: You can also use a ``.projectignore`` file to manually exclude anything you don't want in your archives.\n\nNOTE: ``anaconda-project`` also supports creating ``.tar.gz`` and ``.tar.bz2`` archives. The archive format will match the filename you provide.\n\nWhen your colleague unzips the archive, they can list the commands in it::\n\n $ anaconda-project list-commands\n Commands for project: /home/bob/projects/iris\n\n Name Description\n ==== ===========\n hello python hello.py\n plot Bokeh app iris_plot\n showdata python showdata.py\n\n\nRun project\n===========\n\nThen your colleague can run any of the commands. If the colleague types ``anaconda-project run showdata`` then ``anaconda-project`` will download the data, install needed packages, and run the command.\n\n\nAdditional information\n======================\n\nThere's much more that ``anaconda-project`` can do.\n\n* It can automatically start processes that your commands depend on. Right now it only supports starting Redis, for demonstration purposes. Use the ``anaconda-project add-service redis`` command to play with this. More kinds of service will be supported soon! If there are particular services you would find useful, please let us know.\n* You can have multiple conda environment specifications in your project, which is useful if some of your commands use a different version of Python or otherwise have distinct dependencies. ``anaconda-project add-env-spec`` adds these additional environment specs.\n* Commands can be IPython notebooks. 
If you create a notebook in your project directory, add it with ``anaconda-project add-command mynotebook.ipynb``.\n* Read details on :doc:`the anaconda-project.yml format <reference>`.\n" }, { "alpha_fraction": 0.7724738717079163, "alphanum_fraction": 0.7742160558700562, "avg_line_length": 42.816795349121094, "blob_id": "fc6ef06ac0f0364489ec603576f2ec5e7392dd24", "content_id": "a57cdc15652c4f184446ff0f6062ffc9fff81c95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5756, "license_type": "no_license", "max_line_length": 73, "num_lines": 131, "path": "/README.md", "repo_name": "jakirkham/anaconda-project", "src_encoding": "UTF-8", "text": "# Anaconda Project\n\n*Reproducible, executable project directories.*\n\nTake any directory full of stuff that you're working on: web apps,\nscripts, Jupyter notebooks, data files, whatever it may be.\n\nBy adding an `anaconda-project.yml` to this project directory,\na single `anaconda-project run` command will be able to set\nup all dependencies and then launch the project.\n\nAnaconda projects should run in the same way on your machine, on a\ncolleague's machine, or when deployed to a server.\n\nRunning an Anaconda project executes a command specified in the\n`anaconda-project.yml` (any arbitrary commands can be configured).\n\n`anaconda-project.yml` automates project setup; Anaconda can\nestablish all prerequisite conditions for the project's commands\nto execute successfully. These conditions could include:\n\n * creating a conda environment with certain packages in it\n * prompting the user for passwords or other configuration\n * downloading data files\n * starting extra processes such as a database server\n\nThe goal is that if your project runs on your machine, it will\nalso run on others' machines (or on your future machine after you\nreboot a few times and forget how your project works).\n\nThe command `anaconda-project init DIRECTORY_NAME` creates an\n`anaconda-project.yml`, converting your project directory into an\nAnaconda project.\n\n## Put another way...\n\nTraditional build scripts such as `setup.py` automate \"building\"\nthe project (going from source code to something runnable), while\n`anaconda-project` automates \"running\" the project (taking build\nartifacts and doing any necessary setup prior to executing them).\n\n## Why?\n\n * Do you have a README with setup steps in it? You may find that\n   it gets outdated, or that people don't read it, and then you\n   have to help them diagnose the problem. `anaconda-project`\n   automates the setup steps; the README can say \"type\n   `anaconda-project run`\" and that's it.\n * Do you need everyone working on a project to have the same\n   dependencies in their conda environment? `anaconda-project`\n   automates environment creation and verifies that environments\n   have the right versions of packages.\n * Do you sometimes include your personal passwords or secret keys\n   in your code, because it's too complicated to do otherwise?\n   With `anaconda-project`, you can `os.getenv(\"DB_PASSWORD\")` and\n   configure `anaconda-project` to prompt the user for any missing\n   credentials.\n * Do you want improved reproducibility? With `anaconda-project`,\n   someone who wants to reproduce your analysis can ensure they\n   have exactly the same setup that you have on your machine.\n * Do you want to deploy your analysis as a web application? 
The\n configuration in `anaconda-project.yml` tells hosting providers how to\n run your project, so there's no special setup needed when\n you move from your local machine to the web.\n\n## Learn more from the complete docs\n\nCheck out the complete documentation, including a tutorial\nand reference guide, at:\nhttp://anaconda-project.readthedocs.io/en/latest/index.html\n\n## If you've been using `conda env` and `environment.yml`\n\n`anaconda-project` has similar functionality and may be more\nconvenient. The advantage of `anaconda-project` for environment\nhandling is that it performs conda operations, _and_ records them\nin a config file for reproducibility, in one step.\n\nFor example, if you do `anaconda-project add-packages bokeh=0.11`,\nthat will install Bokeh with conda, _and_ add `bokeh=0.11` to an\nenvironment spec in `anaconda-project.yml` (the effect is comparable to\nadding it to `environment.yml`). In this way, \"your current conda\nenvironment's state\" and \"your configuration to be shared with\nothers\" won't get out of sync.\n\n`anaconda-project` will also automatically set up environments for a\ncolleague when they type `anaconda-project run` on their machine; they\ndon't have to do a separate step to create, update, or activate\nenvironments before they run the code. This may be especially\nuseful when you change the required dependencies; with `conda env`\npeople can forget to re-run it and update their packages, while\n`anaconda-project run` will automatically add missing packages every\ntime.\n\nIn addition to environment creation, `anaconda-project` can perform\nother kinds of setup, such as adding data files and running a\ndatabase server. It's a superset of `conda env` in that sense.\n\n# Stability note\n\nFor the time being, the Anaconda project API and command line syntax\nare subject to change in future releases. A project created with\nthe current “beta” version of Anaconda project may always need to be\nrun with that version of Anaconda project and not conda\nproject 1.0. When we think things are solid, we’ll switch from\n“beta” to “1.0” and you’ll be able to rely on long-term interface\nstability.\n\n# Bug Reports\n\nPlease report issues right here on GitHub.\n\n# Contributing\n\nHere's how to work on the code:\n\n * `python setup.py test` is configured to run all the checks that\n have to pass before you commit or push. It also reformats the\n code with yapf if necessary. Continuous integration runs this\n command so you should run it and make it pass before you push\n to the repo.\n * To only run the formatter and linter, use `python setup.py test\n --format-only`.\n * To only run the tests, use `python -m pytest -vv anaconda_project`\n * To only run a single file of tests use `python -m pytest\n -vv anaconda_project/test/test_foo.py`\n * To only run a single test function `python -m pytest\n -vv anaconda_project/test/test_foo.py::test_something`\n * There's a script `build_and_upload.sh` that should be used to\n manually make a release. 
The checked-out revision should have\n a version tag prior to running the script.\n" }, { "alpha_fraction": 0.7518247961997986, "alphanum_fraction": 0.7547445297241211, "avg_line_length": 31.619047164916992, "blob_id": "2e449e222bca3a83f35acb2b7f5a0809bf3315d3", "content_id": "40d85013bf08dc96fd4ad74eebdea86ab7eeb0f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 685, "license_type": "no_license", "max_line_length": 166, "num_lines": 21, "path": "/docs/source/index.rst", "repo_name": "jakirkham/anaconda-project", "src_encoding": "UTF-8", "text": "================\nAnaconda Project\n================\n\nAnaconda Project encapsulates data science projects and makes them easily portable. Anaconda Project automates setup steps such as installing the right\npackages, downloading files, setting environment variables, and running commands.\n\nAnaconda Project makes it easy to reproduce your work, share projects with others, and run them on different platforms, and even simplifies deploying them on servers.\n\nAnaconda Project is supported and offered by Continuum Analytics and contributors under a 3-clause BSD license.\n\n.. toctree::\n :maxdepth: 1\n :hidden:\n\n install\n concepts\n getting-started\n tutorial\n reference\n help\n" }, { "alpha_fraction": 0.7048665881156921, "alphanum_fraction": 0.7048665881156921, "avg_line_length": 27.954545974731445, "blob_id": "f89c5648aa0a445c441622173486f9bedb875e3c", "content_id": "e547c52034df6c8facd8406bb8b379c51f183a6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 637, "license_type": "no_license", "max_line_length": 119, "num_lines": 22, "path": "/docs/source/help.rst", "repo_name": "jakirkham/anaconda-project", "src_encoding": "UTF-8", "text": "====\nHelp\n====\n\nWhere to ask for help\n---------------------\n\nTo ask questions or submit bug reports, use our `Github Issue Tracker`_.\n\n.. _`Github Issue Tracker`: https://github.com/Anaconda-Platform/anaconda-project/issues\n\n\nPaid support\n------------\n\nAnaconda Project is an open source project that originated at `Continuum Analytics, Inc. <https://www.continuum.io/>`_.\nIn addition to the previous options, Continuum offers paid training and support: https://www.continuum.io/support\n\nGive us feedback\n----------------\n\nHelp us make the documentation better! Please send feedback about the Anaconda Project documentation to [email protected].\n" }, { "alpha_fraction": 0.5886165499687195, "alphanum_fraction": 0.5908446311950684, "avg_line_length": 42.88444519042969, "blob_id": "8e3ffe0542b5beb5f16532edeceea13f7ca8b88b", "content_id": "995b2b83d63a7cc6b2557ef28644ed7dc35a4496", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9875, "license_type": "no_license", "max_line_length": 120, "num_lines": 225, "path": "/anaconda_project/internal/default_conda_manager.py", "repo_name": "jakirkham/anaconda-project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# ----------------------------------------------------------------------------\n# Copyright © 2016, Continuum Analytics, Inc. 
All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n# ----------------------------------------------------------------------------\n\"\"\"Abstract high-level interface to Conda.\"\"\"\nfrom __future__ import absolute_import\n\nimport codecs\nimport glob\nimport json\nimport os\n\nfrom anaconda_project.conda_manager import CondaManager, CondaEnvironmentDeviations, CondaManagerError\nimport anaconda_project.internal.conda_api as conda_api\nimport anaconda_project.internal.pip_api as pip_api\nimport anaconda_project.internal.makedirs as makedirs\n\nfrom anaconda_project.version import version\n\n\nclass DefaultCondaManager(CondaManager):\n def _timestamp_file(self, prefix, spec):\n return os.path.join(prefix, \"var\", \"cache\", \"anaconda-project\", \"env-specs\", spec.channels_and_packages_hash)\n\n def _timestamp_comparison_directories(self, prefix):\n # this is a little bit heuristic; we are trying to detect\n # if any packages are installed or removed. This may need\n # to become more comprehensive. We don't want to check\n # directories that would change at runtime like /var/run,\n # and we need this to be reasonably fast (so we can't do a\n # full directory walk or something). Remember that on\n # Linux at least a new mtime on a directory means\n # _immediate_ child directory entries were added or\n # removed, changing the files themselves or the files in\n # subdirs will not affect mtime. Windows may be a bit\n # different.\n\n # Linux\n dirs = list(glob.iglob(os.path.join(prefix, \"lib\", \"python*\", \"site-packages\")))\n dirs.append(os.path.join(prefix, \"bin\"))\n dirs.append(os.path.join(prefix, \"lib\"))\n # Windows\n dirs.append(os.path.join(prefix, \"Lib\", \"site-packages\"))\n dirs.append(os.path.join(prefix, \"Library\", \"bin\"))\n dirs.append(os.path.join(prefix, \"Scripts\"))\n # conda-meta\n dirs.append(os.path.join(prefix, \"conda-meta\"))\n\n return dirs\n\n def _timestamp_file_up_to_date(self, prefix, spec):\n # The goal here is to return False if 1) the env spec\n # has changed (different hash) or 2) the environment has\n # been modified (e.g. by pip or conda).\n\n filename = self._timestamp_file(prefix, spec)\n try:\n stamp_mtime = os.path.getmtime(filename)\n except OSError:\n return False\n\n dirs = self._timestamp_comparison_directories(prefix)\n\n for d in dirs:\n try:\n d_mtime = os.path.getmtime(d)\n except OSError:\n d_mtime = 0\n # When we write the timestamp, we put it 1s in the\n # future, so we want >= here (if the d_mtime has gone\n # into the future from when we wrote the timestamp,\n # the directory has changed).\n if d_mtime >= stamp_mtime:\n return False\n\n return True\n\n def _write_timestamp_file(self, prefix, spec):\n filename = self._timestamp_file(prefix, spec)\n makedirs.makedirs_ok_if_exists(os.path.dirname(filename))\n\n try:\n with codecs.open(filename, 'w', encoding='utf-8') as f:\n # we don't read the contents of the file for now, but\n # recording the version in it in case in the future\n # that is useful. We need to write something to the\n # file to bump its mtime if it already exists...\n f.write(json.dumps(dict(anaconda_project_version=version)) + \"\\n\")\n # set the timestamp 1s in the future, which guarantees\n # it doesn't have the same mtime as any files in the\n # environment changed by us; if another process\n # changes some files during the current second, then\n # we would not notice those changes. 
The alternative\n # is that we falsely believe we changed things\n # ourselves. Ultimately clock resolution keeps us from\n # perfection here without some sort of cross-process\n # locking.\n actual_time = os.path.getmtime(filename)\n next_tick_time = actual_time + 1\n os.utime(filename, (next_tick_time, next_tick_time))\n except (IOError, OSError):\n # ignore errors because this is just an optimization, if we\n # fail we will survive\n pass\n\n def _find_conda_missing(self, prefix, spec):\n try:\n installed = conda_api.installed(prefix)\n except conda_api.CondaError as e:\n raise CondaManagerError(\"Conda failed while listing installed packages in %s: %s\" % (prefix, str(e)))\n\n # TODO: we don't verify that the environment contains the right versions\n # https://github.com/Anaconda-Server/anaconda-project/issues/77\n\n missing = set()\n\n for name in spec.conda_package_names_set:\n if name not in installed:\n missing.add(name)\n\n return sorted(list(missing))\n\n def _find_pip_missing(self, prefix, spec):\n # this is an important optimization to avoid a slow \"pip\n # list\" operation if the project has no pip packages\n if len(spec.pip_package_names_set) == 0:\n return []\n\n try:\n installed = pip_api.installed(prefix)\n except pip_api.PipError as e:\n raise CondaManagerError(\"pip failed while listing installed packages in %s: %s\" % (prefix, str(e)))\n\n # TODO: we don't verify that the environment contains the right versions\n # https://github.com/Anaconda-Server/anaconda-project/issues/77\n\n missing = set()\n\n for name in spec.pip_package_names_set:\n if name not in installed:\n missing.add(name)\n\n return sorted(list(missing))\n\n def find_environment_deviations(self, prefix, spec):\n if not os.path.isdir(os.path.join(prefix, 'conda-meta')):\n return CondaEnvironmentDeviations(\n summary=\"'%s' doesn't look like it contains a Conda environment yet.\" % (prefix),\n missing_packages=tuple(spec.conda_package_names_set),\n wrong_version_packages=(),\n missing_pip_packages=tuple(spec.pip_package_names_set),\n wrong_version_pip_packages=(),\n broken=True)\n\n if self._timestamp_file_up_to_date(prefix, spec):\n conda_missing = []\n pip_missing = []\n timestamp_ok = True\n else:\n conda_missing = self._find_conda_missing(prefix, spec)\n pip_missing = self._find_pip_missing(prefix, spec)\n timestamp_ok = False\n\n if len(conda_missing) > 0 or len(pip_missing) > 0:\n summary = \"Conda environment is missing packages: %s\" % (\", \".join(conda_missing + pip_missing))\n elif not timestamp_ok:\n summary = \"Conda environment needs to be marked as up-to-date\"\n else:\n summary = \"OK\"\n return CondaEnvironmentDeviations(summary=summary,\n missing_packages=conda_missing,\n wrong_version_packages=(),\n missing_pip_packages=pip_missing,\n wrong_version_pip_packages=(),\n broken=(not timestamp_ok))\n\n def fix_environment_deviations(self, prefix, spec, deviations=None, create=True):\n if deviations is None:\n deviations = self.find_environment_deviations(prefix, spec)\n\n command_line_packages = set(spec.conda_packages)\n # conda won't let us create a completely empty environment\n if len(command_line_packages) == 0:\n command_line_packages = set(['python'])\n\n if os.path.isdir(os.path.join(prefix, 'conda-meta')):\n missing = deviations.missing_packages\n if len(missing) > 0:\n specs = spec.specs_for_conda_package_names(missing)\n assert len(specs) == len(missing)\n try:\n conda_api.install(prefix=prefix, pkgs=specs, channels=spec.channels)\n except conda_api.CondaError as e:\n raise 
CondaManagerError(\"Failed to install missing packages: {}: {}\".format(\", \".join(missing), str(\n e)))\n elif create:\n # Create environment from scratch\n try:\n conda_api.create(prefix=prefix, pkgs=list(command_line_packages), channels=spec.channels)\n except conda_api.CondaError as e:\n raise CondaManagerError(\"Failed to create environment at %s: %s\" % (prefix, str(e)))\n else:\n raise CondaManagerError(\"Conda environment at %s does not exist\" % (prefix))\n\n # now add pip if needed\n missing = list(deviations.missing_pip_packages)\n if len(missing) > 0:\n specs = spec.specs_for_pip_package_names(missing)\n assert len(specs) == len(missing)\n try:\n pip_api.install(prefix=prefix, pkgs=specs)\n except pip_api.PipError as e:\n raise CondaManagerError(\"Failed to install missing pip packages: {}: {}\".format(\", \".join(missing), str(\n e)))\n\n # write a file to tell us we can short-circuit next time\n self._write_timestamp_file(prefix, spec)\n\n def remove_packages(self, prefix, packages):\n try:\n conda_api.remove(prefix, packages)\n except conda_api.CondaError as e:\n raise CondaManagerError(\"Failed to remove packages from %s: %s\" % (prefix, str(e)))\n" }, { "alpha_fraction": 0.7467889785766602, "alphanum_fraction": 0.752293586730957, "avg_line_length": 31.058822631835938, "blob_id": "ed807be242be71b2987990ff489bb29455afcefa", "content_id": "b02493bddf1ae5ef338fb7447e4649abb2fde303", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 545, "license_type": "no_license", "max_line_length": 117, "num_lines": 17, "path": "/docs/source/install.rst", "repo_name": "jakirkham/anaconda-project", "src_encoding": "UTF-8", "text": "============\nInstallation\n============\n\nYou must have `conda installed <https://conda.io/docs/install/quick.html>`_ to get Anaconda Project.\n\nAnaconda Project is included with Anaconda Distribution (in all versions since 4.3.1).\n\nIf you are using Miniconda, or conda environments, you can also install anaconda-project with the following command::\n\n conda install anaconda-project\n\nTest your installation by running the \"version\" command::\n\n anaconda-project --version\n\nIf it installed correctly, project will respond with the version number.\n" } ]
9
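The long comments in `default_conda_manager.py` above describe a stamp-file trick: after fixing an environment, write a marker file whose mtime is pushed one second into the future, then skip the expensive package check whenever no watched directory has a newer mtime. Below is a minimal, self-contained sketch of that technique; the stamp name, the watched-directory list, and both helper functions are illustrative assumptions rather than part of anaconda-project's API.

```python
import os

STAMP = ".env-stamp"                     # hypothetical stamp-file name
WATCHED = ("bin", "lib", "conda-meta")   # dirs whose mtime signals a change

def mark_up_to_date(prefix):
    """Record that the environment under `prefix` was just verified."""
    path = os.path.join(prefix, STAMP)
    with open(path, "w") as f:
        f.write("ok\n")
    # Push the stamp 1s into the future so it cannot share an mtime with
    # files touched in the same second; as the original comments note,
    # clock resolution makes this a heuristic, not a guarantee.
    future = os.path.getmtime(path) + 1
    os.utime(path, (future, future))

def is_up_to_date(prefix):
    """Return True if nothing watched has changed since the last stamp."""
    path = os.path.join(prefix, STAMP)
    try:
        stamp = os.path.getmtime(path)
    except OSError:
        return False                     # no stamp yet: do the full check
    for d in WATCHED:
        try:
            if os.path.getmtime(os.path.join(prefix, d)) >= stamp:
                return False             # something changed after stamping
        except OSError:
            pass                         # a missing dir counts as unchanged
    return True
```

Calling `mark_up_to_date(prefix)` after a successful install and `is_up_to_date(prefix)` at the top of the next check reproduces the short-circuit that `_timestamp_file_up_to_date` and `_write_timestamp_file` implement above.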
scottpersinger/m5org
https://github.com/scottpersinger/m5org
95974aaf2dd9874b789c5526ec5b775af6f218e1
b23eb7c5292ccea99a45058f8d76a32737ea2b4a
f174f7ed0032809d11f3d45b5aedaab1f3073a5a
refs/heads/master
2021-01-10T19:59:17.440795
2011-09-08T02:28:28
2011-09-08T02:28:28
2,188,481
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6049860119819641, "alphanum_fraction": 0.6137975454330444, "avg_line_length": 27.72222137451172, "blob_id": "49ebfacf8d7105c5b7f44c2b92e4a8a5cea502c4", "content_id": "12a0c2036e3b16f6316c1f8e7c717e1cef97c62f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4653, "license_type": "no_license", "max_line_length": 146, "num_lines": 162, "path": "/m5web.py", "repo_name": "scottpersinger/m5org", "src_encoding": "UTF-8", "text": "from bottle import *\nimport os\nimport os.path\nimport glob\nimport pdb\nimport tarfile\nimport shutil\nimport markdown\nimport pkgutil\n\nimport m5\nfrom m5.compiler import M5Compiler\nfrom m5.app import M5App\n \ndef root_path(*paths):\n return os.path.join(os.path.dirname(__file__), *paths)\n\ndef m5_dir():\n if os.path.exists(root_path(\"m5\")):\n return os.path.abspath(\"m5\")\n else:\n return os.path.abspath(\"../../\")\n\ndef m5_lib_dir():\n return os.path.join(m5_dir(), \"lib\") \n\ndef jqtouch_dir():\n return os.path.join(m5_dir(), \"jqtouch\")\n\n\n \ndef load_app(name):\n return M5App(name, os.path.dirname(__file__), container=\"apps\")\n \n@route('/')\ndef index():\n dir = root_path(\"apps/*\")\n apps = map(lambda path:re.sub(\"apps\", \"app\", path), glob.glob(dir))\n \n return template(\"index\", apps = apps, basename = os.path.basename, host_name = request.headers['Host'], path=request.environ.get('PATH_INFO'))\n\n@route('/:page#(learn|download|about|browse)#')\ndef page(page):\n if page == \"about\":\n body = markdown.markdown(pkgutil.get_data(\"m5\", \"../docs/tutorial/background.md\"))\n else:\n body = \"\"\n return template(page, path=request.environ.get('PATH_INFO'), body = body)\n\n@route (\"/favicon.ico\")\ndef favicon():\n return HTTPResponse(None, 204)\n \n@route('/static/:path#.+#')\ndef server_static(path):\n return static_file(path, root=root_path('static'))\n\n@route('/images/:path#.+#')\ndef server_static(path):\n return static_file(path, root=root_path('images'))\n\n@route ('/docs/:path#.*#')\ndef server_doc(path):\n path = path or \"index.md\"\n body = pkgutil.get_data(\"m5\", \"../docs/tutorial/\" + path)\n if body:\n body = markdown.markdown(body)\n return template(\"docs\", body=body, path=path)\n \n@error(500)\ndef error500(error):\n print error.exception\n print error.traceback\n return \"Error: \" + str(error.exception) + \"\\n\" + str(error.traceback)\n \n@route (\"/app/:name\")\ndef app_root(name):\n # Load app manifest\n app = load_app(name)\n return template(\"app_index\", app_name = name)\n\n@route (\"/app/:app_name/app.html\")\ndef app_run(app_name):\n # Load app manifest\n app = load_app(app_name)\n index_name = app.file_path(app.index_name)\n compiled_name = app.file_path(app.compiled_name)\n \n if not os.path.exists(compiled_name) or \\\n (os.path.exists(index_name) and os.path.getmtime(index_name) > os.path.getmtime(compiled_name)):\n # Need to recompile the app\n print \"Recompiling \" + index_name\n f = open(compiled_name, 'w')\n f.write(M5Compiler().compile(index_name, include_sim=False))\n f.close()\n \n return static_file(app.compiled_name, root=root_path(\"apps/\" + app_name))\n\n@route (\"/app/:app_name/favicon.ico\")\ndef app_favicon(app_name):\n abort(204, None)\n\n@put (\"/app/:app_name/upload\")\ndef app_upload(app_name):\n app = load_app(app_name)\n if os.path.exists(root_path(\"apps\", app_name)):\n shutil.rmtree(root_path(\"apps\", app_name))\n \n app.mkdir()\n content_len = request.headers['Content-Length']\n \n print 
\"Receiving upload for app: \" + app_name + \", length: \" + str(content_len)\n\n    tarfname = root_path(\"apps\", app_name + \".tar\")\n    outf = open(tarfname, \"wb\")\n    putfile = request.body\n    chunk = putfile.read(1024)\n    read_len = 0\n    while read_len < int(content_len):\n        outf.write(chunk)\n        # Advance the running byte count so the loop condition and the\n        # progress message below stay accurate.\n        read_len += len(chunk)\n        print \"Writing chunk..., saw \" + str(read_len) + \" bytes\"\n        chunk = putfile.read(1024)\n        if not chunk:\n            break\n    outf.flush()\n    outf.close()\n\n    tar = tarfile.open(tarfname, \"r\")\n    for tf in tar:\n        outfname = app.file_path(tf.name)\n        if re.match(\"\\..+$\", os.path.basename(outfname)):\n            continue\n        print \" --> \" + outfname\n        app.add_file(tf.name)\n        if not os.path.exists(os.path.dirname(outfname)):\n            os.makedirs(os.path.dirname(outfname))\n        outf = open(outfname, \"w\")\n        outf.write(tar.extractfile(tf.name).read())\n        outf.close()\n    tar.close()\n    os.remove(tarfname)\n\n    outf = open(app.file_path(app.manifest_name), \"w\")\n    app.cache_counter += 1\n    outf.write(app.generate_cache_manifest())\n    outf.close()\n\n    return \"Upload received\"\n\n\n@route (\"/app/:app_name/:path#.+#\")\ndef app_asset(app_name, path):\n    if re.search(\"cache.manifest\", path):\n        response.content_type = \"text/cache-manifest\"\n    return static_file(os.path.basename(path), root=root_path(\"apps/\" + app_name + \"/\" + os.path.dirname(path)))\n\n\n\ndebug(True)\nrun(host='localhost', port=8000,reloader=False)\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.625, "avg_line_length": 23, "blob_id": "f331cc3163a52b13731d4e4bfad0fdff607b4713", "content_id": "1b3237aecdc7bc5554bc71ddebd872...", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 24, "license_type": "no_license", "max_line_length": 23, "num_lines": 1, "path": "/README.txt", "repo_name": "scottpersinger/m5org", "src_encoding": "UTF-8", "text": "# Google API KEY: ABQIAAAAQ9_qGLuB889i6hQV_9vwfhQajMZLzxbT4DnLTdPyNnQt_Cm3-xRCaXq5gp_qXgFKxZf29h5OcFFE_Q\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.7666666507720947, "avg_line_length": 74.5, "blob_id": "1cb2e4d9648f428df60e18523008b975d22a2ee4", "content_id": "e5413ba6fd9a2282128b7ffafb5b06aa3e160d2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 150, "license_type": "no_license", "max_line_length": 141, "num_lines": 2, "path": "/deploy.sh", "repo_name": "scottpersinger/m5org", "src_encoding": "UTF-8", "text": "git push\nssh -i ~/.ssh/ec2default.pem [email protected] \"cd github/m5org; git pull; sudo service m5web restart\"" }, { "alpha_fraction": 0.7488151788711548, "alphanum_fraction": 0.7630331516265869, "avg_line_length": 26.375, "blob_id": "6717c043237a9bc10884a001c97971688f042943", "content_id": "e692b964685f18532e337a800a8335d201849ec0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 211, "license_type": "no_license", "max_line_length": 54, "num_lines": 8, "path": "/run.sh", "repo_name": "scottpersinger/m5org", "src_encoding": "UTF-8", "text": "#!/bin/sh\nexport PYTHONPATH=/home/ubuntu/github/m5/bin\nSCRIPT=`readlink -f $0`\n# Absolute path this script is in, thus /home/user/bin\nSCRIPTPATH=`dirname $SCRIPT`\necho $SCRIPTPATH\ncd $SCRIPTPATH\npython m5web.py\n" } ]
4
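The `app_upload` handler in `m5web.py` above streams a PUT body to a temporary tar file in fixed 1 KB chunks before extracting it. The chunked-copy loop is the part worth isolating, since correct byte accounting is what keeps it honest about short or oversized uploads. Here is a small framework-independent sketch; the function name and parameters are illustrative, and only a byte count plus binary file-like objects are assumed.

```python
def copy_stream(src, dst, expected_len, chunk_size=1024):
    """Copy up to expected_len bytes from src to dst in fixed-size chunks.

    src and dst are binary file-like objects; returns the bytes copied.
    """
    copied = 0
    while copied < expected_len:
        # Never read past the advertised length.
        chunk = src.read(min(chunk_size, expected_len - copied))
        if not chunk:         # short read: the client sent less than promised
            break
        dst.write(chunk)
        copied += len(chunk)  # the accounting step the whole loop hinges on
    return copied
```

With bottle this could be invoked roughly as `copy_stream(request.body, outf, int(request.headers['Content-Length']))`, mirroring the handler above.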
chandlersupple/Continuous-Ball
https://github.com/chandlersupple/Continuous-Ball
727b1b94bcf78575479e9c7ba43eae51c9fb599d
45fb5c9afe97f987211e8ff357094943129a0b44
30f485976d420fb21b37cccf67d7f6a3d51a955a
refs/heads/master
2020-03-18T01:00:56.910211
2018-05-28T23:03:51
2018-05-28T23:03:51
134,123,397
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4543097913265228, "alphanum_fraction": 0.49773168563842773, "avg_line_length": 23.109375, "blob_id": "d0595820a7f573ea47431f70a4e164b04d1c4af7", "content_id": "b0a9e037d16d59f289666c676ea327a2d993c6c6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1543, "license_type": "permissive", "max_line_length": 78, "num_lines": 64, "path": "/main.py", "repo_name": "chandlersupple/Continuous-Ball", "src_encoding": "UTF-8", "text": "import pygame\nimport random\nimport colorsys\nfrom pygame.locals import *\n\npygame.init()\n\nmaster = pygame.display.set_mode((1000,500))\npygame.display.set_caption('Continuous Ball')\nclock = pygame.time.Clock()\n\nwhite = (255, 255, 255)\nblack = (0, 0, 0)\n\nh = 0\n\nclass Ball:\n    def __init__(self):\n        self.x = 500\n        self.y = 250\n        self.x_add = 3\n        self.y_add = -1\n        self.dirx = 1\n        self.diry = 1\n\n    def move(self, color):\n        self.color = color\n\n        # Reverse direction whenever the ball (radius 35) reaches a wall.\n        if (self.x >= 965):\n            self.dirx = self.dirx * (-1)\n        if (self.x <= 35):\n            self.dirx = self.dirx * (-1)\n        if (self.y <= 35):\n            self.diry = self.diry * (-1)\n        if (self.y >= 465):\n            self.diry = self.diry * (-1)\n\n        self.x = self.x + (self.dirx * self.x_add)\n        self.y = self.y + (self.diry * self.y_add)\n        ball = pygame.draw.circle(master, self.color, (self.x, self.y), 35, 0)\n\nball = Ball()\n\nwhile True:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            pygame.quit()\n            raise SystemExit  # stop here; drawing after quit() would fail\n\n    # Step the hue and wrap it at 1 so the colour cycles forever.\n    if (h >= 1):\n        h = 0\n    h = h + 0.0015\n    # colorsys returns floats in [0, 1]; scale to the 0-255 ints pygame needs.\n    r, g, b = colorsys.hsv_to_rgb(h, 1, 1)\n    color_rgb = [r, g, b]\n    color_list = [r, g, b]\n\n    for color in range (0, 3):\n        color_list[color] = int(round((color_rgb[color] * 255), 0))\n    color = (color_list[0], color_list[1], color_list[2])\n\n    master.fill(black)\n    ball.move(color)\n\n    pygame.display.flip()\n    clock.tick(30)\n" } ]
1
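`main.py` above animates the ball by sweeping a hue through HSV space each frame and converting it to the 0-255 integer RGB triple pygame expects. That conversion step can be pulled out on its own; the function name below is an illustrative choice, the 0.0015 step size is the one the script uses, and nothing pygame-specific is required.

```python
import colorsys

def hue_to_rgb255(h):
    """Map a hue in [0, 1) to an (r, g, b) tuple of 0-255 ints."""
    # Full saturation and value; colorsys returns floats in [0, 1].
    r, g, b = colorsys.hsv_to_rgb(h % 1.0, 1.0, 1.0)
    return tuple(int(round(c * 255)) for c in (r, g, b))

# Stepping h by a small constant each frame sweeps the whole spectrum.
h, step = 0.0, 0.0015
for _ in range(5):
    print(hue_to_rgb255(h))
    h += step
```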
erfan1981/Python_Scripts
https://github.com/erfan1981/Python_Scripts
1d972ae8e10e974750a95323ef9ad01acde73175
3031f07e18a4003548224806e33ec71c182a536e
285669437d42c70b22758025b89f1b965f63170d
refs/heads/master
2021-08-29T19:57:08.686171
2017-12-14T21:21:12
2017-12-14T21:21:12
114,295,342
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6609588861465454, "alphanum_fraction": 0.6609588861465454, "avg_line_length": 27.200000762939453, "blob_id": "cc72fd1f939137d9415cac399ba20461847d9210", "content_id": "d4d428198fcc40a573fc990ccedde7f269fd9434", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 292, "license_type": "no_license", "max_line_length": 78, "num_lines": 10, "path": "/CSV_to_JSON.py", "repo_name": "erfan1981/Python_Scripts", "src_encoding": "UTF-8", "text": "import csv\r\nimport json\r\n\r\n# Column names for the rows; passing them explicitly assumes file.csv has\r\n# no header row (a header line would otherwise be emitted as a data record).\r\nfieldnames = (\"WellID\", \"SampleID\", \"Analyte\", \"Result\", \"Sample_Time\", \"Matrix\")\r\n\r\n# Context managers close both files even if the conversion fails midway.\r\nwith open('file.csv', 'r') as csvfile, open('file.json', 'w') as jsonfile:\r\n    reader = csv.DictReader(csvfile, fieldnames)\r\n    out = json.dumps([row for row in reader])\r\n    jsonfile.write(out)\r\n" } ]
1
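`CSV_to_JSON.py` above passes `fieldnames` to `csv.DictReader` explicitly, which is only correct when `file.csv` carries no header row; if the first line is a header, it would be emitted as a bogus data record. A Python 3 variant that instead lets `DictReader` take the field names from the header row (same hypothetical `file.csv` and `file.json` names as the script) looks like this:

```python
import csv
import json

# Assumes the first line of file.csv is a header; DictReader consumes it
# as the field names, so it never appears as a data record.
with open('file.csv', newline='') as csvfile, \
        open('file.json', 'w') as jsonfile:
    reader = csv.DictReader(csvfile)  # field names read from the header row
    json.dump(list(reader), jsonfile, indent=2)
```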